File: service_runner.go (from vagrant 2.3.7+git20230731.5fc64cde+dfsg-3)

package singleprocess

import (
	"context"
	"io"

	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/go-memdb"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"github.com/hashicorp/vagrant/internal/server/logbuffer"
	"github.com/hashicorp/vagrant/internal/server/proto/vagrant_server"
	"github.com/hashicorp/vagrant/internal/server/singleprocess/state"
)

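// GetRunner returns the runner record for the given runner ID, looking it
// up directly in the state store.
//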
// TODO: test
func (s *service) GetRunner(
	ctx context.Context,
	req *vagrant_server.GetRunnerRequest,
) (*vagrant_server.Runner, error) {
	return s.state.RunnerById(req.RunnerId)
}

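// RunnerConfig implements the bidirectional configuration stream for a
// runner. The first client message must be an Open event carrying the
// runner record; the runner is registered in the state store, and a
// RunnerConfig message is sent immediately and again whenever the
// runner-scoped config variables change. When the stream ends, the runner
// record is deleted (see the TODO below about a reconnect grace period).
//
// A minimal sketch of the client side of this stream, assuming the
// standard generated VagrantClient (client and the runner ID are
// placeholders, and error handling is elided):
//
//	stream, _ := client.RunnerConfig(ctx)
//	_ = stream.Send(&vagrant_server.RunnerConfigRequest{
//		Event: &vagrant_server.RunnerConfigRequest_Open_{
//			Open: &vagrant_server.RunnerConfigRequest_Open{
//				Runner: &vagrant_server.Runner{Id: "runner-id"},
//			},
//		},
//	})
//	for {
//		resp, err := stream.Recv()
//		if err != nil {
//			break
//		}
//		_ = resp.Config // apply the updated config vars
//	}
//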
// TODO: test
func (s *service) RunnerConfig(
	srv vagrant_server.Vagrant_RunnerConfigServer,
) error {
	log := hclog.FromContext(srv.Context())
	ctx, cancel := context.WithCancel(srv.Context())
	defer cancel()

	// Get the request
	event, err := srv.Recv()
	if err != nil {
		return err
	}
	req, ok := event.Event.(*vagrant_server.RunnerConfigRequest_Open_)
	if !ok {
		return status.Errorf(codes.FailedPrecondition,
			"expected open event, got %T", event)
	}
	record := req.Open.Runner

	// Create our record
	log = log.With("runner_id", record.Id)
	log.Trace("registering runner")
	if err := s.state.RunnerCreate(record); err != nil {
		return err
	}

	// Defer deleting this.
	// TODO(mitchellh): this is too aggressive and we want to have some grace
	// period for reconnecting clients. We should clean this up.
	defer func() {
		log.Trace("deleting runner")
		if err := s.state.RunnerDelete(record.Id); err != nil {
			log.Error("failed to delete runner data. This should not happen.", "err", err)
		}
	}()

	// Start a goroutine that keeps receiving on the stream so we can
	// detect when the client exits.
	go func() {
		defer cancel()

		for {
			_, err := srv.Recv()
			if err != nil {
				if err != io.EOF {
					log.Warn("unknown error from recvmsg", "err", err)
				}

				return
			}
		}
	}()

	// Build our config in a loop.
	for {
		ws := memdb.NewWatchSet()

		// Build our config
		config := &vagrant_server.RunnerConfig{}

		// Get our config vars
		vars, err := s.state.ConfigGetWatch(&vagrant_server.ConfigGetRequest{
			Scope: &vagrant_server.ConfigGetRequest_Runner{
				Runner: &vagrant_server.Ref_RunnerId{
					Id: record.Id,
				},
			},
		}, ws)
		if err != nil {
			return err
		}
		config.ConfigVars = vars

		// Send new config
		if err := srv.Send(&vagrant_server.RunnerConfigResponse{
			Config: config,
		}); err != nil {
			return err
		}

		// Nil out the config we just sent so it can be garbage collected while we wait
		config = nil

		// Wait for any changes
		if err := ws.WatchCtx(ctx); err != nil {
			return err
		}
	}
}

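// RunnerJobStream implements the bidirectional job stream between the
// server and a single runner. The protocol implemented below is:
//
//  1. The runner sends a Request event carrying its runner ID.
//  2. The server assigns a job and replies with an Assignment event.
//  3. The runner answers with an Ack (or Error) event; anything else nacks
//     the job so it is queued again.
//  4. While the job runs, the runner streams Heartbeat and Terminal events
//     and finishes with a Complete or Error event. The server may send a
//     Cancel event at any time if the job is canceled.
//
// A minimal sketch of the runner side of steps 1-3, assuming the standard
// generated VagrantClient (client and runnerId are placeholders, and error
// handling is elided):
//
//	stream, _ := client.RunnerJobStream(ctx)
//	_ = stream.Send(&vagrant_server.RunnerJobStreamRequest{
//		Event: &vagrant_server.RunnerJobStreamRequest_Request_{
//			Request: &vagrant_server.RunnerJobStreamRequest_Request{
//				RunnerId: runnerId,
//			},
//		},
//	})
//	resp, _ := stream.Recv() // expect an Assignment event
//	assignment := resp.Event.(*vagrant_server.RunnerJobStreamResponse_Assignment)
//	_ = assignment.Assignment.Job // the job the runner should execute
//	_ = stream.Send(&vagrant_server.RunnerJobStreamRequest{
//		Event: &vagrant_server.RunnerJobStreamRequest_Ack_{
//			Ack: &vagrant_server.RunnerJobStreamRequest_Ack{},
//		},
//	})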
func (s *service) RunnerJobStream(
	server vagrant_server.Vagrant_RunnerJobStreamServer,
) error {
	log := hclog.FromContext(server.Context())
	ctx, cancel := context.WithCancel(server.Context())
	defer cancel()

	// Receive our opening message so we can determine the runner ID.
	req, err := server.Recv()
	if err != nil {
		return err
	}
	reqEvent, ok := req.Event.(*vagrant_server.RunnerJobStreamRequest_Request_)
	if !ok {
		return status.Errorf(codes.FailedPrecondition,
			"first message must be a Request event")
	}
	log = log.With("runner_id", reqEvent.Request.RunnerId)

	// Get the runner to validate it is registered
	runner, err := s.state.RunnerById(reqEvent.Request.RunnerId)
	if err != nil {
		return err
	}

	// Get a job assignment for this runner
	job, err := s.state.JobAssignForRunner(ctx, runner)
	if err != nil {
		return err
	}

	// Send the job assignment.
	//
	// If this returns an error, we hold onto it until we record the ack
	// status in the DB. We do this because if we fail to send the job
	// assignment we want to nack the job so it is queued again.
	err = server.Send(&vagrant_server.RunnerJobStreamResponse{
		Event: &vagrant_server.RunnerJobStreamResponse_Assignment{
			Assignment: &vagrant_server.RunnerJobStreamResponse_JobAssignment{
				Job: job.Job,
			},
		},
	})

	// Wait for an ack. We only do this if the job assignment above
	// succeeded. If it didn't succeed, the client will never send us
	// an ack.
	ack := false
	if err == nil { // if sending the job assignment was a success
		req, err = server.Recv()

		// If we received a message, we inspect it. If we failed to
		// receive a message, err is already set and ack stays false
		// so that we nack the job later.
		if err == nil {
			switch req.Event.(type) {
			case *vagrant_server.RunnerJobStreamRequest_Ack_:
				ack = true

			case *vagrant_server.RunnerJobStreamRequest_Error_:
				ack = false

			default:
				ack = false
				err = status.Errorf(codes.FailedPrecondition,
					"ack expected, got: %T", req.Event)
			}
		}
	}

	// Send the ack.
	job, ackerr := s.state.JobAck(job.Id, ack)
	if ackerr != nil {
		// If this fails, we just log, there is nothing more we can do.
		log.Warn("job ack failed", "outer_error", err, "error", ackerr)

		// If we had no outer error, set the ackerr so that we exit. If
		// we do have an outer error, then the ack error only shows up in
		// the log.
		if err == nil {
			err = ackerr
		}
	}

	// If we have an error, return that. We also return if we didn't ack for
	// any reason. This error can be set at any point since job assignment.
	if err != nil || !ack {
		return err
	}

	// Start a goroutine that watches for job changes
	jobCh := make(chan *state.Job, 1)
	errCh := make(chan error, 1)
	go func() {
		for {
			ws := memdb.NewWatchSet()
			job, err = s.state.JobById(job.Id, ws)
			if err != nil {
				errCh <- err
				return
			}
			if job == nil {
				errCh <- status.Errorf(codes.Internal, "job disappeared")
				return
			}

			// Send the job
			select {
			case jobCh <- job:
			case <-ctx.Done():
				return
			}

			// Wait for the job to update
			if err := ws.WatchCtx(ctx); err != nil {
				if ctx.Err() == nil {
					errCh <- err
				}

				return
			}
		}
	}()

	// Create a goroutine that just waits for events. We have to do this
	// so we can exit properly on client-side close.
	eventCh := make(chan *vagrant_server.RunnerJobStreamRequest, 1)
	go func() {
		defer cancel()

		for {
			log.Trace("waiting for job stream event")
			req, err := server.Recv()
			if err == io.EOF {
				// On EOF, the client closed their write side of the stream,
				// so we treat the stream as done and exit the read loop.
				return
			}

			if err != nil {
				// For any other error, we send the error along and exit the
				// read loop. The sent error will be picked up and sent back
				// as a result to the client.
				errCh <- err
				return
			}
			log.Trace("event received", "event", req.Event)

			// Send the event down
			select {
			case eventCh <- req:
			case <-ctx.Done():
				return
			}

			// If this is a terminating event, we exit this loop
			switch event := req.Event.(type) {
			case *vagrant_server.RunnerJobStreamRequest_Complete_:
				log.Debug("job stream recv loop exiting due to completion")
				return
			case *vagrant_server.RunnerJobStreamRequest_Error_:
				log.Debug("job stream recv loop exiting due to error",
					"error", event.Error.Error.Message)
				return
			}
		}
	}()

	// Receive events in a loop. lastJob tracks the most recently observed
	// job record so we only react when it actually changes (for example,
	// when a new cancel time is set).
	var lastJob *vagrant_server.Job
	for {
		select {
		case <-ctx.Done():
			// We need to drain the event channel
			for {
				select {
				case req := <-eventCh:
					if err := s.handleJobStreamRequest(log, job, server, req); err != nil {
						return err
					}
				default:
					return nil
				}
			}

		case err := <-errCh:
			return err

		case req := <-eventCh:
			if err := s.handleJobStreamRequest(log, job, server, req); err != nil {
				return err
			}

		case job := <-jobCh:
			if lastJob == job.Job {
				continue
			}

			// If the job is canceled, send that event. We send this each time
			// the cancel time changes. The cancel time only changes if multiple
			// cancel requests are made.
			if job.CancelTime != nil &&
				(lastJob == nil || lastJob.CancelTime != job.CancelTime) {
				// Force the cancel if the job is in an error state. This must be
				// the case because we would've already exited the loop if we had
				// naturally received a terminal event.
				force := job.State == vagrant_server.Job_ERROR

				err := server.Send(&vagrant_server.RunnerJobStreamResponse{
					Event: &vagrant_server.RunnerJobStreamResponse_Cancel{
						Cancel: &vagrant_server.RunnerJobStreamResponse_JobCancel{
							Force: force,
						},
					},
				})
				if err != nil {
					return err
				}

				// On force we exit immediately.
				if force {
					return nil
				}
			}

			lastJob = job.Job
		}
	}
}

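// handleJobStreamRequest processes a single event received on the job
// stream: Complete and Error events mark the job as finished in the state
// store, Heartbeat events refresh the job's liveness, and Terminal events
// are appended to the job's output buffer. Unknown events are logged and
// otherwise ignored.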
func (s *service) handleJobStreamRequest(
	log hclog.Logger,
	job *state.Job,
	srv vagrant_server.Vagrant_RunnerJobStreamServer,
	req *vagrant_server.RunnerJobStreamRequest,
) error {
	log.Trace("event received", "event", req.Event)
	switch event := req.Event.(type) {
	case *vagrant_server.RunnerJobStreamRequest_Complete_:
		return s.state.JobComplete(job.Id, event.Complete.Result, nil)

	case *vagrant_server.RunnerJobStreamRequest_Error_:
		return s.state.JobComplete(job.Id, nil, status.FromProto(event.Error.Error).Err())

	case *vagrant_server.RunnerJobStreamRequest_Heartbeat_:
		return s.state.JobHeartbeat(job.Id)

	case *vagrant_server.RunnerJobStreamRequest_Terminal:
		// This shouldn't happen but we want to protect against it to prevent
		// a panic.
		if job.OutputBuffer == nil {
			log.Warn("got terminal event but internal output buffer is nil, dropping lines")
			return nil
		}

		// Convert the terminal events into output buffer entries
		entries := make([]logbuffer.Entry, len(event.Terminal.Events))
		for i, ev := range event.Terminal.Events {
			entries[i] = ev
		}

		// Write the events
		job.OutputBuffer.Write(entries...)

		return nil

	default:
		log.Warn("unexpected event received", "event", req.Event)
	}

	return nil
}