File: context_queue.go

// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package systrap

import (
	"sync/atomic"

	"gvisor.dev/gvisor/pkg/sentry/platform"
)

// LINT.IfChange
const (
	// maxContextQueueEntries is the size of the ringbuffer. It is one
	// larger than the maximum number of guest contexts, so a correctly
	// operating queue can never become full.
	maxContextQueueEntries uint32 = uint32(maxGuestContexts) + 1
)

type queuedContext struct {
	contextID uint32
	threadID  uint32
}

// contextQueue is a structure shared with each stub thread that is used
// to signal to stub threads which contexts are ready to resume running.
//
// It is a lockless ringbuffer where threads police themselves on whether
// they should continue waiting for a context or go to sleep if they are
// not needed.
type contextQueue struct {
	// start is an index used for taking contexts out of the ringbuffer.
	start uint32
	// end is an index used for putting new contexts into the ringbuffer.
	end uint32

	// numActiveThreads indicates to the sentry how many stubs are running.
	// It is changed only by stub threads.
	numActiveThreads uint32
	// numSpinningThreads indicates to the sentry how many stubs are waiting
	// to receive a context from the queue, and are not doing useful work.
	numSpinningThreads uint32
	// numThreadsToWakeup is the number of threads requested by the
	// Sentry to wake up. The Sentry increments it, and stub threads
	// decrement it.
	numThreadsToWakeup uint32
	// numActiveContexts is the number of running and waiting contexts.
	numActiveContexts uint32
	// numAwakeContexts is the number of awake contexts. It includes all
	// active contexts and contexts that are running in the Sentry.
	numAwakeContexts uint32

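	// fastPathDisabled is set when the stub fast path is turned off;
	// stub threads should then not spin waiting for new contexts.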
	fastPathDisabled uint32
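	// usedFastPath is set by stub threads to report that the fast path
	// was used; add resets it and forwards the report to the global
	// fastpath state.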
	usedFastPath     uint32
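	// ringbuffer holds queued contexts, each encoded as described in the
	// comment on contextQueueIndexShift below.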
	ringbuffer       [maxContextQueueEntries]uint64
}

const (
	// Each element of the contextQueue ring buffer is the sum of its
	// queue index shifted left by CQ_INDEX_SHIFT and a context ID.
	contextQueueIndexShift = 32
)

// LINT.ThenChange(./sysmsg/sysmsg_lib.c)
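
// As an illustration of the encoding above, an entry can be unpacked as
// follows (decodeEntry is a hypothetical helper, not part of this
// package; the authoritative consumer lives in ./sysmsg/sysmsg_lib.c):
//
//	func decodeEntry(v uint64) (queueIdx uint32, contextID uint32) {
//		return uint32(v >> contextQueueIndexShift), uint32(v)
//	}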

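// init fills every ringbuffer slot with invalidContextID and resets all
// counters. The fast path starts out disabled.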
func (q *contextQueue) init() {
	for i := uint32(0); i < maxContextQueueEntries; i++ {
		q.ringbuffer[i] = uint64(invalidContextID)
	}
	// Allow tests to trigger overflows of start and end.
	idx := ^uint32(0) - maxContextQueueEntries*4
	atomic.StoreUint32(&q.start, idx)
	atomic.StoreUint32(&q.end, idx)
	atomic.StoreUint32(&q.numActiveThreads, 0)
	atomic.StoreUint32(&q.numSpinningThreads, 0)
	atomic.StoreUint32(&q.numThreadsToWakeup, 0)
	atomic.StoreUint32(&q.numActiveContexts, 0)
	atomic.StoreUint32(&q.numAwakeContexts, 0)
	atomic.StoreUint32(&q.fastPathDisabled, 1)
	atomic.StoreUint32(&q.usedFastPath, 0)
}

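// isEmpty returns true if no contexts are queued, i.e. start has caught
// up with end.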
func (q *contextQueue) isEmpty() bool {
	return atomic.LoadUint32(&q.start) == atomic.LoadUint32(&q.end)
}

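// queuedContexts returns the number of contexts currently sitting in
// the ringbuffer.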
func (q *contextQueue) queuedContexts() uint32 {
	return (atomic.LoadUint32(&q.end) + maxContextQueueEntries - atomic.LoadUint32(&q.start)) % maxContextQueueEntries
}

// add puts the given ctx onto the context queue and records the state of
// the subprocess after insertion, to see whether there are more active
// stub threads or more waiting contexts.
func (q *contextQueue) add(ctx *sharedContext) *platform.ContextError {
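	// Record when this context started waiting in the queue.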
	ctx.startWaitingTS = cputicks()

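	// Mirror the global stub-fast-path decision into the shared queue so
	// stub threads know whether they should spin.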
	if fastpath.stubFastPath() {
		q.enableFastPath()
	} else {
		q.disableFastPath()
	}
	contextID := ctx.contextID
	atomic.AddUint32(&q.numActiveContexts, 1)
	next := atomic.AddUint32(&q.end, 1)
	if (next % maxContextQueueEntries) ==
		(atomic.LoadUint32(&q.start) % maxContextQueueEntries) {
		// Reachable only if shared memory was corrupted: a well-formed
		// queue can never be full.
		return corruptedSharedMemoryErr("context queue is full, indicates tampering with queue counters")
	}
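	// Publish the entry tagged with its queue index in the upper 32 bits
	// (see the comment on contextQueueIndexShift above).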
	idx := next - 1
	next = idx % maxContextQueueEntries
	v := (uint64(idx) << contextQueueIndexShift) + uint64(contextID)
	atomic.StoreUint64(&q.ringbuffer[next], v)

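	// If stub threads used the fast path since the last add, record that
	// in the global fastpath state and reset the flag.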
	if atomic.SwapUint32(&q.usedFastPath, 0) != 0 {
		fastpath.usedStubFastPath.Store(true)
	}
	return nil
}
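
// Stub threads dequeue entries on the other side of this shared memory
// (see ./sysmsg/sysmsg_lib.c). A minimal Go sketch of such a dequeue,
// assuming the consumer claims a queue index by advancing start and then
// validates the index tag before trusting the slot, could look like
// (popSketch is illustrative only, not part of this package):
//
//	func (q *contextQueue) popSketch() (uint32, bool) {
//		idx := atomic.AddUint32(&q.start, 1) - 1
//		slot := idx % maxContextQueueEntries
//		v := atomic.SwapUint64(&q.ringbuffer[slot], uint64(invalidContextID))
//		if uint32(v>>contextQueueIndexShift) != idx {
//			// Stale entry, or the producer has not published yet.
//			return invalidContextID, false
//		}
//		return uint32(v), true
//	}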

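// disableFastPath tells stub threads not to spin waiting for new
// contexts on the queue.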
func (q *contextQueue) disableFastPath() {
	atomic.StoreUint32(&q.fastPathDisabled, 1)
}

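// enableFastPath allows stub threads to spin waiting for new contexts
// on the queue.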
func (q *contextQueue) enableFastPath() {
	atomic.StoreUint32(&q.fastPathDisabled, 0)
}

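// fastPathEnabled reports whether stub threads may spin on the queue.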
func (q *contextQueue) fastPathEnabled() bool {
	return atomic.LoadUint32(&q.fastPathDisabled) == 0
}