package seccheck

import (
	"unsafe"

	"gvisor.dev/gvisor/pkg/gohacks"
	"gvisor.dev/gvisor/pkg/sync"
)

// SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race
// with any writer critical sections in seq.
//
//go:nosplit
func SeqAtomicLoadSinkSlice(seq *sync.SeqCount, ptr *[]Sink) []Sink {
	for {
		if val, ok := SeqAtomicTryLoadSinkSlice(seq, seq.BeginRead(), ptr); ok {
			return val
		}
	}
}
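
// The sketches below (and after the remaining functions) are illustrative
// only: sinkRegistry, its fields, and its methods are hypothetical names
// invented for this file's examples; they are not part of the seccheck
// package or of the generated code above.
type sinkRegistry struct {
	mu          sync.Mutex    // serializes writers; readers never take it
	seq         sync.SeqCount // protects the fields below
	sinks       []Sink        // protected by seq
	backupSinks []Sink        // protected by seq; second field, to motivate TryLoad below
}

// snapshotSinks returns a copy of r.sinks without locking, retrying
// internally until the copy does not race with a writer critical section.
func (r *sinkRegistry) snapshotSinks() []Sink {
	return SeqAtomicLoadSinkSlice(&r.seq, &r.sinks)
}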

// SeqAtomicTryLoad returns a copy of *ptr while in a reader critical section
// in seq initiated by a call to seq.BeginRead() that returned epoch. If the
// read would race with a writer critical section, SeqAtomicTryLoad returns
// (unspecified, false).
//
//go:nosplit
func SeqAtomicTryLoadSinkSlice(seq *sync.SeqCount, epoch sync.SeqCountEpoch, ptr *[]Sink) (val []Sink, ok bool) {
	if sync.RaceEnabled {
		// Copy with an uninstrumented memmove: the read may race with a
		// writer, which the race detector would otherwise report, but
		// seq.ReadOk below detects any such race and the caller then
		// discards val.
		gohacks.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
	} else {
		val = *ptr
	}
	ok = seq.ReadOk(epoch)
	return
}
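
// A hypothetical reader that composes two loads under one read epoch; this is
// the case SeqAtomicTryLoadSinkSlice exists for, since here the caller, not
// the helper, owns the retry loop. If both loads report ok for the same
// epoch, no writer critical section overlapped either copy, so the pair is a
// consistent snapshot. (sinkRegistry and its fields are the illustrative
// names sketched above, not part of the package.)
func (r *sinkRegistry) snapshotBoth() (primary, backup []Sink) {
	for {
		epoch := r.seq.BeginRead()
		p, ok1 := SeqAtomicTryLoadSinkSlice(&r.seq, epoch, &r.sinks)
		b, ok2 := SeqAtomicTryLoadSinkSlice(&r.seq, epoch, &r.backupSinks)
		if ok1 && ok2 {
			return p, b
		}
	}
}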

// SeqAtomicStore sets *ptr to a copy of val, ensuring that any racing reader
// critical sections are forced to retry.
//
//go:nosplit
func SeqAtomicStoreSinkSlice(seq *sync.SeqCount, ptr *[]Sink, val []Sink) {
	seq.BeginWrite()
	SeqAtomicStoreSeqedSinkSlice(ptr, val)
	seq.EndWrite()
}
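
// A hypothetical writer for a single seq-protected field. SeqCount allows
// only one writer at a time, so writers are assumed to be serialized by r.mu;
// readers never take the mutex and instead retry via the load helpers above.
// The new slice is built before the store so the writer critical section
// stays short.
func (r *sinkRegistry) addSink(s Sink) {
	r.mu.Lock()
	defer r.mu.Unlock()
	newSinks := append(append([]Sink(nil), r.sinks...), s)
	SeqAtomicStoreSinkSlice(&r.seq, &r.sinks, newSinks)
}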

// SeqAtomicStoreSeqed sets *ptr to a copy of val.
//
// Preconditions: ptr is protected by a SeqCount that will be in a writer
// critical section throughout the call to SeqAtomicStore.
//
//go:nosplit
func SeqAtomicStoreSeqedSinkSlice(ptr *[]Sink, val []Sink) {
	if sync.RaceEnabled {
		// Use an uninstrumented memmove so that the racing (but
		// SeqCount-protected) reads in SeqAtomicTryLoadSinkSlice are not
		// reported by the race detector.
		gohacks.Memmove(unsafe.Pointer(ptr), unsafe.Pointer(&val), unsafe.Sizeof(val))
	} else {
		*ptr = val
	}
}
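
// A hypothetical writer that updates both seq-protected slices in a single
// writer critical section, which is what SeqAtomicStoreSeqedSinkSlice is
// for: readers composing the two loads under one epoch (as in snapshotBoth
// above) then never observe one field updated and the other not. setAllSinks
// is an invented name, not part of the package.
func (r *sinkRegistry) setAllSinks(primary, backup []Sink) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.seq.BeginWrite()
	SeqAtomicStoreSeqedSinkSlice(&r.sinks, primary)
	SeqAtomicStoreSeqedSinkSlice(&r.backupSinks, backup)
	r.seq.EndWrite()
}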