File: metric_unsafe.go

Package: golang-gvisor-gvisor 0.0~20240729.0-5
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metric

import (
	"unsafe"

	"gvisor.dev/gvisor/pkg/atomicbitops"
	"gvisor.dev/gvisor/pkg/gohacks"
	"gvisor.dev/gvisor/pkg/sync"
)

// snapshotDistribution snapshots the sample data of distribution metrics in
// a non-consistent manner.
// Distribution metrics don't need to be read consistently, because any
// inconsistency (i.e. increments that race with the snapshot) will simply be
// detected during the next snapshot instead. Reading them consistently would
// require more synchronization during increments, which we need to keep cheap.
func snapshotDistribution(samples []atomicbitops.Uint64) []uint64 {
	// The number of buckets within a distribution never changes, so there is
	// no race condition from getting the number of buckets upfront.
	numBuckets := len(samples)
	snapshot := make([]uint64, numBuckets)
	if sync.RaceEnabled {
		// runtime.RaceDisable() doesn't actually stop the race detector, so it
		// can't help us here. Instead, call runtime.memmove directly, which is
		// not instrumented by the race detector.
		gohacks.Memmove(unsafe.Pointer(&snapshot[0]), unsafe.Pointer(&samples[0]), unsafe.Sizeof(uint64(0))*uintptr(numBuckets))
	} else {
		for i := range samples {
			snapshot[i] = samples[i].RacyLoad()
		}
	}
	return snapshot
}
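
// exampleSnapshotDistribution is an illustrative sketch (a hypothetical helper,
// not part of the metric API). It assumes atomicbitops.FromUint64 is available,
// as used elsewhere in gVisor, and shows how a caller might take a racy,
// point-in-time copy of a distribution's bucket counts without blocking
// concurrent increments.
func exampleSnapshotDistribution() []uint64 {
	// Hypothetical bucket counts; real distribution metrics manage their own
	// bucket storage.
	buckets := []atomicbitops.Uint64{
		atomicbitops.FromUint64(3), // bucket 0
		atomicbitops.FromUint64(7), // bucket 1
	}
	// Racy but cheap: increments racing with this call are simply picked up
	// by the next snapshot.
	return snapshotDistribution(buckets) // e.g. []uint64{3, 7}
}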

// CheapNowNano returns a timestamp in nanoseconds.
// It is *NOT* measured from the Unix epoch.
// It is monotonic.
//
//go:nosplit
func CheapNowNano() int64 {
	return gohacks.Nanotime()
}
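
// exampleCheapElapsed is an illustrative sketch (a hypothetical helper, not
// part of the metric API). Because CheapNowNano is monotonic but not anchored
// to the Unix epoch, its values are only meaningful when subtracted from one
// another, as in the elapsed-time measurement here.
func exampleCheapElapsed(fn func()) int64 {
	start := CheapNowNano()
	fn()
	return CheapNowNano() - start // elapsed nanoseconds
}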