File: atomics.go

package info (click to toggle)
golang-1.24 1.24.4-4
  • links: PTS, VCS
  • area: main
  • in suites: experimental, forky, sid
  • size: 167,952 kB
  • sloc: asm: 154,901; ansic: 7,009; sh: 2,267; javascript: 1,705; perl: 1,052; python: 421; makefile: 110; cpp: 39; f90: 8; awk: 7; objc: 4
file content (95 lines) | stat: -rw-r--r-- 2,573 bytes parent folder | download | duplicates (8)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
// asmcheck

// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// These tests check that atomic instructions without dynamic checks are
// generated for architectures that support them

package codegen

import "sync/atomic"

// Counter wraps an int32 so that Increment can exercise the compiler's
// lowering of sync/atomic add operations in this asmcheck test.
type Counter struct {
	count int32
}

// Increment atomically adds 1 to the counter. The asmcheck directives below
// verify which atomic instruction sequence each architecture variant emits.
func (c *Counter) Increment() {
	// Check that ARM64 v8.0 has both atomic instruction (LDADDALW) and a dynamic check
	// (for arm64HasATOMICS), while ARM64 v8.1 has only atomic and no dynamic check.
	// arm64/v8.0:"LDADDALW"
	// arm64/v8.1:"LDADDALW"
	// arm64/v8.0:".*arm64HasATOMICS"
	// arm64/v8.1:-".*arm64HasATOMICS"
	// amd64:"LOCK",-"CMPXCHG"
	atomic.AddInt32(&c.count, 1)
}

// atomicLogical64 exercises codegen for 64-bit atomic And/Or. Each operation
// appears twice: once with the result discarded (which should lower to a
// plain atomic RMW instruction — LOCK AND/OR on amd64) and once with the
// result used (which needs the old value, so amd64 falls back to a CMPXCHG
// loop). On arm64, v8.0 additionally requires a dynamic arm64HasATOMICS
// check while v8.1 (LSE) does not.
func atomicLogical64(x *atomic.Uint64) uint64 {
	var r uint64

	// arm64/v8.0:"LDCLRALD"
	// arm64/v8.1:"LDCLRALD"
	// arm64/v8.0:".*arm64HasATOMICS"
	// arm64/v8.1:-".*arm64HasATOMICS"
	// On amd64, make sure we use LOCK+AND instead of CMPXCHG when we don't use the result.
	// amd64:"LOCK",-"CMPXCHGQ"
	x.And(11)
	// arm64/v8.0:"LDCLRALD"
	// arm64/v8.1:"LDCLRALD"
	// arm64/v8.0:".*arm64HasATOMICS"
	// arm64/v8.1:-".*arm64HasATOMICS"
	// amd64:"LOCK","CMPXCHGQ"
	r += x.And(22)

	// arm64/v8.0:"LDORALD"
	// arm64/v8.1:"LDORALD"
	// arm64/v8.0:".*arm64HasATOMICS"
	// arm64/v8.1:-".*arm64HasATOMICS"
	// On amd64, make sure we use LOCK+OR instead of CMPXCHG when we don't use the result.
	// amd64:"LOCK",-"CMPXCHGQ"
	x.Or(33)
	// arm64/v8.0:"LDORALD"
	// arm64/v8.1:"LDORALD"
	// arm64/v8.0:".*arm64HasATOMICS"
	// arm64/v8.1:-".*arm64HasATOMICS"
	// amd64:"LOCK","CMPXCHGQ"
	r += x.Or(44)

	return r
}

// atomicLogical32 is the 32-bit counterpart of atomicLogical64: it checks
// that discarded-result atomic And/Or calls lower to plain atomic RMW
// instructions (LOCK AND/OR on amd64, word-sized LDCLRALW/LDORALW on arm64),
// while calls that use the result require CMPXCHG on amd64, and that only
// arm64 v8.0 emits the dynamic arm64HasATOMICS check.
func atomicLogical32(x *atomic.Uint32) uint32 {
	var r uint32

	// arm64/v8.0:"LDCLRALW"
	// arm64/v8.1:"LDCLRALW"
	// arm64/v8.0:".*arm64HasATOMICS"
	// arm64/v8.1:-".*arm64HasATOMICS"
	// On amd64, make sure we use LOCK+AND instead of CMPXCHG when we don't use the result.
	// amd64:"LOCK",-"CMPXCHGL"
	x.And(11)
	// arm64/v8.0:"LDCLRALW"
	// arm64/v8.1:"LDCLRALW"
	// arm64/v8.0:".*arm64HasATOMICS"
	// arm64/v8.1:-".*arm64HasATOMICS"
	// amd64:"LOCK","CMPXCHGL"
	r += x.And(22)

	// arm64/v8.0:"LDORALW"
	// arm64/v8.1:"LDORALW"
	// arm64/v8.0:".*arm64HasATOMICS"
	// arm64/v8.1:-".*arm64HasATOMICS"
	// On amd64, make sure we use LOCK+OR instead of CMPXCHG when we don't use the result.
	// amd64:"LOCK",-"CMPXCHGL"
	x.Or(33)
	// arm64/v8.0:"LDORALW"
	// arm64/v8.1:"LDORALW"
	// arm64/v8.0:".*arm64HasATOMICS"
	// arm64/v8.1:-".*arm64HasATOMICS"
	// amd64:"LOCK","CMPXCHGL"
	r += x.Or(44)

	return r
}