File: atomic_idempotent.ll

; RUN: llc < %s -march=x86-64 -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=X64
; RUN: llc < %s -march=x86 -mattr=+sse2 -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=X32

; On x86, an atomic read-modify-write operation that does not change the
; value in memory (such as an atomic `add 0`) can be replaced by an mfence
; followed by a plain mov: since the stored value is unchanged, only the
; load and the ordering semantics need to be preserved.
; This is explained (together with the motivation for the optimization) in
; http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
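;
; As a sketch (not itself checked by this test): on x86-64, `add8` below
; would otherwise need a locked read-modify-write such as
;   lock xaddb %al, (%rdi)
; but can instead be lowered to
;   mfence
;   movb (%rdi), %al
; since the value in memory is left unchanged. (Registers here assume the
; SysV calling convention; the exact allocation may differ.)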

define i8 @add8(i8* %p) {
; CHECK-LABEL: add8
; CHECK: mfence
; CHECK: movb
  %1 = atomicrmw add i8* %p, i8 0 monotonic
  ret i8 %1
}

define i16 @or16(i16* %p) {
; CHECK-LABEL: or16
; CHECK: mfence
; CHECK: movw
  %1 = atomicrmw or i16* %p, i16 0 acquire
  ret i16 %1
}

define i32 @xor32(i32* %p) {
; CHECK-LABEL: xor32
; CHECK: mfence
; CHECK: movl
  %1 = atomicrmw xor i32* %p, i32 0 release
  ret i32 %1
}

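; On 32-bit x86 there is no plain 64-bit integer load, so the fenced-load
; form is not expected for i64: the operation presumably stays a locked
; cmpxchg8b loop, which needs no separate mfence.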
define i64 @sub64(i64* %p) {
; CHECK-LABEL: sub64
; X64: mfence
; X64: movq
; X32-NOT: mfence
  %1 = atomicrmw sub i64* %p, i64 0 seq_cst
  ret i64 %1
}

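; i128 is wider than the native width of either target, so the idempotent
; atomicrmw is not turned into an mfence + load here.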
define i128 @or128(i128* %p) {
; CHECK-LABEL: or128
; CHECK-NOT: mfence
  %1 = atomicrmw or i128* %p, i128 0 monotonic
  ret i128 %1
}

; For 'and', the idempotent operand is -1 (all bits set).
define i32 @and32(i32* %p) {
; CHECK-LABEL: and32
; CHECK: mfence
; CHECK: movl
  %1 = atomicrmw and i32* %p, i32 -1 acq_rel
  ret i32 %1
}