; RUN: opt -S %s -atomic-expand | FileCheck %s
;; Verify the cmpxchg and atomicrmw expansions where sub-word-size
;; instructions are not available.
;;; NOTE: this test is mostly target-independent -- any target which
;;; doesn't support cmpxchg of sub-word sizes would do.
target datalayout = "E-m:e-i64:64-n32:64-S128"
target triple = "sparcv9-unknown-unknown"
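
;; Expansion strategy (as the checks below verify): the pointer is rounded
;; down to a 32-bit-aligned address, a shift amount is derived from the byte
;; offset within that word (xor'd against 3 because this is a big-endian
;; target), and a mask selects the sub-word lane. The operation is then
;; performed on the containing i32 via a full-word cmpxchg loop.
;; For example, for an i8 at byte offset 0: PtrLSB = 0, ShiftAmt = (0 xor 3) * 8
;; = 24, and Mask = 255 << 24 = 0xFF000000, i.e. the most significant byte, as
;; expected on a big-endian target.
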
; CHECK-LABEL: @test_cmpxchg_i8(
; CHECK: fence seq_cst
; CHECK: %0 = ptrtoint i8* %arg to i64
; CHECK: %1 = and i64 %0, -4
; CHECK: %AlignedAddr = inttoptr i64 %1 to i32*
; CHECK: %PtrLSB = and i64 %0, 3
; CHECK: %2 = xor i64 %PtrLSB, 3
; CHECK: %3 = shl i64 %2, 3
; CHECK: %ShiftAmt = trunc i64 %3 to i32
; CHECK: %Mask = shl i32 255, %ShiftAmt
; CHECK: %Inv_Mask = xor i32 %Mask, -1
; CHECK: %4 = zext i8 %new to i32
; CHECK: %5 = shl i32 %4, %ShiftAmt
; CHECK: %6 = zext i8 %old to i32
; CHECK: %7 = shl i32 %6, %ShiftAmt
; CHECK: %8 = load i32, i32* %AlignedAddr
; CHECK: %9 = and i32 %8, %Inv_Mask
; CHECK: br label %partword.cmpxchg.loop
; CHECK:partword.cmpxchg.loop:
; CHECK: %10 = phi i32 [ %9, %entry ], [ %16, %partword.cmpxchg.failure ]
; CHECK: %11 = or i32 %10, %5
; CHECK: %12 = or i32 %10, %7
; CHECK: %13 = cmpxchg i32* %AlignedAddr, i32 %12, i32 %11 monotonic monotonic
; CHECK: %14 = extractvalue { i32, i1 } %13, 0
; CHECK: %15 = extractvalue { i32, i1 } %13, 1
; CHECK: br i1 %15, label %partword.cmpxchg.end, label %partword.cmpxchg.failure
; CHECK:partword.cmpxchg.failure:
; CHECK: %16 = and i32 %14, %Inv_Mask
; CHECK: %17 = icmp ne i32 %10, %16
; CHECK: br i1 %17, label %partword.cmpxchg.loop, label %partword.cmpxchg.end
; CHECK:partword.cmpxchg.end:
; CHECK: %shifted = lshr i32 %14, %ShiftAmt
; CHECK: %extracted = trunc i32 %shifted to i8
; CHECK: %18 = insertvalue { i8, i1 } undef, i8 %extracted, 0
; CHECK: %19 = insertvalue { i8, i1 } %18, i1 %15, 1
; CHECK: fence seq_cst
; CHECK: %ret = extractvalue { i8, i1 } %19, 0
; CHECK: ret i8 %ret
define i8 @test_cmpxchg_i8(i8* %arg, i8 %old, i8 %new) {
entry:
  %ret_succ = cmpxchg i8* %arg, i8 %old, i8 %new seq_cst monotonic
  %ret = extractvalue { i8, i1 } %ret_succ, 0
  ret i8 %ret
}
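
;; The i16 expansion differs from the i8 case only in the lane computation:
;; the byte offset is xor'd with 2 instead of 3, and the mask starts from
;; 65535 (0xFFFF) instead of 255.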
; CHECK-LABEL: @test_cmpxchg_i16(
; CHECK: fence seq_cst
; CHECK: %0 = ptrtoint i16* %arg to i64
; CHECK: %1 = and i64 %0, -4
; CHECK: %AlignedAddr = inttoptr i64 %1 to i32*
; CHECK: %PtrLSB = and i64 %0, 3
; CHECK: %2 = xor i64 %PtrLSB, 2
; CHECK: %3 = shl i64 %2, 3
; CHECK: %ShiftAmt = trunc i64 %3 to i32
; CHECK: %Mask = shl i32 65535, %ShiftAmt
; CHECK: %Inv_Mask = xor i32 %Mask, -1
; CHECK: %4 = zext i16 %new to i32
; CHECK: %5 = shl i32 %4, %ShiftAmt
; CHECK: %6 = zext i16 %old to i32
; CHECK: %7 = shl i32 %6, %ShiftAmt
; CHECK: %8 = load i32, i32* %AlignedAddr
; CHECK: %9 = and i32 %8, %Inv_Mask
; CHECK: br label %partword.cmpxchg.loop
; CHECK:partword.cmpxchg.loop:
; CHECK: %10 = phi i32 [ %9, %entry ], [ %16, %partword.cmpxchg.failure ]
; CHECK: %11 = or i32 %10, %5
; CHECK: %12 = or i32 %10, %7
; CHECK: %13 = cmpxchg i32* %AlignedAddr, i32 %12, i32 %11 monotonic monotonic
; CHECK: %14 = extractvalue { i32, i1 } %13, 0
; CHECK: %15 = extractvalue { i32, i1 } %13, 1
; CHECK: br i1 %15, label %partword.cmpxchg.end, label %partword.cmpxchg.failure
; CHECK:partword.cmpxchg.failure:
; CHECK: %16 = and i32 %14, %Inv_Mask
; CHECK: %17 = icmp ne i32 %10, %16
; CHECK: br i1 %17, label %partword.cmpxchg.loop, label %partword.cmpxchg.end
; CHECK:partword.cmpxchg.end:
; CHECK: %shifted = lshr i32 %14, %ShiftAmt
; CHECK: %extracted = trunc i32 %shifted to i16
; CHECK: %18 = insertvalue { i16, i1 } undef, i16 %extracted, 0
; CHECK: %19 = insertvalue { i16, i1 } %18, i1 %15, 1
; CHECK: fence seq_cst
; CHECK: %ret = extractvalue { i16, i1 } %19, 0
; CHECK: ret i16 %ret
define i16 @test_cmpxchg_i16(i16* %arg, i16 %old, i16 %new) {
entry:
  %ret_succ = cmpxchg i16* %arg, i16 %old, i16 %new seq_cst monotonic
  %ret = extractvalue { i16, i1 } %ret_succ, 0
  ret i16 %ret
}
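
;; atomicrmw add: the shifted operand is added to the loaded word, the result
;; is masked back to the target halfword and merged with the untouched bytes
;; of the previous value, and the merged word is installed with a full-word
;; cmpxchg that retries until it succeeds.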
; CHECK-LABEL: @test_add_i16(
; CHECK: fence seq_cst
; CHECK: %0 = ptrtoint i16* %arg to i64
; CHECK: %1 = and i64 %0, -4
; CHECK: %AlignedAddr = inttoptr i64 %1 to i32*
; CHECK: %PtrLSB = and i64 %0, 3
; CHECK: %2 = xor i64 %PtrLSB, 2
; CHECK: %3 = shl i64 %2, 3
; CHECK: %ShiftAmt = trunc i64 %3 to i32
; CHECK: %Mask = shl i32 65535, %ShiftAmt
; CHECK: %Inv_Mask = xor i32 %Mask, -1
; CHECK: %4 = zext i16 %val to i32
; CHECK: %ValOperand_Shifted = shl i32 %4, %ShiftAmt
; CHECK: %5 = load i32, i32* %AlignedAddr, align 4
; CHECK: br label %atomicrmw.start
; CHECK:atomicrmw.start:
; CHECK: %loaded = phi i32 [ %5, %entry ], [ %newloaded, %atomicrmw.start ]
; CHECK: %new = add i32 %loaded, %ValOperand_Shifted
; CHECK: %6 = and i32 %new, %Mask
; CHECK: %7 = and i32 %loaded, %Inv_Mask
; CHECK: %8 = or i32 %7, %6
; CHECK: %9 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %8 monotonic monotonic
; CHECK: %success = extractvalue { i32, i1 } %9, 1
; CHECK: %newloaded = extractvalue { i32, i1 } %9, 0
; CHECK: br i1 %success, label %atomicrmw.end, label %atomicrmw.start
; CHECK:atomicrmw.end:
; CHECK: %shifted = lshr i32 %newloaded, %ShiftAmt
; CHECK: %extracted = trunc i32 %shifted to i16
; CHECK: fence seq_cst
; CHECK: ret i16 %extracted
define i16 @test_add_i16(i16* %arg, i16 %val) {
entry:
  %ret = atomicrmw add i16* %arg, i16 %val seq_cst
  ret i16 %ret
}
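
;; atomicrmw xor needs no re-masking: the shifted operand has no bits outside
;; the target halfword, so xor'ing it into the loaded word already leaves the
;; neighbouring bytes unchanged and the result can be cmpxchg'd directly.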
; CHECK-LABEL: @test_xor_i16(
; (Only the checks that differ from the add expansion above are asserted here.)
; CHECK:atomicrmw.start:
; CHECK: %new = xor i32 %loaded, %ValOperand_Shifted
; CHECK: %6 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %new monotonic monotonic
; CHECK:atomicrmw.end:
define i16 @test_xor_i16(i16* %arg, i16 %val) {
entry:
  %ret = atomicrmw xor i16* %arg, i16 %val seq_cst
  ret i16 %ret
}
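
;; atomicrmw or skips the re-masking for the same reason as xor above.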
; CHECK-LABEL: @test_or_i16(
; (Only the checks that differ from the add expansion above are asserted here.)
; CHECK:atomicrmw.start:
; CHECK: %new = or i32 %loaded, %ValOperand_Shifted
; CHECK: %6 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %new monotonic monotonic
; CHECK:atomicrmw.end:
define i16 @test_or_i16(i16* %arg, i16 %val) {
entry:
  %ret = atomicrmw or i16* %arg, i16 %val seq_cst
  ret i16 %ret
}
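
;; atomicrmw and instead widens the operand by or'ing in %Inv_Mask, so the
;; bits outside the target halfword are all ones and the 'and' leaves them
;; untouched; again no separate re-masking is needed.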
; CHECK-LABEL: @test_and_i16(
; (Only the checks that differ from the add expansion above are asserted here.)
; CHECK: %AndOperand = or i32 %Inv_Mask, %ValOperand_Shifted
; CHECK:atomicrmw.start:
; CHECK: %new = and i32 %loaded, %AndOperand
; CHECK: %6 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %new monotonic monotonic
; CHECK:atomicrmw.end:
define i16 @test_and_i16(i16* %arg, i16 %val) {
entry:
  %ret = atomicrmw and i16* %arg, i16 %val seq_cst
  ret i16 %ret
}
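
;; atomicrmw min cannot be performed at i32 width: the loaded value is shifted
;; and truncated back to i16 for the signed comparison and select, and the
;; chosen value is then zero-extended, shifted, and merged back into the word
;; before the cmpxchg.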
; CHECK-LABEL: @test_min_i16(
; CHECK:atomicrmw.start:
; CHECK: %shifted = lshr i32 %loaded, %ShiftAmt
; CHECK: %extracted = trunc i32 %shifted to i16
; CHECK: %6 = icmp sle i16 %extracted, %val
; CHECK: %new = select i1 %6, i16 %extracted, i16 %val
; CHECK: %extended = zext i16 %new to i32
; CHECK: %shifted1 = shl nuw i32 %extended, %ShiftAmt
; CHECK: %unmasked = and i32 %loaded, %Inv_Mask
; CHECK: %inserted = or i32 %unmasked, %shifted1
; CHECK: %7 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %inserted monotonic monotonic
; CHECK:atomicrmw.end:
define i16 @test_min_i16(i16* %arg, i16 %val) {
entry:
  %ret = atomicrmw min i16* %arg, i16 %val seq_cst
  ret i16 %ret
}