//===-- RISCVInstrInfoA.td - RISC-V 'A' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the RISC-V instructions from the standard 'A', Atomic
// Instructions extension.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
class LR_r<bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<0b00010, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPR:$rs1),
                    opcodestr, "$rd, (${rs1})"> {
  let rs2 = 0;
}

multiclass LR_r_aq_rl<bits<3> funct3, string opcodestr> {
  def ""     : LR_r<0, 0, funct3, opcodestr>;
  def _AQ    : LR_r<1, 0, funct3, opcodestr # ".aq">;
  def _RL    : LR_r<0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : LR_r<1, 1, funct3, opcodestr # ".aqrl">;
}
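
// For illustration: a use such as `defm LR_W : LR_r_aq_rl<0b010, "lr.w">`
// expands to four records, LR_W, LR_W_AQ, LR_W_RL and LR_W_AQ_RL, covering
// the plain instruction and its .aq, .rl and .aqrl ordering variants:
//   lr.w      rd, (rs1)
//   lr.w.aq   rd, (rs1)
//   lr.w.rl   rd, (rs1)
//   lr.w.aqrl rd, (rs1)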

let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in
class AMO_rr<bits<5> funct5, bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<funct5, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
                    opcodestr, "$rd, $rs2, (${rs1})">;

multiclass AMO_rr_aq_rl<bits<5> funct5, bits<3> funct3, string opcodestr> {
  def ""     : AMO_rr<funct5, 0, 0, funct3, opcodestr>;
  def _AQ    : AMO_rr<funct5, 1, 0, funct3, opcodestr # ".aq">;
  def _RL    : AMO_rr<funct5, 0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : AMO_rr<funct5, 1, 1, funct3, opcodestr # ".aqrl">;
}
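
// Likewise, `defm AMOADD_W : AMO_rr_aq_rl<0b00000, 0b010, "amoadd.w">` yields
// AMOADD_W, AMOADD_W_AQ, AMOADD_W_RL and AMOADD_W_AQ_RL, printed as, e.g.,
// `amoadd.w.aqrl rd, rs2, (rs1)`.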

multiclass AtomicStPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy> {
  def : Pat<(StoreOp GPR:$rs1, StTy:$rs2), (Inst StTy:$rs2, GPR:$rs1, 0)>;
  def : Pat<(StoreOp AddrFI:$rs1, StTy:$rs2), (Inst StTy:$rs2, AddrFI:$rs1, 0)>;
  def : Pat<(StoreOp (add GPR:$rs1, simm12:$imm12), StTy:$rs2),
            (Inst StTy:$rs2, GPR:$rs1, simm12:$imm12)>;
  def : Pat<(StoreOp (add AddrFI:$rs1, simm12:$imm12), StTy:$rs2),
            (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
  def : Pat<(StoreOp (IsOrAdd AddrFI:$rs1, simm12:$imm12), StTy:$rs2),
            (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
}
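
// These patterns accept the same addressing modes as ordinary stores: a bare
// register, a frame index, and a base register plus a signed 12-bit
// immediate (IsOrAdd matches an `or` that is provably equivalent to an
// `add`). For example, (atomic_store_32 (add GPR:$base, simm12:$off),
// GPR:$val) selects `sw $val, $off($base)`; any fences required by the
// memory ordering are emitted separately.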

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtA] in {
defm LR_W       : LR_r_aq_rl<0b010, "lr.w">;
defm SC_W       : AMO_rr_aq_rl<0b00011, 0b010, "sc.w">;
defm AMOSWAP_W  : AMO_rr_aq_rl<0b00001, 0b010, "amoswap.w">;
defm AMOADD_W   : AMO_rr_aq_rl<0b00000, 0b010, "amoadd.w">;
defm AMOXOR_W   : AMO_rr_aq_rl<0b00100, 0b010, "amoxor.w">;
defm AMOAND_W   : AMO_rr_aq_rl<0b01100, 0b010, "amoand.w">;
defm AMOOR_W    : AMO_rr_aq_rl<0b01000, 0b010, "amoor.w">;
defm AMOMIN_W   : AMO_rr_aq_rl<0b10000, 0b010, "amomin.w">;
defm AMOMAX_W   : AMO_rr_aq_rl<0b10100, 0b010, "amomax.w">;
defm AMOMINU_W  : AMO_rr_aq_rl<0b11000, 0b010, "amominu.w">;
defm AMOMAXU_W  : AMO_rr_aq_rl<0b11100, 0b010, "amomaxu.w">;
} // Predicates = [HasStdExtA]

let Predicates = [HasStdExtA, IsRV64] in {
defm LR_D       : LR_r_aq_rl<0b011, "lr.d">;
defm SC_D       : AMO_rr_aq_rl<0b00011, 0b011, "sc.d">;
defm AMOSWAP_D  : AMO_rr_aq_rl<0b00001, 0b011, "amoswap.d">;
defm AMOADD_D   : AMO_rr_aq_rl<0b00000, 0b011, "amoadd.d">;
defm AMOXOR_D   : AMO_rr_aq_rl<0b00100, 0b011, "amoxor.d">;
defm AMOAND_D   : AMO_rr_aq_rl<0b01100, 0b011, "amoand.d">;
defm AMOOR_D    : AMO_rr_aq_rl<0b01000, 0b011, "amoor.d">;
defm AMOMIN_D   : AMO_rr_aq_rl<0b10000, 0b011, "amomin.d">;
defm AMOMAX_D   : AMO_rr_aq_rl<0b10100, 0b011, "amomax.d">;
defm AMOMINU_D  : AMO_rr_aq_rl<0b11000, 0b011, "amominu.d">;
defm AMOMAXU_D  : AMO_rr_aq_rl<0b11100, 0b011, "amomaxu.d">;
} // Predicates = [HasStdExtA, IsRV64]

//===----------------------------------------------------------------------===//
// Pseudo-instructions and codegen patterns
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtA] in {

/// Atomic loads and stores

// Fences will be inserted for atomic load/stores according to the logic in
// RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.

defm : LdPat<atomic_load_8,  LB>;
defm : LdPat<atomic_load_16, LH>;
defm : LdPat<atomic_load_32, LW>;

defm : AtomicStPat<atomic_store_8,  SB, GPR>;
defm : AtomicStPat<atomic_store_16, SH, GPR>;
defm : AtomicStPat<atomic_store_32, SW, GPR>;
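
// For example, with this lowering a seq_cst atomic i32 load becomes `lw`
// bracketed by `fence rw, rw` and `fence r, rw`, while a seq_cst atomic i32
// store becomes `fence rw, w` followed by a plain `sw`.
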
/// AMOs

multiclass AMOPat<string AtomicOp, string BaseInst> {
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
                  !cast<RVInst>(BaseInst)>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
                  !cast<RVInst>(BaseInst#"_AQ")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
                  !cast<RVInst>(BaseInst#"_RL")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
                  !cast<RVInst>(BaseInst#"_AQ_RL")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
                  !cast<RVInst>(BaseInst#"_AQ_RL")>;
}
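
// Both acq_rel and seq_cst select the _AQ_RL variant: per the 'A' extension,
// an AMO with both aq and rl set is sequentially consistent, which also
// suffices (conservatively) for acq_rel. So, for instance,
// `defm : AMOPat<"atomic_swap_32", "AMOSWAP_W">` maps atomic_swap_32_seq_cst
// to AMOSWAP_W_AQ_RL, i.e. `amoswap.w.aqrl`.
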
defm : AMOPat<"atomic_swap_32", "AMOSWAP_W">;
defm : AMOPat<"atomic_load_add_32", "AMOADD_W">;
defm : AMOPat<"atomic_load_and_32", "AMOAND_W">;
defm : AMOPat<"atomic_load_or_32", "AMOOR_W">;
defm : AMOPat<"atomic_load_xor_32", "AMOXOR_W">;
defm : AMOPat<"atomic_load_max_32", "AMOMAX_W">;
defm : AMOPat<"atomic_load_min_32", "AMOMIN_W">;
defm : AMOPat<"atomic_load_umax_32", "AMOMAXU_W">;
defm : AMOPat<"atomic_load_umin_32", "AMOMINU_W">;

def : Pat<(atomic_load_sub_32_monotonic GPR:$addr, GPR:$incr),
          (AMOADD_W GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_acquire GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_release GPR:$addr, GPR:$incr),
          (AMOADD_W_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_acq_rel GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_seq_cst GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
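
// There is no amosub instruction; atomic subtraction negates the increment
// and reuses amoadd. The monotonic case above expands to, roughly:
//   sub      t0, zero, a1        # t0 = -incr
//   amoadd.w a2, t0, (a0)        # a2 = old value; mem += -incr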

/// Pseudo AMOs

class PseudoAMO : Pseudo<(outs GPR:$res, GPR:$scratch),
                         (ins GPR:$addr, GPR:$incr, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

def PseudoAtomicLoadNand32 : PseudoAMO;
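
// This pseudo is expanded after register allocation by the pseudo-expansion
// pass (RISCVExpandPseudoInsts.cpp) into an LR/SC retry loop, roughly:
//   loop:
//     lr.w  res, (addr)
//     and   scratch, res, incr
//     xori  scratch, scratch, -1       # nand = not(and)
//     sc.w  scratch, scratch, (addr)
//     bnez  scratch, loop
// with .aq/.rl suffixes on the lr/sc chosen from $ordering.
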
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
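// For reference: 2 = monotonic, 4 = acquire, 5 = release, 6 = acq_rel,
// 7 = seq_cst (0 and 1 are not_atomic and unordered; 3 is unused).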
def : Pat<(atomic_load_nand_32_monotonic GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(atomic_load_nand_32_acquire GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(atomic_load_nand_32_release GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(atomic_load_nand_32_acq_rel GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(atomic_load_nand_32_seq_cst GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 7)>;

class PseudoMaskedAMO
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

class PseudoMaskedAMOMinMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$sextshamt,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

class PseudoMaskedAMOUMinUMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

class PseudoMaskedAMOPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, imm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, imm:$ordering)>;

class PseudoMaskedAMOMinMaxPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           imm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           imm:$ordering)>;
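
// These pseudos implement part-word (i8/i16) atomicrmw: AtomicExpandPass
// rewrites such an operation into an int_riscv_masked_* intrinsic on the
// aligned 32-bit word containing the value, with $mask selecting the bits of
// interest. Signed min/max additionally take $sextshamt, the shift amount
// used to sign-extend the loaded field before the comparison.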

def PseudoMaskedAtomicSwap32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i32,
                         PseudoMaskedAtomicSwap32>;
def PseudoMaskedAtomicLoadAdd32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i32,
                         PseudoMaskedAtomicLoadAdd32>;
def PseudoMaskedAtomicLoadSub32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i32,
                         PseudoMaskedAtomicLoadSub32>;
def PseudoMaskedAtomicLoadNand32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i32,
                         PseudoMaskedAtomicLoadNand32>;
def PseudoMaskedAtomicLoadMax32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i32,
                               PseudoMaskedAtomicLoadMax32>;
def PseudoMaskedAtomicLoadMin32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i32,
                               PseudoMaskedAtomicLoadMin32>;
def PseudoMaskedAtomicLoadUMax32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i32,
                         PseudoMaskedAtomicLoadUMax32>;
def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i32,
                         PseudoMaskedAtomicLoadUMin32>;

/// Compare and exchange

class PseudoCmpXchg
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}
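
// Also expanded to an LR/SC loop after register allocation, roughly:
//   loop:
//     lr.w  res, (addr)
//     bne   res, cmpval, done
//     sc.w  scratch, newval, (addr)
//     bnez  scratch, loop
//   done: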

// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst> {
  def : Pat<(!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 2)>;
  def : Pat<(!cast<PatFrag>(Op#"_acquire") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 4)>;
  def : Pat<(!cast<PatFrag>(Op#"_release") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 5)>;
  def : Pat<(!cast<PatFrag>(Op#"_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 6)>;
  def : Pat<(!cast<PatFrag>(Op#"_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
}

def PseudoCmpXchg32 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_32", PseudoCmpXchg32>;

def PseudoMaskedCmpXchg32
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

def : Pat<(int_riscv_masked_cmpxchg_i32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering),
          (PseudoMaskedCmpXchg32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering)>;
} // Predicates = [HasStdExtA]

let Predicates = [HasStdExtA, IsRV64] in {

/// 64-bit atomic loads and stores

// Fences will be inserted for atomic load/stores according to the logic in
// RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.

defm : LdPat<atomic_load_64, LD>;
defm : AtomicStPat<atomic_store_64, SD, GPR>;

defm : AMOPat<"atomic_swap_64", "AMOSWAP_D">;
defm : AMOPat<"atomic_load_add_64", "AMOADD_D">;
defm : AMOPat<"atomic_load_and_64", "AMOAND_D">;
defm : AMOPat<"atomic_load_or_64", "AMOOR_D">;
defm : AMOPat<"atomic_load_xor_64", "AMOXOR_D">;
defm : AMOPat<"atomic_load_max_64", "AMOMAX_D">;
defm : AMOPat<"atomic_load_min_64", "AMOMIN_D">;
defm : AMOPat<"atomic_load_umax_64", "AMOMAXU_D">;
defm : AMOPat<"atomic_load_umin_64", "AMOMINU_D">;

/// 64-bit AMOs

def : Pat<(atomic_load_sub_64_monotonic GPR:$addr, GPR:$incr),
          (AMOADD_D GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_acquire GPR:$addr, GPR:$incr),
          (AMOADD_D_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_release GPR:$addr, GPR:$incr),
          (AMOADD_D_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_acq_rel GPR:$addr, GPR:$incr),
          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_seq_cst GPR:$addr, GPR:$incr),
          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;

/// 64-bit pseudo AMOs

def PseudoAtomicLoadNand64 : PseudoAMO;
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
def : Pat<(atomic_load_nand_64_monotonic GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(atomic_load_nand_64_acquire GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(atomic_load_nand_64_release GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(atomic_load_nand_64_acq_rel GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(atomic_load_nand_64_seq_cst GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 7)>;

def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i64,
                         PseudoMaskedAtomicSwap32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i64,
                         PseudoMaskedAtomicLoadAdd32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i64,
                         PseudoMaskedAtomicLoadSub32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i64,
                         PseudoMaskedAtomicLoadNand32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i64,
                               PseudoMaskedAtomicLoadMax32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i64,
                               PseudoMaskedAtomicLoadMin32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i64,
                         PseudoMaskedAtomicLoadUMax32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i64,
                         PseudoMaskedAtomicLoadUMin32>;
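
// The i64 masked intrinsics reuse the 32-bit pseudos: part-word atomics
// still operate on the aligned 32-bit word containing the value; on RV64
// only the operand type widens to i64 (XLen), with values sign-extended.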

/// 64-bit compare and exchange

def PseudoCmpXchg64 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_64", PseudoCmpXchg64>;

def : Pat<(int_riscv_masked_cmpxchg_i64
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering),
          (PseudoMaskedCmpXchg32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering)>;
} // Predicates = [HasStdExtA, IsRV64]