//===- RISCVMatInt.cpp - Immediate materialisation -------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "RISCVMatInt.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;
static int getInstSeqCost(RISCVMatInt::InstSeq &Res, bool HasRVC) {
if (!HasRVC)
return Res.size();
int Cost = 0;
for (auto Instr : Res) {
bool Compressed;
switch (Instr.Opc) {
default:
llvm_unreachable("Unexpected opcode");
case RISCV::SLLI:
case RISCV::SRLI:
Compressed = true;
break;
case RISCV::ADDI:
case RISCV::ADDIW:
case RISCV::LUI:
Compressed = isInt<6>(Instr.Imm);
break;
case RISCV::ADD_UW:
Compressed = false;
break;
}
// Two RVC instructions take the same space as one RVI instruction, but
// can take longer to execute than the single RVI instruction. Thus, we
// consider two RVC instructions to be slightly more costly than one RVI
// instruction. For longer sequences of RVC instructions the space savings
// can be worth it, though. The costs below try to model that.
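// For example, two compressed instructions cost 140 here versus 100 for a
// single uncompressed instruction, while four compressed instructions (280)
// are modelled as cheaper than three uncompressed ones (300).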
if (!Compressed)
Cost += 100; // Baseline cost of one RVI instruction: 100%.
else
Cost += 70; // 70% cost of baseline.
}
return Cost;
}
// Recursively generate a sequence for materializing an integer.
static void generateInstSeqImpl(int64_t Val,
const FeatureBitset &ActiveFeatures,
RISCVMatInt::InstSeq &Res) {
bool IsRV64 = ActiveFeatures[RISCV::Feature64Bit];
if (isInt<32>(Val)) {
// Depending on the active bits in the immediate Value v, the following
// instruction sequences are emitted:
//
// v == 0 : ADDI
// v[0,12) != 0 && v[12,32) == 0 : ADDI
// v[0,12) == 0 && v[12,32) != 0 : LUI
// v[0,32) != 0 : LUI+ADDI(W)
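//
// For example, Val = 0x12345FFF yields Hi20 = 0x12346 (note the rounding
// from adding 0x800) and Lo12 = -1, so it is materialized as LUI 0x12346
// followed by ADDIW -1 (ADDI on RV32).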
int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
int64_t Lo12 = SignExtend64<12>(Val);
if (Hi20)
Res.push_back(RISCVMatInt::Inst(RISCV::LUI, Hi20));
if (Lo12 || Hi20 == 0) {
unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
Res.push_back(RISCVMatInt::Inst(AddiOpc, Lo12));
}
return;
}
assert(IsRV64 && "Can't emit >32-bit imm for non-RV64 target");
// In the worst case, for a full 64-bit constant, a sequence of 8 instructions
// (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be emitted. Note
// that the first two instructions (LUI+ADDIW) can contribute up to 32 bits
// while the following ADDI instructions contribute up to 12 bits each.
//
// At first glance, implementing this seems possible by simply emitting the
// most significant 32 bits (LUI+ADDIW) followed by as many left shifts
// (SLLI) and immediate additions (ADDI) as needed. However, because ADDI
// performs a sign-extended addition, that approach only works when at most
// 11 bits of each ADDI immediate are used. Using all 12 bits of the ADDI
// immediates, as GAS does, requires processing the constant starting from
// the least significant bit.
//
// In the following, constants are processed from LSB to MSB but instruction
// emission is performed from MSB to LSB by recursively calling
// generateInstSeq. In each recursion, first the lowest 12 bits are removed
// from the constant and the optimal shift amount, which can be greater than
// 12 bits if the constant is sparse, is determined. Then, the shifted
// remaining constant is processed recursively and gets emitted as soon as it
// fits into 32 bits. The emission of the shifts and additions is subsequently
// performed when the recursion returns.
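//
// For example, Val = 0x100000001 gives Lo12 = 1, Hi52 = 0x100000 and a
// ShiftAmount of 32; the recursion materializes the shifted-down remainder
// with ADDI 1, so the full sequence is ADDI 1; SLLI 32; ADDI 1.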
int64_t Lo12 = SignExtend64<12>(Val);
int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;
int ShiftAmount = 12 + findFirstSet((uint64_t)Hi52);
Hi52 = SignExtend64(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);
// If the remaining bits don't fit in 12 bits, we might be able to reduce the
// shift amount in order to use LUI which will zero the lower 12 bits.
bool Unsigned = false;
if (ShiftAmount > 12 && !isInt<12>(Hi52)) {
if (isInt<32>((uint64_t)Hi52 << 12)) {
// Reduce the shift amount and add zeros to the LSBs so it will match LUI.
ShiftAmount -= 12;
Hi52 = (uint64_t)Hi52 << 12;
} else if (isUInt<32>((uint64_t)Hi52 << 12) &&
ActiveFeatures[RISCV::FeatureStdExtZba]) {
// Reduce the shift amount and add zeros to the LSBs so it will match
// LUI; the SLLI_UW emitted below then shifts left while zeroing the
// upper 32 set bits.
ShiftAmount -= 12;
Hi52 = ((uint64_t)Hi52 << 12) | (0xffffffffull << 32);
Unsigned = true;
}
}
// Try to use SLLI_UW for Hi52 when it is uint32 but not int32.
if (isUInt<32>((uint64_t)Hi52) && !isInt<32>((uint64_t)Hi52) &&
ActiveFeatures[RISCV::FeatureStdExtZba]) {
// Use LUI+ADDI or LUI to compose, then clear the upper 32 bits with
// SLLI_UW.
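// For example, Hi52 = 0x80000001 becomes 0xFFFFFFFF80000001, which a
// LUI 0x80000 + ADDIW 1 pair materializes; the SLLI_UW emitted below then
// zero-extends the low 32 bits while shifting them into place.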
Hi52 = ((uint64_t)Hi52) | (0xffffffffull << 32);
Unsigned = true;
}
generateInstSeqImpl(Hi52, ActiveFeatures, Res);
if (Unsigned)
Res.push_back(RISCVMatInt::Inst(RISCV::SLLI_UW, ShiftAmount));
else
Res.push_back(RISCVMatInt::Inst(RISCV::SLLI, ShiftAmount));
if (Lo12)
Res.push_back(RISCVMatInt::Inst(RISCV::ADDI, Lo12));
}
static unsigned extractRotateInfo(int64_t Val) {
// Case 1: long runs of leading and trailing ones, 0b111..1..xxxxxx1..1..
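// For example, Val = 0xFFFFF3FFFFFFFFFF has 20 leading and 42 trailing
// ones, so we return a rotate amount of 64 - 42 = 22; rotating Val left
// by 22 yields -4, which ADDI -4 + RORI 22 can rebuild.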
unsigned LeadingOnes = countLeadingOnes((uint64_t)Val);
unsigned TrailingOnes = countTrailingOnes((uint64_t)Val);
if (TrailingOnes > 0 && TrailingOnes < 64 &&
(LeadingOnes + TrailingOnes) > (64 - 12))
return 64 - TrailingOnes;
// Case 2: a long run of ones spanning the 32-bit boundary, 0bxxx1..1..1...xxx
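// For example, Val = 0xBFFFFFFFFFFFFF00 has 30 trailing ones in the upper
// half and 24 leading ones in the lower half, so we return 32 - 30 = 2;
// rotating Val left by 2 yields -1022, giving ADDI -1022 + RORI 2.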
unsigned UpperTrailingOnes = countTrailingOnes(Hi_32(Val));
unsigned LowerLeadingOnes = countLeadingOnes(Lo_32(Val));
if (UpperTrailingOnes < 32 &&
(UpperTrailingOnes + LowerLeadingOnes) > (64 - 12))
return 32 - UpperTrailingOnes;
return 0;
}
namespace llvm {
namespace RISCVMatInt {
InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
RISCVMatInt::InstSeq Res;
generateInstSeqImpl(Val, ActiveFeatures, Res);
// If the constant is positive, we might be able to generate a shifted
// constant with no leading zeros and use a final SRLI to restore them.
if (Val > 0 && Res.size() > 2) {
assert(ActiveFeatures[RISCV::Feature64Bit] &&
"Expected RV32 to only need 2 instructions");
unsigned LeadingZeros = countLeadingZeros((uint64_t)Val);
uint64_t ShiftedVal = (uint64_t)Val << LeadingZeros;
// Fill in the bits that will be shifted out with 1s. An example where this
// helps is trailing one masks with 32 or more ones. This will generate
// ADDI -1 and an SRLI.
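// For example, Val = 0x3FFFFFFF F (34 trailing ones) has 30 leading zeros;
// ShiftedVal becomes all ones, so the three-instruction default collapses
// to ADDI -1 followed by SRLI 30.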
ShiftedVal |= maskTrailingOnes<uint64_t>(LeadingZeros);
RISCVMatInt::InstSeq TmpSeq;
generateInstSeqImpl(ShiftedVal, ActiveFeatures, TmpSeq);
TmpSeq.push_back(RISCVMatInt::Inst(RISCV::SRLI, LeadingZeros));
// Keep the new sequence if it is an improvement.
if (TmpSeq.size() < Res.size()) {
Res = TmpSeq;
// A 2 instruction sequence is the best we can do.
if (Res.size() <= 2)
return Res;
}
// Some cases can benefit from filling the lower bits with zeros instead.
ShiftedVal &= maskTrailingZeros<uint64_t>(LeadingZeros);
TmpSeq.clear();
generateInstSeqImpl(ShiftedVal, ActiveFeatures, TmpSeq);
TmpSeq.push_back(RISCVMatInt::Inst(RISCV::SRLI, LeadingZeros));
// Keep the new sequence if it is an improvement.
if (TmpSeq.size() < Res.size()) {
Res = TmpSeq;
// A 2 instruction sequence is the best we can do.
if (Res.size() <= 2)
return Res;
}
// If we have exactly 32 leading zeros and Zba, we can try using zext.w at
// the end of the sequence.
if (LeadingZeros == 32 && ActiveFeatures[RISCV::FeatureStdExtZba]) {
// Try replacing upper bits with 1.
uint64_t LeadingOnesVal = Val | maskLeadingOnes<uint64_t>(LeadingZeros);
TmpSeq.clear();
generateInstSeqImpl(LeadingOnesVal, ActiveFeatures, TmpSeq);
TmpSeq.push_back(RISCVMatInt::Inst(RISCV::ADD_UW, 0));
// Keep the new sequence if it is an improvement.
if (TmpSeq.size() < Res.size()) {
Res = TmpSeq;
// A 2 instruction sequence is the best we can do.
if (Res.size() <= 2)
return Res;
}
}
}
// Perform optimization with BCLRI/BSETI in the Zbs extension.
if (Res.size() > 2 && ActiveFeatures[RISCV::FeatureStdExtZbs]) {
assert(ActiveFeatures[RISCV::Feature64Bit] &&
"Expected RV32 to only need 2 instructions");
// 1. For values in the range 0xffffffff 7fffffff ~ 0xffffffff 00000000,
//    call generateInstSeqImpl with Val|0x80000000 (which is expected to
//    be an int32), then emit (BCLRI r, 31).
// 2. For values in the range 0x80000000 ~ 0xffffffff, call
//    generateInstSeqImpl with Val&~0x80000000 (which is expected to be
//    an int32), then emit (BSETI r, 31).
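// For example, Val = 0x80000001 becomes NewVal = 1, so ADDI 1 followed by
// BSETI 31 replaces the three-instruction ADDI 1; SLLI 31; ADDI 1 default.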
int64_t NewVal;
unsigned Opc;
if (Val < 0) {
Opc = RISCV::BCLRI;
NewVal = Val | 0x80000000ll;
} else {
Opc = RISCV::BSETI;
NewVal = Val & ~0x80000000ll;
}
if (isInt<32>(NewVal)) {
RISCVMatInt::InstSeq TmpSeq;
generateInstSeqImpl(NewVal, ActiveFeatures, TmpSeq);
TmpSeq.push_back(RISCVMatInt::Inst(Opc, 31));
if (TmpSeq.size() < Res.size())
Res = TmpSeq;
}
// Try to use BCLRI for the upper 32 bits if the original lower 32 bits
// are a negative int32, or BSETI for the upper 32 bits if they are a
// positive int32.
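// For example, Val = 0x10000000001 materializes its low word with ADDI 1
// and then sets bit 40 with BSETI 40, beating the three-instruction
// ADDI 1; SLLI 40; ADDI 1 default.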
int32_t Lo = Val;
uint32_t Hi = Val >> 32;
Opc = 0;
RISCVMatInt::InstSeq TmpSeq;
generateInstSeqImpl(Lo, ActiveFeatures, TmpSeq);
// Check if it is profitable to use BCLRI/BSETI.
if (Lo > 0 && TmpSeq.size() + countPopulation(Hi) < Res.size()) {
Opc = RISCV::BSETI;
} else if (Lo < 0 && TmpSeq.size() + countPopulation(~Hi) < Res.size()) {
Opc = RISCV::BCLRI;
Hi = ~Hi;
}
// Search for each bit and build corresponding BCLRI/BSETI.
if (Opc > 0) {
while (Hi != 0) {
unsigned Bit = countTrailingZeros(Hi);
TmpSeq.push_back(RISCVMatInt::Inst(Opc, Bit + 32));
Hi &= ~(uint32_t(1) << Bit); // Unsigned shift stays well-defined at bit 31.
}
if (TmpSeq.size() < Res.size())
Res = TmpSeq;
}
}
// Perform optimization with SH*ADD in the Zba extension.
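// For example, 0x149249247 = 3 * 0x6DB6DB6D needs four instructions
// (LUI+ADDIW+SLLI+ADDI) by default, but with Zba it shrinks to three:
// LUI+ADDIW for 0x6DB6DB6D followed by SH1ADD.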
if (Res.size() > 2 && ActiveFeatures[RISCV::FeatureStdExtZba]) {
assert(ActiveFeatures[RISCV::Feature64Bit] &&
"Expected RV32 to only need 2 instructions");
int64_t Div = 0;
unsigned Opc = 0;
RISCVMatInt::InstSeq TmpSeq;
// Select the opcode and divisor.
if ((Val % 3) == 0 && isInt<32>(Val / 3)) {
Div = 3;
Opc = RISCV::SH1ADD;
} else if ((Val % 5) == 0 && isInt<32>(Val / 5)) {
Div = 5;
Opc = RISCV::SH2ADD;
} else if ((Val % 9) == 0 && isInt<32>(Val / 9)) {
Div = 9;
Opc = RISCV::SH3ADD;
}
// Build the new instruction sequence.
if (Div > 0) {
generateInstSeqImpl(Val / Div, ActiveFeatures, TmpSeq);
TmpSeq.push_back(RISCVMatInt::Inst(Opc, 0));
if (TmpSeq.size() < Res.size())
Res = TmpSeq;
} else {
// Try to use LUI+SH*ADD+ADDI.
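// For example, Val = 0x17FFFD001 has Hi52 = 0x17FFFD000 = 3 * 0x7FFFF000
// and Lo12 = 1, so LUI 0x7FFFF; SH1ADD; ADDI 1 rebuilds it in three
// instructions instead of the four-instruction default.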
int64_t Hi52 = ((uint64_t)Val + 0x800ull) & ~0xfffull;
int64_t Lo12 = SignExtend64<12>(Val);
Div = 0;
if (isInt<32>(Hi52 / 3) && (Hi52 % 3) == 0) {
Div = 3;
Opc = RISCV::SH1ADD;
} else if (isInt<32>(Hi52 / 5) && (Hi52 % 5) == 0) {
Div = 5;
Opc = RISCV::SH2ADD;
} else if (isInt<32>(Hi52 / 9) && (Hi52 % 9) == 0) {
Div = 9;
Opc = RISCV::SH3ADD;
}
// Build the new instruction sequence.
if (Div > 0) {
// A Val with zero Lo12 (i.e., Val equal to Hi52) should already have
// been turned into LUI+SH*ADD by the previous optimization.
assert(Lo12 != 0 &&
"unexpected instruction sequence for immediate materialisation");
assert(TmpSeq.empty() && "Expected empty TmpSeq");
generateInstSeqImpl(Hi52 / Div, ActiveFeatures, TmpSeq);
TmpSeq.push_back(RISCVMatInt::Inst(Opc, 0));
TmpSeq.push_back(RISCVMatInt::Inst(RISCV::ADDI, Lo12));
if (TmpSeq.size() < Res.size())
Res = TmpSeq;
}
}
}
// Perform optimization with rori in the Zbb extension.
if (Res.size() > 2 && ActiveFeatures[RISCV::FeatureStdExtZbb]) {
if (unsigned Rotate = extractRotateInfo(Val)) {
RISCVMatInt::InstSeq TmpSeq;
uint64_t NegImm12 =
((uint64_t)Val >> (64 - Rotate)) | ((uint64_t)Val << Rotate);
assert(isInt<12>(NegImm12));
TmpSeq.push_back(RISCVMatInt::Inst(RISCV::ADDI, NegImm12));
TmpSeq.push_back(RISCVMatInt::Inst(RISCV::RORI, Rotate));
Res = TmpSeq;
}
}
return Res;
}
int getIntMatCost(const APInt &Val, unsigned Size,
const FeatureBitset &ActiveFeatures, bool CompressionCost) {
bool IsRV64 = ActiveFeatures[RISCV::Feature64Bit];
bool HasRVC = CompressionCost && ActiveFeatures[RISCV::FeatureStdExtC];
int PlatRegSize = IsRV64 ? 64 : 32;
// Split the constant into platform register sized chunks, and calculate cost
// of each chunk.
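// For example, a 128-bit constant on RV64 is costed as two independent
// 64-bit chunks, each materialized with its own instruction sequence.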
int Cost = 0;
for (unsigned ShiftVal = 0; ShiftVal < Size; ShiftVal += PlatRegSize) {
APInt Chunk = Val.ashr(ShiftVal).sextOrTrunc(PlatRegSize);
InstSeq MatSeq = generateInstSeq(Chunk.getSExtValue(), ActiveFeatures);
Cost += getInstSeqCost(MatSeq, HasRVC);
}
return std::max(1, Cost);
}
} // namespace RISCVMatInt
} // namespace llvm