//===-- GCNPreRAOptimizations.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass combines split register tuple initialization into a single pseudo:
///
/// undef %0.sub1:sreg_64 = S_MOV_B32 1
/// %0.sub0:sreg_64 = S_MOV_B32 2
/// =>
/// %0:sreg_64 = S_MOV_B64_IMM_PSEUDO 0x200000001
///
/// This is to allow rematerialization of a value instead of spilling. It is
/// supposed to be done after the register coalescer, so the coalescer can do
/// its job, and before actual register allocation, so the value can still be
/// rematerialized.
///
/// Right now the pass only handles 64-bit SGPRs with immediate initializers,
/// although the same should be possible with other register classes and
/// instructions if necessary.
///
/// This pass also adds register allocation hints to COPY instructions.
/// The hints will be post-processed by SIRegisterInfo::getRegAllocationHints.
/// When using True16, we often see COPY moving a 16-bit value between a VGPR_32
/// and a VGPR_16. If we use the VGPR_16 that corresponds to the lo16 bits of
/// the VGPR_32, the COPY can be completely eliminated.
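///
/// For example (illustrative MIR), with True16 enabled a copy such as
///
///   %2:vgpr_16 = COPY $vgpr1
///
/// receives a hint to allocate %2 to $vgpr1_lo16, letting the allocator fold
/// the COPY away.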
///
//===----------------------------------------------------------------------===//
#include "GCNPreRAOptimizations.h"
#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/InitializePasses.h"
using namespace llvm;

#define DEBUG_TYPE "amdgpu-pre-ra-optimizations"

namespace {

class GCNPreRAOptimizationsImpl {
private:
const SIInstrInfo *TII;
const SIRegisterInfo *TRI;
MachineRegisterInfo *MRI;
LiveIntervals *LIS;
bool processReg(Register Reg);
public:
GCNPreRAOptimizationsImpl(LiveIntervals *LS) : LIS(LS) {}
bool run(MachineFunction &MF);
};
class GCNPreRAOptimizationsLegacy : public MachineFunctionPass {
public:
static char ID;
GCNPreRAOptimizationsLegacy() : MachineFunctionPass(ID) {
initializeGCNPreRAOptimizationsLegacyPass(*PassRegistry::getPassRegistry());
}
bool runOnMachineFunction(MachineFunction &MF) override;
StringRef getPassName() const override {
return "AMDGPU Pre-RA optimizations";
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<LiveIntervalsWrapperPass>();
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(GCNPreRAOptimizationsLegacy, DEBUG_TYPE,
"AMDGPU Pre-RA optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervalsWrapperPass)
INITIALIZE_PASS_END(GCNPreRAOptimizationsLegacy, DEBUG_TYPE,
"Pre-RA optimizations", false, false)
char GCNPreRAOptimizationsLegacy::ID = 0;
char &llvm::GCNPreRAOptimizationsID = GCNPreRAOptimizationsLegacy::ID;
FunctionPass *llvm::createGCNPreRAOptimizationsLegacyPass() {
return new GCNPreRAOptimizationsLegacy();
}
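
// For an AGPR destination register, rewrite AGPR-to-AGPR COPYs to read the
// original virtual source of the defining accvgpr_write, avoiding the
// temporary register such a copy would otherwise need. For a 64-bit SGPR,
// collect the two 32-bit immediate moves that initialize sub0 and sub1 so
// they can be merged into a single S_MOV_B64_IMM_PSEUDO.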
bool GCNPreRAOptimizationsImpl::processReg(Register Reg) {
MachineInstr *Def0 = nullptr;
MachineInstr *Def1 = nullptr;
uint64_t Init = 0;
bool Changed = false;
SmallSet<Register, 32> ModifiedRegs;
bool IsAGPRDst = TRI->isAGPRClass(MRI->getRegClass(Reg));
for (MachineInstr &I : MRI->def_instructions(Reg)) {
switch (I.getOpcode()) {
default:
return false;
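// An accvgpr_write already defines the AGPR directly; nothing to rewrite
// for this def, keep scanning the others.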
case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
break;
case AMDGPU::COPY: {
// Some subtargets cannot do an AGPR to AGPR copy directly, and need an
// intermediate temporary VGPR register. Try to find the defining
// accvgpr_write to avoid temporary registers.
if (!IsAGPRDst)
return false;
Register SrcReg = I.getOperand(1).getReg();
if (!SrcReg.isVirtual())
break;
// Check if source of copy is from another AGPR.
bool IsAGPRSrc = TRI->isAGPRClass(MRI->getRegClass(SrcReg));
if (!IsAGPRSrc)
break;
// def_instructions() does not look at subregs, so it may give us a
// different instruction that defines the same vreg but a different subreg;
// check the subreg manually.
Register SrcSubReg = I.getOperand(1).getSubReg();
for (auto &Def : MRI->def_instructions(SrcReg)) {
if (SrcSubReg != Def.getOperand(0).getSubReg())
continue;
if (Def.getOpcode() == AMDGPU::V_ACCVGPR_WRITE_B32_e64) {
MachineOperand DefSrcMO = Def.getOperand(1);
// Immediates are not an issue and can be propagated in the postrapseudos
// pass. Only handle cases where the defining accvgpr_write source is a
// vreg.
if (DefSrcMO.isReg() && DefSrcMO.getReg().isVirtual()) {
// Propagate source reg of accvgpr write to this copy instruction
I.getOperand(1).setReg(DefSrcMO.getReg());
I.getOperand(1).setSubReg(DefSrcMO.getSubReg());
// Reg uses were changed; collect the unique set of registers whose live
// intervals must be recomputed at the end.
ModifiedRegs.insert(DefSrcMO.getReg());
ModifiedRegs.insert(SrcReg);
Changed = true;
}
// Found the defining accvgpr_write, stop looking any further.
break;
}
}
break;
}
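// A 32-bit immediate move into one half of the 64-bit SGPR. Record which
// half it defines and accumulate the combined 64-bit value in Init.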
case AMDGPU::S_MOV_B32:
if (I.getOperand(0).getReg() != Reg || !I.getOperand(1).isImm() ||
I.getNumOperands() != 2)
return false;
switch (I.getOperand(0).getSubReg()) {
default:
return false;
case AMDGPU::sub0:
if (Def0)
return false;
Def0 = &I;
Init |= Lo_32(I.getOperand(1).getImm());
break;
case AMDGPU::sub1:
if (Def1)
return false;
Def1 = &I;
Init |= static_cast<uint64_t>(I.getOperand(1).getImm()) << 32;
break;
}
break;
}
}
// For AGPR reg, check if live intervals need to be updated.
if (IsAGPRDst) {
if (Changed) {
for (Register RegToUpdate : ModifiedRegs) {
LIS->removeInterval(RegToUpdate);
LIS->createAndComputeVirtRegInterval(RegToUpdate);
}
}
return Changed;
}
// For SGPR reg, check if we can combine instructions.
if (!Def0 || !Def1 || Def0->getParent() != Def1->getParent())
return Changed;
LLVM_DEBUG(dbgs() << "Combining:\n " << *Def0 << " " << *Def1
<< " =>\n");
if (SlotIndex::isEarlierInstr(LIS->getInstructionIndex(*Def1),
LIS->getInstructionIndex(*Def0)))
std::swap(Def0, Def1);
LIS->RemoveMachineInstrFromMaps(*Def0);
LIS->RemoveMachineInstrFromMaps(*Def1);
auto NewI = BuildMI(*Def0->getParent(), *Def0, Def0->getDebugLoc(),
TII->get(AMDGPU::S_MOV_B64_IMM_PSEUDO), Reg)
.addImm(Init);
Def0->eraseFromParent();
Def1->eraseFromParent();
LIS->InsertMachineInstrInMaps(*NewI);
LIS->removeInterval(Reg);
LIS->createAndComputeVirtRegInterval(Reg);
LLVM_DEBUG(dbgs() << " " << *NewI);
return true;
}

bool GCNPreRAOptimizationsLegacy::runOnMachineFunction(MachineFunction &MF) {
if (skipFunction(MF.getFunction()))
return false;
LiveIntervals *LIS = &getAnalysis<LiveIntervalsWrapperPass>().getLIS();
return GCNPreRAOptimizationsImpl(LIS).run(MF);
}

PreservedAnalyses
GCNPreRAOptimizationsPass::run(MachineFunction &MF,
MachineFunctionAnalysisManager &MFAM) {
LiveIntervals *LIS = &MFAM.getResult<LiveIntervalsAnalysis>(MF);
GCNPreRAOptimizationsImpl(LIS).run(MF);
return PreservedAnalyses::all();
}

bool GCNPreRAOptimizationsImpl::run(MachineFunction &MF) {
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
TII = ST.getInstrInfo();
MRI = &MF.getRegInfo();
TRI = ST.getRegisterInfo();
bool Changed = false;
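// Try to rewrite the defs of every virtual register that still has a live
// interval. Only 64-bit SGPRs are candidates for the S_MOV_B64_IMM_PSEUDO
// combine; AGPRs are only processed on subtargets that lack direct
// AGPR-to-AGPR copies.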
for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
Register Reg = Register::index2VirtReg(I);
if (!LIS->hasInterval(Reg))
continue;
const TargetRegisterClass *RC = MRI->getRegClass(Reg);
if ((RC->MC->getSizeInBits() != 64 || !TRI->isSGPRClass(RC)) &&
(ST.hasGFX90AInsts() || !TRI->isAGPRClass(RC)))
continue;
Changed |= processReg(Reg);
}
if (!ST.useRealTrue16Insts())
return Changed;
// Add RA hints to improve True16 COPY elimination.
for (const MachineBasicBlock &MBB : MF) {
for (const MachineInstr &MI : MBB) {
if (MI.getOpcode() != AMDGPU::COPY)
continue;
Register Dst = MI.getOperand(0).getReg();
Register Src = MI.getOperand(1).getReg();
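// A copy between a virtual VGPR_16 and a physical VGPR_32: hint the virtual
// 16-bit register at the lo16 half of the physical register.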
if (Dst.isVirtual() &&
MRI->getRegClass(Dst) == &AMDGPU::VGPR_16RegClass &&
Src.isPhysical() &&
TRI->getRegClassForReg(*MRI, Src) == &AMDGPU::VGPR_32RegClass)
MRI->setRegAllocationHint(Dst, 0, TRI->getSubReg(Src, AMDGPU::lo16));
if (Src.isVirtual() &&
MRI->getRegClass(Src) == &AMDGPU::VGPR_16RegClass &&
Dst.isPhysical() &&
TRI->getRegClassForReg(*MRI, Dst) == &AMDGPU::VGPR_32RegClass)
MRI->setRegAllocationHint(Src, 0, TRI->getSubReg(Dst, AMDGPU::lo16));
if (!Dst.isVirtual() || !Src.isVirtual())
continue;
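// Both operands are virtual: record size-tagged hints that are
// post-processed by SIRegisterInfo::getRegAllocationHints (see the file
// comment).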
if (MRI->getRegClass(Dst) == &AMDGPU::VGPR_32RegClass &&
MRI->getRegClass(Src) == &AMDGPU::VGPR_16RegClass) {
MRI->setRegAllocationHint(Dst, AMDGPURI::Size32, Src);
MRI->setRegAllocationHint(Src, AMDGPURI::Size16, Dst);
}
if (MRI->getRegClass(Dst) == &AMDGPU::VGPR_16RegClass &&
MRI->getRegClass(Src) == &AMDGPU::VGPR_32RegClass)
MRI->setRegAllocationHint(Dst, AMDGPURI::Size16, Src);
}
}
return Changed;
}