//===- AArch64GlobalISelUtils.cpp --------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file Implementations of AArch64-specific helper functions used in the
/// GlobalISel pipeline.
//===----------------------------------------------------------------------===//
#include "AArch64GlobalISelUtils.h"
#include "AArch64InstrInfo.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
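
/// \returns The splat value as a RegOrConstant if \p MI is a vector splat,
/// recognizing AArch64::G_DUP in addition to the generic splat forms, or
/// None otherwise.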
Optional<RegOrConstant>
AArch64GISelUtils::getAArch64VectorSplat(const MachineInstr &MI,
                                         const MachineRegisterInfo &MRI) {
  if (auto Splat = getVectorSplat(MI, MRI))
    return Splat;
  if (MI.getOpcode() != AArch64::G_DUP)
    return None;
  Register Src = MI.getOperand(1).getReg();
  if (auto ValAndVReg = getAnyConstantVRegValWithLookThrough(Src, MRI))
    return RegOrConstant(ValAndVReg->Value.getSExtValue());
  return RegOrConstant(Src);
}
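
/// \returns The constant scalar splatted by \p MI, or None if \p MI does not
/// splat a constant.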
Optional<int64_t>
AArch64GISelUtils::getAArch64VectorSplatScalar(const MachineInstr &MI,
                                               const MachineRegisterInfo &MRI) {
  auto Splat = getAArch64VectorSplat(MI, MRI);
  if (!Splat || Splat->isReg())
    return None;
  return Splat->getCst();
}
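
/// \returns true if \p MaybeSub is a negation (G_SUB from zero) feeding an
/// equality compare \p Pred, i.e. the compare can be selected as a CMN.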
bool AArch64GISelUtils::isCMN(const MachineInstr *MaybeSub,
                              const CmpInst::Predicate &Pred,
                              const MachineRegisterInfo &MRI) {
  // Match:
  //
  // %sub = G_SUB 0, %y
  // %cmp = G_ICMP eq/ne, %sub, %z
  //
  // Or
  //
  // %sub = G_SUB 0, %y
  // %cmp = G_ICMP eq/ne, %z, %sub
  if (!MaybeSub || MaybeSub->getOpcode() != TargetOpcode::G_SUB ||
      !CmpInst::isEquality(Pred))
    return false;
  auto MaybeZero =
      getIConstantVRegValWithLookThrough(MaybeSub->getOperand(1).getReg(), MRI);
  return MaybeZero && MaybeZero->Value.getZExtValue() == 0;
}
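
/// Replace a G_MEMSET storing 0 with a G_BZERO when the bzero libcall is
/// available and profitable. Illustrative MIR (operand details elided):
///
///   %zero:_(s8) = G_CONSTANT i8 0
///   G_MEMSET %ptr(p0), %zero(s8), %size(s64), 0 :: (store ...)
/// becomes
///   G_BZERO %ptr(p0), %size(s64), 0 :: (store ...)
///
/// \returns true if \p MI was replaced and erased.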
bool AArch64GISelUtils::tryEmitBZero(MachineInstr &MI,
                                     MachineIRBuilder &MIRBuilder,
                                     bool MinSize) {
  assert(MI.getOpcode() == TargetOpcode::G_MEMSET);
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
  if (!TLI.getLibcallName(RTLIB::BZERO))
    return false;
  auto Zero =
      getIConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI);
  if (!Zero || Zero->Value.getSExtValue() != 0)
    return false;

  // It's not faster to use bzero rather than memset for sizes <= 256.
  // However, it *does* save us a mov from wzr, so if we're going for
  // minsize, use bzero even if it's slower.
  if (!MinSize) {
    // If the size is known, check it. If it is not known, assume using bzero is
    // better.
    if (auto Size = getIConstantVRegValWithLookThrough(
            MI.getOperand(2).getReg(), MRI)) {
      if (Size->Value.getSExtValue() <= 256)
        return false;
    }
  }

  MIRBuilder.setInstrAndDebugLoc(MI);
  MIRBuilder
      .buildInstr(TargetOpcode::G_BZERO, {},
                  {MI.getOperand(0), MI.getOperand(2)})
      .addImm(MI.getOperand(3).getImm())
      .addMemOperand(*MI.memoperands_begin());
  MI.eraseFromParent();
  return true;
}
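
/// Convert an IR floating point condition \p P into the equivalent AArch64
/// condition code(s). \p CondCode2 is left as AArch64CC::AL when a single
/// condition code suffices.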
void AArch64GISelUtils::changeFCMPPredToAArch64CC(
    const CmpInst::Predicate P, AArch64CC::CondCode &CondCode,
    AArch64CC::CondCode &CondCode2) {
  CondCode2 = AArch64CC::AL;
  switch (P) {
  default:
    llvm_unreachable("Unknown FP condition!");
  case CmpInst::FCMP_OEQ:
    CondCode = AArch64CC::EQ;
    break;
  case CmpInst::FCMP_OGT:
    CondCode = AArch64CC::GT;
    break;
  case CmpInst::FCMP_OGE:
    CondCode = AArch64CC::GE;
    break;
  case CmpInst::FCMP_OLT:
    CondCode = AArch64CC::MI;
    break;
  case CmpInst::FCMP_OLE:
    CondCode = AArch64CC::LS;
    break;
  case CmpInst::FCMP_ONE:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GT;
    break;
  case CmpInst::FCMP_ORD:
    CondCode = AArch64CC::VC;
    break;
  case CmpInst::FCMP_UNO:
    CondCode = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UEQ:
    CondCode = AArch64CC::EQ;
    CondCode2 = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UGT:
    CondCode = AArch64CC::HI;
    break;
  case CmpInst::FCMP_UGE:
    CondCode = AArch64CC::PL;
    break;
  case CmpInst::FCMP_ULT:
    CondCode = AArch64CC::LT;
    break;
  case CmpInst::FCMP_ULE:
    CondCode = AArch64CC::LE;
    break;
  case CmpInst::FCMP_UNE:
    CondCode = AArch64CC::NE;
    break;
  }
}
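
/// Convert an IR floating point condition \p P into the AArch64 condition
/// code(s) for a vector compare-mask sequence. \p Invert is set when the
/// compare-mask result must be inverted to implement \p P.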
void AArch64GISelUtils::changeVectorFCMPPredToAArch64CC(
    const CmpInst::Predicate P, AArch64CC::CondCode &CondCode,
    AArch64CC::CondCode &CondCode2, bool &Invert) {
  Invert = false;
  switch (P) {
  default:
    // Mostly the scalar mappings work fine.
    changeFCMPPredToAArch64CC(P, CondCode, CondCode2);
    break;
  case CmpInst::FCMP_UNO:
    Invert = true;
    LLVM_FALLTHROUGH;
  case CmpInst::FCMP_ORD:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GE;
    break;
  case CmpInst::FCMP_UEQ:
  case CmpInst::FCMP_ULT:
  case CmpInst::FCMP_ULE:
  case CmpInst::FCMP_UGT:
  case CmpInst::FCMP_UGE:
    // All of the compare-mask comparisons are ordered, but we can switch
    // between the two by a double inversion. E.g. ULE == !OGT.
    Invert = true;
    changeFCMPPredToAArch64CC(CmpInst::getInversePredicate(P), CondCode,
                              CondCode2);
    break;
  }
}