//===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetAsmBackend.h"
#include "X86.h"
#include "X86FixupKinds.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/ELFObjectWriter.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MachObjectWriter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegistry.h"
#include "llvm/Target/TargetAsmBackend.h"
using namespace llvm;
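/// getFixupKindLog2Size - Return the log2 of the size in bytes occupied by a
/// fixup of the given kind, e.g. 0 for FK_Data_1 and 2 for the 4-byte
/// pc-relative and rip-relative kinds.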
static unsigned getFixupKindLog2Size(unsigned Kind) {
switch (Kind) {
default: assert(0 && "invalid fixup kind!");
case X86::reloc_pcrel_1byte:
case FK_Data_1: return 0;
case X86::reloc_pcrel_2byte:
case FK_Data_2: return 1;
case X86::reloc_pcrel_4byte:
case X86::reloc_riprel_4byte:
case X86::reloc_riprel_4byte_movq_load:
case FK_Data_4: return 2;
case FK_Data_8: return 3;
}
}
namespace {
class X86AsmBackend : public TargetAsmBackend {
public:
X86AsmBackend(const Target &T)
: TargetAsmBackend(T) {}
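/// ApplyFixup - Patch the resolved fixup value into the data fragment,
/// writing Value in little-endian byte order at the fixup's offset. The
/// number of bytes written is derived from the fixup kind.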
void ApplyFixup(const MCFixup &Fixup, MCDataFragment &DF,
uint64_t Value) const {
unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind());
assert(Fixup.getOffset() + Size <= DF.getContents().size() &&
"Invalid fixup offset!");
for (unsigned i = 0; i != Size; ++i)
DF.getContents()[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
}
bool MayNeedRelaxation(const MCInst &Inst) const;
void RelaxInstruction(const MCInst &Inst, MCInst &Res) const;
bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const;
};
} // end anonymous namespace
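/// getRelaxedOpcode - Map a 1-byte pc-relative branch opcode to its 4-byte
/// pc-relative form. Opcodes with no relaxed form are returned unchanged.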
static unsigned getRelaxedOpcode(unsigned Op) {
switch (Op) {
default:
return Op;
case X86::JAE_1: return X86::JAE_4;
case X86::JA_1: return X86::JA_4;
case X86::JBE_1: return X86::JBE_4;
case X86::JB_1: return X86::JB_4;
case X86::JE_1: return X86::JE_4;
case X86::JGE_1: return X86::JGE_4;
case X86::JG_1: return X86::JG_4;
case X86::JLE_1: return X86::JLE_4;
case X86::JL_1: return X86::JL_4;
case X86::JMP_1: return X86::JMP_4;
case X86::JNE_1: return X86::JNE_4;
case X86::JNO_1: return X86::JNO_4;
case X86::JNP_1: return X86::JNP_4;
case X86::JNS_1: return X86::JNS_4;
case X86::JO_1: return X86::JO_4;
case X86::JP_1: return X86::JP_4;
case X86::JS_1: return X86::JS_4;
}
}
bool X86AsmBackend::MayNeedRelaxation(const MCInst &Inst) const {
// Check if this instruction is ever relaxable.
if (getRelaxedOpcode(Inst.getOpcode()) == Inst.getOpcode())
return false;
// If so, just assume it can be relaxed. Once we support relaxing more complex
// instructions we should check that the instruction actually has symbolic
// operands before doing this, but we need to be careful about things like
// PCrel.
return true;
}
// FIXME: Can tblgen help at all here to verify there aren't other instructions
// we can relax?
void X86AsmBackend::RelaxInstruction(const MCInst &Inst, MCInst &Res) const {
// The only relaxation X86 does is from a 1-byte pcrel to a 4-byte pcrel.
unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());
if (RelaxedOp == Inst.getOpcode()) {
SmallString<256> Tmp;
raw_svector_ostream OS(Tmp);
Inst.dump_pretty(OS);
OS << "\n";
report_fatal_error("unexpected instruction to relax: " + OS.str());
}
Res = Inst;
Res.setOpcode(RelaxedOp);
}
/// WriteNopData - Write an optimal sequence of nops for \arg Count bytes to
/// the output. Counts larger than 15 are padded out with single-byte nops.
/// Returns true if the nop data was written.
///
/// FIXME this is X86 32-bit specific and should move to a better place.
bool X86AsmBackend::WriteNopData(uint64_t Count, MCObjectWriter *OW) const {
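// Nops[i] is an efficient nop sequence of length i + 1; the longest entry
// used below is 15 bytes.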
static const uint8_t Nops[16][16] = {
// nop
{0x90},
// xchg %ax,%ax
{0x66, 0x90},
// nopl (%[re]ax)
{0x0f, 0x1f, 0x00},
// nopl 0(%[re]ax)
{0x0f, 0x1f, 0x40, 0x00},
// nopl 0(%[re]ax,%[re]ax,1)
{0x0f, 0x1f, 0x44, 0x00, 0x00},
// nopw 0(%[re]ax,%[re]ax,1)
{0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
// nopl 0L(%[re]ax)
{0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
// nopl 0L(%[re]ax,%[re]ax,1)
{0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
// nopw 0L(%[re]ax,%[re]ax,1)
{0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
// nopw %cs:0L(%[re]ax,%[re]ax,1)
{0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
// nopl 0(%[re]ax,%[re]ax,1)
// nopw 0(%[re]ax,%[re]ax,1)
{0x0f, 0x1f, 0x44, 0x00, 0x00,
0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
// nopw 0(%[re]ax,%[re]ax,1)
// nopw 0(%[re]ax,%[re]ax,1)
{0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00,
0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
// nopw 0(%[re]ax,%[re]ax,1)
// nopl 0L(%[re]ax)
{0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00,
0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
// nopl 0L(%[re]ax)
// nopl 0L(%[re]ax)
{0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00,
0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
// nopl 0L(%[re]ax)
// nopl 0L(%[re]ax,%[re]ax,1)
{0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00,
0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}
};
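// For example, a 20 byte request emits the 15-byte Nops[14] entry followed
// by five single-byte (0x90) nops.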
// Write an optimal sequence for the first 15 bytes.
uint64_t OptimalCount = (Count < 16) ? Count : 15;
for (uint64_t i = 0, e = OptimalCount; i != e; i++)
OW->Write8(Nops[OptimalCount - 1][i]);
// Finish with single byte nops.
for (uint64_t i = OptimalCount, e = Count; i != e; ++i)
OW->Write8(0x90);
return true;
}
/* *** */
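// Object-format specific backends. Each subclass selects the matching object
// writer and reports which sections are "virtual", i.e. occupy no space in
// the object file (ELF SHT_NOBITS, COFF uninitialized data, Mach-O zerofill).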
namespace {
class ELFX86AsmBackend : public X86AsmBackend {
public:
ELFX86AsmBackend(const Target &T)
: X86AsmBackend(T) {
HasAbsolutizedSet = true;
HasScatteredSymbols = true;
}
bool isVirtualSection(const MCSection &Section) const {
const MCSectionELF &SE = static_cast<const MCSectionELF&>(Section);
return SE.getType() == MCSectionELF::SHT_NOBITS;
}
};
class ELFX86_32AsmBackend : public ELFX86AsmBackend {
public:
ELFX86_32AsmBackend(const Target &T)
: ELFX86AsmBackend(T) {}
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
return new ELFObjectWriter(OS, /*Is64Bit=*/false,
/*IsLittleEndian=*/true,
/*HasRelocationAddend=*/false);
}
};
class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
ELFX86_64AsmBackend(const Target &T)
: ELFX86AsmBackend(T) {}
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
return new ELFObjectWriter(OS, /*Is64Bit=*/true,
/*IsLittleEndian=*/true,
/*HasRelocationAddend=*/true);
}
};
class WindowsX86AsmBackend : public X86AsmBackend {
bool Is64Bit;
public:
WindowsX86AsmBackend(const Target &T, bool is64Bit)
: X86AsmBackend(T)
, Is64Bit(is64Bit) {
HasScatteredSymbols = true;
}
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
return createWinCOFFObjectWriter(OS, Is64Bit);
}
bool isVirtualSection(const MCSection &Section) const {
const MCSectionCOFF &SE = static_cast<const MCSectionCOFF&>(Section);
return SE.getCharacteristics() & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA;
}
};
class DarwinX86AsmBackend : public X86AsmBackend {
public:
DarwinX86AsmBackend(const Target &T)
: X86AsmBackend(T) {
HasAbsolutizedSet = true;
HasScatteredSymbols = true;
}
bool isVirtualSection(const MCSection &Section) const {
const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
return (SMO.getType() == MCSectionMachO::S_ZEROFILL ||
SMO.getType() == MCSectionMachO::S_GB_ZEROFILL ||
SMO.getType() == MCSectionMachO::S_THREAD_LOCAL_ZEROFILL);
}
};
class DarwinX86_32AsmBackend : public DarwinX86AsmBackend {
public:
DarwinX86_32AsmBackend(const Target &T)
: DarwinX86AsmBackend(T) {}
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
return new MachObjectWriter(OS, /*Is64Bit=*/false);
}
};
class DarwinX86_64AsmBackend : public DarwinX86AsmBackend {
public:
DarwinX86_64AsmBackend(const Target &T)
: DarwinX86AsmBackend(T) {
HasReliableSymbolDifference = true;
}
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
return new MachObjectWriter(OS, /*Is64Bit=*/true);
}
virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
// Temporary labels in the string literals sections require symbols. The
// issue is that the x86_64 relocation format does not allow symbol +
// offset, and so the linker does not have enough information to resolve the
// access to the appropriate atom unless an external relocation is used. For
// non-cstring sections, we expect the compiler to use a non-temporary label
// for anything that could have an addend pointing outside the symbol.
//
// See <rdar://problem/4765733>.
const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
return SMO.getType() == MCSectionMachO::S_CSTRING_LITERALS;
}
virtual bool isSectionAtomizable(const MCSection &Section) const {
const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
// Fixed sized data sections are uniqued, they cannot be diced into atoms.
switch (SMO.getType()) {
default:
return true;
case MCSectionMachO::S_4BYTE_LITERALS:
case MCSectionMachO::S_8BYTE_LITERALS:
case MCSectionMachO::S_16BYTE_LITERALS:
case MCSectionMachO::S_LITERAL_POINTERS:
case MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS:
case MCSectionMachO::S_LAZY_SYMBOL_POINTERS:
case MCSectionMachO::S_MOD_INIT_FUNC_POINTERS:
case MCSectionMachO::S_MOD_TERM_FUNC_POINTERS:
case MCSectionMachO::S_INTERPOSING:
return false;
}
}
};
} // end anonymous namespace
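// Backend factories: pick the object-format specific backend based on the OS
// component of the target triple, e.g. a triple such as "i386-apple-darwin10"
// selects the Darwin backend, while unrecognized systems default to ELF.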
TargetAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
const std::string &TT) {
switch (Triple(TT).getOS()) {
case Triple::Darwin:
return new DarwinX86_32AsmBackend(T);
case Triple::MinGW32:
case Triple::Cygwin:
case Triple::Win32:
return new WindowsX86AsmBackend(T, false);
default:
return new ELFX86_32AsmBackend(T);
}
}
TargetAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
const std::string &TT) {
switch (Triple(TT).getOS()) {
case Triple::Darwin:
return new DarwinX86_64AsmBackend(T);
case Triple::MinGW64:
case Triple::Cygwin:
case Triple::Win32:
return new WindowsX86AsmBackend(T, true);
default:
return new ELFX86_64AsmBackend(T);
}
}