#include "cpuid.h"
#include "sanitizer_common/sanitizer_common.h"
#if !SANITIZER_FUCHSIA
#include "sanitizer_common/sanitizer_posix.h"
#endif
#include "xray_defs.h"
#include "xray_interface_internal.h"
#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_APPLE
#include <sys/types.h>
#include <sys/sysctl.h>
#elif SANITIZER_FUCHSIA
#include <zircon/syscalls.h>
#endif
#include <atomic>
#include <cstdint>
#include <errno.h>
#include <fcntl.h>
#include <iterator>
#include <limits>
#include <tuple>
#include <unistd.h>
namespace __xray {
#if SANITIZER_LINUX
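// Read as many bytes as will fit in [Begin, End) from Fd, retrying reads that
// are interrupted by EINTR. Returns the total number of bytes read and whether
// all reads succeeded.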
static std::pair<ssize_t, bool>
retryingReadSome(int Fd, char *Begin, char *End) XRAY_NEVER_INSTRUMENT {
auto BytesToRead = std::distance(Begin, End);
ssize_t BytesRead;
ssize_t TotalBytesRead = 0;
while (BytesToRead && (BytesRead = read(Fd, Begin, BytesToRead))) {
if (BytesRead == -1) {
if (errno == EINTR)
continue;
Report("Read error; errno = %d\n", errno);
return std::make_pair(TotalBytesRead, false);
}
TotalBytesRead += BytesRead;
BytesToRead -= BytesRead;
Begin += BytesRead;
}
return std::make_pair(TotalBytesRead, true);
}
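// Parse a single decimal value out of the named file. Returns true and stores
// the result in *Value only if the file could be read and its contents parse
// as a number.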
static bool readValueFromFile(const char *Filename,
long long *Value) XRAY_NEVER_INSTRUMENT {
int Fd = open(Filename, O_RDONLY | O_CLOEXEC);
if (Fd == -1)
return false;
static constexpr size_t BufSize = 256;
char Line[BufSize] = {};
ssize_t BytesRead;
bool Success;
std::tie(BytesRead, Success) = retryingReadSome(Fd, Line, Line + BufSize);
close(Fd);
if (!Success)
return false;
const char *End = nullptr;
long long Tmp = internal_simple_strtoll(Line, &End, 10);
bool Result = false;
if (Line[0] != '\0' && (*End == '\n' || *End == '\0')) {
*Value = Tmp;
Result = true;
}
return Result;
}
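// On Linux, read the TSC rate from sysfs. Both tsc_freq_khz and
// cpuinfo_max_freq report kHz, hence the scaling by 1000 to get Hz.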
uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT {
long long TSCFrequency = -1;
if (readValueFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz",
&TSCFrequency)) {
TSCFrequency *= 1000;
} else if (readValueFromFile(
"/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
&TSCFrequency)) {
TSCFrequency *= 1000;
} else {
Report("Unable to determine CPU frequency for TSC accounting.\n");
}
return TSCFrequency == -1 ? 0 : static_cast<uint64_t>(TSCFrequency);
}
#elif SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_APPLE
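// On FreeBSD, NetBSD, and Darwin, the kernel exports the TSC rate (in Hz)
// directly through sysctl.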
uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT {
long long TSCFrequency = -1;
size_t tscfreqsz = sizeof(TSCFrequency);
#if SANITIZER_APPLE
if (internal_sysctlbyname("machdep.tsc.frequency", &TSCFrequency,
&tscfreqsz, NULL, 0) != -1) {
#else
if (internal_sysctlbyname("machdep.tsc_freq", &TSCFrequency, &tscfreqsz,
NULL, 0) != -1) {
#endif
return static_cast<uint64_t>(TSCFrequency);
} else {
Report("Unable to determine CPU frequency for TSC accounting.\n");
}
return 0;
}
#elif !SANITIZER_FUCHSIA
uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT {
/* Not supported */
return 0;
}
#endif
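// Each two-byte sequence below is stored little-endian so that a single atomic
// 16-bit store rewrites the first two bytes of a sled.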
static constexpr uint8_t CallOpCode = 0xe8;   // call rel32
static constexpr uint16_t MovR10Seq = 0xba41; // bytes 41 ba: opcode of "mov r10d, imm32"
static constexpr uint16_t Jmp9Seq = 0x09eb;   // bytes eb 09: "jmp +9"
static constexpr uint16_t Jmp20Seq = 0x14eb;  // bytes eb 14: "jmp +20"
static constexpr uint16_t Jmp15Seq = 0x0feb;  // bytes eb 0f: "jmp +15"
static constexpr uint8_t JmpOpCode = 0xe9;    // jmp rel32
static constexpr uint8_t RetOpCode = 0xc3;    // ret
static constexpr uint16_t NopwSeq = 0x9066;   // bytes 66 90: two-byte nop
static constexpr int64_t MinOffset{std::numeric_limits<int32_t>::min()};
static constexpr int64_t MaxOffset{std::numeric_limits<int32_t>::max()};
bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
const XRaySledEntry &Sled,
void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
// Here we do the dance of replacing the following sled:
//
// xray_sled_n:
// jmp +9
// <9 byte nop>
//
// With the following:
//
// mov r10d, <function id>
// call <relative 32bit offset to entry trampoline>
//
// We need to do this in the following order:
//
// 1. Put the function id first, 2 bytes from the start of the sled (just
// after the 2-byte jmp instruction).
// 2. Put the call opcode 6 bytes from the start of the sled.
// 3. Put the relative offset 7 bytes from the start of the sled.
// 4. Do an atomic write over the jmp instruction for the "mov r10d"
// opcode and first operand.
//
// Prerequisite is to compute the relative offset to the trampoline's address.
const uint64_t Address = Sled.address();
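  // The patched sequence is 11 bytes long: a 6-byte "mov r10d, imm32" followed
  // by a 5-byte "call rel32". The rel32 displacement is measured from the end
  // of the call instruction, hence Address + 11.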
int64_t TrampolineOffset = reinterpret_cast<int64_t>(Trampoline) -
(static_cast<int64_t>(Address) + 11);
if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
Report("XRay Entry trampoline (%p) too far from sled (%p)\n",
reinterpret_cast<void *>(Trampoline),
reinterpret_cast<void *>(Address));
return false;
}
if (Enable) {
*reinterpret_cast<uint32_t *>(Address + 2) = FuncId;
*reinterpret_cast<uint8_t *>(Address + 6) = CallOpCode;
*reinterpret_cast<uint32_t *>(Address + 7) = TrampolineOffset;
std::atomic_store_explicit(
reinterpret_cast<std::atomic<uint16_t> *>(Address), MovR10Seq,
std::memory_order_release);
} else {
std::atomic_store_explicit(
reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp9Seq,
std::memory_order_release);
// FIXME: Write out the nops still?
}
return true;
}
bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
// Here we do the dance of replacing the following sled:
//
// xray_sled_n:
// ret
// <10 byte nop>
//
// With the following:
//
// mov r10d, <function id>
// jmp <relative 32bit offset to exit trampoline>
//
  // 1. Put the function id first, 2 bytes from the start of the sled (one byte
  //    past the 1-byte ret instruction, where the immediate operand of the
  //    "mov r10d" will land).
// 2. Put the jmp opcode 6 bytes from the start of the sled.
// 3. Put the relative offset 7 bytes from the start of the sled.
// 4. Do an atomic write over the jmp instruction for the "mov r10d"
// opcode and first operand.
//
  // Prerequisite is to compute the relative offset to the
  // __xray_FunctionExit function's address.
const uint64_t Address = Sled.address();
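  // As in the entry sled, the 6-byte mov plus the 5-byte "jmp rel32" span 11
  // bytes, and the rel32 displacement is measured from the end of that
  // sequence, hence Address + 11.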
int64_t TrampolineOffset = reinterpret_cast<int64_t>(__xray_FunctionExit) -
(static_cast<int64_t>(Address) + 11);
if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
Report("XRay Exit trampoline (%p) too far from sled (%p)\n",
reinterpret_cast<void *>(__xray_FunctionExit),
reinterpret_cast<void *>(Address));
return false;
}
if (Enable) {
*reinterpret_cast<uint32_t *>(Address + 2) = FuncId;
*reinterpret_cast<uint8_t *>(Address + 6) = JmpOpCode;
*reinterpret_cast<uint32_t *>(Address + 7) = TrampolineOffset;
std::atomic_store_explicit(
reinterpret_cast<std::atomic<uint16_t> *>(Address), MovR10Seq,
std::memory_order_release);
} else {
std::atomic_store_explicit(
reinterpret_cast<std::atomic<uint8_t> *>(Address), RetOpCode,
std::memory_order_release);
// FIXME: Write out the nops still?
}
return true;
}
bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
  // Here we do the dance of replacing the tail call sled with a sequence
  // similar to the entry sled, but one that calls the tail exit trampoline
  // instead.
const uint64_t Address = Sled.address();
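  // Same 11-byte layout as the entry sled: a 6-byte mov followed by a 5-byte
  // call, with the displacement measured from the end of the call.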
int64_t TrampolineOffset =
reinterpret_cast<int64_t>(__xray_FunctionTailExit) -
(static_cast<int64_t>(Address) + 11);
if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
Report("XRay Tail Exit trampoline (%p) too far from sled (%p)\n",
reinterpret_cast<void *>(__xray_FunctionTailExit),
reinterpret_cast<void *>(Address));
return false;
}
if (Enable) {
*reinterpret_cast<uint32_t *>(Address + 2) = FuncId;
*reinterpret_cast<uint8_t *>(Address + 6) = CallOpCode;
*reinterpret_cast<uint32_t *>(Address + 7) = TrampolineOffset;
std::atomic_store_explicit(
reinterpret_cast<std::atomic<uint16_t> *>(Address), MovR10Seq,
std::memory_order_release);
} else {
std::atomic_store_explicit(
reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp9Seq,
std::memory_order_release);
// FIXME: Write out the nops still?
}
return true;
}
bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
// Here we do the dance of replacing the following sled:
//
// In Version 0:
//
// xray_sled_n:
// jmp +20 // 2 bytes
// ...
//
// With the following:
//
// nopw // 2 bytes*
// ...
//
//
// The "unpatch" should just turn the 'nopw' back to a 'jmp +20'.
//
// ---
//
// In Version 1 or 2:
//
// The jump offset is now 15 bytes (0x0f), so when restoring the nopw back
// to a jmp, use 15 bytes instead.
//
const uint64_t Address = Sled.address();
if (Enable) {
std::atomic_store_explicit(
reinterpret_cast<std::atomic<uint16_t> *>(Address), NopwSeq,
std::memory_order_release);
} else {
switch (Sled.Version) {
case 1:
case 2:
std::atomic_store_explicit(
reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp15Seq,
std::memory_order_release);
break;
case 0:
default:
std::atomic_store_explicit(
reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp20Seq,
std::memory_order_release);
break;
}
}
return false;
}
bool patchTypedEvent(const bool Enable, const uint32_t FuncId,
const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
// Here we do the dance of replacing the following sled:
//
// xray_sled_n:
// jmp +20 // 2 byte instruction
// ...
//
// With the following:
//
// nopw // 2 bytes
// ...
//
//
// The "unpatch" should just turn the 'nopw' back to a 'jmp +20'.
// The 20 byte sled stashes three argument registers, calls the trampoline,
// unstashes the registers and returns. If the arguments are already in
// the correct registers, the stashing and unstashing become equivalently
// sized nops.
const uint64_t Address = Sled.address();
if (Enable) {
std::atomic_store_explicit(
reinterpret_cast<std::atomic<uint16_t> *>(Address), NopwSeq,
std::memory_order_release);
} else {
std::atomic_store_explicit(
reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp20Seq,
std::memory_order_release);
}
return false;
}
#if !SANITIZER_FUCHSIA
// We determine whether the CPU we're running on has the correct features we
// need. On x86_64, this means rdtscp support.
bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT {
unsigned int EAX, EBX, ECX, EDX;
// We check whether rdtscp support is enabled. According to the x86_64 manual,
// level should be set at 0x80000001, and we should have a look at bit 27 in
// EDX. That's 0x8000000 (or 1u << 27).
__asm__ __volatile__("cpuid" : "=a"(EAX), "=b"(EBX), "=c"(ECX), "=d"(EDX)
: "0"(0x80000001));
if (!(EDX & (1u << 27))) {
Report("Missing rdtscp support.\n");
return false;
}
// Also check whether we can determine the CPU frequency, since if we cannot,
// we should use the emulated TSC instead.
if (!getTSCFrequency()) {
Report("Unable to determine CPU frequency.\n");
return false;
}
return true;
}
#endif
} // namespace __xray