// run-pass
// no-prefer-dynamic
// ignore-wasm32-bare no libc
// ignore-windows
// ignore-sgx no libc
// ignore-emscripten no processes
// ignore-sgx no processes
// ignore-fuchsia no fork
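
// Check that, after fork(), panics in the child abort the process (via
// panic::always_abort) and do not allocate on the heap; the PidChecking stunt
// allocator below detects any allocation made in a forked child.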
#![feature(rustc_private)]
#![feature(never_type)]
#![feature(panic_always_abort)]
extern crate libc;
use std::alloc::{GlobalAlloc, Layout};
use std::fmt;
use std::panic::{self, panic_any};
use std::os::unix::process::{CommandExt, ExitStatusExt};
use std::process::{self, Command, ExitStatus};
use std::sync::atomic::{AtomicU32, Ordering};
use libc::c_int;
/// This stunt allocator allows us to spot heap allocations in the child.
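///
/// The parent records its PID before forking; an allocation made in any process with a
/// different PID (i.e. a forked child) raises SIGUSR1, which the parent then sees in the
/// child's wait status.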
struct PidChecking<A> {
parent: A,
require_pid: AtomicU32,
}
#[global_allocator]
static ALLOCATOR: PidChecking<std::alloc::System> = PidChecking {
parent: std::alloc::System,
require_pid: AtomicU32::new(0),
};
impl<A> PidChecking<A> {
fn engage(&self) {
let parent_pid = process::id();
eprintln!("engaging allocator trap, parent pid={}", parent_pid);
self.require_pid.store(parent_pid, Ordering::Release);
}
fn check(&self) {
let require_pid = self.require_pid.load(Ordering::Acquire);
if require_pid != 0 {
let actual_pid = process::id();
if require_pid != actual_pid {
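                // An allocation happened in a forked child. Report it by raising
                // SIGUSR1 so that the parent can tell this failure apart from an
                // ordinary abort when it inspects the child's wait status.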
unsafe {
libc::raise(libc::SIGUSR1);
}
}
}
}
}
unsafe impl<A: GlobalAlloc> GlobalAlloc for PidChecking<A> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
self.check();
self.parent.alloc(layout)
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
self.check();
self.parent.dealloc(ptr, layout)
}
unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
self.check();
self.parent.alloc_zeroed(layout)
}
unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
self.check();
self.parent.realloc(ptr, layout, new_size)
}
}
fn expect_aborted(status: ExitStatus) {
dbg!(status);
let signal = status.signal().expect("expected child process to die of signal");
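    // The abort may be delivered as SIGABRT, or as SIGILL/SIGTRAP on targets where the
    // abort is implemented as a trap instruction.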
#[cfg(not(target_os = "android"))]
assert!(signal == libc::SIGABRT || signal == libc::SIGILL || signal == libc::SIGTRAP);
#[cfg(target_os = "android")]
{
assert!(signal == libc::SIGABRT || signal == libc::SIGSEGV);
if signal == libc::SIGSEGV {
// Pre-KitKat versions of Android signal an abort() with SIGSEGV at address 0xdeadbaad
// See e.g. https://groups.google.com/g/android-ndk/c/laW1CJc7Icc
//
// This behavior was changed in KitKat to send a standard SIGABRT signal.
// See: https://r.android.com/60341
//
            // Additional checks performed:
            // 1. Find the last tombstone (similar to a coredump, but in text format)
            //    written for the same executable path as ours (the crashing process is a
            //    fork of this test, so the paths match). This ensures that we look at the
            //    correct tombstone.
            // 2. The cause of the crash is a SIGSEGV with fault address 0xdeadbaad.
            // 3. The libc::abort call is in one of the top two functions on the callstack.
            // The last two checks distinguish a normal SIGSEGV from one caused by
            // libc::abort.
let this_exe = std::env::current_exe().unwrap().into_os_string().into_string().unwrap();
let exe_string = format!(">>> {this_exe} <<<");
let tombstone = (0..100)
.map(|n| format!("/data/tombstones/tombstone_{n:02}"))
.filter(|f| std::path::Path::new(&f).exists())
.map(|f| std::fs::read_to_string(&f).expect("Cannot read tombstone file"))
.filter(|f| f.contains(&exe_string))
.last()
.expect("no tombstone found");
println!("Content of tombstone:\n{tombstone}");
assert!(tombstone
.contains("signal 11 (SIGSEGV), code 1 (SEGV_MAPERR), fault addr deadbaad"));
let abort_on_top = tombstone
.lines()
.skip_while(|l| !l.contains("backtrace:"))
.skip(1)
.take_while(|l| l.starts_with(" #"))
.take(2)
.any(|f| f.contains("/system/lib/libc.so (abort"));
assert!(abort_on_top);
}
}
}
fn main() {
ALLOCATOR.engage();
fn run(do_panic: &dyn Fn()) -> ExitStatus {
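        // Fork, run `do_panic` in the child (which is expected to abort), and return
        // the child's wait status for the caller to check.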
let child = unsafe { libc::fork() };
assert!(child >= 0);
if child == 0 {
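            // Child: from here on every panic must abort the process directly, without
            // unwinding and without allocating; the stunt allocator checks the latter.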
panic::always_abort();
do_panic();
process::exit(0);
}
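        // Parent: reap the child and convert its raw wait status into an ExitStatus.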
let mut status: c_int = 0;
let got = unsafe { libc::waitpid(child, &mut status, 0) };
assert_eq!(got, child);
let status = ExitStatus::from_raw(status.into());
status
}
fn one(do_panic: &dyn Fn()) {
let status = run(do_panic);
expect_aborted(status);
}
one(&|| panic!());
one(&|| panic!("some message"));
one(&|| panic!("message with argument: {}", 42));
#[derive(Debug)]
struct Wotsit { }
one(&|| panic_any(Wotsit { }));
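    // Command::pre_exec hooks run in the forked child, so a panic there must also abort.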
let mut c = Command::new("echo");
unsafe {
c.pre_exec(|| panic!("{}", "crash now!"));
}
let st = c.status().expect("failed to get command status");
expect_aborted(st);
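    // A Display impl that allocates while formatting. The final check below formats one
    // in the child to prove that the stunt allocator really does catch allocations.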
struct DisplayWithHeap;
impl fmt::Display for DisplayWithHeap {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
let s = vec![0; 100];
let s = std::hint::black_box(s);
write!(f, "{:?}", s)
}
}
    // Some panics from the stdlib which must not allocate, since otherwise
    // these facilities become impossible to use in a child after fork,
    // which would be really quite awkward.
one(&|| { None::<DisplayWithHeap>.unwrap(); });
one(&|| { None::<DisplayWithHeap>.expect("unwrapped a none"); });
one(&|| { std::str::from_utf8(b"\xff").unwrap(); });
one(&|| {
let x = [0, 1, 2, 3];
let y = x[std::hint::black_box(4)];
let _z = std::hint::black_box(y);
});
    // Finally, check that our stunt allocator can actually catch an allocation after
    // fork, i.e. that this test is effective.
let status = run(&|| panic!("allocating to display... {}", DisplayWithHeap));
dbg!(status);
assert_eq!(status.signal(), Some(libc::SIGUSR1));
}