Merge pull request #20561 from jacobly0/debug-segfaults
debug: prevent segfaults on linux
andrewrk authored Jul 10, 2024
2 parents c5283eb + 1b34ae1 commit b3b923e
Showing 3 changed files with 112 additions and 31 deletions.
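
For context, the code path this commit hardens is the frame-pointer stack walk in std.debug. Below is a minimal sketch of the public API involved (illustrative only; it assumes the long-standing StackIterator.init(first_address, fp) signature):

```zig
const std = @import("std");

pub fn main() void {
    // Walk our own call stack starting from the current frame pointer.
    // Before this commit, a corrupt or dangling frame pointer on this path
    // was dereferenced directly and could segfault; after it, the loads go
    // through MemoryAccessor and simply end the walk.
    var it = std.debug.StackIterator.init(null, null);
    defer it.deinit();
    while (it.next()) |return_address| {
        std.debug.print("0x{x}\n", .{return_address});
    }
}
```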
116 changes: 89 additions & 27 deletions lib/std/debug.zig
@@ -570,6 +570,7 @@ pub const StackIterator = struct {
first_address: ?usize,
// Last known value of the frame pointer register.
fp: usize,
ma: MemoryAccessor = MemoryAccessor.init,

// When DebugInfo and a register context is available, this iterator can unwind
// stacks with frames that don't use a frame pointer (ie. -fomit-frame-pointer),
@@ -616,16 +617,16 @@ pub const StackIterator = struct {
}
}

pub fn deinit(self: *StackIterator) void {
if (have_ucontext and self.unwind_state != null) self.unwind_state.?.dwarf_context.deinit();
pub fn deinit(it: *StackIterator) void {
if (have_ucontext and it.unwind_state != null) it.unwind_state.?.dwarf_context.deinit();
}

pub fn getLastError(self: *StackIterator) ?struct {
pub fn getLastError(it: *StackIterator) ?struct {
err: UnwindError,
address: usize,
} {
if (!have_ucontext) return null;
if (self.unwind_state) |*unwind_state| {
if (it.unwind_state) |*unwind_state| {
if (unwind_state.last_error) |err| {
unwind_state.last_error = null;
return .{
@@ -662,14 +663,14 @@ pub const StackIterator = struct {
else
@sizeOf(usize);

pub fn next(self: *StackIterator) ?usize {
var address = self.next_internal() orelse return null;
pub fn next(it: *StackIterator) ?usize {
var address = it.next_internal() orelse return null;

if (self.first_address) |first_address| {
if (it.first_address) |first_address| {
while (address != first_address) {
address = self.next_internal() orelse return null;
address = it.next_internal() orelse return null;
}
self.first_address = null;
it.first_address = null;
}

return address;
@@ -718,8 +719,74 @@ pub const StackIterator = struct {
}
}

fn next_unwind(self: *StackIterator) !usize {
const unwind_state = &self.unwind_state.?;
pub const MemoryAccessor = struct {
var cached_pid: posix.pid_t = -1;

mem: switch (native_os) {
.linux => File,
else => void,
},

pub const init: MemoryAccessor = .{
.mem = switch (native_os) {
.linux => .{ .handle = -1 },
else => {},
},
};

fn read(ma: *MemoryAccessor, address: usize, buf: []u8) bool {
switch (native_os) {
.linux => while (true) switch (ma.mem.handle) {
-2 => break,
-1 => {
const linux = std.os.linux;
const pid = switch (@atomicLoad(posix.pid_t, &cached_pid, .monotonic)) {
-1 => pid: {
const pid = linux.getpid();
@atomicStore(posix.pid_t, &cached_pid, pid, .monotonic);
break :pid pid;
},
else => |pid| pid,
};
const bytes_read = linux.process_vm_readv(
pid,
&.{.{ .base = buf.ptr, .len = buf.len }},
&.{.{ .base = @ptrFromInt(address), .len = buf.len }},
0,
);
switch (linux.E.init(bytes_read)) {
.SUCCESS => return bytes_read == buf.len,
.FAULT => return false,
.INVAL, .PERM, .SRCH => unreachable, // own pid is always valid
.NOMEM, .NOSYS => {},
else => unreachable, // unexpected
}
var path_buf: [
std.fmt.count("/proc/{d}/mem", .{math.minInt(posix.pid_t)})
]u8 = undefined;
const path = std.fmt.bufPrint(&path_buf, "/proc/{d}/mem", .{pid}) catch
unreachable;
ma.mem = std.fs.openFileAbsolute(path, .{}) catch {
ma.mem.handle = -2;
break;
};
},
else => return (ma.mem.pread(buf, address) catch return false) == buf.len,
},
else => {},
}
if (!isValidMemory(address)) return false;
@memcpy(buf, @as([*]const u8, @ptrFromInt(address)));
return true;
}
pub fn load(ma: *MemoryAccessor, comptime Type: type, address: usize) ?Type {
var result: Type = undefined;
return if (ma.read(address, std.mem.asBytes(&result))) result else null;
}
};

fn next_unwind(it: *StackIterator) !usize {
const unwind_state = &it.unwind_state.?;
const module = try unwind_state.debug_info.getModuleForAddress(unwind_state.dwarf_context.pc);
switch (native_os) {
.macos, .ios, .watchos, .tvos, .visionos => {
@@ -741,13 +808,13 @@ pub const StackIterator = struct {
} else return error.MissingDebugInfo;
}

fn next_internal(self: *StackIterator) ?usize {
fn next_internal(it: *StackIterator) ?usize {
if (have_ucontext) {
if (self.unwind_state) |*unwind_state| {
if (it.unwind_state) |*unwind_state| {
if (!unwind_state.failed) {
if (unwind_state.dwarf_context.pc == 0) return null;
defer self.fp = unwind_state.dwarf_context.getFp() catch 0;
if (self.next_unwind()) |return_address| {
defer it.fp = unwind_state.dwarf_context.getFp() catch 0;
if (it.next_unwind()) |return_address| {
return return_address;
} else |err| {
unwind_state.last_error = err;
@@ -763,29 +830,24 @@ pub const StackIterator = struct {

const fp = if (comptime native_arch.isSPARC())
// On SPARC the offset is positive. (!)
math.add(usize, self.fp, fp_offset) catch return null
math.add(usize, it.fp, fp_offset) catch return null
else
math.sub(usize, self.fp, fp_offset) catch return null;
math.sub(usize, it.fp, fp_offset) catch return null;

// Sanity check.
if (fp == 0 or !mem.isAligned(fp, @alignOf(usize)) or !isValidMemory(fp))
if (fp == 0 or !mem.isAligned(fp, @alignOf(usize))) return null;
const new_fp = math.add(usize, it.ma.load(usize, fp) orelse return null, fp_bias) catch
return null;

const new_fp = math.add(usize, @as(*const usize, @ptrFromInt(fp)).*, fp_bias) catch return null;

// Sanity check: the stack grows down thus all the parent frames must be
// be at addresses that are greater (or equal) than the previous one.
// A zero frame pointer often signals this is the last frame, that case
// is gracefully handled by the next call to next_internal.
if (new_fp != 0 and new_fp < self.fp)
if (new_fp != 0 and new_fp < it.fp) return null;
const new_pc = it.ma.load(usize, math.add(usize, fp, pc_offset) catch return null) orelse
return null;

const new_pc = @as(
*const usize,
@ptrFromInt(math.add(usize, fp, pc_offset) catch return null),
).*;

self.fp = new_fp;
it.fp = new_fp;

return new_pc;
}
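
The new MemoryAccessor is the core of the fix: instead of dereferencing a frame pointer read from a possibly corrupt stack, the unwinder asks the kernel to copy the bytes, so a bad address yields null rather than a fault. On Linux it resolves the pid once (cached with @atomicLoad/@atomicStore), tries process_vm_readv, and falls back to pread on /proc/<pid>/mem when the syscall is unavailable; only on other targets does it use isValidMemory plus a direct @memcpy. A minimal usage sketch, not part of the commit, assuming MemoryAccessor is reachable as std.debug.StackIterator.MemoryAccessor as its placement in this diff suggests:

```zig
const std = @import("std");

// Assumed path: the diff declares MemoryAccessor inside StackIterator.
const MemoryAccessor = std.debug.StackIterator.MemoryAccessor;

pub fn main() void {
    var ma: MemoryAccessor = MemoryAccessor.init;

    // A mapped address reads exactly like a pointer dereference.
    const local: usize = 0x1234;
    if (ma.load(usize, @intFromPtr(&local))) |v| {
        std.debug.print("read back 0x{x}\n", .{v});
    }

    // An unmapped address returns null instead of segfaulting; this is what
    // protects the frame-pointer walker from corrupt stacks.
    if (ma.load(usize, 1) == null) {
        std.debug.print("bad address handled gracefully\n", .{});
    }
}
```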
8 changes: 4 additions & 4 deletions lib/std/os/linux.zig
@@ -1519,15 +1519,15 @@ pub fn setgroups(size: usize, list: [*]const gid_t) usize {
}

pub fn setsid() pid_t {
return @as(pid_t, @bitCast(@as(u32, @truncate(syscall0(.setsid)))));
return @bitCast(@as(u32, @truncate(syscall0(.setsid))));
}

pub fn getpid() pid_t {
return @as(pid_t, @bitCast(@as(u32, @truncate(syscall0(.getpid)))));
return @bitCast(@as(u32, @truncate(syscall0(.getpid))));
}

pub fn gettid() pid_t {
return @as(pid_t, @bitCast(@as(u32, @truncate(syscall0(.gettid)))));
return @bitCast(@as(u32, @truncate(syscall0(.gettid))));
}

pub fn sigprocmask(flags: u32, noalias set: ?*const sigset_t, noalias oldset: ?*sigset_t) usize {
@@ -2116,7 +2116,7 @@ pub fn pidfd_send_signal(pidfd: fd_t, sig: i32, info: ?*siginfo_t, flags: u32) u
);
}

pub fn process_vm_readv(pid: pid_t, local: []iovec, remote: []const iovec_const, flags: usize) usize {
pub fn process_vm_readv(pid: pid_t, local: []const iovec, remote: []const iovec_const, flags: usize) usize {
return syscall6(
.process_vm_readv,
@as(usize, @bitCast(@as(isize, pid))),
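
The lib/std/os/linux.zig change relaxes process_vm_readv to take local: []const iovec, which lets callers pass address-of-literal iovec slices exactly the way MemoryAccessor.read does (the setsid/getpid/gettid edits just drop now-redundant @as casts). A sketch of that call pattern, reading a value out of our own process, follows; it is an illustration, not code from the commit:

```zig
const std = @import("std");
const linux = std.os.linux;

pub fn main() void {
    const src: usize = 0xabcdef;
    var dst: usize = 0;

    // With `local: []const iovec`, both iovec lists can be passed as
    // anonymous slice literals, mirroring the call in MemoryAccessor.read.
    const rc = linux.process_vm_readv(
        linux.getpid(),
        &.{.{ .base = std.mem.asBytes(&dst), .len = @sizeOf(usize) }},
        &.{.{ .base = std.mem.asBytes(&src), .len = @sizeOf(usize) }},
        0,
    );

    switch (linux.E.init(rc)) {
        .SUCCESS => std.debug.print("copied 0x{x}\n", .{dst}),
        // Kernels built without CONFIG_CROSS_MEMORY_ATTACH report ENOSYS;
        // the debug code falls back to /proc/<pid>/mem in that case.
        .NOSYS => std.debug.print("process_vm_readv not available\n", .{}),
        else => std.debug.print("process_vm_readv failed\n", .{}),
    }
}
```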
19 changes: 19 additions & 0 deletions src/Zcu/PerThread.zig
@@ -2194,6 +2194,25 @@ fn processExportsInner(
gop.value_ptr.* = export_idx;
}
}

switch (exported) {
.decl_index => |idx| if (failed: {
const decl = zcu.declPtr(idx);
if (decl.analysis != .complete) break :failed true;
// Check if has owned function
if (!decl.owns_tv) break :failed false;
if (decl.typeOf(zcu).zigTypeTag(zcu) != .Fn) break :failed false;
// Check if owned function failed
const a = zcu.funcInfo(decl.val.toIntern()).analysis(&zcu.intern_pool);
break :failed a.state != .success;
}) {
// This `Decl` is failed, so was never sent to codegen.
// TODO: we should probably tell the backend to delete any old exports of this `Decl`?
return;
},
.value => {},
}

if (zcu.comp.bin_file) |lf| {
try zcu.handleUpdateExports(export_indices, lf.updateExports(pt, exported, export_indices));
} else if (zcu.llvm_object) |llvm_object| {
