29 changes: 15 additions & 14 deletions lib/build-web/fuzz.zig
@@ -228,20 +228,21 @@ fn unpackSourcesInner(tar_bytes: []u8) !void {
         if (std.mem.endsWith(u8, tar_file.name, ".zig")) {
             log.debug("found file: '{s}'", .{tar_file.name});
             const file_name = try gpa.dupe(u8, tar_file.name);
-            if (std.mem.indexOfScalar(u8, file_name, '/')) |pkg_name_end| {
-                const pkg_name = file_name[0..pkg_name_end];
-                const gop = try Walk.modules.getOrPut(gpa, pkg_name);
-                const file: Walk.File.Index = @enumFromInt(Walk.files.entries.len);
-                if (!gop.found_existing or
-                    std.mem.eql(u8, file_name[pkg_name_end..], "/root.zig") or
-                    std.mem.eql(u8, file_name[pkg_name_end + 1 .. file_name.len - ".zig".len], pkg_name))
-                {
-                    gop.value_ptr.* = file;
-                }
-                const file_bytes = tar_reader.take(@intCast(tar_file.size)) catch unreachable;
-                it.unread_file_bytes = 0; // we have read the whole thing
-                assert(file == try Walk.add_file(file_name, file_bytes));
-            }
+            // This is a hack to guess modules from the tar file contents. To handle modules
+            // properly, the build system will need to change the structure here to have one
+            // directory per module. This in turn requires compiler enhancements to allow
+            // the build system to actually discover the required information.
+            const mod_name, const is_module_root = p: {
+                if (std.mem.find(u8, file_name, "std/")) |i| break :p .{ "std", std.mem.eql(u8, file_name[i + 4 ..], "std.zig") };
+                if (std.mem.endsWith(u8, file_name, "/builtin.zig")) break :p .{ "builtin", true };
+                break :p .{ "root", std.mem.endsWith(u8, file_name, "/root.zig") };
+            };
+            const gop = try Walk.modules.getOrPut(gpa, mod_name);
+            const file: Walk.File.Index = @enumFromInt(Walk.files.entries.len);
+            if (!gop.found_existing or is_module_root) gop.value_ptr.* = file;
+            const file_bytes = tar_reader.take(@intCast(tar_file.size)) catch unreachable;
+            it.unread_file_bytes = 0; // we have read the whole thing
+            assert(file == try Walk.add_file(file_name, file_bytes));
         } else {
             log.warn("skipping: '{s}' - the tar creation should have done that", .{tar_file.name});
         }
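A minimal sketch of the module-guessing rule introduced above, pulled out as a standalone function so it can be unit-tested. The name guessModule is hypothetical (the real change inlines this logic), and the sketch reuses std.mem.find exactly as the diff itself does:

    const std = @import("std");

    /// Hypothetical helper mirroring the heuristic above: derive a module name
    /// from a file path inside the sources tar, and report whether the file is
    /// that module's root.
    fn guessModule(file_name: []const u8) struct { []const u8, bool } {
        if (std.mem.find(u8, file_name, "std/")) |i|
            return .{ "std", std.mem.eql(u8, file_name[i + 4 ..], "std.zig") };
        if (std.mem.endsWith(u8, file_name, "/builtin.zig"))
            return .{ "builtin", true };
        return .{ "root", std.mem.endsWith(u8, file_name, "/root.zig") };
    }

    test guessModule {
        const mod_name, const is_root = guessModule("std/std.zig");
        try std.testing.expectEqualStrings("std", mod_name);
        try std.testing.expect(is_root);
    }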
2 changes: 1 addition & 1 deletion lib/compiler/test_runner.zig
@@ -184,7 +184,7 @@ fn mainServer() !void {
             const test_fn = builtin.test_functions[index];
             const entry_addr = @intFromPtr(test_fn.func);
 
-            try server.serveU64Message(.fuzz_start_addr, entry_addr);
+            try server.serveU64Message(.fuzz_start_addr, fuzz_abi.fuzzer_unslide_address(entry_addr));
             defer if (testing.allocator_instance.deinit() == .leak) std.process.exit(1);
             is_fuzz_test = false;
             fuzz_test_index = index;
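The one-line change above matters because the coverage tables the build runner reads store on-disk virtual addresses (see lib/fuzzer.zig below), while @intFromPtr(test_fn.func) yields a runtime address that includes the ASLR slide. An annotated restatement of the new line, under that assumption:

    // entry_addr is a runtime address; with ASLR the image is loaded at
    // base + slide, so the server-side PC table (keyed by on-disk vaddrs)
    // would never match it. Unsliding restores the stable on-disk address.
    const entry_addr = @intFromPtr(test_fn.func);
    const on_disk_addr = fuzz_abi.fuzzer_unslide_address(entry_addr);
    try server.serveU64Message(.fuzz_start_addr, on_disk_addr);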
34 changes: 28 additions & 6 deletions lib/fuzzer.zig
@@ -116,13 +116,18 @@ const Executable = struct {
                 "failed to init memory map for coverage file '{s}': {t}",
                 .{ &coverage_file_name, e },
             );
-            map.appendSliceAssumeCapacity(mem.asBytes(&abi.SeenPcsHeader{
+            map.appendSliceAssumeCapacity(@ptrCast(&abi.SeenPcsHeader{
                 .n_runs = 0,
                 .unique_runs = 0,
                 .pcs_len = pcs.len,
             }));
             map.appendNTimesAssumeCapacity(0, pc_bitset_usizes * @sizeOf(usize));
-            map.appendSliceAssumeCapacity(mem.sliceAsBytes(pcs));
+            // Relocations have been applied to `pcs` so it contains runtime addresses (with slide
+            // applied). We need to translate these to the virtual addresses as on disk.
+            for (pcs) |pc| {
+                const pc_vaddr = fuzzer_unslide_address(pc);
+                map.appendSliceAssumeCapacity(@ptrCast(&pc_vaddr));
+            }
             return map;
         } else {
             const size = coverage_file.getEndPos() catch |e| panic(
@@ -215,7 +220,16 @@
                 .{ self.pc_counters.len, pcs.len },
             );
 
-        self.pc_digest = std.hash.Wyhash.hash(0, mem.sliceAsBytes(pcs));
+        self.pc_digest = digest: {
+            // Relocations have been applied to `pcs` so it contains runtime addresses (with slide
+            // applied). We need to translate these to the virtual addresses as on disk.
+            var h: std.hash.Wyhash = .init(0);
+            for (pcs) |pc| {
+                const pc_vaddr = fuzzer_unslide_address(pc);
+                h.update(@ptrCast(&pc_vaddr));
+            }
+            break :digest h.final();
+        };
         self.shared_seen_pcs = getCoverageFile(cache_dir, pcs, self.pc_digest);
 
         return self;
@@ -622,6 +636,14 @@ export fn fuzzer_main(limit_kind: abi.LimitKind, amount: u64) void {
     }
 }
 
+export fn fuzzer_unslide_address(addr: usize) usize {
+    const si = std.debug.getSelfDebugInfo() catch @compileError("unsupported");
+    const slide = si.getModuleSlide(std.debug.getDebugInfoAllocator(), addr) catch |err| {
+        std.debug.panic("failed to find virtual address slide: {t}", .{err});
+    };
+    return addr - slide;
+}
+
 /// Helps determine run uniqueness in the face of recursion.
 /// Currently not used by the fuzzer.
 export threadlocal var __sancov_lowest_stack: usize = 0;
@@ -1185,13 +1207,13 @@ const Mutation = enum {
                 const j = rng.uintAtMostBiased(usize, corpus[splice_i].len - len);
                 out.appendSliceAssumeCapacity(corpus[splice_i][j..][0..len]);
             },
-            .@"const" => out.appendSliceAssumeCapacity(mem.asBytes(
+            .@"const" => out.appendSliceAssumeCapacity(@ptrCast(
                 &data_ctx[rng.uintLessThanBiased(usize, data_ctx.len)],
             )),
-            .small => out.appendSliceAssumeCapacity(mem.asBytes(
+            .small => out.appendSliceAssumeCapacity(@ptrCast(
                 &mem.nativeTo(data_ctx[0], rng.int(SmallValue), data_ctx[1]),
             )),
-            .few => out.appendSliceAssumeCapacity(mem.asBytes(
+            .few => out.appendSliceAssumeCapacity(@ptrCast(
                 &fewValue(rng, data_ctx[0], data_ctx[1]),
             )),
         }
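Both new loops above rely on the same invariant: a loaded module's runtime addresses are its on-disk virtual addresses plus a single per-module slide, so subtracting the slide yields values that are stable across runs and processes. The streaming Wyhash digest over the translated addresses is then equal to a one-shot hash of the translated table, which is what the pre-ASLR code computed. A self-contained sketch of both facts (using std.mem.asBytes for clarity where the diff uses @ptrCast):

    const std = @import("std");

    test "unslid PCs hash the same, streamed or one-shot" {
        const runtime_pcs = [_]usize{ 0x5555_0000_1010, 0x5555_0000_2020 };
        const slide: usize = 0x5555_0000_0000; // hypothetical per-module ASLR offset

        var translated: [runtime_pcs.len]usize = undefined;
        var h: std.hash.Wyhash = .init(0);
        for (runtime_pcs, &translated) |pc, *out| {
            out.* = pc - slide; // what fuzzer_unslide_address computes
            h.update(std.mem.asBytes(out));
        }
        // Streaming over each translated PC matches hashing the whole table
        // at once, as `std.hash.Wyhash.hash(0, mem.sliceAsBytes(pcs))` did.
        try std.testing.expectEqual(
            std.hash.Wyhash.hash(0, std.mem.sliceAsBytes(&translated)),
            h.final(),
        );
    }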
29 changes: 25 additions & 4 deletions lib/std/Build/Fuzz.zig
@@ -383,7 +383,14 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO
     errdefer gop.value_ptr.coverage.deinit(fuzz.gpa);
 
     const rebuilt_exe_path = run_step.rebuilt_executable.?;
-    var debug_info = std.debug.Info.load(fuzz.gpa, rebuilt_exe_path, &gop.value_ptr.coverage) catch |err| {
+    const target = run_step.producer.?.rootModuleTarget();
+    var debug_info = std.debug.Info.load(
+        fuzz.gpa,
+        rebuilt_exe_path,
+        &gop.value_ptr.coverage,
+        target.ofmt,
+        target.cpu.arch,
+    ) catch |err| {
         log.err("step '{s}': failed to load debug information for '{f}': {s}", .{
             run_step.step.name, rebuilt_exe_path, @errorName(err),
         });
@@ -479,9 +486,23 @@ fn addEntryPoint(fuzz: *Fuzz, coverage_id: u64, addr: u64) error{ AlreadyReporte
     if (false) {
         const sl = coverage_map.source_locations[index];
         const file_name = coverage_map.coverage.stringAt(coverage_map.coverage.fileAt(sl.file).basename);
-        log.debug("server found entry point for 0x{x} at {s}:{d}:{d} - index {d} between {x} and {x}", .{
-            addr, file_name, sl.line, sl.column, index, pcs[index - 1], pcs[index + 1],
-        });
+        if (pcs.len == 1) {
+            log.debug("server found entry point for 0x{x} at {s}:{d}:{d} - index 0 (final)", .{
+                addr, file_name, sl.line, sl.column,
+            });
+        } else if (index == 0) {
+            log.debug("server found entry point for 0x{x} at {s}:{d}:{d} - index 0 before {x}", .{
+                addr, file_name, sl.line, sl.column, pcs[index + 1],
+            });
+        } else if (index == pcs.len - 1) {
+            log.debug("server found entry point for 0x{x} at {s}:{d}:{d} - index {d} (final) after {x}", .{
+                addr, file_name, sl.line, sl.column, index, pcs[index - 1],
+            });
+        } else {
+            log.debug("server found entry point for 0x{x} at {s}:{d}:{d} - index {d} between {x} and {x}", .{
+                addr, file_name, sl.line, sl.column, index, pcs[index - 1], pcs[index + 1],
+            });
+        }
     }
     try coverage_map.entry_points.append(fuzz.gpa, @intCast(index));
 }
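For context on the boundary cases handled above: index is the position of the fuzz entry point within the sorted PC table, so pcs[index - 1] and pcs[index + 1] only exist away from the table's edges. A minimal sketch of the kind of lookup that produces such an index, assuming only that pcs is sorted ascending (the actual lookup lives outside this hunk, and entryIndex is a hypothetical name):

    /// Hypothetical lookup: index of the last PC that is <= addr, or null if
    /// addr precedes the whole table. Plain binary search, no std.sort helpers.
    fn entryIndex(pcs: []const u64, addr: u64) ?usize {
        var lo: usize = 0;
        var hi: usize = pcs.len;
        while (lo < hi) {
            const mid = lo + (hi - lo) / 2;
            if (pcs[mid] <= addr) lo = mid + 1 else hi = mid;
        }
        return if (lo == 0) null else lo - 1;
    }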
29 changes: 14 additions & 15 deletions lib/std/Build/Step/CheckObject.zig
@@ -729,10 +729,10 @@ const MachODumper = struct {
         imports: std.ArrayListUnmanaged([]const u8) = .empty,
 
         fn parse(ctx: *ObjectContext) !void {
-            var it = ctx.getLoadCommandIterator();
+            var it = try ctx.getLoadCommandIterator();
             var i: usize = 0;
-            while (it.next()) |cmd| {
-                switch (cmd.cmd()) {
+            while (try it.next()) |cmd| {
+                switch (cmd.hdr.cmd) {
                     .SEGMENT_64 => {
                         const seg = cmd.cast(macho.segment_command_64).?;
                         try ctx.segments.append(ctx.gpa, seg);
@@ -771,14 +771,13 @@
             return mem.sliceTo(@as([*:0]const u8, @ptrCast(ctx.strtab.items.ptr + off)), 0);
         }
 
-        fn getLoadCommandIterator(ctx: ObjectContext) macho.LoadCommandIterator {
-            const data = ctx.data[@sizeOf(macho.mach_header_64)..][0..ctx.header.sizeofcmds];
-            return .{ .ncmds = ctx.header.ncmds, .buffer = data };
+        fn getLoadCommandIterator(ctx: ObjectContext) !macho.LoadCommandIterator {
+            return .init(&ctx.header, ctx.data[@sizeOf(macho.mach_header_64)..]);
         }
 
-        fn getLoadCommand(ctx: ObjectContext, cmd: macho.LC) ?macho.LoadCommandIterator.LoadCommand {
-            var it = ctx.getLoadCommandIterator();
-            while (it.next()) |lc| if (lc.cmd() == cmd) {
+        fn getLoadCommand(ctx: ObjectContext, cmd: macho.LC) !?macho.LoadCommandIterator.LoadCommand {
+            var it = try ctx.getLoadCommandIterator();
+            while (try it.next()) |lc| if (lc.hdr.cmd == cmd) {
                 return lc;
             };
             return null;
@@ -872,9 +871,9 @@
                 \\LC {d}
                 \\cmd {s}
                 \\cmdsize {d}
-            , .{ index, @tagName(lc.cmd()), lc.cmdsize() });
+            , .{ index, @tagName(lc.hdr.cmd), lc.hdr.cmdsize });
 
-            switch (lc.cmd()) {
+            switch (lc.hdr.cmd) {
                 .SEGMENT_64 => {
                     const seg = lc.cast(macho.segment_command_64).?;
                     try writer.writeByte('\n');
@@ -1592,9 +1591,9 @@
             .headers => {
                 try ObjectContext.dumpHeader(ctx.header, writer);
 
-                var it = ctx.getLoadCommandIterator();
+                var it = try ctx.getLoadCommandIterator();
                 var i: usize = 0;
-                while (it.next()) |cmd| {
+                while (try it.next()) |cmd| {
                     try ObjectContext.dumpLoadCommand(cmd, i, writer);
                     try writer.writeByte('\n');
 
@@ -1615,7 +1614,7 @@
             .dyld_weak_bind,
             .dyld_lazy_bind,
             => {
-                const cmd = ctx.getLoadCommand(.DYLD_INFO_ONLY) orelse
+                const cmd = try ctx.getLoadCommand(.DYLD_INFO_ONLY) orelse
                     return step.fail("no dyld info found", .{});
                 const lc = cmd.cast(macho.dyld_info_command).?;
 
@@ -1649,7 +1648,7 @@
             },
 
             .exports => blk: {
-                if (ctx.getLoadCommand(.DYLD_INFO_ONLY)) |cmd| {
+                if (try ctx.getLoadCommand(.DYLD_INFO_ONLY)) |cmd| {
                     const lc = cmd.cast(macho.dyld_info_command).?;
                     if (lc.export_size > 0) {
                         const data = ctx.data[lc.export_off..][0..lc.export_size];
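The iterator change above makes both construction and iteration fallible (malformed headers and truncated command buffers can be reported instead of trapping) and moves the raw header fields to lc.hdr. A sketch of the resulting call pattern, inferred purely from the usage in this diff rather than from the final std.macho API:

    const std = @import("std");
    const macho = std.macho;

    /// Find the first LC_SEGMENT_64 command, using only the calls that appear
    /// in the updated CheckObject code: fallible `.init`, fallible `.next`,
    /// and the `hdr.cmd` / `cast` accessors.
    fn findSegment64(header: *const macho.mach_header_64, data: []const u8) !?macho.segment_command_64 {
        var it: macho.LoadCommandIterator = try .init(header, data);
        while (try it.next()) |lc| {
            if (lc.hdr.cmd == .SEGMENT_64) return lc.cast(macho.segment_command_64).?;
        }
        return null;
    }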
5 changes: 5 additions & 0 deletions lib/std/Build/Step/Compile.zig
@@ -1932,6 +1932,11 @@ pub fn rebuildInFuzzMode(c: *Compile, gpa: Allocator, progress_node: std.Progres
     c.step.result_error_bundle.deinit(gpa);
     c.step.result_error_bundle = std.zig.ErrorBundle.empty;
 
+    if (c.step.result_failed_command) |cmd| {
+        gpa.free(cmd);
+        c.step.result_failed_command = null;
+    }
+
     const zig_args = try getZigArgs(c, true);
     const maybe_output_bin_path = try c.step.evalZigProcess(zig_args, progress_node, false, null, gpa);
     return maybe_output_bin_path.?;
13 changes: 11 additions & 2 deletions lib/std/Build/Step/Run.zig
@@ -1140,6 +1140,12 @@ pub fn rerunInFuzzMode(
             .output_file, .output_directory => unreachable,
         }
     }
+
+    if (run.step.result_failed_command) |cmd| {
+        fuzz.gpa.free(cmd);
+        run.step.result_failed_command = null;
+    }
+
     const has_side_effects = false;
     const rand_int = std.crypto.random.int(u64);
     const tmp_dir_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int);
@@ -1150,7 +1156,7 @@
         .web_server = null, // only needed for time reports
         .ttyconf = fuzz.ttyconf,
         .unit_test_timeout_ns = null, // don't time out fuzz tests for now
-        .gpa = undefined, // not used by `runCommand`
+        .gpa = fuzz.gpa,
     }, .{
         .unit_test_index = unit_test_index,
         .fuzz = fuzz,
@@ -1870,7 +1876,10 @@
     // test. For instance, if the test runner leaves this much time between us requesting a test to
    // start and it acknowledging the test starting, we terminate the child and raise an error. This
     // *should* never happen, but could in theory be caused by some very unlucky IB in a test.
-    const response_timeout_ns = @max(options.unit_test_timeout_ns orelse 0, 60 * std.time.ns_per_s);
+    const response_timeout_ns: ?u64 = ns: {
+        if (fuzz_context != null) break :ns null; // don't timeout fuzz tests
+        break :ns @max(options.unit_test_timeout_ns orelse 0, 60 * std.time.ns_per_s);
+    };
 
     const stdout = poller.reader(.stdout);
     const stderr = poller.reader(.stderr);
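The new block above encodes a small policy: fuzz runs, which legitimately stay silent for long stretches, get no response deadline at all, while normal test runs wait at least 60 seconds regardless of how short the per-test timeout is. A sketch of that policy as a pure function, with hypothetical names:

    const std = @import("std");

    /// Hypothetical extraction of the timeout policy above.
    fn responseTimeoutNs(fuzzing: bool, unit_test_timeout_ns: ?u64) ?u64 {
        if (fuzzing) return null; // never terminate a fuzzing child for silence
        return @max(unit_test_timeout_ns orelse 0, 60 * std.time.ns_per_s);
    }

    test responseTimeoutNs {
        try std.testing.expectEqual(@as(?u64, null), responseTimeoutNs(true, 1000));
        // Even a 1ns per-test timeout leaves the runner 60s to acknowledge.
        try std.testing.expectEqual(@as(?u64, 60 * std.time.ns_per_s), responseTimeoutNs(false, 1));
    }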
1 change: 1 addition & 0 deletions lib/std/Build/abi.zig
@@ -145,6 +145,7 @@ pub const fuzz = struct {
     pub extern fn fuzzer_init_test(test_one: TestOne, unit_test_name: Slice) void;
     pub extern fn fuzzer_new_input(bytes: Slice) void;
     pub extern fn fuzzer_main(limit_kind: LimitKind, amount: u64) void;
+    pub extern fn fuzzer_unslide_address(addr: usize) usize;
 
     pub const Slice = extern struct {
         ptr: [*]const u8,
22 changes: 16 additions & 6 deletions lib/std/Io/Writer.zig
@@ -270,16 +270,17 @@ fn writeSplatHeaderLimitFinish(
             remaining -= copy_len;
             if (remaining == 0) break :v;
         }
-        for (data[0 .. data.len - 1]) |buf| if (buf.len != 0) {
-            const copy_len = @min(header.len, remaining);
-            vecs[i] = buf;
+        for (data[0 .. data.len - 1]) |buf| {
+            if (buf.len == 0) continue;
+            const copy_len = @min(buf.len, remaining);
+            vecs[i] = buf[0..copy_len];
             i += 1;
             remaining -= copy_len;
             if (remaining == 0) break :v;
             if (vecs.len - i == 0) break :v;
-        };
+        }
         const pattern = data[data.len - 1];
-        if (splat == 1) {
+        if (splat == 1 or remaining < pattern.len) {
             vecs[i] = pattern[0..@min(remaining, pattern.len)];
             i += 1;
             break :v;
@@ -915,7 +916,16 @@ pub fn sendFileHeader(
     if (new_end <= w.buffer.len) {
         @memcpy(w.buffer[w.end..][0..header.len], header);
         w.end = new_end;
-        return header.len + try w.vtable.sendFile(w, file_reader, limit);
+        const file_bytes = w.vtable.sendFile(w, file_reader, limit) catch |err| switch (err) {
+            error.ReadFailed, error.WriteFailed => |e| return e,
+            error.EndOfStream, error.Unimplemented => |e| {
+                // These errors are non-fatal, so if we wrote any header bytes, we will report that
+                // and suppress this error. Only if there was no header may we return the error.
+                if (header.len != 0) return header.len;
+                return e;
+            },
+        };
+        return header.len + file_bytes;
     }
     const buffered_contents = limit.slice(file_reader.interface.buffered());
     const n = try w.vtable.drain(w, &.{ header, buffered_contents }, 1);
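Two fixes land above. In the loop, each queued iovec must be accounted at the buffer's own length clamped to remaining, not the header's length, and the queued slice must be clamped to that same count; otherwise remaining is over-deducted whenever header.len differs from buf.len. In sendFileHeader, the new catch implements the partial-success rule quoted in its comment. A small demonstration of the accounting bug, relying on nothing beyond the arithmetic:

    const std = @import("std");

    test "per-buffer accounting must use the buffer's length" {
        const header = [_]u8{0} ** 16; // 16 header bytes already queued earlier
        const buf = "abcd"; // a 4-byte data buffer being queued now
        const remaining: usize = 100;

        // Old code deducted the *header* length for every buffer:
        try std.testing.expectEqual(@as(usize, 16), @min(header.len, remaining));
        // Fixed code deducts exactly what is placed in vecs[i]:
        try std.testing.expectEqual(@as(usize, 4), @min(buf.len, remaining));
    }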
3 changes: 2 additions & 1 deletion lib/std/debug.zig
@@ -21,6 +21,7 @@ const root = @import("root");
 pub const Dwarf = @import("debug/Dwarf.zig");
 pub const Pdb = @import("debug/Pdb.zig");
 pub const ElfFile = @import("debug/ElfFile.zig");
+pub const MachOFile = @import("debug/MachOFile.zig");
 pub const Info = @import("debug/Info.zig");
 pub const Coverage = @import("debug/Coverage.zig");
 pub const cpu_context = @import("debug/cpu_context.zig");
@@ -1366,7 +1367,7 @@ test printLineFromFile {
 
 /// The returned allocator should be thread-safe if the compilation is multi-threaded, because
 /// multiple threads could capture and/or print stack traces simultaneously.
-fn getDebugInfoAllocator() Allocator {
+pub fn getDebugInfoAllocator() Allocator {
     // Allow overriding the debug info allocator by exposing `root.debug.getDebugInfoAllocator`.
     if (@hasDecl(root, "debug") and @hasDecl(root.debug, "getDebugInfoAllocator")) {
         return root.debug.getDebugInfoAllocator();
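With getDebugInfoAllocator now public, the override hook mentioned in its doc comment can also be exercised from user code. A minimal sketch, assuming only the root.debug.getDebugInfoAllocator convention quoted in the comment above:

    const std = @import("std");

    // In the root source file: std.debug calls this instead of its default
    // allocator. It should be thread-safe, per the doc comment above.
    pub const debug = struct {
        var gpa: std.heap.GeneralPurposeAllocator(.{ .thread_safe = true }) = .{};

        pub fn getDebugInfoAllocator() std.mem.Allocator {
            return gpa.allocator();
        }
    };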