diff --git a/lib/fuzzer/web/main.zig b/lib/build-web/fuzz.zig
similarity index 52%
rename from lib/fuzzer/web/main.zig
rename to lib/build-web/fuzz.zig
index 5b16b0532ccf..a05f9d2e649a 100644
--- a/lib/fuzzer/web/main.zig
+++ b/lib/build-web/fuzz.zig
@@ -1,229 +1,26 @@
-const std = @import("std");
-const assert = std.debug.assert;
-const abi = std.Build.Fuzz.abi;
-const gpa = std.heap.wasm_allocator;
-const log = std.log;
-const Coverage = std.debug.Coverage;
-const Allocator = std.mem.Allocator;
-
-const Walk = @import("Walk");
-const Decl = Walk.Decl;
-const html_render = @import("html_render");
-
-/// Nanoseconds.
-var server_base_timestamp: i64 = 0;
-/// Milliseconds.
-var client_base_timestamp: i64 = 0;
-/// Relative to `server_base_timestamp`.
+// Server timestamp.
var start_fuzzing_timestamp: i64 = undefined;
const js = struct {
- extern "js" fn log(ptr: [*]const u8, len: usize) void;
- extern "js" fn panic(ptr: [*]const u8, len: usize) noreturn;
- extern "js" fn timestamp() i64;
- extern "js" fn emitSourceIndexChange() void;
- extern "js" fn emitCoverageUpdate() void;
- extern "js" fn emitEntryPointsUpdate() void;
-};
+ extern "fuzz" fn requestSources() void;
+ extern "fuzz" fn ready() void;
-pub const std_options: std.Options = .{
- .logFn = logFn,
+ extern "fuzz" fn updateStats(html_ptr: [*]const u8, html_len: usize) void;
+ extern "fuzz" fn updateEntryPoints(html_ptr: [*]const u8, html_len: usize) void;
+ extern "fuzz" fn updateSource(html_ptr: [*]const u8, html_len: usize) void;
+ extern "fuzz" fn updateCoverage(covered_ptr: [*]const SourceLocationIndex, covered_len: u32) void;
};
-pub fn panic(msg: []const u8, st: ?*std.builtin.StackTrace, addr: ?usize) noreturn {
- _ = st;
- _ = addr;
- log.err("panic: {s}", .{msg});
- @trap();
-}
-
-fn logFn(
- comptime message_level: log.Level,
- comptime scope: @TypeOf(.enum_literal),
- comptime format: []const u8,
- args: anytype,
-) void {
- const level_txt = comptime message_level.asText();
- const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): ";
- var buf: [500]u8 = undefined;
- const line = std.fmt.bufPrint(&buf, level_txt ++ prefix2 ++ format, args) catch l: {
- buf[buf.len - 3 ..][0..3].* = "...".*;
- break :l &buf;
- };
- js.log(line.ptr, line.len);
-}
-
-export fn alloc(n: usize) [*]u8 {
- const slice = gpa.alloc(u8, n) catch @panic("OOM");
- return slice.ptr;
-}
-
-var message_buffer: std.ArrayListAlignedUnmanaged(u8, .of(u64)) = .empty;
-
-/// Resizes the message buffer to be the correct length; returns the pointer to
-/// the query string.
-export fn message_begin(len: usize) [*]u8 {
- message_buffer.resize(gpa, len) catch @panic("OOM");
- return message_buffer.items.ptr;
-}
-
-export fn message_end() void {
- const msg_bytes = message_buffer.items;
-
- const tag: abi.ToClientTag = @enumFromInt(msg_bytes[0]);
- switch (tag) {
- .current_time => return currentTimeMessage(msg_bytes),
- .source_index => return sourceIndexMessage(msg_bytes) catch @panic("OOM"),
- .coverage_update => return coverageUpdateMessage(msg_bytes) catch @panic("OOM"),
- .entry_points => return entryPointsMessage(msg_bytes) catch @panic("OOM"),
- _ => unreachable,
- }
-}
-
-export fn unpack(tar_ptr: [*]u8, tar_len: usize) void {
- const tar_bytes = tar_ptr[0..tar_len];
- log.debug("received {d} bytes of tar file", .{tar_bytes.len});
-
- unpackInner(tar_bytes) catch |err| {
- fatal("unable to unpack tar: {s}", .{@errorName(err)});
- };
-}
-
-/// Set by `set_input_string`.
-var input_string: std.ArrayListUnmanaged(u8) = .empty;
-var string_result: std.ArrayListUnmanaged(u8) = .empty;
-
-export fn set_input_string(len: usize) [*]u8 {
- input_string.resize(gpa, len) catch @panic("OOM");
- return input_string.items.ptr;
-}
-
-/// Looks up the root struct decl corresponding to a file by path.
-/// Uses `input_string`.
-export fn find_file_root() Decl.Index {
- const file: Walk.File.Index = @enumFromInt(Walk.files.getIndex(input_string.items) orelse return .none);
- return file.findRootDecl();
-}
-
-export fn decl_source_html(decl_index: Decl.Index) String {
- const decl = decl_index.get();
-
- string_result.clearRetainingCapacity();
- html_render.fileSourceHtml(decl.file, &string_result, decl.ast_node, .{}) catch |err| {
- fatal("unable to render source: {s}", .{@errorName(err)});
- };
- return String.init(string_result.items);
-}
-
-export fn totalSourceLocations() usize {
- return coverage_source_locations.items.len;
-}
-
-export fn coveredSourceLocations() usize {
- const covered_bits = recent_coverage_update.items[@sizeOf(abi.CoverageUpdateHeader)..];
- var count: usize = 0;
- for (covered_bits) |byte| count += @popCount(byte);
- return count;
-}
-
-fn getCoverageUpdateHeader() *abi.CoverageUpdateHeader {
- return @alignCast(@ptrCast(recent_coverage_update.items[0..@sizeOf(abi.CoverageUpdateHeader)]));
-}
-
-export fn totalRuns() u64 {
- const header = getCoverageUpdateHeader();
- return header.n_runs;
-}
-
-export fn uniqueRuns() u64 {
- const header = getCoverageUpdateHeader();
- return header.unique_runs;
-}
-
-export fn totalRunsPerSecond() f64 {
- @setFloatMode(.optimized);
- const header = getCoverageUpdateHeader();
- const ns_elapsed: f64 = @floatFromInt(nsSince(start_fuzzing_timestamp));
- const n_runs: f64 = @floatFromInt(header.n_runs);
- return n_runs / (ns_elapsed / std.time.ns_per_s);
-}
-
-const String = Slice(u8);
-
-fn Slice(T: type) type {
- return packed struct(u64) {
- ptr: u32,
- len: u32,
-
- fn init(s: []const T) @This() {
- return .{
- .ptr = @intFromPtr(s.ptr),
- .len = s.len,
- };
- }
- };
-}
-
-fn unpackInner(tar_bytes: []u8) !void {
- var fbs = std.io.fixedBufferStream(tar_bytes);
- var file_name_buffer: [1024]u8 = undefined;
- var link_name_buffer: [1024]u8 = undefined;
- var it = std.tar.iterator(fbs.reader(), .{
- .file_name_buffer = &file_name_buffer,
- .link_name_buffer = &link_name_buffer,
- });
- while (try it.next()) |tar_file| {
- switch (tar_file.kind) {
- .file => {
- if (tar_file.size == 0 and tar_file.name.len == 0) break;
- if (std.mem.endsWith(u8, tar_file.name, ".zig")) {
- log.debug("found file: '{s}'", .{tar_file.name});
- const file_name = try gpa.dupe(u8, tar_file.name);
- if (std.mem.indexOfScalar(u8, file_name, '/')) |pkg_name_end| {
- const pkg_name = file_name[0..pkg_name_end];
- const gop = try Walk.modules.getOrPut(gpa, pkg_name);
- const file: Walk.File.Index = @enumFromInt(Walk.files.entries.len);
- if (!gop.found_existing or
- std.mem.eql(u8, file_name[pkg_name_end..], "/root.zig") or
- std.mem.eql(u8, file_name[pkg_name_end + 1 .. file_name.len - ".zig".len], pkg_name))
- {
- gop.value_ptr.* = file;
- }
- const file_bytes = tar_bytes[fbs.pos..][0..@intCast(tar_file.size)];
- assert(file == try Walk.add_file(file_name, file_bytes));
- }
- } else {
- log.warn("skipping: '{s}' - the tar creation should have done that", .{tar_file.name});
- }
- },
- else => continue,
- }
- }
-}
-
-fn fatal(comptime format: []const u8, args: anytype) noreturn {
- var buf: [500]u8 = undefined;
- const line = std.fmt.bufPrint(&buf, format, args) catch l: {
- buf[buf.len - 3 ..][0..3].* = "...".*;
- break :l &buf;
- };
- js.panic(line.ptr, line.len);
-}
-
-fn currentTimeMessage(msg_bytes: []u8) void {
- client_base_timestamp = js.timestamp();
- server_base_timestamp = @bitCast(msg_bytes[1..][0..8].*);
-}
+pub fn sourceIndexMessage(msg_bytes: []u8) error{OutOfMemory}!void {
+ Walk.files.clearRetainingCapacity();
+ Walk.decls.clearRetainingCapacity();
+ Walk.modules.clearRetainingCapacity();
+ recent_coverage_update.clearRetainingCapacity();
+ selected_source_location = null;
-/// Nanoseconds passed since a server timestamp.
-fn nsSince(server_timestamp: i64) i64 {
- const ms_passed = js.timestamp() - client_base_timestamp;
- const ns_passed = server_base_timestamp - server_timestamp;
- return ns_passed + ms_passed * std.time.ns_per_ms;
-}
+ js.requestSources();
-fn sourceIndexMessage(msg_bytes: []u8) error{OutOfMemory}!void {
- const Header = abi.SourceIndexHeader;
+ const Header = abi.fuzz.SourceIndexHeader;
const header: Header = @bitCast(msg_bytes[0..@sizeOf(Header)].*);
const directories_start = @sizeOf(Header);
@@ -239,27 +36,55 @@ fn sourceIndexMessage(msg_bytes: []u8) error{OutOfMemory}!void {
const source_locations: []const Coverage.SourceLocation = @alignCast(std.mem.bytesAsSlice(Coverage.SourceLocation, msg_bytes[source_locations_start..source_locations_end]));
start_fuzzing_timestamp = header.start_timestamp;
- try updateCoverage(directories, files, source_locations, string_bytes);
- js.emitSourceIndexChange();
+ try updateCoverageSources(directories, files, source_locations, string_bytes);
+ js.ready();
}
-fn coverageUpdateMessage(msg_bytes: []u8) error{OutOfMemory}!void {
+var coverage = Coverage.init;
+/// Index of type `SourceLocationIndex`.
+var coverage_source_locations: std.ArrayListUnmanaged(Coverage.SourceLocation) = .empty;
+/// Contains the most recent coverage update message, unmodified.
+var recent_coverage_update: std.ArrayListAlignedUnmanaged(u8, .of(u64)) = .empty;
+
+fn updateCoverageSources(
+ directories: []const Coverage.String,
+ files: []const Coverage.File,
+ source_locations: []const Coverage.SourceLocation,
+ string_bytes: []const u8,
+) !void {
+ coverage.directories.clearRetainingCapacity();
+ coverage.files.clearRetainingCapacity();
+ coverage.string_bytes.clearRetainingCapacity();
+ coverage_source_locations.clearRetainingCapacity();
+
+ try coverage_source_locations.appendSlice(gpa, source_locations);
+ try coverage.string_bytes.appendSlice(gpa, string_bytes);
+
+ try coverage.files.entries.resize(gpa, files.len);
+ @memcpy(coverage.files.entries.items(.key), files);
+ try coverage.files.reIndexContext(gpa, .{ .string_bytes = coverage.string_bytes.items });
+
+ try coverage.directories.entries.resize(gpa, directories.len);
+ @memcpy(coverage.directories.entries.items(.key), directories);
+ try coverage.directories.reIndexContext(gpa, .{ .string_bytes = coverage.string_bytes.items });
+}
+
+pub fn coverageUpdateMessage(msg_bytes: []u8) error{OutOfMemory}!void {
recent_coverage_update.clearRetainingCapacity();
recent_coverage_update.appendSlice(gpa, msg_bytes) catch @panic("OOM");
- js.emitCoverageUpdate();
+ try updateStats();
+ try updateCoverage();
}
-var entry_points: std.ArrayListUnmanaged(u32) = .empty;
-
-fn entryPointsMessage(msg_bytes: []u8) error{OutOfMemory}!void {
- const header: abi.EntryPointHeader = @bitCast(msg_bytes[0..@sizeOf(abi.EntryPointHeader)].*);
- entry_points.resize(gpa, header.flags.locs_len) catch @panic("OOM");
- @memcpy(entry_points.items, std.mem.bytesAsSlice(u32, msg_bytes[@sizeOf(abi.EntryPointHeader)..]));
- js.emitEntryPointsUpdate();
-}
+var entry_points: std.ArrayListUnmanaged(SourceLocationIndex) = .empty;
-export fn entryPoints() Slice(u32) {
- return Slice(u32).init(entry_points.items);
+pub fn entryPointsMessage(msg_bytes: []u8) error{OutOfMemory}!void {
+ const header: abi.fuzz.EntryPointHeader = @bitCast(msg_bytes[0..@sizeOf(abi.fuzz.EntryPointHeader)].*);
+ const slis: []align(1) const SourceLocationIndex = @ptrCast(msg_bytes[@sizeOf(abi.fuzz.EntryPointHeader)..]);
+ assert(slis.len == header.locsLen());
+ try entry_points.resize(gpa, slis.len);
+ @memcpy(entry_points.items, slis);
+ try updateEntryPoints();
}
/// Index into `coverage_source_locations`.
@@ -277,11 +102,18 @@ const SourceLocationIndex = enum(u32) {
fn sourceLocationLinkHtml(
sli: SourceLocationIndex,
out: *std.ArrayListUnmanaged(u8),
+ focused: bool,
) Allocator.Error!void {
const sl = sli.ptr();
- try out.writer(gpa).print("", .{@intFromEnum(sli)});
+ try out.writer(gpa).print("", .{
+ @as([]const u8, if (focused) " class=\"status-running\"" else ""),
+ });
try sli.appendPath(out);
- try out.writer(gpa).print(":{d}:{d} ", .{ sl.line, sl.column });
+ try out.writer(gpa).print(":{d}:{d} View ", .{
+ sl.line,
+ sl.column,
+ @intFromEnum(sli),
+ });
}
fn appendPath(sli: SourceLocationIndex, out: *std.ArrayListUnmanaged(u8)) Allocator.Error!void {
@@ -372,84 +204,174 @@ fn computeSourceAnnotations(
}
}
-var coverage = Coverage.init;
-/// Index of type `SourceLocationIndex`.
-var coverage_source_locations: std.ArrayListUnmanaged(Coverage.SourceLocation) = .empty;
-/// Contains the most recent coverage update message, unmodified.
-var recent_coverage_update: std.ArrayListAlignedUnmanaged(u8, .of(u64)) = .empty;
+export fn fuzzUnpackSources(tar_ptr: [*]u8, tar_len: usize) void {
+ const tar_bytes = tar_ptr[0..tar_len];
+ log.debug("received {d} bytes of sources.tar", .{tar_bytes.len});
-fn updateCoverage(
- directories: []const Coverage.String,
- files: []const Coverage.File,
- source_locations: []const Coverage.SourceLocation,
- string_bytes: []const u8,
-) !void {
- coverage.directories.clearRetainingCapacity();
- coverage.files.clearRetainingCapacity();
- coverage.string_bytes.clearRetainingCapacity();
- coverage_source_locations.clearRetainingCapacity();
+ unpackSourcesInner(tar_bytes) catch |err| {
+ fatal("unable to unpack sources.tar: {s}", .{@errorName(err)});
+ };
+}
- try coverage_source_locations.appendSlice(gpa, source_locations);
- try coverage.string_bytes.appendSlice(gpa, string_bytes);
+fn unpackSourcesInner(tar_bytes: []u8) !void {
+ var tar_reader: std.Io.Reader = .fixed(tar_bytes);
+ var file_name_buffer: [1024]u8 = undefined;
+ var link_name_buffer: [1024]u8 = undefined;
+ var it: std.tar.Iterator = .init(&tar_reader, .{
+ .file_name_buffer = &file_name_buffer,
+ .link_name_buffer = &link_name_buffer,
+ });
+ while (try it.next()) |tar_file| {
+ switch (tar_file.kind) {
+ .file => {
+ if (tar_file.size == 0 and tar_file.name.len == 0) break;
+ if (std.mem.endsWith(u8, tar_file.name, ".zig")) {
+ log.debug("found file: '{s}'", .{tar_file.name});
+ const file_name = try gpa.dupe(u8, tar_file.name);
+ if (std.mem.indexOfScalar(u8, file_name, '/')) |pkg_name_end| {
+ const pkg_name = file_name[0..pkg_name_end];
+ const gop = try Walk.modules.getOrPut(gpa, pkg_name);
+ const file: Walk.File.Index = @enumFromInt(Walk.files.entries.len);
+ if (!gop.found_existing or
+ std.mem.eql(u8, file_name[pkg_name_end..], "/root.zig") or
+ std.mem.eql(u8, file_name[pkg_name_end + 1 .. file_name.len - ".zig".len], pkg_name))
+ {
+ gop.value_ptr.* = file;
+ }
+ const file_bytes = tar_reader.take(@intCast(tar_file.size)) catch unreachable;
+ it.unread_file_bytes = 0; // we have read the whole thing
+ assert(file == try Walk.add_file(file_name, file_bytes));
+ }
+ } else {
+ log.warn("skipping: '{s}' - the tar creation should have done that", .{tar_file.name});
+ }
+ },
+ else => continue,
+ }
+ }
+}
- try coverage.files.entries.resize(gpa, files.len);
- @memcpy(coverage.files.entries.items(.key), files);
- try coverage.files.reIndexContext(gpa, .{ .string_bytes = coverage.string_bytes.items });
+fn updateStats() error{OutOfMemory}!void {
+ @setFloatMode(.optimized);
- try coverage.directories.entries.resize(gpa, directories.len);
- @memcpy(coverage.directories.entries.items(.key), directories);
- try coverage.directories.reIndexContext(gpa, .{ .string_bytes = coverage.string_bytes.items });
-}
+ if (recent_coverage_update.items.len == 0) return;
-export fn sourceLocationLinkHtml(index: SourceLocationIndex) String {
- string_result.clearRetainingCapacity();
- index.sourceLocationLinkHtml(&string_result) catch @panic("OOM");
- return String.init(string_result.items);
-}
+ const hdr: *abi.fuzz.CoverageUpdateHeader = @alignCast(@ptrCast(
+ recent_coverage_update.items[0..@sizeOf(abi.fuzz.CoverageUpdateHeader)],
+ ));
-/// Returns empty string if coverage metadata is not available for this source location.
-export fn sourceLocationPath(sli: SourceLocationIndex) String {
- string_result.clearRetainingCapacity();
- if (sli.haveCoverage()) sli.appendPath(&string_result) catch @panic("OOM");
- return String.init(string_result.items);
-}
+ const covered_src_locs: usize = n: {
+ var n: usize = 0;
+ const covered_bits = recent_coverage_update.items[@sizeOf(abi.fuzz.CoverageUpdateHeader)..];
+ for (covered_bits) |byte| n += @popCount(byte);
+ break :n n;
+ };
+ const total_src_locs = coverage_source_locations.items.len;
-export fn sourceLocationFileHtml(sli: SourceLocationIndex) String {
- string_result.clearRetainingCapacity();
- sli.fileHtml(&string_result) catch |err| switch (err) {
- error.OutOfMemory => @panic("OOM"),
- error.SourceUnavailable => {},
+ const avg_speed: f64 = speed: {
+ const ns_elapsed: f64 = @floatFromInt(nsSince(start_fuzzing_timestamp));
+ const n_runs: f64 = @floatFromInt(hdr.n_runs);
+ break :speed n_runs / (ns_elapsed / std.time.ns_per_s);
};
- return String.init(string_result.items);
+
+ const html = try std.fmt.allocPrint(gpa,
+ \\{d}
+ \\{d} ({d:.1}%)
+ \\{d} / {d} ({d:.1}%)
+ \\{d:.0}
+ , .{
+ hdr.n_runs,
+ hdr.unique_runs,
+ @as(f64, @floatFromInt(hdr.unique_runs)) / @as(f64, @floatFromInt(hdr.n_runs)),
+ covered_src_locs,
+ total_src_locs,
+ @as(f64, @floatFromInt(covered_src_locs)) / @as(f64, @floatFromInt(total_src_locs)),
+ avg_speed,
+ });
+ defer gpa.free(html);
+
+ js.updateStats(html.ptr, html.len);
}
-export fn sourceLocationFileCoveredList(sli_file: SourceLocationIndex) Slice(SourceLocationIndex) {
- const global = struct {
- var result: std.ArrayListUnmanaged(SourceLocationIndex) = .empty;
- fn add(i: u32, want_file: Coverage.File.Index) void {
- const src_loc_index: SourceLocationIndex = @enumFromInt(i);
- if (src_loc_index.ptr().file == want_file) result.appendAssumeCapacity(src_loc_index);
- }
- };
- const want_file = sli_file.ptr().file;
- global.result.clearRetainingCapacity();
+fn updateEntryPoints() error{OutOfMemory}!void {
+ var html: std.ArrayListUnmanaged(u8) = .empty;
+ defer html.deinit(gpa);
+ for (entry_points.items) |sli| {
+ try html.appendSlice(gpa, "<li>");
+ try sli.sourceLocationLinkHtml(&html, selected_source_location == sli);
+ try html.appendSlice(gpa, "</li>\n");
+ }
+ js.updateEntryPoints(html.items.ptr, html.items.len);
+}
+
+fn updateCoverage() error{OutOfMemory}!void {
+ if (recent_coverage_update.items.len == 0) return;
+ const want_file = (selected_source_location orelse return).ptr().file;
+
+ var covered: std.ArrayListUnmanaged(SourceLocationIndex) = .empty;
+ defer covered.deinit(gpa);
// This code assumes 64-bit elements, which is incorrect if the executable
// being fuzzed is not a 64-bit CPU. It also assumes little-endian which
// can also be incorrect.
- comptime assert(abi.CoverageUpdateHeader.trailing[0] == .pc_bits_usize);
+ comptime assert(abi.fuzz.CoverageUpdateHeader.trailing[0] == .pc_bits_usize);
const n_bitset_elems = (coverage_source_locations.items.len + @bitSizeOf(u64) - 1) / @bitSizeOf(u64);
const covered_bits = std.mem.bytesAsSlice(
u64,
- recent_coverage_update.items[@sizeOf(abi.CoverageUpdateHeader)..][0 .. n_bitset_elems * @sizeOf(u64)],
+ recent_coverage_update.items[@sizeOf(abi.fuzz.CoverageUpdateHeader)..][0 .. n_bitset_elems * @sizeOf(u64)],
);
- var sli: u32 = 0;
+ var sli: SourceLocationIndex = @enumFromInt(0);
for (covered_bits) |elem| {
- global.result.ensureUnusedCapacity(gpa, 64) catch @panic("OOM");
+ try covered.ensureUnusedCapacity(gpa, 64);
for (0..@bitSizeOf(u64)) |i| {
- if ((elem & (@as(u64, 1) << @intCast(i))) != 0) global.add(sli, want_file);
- sli += 1;
+ if ((elem & (@as(u64, 1) << @intCast(i))) != 0) {
+ if (sli.ptr().file == want_file) {
+ covered.appendAssumeCapacity(sli);
+ }
+ }
+ sli = @enumFromInt(@intFromEnum(sli) + 1);
}
}
- return Slice(SourceLocationIndex).init(global.result.items);
+
+ js.updateCoverage(covered.items.ptr, covered.items.len);
}
+
+fn updateSource() error{OutOfMemory}!void {
+ if (recent_coverage_update.items.len == 0) return;
+ const file_sli = selected_source_location.?;
+ var html: std.ArrayListUnmanaged(u8) = .empty;
+ defer html.deinit(gpa);
+ file_sli.fileHtml(&html) catch |err| switch (err) {
+ error.OutOfMemory => |e| return e,
+ error.SourceUnavailable => {},
+ };
+ js.updateSource(html.items.ptr, html.items.len);
+}
+
+var selected_source_location: ?SourceLocationIndex = null;
+
+/// This function is not used directly by `main.js`, but a reference to it is
+/// emitted by `SourceLocationIndex.sourceLocationLinkHtml`.
+export fn fuzzSelectSli(sli: SourceLocationIndex) void {
+ if (!sli.haveCoverage()) return;
+ selected_source_location = sli;
+ updateEntryPoints() catch @panic("out of memory"); // highlights the selected one green
+ updateSource() catch @panic("out of memory");
+ updateCoverage() catch @panic("out of memory");
+}
+
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const Coverage = std.debug.Coverage;
+const abi = std.Build.abi;
+const assert = std.debug.assert;
+const gpa = std.heap.wasm_allocator;
+
+const Walk = @import("Walk");
+const html_render = @import("html_render");
+
+const nsSince = @import("main.zig").nsSince;
+const Slice = @import("main.zig").Slice;
+const fatal = @import("main.zig").fatal;
+const log = std.log;
+const String = Slice(u8);
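Aside on the coverage statistics above (a standalone sketch, not part of the patch): updateStats derives the covered-location count by popcounting the raw bitset that trails the CoverageUpdateHeader. The bitset bytes below are invented purely for illustration:

    const std = @import("std");

    test "covered source locations are the set bits of the coverage bitset" {
        // One bit per source location; a set bit means the fuzzer has reached it.
        const covered_bits = [_]u8{ 0b1010_0001, 0b0000_1111 };
        var count: usize = 0;
        for (covered_bits) |byte| count += @popCount(byte);
        try std.testing.expectEqual(@as(usize, 7), count);
    }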
diff --git a/lib/build-web/index.html b/lib/build-web/index.html
new file mode 100644
index 000000000000..117ae14c209c
--- /dev/null
+++ b/lib/build-web/index.html
@@ -0,0 +1,202 @@
+
+
+
+Zig Build System
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Files Discovered:
+ Files Analyzed:
+ Generic Instances Analyzed:
+ Inline Calls Analyzed:
+ Compilation Time:
+
+
+
+
+ Pipeline Component
+ CPU Time
+ Sum across all threads of the time spent in this pipeline component
+
+ Real Time
+ Wall-clock time elapsed between the start and end of this compilation phase
+
+ Compilation Phase
+
+
+
+
+ Parsing
+ tokenize converts a file of Zig source code into a sequence of tokens, which are then processed by Parse into an Abstract Syntax Tree (AST).
+
+
+
+ File Lower
+ Tokenization, parsing, and lowering of Zig source files to a high-level IR. Starting from module roots, every file theoretically accessible through a chain of @import calls is processed. Individual source files are processed serially, but different files are processed in parallel by a thread pool. The results of this phase of compilation are cached on disk per source file, meaning the time spent here is typically only relevant to "clean" builds.
+
+
+
+ AST Lowering
+ AstGen converts a file's AST into a high-level SSA IR named Zig Intermediate Representation (ZIR). The resulting ZIR code is cached on disk to avoid, for instance, re-lowering all source files in the Zig standard library each time the compiler is invoked.
+
+
+
+
+ Semantic Analysis
+ Sema interprets ZIR to perform type checking, compile-time code execution, and type resolution, collectively termed "semantic analysis". When a runtime function body is analyzed, it emits Analyzed Intermediate Representation (AIR) code to be sent to the next pipeline component. Semantic analysis is currently entirely single-threaded.
+
+
+
+ Declaration Lower
+ Semantic analysis, code generation, and linking, at the granularity of individual declarations (as opposed to whole source files). These components are run in parallel with one another. Semantic analysis is almost always the bottleneck, as it is complex and currently can only run single-threaded. This phase completes when a work queue empties, but semantic analysis may add work by one declaration referencing another. This is the main phase of compilation, typically taking significantly longer than File Lower (even in a clean build).
+
+
+
+ Code Generation
+ CodeGen converts AIR from Sema into machine instructions in the form of Machine Intermediate Representation (MIR). This work is usually highly parallel, since in most cases, arbitrarily many functions can be run through CodeGen simultaneously.
+
+
+
+
+ Linking
+ link converts MIR from CodeGen, as well as global constants and variables from Sema, and places them in the output binary. MIR is converted to a finished sequence of real instruction bytes. When using the LLVM backend, most of this work is instead deferred to the "LLVM Emit" phase.
+
+
+
+
+
+
+
+ LLVM Emit
+ Only applicable when using the LLVM backend. Conversion of generated LLVM bitcode to an object file, including any optimization passes. When using LLVM, this phase of compilation is typically the slowest by a significant margin. Unfortunately, the Zig compiler implementation has essentially no control over it.
+
+
+
+
+
+
+ Linker Flush
+ Finalizing the emitted binary, and ensuring it is fully written to disk. When using LLD, this phase represents the entire linker invocation. Otherwise, the amount of work performed here is dependent on details of Zig's linker implementation for the particular output format, but typically aims to be fairly minimal.
+
+
+
+
+
+ Files
+
+
+
+ File
+ Semantic Analysis
+ Code Generation
+ Linking
+
+
+
+
+
+
+
+ Declarations
+
+
+
+ File
+ Declaration
+ Analysis Count
+ The number of times the compiler analyzed some part of this declaration. If this is a function, inline and comptime calls to it are not included here. Typically, this value is approximately equal to the number of instances of a generic declaration.
+
+ Semantic Analysis
+ Code Generation
+ Linking
+
+
+
+
+
+
+
+ LLVM Pass Timings
+
+
+
+
+
+
+
+
+
+ Total Runs:
+ Unique Runs:
+ Speed: runs/sec
+ Coverage:
+
+
+
+
+
+
+
+
+Loading JavaScript...
+
+If you are using Firefox and zig build --listen is definitely running, you may be experiencing an unreasonably aggressive exponential
+backoff for WebSocket connection attempts, which is enabled by default and can block connection attempts for up to a minute. To disable this limit,
+open about:config and set the network.websocket.delay-failed-reconnects option to false.
+
+
+ Zig Build System
+
+ | steps
+ Rebuild
+
+
+
+
+
+
+
+
+
+ Help
+ This is the Zig Build System web interface. It allows live interaction with the build system.
+ The following zig build flags can expose extra features of this interface:
+
+ --time-report: collect and show statistics about the time taken to evaluate a build graph
+ --fuzz: enable the fuzzer for any Zig test binaries in the build graph (experimental)
+
+
+
+
+
+
diff --git a/lib/build-web/main.js b/lib/build-web/main.js
new file mode 100644
index 000000000000..481c33f024ba
--- /dev/null
+++ b/lib/build-web/main.js
@@ -0,0 +1,346 @@
+const domConnectionStatus = document.getElementById("connectionStatus");
+const domFirefoxWebSocketBullshitExplainer = document.getElementById("firefoxWebSocketBullshitExplainer");
+
+const domMain = document.getElementsByTagName("main")[0];
+const domSummary = {
+ stepCount: document.getElementById("summaryStepCount"),
+ status: document.getElementById("summaryStatus"),
+};
+const domButtonRebuild = document.getElementById("buttonRebuild");
+const domStepList = document.getElementById("stepList");
+let domSteps = [];
+
+let wasm_promise = fetch("main.wasm");
+let wasm_exports = null;
+
+const text_decoder = new TextDecoder();
+const text_encoder = new TextEncoder();
+
+domButtonRebuild.addEventListener("click", () => wasm_exports.rebuild());
+
+setConnectionStatus("Loading WebAssembly...", false);
+WebAssembly.instantiateStreaming(wasm_promise, {
+ core: {
+ log: function(ptr, len) {
+ const msg = decodeString(ptr, len);
+ console.log(msg);
+ },
+ panic: function (ptr, len) {
+ const msg = decodeString(ptr, len);
+ throw new Error("panic: " + msg);
+ },
+ timestamp: function () {
+ return BigInt(new Date());
+ },
+ hello: hello,
+ updateBuildStatus: updateBuildStatus,
+ updateStepStatus: updateStepStatus,
+ sendWsMessage: (ptr, len) => ws.send(new Uint8Array(wasm_exports.memory.buffer, ptr, len)),
+ },
+ fuzz: {
+ requestSources: fuzzRequestSources,
+ ready: fuzzReady,
+ updateStats: fuzzUpdateStats,
+ updateEntryPoints: fuzzUpdateEntryPoints,
+ updateSource: fuzzUpdateSource,
+ updateCoverage: fuzzUpdateCoverage,
+ },
+ time_report: {
+ updateCompile: timeReportUpdateCompile,
+ updateGeneric: timeReportUpdateGeneric,
+ },
+}).then(function(obj) {
+ setConnectionStatus("Connecting to WebSocket...", true);
+ connectWebSocket();
+
+ wasm_exports = obj.instance.exports;
+ window.wasm = obj; // for debugging
+});
+
+function connectWebSocket() {
+ const host = document.location.host;
+ const pathname = document.location.pathname;
+ const isHttps = document.location.protocol === 'https:';
+ const match = host.match(/^(.+):(\d+)$/);
+ const defaultPort = isHttps ? 443 : 80;
+ const port = match ? parseInt(match[2], 10) : defaultPort;
+ const hostName = match ? match[1] : host;
+ const wsProto = isHttps ? "wss:" : "ws:";
+ const wsUrl = wsProto + '//' + hostName + ':' + port + pathname;
+ ws = new WebSocket(wsUrl);
+ ws.binaryType = "arraybuffer";
+ ws.addEventListener('message', onWebSocketMessage, false);
+ ws.addEventListener('error', onWebSocketClose, false);
+ ws.addEventListener('close', onWebSocketClose, false);
+ ws.addEventListener('open', onWebSocketOpen, false);
+}
+function onWebSocketOpen() {
+ setConnectionStatus("Waiting for data...", false);
+}
+function onWebSocketMessage(ev) {
+ const jsArray = new Uint8Array(ev.data);
+ const ptr = wasm_exports.message_begin(jsArray.length);
+ const wasmArray = new Uint8Array(wasm_exports.memory.buffer, ptr, jsArray.length);
+ wasmArray.set(jsArray);
+ wasm_exports.message_end();
+}
+function onWebSocketClose() {
+ setConnectionStatus("WebSocket connection closed. Re-connecting...", true);
+ ws.removeEventListener('message', onWebSocketMessage, false);
+ ws.removeEventListener('error', onWebSocketClose, false);
+ ws.removeEventListener('close', onWebSocketClose, false);
+ ws.removeEventListener('open', onWebSocketOpen, false);
+ ws = null;
+ setTimeout(connectWebSocket, 1000);
+}
+
+function setConnectionStatus(msg, is_websocket_connect) {
+ domConnectionStatus.textContent = msg;
+ if (msg.length > 0) {
+ domConnectionStatus.classList.remove("hidden");
+ domMain.classList.add("hidden");
+ } else {
+ domConnectionStatus.classList.add("hidden");
+ domMain.classList.remove("hidden");
+ }
+ if (is_websocket_connect) {
+ domFirefoxWebSocketBullshitExplainer.classList.remove("hidden");
+ } else {
+ domFirefoxWebSocketBullshitExplainer.classList.add("hidden");
+ }
+}
+
+function hello(
+ steps_len,
+ build_status,
+ time_report,
+) {
+ domSummary.stepCount.textContent = steps_len;
+ updateBuildStatus(build_status);
+ setConnectionStatus("", false);
+
+ {
+ let entries = [];
+ for (let i = 0; i < steps_len; i += 1) {
+ const step_name = unwrapString(wasm_exports.stepName(i));
+ const code = document.createElement("code");
+ code.textContent = step_name;
+ const li = document.createElement("li");
+ li.appendChild(code);
+ entries.push(li);
+ }
+ domStepList.replaceChildren(...entries);
+ for (let i = 0; i < steps_len; i += 1) {
+ updateStepStatus(i);
+ }
+ }
+
+ if (time_report) timeReportReset(steps_len);
+ fuzzReset();
+}
+
+function updateBuildStatus(s) {
+ let text;
+ let active = false;
+ let reset_time_reports = false;
+ if (s == 0) {
+ text = "Idle";
+ } else if (s == 1) {
+ text = "Watching for changes...";
+ } else if (s == 2) {
+ text = "Running...";
+ active = true;
+ reset_time_reports = true;
+ } else if (s == 3) {
+ text = "Starting fuzzer...";
+ active = true;
+ } else {
+ console.log(`bad build status: ${s}`);
+ }
+ domSummary.status.textContent = text;
+ if (active) {
+ domSummary.status.classList.add("status-running");
+ domSummary.status.classList.remove("status-idle");
+ domButtonRebuild.disabled = true;
+ } else {
+ domSummary.status.classList.remove("status-running");
+ domSummary.status.classList.add("status-idle");
+ domButtonRebuild.disabled = false;
+ }
+ if (reset_time_reports) {
+ // Grey out and collapse all the time reports
+ for (const time_report_host of domTimeReportList.children) {
+ const details = time_report_host.shadowRoot.querySelector(":host > details");
+ details.classList.add("pending");
+ details.open = false;
+ }
+ }
+}
+function updateStepStatus(step_idx) {
+ const li = domStepList.children[step_idx];
+ const step_status = wasm_exports.stepStatus(step_idx);
+ li.classList.remove("step-wip", "step-success", "step-failure");
+ if (step_status == 0) {
+ // pending
+ } else if (step_status == 1) {
+ li.classList.add("step-wip");
+ } else if (step_status == 2) {
+ li.classList.add("step-success");
+ } else if (step_status == 3) {
+ li.classList.add("step-failure");
+ } else {
+ console.log(`bad step status: ${step_status}`);
+ }
+}
+
+function decodeString(ptr, len) {
+ if (len === 0) return "";
+ return text_decoder.decode(new Uint8Array(wasm_exports.memory.buffer, ptr, len));
+}
+function getU32Array(ptr, len) {
+ if (len === 0) return new Uint32Array();
+ return new Uint32Array(wasm_exports.memory.buffer, ptr, len);
+}
+function unwrapString(bigint) {
+ const ptr = Number(bigint & 0xffffffffn);
+ const len = Number(bigint >> 32n);
+ return decodeString(ptr, len);
+}
+
+const time_report_entry_template = document.getElementById("timeReportEntryTemplate").content;
+const domTimeReport = document.getElementById("timeReport");
+const domTimeReportList = document.getElementById("timeReportList");
+function timeReportReset(steps_len) {
+ let entries = [];
+ for (let i = 0; i < steps_len; i += 1) {
+ const step_name = unwrapString(wasm_exports.stepName(i));
+ const host = document.createElement("div");
+ const shadow = host.attachShadow({ mode: "open" });
+ shadow.appendChild(time_report_entry_template.cloneNode(true));
+ shadow.querySelector(":host > details").classList.add("pending");
+ const slotted_name = document.createElement("code");
+ slotted_name.setAttribute("slot", "step-name");
+ slotted_name.textContent = step_name;
+ host.appendChild(slotted_name);
+ entries.push(host);
+ }
+ domTimeReportList.replaceChildren(...entries);
+ domTimeReport.classList.remove("hidden");
+}
+function timeReportUpdateCompile(
+ step_idx,
+ inner_html_ptr,
+ inner_html_len,
+ file_table_html_ptr,
+ file_table_html_len,
+ decl_table_html_ptr,
+ decl_table_html_len,
+ use_llvm,
+) {
+ const inner_html = decodeString(inner_html_ptr, inner_html_len);
+ const file_table_html = decodeString(file_table_html_ptr, file_table_html_len);
+ const decl_table_html = decodeString(decl_table_html_ptr, decl_table_html_len);
+
+ const host = domTimeReportList.children.item(step_idx);
+ const shadow = host.shadowRoot;
+
+ shadow.querySelector(":host > details").classList.remove("pending", "no-llvm");
+
+ shadow.getElementById("genericReport").classList.add("hidden");
+ shadow.getElementById("compileReport").classList.remove("hidden");
+
+ if (!use_llvm) shadow.querySelector(":host > details").classList.add("no-llvm");
+ host.innerHTML = inner_html;
+ shadow.getElementById("fileTableBody").innerHTML = file_table_html;
+ shadow.getElementById("declTableBody").innerHTML = decl_table_html;
+}
+function timeReportUpdateGeneric(
+ step_idx,
+ inner_html_ptr,
+ inner_html_len,
+) {
+ const inner_html = decodeString(inner_html_ptr, inner_html_len);
+ const host = domTimeReportList.children.item(step_idx);
+ const shadow = host.shadowRoot;
+ shadow.querySelector(":host > details").classList.remove("pending", "no-llvm");
+ shadow.getElementById("genericReport").classList.remove("hidden");
+ shadow.getElementById("compileReport").classList.add("hidden");
+ host.innerHTML = inner_html;
+}
+
+const fuzz_entry_template = document.getElementById("fuzzEntryTemplate").content;
+const domFuzz = document.getElementById("fuzz");
+const domFuzzStatus = document.getElementById("fuzzStatus");
+const domFuzzEntries = document.getElementById("fuzzEntries");
+let domFuzzInstance = null;
+function fuzzRequestSources() {
+ domFuzzStatus.classList.remove("hidden");
+ domFuzzStatus.textContent = "Loading sources tarball...";
+ fetch("sources.tar").then(function(response) {
+ if (!response.ok) throw new Error("unable to download sources");
+ domFuzzStatus.textContent = "Parsing fuzz test sources...";
+ return response.arrayBuffer();
+ }).then(function(buffer) {
+ if (buffer.byteLength === 0) throw new Error("sources.tar was empty");
+ const js_array = new Uint8Array(buffer);
+ const ptr = wasm_exports.alloc(js_array.length);
+ const wasm_array = new Uint8Array(wasm_exports.memory.buffer, ptr, js_array.length);
+ wasm_array.set(js_array);
+ wasm_exports.fuzzUnpackSources(ptr, js_array.length);
+ domFuzzStatus.textContent = "";
+ domFuzzStatus.classList.add("hidden");
+ });
+}
+function fuzzReady() {
+ domFuzz.classList.remove("hidden");
+
+ // TODO: multiple fuzzer instances
+ if (domFuzzInstance !== null) return;
+
+ const host = document.createElement("div");
+ const shadow = host.attachShadow({ mode: "open" });
+ shadow.appendChild(fuzz_entry_template.cloneNode(true));
+
+ domFuzzInstance = host;
+ domFuzzEntries.appendChild(host);
+}
+function fuzzReset() {
+ domFuzz.classList.add("hidden");
+ domFuzzEntries.replaceChildren();
+ domFuzzInstance = null;
+}
+function fuzzUpdateStats(stats_html_ptr, stats_html_len) {
+ if (domFuzzInstance === null) throw new Error("fuzzUpdateStats called when fuzzer inactive");
+ const stats_html = decodeString(stats_html_ptr, stats_html_len);
+ const host = domFuzzInstance;
+ host.innerHTML = stats_html;
+}
+function fuzzUpdateEntryPoints(entry_points_html_ptr, entry_points_html_len) {
+ if (domFuzzInstance === null) throw new Error("fuzzUpdateEntryPoints called when fuzzer inactive");
+ const entry_points_html = decodeString(entry_points_html_ptr, entry_points_html_len);
+ const domEntryPointList = domFuzzInstance.shadowRoot.getElementById("entryPointList");
+ domEntryPointList.innerHTML = entry_points_html;
+}
+function fuzzUpdateSource(source_html_ptr, source_html_len) {
+ if (domFuzzInstance === null) throw new Error("fuzzUpdateSource called when fuzzer inactive");
+ const source_html = decodeString(source_html_ptr, source_html_len);
+ const domSourceText = domFuzzInstance.shadowRoot.getElementById("sourceText");
+ domSourceText.innerHTML = source_html;
+ domFuzzInstance.shadowRoot.getElementById("source").classList.remove("hidden");
+}
+function fuzzUpdateCoverage(covered_ptr, covered_len) {
+ if (domFuzzInstance === null) throw new Error("fuzzUpdateCoverage called when fuzzer inactive");
+ const shadow = domFuzzInstance.shadowRoot;
+ const domSourceText = shadow.getElementById("sourceText");
+ const covered = getU32Array(covered_ptr, covered_len);
+ for (let i = 0; i < domSourceText.children.length; i += 1) {
+ const childDom = domSourceText.children[i];
+ if (childDom.id != null && childDom.id[0] == "l") {
+ childDom.classList.add("l");
+ childDom.classList.remove("c");
+ }
+ }
+ for (const sli of covered) {
+ shadow.getElementById(`l${sli}`).classList.add("c");
+ }
+}
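The unwrapString helper above splits a single u64 returned by wasm exports such as stepName into a pointer and a length; the packing side is the Slice type in main.zig below. A small sketch (not from the patch) of that layout, assuming only that the first field of a packed struct(u64) occupies the low bits:

    const std = @import("std");

    test "Slice packs a 32-bit pointer and length into one u64" {
        const S = packed struct(u64) { ptr: u32, len: u32 };
        const s: S = .{ .ptr = 0x1000, .len = 5 };
        const raw: u64 = @bitCast(s);
        // Mirrors unwrapString in main.js: low 32 bits are the pointer, high 32 bits the length.
        try std.testing.expectEqual(@as(u64, 0x1000), raw & 0xffff_ffff);
        try std.testing.expectEqual(@as(u64, 5), raw >> 32);
    }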
diff --git a/lib/build-web/main.zig b/lib/build-web/main.zig
new file mode 100644
index 000000000000..e971f6de48e7
--- /dev/null
+++ b/lib/build-web/main.zig
@@ -0,0 +1,213 @@
+const std = @import("std");
+const assert = std.debug.assert;
+const abi = std.Build.abi;
+const gpa = std.heap.wasm_allocator;
+const log = std.log;
+const Allocator = std.mem.Allocator;
+
+const fuzz = @import("fuzz.zig");
+const time_report = @import("time_report.zig");
+
+/// Nanoseconds.
+var server_base_timestamp: i64 = 0;
+/// Milliseconds.
+var client_base_timestamp: i64 = 0;
+
+pub var step_list: []Step = &.{};
+/// Not accessed after initialization, but must be freed alongside `step_list`.
+pub var step_list_data: []u8 = &.{};
+
+const Step = struct {
+ name: []const u8,
+ status: abi.StepUpdate.Status,
+};
+
+const js = struct {
+ extern "core" fn log(ptr: [*]const u8, len: usize) void;
+ extern "core" fn panic(ptr: [*]const u8, len: usize) noreturn;
+ extern "core" fn timestamp() i64;
+ extern "core" fn hello(
+ steps_len: u32,
+ status: abi.BuildStatus,
+ time_report: bool,
+ ) void;
+ extern "core" fn updateBuildStatus(status: abi.BuildStatus) void;
+ extern "core" fn updateStepStatus(step_idx: u32) void;
+ extern "core" fn sendWsMessage(ptr: [*]const u8, len: usize) void;
+};
+
+pub const std_options: std.Options = .{
+ .logFn = logFn,
+};
+
+pub fn panic(msg: []const u8, st: ?*std.builtin.StackTrace, addr: ?usize) noreturn {
+ _ = st;
+ _ = addr;
+ log.err("panic: {s}", .{msg});
+ @trap();
+}
+
+fn logFn(
+ comptime message_level: log.Level,
+ comptime scope: @TypeOf(.enum_literal),
+ comptime format: []const u8,
+ args: anytype,
+) void {
+ const level_txt = comptime message_level.asText();
+ const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): ";
+ var buf: [500]u8 = undefined;
+ const line = std.fmt.bufPrint(&buf, level_txt ++ prefix2 ++ format, args) catch l: {
+ buf[buf.len - 3 ..][0..3].* = "...".*;
+ break :l &buf;
+ };
+ js.log(line.ptr, line.len);
+}
+
+export fn alloc(n: usize) [*]u8 {
+ const slice = gpa.alloc(u8, n) catch @panic("OOM");
+ return slice.ptr;
+}
+
+var message_buffer: std.ArrayListAlignedUnmanaged(u8, .of(u64)) = .empty;
+
+/// Resizes the message buffer to be the correct length; returns the pointer to
+/// the query string.
+export fn message_begin(len: usize) [*]u8 {
+ message_buffer.resize(gpa, len) catch @panic("OOM");
+ return message_buffer.items.ptr;
+}
+
+export fn message_end() void {
+ const msg_bytes = message_buffer.items;
+
+ const tag: abi.ToClientTag = @enumFromInt(msg_bytes[0]);
+ switch (tag) {
+ _ => @panic("malformed message"),
+
+ .hello => return helloMessage(msg_bytes) catch @panic("OOM"),
+ .status_update => return statusUpdateMessage(msg_bytes) catch @panic("OOM"),
+ .step_update => return stepUpdateMessage(msg_bytes) catch @panic("OOM"),
+
+ .fuzz_source_index => return fuzz.sourceIndexMessage(msg_bytes) catch @panic("OOM"),
+ .fuzz_coverage_update => return fuzz.coverageUpdateMessage(msg_bytes) catch @panic("OOM"),
+ .fuzz_entry_points => return fuzz.entryPointsMessage(msg_bytes) catch @panic("OOM"),
+
+ .time_report_generic_result => return time_report.genericResultMessage(msg_bytes) catch @panic("OOM"),
+ .time_report_compile_result => return time_report.compileResultMessage(msg_bytes) catch @panic("OOM"),
+ }
+}
+
+const String = Slice(u8);
+
+pub fn Slice(T: type) type {
+ return packed struct(u64) {
+ ptr: u32,
+ len: u32,
+
+ pub fn init(s: []const T) @This() {
+ return .{
+ .ptr = @intFromPtr(s.ptr),
+ .len = s.len,
+ };
+ }
+ };
+}
+
+pub fn fatal(comptime format: []const u8, args: anytype) noreturn {
+ var buf: [500]u8 = undefined;
+ const line = std.fmt.bufPrint(&buf, format, args) catch l: {
+ buf[buf.len - 3 ..][0..3].* = "...".*;
+ break :l &buf;
+ };
+ js.panic(line.ptr, line.len);
+}
+
+fn helloMessage(msg_bytes: []align(4) u8) Allocator.Error!void {
+ if (msg_bytes.len < @sizeOf(abi.Hello)) @panic("malformed Hello message");
+ const hdr: *const abi.Hello = @ptrCast(msg_bytes[0..@sizeOf(abi.Hello)]);
+ const trailing = msg_bytes[@sizeOf(abi.Hello)..];
+
+ client_base_timestamp = js.timestamp();
+ server_base_timestamp = hdr.timestamp;
+
+ const steps = try gpa.alloc(Step, hdr.steps_len);
+ errdefer gpa.free(steps);
+
+ const step_name_lens: []align(1) const u32 = @ptrCast(trailing[0 .. steps.len * 4]);
+
+ const step_name_data_len: usize = len: {
+ var sum: usize = 0;
+ for (step_name_lens) |n| sum += n;
+ break :len sum;
+ };
+ const step_name_data: []const u8 = trailing[steps.len * 4 ..][0..step_name_data_len];
+ const step_status_bits: []const u8 = trailing[steps.len * 4 + step_name_data_len ..];
+
+ const duped_step_name_data = try gpa.dupe(u8, step_name_data);
+ errdefer gpa.free(duped_step_name_data);
+
+ var name_off: usize = 0;
+ for (steps, step_name_lens, 0..) |*step_out, name_len, step_idx| {
+ step_out.* = .{
+ .name = duped_step_name_data[name_off..][0..name_len],
+ .status = @enumFromInt(@as(u2, @truncate(step_status_bits[step_idx / 4] >> @intCast((step_idx % 4) * 2)))),
+ };
+ name_off += name_len;
+ }
+
+ gpa.free(step_list);
+ gpa.free(step_list_data);
+ step_list = steps;
+ step_list_data = duped_step_name_data;
+
+ js.hello(step_list.len, hdr.status, hdr.flags.time_report);
+}
+fn statusUpdateMessage(msg_bytes: []u8) Allocator.Error!void {
+ if (msg_bytes.len < @sizeOf(abi.StatusUpdate)) @panic("malformed StatusUpdate message");
+ const msg: *const abi.StatusUpdate = @ptrCast(msg_bytes[0..@sizeOf(abi.StatusUpdate)]);
+ js.updateBuildStatus(msg.new);
+}
+fn stepUpdateMessage(msg_bytes: []u8) Allocator.Error!void {
+ if (msg_bytes.len < @sizeOf(abi.StepUpdate)) @panic("malformed StepUpdate message");
+ const msg: *const abi.StepUpdate = @ptrCast(msg_bytes[0..@sizeOf(abi.StepUpdate)]);
+ if (msg.step_idx >= step_list.len) @panic("malformed StepUpdate message");
+ step_list[msg.step_idx].status = msg.bits.status;
+ js.updateStepStatus(msg.step_idx);
+}
+
+export fn stepName(idx: usize) String {
+ return .init(step_list[idx].name);
+}
+export fn stepStatus(idx: usize) u8 {
+ return @intFromEnum(step_list[idx].status);
+}
+
+export fn rebuild() void {
+ const msg: abi.Rebuild = .{};
+ const raw: []const u8 = @ptrCast(&msg);
+ js.sendWsMessage(raw.ptr, raw.len);
+}
+
+/// Nanoseconds passed since a server timestamp.
+pub fn nsSince(server_timestamp: i64) i64 {
+ const ms_passed = js.timestamp() - client_base_timestamp;
+ const ns_passed = server_base_timestamp - server_timestamp;
+ return ns_passed + ms_passed * std.time.ns_per_ms;
+}
+
+pub fn fmtEscapeHtml(unescaped: []const u8) HtmlEscaper {
+ return .{ .unescaped = unescaped };
+}
+const HtmlEscaper = struct {
+ unescaped: []const u8,
+ pub fn format(he: HtmlEscaper, w: *std.Io.Writer) !void {
+ for (he.unescaped) |c| switch (c) {
+ '&' => try w.writeAll("&amp;"),
+ '<' => try w.writeAll("&lt;"),
+ '>' => try w.writeAll("&gt;"),
+ '"' => try w.writeAll("&quot;"),
+ '\'' => try w.writeAll("&#39;"),
+ else => try w.writeByte(c),
+ };
+ }
+};
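helloMessage above unpacks step statuses that arrive packed four to a byte, two bits per step. A self-contained sketch (not part of the patch) of that unpacking; the byte value is made up and the abi status enum is not redefined here, so plain u2 values stand in for it:

    const std = @import("std");

    test "unpack two-bit step statuses, four per byte" {
        // Step i lives in byte i / 4, at bit offset (i % 4) * 2.
        const step_status_bits = [_]u8{0b11_10_01_00}; // steps 0..3 hold 0, 1, 2, 3
        var statuses: [4]u2 = undefined;
        for (&statuses, 0..) |*out, i| {
            out.* = @truncate(step_status_bits[i / 4] >> @intCast((i % 4) * 2));
        }
        try std.testing.expectEqual([4]u2{ 0, 1, 2, 3 }, statuses);
    }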
diff --git a/lib/build-web/style.css b/lib/build-web/style.css
new file mode 100644
index 000000000000..eb89ced52707
--- /dev/null
+++ b/lib/build-web/style.css
@@ -0,0 +1,240 @@
+body {
+ font-family: system-ui, -apple-system, Roboto, "Segoe UI", sans-serif;
+ color: #000000;
+ padding: 1em 10%;
+}
+ul.no-marker {
+ list-style-type: none;
+ padding-left: 0;
+}
+hr {
+ margin: 2em 0;
+}
+.hidden {
+ display: none;
+}
+.empty-cell {
+ background: #ccc;
+}
+table.time-stats > tbody > tr > th {
+ text-align: left;
+}
+table.time-stats > tbody > tr > td {
+ text-align: right;
+}
+details > summary {
+ cursor: pointer;
+ font-size: 1.5em;
+}
+.tooltip {
+ text-decoration: underline;
+ cursor: help;
+}
+.tooltip-content {
+ border-radius: 6px;
+ display: none;
+ position: absolute;
+ background: #fff;
+ border: 1px solid black;
+ max-width: 500px;
+ padding: 1em;
+ text-align: left;
+ font-weight: normal;
+ pointer-events: none;
+}
+.tooltip:hover > .tooltip-content {
+ display: block;
+}
+table {
+ margin: 1.0em auto 1.5em 0;
+ border-collapse: collapse;
+}
+th, td {
+ padding: 0.5em 1em 0.5em 1em;
+ border: 1px solid;
+ border-color: black;
+}
+a, button {
+ color: #2A6286;
+}
+button {
+ background: #eee;
+ cursor: pointer;
+ border: none;
+ border-radius: 3px;
+ padding: 0.2em 0.5em;
+}
+button.big-btn {
+ font-size: 1.3em;
+}
+button.linkish {
+ background: none;
+ text-decoration: underline;
+ padding: 0;
+}
+button:disabled {
+ color: #888;
+ cursor: not-allowed;
+}
+pre {
+ font-family: "Source Code Pro", monospace;
+ font-size: 1em;
+ background-color: #F5F5F5;
+ padding: 1em;
+ margin: 0;
+ overflow-x: auto;
+}
+:not(pre) > code {
+ white-space: break-spaces;
+}
+code {
+ font-family: "Source Code Pro", monospace;
+ font-size: 0.9em;
+}
+code a {
+ color: #000000;
+}
+kbd {
+ color: #000;
+ background-color: #fafbfc;
+ border-color: #d1d5da;
+ border-bottom-color: #c6cbd1;
+ box-shadow-color: #c6cbd1;
+ display: inline-block;
+ padding: 0.3em 0.2em;
+ font: 1.2em monospace;
+ line-height: 0.8em;
+ vertical-align: middle;
+ border: solid 1px;
+ border-radius: 3px;
+ box-shadow: inset 0 -1px 0;
+ cursor: default;
+}
+.status-running { color: #181; }
+.status-idle { color: #444; }
+.step-success { color: #181; }
+.step-failure { color: #d11; }
+.step-wip::before {
+ content: '';
+ position: absolute;
+ margin-left: -1.5em;
+ width: 1em;
+ text-align: center;
+ animation-name: spinner;
+ animation-duration: 0.5s;
+ animation-iteration-count: infinite;
+ animation-timing-function: step-start;
+}
+@keyframes spinner {
+ 0% { content: '|'; }
+ 25% { content: '/'; }
+ 50% { content: '-'; }
+ 75% { content: '\\'; }
+ 100% { content: '|'; }
+}
+
+.l {
+ display: inline-block;
+ background: red;
+ width: 1em;
+ height: 1em;
+ border-radius: 1em;
+}
+.c {
+ background-color: green;
+}
+
+.tok-kw {
+ color: #333;
+ font-weight: bold;
+}
+.tok-str {
+ color: #d14;
+}
+.tok-builtin {
+ color: #0086b3;
+}
+.tok-comment {
+ color: #777;
+ font-style: italic;
+}
+.tok-fn {
+ color: #900;
+ font-weight: bold;
+}
+.tok-null {
+ color: #008080;
+}
+.tok-number {
+ color: #008080;
+}
+.tok-type {
+ color: #458;
+ font-weight: bold;
+}
+
+@media (prefers-color-scheme: dark) {
+ body {
+ background-color: #111;
+ color: #ddd;
+ }
+ pre {
+ background-color: #222;
+ }
+ a, button {
+ color: #88f;
+ }
+ button {
+ background: #333;
+ }
+ button:disabled {
+ color: #555;
+ }
+ code a {
+ color: #eee;
+ }
+ th, td {
+ border-color: white;
+ }
+ .empty-cell {
+ background: #000;
+ }
+ .tooltip-content {
+ background: #060606;
+ border-color: white;
+ }
+ .status-running { color: #90ee90; }
+ .status-idle { color: #bbb; }
+ .step-success { color: #90ee90; }
+ .step-failure { color: #f66; }
+ .l {
+ background-color: red;
+ }
+ .c {
+ background-color: green;
+ }
+ .tok-kw {
+ color: #eee;
+ }
+ .tok-str {
+ color: #2e5;
+ }
+ .tok-builtin {
+ color: #ff894c;
+ }
+ .tok-comment {
+ color: #aa7;
+ }
+ .tok-fn {
+ color: #B1A0F8;
+ }
+ .tok-null {
+ color: #ff8080;
+ }
+ .tok-number {
+ color: #ff8080;
+ }
+ .tok-type {
+ color: #68f;
+ }
+}
diff --git a/lib/build-web/time_report.css b/lib/build-web/time_report.css
new file mode 100644
index 000000000000..27a7c4b02238
--- /dev/null
+++ b/lib/build-web/time_report.css
@@ -0,0 +1,43 @@
+:host > details {
+ padding: 0.5em 1em;
+ background: #f2f2f2;
+ margin-bottom: 1.0em;
+ overflow-x: scroll;
+}
+:host > details.pending {
+ pointer-events: none;
+ background: #fafafa;
+ color: #666;
+}
+:host > details > div {
+ margin: 1em 2em;
+ overflow: scroll; /* we'll try to avoid overflow, but if it does happen, this makes sense */
+}
+.stats {
+ font-size: 1.2em;
+}
+details.section {
+ margin: 1.0em 0 0 0;
+}
+details.section > summary {
+ font-weight: bold;
+}
+details.section > :not(summary) {
+ margin-left: 2em;
+}
+:host > details.no-llvm .llvm-only {
+ display: none;
+}
+@media (prefers-color-scheme: dark) {
+ :host > details {
+ background: #222;
+ }
+ :host > details.pending {
+ background: #181818;
+ color: #888;
+ }
+}
+th {
+ max-width: 20em; /* don't let the 'file' column get crazy long */
+ overflow-wrap: anywhere; /* avoid overflow where possible */
+}
diff --git a/lib/build-web/time_report.zig b/lib/build-web/time_report.zig
new file mode 100644
index 000000000000..de5f9abd0c4a
--- /dev/null
+++ b/lib/build-web/time_report.zig
@@ -0,0 +1,234 @@
+const std = @import("std");
+const gpa = std.heap.wasm_allocator;
+const abi = std.Build.abi.time_report;
+const fmtEscapeHtml = @import("root").fmtEscapeHtml;
+const step_list = &@import("root").step_list;
+
+const js = struct {
+ extern "time_report" fn updateGeneric(
+ /// The index of the step.
+ step_idx: u32,
+ // The HTML which will be used to populate the template slots.
+ inner_html_ptr: [*]const u8,
+ inner_html_len: usize,
+ ) void;
+ extern "time_report" fn updateCompile(
+ /// The index of the step.
+ step_idx: u32,
+ // The HTML which will be used to populate the template slots.
+ inner_html_ptr: [*]const u8,
+ inner_html_len: usize,
+ // The HTML which will populate the <tbody> of the file table.
+ file_table_html_ptr: [*]const u8,
+ file_table_html_len: usize,
+ // The HTML which will populate the <tbody> of the decl table.
+ decl_table_html_ptr: [*]const u8,
+ decl_table_html_len: usize,
+ /// Whether the LLVM backend was used. If not, LLVM-specific statistics are hidden.
+ use_llvm: bool,
+ ) void;
+};
+
+pub fn genericResultMessage(msg_bytes: []u8) error{OutOfMemory}!void {
+ if (msg_bytes.len != @sizeOf(abi.GenericResult)) @panic("malformed GenericResult message");
+ const msg: *const abi.GenericResult = @ptrCast(msg_bytes);
+ if (msg.step_idx >= step_list.*.len) @panic("malformed GenericResult message");
+ const inner_html = try std.fmt.allocPrint(gpa,
+ \\{[step_name]f}
+ \\{[stat_total_time]D}
+ , .{
+ .step_name = fmtEscapeHtml(step_list.*[msg.step_idx].name),
+ .stat_total_time = msg.ns_total,
+ });
+ defer gpa.free(inner_html);
+ js.updateGeneric(msg.step_idx, inner_html.ptr, inner_html.len);
+}
+
+pub fn compileResultMessage(msg_bytes: []u8) error{OutOfMemory}!void {
+ const max_table_rows = 500;
+
+ if (msg_bytes.len < @sizeOf(abi.CompileResult)) @panic("malformed CompileResult message");
+ const hdr: *const abi.CompileResult = @ptrCast(msg_bytes[0..@sizeOf(abi.CompileResult)]);
+ if (hdr.step_idx >= step_list.*.len) @panic("malformed CompileResult message");
+ var trailing = msg_bytes[@sizeOf(abi.CompileResult)..];
+
+ const llvm_pass_timings = trailing[0..hdr.llvm_pass_timings_len];
+ trailing = trailing[hdr.llvm_pass_timings_len..];
+
+ const FileTimeReport = struct {
+ name: []const u8,
+ ns_sema: u64,
+ ns_codegen: u64,
+ ns_link: u64,
+ };
+ const DeclTimeReport = struct {
+ file_name: []const u8,
+ name: []const u8,
+ sema_count: u32,
+ ns_sema: u64,
+ ns_codegen: u64,
+ ns_link: u64,
+ };
+
+ const slowest_files = try gpa.alloc(FileTimeReport, hdr.files_len);
+ defer gpa.free(slowest_files);
+
+ const slowest_decls = try gpa.alloc(DeclTimeReport, hdr.decls_len);
+ defer gpa.free(slowest_decls);
+
+ for (slowest_files) |*file_out| {
+ const i = std.mem.indexOfScalar(u8, trailing, 0) orelse @panic("malformed CompileResult message");
+ file_out.* = .{
+ .name = trailing[0..i],
+ .ns_sema = 0,
+ .ns_codegen = 0,
+ .ns_link = 0,
+ };
+ trailing = trailing[i + 1 ..];
+ }
+
+ for (slowest_decls) |*decl_out| {
+ const i = std.mem.indexOfScalar(u8, trailing, 0) orelse @panic("malformed CompileResult message");
+ const file_idx = std.mem.readInt(u32, trailing[i..][1..5], .little);
+ const sema_count = std.mem.readInt(u32, trailing[i..][5..9], .little);
+ const sema_ns = std.mem.readInt(u64, trailing[i..][9..17], .little);
+ const codegen_ns = std.mem.readInt(u64, trailing[i..][17..25], .little);
+ const link_ns = std.mem.readInt(u64, trailing[i..][25..33], .little);
+ const file = &slowest_files[file_idx];
+ decl_out.* = .{
+ .file_name = file.name,
+ .name = trailing[0..i],
+ .sema_count = sema_count,
+ .ns_sema = sema_ns,
+ .ns_codegen = codegen_ns,
+ .ns_link = link_ns,
+ };
+ trailing = trailing[i + 33 ..];
+ file.ns_sema += sema_ns;
+ file.ns_codegen += codegen_ns;
+ file.ns_link += link_ns;
+ }
+
+ const S = struct {
+ fn fileLessThan(_: void, lhs: FileTimeReport, rhs: FileTimeReport) bool {
+ const lhs_ns = lhs.ns_sema + lhs.ns_codegen + lhs.ns_link;
+ const rhs_ns = rhs.ns_sema + rhs.ns_codegen + rhs.ns_link;
+ return lhs_ns > rhs_ns; // flipped to sort in reverse order
+ }
+ fn declLessThan(_: void, lhs: DeclTimeReport, rhs: DeclTimeReport) bool {
+ //if (true) return lhs.sema_count > rhs.sema_count;
+ const lhs_ns = lhs.ns_sema + lhs.ns_codegen + lhs.ns_link;
+ const rhs_ns = rhs.ns_sema + rhs.ns_codegen + rhs.ns_link;
+ return lhs_ns > rhs_ns; // flipped to sort in reverse order
+ }
+ };
+ std.mem.sort(FileTimeReport, slowest_files, {}, S.fileLessThan);
+ std.mem.sort(DeclTimeReport, slowest_decls, {}, S.declLessThan);
+
+ const stats = hdr.stats;
+ const inner_html = try std.fmt.allocPrint(gpa,
+ \\{[step_name]f}
+ \\{[stat_reachable_files]d}
+ \\{[stat_imported_files]d}
+ \\{[stat_generic_instances]d}
+ \\{[stat_inline_calls]d}
+ \\{[stat_compilation_time]D}
+ \\{[cpu_time_parse]D}
+ \\{[cpu_time_astgen]D}
+ \\{[cpu_time_sema]D}
+ \\{[cpu_time_codegen]D}
+ \\{[cpu_time_link]D}
+ \\{[real_time_files]D}
+ \\{[real_time_decls]D}
+ \\{[real_time_llvm_emit]D}
+ \\{[real_time_link_flush]D}
+ \\{[llvm_pass_timings]f}
+ \\
+ , .{
+ .step_name = fmtEscapeHtml(step_list.*[hdr.step_idx].name),
+ .stat_reachable_files = stats.n_reachable_files,
+ .stat_imported_files = stats.n_imported_files,
+ .stat_generic_instances = stats.n_generic_instances,
+ .stat_inline_calls = stats.n_inline_calls,
+ .stat_compilation_time = hdr.ns_total,
+
+ .cpu_time_parse = stats.cpu_ns_parse,
+ .cpu_time_astgen = stats.cpu_ns_astgen,
+ .cpu_time_sema = stats.cpu_ns_sema,
+ .cpu_time_codegen = stats.cpu_ns_codegen,
+ .cpu_time_link = stats.cpu_ns_link,
+ .real_time_files = stats.real_ns_files,
+ .real_time_decls = stats.real_ns_decls,
+ .real_time_llvm_emit = stats.real_ns_llvm_emit,
+ .real_time_link_flush = stats.real_ns_link_flush,
+
+ .llvm_pass_timings = fmtEscapeHtml(llvm_pass_timings),
+ });
+ defer gpa.free(inner_html);
+
+ var file_table_html: std.ArrayListUnmanaged(u8) = .empty;
+ defer file_table_html.deinit(gpa);
+ for (slowest_files[0..@min(max_table_rows, slowest_files.len)]) |file| {
+ try file_table_html.writer(gpa).print(
+ \\<tr>
+ \\ <td>{f}</td>
+ \\ <td>{D}</td>
+ \\ <td>{D}</td>
+ \\ <td>{D}</td>
+ \\</tr>
+ \\
+ , .{
+ fmtEscapeHtml(file.name),
+ file.ns_sema,
+ file.ns_codegen,
+ file.ns_link,
+ });
+ }
+ if (slowest_files.len > max_table_rows) {
+ try file_table_html.writer(gpa).print(
+ \\<tr><td>{d} more rows omitted</td></tr>
+ \\
+ , .{slowest_files.len - max_table_rows});
+ }
+
+ var decl_table_html: std.ArrayListUnmanaged(u8) = .empty;
+ defer decl_table_html.deinit(gpa);
+
+ for (slowest_decls[0..@min(max_table_rows, slowest_decls.len)]) |decl| {
+ try decl_table_html.writer(gpa).print(
+ \\<tr>
+ \\ <td>{f}</td>
+ \\ <td>{f}</td>
+ \\ <td>{d}</td>
+ \\ <td>{D}</td>
+ \\ <td>{D}</td>
+ \\ <td>{D}</td>
+ \\</tr>
+ \\
+ , .{
+ fmtEscapeHtml(decl.file_name),
+ fmtEscapeHtml(decl.name),
+ decl.sema_count,
+ decl.ns_sema,
+ decl.ns_codegen,
+ decl.ns_link,
+ });
+ }
+ if (slowest_decls.len > max_table_rows) {
+ try decl_table_html.writer(gpa).print(
+ \\<tr><td>{d} more rows omitted</td></tr>
+ \\
+ , .{slowest_decls.len - max_table_rows});
+ }
+
+ js.updateCompile(
+ hdr.step_idx,
+ inner_html.ptr,
+ inner_html.len,
+ file_table_html.items.ptr,
+ file_table_html.items.len,
+ decl_table_html.items.ptr,
+ decl_table_html.items.len,
+ hdr.flags.use_llvm,
+ );
+}
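
Illustrative sketch, not part of the patch: the parsing loop above implies a fixed wire layout for each per-declaration time-report record, a NUL-terminated declaration name followed by 32 bytes of little-endian integers (u32 file index, u32 semantic-analysis count, and u64 nanosecond totals for sema, codegen, and link). A minimal producer-side sketch of that layout, with hypothetical names, could look like:

    const std = @import("std");

    /// Hypothetical helper mirroring the reads in the wasm client above;
    /// this is not the compiler's actual serialization code.
    fn appendDeclRecord(
        gpa: std.mem.Allocator,
        out: *std.ArrayListUnmanaged(u8),
        name: []const u8, // declaration name; must not contain a 0 byte
        file_idx: u32, // index into the preceding file-name table
        sema_count: u32,
        ns_sema: u64,
        ns_codegen: u64,
        ns_link: u64,
    ) !void {
        try out.ensureUnusedCapacity(gpa, name.len + 1 + 32);
        out.appendSliceAssumeCapacity(name);
        out.appendAssumeCapacity(0); // NUL terminator marks the end of the name
        var fixed: [32]u8 = undefined;
        std.mem.writeInt(u32, fixed[0..4], file_idx, .little);
        std.mem.writeInt(u32, fixed[4..8], sema_count, .little);
        std.mem.writeInt(u64, fixed[8..16], ns_sema, .little);
        std.mem.writeInt(u64, fixed[16..24], ns_codegen, .little);
        std.mem.writeInt(u64, fixed[24..32], ns_link, .little);
        out.appendSliceAssumeCapacity(&fixed);
    }
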
diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig
index 6b7266ee710b..2e84309ccd49 100644
--- a/lib/compiler/build_runner.zig
+++ b/lib/compiler/build_runner.zig
@@ -9,7 +9,7 @@ const ArrayList = std.ArrayList;
const File = std.fs.File;
const Step = std.Build.Step;
const Watch = std.Build.Watch;
-const Fuzz = std.Build.Fuzz;
+const WebServer = std.Build.WebServer;
const Allocator = std.mem.Allocator;
const fatal = std.process.fatal;
const Writer = std.io.Writer;
@@ -25,15 +25,16 @@ pub const std_options: std.Options = .{
};
pub fn main() !void {
- // Here we use an ArenaAllocator backed by a page allocator because a build is a short-lived,
- // one shot program. We don't need to waste time freeing memory and finding places to squish
- // bytes into. So we free everything all at once at the very end.
- var single_threaded_arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
+ // The build runner is often short-lived, but thanks to `--watch` and `--webui`, that's not
+ // always the case. So, we do need a true gpa for some things.
+ var debug_gpa_state: std.heap.DebugAllocator(.{}) = .init;
+ defer _ = debug_gpa_state.deinit();
+ const gpa = debug_gpa_state.allocator();
+
+ // ...but we'll back our arena by `std.heap.page_allocator` for efficiency.
+ var single_threaded_arena: std.heap.ArenaAllocator = .init(std.heap.page_allocator);
defer single_threaded_arena.deinit();
-
- var thread_safe_arena: std.heap.ThreadSafeAllocator = .{
- .child_allocator = single_threaded_arena.allocator(),
- };
+ var thread_safe_arena: std.heap.ThreadSafeAllocator = .{ .child_allocator = single_threaded_arena.allocator() };
const arena = thread_safe_arena.allocator();
const args = try process.argsAlloc(arena);
@@ -81,6 +82,7 @@ pub fn main() !void {
.query = .{},
.result = try std.zig.system.resolveTargetQuery(.{}),
},
+ .time_report = false,
};
graph.cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() });
@@ -113,7 +115,7 @@ pub fn main() !void {
var watch = false;
var fuzz = false;
var debounce_interval_ms: u16 = 50;
- var listen_port: u16 = 0;
+ var webui_listen: ?std.net.Address = null;
while (nextArg(args, &arg_idx)) |arg| {
if (mem.startsWith(u8, arg, "-Z")) {
@@ -220,13 +222,13 @@ pub fn main() !void {
next_arg, @errorName(err),
});
};
- } else if (mem.eql(u8, arg, "--port")) {
- const next_arg = nextArg(args, &arg_idx) orelse
- fatalWithHint("expected u16 after '{s}'", .{arg});
- listen_port = std.fmt.parseUnsigned(u16, next_arg, 10) catch |err| {
- fatal("unable to parse port '{s}' as unsigned 16-bit integer: {s}\n", .{
- next_arg, @errorName(err),
- });
+ } else if (mem.eql(u8, arg, "--webui")) {
+ webui_listen = std.net.Address.parseIp("::1", 0) catch unreachable;
+ } else if (mem.startsWith(u8, arg, "--webui=")) {
+ const addr_str = arg["--webui=".len..];
+ if (std.mem.eql(u8, addr_str, "-")) fatal("web interface cannot listen on stdio", .{});
+ webui_listen = std.net.Address.parseIpAndPort(addr_str) catch |err| {
+ fatal("invalid web UI address '{s}': {s}", .{ addr_str, @errorName(err) });
};
} else if (mem.eql(u8, arg, "--debug-log")) {
const next_arg = nextArgOrFatal(args, &arg_idx);
@@ -267,8 +269,16 @@ pub fn main() !void {
prominent_compile_errors = true;
} else if (mem.eql(u8, arg, "--watch")) {
watch = true;
+ } else if (mem.eql(u8, arg, "--time-report")) {
+ graph.time_report = true;
+ if (webui_listen == null) {
+ webui_listen = std.net.Address.parseIp("::1", 0) catch unreachable;
+ }
} else if (mem.eql(u8, arg, "--fuzz")) {
fuzz = true;
+ if (webui_listen == null) {
+ webui_listen = std.net.Address.parseIp("::1", 0) catch unreachable;
+ }
} else if (mem.eql(u8, arg, "-fincremental")) {
graph.incremental = true;
} else if (mem.eql(u8, arg, "-fno-incremental")) {
@@ -331,6 +341,10 @@ pub fn main() !void {
}
}
+ if (webui_listen != null and watch) fatal(
+ \\the build system does not yet support combining '--webui' and '--watch'; consider omitting '--watch' in favour of the web UI "Rebuild" button
+ , .{});
+
const stderr: std.fs.File = .stderr();
const ttyconf = get_tty_conf(color, stderr);
switch (ttyconf) {
@@ -394,14 +408,16 @@ pub fn main() !void {
}
var run: Run = .{
+ .gpa = gpa,
+
.max_rss = max_rss,
.max_rss_is_default = false,
.max_rss_mutex = .{},
.skip_oom_steps = skip_oom_steps,
.watch = watch,
- .fuzz = fuzz,
- .memory_blocked_steps = std.ArrayList(*Step).init(arena),
- .step_stack = .{},
+ .web_server = undefined, // set after `prepare`
+ .memory_blocked_steps = .empty,
+ .step_stack = .empty,
.prominent_compile_errors = prominent_compile_errors,
.claimed_rss = 0,
@@ -410,74 +426,81 @@ pub fn main() !void {
.stderr = stderr,
.thread_pool = undefined,
};
+ defer {
+ run.memory_blocked_steps.deinit(gpa);
+ run.step_stack.deinit(gpa);
+ }
if (run.max_rss == 0) {
run.max_rss = process.totalSystemMemory() catch std.math.maxInt(u64);
run.max_rss_is_default = true;
}
- const gpa = arena;
- prepare(gpa, arena, builder, targets.items, &run, graph.random_seed) catch |err| switch (err) {
+ prepare(arena, builder, targets.items, &run, graph.random_seed) catch |err| switch (err) {
error.UncleanExit => process.exit(1),
else => return err,
};
- var w: Watch = if (watch and Watch.have_impl) try Watch.init() else undefined;
+ var w: Watch = w: {
+ if (!watch) break :w undefined;
+ if (!Watch.have_impl) fatal("--watch not yet implemented for {s}", .{@tagName(builtin.os.tag)});
+ break :w try .init();
+ };
try run.thread_pool.init(thread_pool_options);
defer run.thread_pool.deinit();
+ run.web_server = if (webui_listen) |listen_address| .init(.{
+ .gpa = gpa,
+ .thread_pool = &run.thread_pool,
+ .graph = &graph,
+ .all_steps = run.step_stack.keys(),
+ .ttyconf = run.ttyconf,
+ .root_prog_node = main_progress_node,
+ .watch = watch,
+ .listen_address = listen_address,
+ }) else null;
+
+ if (run.web_server) |*ws| {
+ ws.start() catch |err| fatal("failed to start web server: {s}", .{@errorName(err)});
+ }
+
rebuild: while (true) {
+ if (run.web_server) |*ws| ws.startBuild();
+
runStepNames(
- gpa,
builder,
targets.items,
main_progress_node,
&run,
) catch |err| switch (err) {
error.UncleanExit => {
- assert(!run.watch);
+ assert(!run.watch and run.web_server == null);
process.exit(1);
},
else => return err,
};
- if (fuzz) {
- if (builtin.single_threaded) {
- fatal("--fuzz not yet implemented for single-threaded builds", .{});
- }
- switch (builtin.os.tag) {
- // Current implementation depends on two things that need to be ported to Windows:
- // * Memory-mapping to share data between the fuzzer and build runner.
- // * COFF/PE support added to `std.debug.Info` (it needs a batching API for resolving
- // many addresses to source locations).
- .windows => fatal("--fuzz not yet implemented for {s}", .{@tagName(builtin.os.tag)}),
- else => {},
- }
- if (@bitSizeOf(usize) != 64) {
- // Current implementation depends on posix.mmap()'s second parameter, `length: usize`,
- // being compatible with `std.fs.getEndPos() u64`'s return value. This is not the case
- // on 32-bit platforms.
- // Affects or affected by issues #5185, #22523, and #22464.
- fatal("--fuzz not yet implemented on {d}-bit platforms", .{@bitSizeOf(usize)});
- }
- const listen_address = std.net.Address.parseIp("127.0.0.1", listen_port) catch unreachable;
- try Fuzz.start(
- gpa,
- arena,
- global_cache_directory,
- zig_lib_directory,
- zig_exe,
- &run.thread_pool,
- run.step_stack.keys(),
- run.ttyconf,
- listen_address,
- main_progress_node,
- );
+
+ if (run.web_server) |*web_server| {
+ web_server.finishBuild(.{ .fuzz = fuzz });
}
- if (!watch) return cleanExit();
+ if (!watch and run.web_server == null) {
+ return cleanExit();
+ }
- if (!Watch.have_impl) fatal("--watch not yet implemented for {s}", .{@tagName(builtin.os.tag)});
+ if (run.web_server) |*ws| {
+ assert(!watch); // fatal error after CLI parsing
+ while (true) switch (ws.wait()) {
+ .rebuild => {
+ for (run.step_stack.keys()) |step| {
+ step.state = .precheck_done;
+ step.reset(gpa);
+ }
+ continue :rebuild;
+ },
+ };
+ }
try w.update(gpa, run.step_stack.keys());
@@ -491,15 +514,16 @@ pub fn main() !void {
w.dir_table.entries.len, countSubProcesses(run.step_stack.keys()),
}) catch &caption_buf;
var debouncing_node = main_progress_node.start(caption, 0);
- var debounce_timeout: Watch.Timeout = .none;
- while (true) switch (try w.wait(gpa, debounce_timeout)) {
+ var in_debounce = false;
+ while (true) switch (try w.wait(gpa, if (in_debounce) .{ .ms = debounce_interval_ms } else .none)) {
.timeout => {
+ assert(in_debounce);
debouncing_node.end();
markFailedStepsDirty(gpa, run.step_stack.keys());
continue :rebuild;
},
- .dirty => if (debounce_timeout == .none) {
- debounce_timeout = .{ .ms = debounce_interval_ms };
+ .dirty => if (!in_debounce) {
+ in_debounce = true;
debouncing_node.end();
debouncing_node = main_progress_node.start("Debouncing (Change Detected)", 0);
},
@@ -530,13 +554,16 @@ fn countSubProcesses(all_steps: []const *Step) usize {
}
const Run = struct {
+ gpa: Allocator,
max_rss: u64,
max_rss_is_default: bool,
max_rss_mutex: std.Thread.Mutex,
skip_oom_steps: bool,
watch: bool,
- fuzz: bool,
- memory_blocked_steps: std.ArrayList(*Step),
+ web_server: ?WebServer,
+ /// Allocated into `gpa`.
+ memory_blocked_steps: std.ArrayListUnmanaged(*Step),
+ /// Allocated into `gpa`.
step_stack: std.AutoArrayHashMapUnmanaged(*Step, void),
prominent_compile_errors: bool,
thread_pool: std.Thread.Pool,
@@ -547,19 +574,19 @@ const Run = struct {
stderr: File,
fn cleanExit(run: Run) void {
- if (run.watch or run.fuzz) return;
+ if (run.watch or run.web_server != null) return;
return runner.cleanExit();
}
};
fn prepare(
- gpa: Allocator,
arena: Allocator,
b: *std.Build,
step_names: []const []const u8,
run: *Run,
seed: u32,
) !void {
+ const gpa = run.gpa;
const step_stack = &run.step_stack;
if (step_names.len == 0) {
@@ -583,7 +610,7 @@ fn prepare(
rand.shuffle(*Step, starting_steps);
for (starting_steps) |s| {
- constructGraphAndCheckForDependencyLoop(b, s, &run.step_stack, rand) catch |err| switch (err) {
+ constructGraphAndCheckForDependencyLoop(gpa, b, s, &run.step_stack, rand) catch |err| switch (err) {
error.DependencyLoopDetected => return uncleanExit(),
else => |e| return e,
};
@@ -614,12 +641,12 @@ fn prepare(
}
fn runStepNames(
- gpa: Allocator,
b: *std.Build,
step_names: []const []const u8,
parent_prog_node: std.Progress.Node,
run: *Run,
) !void {
+ const gpa = run.gpa;
const step_stack = &run.step_stack;
const thread_pool = &run.thread_pool;
@@ -675,6 +702,7 @@ fn runStepNames(
// B will be marked as dependency_failure, while A may never be queued, and thus
// remain in the initial state of precheck_done.
s.state = .dependency_failure;
+ if (run.web_server) |*ws| ws.updateStepStatus(s, .failure);
pending_count += 1;
},
.dependency_failure => pending_count += 1,
@@ -768,7 +796,7 @@ fn runStepNames(
}
}
- if (!run.watch) {
+ if (!run.watch and run.web_server == null) {
// Signal to parent process that we have printed compile errors. The
// parent process may choose to omit the "following command failed"
// line in this case.
@@ -777,7 +805,7 @@ fn runStepNames(
}
}
- if (!run.watch) return uncleanExit();
+ if (!run.watch and run.web_server == null) return uncleanExit();
}
const PrintNode = struct {
@@ -1022,6 +1050,7 @@ fn printTreeStep(
/// when it finishes executing in `workerMakeOneStep`, it spawns next steps
/// to run in random order
fn constructGraphAndCheckForDependencyLoop(
+ gpa: Allocator,
b: *std.Build,
s: *Step,
step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void),
@@ -1035,17 +1064,19 @@ fn constructGraphAndCheckForDependencyLoop(
.precheck_unstarted => {
s.state = .precheck_started;
- try step_stack.ensureUnusedCapacity(b.allocator, s.dependencies.items.len);
+ try step_stack.ensureUnusedCapacity(gpa, s.dependencies.items.len);
// We dupe to avoid shuffling the steps in the summary, which depends
// on s.dependencies' order.
- const deps = b.allocator.dupe(*Step, s.dependencies.items) catch @panic("OOM");
+ const deps = gpa.dupe(*Step, s.dependencies.items) catch @panic("OOM");
+ defer gpa.free(deps);
+
rand.shuffle(*Step, deps);
for (deps) |dep| {
- try step_stack.put(b.allocator, dep, {});
+ try step_stack.put(gpa, dep, {});
try dep.dependants.append(b.allocator, s);
- constructGraphAndCheckForDependencyLoop(b, dep, step_stack, rand) catch |err| {
+ constructGraphAndCheckForDependencyLoop(gpa, b, dep, step_stack, rand) catch |err| {
if (err == error.DependencyLoopDetected) {
std.debug.print(" {s}\n", .{s.name});
}
@@ -1084,6 +1115,7 @@ fn workerMakeOneStep(
.success, .skipped => continue,
.failure, .dependency_failure, .skipped_oom => {
@atomicStore(Step.State, &s.state, .dependency_failure, .seq_cst);
+ if (run.web_server) |*ws| ws.updateStepStatus(s, .failure);
return;
},
.precheck_done, .running => {
@@ -1109,7 +1141,7 @@ fn workerMakeOneStep(
if (new_claimed_rss > run.max_rss) {
// Running this step right now could possibly exceed the allotted RSS.
// Add this step to the queue of memory-blocked steps.
- run.memory_blocked_steps.append(s) catch @panic("OOM");
+ run.memory_blocked_steps.append(run.gpa, s) catch @panic("OOM");
return;
}
@@ -1126,10 +1158,14 @@ fn workerMakeOneStep(
const sub_prog_node = prog_node.start(s.name, 0);
defer sub_prog_node.end();
+ if (run.web_server) |*ws| ws.updateStepStatus(s, .wip);
+
const make_result = s.make(.{
.progress_node = sub_prog_node,
.thread_pool = thread_pool,
.watch = run.watch,
+ .web_server = if (run.web_server) |*ws| ws else null,
+ .gpa = run.gpa,
});
// No matter the result, we want to display error/warning messages.
@@ -1141,21 +1177,24 @@ fn workerMakeOneStep(
if (show_error_msgs or show_compile_errors or show_stderr) {
const bw = std.debug.lockStderrWriter(&stdio_buffer_allocation);
defer std.debug.unlockStderrWriter();
-
- const gpa = b.allocator;
- printErrorMessages(gpa, s, .{ .ttyconf = run.ttyconf }, bw, run.prominent_compile_errors) catch {};
+ printErrorMessages(run.gpa, s, .{ .ttyconf = run.ttyconf }, bw, run.prominent_compile_errors) catch {};
}
handle_result: {
if (make_result) |_| {
@atomicStore(Step.State, &s.state, .success, .seq_cst);
+ if (run.web_server) |*ws| ws.updateStepStatus(s, .success);
} else |err| switch (err) {
error.MakeFailed => {
@atomicStore(Step.State, &s.state, .failure, .seq_cst);
+ if (run.web_server) |*ws| ws.updateStepStatus(s, .failure);
std.Progress.setStatus(.failure_working);
break :handle_result;
},
- error.MakeSkipped => @atomicStore(Step.State, &s.state, .skipped, .seq_cst),
+ error.MakeSkipped => {
+ @atomicStore(Step.State, &s.state, .skipped, .seq_cst);
+ if (run.web_server) |*ws| ws.updateStepStatus(s, .success);
+ },
}
// Successful completion of a step, so we queue up its dependants as well.
@@ -1255,10 +1294,10 @@ pub fn printErrorMessages(
}
fn printSteps(builder: *std.Build, w: *Writer) !void {
- const allocator = builder.allocator;
+ const arena = builder.graph.arena;
for (builder.top_level_steps.values()) |top_level_step| {
const name = if (&top_level_step.step == builder.default_step)
- try fmt.allocPrint(allocator, "{s} (default)", .{top_level_step.step.name})
+ try fmt.allocPrint(arena, "{s} (default)", .{top_level_step.step.name})
else
top_level_step.step.name;
try w.print(" {s:<28} {s}\n", .{ name, top_level_step.description });
@@ -1319,8 +1358,11 @@ fn printUsage(b: *std.Build, w: *Writer) !void {
\\ needed (Default) Lazy dependencies are fetched as needed
\\ all Lazy dependencies are always fetched
\\ --watch Continuously rebuild when source files are modified
- \\ --fuzz Continuously search for unit test failures
\\ --debounce Delay before rebuilding after changed file detected
+ \\ --webui[=ip] Enable the web interface on the given IP address
+ \\ --fuzz Continuously search for unit test failures (implies '--webui')
+ \\ --time-report Force full rebuild and provide detailed information on
+ \\ compilation time of Zig source code (implies '--webui')
\\ -fincremental Enable incremental compilation
\\ -fno-incremental Disable incremental compilation
\\
@@ -1328,7 +1370,7 @@ fn printUsage(b: *std.Build, w: *Writer) !void {
\\
);
- const arena = b.allocator;
+ const arena = b.graph.arena;
if (b.available_options_list.items.len == 0) {
try w.print(" (none)\n", .{});
} else {
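
A side note on the flag handling in this file (an illustrative sketch only, not part of the patch): bare '--webui', '--fuzz', and '--time-report' all fall back to an ephemeral port on the IPv6 loopback, while '--webui=ADDR:PORT' parses an explicit address. Consolidated, the resolution looks roughly like:

    const std = @import("std");

    /// Hypothetical consolidation of the address resolution scattered through
    /// main() above; parseIp and parseIpAndPort are the same std.net.Address
    /// calls the patch uses.
    fn resolveWebUiListenAddress(arg: []const u8, current: ?std.net.Address) !?std.net.Address {
        const default: std.net.Address = std.net.Address.parseIp("::1", 0) catch unreachable;
        if (std.mem.eql(u8, arg, "--webui")) return default;
        if (std.mem.startsWith(u8, arg, "--webui=")) {
            return try std.net.Address.parseIpAndPort(arg["--webui=".len..]);
        }
        // '--fuzz' and '--time-report' enable the web UI but never override an
        // explicitly requested address.
        if (std.mem.eql(u8, arg, "--fuzz") or std.mem.eql(u8, arg, "--time-report")) {
            return current orelse default;
        }
        return current;
    }
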
diff --git a/lib/fuzzer.zig b/lib/fuzzer.zig
index ce23f63421ac..7f1592f0de7c 100644
--- a/lib/fuzzer.zig
+++ b/lib/fuzzer.zig
@@ -3,7 +3,7 @@ const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const fatal = std.process.fatal;
-const SeenPcsHeader = std.Build.Fuzz.abi.SeenPcsHeader;
+const SeenPcsHeader = std.Build.abi.fuzz.SeenPcsHeader;
pub const std_options = std.Options{
.logFn = logOverride,
diff --git a/lib/fuzzer/web/index.html b/lib/fuzzer/web/index.html
deleted file mode 100644
index 325342e8ebf5..000000000000
--- a/lib/fuzzer/web/index.html
+++ /dev/null
@@ -1,161 +0,0 @@
-
-
-
-
- Zig Build System Interface
-
-
-
- Loading JavaScript...
-
-
- Total Runs:
- Unique Runs:
- Speed (Runs/Second):
- Coverage:
- Entry Points:
-
-
-
-
-
-
-
diff --git a/lib/fuzzer/web/main.js b/lib/fuzzer/web/main.js
deleted file mode 100644
index 94f09391bb67..000000000000
--- a/lib/fuzzer/web/main.js
+++ /dev/null
@@ -1,252 +0,0 @@
-(function() {
- const domStatus = document.getElementById("status");
- const domSectSource = document.getElementById("sectSource");
- const domSectStats = document.getElementById("sectStats");
- const domSourceText = document.getElementById("sourceText");
- const domStatTotalRuns = document.getElementById("statTotalRuns");
- const domStatUniqueRuns = document.getElementById("statUniqueRuns");
- const domStatSpeed = document.getElementById("statSpeed");
- const domStatCoverage = document.getElementById("statCoverage");
- const domEntryPointsList = document.getElementById("entryPointsList");
-
- let wasm_promise = fetch("main.wasm");
- let sources_promise = fetch("sources.tar").then(function(response) {
- if (!response.ok) throw new Error("unable to download sources");
- return response.arrayBuffer();
- });
- var wasm_exports = null;
- var curNavSearch = null;
- var curNavLocation = null;
-
- const text_decoder = new TextDecoder();
- const text_encoder = new TextEncoder();
-
- domStatus.textContent = "Loading WebAssembly...";
- WebAssembly.instantiateStreaming(wasm_promise, {
- js: {
- log: function(ptr, len) {
- const msg = decodeString(ptr, len);
- console.log(msg);
- },
- panic: function (ptr, len) {
- const msg = decodeString(ptr, len);
- throw new Error("panic: " + msg);
- },
- timestamp: function () {
- return BigInt(new Date());
- },
- emitSourceIndexChange: onSourceIndexChange,
- emitCoverageUpdate: onCoverageUpdate,
- emitEntryPointsUpdate: renderStats,
- },
- }).then(function(obj) {
- wasm_exports = obj.instance.exports;
- window.wasm = obj; // for debugging
- domStatus.textContent = "Loading sources tarball...";
-
- sources_promise.then(function(buffer) {
- domStatus.textContent = "Parsing sources...";
- const js_array = new Uint8Array(buffer);
- const ptr = wasm_exports.alloc(js_array.length);
- const wasm_array = new Uint8Array(wasm_exports.memory.buffer, ptr, js_array.length);
- wasm_array.set(js_array);
- wasm_exports.unpack(ptr, js_array.length);
-
- window.addEventListener('popstate', onPopState, false);
- onHashChange(null);
-
- domStatus.textContent = "Waiting for server to send source location metadata...";
- connectWebSocket();
- });
- });
-
- function onPopState(ev) {
- onHashChange(ev.state);
- }
-
- function onHashChange(state) {
- history.replaceState({}, "");
- navigate(location.hash);
- if (state == null) window.scrollTo({top: 0});
- }
-
- function navigate(location_hash) {
- domSectSource.classList.add("hidden");
-
- curNavLocation = null;
- curNavSearch = null;
-
- if (location_hash.length > 1 && location_hash[0] === '#') {
- const query = location_hash.substring(1);
- const qpos = query.indexOf("?");
- let nonSearchPart;
- if (qpos === -1) {
- nonSearchPart = query;
- } else {
- nonSearchPart = query.substring(0, qpos);
- curNavSearch = decodeURIComponent(query.substring(qpos + 1));
- }
-
- if (nonSearchPart[0] == "l") {
- curNavLocation = +nonSearchPart.substring(1);
- renderSource(curNavLocation);
- }
- }
-
- render();
- }
-
- function connectWebSocket() {
- const host = document.location.host;
- const pathname = document.location.pathname;
- const isHttps = document.location.protocol === 'https:';
- const match = host.match(/^(.+):(\d+)$/);
- const defaultPort = isHttps ? 443 : 80;
- const port = match ? parseInt(match[2], 10) : defaultPort;
- const hostName = match ? match[1] : host;
- const wsProto = isHttps ? "wss:" : "ws:";
- const wsUrl = wsProto + '//' + hostName + ':' + port + pathname;
- ws = new WebSocket(wsUrl);
- ws.binaryType = "arraybuffer";
- ws.addEventListener('message', onWebSocketMessage, false);
- ws.addEventListener('error', timeoutThenCreateNew, false);
- ws.addEventListener('close', timeoutThenCreateNew, false);
- ws.addEventListener('open', onWebSocketOpen, false);
- }
-
- function onWebSocketOpen() {
- //console.log("web socket opened");
- }
-
- function onWebSocketMessage(ev) {
- wasmOnMessage(ev.data);
- }
-
- function timeoutThenCreateNew() {
- ws.removeEventListener('message', onWebSocketMessage, false);
- ws.removeEventListener('error', timeoutThenCreateNew, false);
- ws.removeEventListener('close', timeoutThenCreateNew, false);
- ws.removeEventListener('open', onWebSocketOpen, false);
- ws = null;
- setTimeout(connectWebSocket, 1000);
- }
-
- function wasmOnMessage(data) {
- const jsArray = new Uint8Array(data);
- const ptr = wasm_exports.message_begin(jsArray.length);
- const wasmArray = new Uint8Array(wasm_exports.memory.buffer, ptr, jsArray.length);
- wasmArray.set(jsArray);
- wasm_exports.message_end();
- }
-
- function onSourceIndexChange() {
- render();
- if (curNavLocation != null) renderSource(curNavLocation);
- }
-
- function onCoverageUpdate() {
- renderStats();
- renderCoverage();
- }
-
- function render() {
- domStatus.classList.add("hidden");
- }
-
- function renderStats() {
- const totalRuns = wasm_exports.totalRuns();
- const uniqueRuns = wasm_exports.uniqueRuns();
- const totalSourceLocations = wasm_exports.totalSourceLocations();
- const coveredSourceLocations = wasm_exports.coveredSourceLocations();
- domStatTotalRuns.innerText = totalRuns;
- domStatUniqueRuns.innerText = uniqueRuns + " (" + percent(uniqueRuns, totalRuns) + "%)";
- domStatCoverage.innerText = coveredSourceLocations + " / " + totalSourceLocations + " (" + percent(coveredSourceLocations, totalSourceLocations) + "%)";
- domStatSpeed.innerText = wasm_exports.totalRunsPerSecond().toFixed(0);
-
- const entryPoints = unwrapInt32Array(wasm_exports.entryPoints());
- resizeDomList(domEntryPointsList, entryPoints.length, " ");
- for (let i = 0; i < entryPoints.length; i += 1) {
- const liDom = domEntryPointsList.children[i];
- liDom.innerHTML = unwrapString(wasm_exports.sourceLocationLinkHtml(entryPoints[i]));
- }
-
-
- domSectStats.classList.remove("hidden");
- }
-
- function renderCoverage() {
- if (curNavLocation == null) return;
- const sourceLocationIndex = curNavLocation;
-
- for (let i = 0; i < domSourceText.children.length; i += 1) {
- const childDom = domSourceText.children[i];
- if (childDom.id != null && childDom.id[0] == "l") {
- childDom.classList.add("l");
- childDom.classList.remove("c");
- }
- }
- const coveredList = unwrapInt32Array(wasm_exports.sourceLocationFileCoveredList(sourceLocationIndex));
- for (let i = 0; i < coveredList.length; i += 1) {
- document.getElementById("l" + coveredList[i]).classList.add("c");
- }
- }
-
- function resizeDomList(listDom, desiredLen, templateHtml) {
- for (let i = listDom.childElementCount; i < desiredLen; i += 1) {
- listDom.insertAdjacentHTML('beforeend', templateHtml);
- }
- while (desiredLen < listDom.childElementCount) {
- listDom.removeChild(listDom.lastChild);
- }
- }
-
- function percent(a, b) {
- return ((Number(a) / Number(b)) * 100).toFixed(1);
- }
-
- function renderSource(sourceLocationIndex) {
- const pathName = unwrapString(wasm_exports.sourceLocationPath(sourceLocationIndex));
- if (pathName.length === 0) return;
-
- const h2 = domSectSource.children[0];
- h2.innerText = pathName;
- domSourceText.innerHTML = unwrapString(wasm_exports.sourceLocationFileHtml(sourceLocationIndex));
-
- domSectSource.classList.remove("hidden");
-
- // Empirically, Firefox needs this requestAnimationFrame in order for the scrollIntoView to work.
- requestAnimationFrame(function() {
- const slDom = document.getElementById("l" + sourceLocationIndex);
- if (slDom != null) slDom.scrollIntoView({
- behavior: "smooth",
- block: "center",
- });
- });
- }
-
- function decodeString(ptr, len) {
- if (len === 0) return "";
- return text_decoder.decode(new Uint8Array(wasm_exports.memory.buffer, ptr, len));
- }
-
- function unwrapInt32Array(bigint) {
- const ptr = Number(bigint & 0xffffffffn);
- const len = Number(bigint >> 32n);
- if (len === 0) return new Uint32Array();
- return new Uint32Array(wasm_exports.memory.buffer, ptr, len);
- }
-
- function setInputString(s) {
- const jsArray = text_encoder.encode(s);
- const len = jsArray.length;
- const ptr = wasm_exports.set_input_string(len);
- const wasmArray = new Uint8Array(wasm_exports.memory.buffer, ptr, len);
- wasmArray.set(jsArray);
- }
-
- function unwrapString(bigint) {
- const ptr = Number(bigint & 0xffffffffn);
- const len = Number(bigint >> 32n);
- return decodeString(ptr, len);
- }
-})();
diff --git a/lib/std/Build.zig b/lib/std/Build.zig
index d6b0e68f5d4a..db539e7b09ab 100644
--- a/lib/std/Build.zig
+++ b/lib/std/Build.zig
@@ -22,6 +22,8 @@ pub const Step = @import("Build/Step.zig");
pub const Module = @import("Build/Module.zig");
pub const Watch = @import("Build/Watch.zig");
pub const Fuzz = @import("Build/Fuzz.zig");
+pub const WebServer = @import("Build/WebServer.zig");
+pub const abi = @import("Build/abi.zig");
/// Shared state among all Build instances.
graph: *Graph,
@@ -125,6 +127,7 @@ pub const Graph = struct {
random_seed: u32 = 0,
dependency_cache: InitializedDepMap = .empty,
allow_so_scripts: ?bool = null,
+ time_report: bool,
};
const AvailableDeps = []const struct { []const u8, []const u8 };
diff --git a/lib/std/Build/Fuzz.zig b/lib/std/Build/Fuzz.zig
index 28f8781dd196..a25b50175597 100644
--- a/lib/std/Build/Fuzz.zig
+++ b/lib/std/Build/Fuzz.zig
@@ -1,108 +1,134 @@
-const builtin = @import("builtin");
const std = @import("../std.zig");
const Build = std.Build;
+const Cache = Build.Cache;
const Step = std.Build.Step;
const assert = std.debug.assert;
const fatal = std.process.fatal;
const Allocator = std.mem.Allocator;
const log = std.log;
+const Coverage = std.debug.Coverage;
+const abi = Build.abi.fuzz;
const Fuzz = @This();
const build_runner = @import("root");
-pub const WebServer = @import("Fuzz/WebServer.zig");
-pub const abi = @import("Fuzz/abi.zig");
-
-pub fn start(
- gpa: Allocator,
- arena: Allocator,
- global_cache_directory: Build.Cache.Directory,
- zig_lib_directory: Build.Cache.Directory,
- zig_exe_path: []const u8,
- thread_pool: *std.Thread.Pool,
- all_steps: []const *Step,
- ttyconf: std.io.tty.Config,
- listen_address: std.net.Address,
- prog_node: std.Progress.Node,
-) Allocator.Error!void {
- const fuzz_run_steps = block: {
- const rebuild_node = prog_node.start("Rebuilding Unit Tests", 0);
+ws: *Build.WebServer,
+
+/// Allocated into `ws.gpa`.
+run_steps: []const *Step.Run,
+
+wait_group: std.Thread.WaitGroup,
+prog_node: std.Progress.Node,
+
+/// Protects `coverage_files`.
+coverage_mutex: std.Thread.Mutex,
+coverage_files: std.AutoArrayHashMapUnmanaged(u64, CoverageMap),
+
+queue_mutex: std.Thread.Mutex,
+queue_cond: std.Thread.Condition,
+msg_queue: std.ArrayListUnmanaged(Msg),
+
+const Msg = union(enum) {
+ coverage: struct {
+ id: u64,
+ run: *Step.Run,
+ },
+ entry_point: struct {
+ coverage_id: u64,
+ addr: u64,
+ },
+};
+
+const CoverageMap = struct {
+ mapped_memory: []align(std.heap.page_size_min) const u8,
+ coverage: Coverage,
+ source_locations: []Coverage.SourceLocation,
+ /// Elements are indexes into `source_locations` pointing to the unit tests that are being fuzz tested.
+ entry_points: std.ArrayListUnmanaged(u32),
+ start_timestamp: i64,
+
+ fn deinit(cm: *CoverageMap, gpa: Allocator) void {
+ std.posix.munmap(cm.mapped_memory);
+ cm.coverage.deinit(gpa);
+ cm.* = undefined;
+ }
+};
+
+pub fn init(ws: *Build.WebServer) Allocator.Error!Fuzz {
+ const gpa = ws.gpa;
+
+ const run_steps: []const *Step.Run = steps: {
+ var steps: std.ArrayListUnmanaged(*Step.Run) = .empty;
+ defer steps.deinit(gpa);
+ const rebuild_node = ws.root_prog_node.start("Rebuilding Unit Tests", 0);
defer rebuild_node.end();
- var wait_group: std.Thread.WaitGroup = .{};
- defer wait_group.wait();
- var fuzz_run_steps: std.ArrayListUnmanaged(*Step.Run) = .empty;
- defer fuzz_run_steps.deinit(gpa);
- for (all_steps) |step| {
+ var rebuild_wg: std.Thread.WaitGroup = .{};
+ defer rebuild_wg.wait();
+
+ for (ws.all_steps) |step| {
const run = step.cast(Step.Run) orelse continue;
- if (run.fuzz_tests.items.len > 0 and run.producer != null) {
- thread_pool.spawnWg(&wait_group, rebuildTestsWorkerRun, .{ run, ttyconf, rebuild_node });
- try fuzz_run_steps.append(gpa, run);
- }
+ if (run.producer == null) continue;
+ if (run.fuzz_tests.items.len == 0) continue;
+ try steps.append(gpa, run);
+ ws.thread_pool.spawnWg(&rebuild_wg, rebuildTestsWorkerRun, .{ run, gpa, ws.ttyconf, rebuild_node });
}
- if (fuzz_run_steps.items.len == 0) fatal("no fuzz tests found", .{});
- rebuild_node.setEstimatedTotalItems(fuzz_run_steps.items.len);
- break :block try arena.dupe(*Step.Run, fuzz_run_steps.items);
+
+ if (steps.items.len == 0) fatal("no fuzz tests found", .{});
+ rebuild_node.setEstimatedTotalItems(steps.items.len);
+ break :steps try gpa.dupe(*Step.Run, steps.items);
};
+ errdefer gpa.free(run_steps);
- // Detect failure.
- for (fuzz_run_steps) |run| {
+ for (run_steps) |run| {
assert(run.fuzz_tests.items.len > 0);
if (run.rebuilt_executable == null)
fatal("one or more unit tests failed to be rebuilt in fuzz mode", .{});
}
- var web_server: WebServer = .{
- .gpa = gpa,
- .global_cache_directory = global_cache_directory,
- .zig_lib_directory = zig_lib_directory,
- .zig_exe_path = zig_exe_path,
- .listen_address = listen_address,
- .fuzz_run_steps = fuzz_run_steps,
-
- .msg_queue = .{},
- .mutex = .{},
- .condition = .{},
-
- .coverage_files = .{},
+ return .{
+ .ws = ws,
+ .run_steps = run_steps,
+ .wait_group = .{},
+ .prog_node = .none,
+ .coverage_files = .empty,
.coverage_mutex = .{},
- .coverage_condition = .{},
-
- .base_timestamp = std.time.nanoTimestamp(),
+ .queue_mutex = .{},
+ .queue_cond = .{},
+ .msg_queue = .empty,
};
+}
- // For accepting HTTP connections.
- const web_server_thread = std.Thread.spawn(.{}, WebServer.run, .{&web_server}) catch |err| {
- fatal("unable to spawn web server thread: {s}", .{@errorName(err)});
- };
- defer web_server_thread.join();
+pub fn start(fuzz: *Fuzz) void {
+ const ws = fuzz.ws;
+ fuzz.prog_node = ws.root_prog_node.start("Fuzzing", fuzz.run_steps.len);
// For polling messages and sending updates to subscribers.
- const coverage_thread = std.Thread.spawn(.{}, WebServer.coverageRun, .{&web_server}) catch |err| {
+ fuzz.wait_group.start();
+ _ = std.Thread.spawn(.{}, coverageRun, .{fuzz}) catch |err| {
+ fuzz.wait_group.finish();
fatal("unable to spawn coverage thread: {s}", .{@errorName(err)});
};
- defer coverage_thread.join();
-
- {
- const fuzz_node = prog_node.start("Fuzzing", fuzz_run_steps.len);
- defer fuzz_node.end();
- var wait_group: std.Thread.WaitGroup = .{};
- defer wait_group.wait();
- for (fuzz_run_steps) |run| {
- for (run.fuzz_tests.items) |unit_test_index| {
- assert(run.rebuilt_executable != null);
- thread_pool.spawnWg(&wait_group, fuzzWorkerRun, .{
- run, &web_server, unit_test_index, ttyconf, fuzz_node,
- });
- }
+ for (fuzz.run_steps) |run| {
+ for (run.fuzz_tests.items) |unit_test_index| {
+ assert(run.rebuilt_executable != null);
+ ws.thread_pool.spawnWg(&fuzz.wait_group, fuzzWorkerRun, .{
+ fuzz, run, unit_test_index,
+ });
}
}
+}
+pub fn deinit(fuzz: *Fuzz) void {
+ if (true) @panic("TODO: terminate the fuzzer processes");
+ fuzz.wait_group.wait();
+ fuzz.prog_node.end();
- log.err("all fuzz workers crashed", .{});
+ const gpa = fuzz.ws.gpa;
+ gpa.free(fuzz.run_steps);
}
-fn rebuildTestsWorkerRun(run: *Step.Run, ttyconf: std.io.tty.Config, parent_prog_node: std.Progress.Node) void {
- rebuildTestsWorkerRunFallible(run, ttyconf, parent_prog_node) catch |err| {
+fn rebuildTestsWorkerRun(run: *Step.Run, gpa: Allocator, ttyconf: std.io.tty.Config, parent_prog_node: std.Progress.Node) void {
+ rebuildTestsWorkerRunFallible(run, gpa, ttyconf, parent_prog_node) catch |err| {
const compile = run.producer.?;
log.err("step '{s}': failed to rebuild in fuzz mode: {s}", .{
compile.step.name, @errorName(err),
@@ -110,14 +136,12 @@ fn rebuildTestsWorkerRun(run: *Step.Run, ttyconf: std.io.tty.Config, parent_prog
};
}
-fn rebuildTestsWorkerRunFallible(run: *Step.Run, ttyconf: std.io.tty.Config, parent_prog_node: std.Progress.Node) !void {
- const gpa = run.step.owner.allocator;
-
+fn rebuildTestsWorkerRunFallible(run: *Step.Run, gpa: Allocator, ttyconf: std.io.tty.Config, parent_prog_node: std.Progress.Node) !void {
const compile = run.producer.?;
const prog_node = parent_prog_node.start(compile.step.name, 0);
defer prog_node.end();
- const result = compile.rebuildInFuzzMode(prog_node);
+ const result = compile.rebuildInFuzzMode(gpa, prog_node);
const show_compile_errors = compile.step.result_error_bundle.errorMessageCount() > 0;
const show_error_msgs = compile.step.result_error_msgs.items.len > 0;
@@ -138,24 +162,22 @@ fn rebuildTestsWorkerRunFallible(run: *Step.Run, ttyconf: std.io.tty.Config, par
}
fn fuzzWorkerRun(
+ fuzz: *Fuzz,
run: *Step.Run,
- web_server: *WebServer,
unit_test_index: u32,
- ttyconf: std.io.tty.Config,
- parent_prog_node: std.Progress.Node,
) void {
const gpa = run.step.owner.allocator;
const test_name = run.cached_test_metadata.?.testName(unit_test_index);
- const prog_node = parent_prog_node.start(test_name, 0);
+ const prog_node = fuzz.prog_node.start(test_name, 0);
defer prog_node.end();
- run.rerunInFuzzMode(web_server, unit_test_index, prog_node) catch |err| switch (err) {
+ run.rerunInFuzzMode(fuzz, unit_test_index, prog_node) catch |err| switch (err) {
error.MakeFailed => {
var buf: [256]u8 = undefined;
const w = std.debug.lockStderrWriter(&buf);
defer std.debug.unlockStderrWriter();
- build_runner.printErrorMessages(gpa, &run.step, .{ .ttyconf = ttyconf }, w, false) catch {};
+ build_runner.printErrorMessages(gpa, &run.step, .{ .ttyconf = fuzz.ws.ttyconf }, w, false) catch {};
return;
},
else => {
@@ -166,3 +188,270 @@ fn fuzzWorkerRun(
},
};
}
+
+pub fn serveSourcesTar(fuzz: *Fuzz, req: *std.http.Server.Request) !void {
+ const gpa = fuzz.ws.gpa;
+
+ var arena_state: std.heap.ArenaAllocator = .init(gpa);
+ defer arena_state.deinit();
+ const arena = arena_state.allocator();
+
+ const DedupTable = std.ArrayHashMapUnmanaged(Build.Cache.Path, void, Build.Cache.Path.TableAdapter, false);
+ var dedup_table: DedupTable = .empty;
+ defer dedup_table.deinit(gpa);
+
+ for (fuzz.run_steps) |run_step| {
+ const compile_inputs = run_step.producer.?.step.inputs.table;
+ for (compile_inputs.keys(), compile_inputs.values()) |dir_path, *file_list| {
+ try dedup_table.ensureUnusedCapacity(gpa, file_list.items.len);
+ for (file_list.items) |sub_path| {
+ if (!std.mem.endsWith(u8, sub_path, ".zig")) continue;
+ const joined_path = try dir_path.join(arena, sub_path);
+ dedup_table.putAssumeCapacity(joined_path, {});
+ }
+ }
+ }
+
+ const deduped_paths = dedup_table.keys();
+ const SortContext = struct {
+ pub fn lessThan(this: @This(), lhs: Build.Cache.Path, rhs: Build.Cache.Path) bool {
+ _ = this;
+ return switch (std.mem.order(u8, lhs.root_dir.path orelse ".", rhs.root_dir.path orelse ".")) {
+ .lt => true,
+ .gt => false,
+ .eq => std.mem.lessThan(u8, lhs.sub_path, rhs.sub_path),
+ };
+ }
+ };
+ std.mem.sortUnstable(Build.Cache.Path, deduped_paths, SortContext{}, SortContext.lessThan);
+ return fuzz.ws.serveTarFile(req, deduped_paths);
+}
+
+pub const Previous = struct {
+ unique_runs: usize,
+ entry_points: usize,
+ pub const init: Previous = .{ .unique_runs = 0, .entry_points = 0 };
+};
+pub fn sendUpdate(
+ fuzz: *Fuzz,
+ socket: *std.http.WebSocket,
+ prev: *Previous,
+) !void {
+ fuzz.coverage_mutex.lock();
+ defer fuzz.coverage_mutex.unlock();
+
+ const coverage_maps = fuzz.coverage_files.values();
+ if (coverage_maps.len == 0) return;
+ // TODO: handle multiple fuzz steps in the WebSocket packets
+ const coverage_map = &coverage_maps[0];
+ const cov_header: *const abi.SeenPcsHeader = @ptrCast(coverage_map.mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]);
+ // TODO: this isn't sound! We need to do volatile reads of these bits rather than handing the
+ // buffer off to the kernel, because we might race with the fuzzer process[es]. This brings the
+ // whole mmap strategy into question. Incidentally, I wonder if post-writergate we could pass
+ // this data straight to the socket with sendfile...
+ const seen_pcs = cov_header.seenBits();
+ const n_runs = @atomicLoad(usize, &cov_header.n_runs, .monotonic);
+ const unique_runs = @atomicLoad(usize, &cov_header.unique_runs, .monotonic);
+ if (prev.unique_runs != unique_runs) {
+ // There has been an update.
+ if (prev.unique_runs == 0) {
+ // We need to send initial context.
+ const header: abi.SourceIndexHeader = .{
+ .directories_len = @intCast(coverage_map.coverage.directories.entries.len),
+ .files_len = @intCast(coverage_map.coverage.files.entries.len),
+ .source_locations_len = @intCast(coverage_map.source_locations.len),
+ .string_bytes_len = @intCast(coverage_map.coverage.string_bytes.items.len),
+ .start_timestamp = coverage_map.start_timestamp,
+ };
+ const iovecs: [5]std.posix.iovec_const = .{
+ makeIov(@ptrCast(&header)),
+ makeIov(@ptrCast(coverage_map.coverage.directories.keys())),
+ makeIov(@ptrCast(coverage_map.coverage.files.keys())),
+ makeIov(@ptrCast(coverage_map.source_locations)),
+ makeIov(coverage_map.coverage.string_bytes.items),
+ };
+ try socket.writeMessagev(&iovecs, .binary);
+ }
+
+ const header: abi.CoverageUpdateHeader = .{
+ .n_runs = n_runs,
+ .unique_runs = unique_runs,
+ };
+ const iovecs: [2]std.posix.iovec_const = .{
+ makeIov(@ptrCast(&header)),
+ makeIov(@ptrCast(seen_pcs)),
+ };
+ try socket.writeMessagev(&iovecs, .binary);
+
+ prev.unique_runs = unique_runs;
+ }
+
+ if (prev.entry_points != coverage_map.entry_points.items.len) {
+ const header: abi.EntryPointHeader = .init(@intCast(coverage_map.entry_points.items.len));
+ const iovecs: [2]std.posix.iovec_const = .{
+ makeIov(@ptrCast(&header)),
+ makeIov(@ptrCast(coverage_map.entry_points.items)),
+ };
+ try socket.writeMessagev(&iovecs, .binary);
+
+ prev.entry_points = coverage_map.entry_points.items.len;
+ }
+}
+
+fn coverageRun(fuzz: *Fuzz) void {
+ defer fuzz.wait_group.finish();
+
+ fuzz.queue_mutex.lock();
+ defer fuzz.queue_mutex.unlock();
+
+ while (true) {
+ fuzz.queue_cond.wait(&fuzz.queue_mutex);
+ for (fuzz.msg_queue.items) |msg| switch (msg) {
+ .coverage => |coverage| prepareTables(fuzz, coverage.run, coverage.id) catch |err| switch (err) {
+ error.AlreadyReported => continue,
+ else => |e| log.err("failed to prepare code coverage tables: {s}", .{@errorName(e)}),
+ },
+ .entry_point => |entry_point| addEntryPoint(fuzz, entry_point.coverage_id, entry_point.addr) catch |err| switch (err) {
+ error.AlreadyReported => continue,
+ else => |e| log.err("failed to prepare code coverage tables: {s}", .{@errorName(e)}),
+ },
+ };
+ fuzz.msg_queue.clearRetainingCapacity();
+ }
+}
+fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutOfMemory, AlreadyReported }!void {
+ const ws = fuzz.ws;
+ const gpa = ws.gpa;
+
+ fuzz.coverage_mutex.lock();
+ defer fuzz.coverage_mutex.unlock();
+
+ const gop = try fuzz.coverage_files.getOrPut(gpa, coverage_id);
+ if (gop.found_existing) {
+ // We are fuzzing the same executable with multiple threads.
+ // Perhaps the same unit test; perhaps a different one. In any
+ // case, since the coverage file is the same, we only have to
+ // notice changes to that one file in order to learn coverage for
+ // this particular executable.
+ return;
+ }
+ errdefer _ = fuzz.coverage_files.pop();
+
+ gop.value_ptr.* = .{
+ .coverage = std.debug.Coverage.init,
+ .mapped_memory = undefined, // populated below
+ .source_locations = undefined, // populated below
+ .entry_points = .{},
+ .start_timestamp = ws.now(),
+ };
+ errdefer gop.value_ptr.coverage.deinit(gpa);
+
+ const rebuilt_exe_path = run_step.rebuilt_executable.?;
+ var debug_info = std.debug.Info.load(gpa, rebuilt_exe_path, &gop.value_ptr.coverage) catch |err| {
+ log.err("step '{s}': failed to load debug information for '{f}': {s}", .{
+ run_step.step.name, rebuilt_exe_path, @errorName(err),
+ });
+ return error.AlreadyReported;
+ };
+ defer debug_info.deinit(gpa);
+
+ const coverage_file_path: Build.Cache.Path = .{
+ .root_dir = run_step.step.owner.cache_root,
+ .sub_path = "v/" ++ std.fmt.hex(coverage_id),
+ };
+ var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| {
+ log.err("step '{s}': failed to load coverage file '{f}': {s}", .{
+ run_step.step.name, coverage_file_path, @errorName(err),
+ });
+ return error.AlreadyReported;
+ };
+ defer coverage_file.close();
+
+ const file_size = coverage_file.getEndPos() catch |err| {
+ log.err("unable to check len of coverage file '{f}': {s}", .{ coverage_file_path, @errorName(err) });
+ return error.AlreadyReported;
+ };
+
+ const mapped_memory = std.posix.mmap(
+ null,
+ file_size,
+ std.posix.PROT.READ,
+ .{ .TYPE = .SHARED },
+ coverage_file.handle,
+ 0,
+ ) catch |err| {
+ log.err("failed to map coverage file '{f}': {s}", .{ coverage_file_path, @errorName(err) });
+ return error.AlreadyReported;
+ };
+ gop.value_ptr.mapped_memory = mapped_memory;
+
+ const header: *const abi.SeenPcsHeader = @ptrCast(mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]);
+ const pcs = header.pcAddrs();
+ const source_locations = try gpa.alloc(Coverage.SourceLocation, pcs.len);
+ errdefer gpa.free(source_locations);
+
+ // Unfortunately the PCs array that LLVM gives us from the 8-bit PC
+ // counters feature is not sorted.
+ var sorted_pcs: std.MultiArrayList(struct { pc: u64, index: u32, sl: Coverage.SourceLocation }) = .{};
+ defer sorted_pcs.deinit(gpa);
+ try sorted_pcs.resize(gpa, pcs.len);
+ @memcpy(sorted_pcs.items(.pc), pcs);
+ for (sorted_pcs.items(.index), 0..) |*v, i| v.* = @intCast(i);
+ sorted_pcs.sortUnstable(struct {
+ addrs: []const u64,
+
+ pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
+ return ctx.addrs[a_index] < ctx.addrs[b_index];
+ }
+ }{ .addrs = sorted_pcs.items(.pc) });
+
+ debug_info.resolveAddresses(gpa, sorted_pcs.items(.pc), sorted_pcs.items(.sl)) catch |err| {
+ log.err("failed to resolve addresses to source locations: {s}", .{@errorName(err)});
+ return error.AlreadyReported;
+ };
+
+ for (sorted_pcs.items(.index), sorted_pcs.items(.sl)) |i, sl| source_locations[i] = sl;
+ gop.value_ptr.source_locations = source_locations;
+
+ ws.notifyUpdate();
+}
+fn addEntryPoint(fuzz: *Fuzz, coverage_id: u64, addr: u64) error{ AlreadyReported, OutOfMemory }!void {
+ fuzz.coverage_mutex.lock();
+ defer fuzz.coverage_mutex.unlock();
+
+ const coverage_map = fuzz.coverage_files.getPtr(coverage_id).?;
+ const header: *const abi.SeenPcsHeader = @ptrCast(coverage_map.mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]);
+ const pcs = header.pcAddrs();
+
+ // Since this pcs list is unsorted, we must linear scan for the best index.
+ const index = i: {
+ var best: usize = 0;
+ for (pcs[1..], 1..) |elem_addr, i| {
+ if (elem_addr == addr) break :i i;
+ if (elem_addr > addr) continue;
+ if (elem_addr > pcs[best]) best = i;
+ }
+ break :i best;
+ };
+ if (index >= pcs.len) {
+ log.err("unable to find unit test entry address 0x{x} in source locations (range: 0x{x} to 0x{x})", .{
+ addr, pcs[0], pcs[pcs.len - 1],
+ });
+ return error.AlreadyReported;
+ }
+ if (false) {
+ const sl = coverage_map.source_locations[index];
+ const file_name = coverage_map.coverage.stringAt(coverage_map.coverage.fileAt(sl.file).basename);
+ log.debug("server found entry point for 0x{x} at {s}:{d}:{d} - index {d} between {x} and {x}", .{
+ addr, file_name, sl.line, sl.column, index, pcs[index - 1], pcs[index + 1],
+ });
+ }
+ try coverage_map.entry_points.append(fuzz.ws.gpa, @intCast(index));
+}
+
+fn makeIov(s: []const u8) std.posix.iovec_const {
+ return .{
+ .base = s.ptr,
+ .len = s.len,
+ };
+}
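
The sendUpdate logic above frames each binary WebSocket message as a fixed header struct followed by a variable payload, sent together as one message built from multiple iovecs. A minimal sketch of that pattern (illustrative only; the helper name is hypothetical, but writeMessagev is the same std.http.WebSocket call used above):

    const std = @import("std");

    /// Hypothetical helper showing the header-plus-payload framing used by
    /// sendUpdate; the anytype header stands in for the abi.*Header structs.
    fn sendHeaderAndPayload(socket: *std.http.WebSocket, header: anytype, payload: []const u8) !void {
        const header_bytes: []const u8 = std.mem.asBytes(&header);
        const iovecs: [2]std.posix.iovec_const = .{
            .{ .base = header_bytes.ptr, .len = header_bytes.len },
            .{ .base = payload.ptr, .len = payload.len },
        };
        try socket.writeMessagev(&iovecs, .binary);
    }
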
diff --git a/lib/std/Build/Fuzz/WebServer.zig b/lib/std/Build/Fuzz/WebServer.zig
deleted file mode 100644
index 18582a60ef6a..000000000000
--- a/lib/std/Build/Fuzz/WebServer.zig
+++ /dev/null
@@ -1,709 +0,0 @@
-const builtin = @import("builtin");
-
-const std = @import("../../std.zig");
-const Allocator = std.mem.Allocator;
-const Build = std.Build;
-const Step = std.Build.Step;
-const Coverage = std.debug.Coverage;
-const abi = std.Build.Fuzz.abi;
-const log = std.log;
-const assert = std.debug.assert;
-const Cache = std.Build.Cache;
-const Path = Cache.Path;
-
-const WebServer = @This();
-
-gpa: Allocator,
-global_cache_directory: Build.Cache.Directory,
-zig_lib_directory: Build.Cache.Directory,
-zig_exe_path: []const u8,
-listen_address: std.net.Address,
-fuzz_run_steps: []const *Step.Run,
-
-/// Messages from fuzz workers. Protected by mutex.
-msg_queue: std.ArrayListUnmanaged(Msg),
-/// Protects `msg_queue` only.
-mutex: std.Thread.Mutex,
-/// Signaled when there is a message in `msg_queue`.
-condition: std.Thread.Condition,
-
-coverage_files: std.AutoArrayHashMapUnmanaged(u64, CoverageMap),
-/// Protects `coverage_files` only.
-coverage_mutex: std.Thread.Mutex,
-/// Signaled when `coverage_files` changes.
-coverage_condition: std.Thread.Condition,
-
-/// Time at initialization of WebServer.
-base_timestamp: i128,
-
-const fuzzer_bin_name = "fuzzer";
-const fuzzer_arch_os_abi = "wasm32-freestanding";
-const fuzzer_cpu_features = "baseline+atomics+bulk_memory+multivalue+mutable_globals+nontrapping_fptoint+reference_types+sign_ext";
-
-const CoverageMap = struct {
- mapped_memory: []align(std.heap.page_size_min) const u8,
- coverage: Coverage,
- source_locations: []Coverage.SourceLocation,
- /// Elements are indexes into `source_locations` pointing to the unit tests that are being fuzz tested.
- entry_points: std.ArrayListUnmanaged(u32),
- start_timestamp: i64,
-
- fn deinit(cm: *CoverageMap, gpa: Allocator) void {
- std.posix.munmap(cm.mapped_memory);
- cm.coverage.deinit(gpa);
- cm.* = undefined;
- }
-};
-
-const Msg = union(enum) {
- coverage: struct {
- id: u64,
- run: *Step.Run,
- },
- entry_point: struct {
- coverage_id: u64,
- addr: u64,
- },
-};
-
-pub fn run(ws: *WebServer) void {
- var http_server = ws.listen_address.listen(.{
- .reuse_address = true,
- }) catch |err| {
- log.err("failed to listen to port {d}: {s}", .{ ws.listen_address.in.getPort(), @errorName(err) });
- return;
- };
- const port = http_server.listen_address.in.getPort();
- log.info("web interface listening at http://127.0.0.1:{d}/", .{port});
- if (ws.listen_address.in.getPort() == 0)
- log.info("hint: pass --port {d} to use this same port next time", .{port});
-
- while (true) {
- const connection = http_server.accept() catch |err| {
- log.err("failed to accept connection: {s}", .{@errorName(err)});
- return;
- };
- _ = std.Thread.spawn(.{}, accept, .{ ws, connection }) catch |err| {
- log.err("unable to spawn connection thread: {s}", .{@errorName(err)});
- connection.stream.close();
- continue;
- };
- }
-}
-
-fn now(s: *const WebServer) i64 {
- return @intCast(std.time.nanoTimestamp() - s.base_timestamp);
-}
-
-fn accept(ws: *WebServer, connection: std.net.Server.Connection) void {
- defer connection.stream.close();
-
- var read_buffer: [0x4000]u8 = undefined;
- var server = std.http.Server.init(connection, &read_buffer);
- var web_socket: std.http.WebSocket = undefined;
- var send_buffer: [0x4000]u8 = undefined;
- var ws_recv_buffer: [0x4000]u8 align(4) = undefined;
- while (server.state == .ready) {
- var request = server.receiveHead() catch |err| switch (err) {
- error.HttpConnectionClosing => return,
- else => {
- log.err("closing http connection: {s}", .{@errorName(err)});
- return;
- },
- };
- if (web_socket.init(&request, &send_buffer, &ws_recv_buffer) catch |err| {
- log.err("initializing web socket: {s}", .{@errorName(err)});
- return;
- }) {
- serveWebSocket(ws, &web_socket) catch |err| {
- log.err("unable to serve web socket connection: {s}", .{@errorName(err)});
- return;
- };
- } else {
- serveRequest(ws, &request) catch |err| switch (err) {
- error.AlreadyReported => return,
- else => |e| {
- log.err("unable to serve {s}: {s}", .{ request.head.target, @errorName(e) });
- return;
- },
- };
- }
- }
-}
-
-fn serveRequest(ws: *WebServer, request: *std.http.Server.Request) !void {
- if (std.mem.eql(u8, request.head.target, "/") or
- std.mem.eql(u8, request.head.target, "/debug") or
- std.mem.eql(u8, request.head.target, "/debug/"))
- {
- try serveFile(ws, request, "fuzzer/web/index.html", "text/html");
- } else if (std.mem.eql(u8, request.head.target, "/main.js") or
- std.mem.eql(u8, request.head.target, "/debug/main.js"))
- {
- try serveFile(ws, request, "fuzzer/web/main.js", "application/javascript");
- } else if (std.mem.eql(u8, request.head.target, "/main.wasm")) {
- try serveWasm(ws, request, .ReleaseFast);
- } else if (std.mem.eql(u8, request.head.target, "/debug/main.wasm")) {
- try serveWasm(ws, request, .Debug);
- } else if (std.mem.eql(u8, request.head.target, "/sources.tar") or
- std.mem.eql(u8, request.head.target, "/debug/sources.tar"))
- {
- try serveSourcesTar(ws, request);
- } else {
- try request.respond("not found", .{
- .status = .not_found,
- .extra_headers = &.{
- .{ .name = "content-type", .value = "text/plain" },
- },
- });
- }
-}
-
-fn serveFile(
- ws: *WebServer,
- request: *std.http.Server.Request,
- name: []const u8,
- content_type: []const u8,
-) !void {
- const gpa = ws.gpa;
- // The desired API is actually sendfile, which will require enhancing std.http.Server.
- // We load the file with every request so that the user can make changes to the file
- // and refresh the HTML page without restarting this server.
- const file_contents = ws.zig_lib_directory.handle.readFileAlloc(gpa, name, 10 * 1024 * 1024) catch |err| {
- log.err("failed to read '{f}{s}': {s}", .{ ws.zig_lib_directory, name, @errorName(err) });
- return error.AlreadyReported;
- };
- defer gpa.free(file_contents);
- try request.respond(file_contents, .{
- .extra_headers = &.{
- .{ .name = "content-type", .value = content_type },
- cache_control_header,
- },
- });
-}
-
-fn serveWasm(
- ws: *WebServer,
- request: *std.http.Server.Request,
- optimize_mode: std.builtin.OptimizeMode,
-) !void {
- const gpa = ws.gpa;
-
- var arena_instance = std.heap.ArenaAllocator.init(gpa);
- defer arena_instance.deinit();
- const arena = arena_instance.allocator();
-
- // Do the compilation every request, so that the user can edit the files
- // and see the changes without restarting the server.
- const wasm_base_path = try buildWasmBinary(ws, arena, optimize_mode);
- const bin_name = try std.zig.binNameAlloc(arena, .{
- .root_name = fuzzer_bin_name,
- .target = &(std.zig.system.resolveTargetQuery(std.Build.parseTargetQuery(.{
- .arch_os_abi = fuzzer_arch_os_abi,
- .cpu_features = fuzzer_cpu_features,
- }) catch unreachable) catch unreachable),
- .output_mode = .Exe,
- });
- // std.http.Server does not have a sendfile API yet.
- const bin_path = try wasm_base_path.join(arena, bin_name);
- const file_contents = try bin_path.root_dir.handle.readFileAlloc(gpa, bin_path.sub_path, 10 * 1024 * 1024);
- defer gpa.free(file_contents);
- try request.respond(file_contents, .{
- .extra_headers = &.{
- .{ .name = "content-type", .value = "application/wasm" },
- cache_control_header,
- },
- });
-}
-
-fn buildWasmBinary(
- ws: *WebServer,
- arena: Allocator,
- optimize_mode: std.builtin.OptimizeMode,
-) !Path {
- const gpa = ws.gpa;
-
- const main_src_path: Build.Cache.Path = .{
- .root_dir = ws.zig_lib_directory,
- .sub_path = "fuzzer/web/main.zig",
- };
- const walk_src_path: Build.Cache.Path = .{
- .root_dir = ws.zig_lib_directory,
- .sub_path = "docs/wasm/Walk.zig",
- };
- const html_render_src_path: Build.Cache.Path = .{
- .root_dir = ws.zig_lib_directory,
- .sub_path = "docs/wasm/html_render.zig",
- };
-
- var argv: std.ArrayListUnmanaged([]const u8) = .empty;
-
- try argv.appendSlice(arena, &.{
- ws.zig_exe_path, "build-exe", //
- "-fno-entry", //
- "-O", @tagName(optimize_mode), //
- "-target", fuzzer_arch_os_abi, //
- "-mcpu", fuzzer_cpu_features, //
- "--cache-dir", ws.global_cache_directory.path orelse ".", //
- "--global-cache-dir", ws.global_cache_directory.path orelse ".", //
- "--name", fuzzer_bin_name, //
- "-rdynamic", //
- "-fsingle-threaded", //
- "--dep", "Walk", //
- "--dep", "html_render", //
- try std.fmt.allocPrint(arena, "-Mroot={f}", .{main_src_path}), //
- try std.fmt.allocPrint(arena, "-MWalk={f}", .{walk_src_path}), //
- "--dep", "Walk", //
- try std.fmt.allocPrint(arena, "-Mhtml_render={f}", .{html_render_src_path}), //
- "--listen=-",
- });
-
- var child = std.process.Child.init(argv.items, gpa);
- child.stdin_behavior = .Pipe;
- child.stdout_behavior = .Pipe;
- child.stderr_behavior = .Pipe;
- try child.spawn();
-
- var poller = std.io.poll(gpa, enum { stdout, stderr }, .{
- .stdout = child.stdout.?,
- .stderr = child.stderr.?,
- });
- defer poller.deinit();
-
- try sendMessage(child.stdin.?, .update);
- try sendMessage(child.stdin.?, .exit);
-
- var result: ?Path = null;
- var result_error_bundle = std.zig.ErrorBundle.empty;
-
- const stdout = poller.reader(.stdout);
-
- poll: while (true) {
- const Header = std.zig.Server.Message.Header;
- while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll;
- const header = stdout.takeStruct(Header, .little) catch unreachable;
- while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
- const body = stdout.take(header.bytes_len) catch unreachable;
-
- switch (header.tag) {
- .zig_version => {
- if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
- return error.ZigProtocolVersionMismatch;
- }
- },
- .error_bundle => {
- const EbHdr = std.zig.Server.Message.ErrorBundle;
- const eb_hdr = @as(*align(1) const EbHdr, @ptrCast(body));
- const extra_bytes =
- body[@sizeOf(EbHdr)..][0 .. @sizeOf(u32) * eb_hdr.extra_len];
- const string_bytes =
- body[@sizeOf(EbHdr) + extra_bytes.len ..][0..eb_hdr.string_bytes_len];
- // TODO: use @ptrCast when the compiler supports it
- const unaligned_extra = std.mem.bytesAsSlice(u32, extra_bytes);
- const extra_array = try arena.alloc(u32, unaligned_extra.len);
- @memcpy(extra_array, unaligned_extra);
- result_error_bundle = .{
- .string_bytes = try arena.dupe(u8, string_bytes),
- .extra = extra_array,
- };
- },
- .emit_digest => {
- const EmitDigest = std.zig.Server.Message.EmitDigest;
- const ebp_hdr = @as(*align(1) const EmitDigest, @ptrCast(body));
- if (!ebp_hdr.flags.cache_hit) {
- log.info("source changes detected; rebuilt wasm component", .{});
- }
- const digest = body[@sizeOf(EmitDigest)..][0..Cache.bin_digest_len];
- result = .{
- .root_dir = ws.global_cache_directory,
- .sub_path = try arena.dupe(u8, "o" ++ std.fs.path.sep_str ++ Cache.binToHex(digest.*)),
- };
- },
- else => {}, // ignore other messages
- }
- }
-
- const stderr_contents = try poller.toOwnedSlice(.stderr);
- if (stderr_contents.len > 0) {
- std.debug.print("{s}", .{stderr_contents});
- }
-
- // Send EOF to stdin.
- child.stdin.?.close();
- child.stdin = null;
-
- switch (try child.wait()) {
- .Exited => |code| {
- if (code != 0) {
- log.err(
- "the following command exited with error code {d}:\n{s}",
- .{ code, try Build.Step.allocPrintCmd(arena, null, argv.items) },
- );
- return error.WasmCompilationFailed;
- }
- },
- .Signal, .Stopped, .Unknown => {
- log.err(
- "the following command terminated unexpectedly:\n{s}",
- .{try Build.Step.allocPrintCmd(arena, null, argv.items)},
- );
- return error.WasmCompilationFailed;
- },
- }
-
- if (result_error_bundle.errorMessageCount() > 0) {
- const color = std.zig.Color.auto;
- result_error_bundle.renderToStdErr(color.renderOptions());
- log.err("the following command failed with {d} compilation errors:\n{s}", .{
- result_error_bundle.errorMessageCount(),
- try Build.Step.allocPrintCmd(arena, null, argv.items),
- });
- return error.WasmCompilationFailed;
- }
-
- return result orelse {
- log.err("child process failed to report result\n{s}", .{
- try Build.Step.allocPrintCmd(arena, null, argv.items),
- });
- return error.WasmCompilationFailed;
- };
-}
-
-fn sendMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag) !void {
- const header: std.zig.Client.Message.Header = .{
- .tag = tag,
- .bytes_len = 0,
- };
- try file.writeAll(std.mem.asBytes(&header));
-}
-
-fn serveWebSocket(ws: *WebServer, web_socket: *std.http.WebSocket) !void {
- ws.coverage_mutex.lock();
- defer ws.coverage_mutex.unlock();
-
- // On first connection, the client needs to know what time the server
- // thinks it is to rebase timestamps.
- {
- const timestamp_message: abi.CurrentTime = .{ .base = ws.now() };
- try web_socket.writeMessage(std.mem.asBytes(×tamp_message), .binary);
- }
-
- // On first connection, the client needs all the coverage information
- // so that subsequent updates can contain only the updated bits.
- var prev_unique_runs: usize = 0;
- var prev_entry_points: usize = 0;
- try sendCoverageContext(ws, web_socket, &prev_unique_runs, &prev_entry_points);
- while (true) {
- ws.coverage_condition.timedWait(&ws.coverage_mutex, std.time.ns_per_ms * 500) catch {};
- try sendCoverageContext(ws, web_socket, &prev_unique_runs, &prev_entry_points);
- }
-}
-
-fn sendCoverageContext(
- ws: *WebServer,
- web_socket: *std.http.WebSocket,
- prev_unique_runs: *usize,
- prev_entry_points: *usize,
-) !void {
- const coverage_maps = ws.coverage_files.values();
- if (coverage_maps.len == 0) return;
- // TODO: make each events URL correspond to one coverage map
- const coverage_map = &coverage_maps[0];
- const cov_header: *const abi.SeenPcsHeader = @ptrCast(coverage_map.mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]);
- const seen_pcs = cov_header.seenBits();
- const n_runs = @atomicLoad(usize, &cov_header.n_runs, .monotonic);
- const unique_runs = @atomicLoad(usize, &cov_header.unique_runs, .monotonic);
- if (prev_unique_runs.* != unique_runs) {
- // There has been an update.
- if (prev_unique_runs.* == 0) {
- // We need to send initial context.
- const header: abi.SourceIndexHeader = .{
- .flags = .{},
- .directories_len = @intCast(coverage_map.coverage.directories.entries.len),
- .files_len = @intCast(coverage_map.coverage.files.entries.len),
- .source_locations_len = @intCast(coverage_map.source_locations.len),
- .string_bytes_len = @intCast(coverage_map.coverage.string_bytes.items.len),
- .start_timestamp = coverage_map.start_timestamp,
- };
- const iovecs: [5]std.posix.iovec_const = .{
- makeIov(std.mem.asBytes(&header)),
- makeIov(std.mem.sliceAsBytes(coverage_map.coverage.directories.keys())),
- makeIov(std.mem.sliceAsBytes(coverage_map.coverage.files.keys())),
- makeIov(std.mem.sliceAsBytes(coverage_map.source_locations)),
- makeIov(coverage_map.coverage.string_bytes.items),
- };
- try web_socket.writeMessagev(&iovecs, .binary);
- }
-
- const header: abi.CoverageUpdateHeader = .{
- .n_runs = n_runs,
- .unique_runs = unique_runs,
- };
- const iovecs: [2]std.posix.iovec_const = .{
- makeIov(std.mem.asBytes(&header)),
- makeIov(std.mem.sliceAsBytes(seen_pcs)),
- };
- try web_socket.writeMessagev(&iovecs, .binary);
-
- prev_unique_runs.* = unique_runs;
- }
-
- if (prev_entry_points.* != coverage_map.entry_points.items.len) {
- const header: abi.EntryPointHeader = .{
- .flags = .{
- .locs_len = @intCast(coverage_map.entry_points.items.len),
- },
- };
- const iovecs: [2]std.posix.iovec_const = .{
- makeIov(std.mem.asBytes(&header)),
- makeIov(std.mem.sliceAsBytes(coverage_map.entry_points.items)),
- };
- try web_socket.writeMessagev(&iovecs, .binary);
-
- prev_entry_points.* = coverage_map.entry_points.items.len;
- }
-}
-
-fn serveSourcesTar(ws: *WebServer, request: *std.http.Server.Request) !void {
- const gpa = ws.gpa;
-
- var arena_instance = std.heap.ArenaAllocator.init(gpa);
- defer arena_instance.deinit();
- const arena = arena_instance.allocator();
-
- var send_buffer: [0x4000]u8 = undefined;
- var response = request.respondStreaming(.{
- .send_buffer = &send_buffer,
- .respond_options = .{
- .extra_headers = &.{
- .{ .name = "content-type", .value = "application/x-tar" },
- cache_control_header,
- },
- },
- });
-
- const DedupeTable = std.ArrayHashMapUnmanaged(Build.Cache.Path, void, Build.Cache.Path.TableAdapter, false);
- var dedupe_table: DedupeTable = .{};
- defer dedupe_table.deinit(gpa);
-
- for (ws.fuzz_run_steps) |run_step| {
- const compile_step_inputs = run_step.producer.?.step.inputs.table;
- for (compile_step_inputs.keys(), compile_step_inputs.values()) |dir_path, *file_list| {
- try dedupe_table.ensureUnusedCapacity(gpa, file_list.items.len);
- for (file_list.items) |sub_path| {
- // Special file "." means the entire directory.
- if (std.mem.eql(u8, sub_path, ".")) continue;
- const joined_path = try dir_path.join(arena, sub_path);
- _ = dedupe_table.getOrPutAssumeCapacity(joined_path);
- }
- }
- }
-
- const deduped_paths = dedupe_table.keys();
- const SortContext = struct {
- pub fn lessThan(this: @This(), lhs: Build.Cache.Path, rhs: Build.Cache.Path) bool {
- _ = this;
- return switch (std.mem.order(u8, lhs.root_dir.path orelse ".", rhs.root_dir.path orelse ".")) {
- .lt => true,
- .gt => false,
- .eq => std.mem.lessThan(u8, lhs.sub_path, rhs.sub_path),
- };
- }
- };
- std.mem.sortUnstable(Build.Cache.Path, deduped_paths, SortContext{}, SortContext.lessThan);
-
- var cwd_cache: ?[]const u8 = null;
-
- var adapter = response.writer().adaptToNewApi();
- var archiver: std.tar.Writer = .{ .underlying_writer = &adapter.new_interface };
- var read_buffer: [1024]u8 = undefined;
-
- for (deduped_paths) |joined_path| {
- var file = joined_path.root_dir.handle.openFile(joined_path.sub_path, .{}) catch |err| {
- log.err("failed to open {f}: {s}", .{ joined_path, @errorName(err) });
- continue;
- };
- defer file.close();
- const stat = try file.stat();
- var file_reader: std.fs.File.Reader = .initSize(file, &read_buffer, stat.size);
- archiver.prefix = joined_path.root_dir.path orelse try memoizedCwd(arena, &cwd_cache);
- try archiver.writeFile(joined_path.sub_path, &file_reader, stat.mtime);
- }
-
- // intentionally not calling `archiver.finishPedantically`
- try adapter.new_interface.flush();
- try response.end();
-}
-
-fn memoizedCwd(arena: Allocator, opt_ptr: *?[]const u8) ![]const u8 {
- if (opt_ptr.*) |cached| return cached;
- const result = try std.process.getCwdAlloc(arena);
- opt_ptr.* = result;
- return result;
-}
-
-const cache_control_header: std.http.Header = .{
- .name = "cache-control",
- .value = "max-age=0, must-revalidate",
-};
-
-pub fn coverageRun(ws: *WebServer) void {
- ws.mutex.lock();
- defer ws.mutex.unlock();
-
- while (true) {
- ws.condition.wait(&ws.mutex);
- for (ws.msg_queue.items) |msg| switch (msg) {
- .coverage => |coverage| prepareTables(ws, coverage.run, coverage.id) catch |err| switch (err) {
- error.AlreadyReported => continue,
- else => |e| log.err("failed to prepare code coverage tables: {s}", .{@errorName(e)}),
- },
- .entry_point => |entry_point| addEntryPoint(ws, entry_point.coverage_id, entry_point.addr) catch |err| switch (err) {
- error.AlreadyReported => continue,
- else => |e| log.err("failed to prepare code coverage tables: {s}", .{@errorName(e)}),
- },
- };
- ws.msg_queue.clearRetainingCapacity();
- }
-}
-
-fn prepareTables(
- ws: *WebServer,
- run_step: *Step.Run,
- coverage_id: u64,
-) error{ OutOfMemory, AlreadyReported }!void {
- const gpa = ws.gpa;
-
- ws.coverage_mutex.lock();
- defer ws.coverage_mutex.unlock();
-
- const gop = try ws.coverage_files.getOrPut(gpa, coverage_id);
- if (gop.found_existing) {
- // We are fuzzing the same executable with multiple threads.
- // Perhaps the same unit test; perhaps a different one. In any
- // case, since the coverage file is the same, we only have to
- // notice changes to that one file in order to learn coverage for
- // this particular executable.
- return;
- }
- errdefer _ = ws.coverage_files.pop();
-
- gop.value_ptr.* = .{
- .coverage = std.debug.Coverage.init,
- .mapped_memory = undefined, // populated below
- .source_locations = undefined, // populated below
- .entry_points = .{},
- .start_timestamp = ws.now(),
- };
- errdefer gop.value_ptr.coverage.deinit(gpa);
-
- const rebuilt_exe_path = run_step.rebuilt_executable.?;
- var debug_info = std.debug.Info.load(gpa, rebuilt_exe_path, &gop.value_ptr.coverage) catch |err| {
- log.err("step '{s}': failed to load debug information for '{f}': {s}", .{
- run_step.step.name, rebuilt_exe_path, @errorName(err),
- });
- return error.AlreadyReported;
- };
- defer debug_info.deinit(gpa);
-
- const coverage_file_path: Build.Cache.Path = .{
- .root_dir = run_step.step.owner.cache_root,
- .sub_path = "v/" ++ std.fmt.hex(coverage_id),
- };
- var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| {
- log.err("step '{s}': failed to load coverage file '{f}': {s}", .{
- run_step.step.name, coverage_file_path, @errorName(err),
- });
- return error.AlreadyReported;
- };
- defer coverage_file.close();
-
- const file_size = coverage_file.getEndPos() catch |err| {
- log.err("unable to check len of coverage file '{f}': {s}", .{ coverage_file_path, @errorName(err) });
- return error.AlreadyReported;
- };
-
- const mapped_memory = std.posix.mmap(
- null,
- file_size,
- std.posix.PROT.READ,
- .{ .TYPE = .SHARED },
- coverage_file.handle,
- 0,
- ) catch |err| {
- log.err("failed to map coverage file '{f}': {s}", .{ coverage_file_path, @errorName(err) });
- return error.AlreadyReported;
- };
- gop.value_ptr.mapped_memory = mapped_memory;
-
- const header: *const abi.SeenPcsHeader = @ptrCast(mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]);
- const pcs = header.pcAddrs();
- const source_locations = try gpa.alloc(Coverage.SourceLocation, pcs.len);
- errdefer gpa.free(source_locations);
-
- // Unfortunately the PCs array that LLVM gives us from the 8-bit PC
- // counters feature is not sorted.
- var sorted_pcs: std.MultiArrayList(struct { pc: u64, index: u32, sl: Coverage.SourceLocation }) = .{};
- defer sorted_pcs.deinit(gpa);
- try sorted_pcs.resize(gpa, pcs.len);
- @memcpy(sorted_pcs.items(.pc), pcs);
- for (sorted_pcs.items(.index), 0..) |*v, i| v.* = @intCast(i);
- sorted_pcs.sortUnstable(struct {
- addrs: []const u64,
-
- pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
- return ctx.addrs[a_index] < ctx.addrs[b_index];
- }
- }{ .addrs = sorted_pcs.items(.pc) });
-
- debug_info.resolveAddresses(gpa, sorted_pcs.items(.pc), sorted_pcs.items(.sl)) catch |err| {
- log.err("failed to resolve addresses to source locations: {s}", .{@errorName(err)});
- return error.AlreadyReported;
- };
-
- for (sorted_pcs.items(.index), sorted_pcs.items(.sl)) |i, sl| source_locations[i] = sl;
- gop.value_ptr.source_locations = source_locations;
-
- ws.coverage_condition.broadcast();
-}
-
-fn addEntryPoint(ws: *WebServer, coverage_id: u64, addr: u64) error{ AlreadyReported, OutOfMemory }!void {
- ws.coverage_mutex.lock();
- defer ws.coverage_mutex.unlock();
-
- const coverage_map = ws.coverage_files.getPtr(coverage_id).?;
- const header: *const abi.SeenPcsHeader = @ptrCast(coverage_map.mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]);
- const pcs = header.pcAddrs();
- // Since this pcs list is unsorted, we must linear scan for the best index.
- const index = i: {
- var best: usize = 0;
- for (pcs[1..], 1..) |elem_addr, i| {
- if (elem_addr == addr) break :i i;
- if (elem_addr > addr) continue;
- if (elem_addr > pcs[best]) best = i;
- }
- break :i best;
- };
- if (index >= pcs.len) {
- log.err("unable to find unit test entry address 0x{x} in source locations (range: 0x{x} to 0x{x})", .{
- addr, pcs[0], pcs[pcs.len - 1],
- });
- return error.AlreadyReported;
- }
- if (false) {
- const sl = coverage_map.source_locations[index];
- const file_name = coverage_map.coverage.stringAt(coverage_map.coverage.fileAt(sl.file).basename);
- log.debug("server found entry point for 0x{x} at {s}:{d}:{d} - index {d} between {x} and {x}", .{
- addr, file_name, sl.line, sl.column, index, pcs[index - 1], pcs[index + 1],
- });
- }
- const gpa = ws.gpa;
- try coverage_map.entry_points.append(gpa, @intCast(index));
-}
-
-fn makeIov(s: []const u8) std.posix.iovec_const {
- return .{
- .base = s.ptr,
- .len = s.len,
- };
-}
diff --git a/lib/std/Build/Fuzz/abi.zig b/lib/std/Build/Fuzz/abi.zig
deleted file mode 100644
index a6abc13feebb..000000000000
--- a/lib/std/Build/Fuzz/abi.zig
+++ /dev/null
@@ -1,112 +0,0 @@
-//! This file is shared among Zig code running in wildly different contexts:
-//! libfuzzer, compiled alongside unit tests, the build runner, running on the
-//! host computer, and the fuzzing web interface webassembly code running in
-//! the browser. All of these components interface to some degree via an ABI.
-
-/// libfuzzer uses this and its usize is the one that counts. To match the ABI,
-/// make the ints be the size of the target used with libfuzzer.
-///
-/// Trailing:
-/// * 1 bit per pc_addr, usize elements
-/// * pc_addr: usize for each pcs_len
-pub const SeenPcsHeader = extern struct {
- n_runs: usize,
- unique_runs: usize,
- pcs_len: usize,
-
- /// Used for comptime assertions. Provides a mechanism for strategically
- /// causing compile errors.
- pub const trailing = .{
- .pc_bits_usize,
- .pc_addr,
- };
-
- pub fn headerEnd(header: *const SeenPcsHeader) []const usize {
- const ptr: [*]align(@alignOf(usize)) const u8 = @ptrCast(header);
- const header_end_ptr: [*]const usize = @ptrCast(ptr + @sizeOf(SeenPcsHeader));
- const pcs_len = header.pcs_len;
- return header_end_ptr[0 .. pcs_len + seenElemsLen(pcs_len)];
- }
-
- pub fn seenBits(header: *const SeenPcsHeader) []const usize {
- return header.headerEnd()[0..seenElemsLen(header.pcs_len)];
- }
-
- pub fn seenElemsLen(pcs_len: usize) usize {
- return (pcs_len + @bitSizeOf(usize) - 1) / @bitSizeOf(usize);
- }
-
- pub fn pcAddrs(header: *const SeenPcsHeader) []const usize {
- const pcs_len = header.pcs_len;
- return header.headerEnd()[seenElemsLen(pcs_len)..][0..pcs_len];
- }
-};
-
-pub const ToClientTag = enum(u8) {
- current_time,
- source_index,
- coverage_update,
- entry_points,
- _,
-};
-
-pub const CurrentTime = extern struct {
- tag: ToClientTag = .current_time,
- /// Number of nanoseconds that all other timestamps are in reference to.
- base: i64 align(1),
-};
-
-/// Sent to the fuzzer web client on first connection to the websocket URL.
-///
-/// Trailing:
-/// * std.debug.Coverage.String for each directories_len
-/// * std.debug.Coverage.File for each files_len
-/// * std.debug.Coverage.SourceLocation for each source_locations_len
-/// * u8 for each string_bytes_len
-pub const SourceIndexHeader = extern struct {
- flags: Flags,
- directories_len: u32,
- files_len: u32,
- source_locations_len: u32,
- string_bytes_len: u32,
- /// When, according to the server, fuzzing started.
- start_timestamp: i64 align(4),
-
- pub const Flags = packed struct(u32) {
- tag: ToClientTag = .source_index,
- _: u24 = 0,
- };
-};
-
-/// Sent to the fuzzer web client whenever the set of covered source locations
-/// changes.
-///
-/// Trailing:
-/// * one bit per source_locations_len, contained in u64 elements
-pub const CoverageUpdateHeader = extern struct {
- flags: Flags = .{},
- n_runs: u64,
- unique_runs: u64,
-
- pub const Flags = packed struct(u64) {
- tag: ToClientTag = .coverage_update,
- _: u56 = 0,
- };
-
- pub const trailing = .{
- .pc_bits_usize,
- };
-};
-
-/// Sent to the fuzzer web client when the set of entry points is updated.
-///
-/// Trailing:
-/// * one u32 index of source_locations per locs_len
-pub const EntryPointHeader = extern struct {
- flags: Flags,
-
- pub const Flags = packed struct(u32) {
- tag: ToClientTag = .entry_points,
- locs_len: u24,
- };
-};
diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig
index 8583427aad0c..ee883ee15225 100644
--- a/lib/std/Build/Step.zig
+++ b/lib/std/Build/Step.zig
@@ -72,6 +72,14 @@ pub const MakeOptions = struct {
progress_node: std.Progress.Node,
thread_pool: *std.Thread.Pool,
watch: bool,
+ web_server: switch (builtin.target.cpu.arch) {
+ else => ?*Build.WebServer,
+ // WASM code references `Build.abi` which happens to incidentally reference this type, but
+ // it currently breaks because `std.net.Address` doesn't work there. Work around for now.
+ .wasm32 => void,
+ },
+ /// Not to be confused with `Build.allocator`, which is an alias of `Build.graph.arena`.
+ gpa: Allocator,
};
pub const MakeFn = *const fn (step: *Step, options: MakeOptions) anyerror!void;
@@ -229,7 +237,17 @@ pub fn init(options: StepOptions) Step {
pub fn make(s: *Step, options: MakeOptions) error{ MakeFailed, MakeSkipped }!void {
const arena = s.owner.allocator;
- s.makeFn(s, options) catch |err| switch (err) {
+ var timer: ?std.time.Timer = t: {
+ if (!s.owner.graph.time_report) break :t null;
+ if (s.id == .compile) break :t null;
+ break :t std.time.Timer.start() catch @panic("--time-report not supported on this host");
+ };
+ const make_result = s.makeFn(s, options);
+ if (timer) |*t| {
+ options.web_server.?.updateTimeReportGeneric(s, t.read());
+ }
+
+ make_result catch |err| switch (err) {
error.MakeFailed => return error.MakeFailed,
error.MakeSkipped => return error.MakeSkipped,
else => {
@@ -372,18 +390,20 @@ pub fn evalZigProcess(
argv: []const []const u8,
prog_node: std.Progress.Node,
watch: bool,
+ web_server: ?*Build.WebServer,
+ gpa: Allocator,
) !?Path {
if (s.getZigProcess()) |zp| update: {
assert(watch);
if (std.Progress.have_ipc) if (zp.progress_ipc_fd) |fd| prog_node.setIpcFd(fd);
- const result = zigProcessUpdate(s, zp, watch) catch |err| switch (err) {
+ const result = zigProcessUpdate(s, zp, watch, web_server, gpa) catch |err| switch (err) {
error.BrokenPipe => {
// Process restart required.
const term = zp.child.wait() catch |e| {
return s.fail("unable to wait for {s}: {s}", .{ argv[0], @errorName(e) });
};
_ = term;
- s.clearZigProcess();
+ s.clearZigProcess(gpa);
break :update;
},
else => |e| return e,
@@ -398,7 +418,7 @@ pub fn evalZigProcess(
return s.fail("unable to wait for {s}: {s}", .{ argv[0], @errorName(e) });
};
s.result_peak_rss = zp.child.resource_usage_statistics.getMaxRss() orelse 0;
- s.clearZigProcess();
+ s.clearZigProcess(gpa);
try handleChildProcessTerm(s, term, null, argv);
return error.MakeFailed;
}
@@ -408,7 +428,6 @@ pub fn evalZigProcess(
assert(argv.len != 0);
const b = s.owner;
const arena = b.allocator;
- const gpa = arena;
try handleChildProcUnsupported(s, null, argv);
try handleVerbose(s.owner, null, argv);
@@ -435,9 +454,12 @@ pub fn evalZigProcess(
.progress_ipc_fd = if (std.Progress.have_ipc) child.progress_node.getIpcFd() else {},
};
if (watch) s.setZigProcess(zp);
- defer if (!watch) zp.poller.deinit();
+ defer if (!watch) {
+ zp.poller.deinit();
+ gpa.destroy(zp);
+ };
- const result = try zigProcessUpdate(s, zp, watch);
+ const result = try zigProcessUpdate(s, zp, watch, web_server, gpa);
if (!watch) {
// Send EOF to stdin.
@@ -499,7 +521,7 @@ pub fn installDir(s: *Step, dest_path: []const u8) !std.fs.Dir.MakePathStatus {
};
}
-fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path {
+fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build.WebServer, gpa: Allocator) !?Path {
const b = s.owner;
const arena = b.allocator;
@@ -537,12 +559,14 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path {
body[@sizeOf(EbHdr) + extra_bytes.len ..][0..eb_hdr.string_bytes_len];
// TODO: use @ptrCast when the compiler supports it
const unaligned_extra = std.mem.bytesAsSlice(u32, extra_bytes);
- const extra_array = try arena.alloc(u32, unaligned_extra.len);
- @memcpy(extra_array, unaligned_extra);
- s.result_error_bundle = .{
- .string_bytes = try arena.dupe(u8, string_bytes),
- .extra = extra_array,
- };
+ {
+ s.result_error_bundle = .{ .string_bytes = &.{}, .extra = &.{} };
+ errdefer s.result_error_bundle.deinit(gpa);
+ s.result_error_bundle.string_bytes = try gpa.dupe(u8, string_bytes);
+ const extra = try gpa.alloc(u32, unaligned_extra.len);
+ @memcpy(extra, unaligned_extra);
+ s.result_error_bundle.extra = extra;
+ }
// This message indicates the end of the update.
if (watch) break :poll;
},
@@ -602,6 +626,20 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path {
}
}
},
+ .time_report => if (web_server) |ws| {
+ const TimeReport = std.zig.Server.Message.TimeReport;
+ const tr: *align(1) const TimeReport = @ptrCast(body[0..@sizeOf(TimeReport)]);
+ ws.updateTimeReportCompile(.{
+ .compile = s.cast(Step.Compile).?,
+ .use_llvm = tr.flags.use_llvm,
+ .stats = tr.stats,
+ .ns_total = timer.read(),
+ .llvm_pass_timings_len = tr.llvm_pass_timings_len,
+ .files_len = tr.files_len,
+ .decls_len = tr.decls_len,
+ .trailing = body[@sizeOf(TimeReport)..],
+ });
+ },
else => {}, // ignore other messages
}
}
@@ -630,8 +668,7 @@ fn setZigProcess(s: *Step, zp: *ZigProcess) void {
}
}
-fn clearZigProcess(s: *Step) void {
- const gpa = s.owner.allocator;
+fn clearZigProcess(s: *Step, gpa: Allocator) void {
switch (s.id) {
.compile => {
const compile = s.cast(Compile).?;
@@ -947,7 +984,8 @@ fn addWatchInputFromPath(step: *Step, path: Build.Cache.Path, basename: []const
try gop.value_ptr.append(gpa, basename);
}
-fn reset(step: *Step, gpa: Allocator) void {
+/// Implementation detail of file watching and forced rebuilds. Prepares the step for being re-evaluated.
+pub fn reset(step: *Step, gpa: Allocator) void {
assert(step.state == .precheck_done);
step.result_error_msgs.clearRetainingCapacity();
diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig
index 141d18a7bff1..5e147967920e 100644
--- a/lib/std/Build/Step/Compile.zig
+++ b/lib/std/Build/Step/Compile.zig
@@ -1491,6 +1491,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
if (b.verbose_link or compile.verbose_link) try zig_args.append("--verbose-link");
if (b.verbose_cc or compile.verbose_cc) try zig_args.append("--verbose-cc");
if (b.verbose_llvm_cpu_features) try zig_args.append("--verbose-llvm-cpu-features");
+ if (b.graph.time_report) try zig_args.append("--time-report");
if (compile.generated_asm != null) try zig_args.append("-femit-asm");
if (compile.generated_bin == null) try zig_args.append("-fno-emit-bin");
@@ -1851,6 +1852,8 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
zig_args,
options.progress_node,
(b.graph.incremental == true) and options.watch,
+ options.web_server,
+ options.gpa,
) catch |err| switch (err) {
error.NeedCompileErrorCheck => {
assert(compile.expect_errors != null);
@@ -1905,9 +1908,7 @@ fn outputPath(c: *Compile, out_dir: std.Build.Cache.Path, ea: std.zig.EmitArtifa
return out_dir.joinString(arena, name) catch @panic("OOM");
}
-pub fn rebuildInFuzzMode(c: *Compile, progress_node: std.Progress.Node) !Path {
- const gpa = c.step.owner.allocator;
-
+pub fn rebuildInFuzzMode(c: *Compile, gpa: Allocator, progress_node: std.Progress.Node) !Path {
c.step.result_error_msgs.clearRetainingCapacity();
c.step.result_stderr = "";
@@ -1915,7 +1916,7 @@ pub fn rebuildInFuzzMode(c: *Compile, progress_node: std.Progress.Node) !Path {
c.step.result_error_bundle = std.zig.ErrorBundle.empty;
const zig_args = try getZigArgs(c, true);
- const maybe_output_bin_path = try c.step.evalZigProcess(zig_args, progress_node, false);
+ const maybe_output_bin_path = try c.step.evalZigProcess(zig_args, progress_node, false, null, gpa);
return maybe_output_bin_path.?;
}
diff --git a/lib/std/Build/Step/ObjCopy.zig b/lib/std/Build/Step/ObjCopy.zig
index 74f871d2fc95..3c9eb2eaa7f1 100644
--- a/lib/std/Build/Step/ObjCopy.zig
+++ b/lib/std/Build/Step/ObjCopy.zig
@@ -236,7 +236,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
try argv.appendSlice(&.{ full_src_path, full_dest_path });
try argv.append("--listen=-");
- _ = try step.evalZigProcess(argv.items, prog_node, false);
+ _ = try step.evalZigProcess(argv.items, prog_node, false, options.web_server, options.gpa);
objcopy.output_file.path = full_dest_path;
if (objcopy.output_file_debug) |*file| file.path = full_dest_path_debug;
diff --git a/lib/std/Build/Step/Options.zig b/lib/std/Build/Step/Options.zig
index 6f8c40b1d57d..fd6194f7ff7b 100644
--- a/lib/std/Build/Step/Options.zig
+++ b/lib/std/Build/Step/Options.zig
@@ -549,6 +549,7 @@ test Options {
.result = try std.zig.system.resolveTargetQuery(.{}),
},
.zig_lib_directory = std.Build.Cache.Directory.cwd(),
+ .time_report = false,
};
var builder = try std.Build.create(
diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig
index 819fc6745d4e..d317422bd9ab 100644
--- a/lib/std/Build/Step/Run.zig
+++ b/lib/std/Build/Step/Run.zig
@@ -944,7 +944,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
pub fn rerunInFuzzMode(
run: *Run,
- web_server: *std.Build.Fuzz.WebServer,
+ fuzz: *std.Build.Fuzz,
unit_test_index: u32,
prog_node: std.Progress.Node,
) !void {
@@ -984,7 +984,7 @@ pub fn rerunInFuzzMode(
const tmp_dir_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int);
try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, prog_node, .{
.unit_test_index = unit_test_index,
- .web_server = web_server,
+ .fuzz = fuzz,
});
}
@@ -1054,7 +1054,7 @@ fn termMatches(expected: ?std.process.Child.Term, actual: std.process.Child.Term
}
const FuzzContext = struct {
- web_server: *std.Build.Fuzz.WebServer,
+ fuzz: *std.Build.Fuzz,
unit_test_index: u32,
};
@@ -1638,31 +1638,31 @@ fn evalZigTest(
};
},
.coverage_id => {
- const web_server = fuzz_context.?.web_server;
+ const fuzz = fuzz_context.?.fuzz;
const msg_ptr: *align(1) const u64 = @ptrCast(body);
coverage_id = msg_ptr.*;
{
- web_server.mutex.lock();
- defer web_server.mutex.unlock();
- try web_server.msg_queue.append(web_server.gpa, .{ .coverage = .{
+ fuzz.queue_mutex.lock();
+ defer fuzz.queue_mutex.unlock();
+ try fuzz.msg_queue.append(fuzz.ws.gpa, .{ .coverage = .{
.id = coverage_id.?,
.run = run,
} });
- web_server.condition.signal();
+ fuzz.queue_cond.signal();
}
},
.fuzz_start_addr => {
- const web_server = fuzz_context.?.web_server;
+ const fuzz = fuzz_context.?.fuzz;
const msg_ptr: *align(1) const u64 = @ptrCast(body);
const addr = msg_ptr.*;
{
- web_server.mutex.lock();
- defer web_server.mutex.unlock();
- try web_server.msg_queue.append(web_server.gpa, .{ .entry_point = .{
+ fuzz.queue_mutex.lock();
+ defer fuzz.queue_mutex.unlock();
+ try fuzz.msg_queue.append(fuzz.ws.gpa, .{ .entry_point = .{
.addr = addr,
.coverage_id = coverage_id.?,
} });
- web_server.condition.signal();
+ fuzz.queue_cond.signal();
}
},
else => {}, // ignore other messages
diff --git a/lib/std/Build/Step/TranslateC.zig b/lib/std/Build/Step/TranslateC.zig
index 7187aaf8c68a..53c4007e26e7 100644
--- a/lib/std/Build/Step/TranslateC.zig
+++ b/lib/std/Build/Step/TranslateC.zig
@@ -187,7 +187,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
const c_source_path = translate_c.source.getPath2(b, step);
try argv_list.append(c_source_path);
- const output_dir = try step.evalZigProcess(argv_list.items, prog_node, false);
+ const output_dir = try step.evalZigProcess(argv_list.items, prog_node, false, options.web_server, options.gpa);
const basename = std.fs.path.stem(std.fs.path.basename(c_source_path));
translate_c.out_basename = b.fmt("{s}.zig", .{basename});
diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig
new file mode 100644
index 000000000000..9264d7473c61
--- /dev/null
+++ b/lib/std/Build/WebServer.zig
@@ -0,0 +1,823 @@
+gpa: Allocator,
+thread_pool: *std.Thread.Pool,
+graph: *const Build.Graph,
+all_steps: []const *Build.Step,
+listen_address: std.net.Address,
+ttyconf: std.io.tty.Config,
+root_prog_node: std.Progress.Node,
+watch: bool,
+
+tcp_server: ?std.net.Server,
+serve_thread: ?std.Thread,
+
+base_timestamp: i128,
+/// The "step name" data which trails `abi.Hello`, for the steps in `all_steps`.
+step_names_trailing: []u8,
+
+/// The bit-packed "step status" data. Values are `abi.StepUpdate.Status`. LSBs are earlier steps.
+/// Accessed atomically.
+step_status_bits: []u8,
+
+fuzz: ?Fuzz,
+time_report_mutex: std.Thread.Mutex,
+time_report_msgs: [][]u8,
+time_report_update_times: []i64,
+
+build_status: std.atomic.Value(abi.BuildStatus),
+/// When an event occurs which means WebSocket clients should be sent updates, call `notifyUpdate`
+/// to increment this value. Each client thread waits for this increment with `std.Thread.Futex`, so
+/// `notifyUpdate` will wake those threads. Updates are sent on a short interval regardless, so it
+/// is recommended to only use `notifyUpdate` for changes which the user should see immediately. For
+/// instance, we do not call `notifyUpdate` when the number of "unique runs" in the fuzzer changes,
+/// because this value changes quickly so this would result in constantly spamming all clients with
+/// an unreasonable number of packets.
+update_id: std.atomic.Value(u32),
+
+runner_request_mutex: std.Thread.Mutex,
+runner_request_ready_cond: std.Thread.Condition,
+runner_request_empty_cond: std.Thread.Condition,
+runner_request: ?RunnerRequest,
+
+/// If a client is not explicitly notified of changes with `notifyUpdate`, it will be sent updates
+/// on a fixed interval of this many milliseconds.
+const default_update_interval_ms = 500;
+
+/// Thread-safe. Triggers updates to be sent to connected WebSocket clients; see `update_id`.
+pub fn notifyUpdate(ws: *WebServer) void {
+ _ = ws.update_id.rmw(.Add, 1, .release);
+ std.Thread.Futex.wake(&ws.update_id, 16);
+}
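+
+// Illustrative sketch of the consuming side of `update_id` (the real loop lives in
+// `serveWebSocket` below): snapshot the counter, send one round of updates, then sleep on the
+// futex until `notifyUpdate` bumps it or the fallback interval elapses. `exampleWaitForUpdate`
+// is a hypothetical helper for illustration only.
+fn exampleWaitForUpdate(ws: *WebServer) void {
+ const seen = ws.update_id.load(.acquire);
+ // ... send any pending updates to the connected client here ...
+ std.Thread.Futex.timedWait(&ws.update_id, seen, std.time.ns_per_ms * default_update_interval_ms) catch {};
+}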
+
+pub const Options = struct {
+ gpa: Allocator,
+ thread_pool: *std.Thread.Pool,
+ graph: *const std.Build.Graph,
+ all_steps: []const *Build.Step,
+ ttyconf: std.io.tty.Config,
+ root_prog_node: std.Progress.Node,
+ watch: bool,
+ listen_address: std.net.Address,
+};
+pub fn init(opts: Options) WebServer {
+ if (builtin.single_threaded) {
+ // The upcoming `std.Io` interface should allow us to use `Io.async` and `Io.concurrent`
+ // instead of threads, so that the web server can function in single-threaded builds.
+ std.process.fatal("--webui not yet implemented for single-threaded builds", .{});
+ }
+
+ if (builtin.os.tag == .windows) {
+ // At the time of writing, there are two bugs in the standard library which break this feature on Windows:
+ // * Reading from a socket on one thread while writing to it on another seems to deadlock.
+ // * Vectored writes to sockets currently trigger an infinite loop when a buffer has length 0.
+ //
+ // Both of these bugs are expected to be solved by changes which are currently in the unmerged
+ // 'wrangle-writer-buffering' branch. Until that makes it in, this must remain disabled.
+ std.process.fatal("--webui is currently disabled on Windows due to bugs", .{});
+ }
+
+ const all_steps = opts.all_steps;
+
+ const step_names_trailing = opts.gpa.alloc(u8, len: {
+ var name_bytes: usize = 0;
+ for (all_steps) |step| name_bytes += step.name.len;
+ break :len name_bytes + all_steps.len * 4;
+ }) catch @panic("out of memory");
+ {
+ const step_name_lens: []align(1) u32 = @ptrCast(step_names_trailing[0 .. all_steps.len * 4]);
+ var idx: usize = all_steps.len * 4;
+ for (all_steps, step_name_lens) |step, *name_len| {
+ name_len.* = @intCast(step.name.len);
+ @memcpy(step_names_trailing[idx..][0..step.name.len], step.name);
+ idx += step.name.len;
+ }
+ assert(idx == step_names_trailing.len);
+ }
+
+ const step_status_bits = opts.gpa.alloc(
+ u8,
+ std.math.divCeil(usize, all_steps.len, 4) catch unreachable,
+ ) catch @panic("out of memory");
+ @memset(step_status_bits, 0);
+
+ const time_reports_len: usize = if (opts.graph.time_report) all_steps.len else 0;
+ const time_report_msgs = opts.gpa.alloc([]u8, time_reports_len) catch @panic("out of memory");
+ const time_report_update_times = opts.gpa.alloc(i64, time_reports_len) catch @panic("out of memory");
+ @memset(time_report_msgs, &.{});
+ @memset(time_report_update_times, std.math.minInt(i64));
+
+ return .{
+ .gpa = opts.gpa,
+ .thread_pool = opts.thread_pool,
+ .graph = opts.graph,
+ .all_steps = all_steps,
+ .listen_address = opts.listen_address,
+ .ttyconf = opts.ttyconf,
+ .root_prog_node = opts.root_prog_node,
+ .watch = opts.watch,
+
+ .tcp_server = null,
+ .serve_thread = null,
+
+ .base_timestamp = std.time.nanoTimestamp(),
+ .step_names_trailing = step_names_trailing,
+
+ .step_status_bits = step_status_bits,
+
+ .fuzz = null,
+ .time_report_mutex = .{},
+ .time_report_msgs = time_report_msgs,
+ .time_report_update_times = time_report_update_times,
+
+ .build_status = .init(.idle),
+ .update_id = .init(0),
+
+ .runner_request_mutex = .{},
+ .runner_request_ready_cond = .{},
+ .runner_request_empty_cond = .{},
+ .runner_request = null,
+ };
+}
+pub fn deinit(ws: *WebServer) void {
+ const gpa = ws.gpa;
+
+ if (ws.fuzz) |*f| f.deinit();
+ for (ws.time_report_msgs) |msg| gpa.free(msg);
+ gpa.free(ws.time_report_msgs);
+ gpa.free(ws.time_report_update_times);
+
+ if (ws.serve_thread) |t| {
+ if (ws.tcp_server) |*s| s.stream.close();
+ t.join();
+ }
+ if (ws.tcp_server) |*s| s.deinit();
+
+ gpa.free(ws.step_names_trailing);
+ gpa.free(ws.step_status_bits);
+}
+pub fn start(ws: *WebServer) error{AlreadyReported}!void {
+ assert(ws.tcp_server == null);
+ assert(ws.serve_thread == null);
+
+ ws.tcp_server = ws.listen_address.listen(.{ .reuse_address = true }) catch |err| {
+ log.err("failed to listen to port {d}: {s}", .{ ws.listen_address.getPort(), @errorName(err) });
+ return error.AlreadyReported;
+ };
+ ws.serve_thread = std.Thread.spawn(.{}, serve, .{ws}) catch |err| {
+ log.err("unable to spawn web server thread: {s}", .{@errorName(err)});
+ ws.tcp_server.?.deinit();
+ ws.tcp_server = null;
+ return error.AlreadyReported;
+ };
+
+ log.info("web interface listening at http://{f}/", .{ws.tcp_server.?.listen_address});
+ if (ws.listen_address.getPort() == 0) {
+ log.info("hint: pass '--webui={f}' to use the same port next time", .{ws.tcp_server.?.listen_address});
+ }
+}
+fn serve(ws: *WebServer) void {
+ while (true) {
+ const connection = ws.tcp_server.?.accept() catch |err| {
+ log.err("failed to accept connection: {s}", .{@errorName(err)});
+ return;
+ };
+ _ = std.Thread.spawn(.{}, accept, .{ ws, connection }) catch |err| {
+ log.err("unable to spawn connection thread: {s}", .{@errorName(err)});
+ connection.stream.close();
+ continue;
+ };
+ }
+}
+
+pub fn startBuild(ws: *WebServer) void {
+ if (ws.fuzz) |*fuzz| {
+ fuzz.deinit();
+ ws.fuzz = null;
+ }
+ for (ws.step_status_bits) |*bits| @atomicStore(u8, bits, 0, .monotonic);
+ ws.build_status.store(.running, .monotonic);
+ ws.notifyUpdate();
+}
+
+pub fn updateStepStatus(ws: *WebServer, step: *Build.Step, new_status: abi.StepUpdate.Status) void {
+ const step_idx: u32 = for (ws.all_steps, 0..) |s, i| {
+ if (s == step) break @intCast(i);
+ } else unreachable;
+ const ptr = &ws.step_status_bits[step_idx / 4];
+ const bit_offset: u3 = @intCast((step_idx % 4) * 2);
+ const old_bits: u2 = @truncate(@atomicLoad(u8, ptr, .monotonic) >> bit_offset);
+ const mask = @as(u8, @intFromEnum(new_status) ^ old_bits) << bit_offset;
+ _ = @atomicRmw(u8, ptr, .Xor, mask, .monotonic);
+ ws.notifyUpdate();
+}
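+
+// Illustrative sketch of reading a status back out of the packed array above: four 2-bit
+// `abi.StepUpdate.Status` values per byte, with earlier steps in the less significant bits.
+// `exampleStepStatus` is a hypothetical helper for illustration only.
+fn exampleStepStatus(ws: *const WebServer, step_idx: usize) abi.StepUpdate.Status {
+ const byte = @atomicLoad(u8, &ws.step_status_bits[step_idx / 4], .monotonic);
+ const bit_offset: u3 = @intCast((step_idx % 4) * 2);
+ return @enumFromInt(@as(u2, @truncate(byte >> bit_offset)));
+}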
+
+pub fn finishBuild(ws: *WebServer, opts: struct {
+ fuzz: bool,
+}) void {
+ if (opts.fuzz) {
+ switch (builtin.os.tag) {
+ // Current implementation depends on two things that need to be ported to Windows:
+ // * Memory-mapping to share data between the fuzzer and build runner.
+ // * COFF/PE support added to `std.debug.Info` (it needs a batching API for resolving
+ // many addresses to source locations).
+ .windows => std.process.fatal("--fuzz not yet implemented for {s}", .{@tagName(builtin.os.tag)}),
+ else => {},
+ }
+ if (@bitSizeOf(usize) != 64) {
+ // Current implementation depends on posix.mmap()'s second parameter, `length: usize`,
+ // being compatible with `std.fs.getEndPos() u64`'s return value. This is not the case
+ // on 32-bit platforms.
+ // Affects or affected by issues #5185, #22523, and #22464.
+ std.process.fatal("--fuzz not yet implemented on {d}-bit platforms", .{@bitSizeOf(usize)});
+ }
+ assert(ws.fuzz == null);
+
+ ws.build_status.store(.fuzz_init, .monotonic);
+ ws.notifyUpdate();
+
+ ws.fuzz = Fuzz.init(ws) catch |err| std.process.fatal("failed to start fuzzer: {s}", .{@errorName(err)});
+ ws.fuzz.?.start();
+ }
+
+ ws.build_status.store(if (ws.watch) .watching else .idle, .monotonic);
+ ws.notifyUpdate();
+}
+
+pub fn now(s: *const WebServer) i64 {
+ return @intCast(std.time.nanoTimestamp() - s.base_timestamp);
+}
+
+fn accept(ws: *WebServer, connection: std.net.Server.Connection) void {
+ defer connection.stream.close();
+
+ var read_buf: [0x4000]u8 = undefined;
+ var server: std.http.Server = .init(connection, &read_buf);
+
+ while (true) {
+ var request = server.receiveHead() catch |err| switch (err) {
+ error.HttpConnectionClosing => return,
+ else => {
+ log.err("failed to receive http request: {s}", .{@errorName(err)});
+ return;
+ },
+ };
+ var ws_send_buf: [0x4000]u8 = undefined;
+ var ws_recv_buf: [0x4000]u8 align(4) = undefined;
+ if (std.http.WebSocket.init(&request, &ws_send_buf, &ws_recv_buf) catch |err| {
+ log.err("failed to initialize websocket connection: {s}", .{@errorName(err)});
+ return;
+ }) |ws_init| {
+ var web_socket = ws_init;
+ ws.serveWebSocket(&web_socket) catch |err| {
+ log.err("failed to serve websocket: {s}", .{@errorName(err)});
+ return;
+ };
+ comptime unreachable;
+ } else {
+ ws.serveRequest(&request) catch |err| switch (err) {
+ error.AlreadyReported => return,
+ else => {
+ log.err("failed to serve '{s}': {s}", .{ request.head.target, @errorName(err) });
+ return;
+ },
+ };
+ }
+ }
+}
+
+fn makeIov(s: []const u8) std.posix.iovec_const {
+ return .{
+ .base = s.ptr,
+ .len = s.len,
+ };
+}
+fn serveWebSocket(ws: *WebServer, sock: *std.http.WebSocket) !noreturn {
+ var prev_build_status = ws.build_status.load(.monotonic);
+
+ const prev_step_status_bits = try ws.gpa.alloc(u8, ws.step_status_bits.len);
+ defer ws.gpa.free(prev_step_status_bits);
+ for (prev_step_status_bits, ws.step_status_bits) |*copy, *shared| {
+ copy.* = @atomicLoad(u8, shared, .monotonic);
+ }
+
+ _ = try std.Thread.spawn(.{}, recvWebSocketMessages, .{ ws, sock });
+
+ {
+ const hello_header: abi.Hello = .{
+ .status = prev_build_status,
+ .flags = .{
+ .time_report = ws.graph.time_report,
+ },
+ .timestamp = ws.now(),
+ .steps_len = @intCast(ws.all_steps.len),
+ };
+ try sock.writeMessagev(&.{
+ makeIov(@ptrCast(&hello_header)),
+ makeIov(ws.step_names_trailing),
+ makeIov(prev_step_status_bits),
+ }, .binary);
+ }
+
+ var prev_fuzz: Fuzz.Previous = .init;
+ var prev_time: i64 = std.math.minInt(i64);
+ while (true) {
+ const start_time = ws.now();
+ const start_update_id = ws.update_id.load(.acquire);
+
+ if (ws.fuzz) |*fuzz| {
+ try fuzz.sendUpdate(sock, &prev_fuzz);
+ }
+
+ {
+ ws.time_report_mutex.lock();
+ defer ws.time_report_mutex.unlock();
+ for (ws.time_report_msgs, ws.time_report_update_times) |msg, update_time| {
+ if (update_time <= prev_time) continue;
+ // We want to send `msg`, but shouldn't block `ws.time_report_mutex` while we do, so
+ // that we don't hold up the build system on the client accepting this packet.
+ const owned_msg = try ws.gpa.dupe(u8, msg);
+ defer ws.gpa.free(owned_msg);
+ // Temporarily unlock, then re-lock after the message is sent.
+ ws.time_report_mutex.unlock();
+ defer ws.time_report_mutex.lock();
+ try sock.writeMessage(owned_msg, .binary);
+ }
+ }
+
+ {
+ const build_status = ws.build_status.load(.monotonic);
+ if (build_status != prev_build_status) {
+ prev_build_status = build_status;
+ const msg: abi.StatusUpdate = .{ .new = build_status };
+ try sock.writeMessage(@ptrCast(&msg), .binary);
+ }
+ }
+
+ for (prev_step_status_bits, ws.step_status_bits, 0..) |*prev_byte, *shared, byte_idx| {
+ const cur_byte = @atomicLoad(u8, shared, .monotonic);
+ if (prev_byte.* == cur_byte) continue;
+ const cur: [4]abi.StepUpdate.Status = .{
+ @enumFromInt(@as(u2, @truncate(cur_byte >> 0))),
+ @enumFromInt(@as(u2, @truncate(cur_byte >> 2))),
+ @enumFromInt(@as(u2, @truncate(cur_byte >> 4))),
+ @enumFromInt(@as(u2, @truncate(cur_byte >> 6))),
+ };
+ const prev: [4]abi.StepUpdate.Status = .{
+ @enumFromInt(@as(u2, @truncate(prev_byte.* >> 0))),
+ @enumFromInt(@as(u2, @truncate(prev_byte.* >> 2))),
+ @enumFromInt(@as(u2, @truncate(prev_byte.* >> 4))),
+ @enumFromInt(@as(u2, @truncate(prev_byte.* >> 6))),
+ };
+ for (cur, prev, byte_idx * 4..) |cur_status, prev_status, step_idx| {
+ const msg: abi.StepUpdate = .{ .step_idx = @intCast(step_idx), .bits = .{ .status = cur_status } };
+ if (cur_status != prev_status) try sock.writeMessage(@ptrCast(&msg), .binary);
+ }
+ prev_byte.* = cur_byte;
+ }
+
+ prev_time = start_time;
+ std.Thread.Futex.timedWait(&ws.update_id, start_update_id, std.time.ns_per_ms * default_update_interval_ms) catch {};
+ }
+}
+fn recvWebSocketMessages(ws: *WebServer, sock: *std.http.WebSocket) void {
+ while (true) {
+ const msg = sock.readSmallMessage() catch return;
+ if (msg.opcode != .binary) continue;
+ if (msg.data.len == 0) continue;
+ const tag: abi.ToServerTag = @enumFromInt(msg.data[0]);
+ switch (tag) {
+ _ => continue,
+ .rebuild => while (true) {
+ ws.runner_request_mutex.lock();
+ defer ws.runner_request_mutex.unlock();
+ if (ws.runner_request == null) {
+ ws.runner_request = .rebuild;
+ ws.runner_request_ready_cond.signal();
+ break;
+ }
+ ws.runner_request_empty_cond.wait(&ws.runner_request_mutex);
+ },
+ }
+ }
+}
+
+fn serveRequest(ws: *WebServer, req: *std.http.Server.Request) !void {
+ // Strip an optional leading '/debug' component from the request.
+ const target: []const u8, const debug: bool = target: {
+ if (mem.eql(u8, req.head.target, "/debug")) break :target .{ "/", true };
+ if (mem.eql(u8, req.head.target, "/debug/")) break :target .{ "/", true };
+ if (mem.startsWith(u8, req.head.target, "/debug/")) break :target .{ req.head.target["/debug".len..], true };
+ break :target .{ req.head.target, false };
+ };
+
+ if (mem.eql(u8, target, "/")) return serveLibFile(ws, req, "build-web/index.html", "text/html");
+ if (mem.eql(u8, target, "/main.js")) return serveLibFile(ws, req, "build-web/main.js", "application/javascript");
+ if (mem.eql(u8, target, "/style.css")) return serveLibFile(ws, req, "build-web/style.css", "text/css");
+ if (mem.eql(u8, target, "/time_report.css")) return serveLibFile(ws, req, "build-web/time_report.css", "text/css");
+ if (mem.eql(u8, target, "/main.wasm")) return serveClientWasm(ws, req, if (debug) .Debug else .ReleaseFast);
+
+ if (ws.fuzz) |*fuzz| {
+ if (mem.eql(u8, target, "/sources.tar")) return fuzz.serveSourcesTar(req);
+ }
+
+ try req.respond("not found", .{
+ .status = .not_found,
+ .extra_headers = &.{
+ .{ .name = "Content-Type", .value = "text/plain" },
+ },
+ });
+}
+
+fn serveLibFile(
+ ws: *WebServer,
+ request: *std.http.Server.Request,
+ sub_path: []const u8,
+ content_type: []const u8,
+) !void {
+ return serveFile(ws, request, .{
+ .root_dir = ws.graph.zig_lib_directory,
+ .sub_path = sub_path,
+ }, content_type);
+}
+fn serveClientWasm(
+ ws: *WebServer,
+ req: *std.http.Server.Request,
+ optimize_mode: std.builtin.OptimizeMode,
+) !void {
+ var arena_state: std.heap.ArenaAllocator = .init(ws.gpa);
+ defer arena_state.deinit();
+ const arena = arena_state.allocator();
+
+ // We always rebuild the wasm on-the-fly, so that if it is edited the user can just refresh the page.
+ const bin_path = try buildClientWasm(ws, arena, optimize_mode);
+ return serveFile(ws, req, bin_path, "application/wasm");
+}
+
+pub fn serveFile(
+ ws: *WebServer,
+ request: *std.http.Server.Request,
+ path: Cache.Path,
+ content_type: []const u8,
+) !void {
+ const gpa = ws.gpa;
+ // The desired API is actually sendfile, which will require enhancing std.http.Server.
+ // We load the file with every request so that the user can make changes to the file
+ // and refresh the HTML page without restarting this server.
+ const file_contents = path.root_dir.handle.readFileAlloc(gpa, path.sub_path, 10 * 1024 * 1024) catch |err| {
+ log.err("failed to read '{f}': {s}", .{ path, @errorName(err) });
+ return error.AlreadyReported;
+ };
+ defer gpa.free(file_contents);
+ try request.respond(file_contents, .{
+ .extra_headers = &.{
+ .{ .name = "Content-Type", .value = content_type },
+ cache_control_header,
+ },
+ });
+}
+pub fn serveTarFile(
+ ws: *WebServer,
+ request: *std.http.Server.Request,
+ paths: []const Cache.Path,
+) !void {
+ const gpa = ws.gpa;
+
+ var send_buf: [0x4000]u8 = undefined;
+ var response = request.respondStreaming(.{
+ .send_buffer = &send_buf,
+ .respond_options = .{
+ .extra_headers = &.{
+ .{ .name = "Content-Type", .value = "application/x-tar" },
+ cache_control_header,
+ },
+ },
+ });
+
+ var cached_cwd_path: ?[]const u8 = null;
+ defer if (cached_cwd_path) |p| gpa.free(p);
+
+ var response_buf: [1024]u8 = undefined;
+ var adapter = response.writer().adaptToNewApi();
+ adapter.new_interface.buffer = &response_buf;
+ var archiver: std.tar.Writer = .{ .underlying_writer = &adapter.new_interface };
+
+ for (paths) |path| {
+ var file = path.root_dir.handle.openFile(path.sub_path, .{}) catch |err| {
+ log.err("failed to open '{f}': {s}", .{ path, @errorName(err) });
+ continue;
+ };
+ defer file.close();
+ const stat = try file.stat();
+ var read_buffer: [1024]u8 = undefined;
+ var file_reader: std.fs.File.Reader = .initSize(file, &read_buffer, stat.size);
+
+ // TODO: this logic is completely bogus -- obviously so, because `path.root_dir.path` can
+ // be cwd-relative. This is also related to why linkification doesn't work in the fuzzer UI:
+ // it turns out the WASM treats the first path component as the module name, typically
+ // resulting in modules named "" and "src". The compiler needs to tell the build system
+ // about the module graph so that the build system can correctly encode this information in
+ // the tar file.
+ archiver.prefix = path.root_dir.path orelse cwd: {
+ if (cached_cwd_path == null) cached_cwd_path = try std.process.getCwdAlloc(gpa);
+ break :cwd cached_cwd_path.?;
+ };
+ try archiver.writeFile(path.sub_path, &file_reader, stat.mtime);
+ }
+
+ // intentionally not calling `archiver.finishPedantically`
+ try adapter.new_interface.flush();
+ try response.end();
+}
+
+fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.OptimizeMode) !Cache.Path {
+ const root_name = "build-web";
+ const arch_os_abi = "wasm32-freestanding";
+ const cpu_features = "baseline+atomics+bulk_memory+multivalue+mutable_globals+nontrapping_fptoint+reference_types+sign_ext";
+
+ const gpa = ws.gpa;
+ const graph = ws.graph;
+
+ const main_src_path: Cache.Path = .{
+ .root_dir = graph.zig_lib_directory,
+ .sub_path = "build-web/main.zig",
+ };
+ const walk_src_path: Cache.Path = .{
+ .root_dir = graph.zig_lib_directory,
+ .sub_path = "docs/wasm/Walk.zig",
+ };
+ const html_render_src_path: Cache.Path = .{
+ .root_dir = graph.zig_lib_directory,
+ .sub_path = "docs/wasm/html_render.zig",
+ };
+
+ var argv: std.ArrayListUnmanaged([]const u8) = .empty;
+
+ try argv.appendSlice(arena, &.{
+ graph.zig_exe, "build-exe", //
+ "-fno-entry", //
+ "-O", @tagName(optimize), //
+ "-target", arch_os_abi, //
+ "-mcpu", cpu_features, //
+ "--cache-dir", graph.global_cache_root.path orelse ".", //
+ "--global-cache-dir", graph.global_cache_root.path orelse ".", //
+ "--zig-lib-dir", graph.zig_lib_directory.path orelse ".", //
+ "--name", root_name, //
+ "-rdynamic", //
+ "-fsingle-threaded", //
+ "--dep", "Walk", //
+ "--dep", "html_render", //
+ try std.fmt.allocPrint(arena, "-Mroot={f}", .{main_src_path}), //
+ try std.fmt.allocPrint(arena, "-MWalk={f}", .{walk_src_path}), //
+ "--dep", "Walk", //
+ try std.fmt.allocPrint(arena, "-Mhtml_render={f}", .{html_render_src_path}), //
+ "--listen=-",
+ });
+
+ var child: std.process.Child = .init(argv.items, gpa);
+ child.stdin_behavior = .Pipe;
+ child.stdout_behavior = .Pipe;
+ child.stderr_behavior = .Pipe;
+ try child.spawn();
+
+ var poller = std.io.poll(gpa, enum { stdout, stderr }, .{
+ .stdout = child.stdout.?,
+ .stderr = child.stderr.?,
+ });
+ defer poller.deinit();
+
+ try child.stdin.?.writeAll(@ptrCast(@as([]const std.zig.Client.Message.Header, &.{
+ .{ .tag = .update, .bytes_len = 0 },
+ .{ .tag = .exit, .bytes_len = 0 },
+ })));
+
+ const Header = std.zig.Server.Message.Header;
+ var result: ?Cache.Path = null;
+ var result_error_bundle = std.zig.ErrorBundle.empty;
+
+ const stdout = poller.reader(.stdout);
+
+ poll: while (true) {
+ while (stdout.buffered().len < @sizeOf(Header)) if (!(try poller.poll())) break :poll;
+ const header = stdout.takeStruct(Header, .little) catch unreachable;
+ while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
+ const body = stdout.take(header.bytes_len) catch unreachable;
+
+ switch (header.tag) {
+ .zig_version => {
+ if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
+ return error.ZigProtocolVersionMismatch;
+ }
+ },
+ .error_bundle => {
+ const EbHdr = std.zig.Server.Message.ErrorBundle;
+ const eb_hdr = @as(*align(1) const EbHdr, @ptrCast(body));
+ const extra_bytes =
+ body[@sizeOf(EbHdr)..][0 .. @sizeOf(u32) * eb_hdr.extra_len];
+ const string_bytes =
+ body[@sizeOf(EbHdr) + extra_bytes.len ..][0..eb_hdr.string_bytes_len];
+ const unaligned_extra: []align(1) const u32 = @ptrCast(extra_bytes);
+ const extra_array = try arena.alloc(u32, unaligned_extra.len);
+ @memcpy(extra_array, unaligned_extra);
+ result_error_bundle = .{
+ .string_bytes = try arena.dupe(u8, string_bytes),
+ .extra = extra_array,
+ };
+ },
+ .emit_digest => {
+ const EmitDigest = std.zig.Server.Message.EmitDigest;
+ const ebp_hdr: *align(1) const EmitDigest = @ptrCast(body);
+ if (!ebp_hdr.flags.cache_hit) {
+ log.info("source changes detected; rebuilt wasm component", .{});
+ }
+ const digest = body[@sizeOf(EmitDigest)..][0..Cache.bin_digest_len];
+ result = .{
+ .root_dir = graph.global_cache_root,
+ .sub_path = try arena.dupe(u8, "o" ++ std.fs.path.sep_str ++ Cache.binToHex(digest.*)),
+ };
+ },
+ else => {}, // ignore other messages
+ }
+ }
+
+ const stderr_contents = try poller.toOwnedSlice(.stderr);
+ if (stderr_contents.len > 0) {
+ std.debug.print("{s}", .{stderr_contents});
+ }
+
+ // Send EOF to stdin.
+ child.stdin.?.close();
+ child.stdin = null;
+
+ switch (try child.wait()) {
+ .Exited => |code| {
+ if (code != 0) {
+ log.err(
+ "the following command exited with error code {d}:\n{s}",
+ .{ code, try Build.Step.allocPrintCmd(arena, null, argv.items) },
+ );
+ return error.WasmCompilationFailed;
+ }
+ },
+ .Signal, .Stopped, .Unknown => {
+ log.err(
+ "the following command terminated unexpectedly:\n{s}",
+ .{try Build.Step.allocPrintCmd(arena, null, argv.items)},
+ );
+ return error.WasmCompilationFailed;
+ },
+ }
+
+ if (result_error_bundle.errorMessageCount() > 0) {
+ const color = std.zig.Color.auto;
+ result_error_bundle.renderToStdErr(color.renderOptions());
+ log.err("the following command failed with {d} compilation errors:\n{s}", .{
+ result_error_bundle.errorMessageCount(),
+ try Build.Step.allocPrintCmd(arena, null, argv.items),
+ });
+ return error.WasmCompilationFailed;
+ }
+
+ const base_path = result orelse {
+ log.err("child process failed to report result\n{s}", .{
+ try Build.Step.allocPrintCmd(arena, null, argv.items),
+ });
+ return error.WasmCompilationFailed;
+ };
+ const bin_name = try std.zig.binNameAlloc(arena, .{
+ .root_name = root_name,
+ .target = &(std.zig.system.resolveTargetQuery(std.Build.parseTargetQuery(.{
+ .arch_os_abi = arch_os_abi,
+ .cpu_features = cpu_features,
+ }) catch unreachable) catch unreachable),
+ .output_mode = .Exe,
+ });
+ return base_path.join(arena, bin_name);
+}
+
+pub fn updateTimeReportCompile(ws: *WebServer, opts: struct {
+ compile: *Build.Step.Compile,
+
+ use_llvm: bool,
+ stats: abi.time_report.CompileResult.Stats,
+ ns_total: u64,
+
+ llvm_pass_timings_len: u32,
+ files_len: u32,
+ decls_len: u32,
+
+ /// The trailing data of `abi.time_report.CompileResult`, except the step name.
+ trailing: []const u8,
+}) void {
+ const gpa = ws.gpa;
+
+ const step_idx: u32 = for (ws.all_steps, 0..) |s, i| {
+ if (s == &opts.compile.step) break @intCast(i);
+ } else unreachable;
+
+ const old_buf = old: {
+ ws.time_report_mutex.lock();
+ defer ws.time_report_mutex.unlock();
+ const old = ws.time_report_msgs[step_idx];
+ ws.time_report_msgs[step_idx] = &.{};
+ break :old old;
+ };
+ const buf = gpa.realloc(old_buf, @sizeOf(abi.time_report.CompileResult) + opts.trailing.len) catch @panic("out of memory");
+
+ const out_header: *align(1) abi.time_report.CompileResult = @ptrCast(buf[0..@sizeOf(abi.time_report.CompileResult)]);
+ out_header.* = .{
+ .step_idx = step_idx,
+ .flags = .{
+ .use_llvm = opts.use_llvm,
+ },
+ .stats = opts.stats,
+ .ns_total = opts.ns_total,
+ .llvm_pass_timings_len = opts.llvm_pass_timings_len,
+ .files_len = opts.files_len,
+ .decls_len = opts.decls_len,
+ };
+ @memcpy(buf[@sizeOf(abi.time_report.CompileResult)..], opts.trailing);
+
+ {
+ ws.time_report_mutex.lock();
+ defer ws.time_report_mutex.unlock();
+ assert(ws.time_report_msgs[step_idx].len == 0);
+ ws.time_report_msgs[step_idx] = buf;
+ ws.time_report_update_times[step_idx] = ws.now();
+ }
+ ws.notifyUpdate();
+}
+
+pub fn updateTimeReportGeneric(ws: *WebServer, step: *Build.Step, ns_total: u64) void {
+ const gpa = ws.gpa;
+
+ const step_idx: u32 = for (ws.all_steps, 0..) |s, i| {
+ if (s == step) break @intCast(i);
+ } else unreachable;
+
+ const old_buf = old: {
+ ws.time_report_mutex.lock();
+ defer ws.time_report_mutex.unlock();
+ const old = ws.time_report_msgs[step_idx];
+ ws.time_report_msgs[step_idx] = &.{};
+ break :old old;
+ };
+ const buf = gpa.realloc(old_buf, @sizeOf(abi.time_report.GenericResult)) catch @panic("out of memory");
+ const out: *align(1) abi.time_report.GenericResult = @ptrCast(buf);
+ out.* = .{
+ .step_idx = step_idx,
+ .ns_total = ns_total,
+ };
+ {
+ ws.time_report_mutex.lock();
+ defer ws.time_report_mutex.unlock();
+ assert(ws.time_report_msgs[step_idx].len == 0);
+ ws.time_report_msgs[step_idx] = buf;
+ ws.time_report_update_times[step_idx] = ws.now();
+ }
+ ws.notifyUpdate();
+}
+
+const RunnerRequest = union(enum) {
+ rebuild,
+};
+pub fn getRunnerRequest(ws: *WebServer) ?RunnerRequest {
+ ws.runner_request_mutex.lock();
+ defer ws.runner_request_mutex.unlock();
+ if (ws.runner_request) |req| {
+ ws.runner_request = null;
+ ws.runner_request_empty_cond.signal();
+ return req;
+ }
+ return null;
+}
+pub fn wait(ws: *WebServer) RunnerRequest {
+ ws.runner_request_mutex.lock();
+ defer ws.runner_request_mutex.unlock();
+ while (true) {
+ if (ws.runner_request) |req| {
+ ws.runner_request = null;
+ ws.runner_request_empty_cond.signal();
+ return req;
+ }
+ ws.runner_request_ready_cond.wait(&ws.runner_request_mutex);
+ }
+}
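+
+// Illustrative sketch of how the build runner side might consume these requests while idling
+// between builds; `exampleRunnerLoop` is a hypothetical helper and the prong body is elided.
+fn exampleRunnerLoop(ws: *WebServer) void {
+ while (true) switch (ws.wait()) {
+ .rebuild => {
+ // Re-evaluate the step graph here, bracketed by `startBuild` and `finishBuild`.
+ },
+ };
+}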
+
+const cache_control_header: std.http.Header = .{
+ .name = "Cache-Control",
+ .value = "max-age=0, must-revalidate",
+};
+
+const builtin = @import("builtin");
+const std = @import("std");
+const assert = std.debug.assert;
+const mem = std.mem;
+const log = std.log.scoped(.web_server);
+const Allocator = std.mem.Allocator;
+const Build = std.Build;
+const Cache = Build.Cache;
+const Fuzz = Build.Fuzz;
+const abi = Build.abi;
+
+const WebServer = @This();
diff --git a/lib/std/Build/abi.zig b/lib/std/Build/abi.zig
new file mode 100644
index 000000000000..d5b02d951abf
--- /dev/null
+++ b/lib/std/Build/abi.zig
@@ -0,0 +1,313 @@
+//! This file is shared among Zig code running in wildly different contexts:
+//! * The build runner, running on the host computer
+//! * The build system web interface Wasm code, running in the browser
+//! * `libfuzzer`, compiled alongside unit tests
+//!
+//! All of these components interface to some degree via an ABI:
+//! * The build runner communicates with the web interface over a WebSocket connection
+//! * The build runner communicates with `libfuzzer` over a shared memory-mapped file
+
+// Check that no WebSocket message type has implicit padding bits. This ensures we never send any
+// undefined bits over the wire, and also helps validate that the layout doesn't differ between, for
+// instance, the web server in `std.Build` and the Wasm client.
+comptime {
+ const check = struct {
+ fn check(comptime T: type) void {
+ const std = @import("std");
+ std.debug.assert(@typeInfo(T) == .@"struct");
+ std.debug.assert(@typeInfo(T).@"struct".layout == .@"extern");
+ std.debug.assert(std.meta.hasUniqueRepresentation(T));
+ }
+ }.check;
+
+ // server->client
+ check(Hello);
+ check(StatusUpdate);
+ check(StepUpdate);
+ check(fuzz.SourceIndexHeader);
+ check(fuzz.CoverageUpdateHeader);
+ check(fuzz.EntryPointHeader);
+ check(time_report.GenericResult);
+ check(time_report.CompileResult);
+
+ // client->server
+ check(Rebuild);
+}
+
+/// All WebSocket messages sent by the server to the client begin with a `ToClientTag` byte. This
+/// enum is non-exhaustive only to avoid Illegal Behavior when malformed messages are sent over the
+/// socket; unnamed tags are an error condition and should terminate the connection.
+///
+/// Every tag has a corresponding `extern struct` representing the full message (or a header of the
+/// message if it is variable-length). For instance, `.hello` corresponds to `Hello`.
+///
+/// When introducing a tag, make sure to add a corresponding `extern struct` whose first field is
+/// this enum, and `check` its layout in the `comptime` block above.
+pub const ToClientTag = enum(u8) {
+ hello,
+ status_update,
+ step_update,
+
+ // `--fuzz`
+ fuzz_source_index,
+ fuzz_coverage_update,
+ fuzz_entry_points,
+
+ // `--time-report`
+ time_report_generic_result,
+ time_report_compile_result,
+
+ _,
+};
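+// For illustration, a client might dispatch on the leading tag byte along these lines; the `msg`
+// slice and the handler functions here are hypothetical, not part of this ABI:
+//
+//     switch (@as(ToClientTag, @enumFromInt(msg[0]))) {
+//         .hello => handleHello(msg),
+//         .status_update, .step_update => handleBuildUpdate(msg),
+//         .fuzz_source_index, .fuzz_coverage_update, .fuzz_entry_points => handleFuzz(msg),
+//         .time_report_generic_result, .time_report_compile_result => handleTimeReport(msg),
+//         _ => return error.MalformedMessage, // unnamed tag: terminate the connection
+//     }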
+
+/// Like `ToClientTag`, but for messages sent by the client to the server.
+pub const ToServerTag = enum(u8) {
+ rebuild,
+
+ _,
+};
+
+/// The current overall status of the build runner.
+/// Keep in sync with indices in web UI `main.js:updateBuildStatus`.
+pub const BuildStatus = enum(u8) {
+ idle,
+ watching,
+ running,
+ fuzz_init,
+};
+
+/// WebSocket server->client.
+///
+/// Sent by the server as the first message after a WebSocket connection opens to provide basic
+/// information about the server, the build graph, etc.
+///
+/// Trailing:
+/// * `step_name_len: u32` for each `steps_len`
+/// * `step_name: [step_name_len]u8` for each `step_name_len`
+/// * `step_status: u8` for every 4 `steps_len`; every 2 bits is a `StepUpdate.Status`, LSBs first
+pub const Hello = extern struct {
+ tag: ToClientTag = .hello,
+
+ status: BuildStatus,
+ flags: Flags,
+
+ /// Any message containing a timestamp represents it as a number of nanoseconds relative to when
+ /// the build began. This field is the current timestamp, represented in that form.
+ timestamp: i64 align(4),
+
+ /// The number of steps in the build graph which are reachable from the top-level step[s] being
+ /// run; in other words, the number of steps which will be executed by this build. The name of
+ /// each step trails this message.
+ steps_len: u32 align(1),
+
+ pub const Flags = packed struct(u16) {
+ /// Whether time reporting is enabled.
+ time_report: bool,
+ _: u15 = 0,
+ };
+};
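+// Sketch of decoding the packed trailing `step_status` bytes on the client side; `statuses` and
+// `step_idx` are hypothetical. Each byte packs four 2-bit `StepUpdate.Status` values, LSBs first:
+//
+//     fn stepStatus(statuses: []const u8, step_idx: u32) StepUpdate.Status {
+//         const shift: u3 = @intCast((step_idx % 4) * 2);
+//         return @enumFromInt(@as(u2, @truncate(statuses[step_idx / 4] >> shift)));
+//     }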
+/// WebSocket server->client.
+///
+/// Indicates that the build status has changed.
+pub const StatusUpdate = extern struct {
+ tag: ToClientTag = .status_update,
+ new: BuildStatus,
+};
+/// WebSocket server->client.
+///
+/// Indicates a change in a step's status.
+pub const StepUpdate = extern struct {
+ tag: ToClientTag = .step_update,
+ step_idx: u32 align(1),
+ bits: packed struct(u8) {
+ status: Status,
+ _: u6 = 0,
+ },
+ /// Keep in sync with indices in web UI `main.js:updateStepStatus`.
+ pub const Status = enum(u2) {
+ pending,
+ wip,
+ success,
+ failure,
+ };
+};
+
+pub const Rebuild = extern struct {
+ tag: ToServerTag = .rebuild,
+};
+
+/// ABI bits specifically relating to the fuzzer interface.
+pub const fuzz = struct {
+ /// libfuzzer uses this struct, and its `usize` is the one that counts. To match the ABI,
+ /// the integer widths here must match those of the target that libfuzzer is compiled for.
+ ///
+ /// Trailing:
+ /// * 1 bit per pc_addr, usize elements
+ /// * pc_addr: usize for each pcs_len
+ pub const SeenPcsHeader = extern struct {
+ n_runs: usize,
+ unique_runs: usize,
+ pcs_len: usize,
+
+ /// Used for comptime assertions. Provides a mechanism for strategically
+ /// causing compile errors.
+ pub const trailing = .{
+ .pc_bits_usize,
+ .pc_addr,
+ };
+
+ pub fn headerEnd(header: *const SeenPcsHeader) []const usize {
+ const ptr: [*]align(@alignOf(usize)) const u8 = @ptrCast(header);
+ const header_end_ptr: [*]const usize = @ptrCast(ptr + @sizeOf(SeenPcsHeader));
+ const pcs_len = header.pcs_len;
+ return header_end_ptr[0 .. pcs_len + seenElemsLen(pcs_len)];
+ }
+
+ pub fn seenBits(header: *const SeenPcsHeader) []const usize {
+ return header.headerEnd()[0..seenElemsLen(header.pcs_len)];
+ }
+
+ pub fn seenElemsLen(pcs_len: usize) usize {
+ return (pcs_len + @bitSizeOf(usize) - 1) / @bitSizeOf(usize);
+ }
+
+ pub fn pcAddrs(header: *const SeenPcsHeader) []const usize {
+ const pcs_len = header.pcs_len;
+ return header.headerEnd()[seenElemsLen(pcs_len)..][0..pcs_len];
+ }
+ };
+
+ /// WebSocket server->client.
+ ///
+ /// Sent once, when fuzzing starts, to indicate the available coverage data.
+ ///
+ /// Trailing:
+ /// * std.debug.Coverage.String for each directories_len
+ /// * std.debug.Coverage.File for each files_len
+ /// * std.debug.Coverage.SourceLocation for each source_locations_len
+ /// * u8 for each string_bytes_len
+ pub const SourceIndexHeader = extern struct {
+ tag: ToClientTag = .fuzz_source_index,
+ _: [3]u8 = @splat(0),
+ directories_len: u32,
+ files_len: u32,
+ source_locations_len: u32,
+ string_bytes_len: u32,
+ /// When, according to the server, fuzzing started.
+ start_timestamp: i64 align(4),
+ };
+
+ /// WebSocket server->client.
+ ///
+ /// Sent whenever the set of covered source locations is updated.
+ ///
+ /// Trailing:
+ /// * one bit per source_locations_len, contained in u64 elements
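+ /// (that is, `(source_locations_len + 63) / 64` trailing `u64` elements)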
+ pub const CoverageUpdateHeader = extern struct {
+ tag: ToClientTag = .fuzz_coverage_update,
+ _: [7]u8 = @splat(0),
+ n_runs: u64,
+ unique_runs: u64,
+
+ pub const trailing = .{
+ .pc_bits_usize,
+ };
+ };
+
+ /// WebSocket server->client.
+ ///
+ /// Sent whenever the set of entry points is updated.
+ ///
+ /// Trailing:
+ /// * one u32 index of source_locations per locsLen()
+ pub const EntryPointHeader = extern struct {
+ tag: ToClientTag = .fuzz_entry_points,
+ locs_len_raw: [3]u8,
+
+ pub fn locsLen(hdr: EntryPointHeader) u24 {
+ return @bitCast(hdr.locs_len_raw);
+ }
+ pub fn init(locs_len: u24) EntryPointHeader {
+ return .{ .locs_len_raw = @bitCast(locs_len) };
+ }
+ };
+};
+
+/// ABI bits specifically relating to the time report interface.
+pub const time_report = struct {
+ /// WebSocket server->client.
+ ///
+ /// Sent after a `Step` finishes, providing the time taken to execute the step.
+ pub const GenericResult = extern struct {
+ tag: ToClientTag = .time_report_generic_result,
+ step_idx: u32 align(1),
+ ns_total: u64 align(1),
+ };
+
+ /// WebSocket server->client.
+ ///
+ /// Sent after a `Step.Compile` finishes, providing the step's time report.
+ ///
+ /// Trailing:
+ /// * `llvm_pass_timings: [llvm_pass_timings_len]u8` (ASCII-encoded)
+ /// * for each `files_len`:
+ /// * `name` (null-terminated UTF-8 string)
+ /// * for each `decls_len`:
+ /// * `name` (null-terminated UTF-8 string)
+ /// * `file: u32` (index of file this decl is in)
+ /// * `sema_ns: u64` (nanoseconds spent semantically analyzing this decl)
+ /// * `codegen_ns: u64` (nanoseconds spent generating machine code for this decl)
+ /// * `link_ns: u64` (nanoseconds spent linking this decl into the output binary)
+ pub const CompileResult = extern struct {
+ tag: ToClientTag = .time_report_compile_result,
+
+ step_idx: u32 align(1),
+
+ flags: Flags,
+ stats: Stats align(1),
+ ns_total: u64 align(1),
+
+ llvm_pass_timings_len: u32 align(1),
+ files_len: u32 align(1),
+ decls_len: u32 align(1),
+
+ pub const Flags = packed struct(u8) {
+ use_llvm: bool,
+ _: u7 = 0,
+ };
+
+ pub const Stats = extern struct {
+ n_reachable_files: u32,
+ n_imported_files: u32,
+ n_generic_instances: u32,
+ n_inline_calls: u32,
+
+ cpu_ns_parse: u64,
+ cpu_ns_astgen: u64,
+ cpu_ns_sema: u64,
+ cpu_ns_codegen: u64,
+ cpu_ns_link: u64,
+
+ real_ns_files: u64,
+ real_ns_decls: u64,
+ real_ns_llvm_emit: u64,
+ real_ns_link_flush: u64,
+
+ pub const init: Stats = .{
+ .n_reachable_files = 0,
+ .n_imported_files = 0,
+ .n_generic_instances = 0,
+ .n_inline_calls = 0,
+ .cpu_ns_parse = 0,
+ .cpu_ns_astgen = 0,
+ .cpu_ns_sema = 0,
+ .cpu_ns_codegen = 0,
+ .cpu_ns_link = 0,
+ .real_ns_files = 0,
+ .real_ns_decls = 0,
+ .real_ns_llvm_emit = 0,
+ .real_ns_link_flush = 0,
+ };
+ };
+ };
+};
diff --git a/lib/std/http/WebSocket.zig b/lib/std/http/WebSocket.zig
index 8ab434ceae05..b9a66cdbd660 100644
--- a/lib/std/http/WebSocket.zig
+++ b/lib/std/http/WebSocket.zig
@@ -18,14 +18,13 @@ pub const InitError = error{WebSocketUpgradeMissingKey} ||
std.http.Server.Request.ReaderError;
pub fn init(
- ws: *WebSocket,
request: *std.http.Server.Request,
send_buffer: []u8,
recv_buffer: []align(4) u8,
-) InitError!bool {
+) InitError!?WebSocket {
switch (request.head.version) {
- .@"HTTP/1.0" => return false,
- .@"HTTP/1.1" => if (request.head.method != .GET) return false,
+ .@"HTTP/1.0" => return null,
+ .@"HTTP/1.1" => if (request.head.method != .GET) return null,
}
var sec_websocket_key: ?[]const u8 = null;
@@ -36,12 +35,12 @@ pub fn init(
sec_websocket_key = header.value;
} else if (std.ascii.eqlIgnoreCase(header.name, "upgrade")) {
if (!std.ascii.eqlIgnoreCase(header.value, "websocket"))
- return false;
+ return null;
upgrade_websocket = true;
}
}
if (!upgrade_websocket)
- return false;
+ return null;
const key = sec_websocket_key orelse return error.WebSocketUpgradeMissingKey;
@@ -55,7 +54,7 @@ pub fn init(
request.head.content_length = std.math.maxInt(u64);
- ws.* = .{
+ return .{
.key = key,
.recv_fifo = std.fifo.LinearFifo(u8, .Slice).init(recv_buffer),
.reader = try request.reader(),
@@ -74,7 +73,6 @@ pub fn init(
.request = request,
.outstanding_len = 0,
};
- return true;
}
pub const Header0 = packed struct(u8) {
diff --git a/lib/std/net.zig b/lib/std/net.zig
index d7387662c004..ac851059d28a 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -42,6 +42,47 @@ pub const Address = extern union {
in6: Ip6Address,
un: if (has_unix_sockets) posix.sockaddr.un else void,
+ /// Parse an IP address which may include a port. For IPv4, this is just written `address:port`.
+ /// For IPv6, RFC 3986 defines this as an "IP literal", and the port is differentiated from the
+ /// address by surrounding the address part in brackets '[addr]:port'. Even if the port is not
+ /// given, the brackets are mandatory.
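+ /// If the port is omitted, it defaults to 0. For example, "127.0.0.1:8000", "127.0.0.1",
+ /// "[::1]:8000", and "[::1]" are all accepted.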
+ pub fn parseIpAndPort(str: []const u8) error{ InvalidAddress, InvalidPort }!Address {
+ if (str.len == 0) return error.InvalidAddress;
+ if (str[0] == '[') {
+ const addr_end = std.mem.indexOfScalar(u8, str, ']') orelse
+ return error.InvalidAddress;
+ const addr_str = str[1..addr_end];
+ const port: u16 = p: {
+ if (addr_end == str.len - 1) break :p 0;
+ if (str[addr_end + 1] != ':') return error.InvalidAddress;
+ break :p parsePort(str[addr_end + 2 ..]) orelse return error.InvalidPort;
+ };
+ return parseIp6(addr_str, port) catch error.InvalidAddress;
+ } else {
+ if (std.mem.indexOfScalar(u8, str, ':')) |idx| {
+ // hold off on `error.InvalidPort` since `error.InvalidAddress` might make more sense
+ const port: ?u16 = parsePort(str[idx + 1 ..]);
+ const addr = parseIp4(str[0..idx], port orelse 0) catch return error.InvalidAddress;
+ if (port == null) return error.InvalidPort;
+ return addr;
+ } else {
+ return parseIp4(str, 0) catch error.InvalidAddress;
+ }
+ }
+ }
+ fn parsePort(str: []const u8) ?u16 {
+ var p: u16 = 0;
+ for (str) |c| switch (c) {
+ '0'...'9' => {
+ const shifted = std.math.mul(u16, p, 10) catch return null;
+ p = std.math.add(u16, shifted, c - '0') catch return null;
+ },
+ else => return null,
+ };
+ if (p == 0) return null;
+ return p;
+ }
+
/// Parse the given IP address string into an Address value.
/// It is recommended to use `resolveIp` instead, to handle
/// IPv6 link-local unix addresses.
diff --git a/lib/std/zig/Server.zig b/lib/std/zig/Server.zig
index 12bd259b16d8..ea6035474111 100644
--- a/lib/std/zig/Server.zig
+++ b/lib/std/zig/Server.zig
@@ -50,6 +50,8 @@ pub const Message = struct {
/// address of the fuzz unit test. This is used to provide a starting
/// point to view coverage.
fuzz_start_addr,
+ /// Body is a TimeReport.
+ time_report,
_,
};
@@ -95,6 +97,19 @@ pub const Message = struct {
};
};
+ /// Trailing is the same as in `std.Build.abi.time_report.CompileResult`, excluding `step_name`.
+ pub const TimeReport = extern struct {
+ stats: std.Build.abi.time_report.CompileResult.Stats align(4),
+ llvm_pass_timings_len: u32,
+ files_len: u32,
+ decls_len: u32,
+ flags: Flags,
+ pub const Flags = packed struct(u32) {
+ use_llvm: bool,
+ _: u31 = 0,
+ };
+ };
+
/// Trailing:
/// * the hex digest of the cache directory within the /o/ subdirectory.
pub const EmitDigest = extern struct {
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 3796ed6acc53..419c56019fd0 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -173,7 +173,6 @@ verbose_cimport: bool,
verbose_llvm_cpu_features: bool,
verbose_link: bool,
disable_c_depfile: bool,
-time_report: bool,
stack_report: bool,
debug_compiler_runtime_libs: bool,
debug_compile_errors: bool,
@@ -263,6 +262,8 @@ link_prog_node: std.Progress.Node = std.Progress.Node.none,
llvm_opt_bisect_limit: c_int,
+time_report: ?TimeReport,
+
file_system_inputs: ?*std.ArrayListUnmanaged(u8),
/// This is the digest of the cache for the current compilation.
@@ -322,6 +323,72 @@ const QueuedJobs = struct {
zigc_lib: bool = false,
};
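+/// Measures elapsed time (via `std.time.Instant`) for a single `--time-report` statistic. Obtain
+/// one via `Compilation.startTimer`; a timer started while time reporting is disabled is `.unused`
+/// and all of its operations are no-ops.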
+pub const Timer = union(enum) {
+ unused,
+ active: struct {
+ start: std.time.Instant,
+ saved_ns: u64,
+ },
+ paused: u64,
+ stopped,
+
+ pub fn pause(t: *Timer) void {
+ switch (t.*) {
+ .unused => return,
+ .active => |a| {
+ const current = std.time.Instant.now() catch unreachable;
+ const new_ns = switch (current.order(a.start)) {
+ .lt, .eq => 0,
+ .gt => current.since(a.start),
+ };
+ t.* = .{ .paused = a.saved_ns + new_ns };
+ },
+ .paused => unreachable,
+ .stopped => unreachable,
+ }
+ }
+ pub fn @"resume"(t: *Timer) void {
+ switch (t.*) {
+ .unused => return,
+ .active => unreachable,
+ .paused => |saved_ns| t.* = .{ .active = .{
+ .start = std.time.Instant.now() catch unreachable,
+ .saved_ns = saved_ns,
+ } },
+ .stopped => unreachable,
+ }
+ }
+ pub fn finish(t: *Timer) ?u64 {
+ defer t.* = .stopped;
+ switch (t.*) {
+ .unused => return null,
+ .active => |a| {
+ const current = std.time.Instant.now() catch unreachable;
+ const new_ns = switch (current.order(a.start)) {
+ .lt, .eq => 0,
+ .gt => current.since(a.start),
+ };
+ return a.saved_ns + new_ns;
+ },
+ .paused => |ns| return ns,
+ .stopped => unreachable,
+ }
+ }
+};
+
+/// Starts a timer for measuring a `--time-report` value. If `comp.time_report` is `null`, the
+/// returned timer does nothing. When the thing being timed is done, call `Timer.finish`. If that
+/// function returns non-`null`, the value is the elapsed time in nanoseconds, and `comp.time_report`
+/// is guaranteed to be non-`null`.
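+///
+/// Typical usage, mirroring the call sites below:
+///
+///     var timer = comp.startTimer();
+///     // ... the work being measured ...
+///     if (timer.finish()) |ns| {
+///         comp.mutex.lock();
+///         defer comp.mutex.unlock();
+///         comp.time_report.?.stats.some_field += ns; // `some_field` stands in for a real stat
+///     }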
+pub fn startTimer(comp: *Compilation) Timer {
+ if (comp.time_report == null) return .unused;
+ const now = std.time.Instant.now() catch @panic("std.time.Timer unsupported; cannot emit time report");
+ return .{ .active = .{
+ .start = now,
+ .saved_ns = 0,
+ } };
+}
+
/// A filesystem path, represented relative to one of a few specific directories where possible.
/// Every path (considering symlinks as distinct paths) has a canonical representation in this form.
/// This abstraction allows us to:
@@ -787,6 +854,58 @@ pub inline fn debugIncremental(comp: *const Compilation) bool {
return comp.debug_incremental;
}
+pub const TimeReport = struct {
+ stats: std.Build.abi.time_report.CompileResult.Stats,
+
+ /// Allocated into `gpa`. The pass time statistics emitted by LLVM's "time-passes" option.
+ /// LLVM provides this data in ASCII form as a table, which can be directly shown to users.
+ ///
+ /// Ideally, we would be able to use `printAllJSONValues` to get *structured* data which we can
+ /// then display more nicely. Unfortunately, that function seems to trip an assertion on one of
+ /// the pass timer names at the time of writing.
+ llvm_pass_timings: []u8,
+
+ /// Key is a ZIR `declaration` instruction; value holds the nanoseconds spent analyzing it and the
+ /// number of times it was analyzed. This is the total across all instances of the generic parent namespace, and (if this is
+ /// a function) all generic instances of this function. It also includes time spent analyzing
+ /// function bodies if this is a function (generic or otherwise).
+ /// An entry not existing means the declaration has not been analyzed (so far).
+ decl_sema_info: std.AutoArrayHashMapUnmanaged(InternPool.TrackedInst.Index, struct {
+ ns: u64,
+ count: u32,
+ }),
+
+ /// Key is a ZIR `declaration` instruction which is a function or test; value is the number of
+ /// nanoseconds spent running codegen on it. As above, this is the total across all generic
+ /// instances, both of this function itself and of its parent namespace.
+ /// An entry not existing means the declaration has not been codegenned (so far).
+ /// Every key in `decl_codegen_ns` is also in `decl_sema_info`.
+ decl_codegen_ns: std.AutoArrayHashMapUnmanaged(InternPool.TrackedInst.Index, u64),
+
+ /// Key is a ZIR `declaration` instruction which is anything other than a `comptime` decl; value
+ /// is the number of nanoseconds spent linking it into the binary. As above, this is the total
+ /// across all generic instances.
+ /// An entry not existing means the declaration has not been linked (so far).
+ /// Every key in `decl_link_ns` is also in `decl_sema_info`.
+ decl_link_ns: std.AutoArrayHashMapUnmanaged(InternPool.TrackedInst.Index, u64),
+
+ pub fn deinit(tr: *TimeReport, gpa: Allocator) void {
+ tr.stats = undefined;
+ gpa.free(tr.llvm_pass_timings);
+ tr.decl_sema_info.deinit(gpa);
+ tr.decl_codegen_ns.deinit(gpa);
+ tr.decl_link_ns.deinit(gpa);
+ }
+
+ pub const init: TimeReport = .{
+ .stats = .init,
+ .llvm_pass_timings = &.{},
+ .decl_sema_info = .empty,
+ .decl_codegen_ns = .empty,
+ .decl_link_ns = .empty,
+ };
+};
+
pub const default_stack_protector_buffer_size = target_util.default_stack_protector_buffer_size;
pub const SemaError = Zcu.SemaError;
@@ -2027,7 +2146,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.verbose_link = options.verbose_link,
.disable_c_depfile = options.disable_c_depfile,
.reference_trace = options.reference_trace,
- .time_report = options.time_report,
+ .time_report = if (options.time_report) .init else null,
.stack_report = options.stack_report,
.test_filters = options.test_filters,
.test_name_prefix = options.test_name_prefix,
@@ -2561,6 +2680,8 @@ pub fn destroy(comp: *Compilation) void {
}
comp.failed_win32_resources.deinit(gpa);
+ if (comp.time_report) |*tr| tr.deinit(gpa);
+
comp.link_diags.deinit();
comp.clearMiscFailures();
@@ -2657,6 +2778,10 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
comp.clearMiscFailures();
comp.last_update_was_cache_hit = false;
+ if (comp.time_report) |*tr| {
+ tr.deinit(gpa); // this is information about an old update
+ tr.* = .init;
+ }
var tmp_dir_rand_int: u64 = undefined;
var man: Cache.Manifest = undefined;
@@ -2688,6 +2813,14 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
whole.cache_manifest = &man;
try addNonIncrementalStuffToCacheManifest(comp, arena, &man);
+ // Under `--time-report`, ignore cache hits; do the work anyway for those juicy numbers.
+ const ignore_hit = comp.time_report != null;
+
+ if (ignore_hit) {
+ // We're going to do the work regardless of whether this is a hit or a miss.
+ man.want_shared_lock = false;
+ }
+
const is_hit = man.hit() catch |err| switch (err) {
error.CacheCheckFailed => switch (man.diagnostic) {
.none => unreachable,
@@ -2713,7 +2846,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
.{},
),
};
- if (is_hit) {
+ if (is_hit and !ignore_hit) {
// In this case the cache hit contains the full set of file system inputs. Nice!
if (comp.file_system_inputs) |buf| try man.populateFileSystemInputs(buf);
if (comp.parent_whole_cache) |pwc| {
@@ -2734,6 +2867,11 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
}
log.debug("CacheMode.whole cache miss for {s}", .{comp.root_name});
+ if (ignore_hit) {
+ // Okay, now set this back so that `writeManifest` will downgrade our lock later.
+ man.want_shared_lock = true;
+ }
+
// Compile the artifacts to a temporary directory.
whole.tmp_artifact_directory = d: {
tmp_dir_rand_int = std.crypto.random.int(u64);
@@ -2786,6 +2924,8 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
const pt: Zcu.PerThread = .activate(zcu, .main);
defer pt.deactivate();
+ assert(zcu.cur_analysis_timer == null);
+
zcu.skip_analysis_this_update = false;
// TODO: doing this in `resolveReferences` later could avoid adding inputs for dead embedfiles. Investigate!
@@ -2829,6 +2969,8 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
const pt: Zcu.PerThread = .activate(zcu, .main);
defer pt.deactivate();
+ assert(zcu.cur_analysis_timer == null);
+
if (!zcu.skip_analysis_this_update) {
if (comp.config.is_test) {
// The `test_functions` decl has been intentionally postponed until now,
@@ -3040,11 +3182,22 @@ fn flush(
) !void {
if (comp.zcu) |zcu| {
if (zcu.llvm_object) |llvm_object| {
+ const pt: Zcu.PerThread = .activate(zcu, tid);
+ defer pt.deactivate();
+
// Emit the ZCU object from LLVM now; it's required to flush the output file.
// If there's an output file, it wants to decide where the LLVM object goes!
const sub_prog_node = comp.link_prog_node.start("LLVM Emit Object", 0);
defer sub_prog_node.end();
- try llvm_object.emit(.{ .zcu = zcu, .tid = tid }, .{
+
+ var timer = comp.startTimer();
+ defer if (timer.finish()) |ns| {
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+ comp.time_report.?.stats.real_ns_llvm_emit = ns;
+ };
+
+ try llvm_object.emit(pt, .{
.pre_ir_path = comp.verbose_llvm_ir,
.pre_bc_path = comp.verbose_llvm_bc,
@@ -3071,7 +3224,7 @@ fn flush(
.is_debug = comp.root_mod.optimize_mode == .Debug,
.is_small = comp.root_mod.optimize_mode == .ReleaseSmall,
- .time_report = comp.time_report,
+ .time_report = if (comp.time_report) |*p| p else null,
.sanitize_thread = comp.config.any_sanitize_thread,
.fuzz = comp.config.any_fuzz,
.lto = comp.config.lto,
@@ -3079,6 +3232,12 @@ fn flush(
}
}
if (comp.bin_file) |lf| {
+ var timer = comp.startTimer();
+ defer if (timer.finish()) |ns| {
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+ comp.time_report.?.stats.real_ns_link_flush = ns;
+ };
// This is needed before reading the error flags.
lf.flush(arena, tid, comp.link_prog_node) catch |err| switch (err) {
error.LinkFailure => {}, // Already reported.
@@ -4223,6 +4382,17 @@ fn performAllTheWork(
zcu.generation += 1;
};
+ // This is awkward: we don't want to start the timer until later, but we don't want to stop it
+ // until the wait groups finish either. That means we need to declare it out here.
+ var decl_work_timer: ?Timer = null;
+ defer commit_timer: {
+ const t = &(decl_work_timer orelse break :commit_timer);
+ const ns = t.finish() orelse break :commit_timer;
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+ comp.time_report.?.stats.real_ns_decls = ns;
+ }
+
// Here we queue up all the AstGen tasks first, followed by C object compilation.
// We wait until the AstGen tasks are all completed before proceeding to the
// (at least for now) single-threaded main work queue. However, C object compilation
@@ -4431,6 +4601,13 @@ fn performAllTheWork(
const zir_prog_node = main_progress_node.start("AST Lowering", 0);
defer zir_prog_node.end();
+ var timer = comp.startTimer();
+ defer if (timer.finish()) |ns| {
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+ comp.time_report.?.stats.real_ns_files = ns;
+ };
+
var astgen_wait_group: WaitGroup = .{};
defer astgen_wait_group.wait();
@@ -4556,6 +4733,10 @@ fn performAllTheWork(
return;
}
+ if (comp.time_report) |*tr| {
+ tr.stats.n_reachable_files = @intCast(zcu.alive_files.count());
+ }
+
if (comp.incremental) {
const update_zir_refs_node = main_progress_node.start("Update ZIR References", 0);
defer update_zir_refs_node.end();
@@ -4599,6 +4780,11 @@ fn performAllTheWork(
}
}
+ if (comp.zcu != null) {
+ // Start the timer for the "decls" part of the pipeline (Sema, CodeGen, link).
+ decl_work_timer = comp.startTimer();
+ }
+
work: while (true) {
for (&comp.work_queues) |*work_queue| if (work_queue.readItem()) |job| {
try processOneJob(@intFromEnum(Zcu.PerThread.Id.main), comp, job);
diff --git a/src/Sema.zig b/src/Sema.zig
index d7add0724d7e..1d2b658fd341 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -3246,21 +3246,25 @@ fn zirEnumDecl(
wip_ty.prepare(ip, new_namespace_index);
done = true;
- try Sema.resolveDeclaredEnum(
- pt,
- wip_ty,
- inst,
- tracked_inst,
- new_namespace_index,
- type_name.name,
- small,
- body,
- tag_type_ref,
- any_values,
- fields_len,
- sema.code,
- body_end,
- );
+ {
+ const tracked_unit = zcu.trackUnitSema(type_name.name.toSlice(ip), null);
+ defer tracked_unit.end(zcu);
+ try Sema.resolveDeclaredEnum(
+ pt,
+ wip_ty,
+ inst,
+ tracked_inst,
+ new_namespace_index,
+ type_name.name,
+ small,
+ body,
+ tag_type_ref,
+ any_values,
+ fields_len,
+ sema.code,
+ body_end,
+ );
+ }
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
@@ -7577,6 +7581,12 @@ fn analyzeCall(
// This is an inline call. The function must be comptime-known. We will analyze its body directly using this `Sema`.
+ if (zcu.comp.time_report) |*tr| {
+ if (!block.isComptime()) {
+ tr.stats.n_inline_calls += 1;
+ }
+ }
+
if (func_ty_info.is_noinline and !block.isComptime()) {
return sema.fail(block, call_src, "inline call of noinline function", .{});
}
diff --git a/src/Type.zig b/src/Type.zig
index 9316bec11e12..0677b8d22729 100644
--- a/src/Type.zig
+++ b/src/Type.zig
@@ -3797,6 +3797,9 @@ fn resolveStructInner(
return error.AnalysisFail;
}
+ const tracked_unit = zcu.trackUnitSema(struct_obj.name.toSlice(&zcu.intern_pool), null);
+ defer tracked_unit.end(zcu);
+
if (zcu.comp.debugIncremental()) {
const info = try zcu.incremental_debug_state.getUnitInfo(gpa, owner);
info.last_update_gen = zcu.generation;
@@ -3856,6 +3859,9 @@ fn resolveUnionInner(
return error.AnalysisFail;
}
+ const tracked_unit = zcu.trackUnitSema(union_obj.name.toSlice(&zcu.intern_pool), null);
+ defer tracked_unit.end(zcu);
+
if (zcu.comp.debugIncremental()) {
const info = try zcu.incremental_debug_state.getUnitInfo(gpa, owner);
info.last_update_gen = zcu.generation;
diff --git a/src/Zcu.zig b/src/Zcu.zig
index c13f7aaac9be..90b3edd00138 100644
--- a/src/Zcu.zig
+++ b/src/Zcu.zig
@@ -312,6 +312,10 @@ builtin_decl_values: BuiltinDecl.Memoized = .initFill(.none),
incremental_debug_state: if (build_options.enable_debug_extensions) IncrementalDebugState else void =
if (build_options.enable_debug_extensions) .init else {},
+/// Times semantic analysis of the current `AnalUnit`. When we pause to analyze a different unit,
+/// this timer must be temporarily paused and resumed later.
+cur_analysis_timer: ?Compilation.Timer = null,
+
generation: u32 = 0,
pub const IncrementalDebugState = struct {
@@ -4683,26 +4687,56 @@ fn explainWhyFileIsInModule(
}
}
-const SemaProgNode = struct {
+const TrackedUnitSema = struct {
/// `null` means we created the node, so should end it.
old_name: ?[std.Progress.Node.max_name_len]u8,
- pub fn end(spn: SemaProgNode, zcu: *Zcu) void {
- if (spn.old_name) |old_name| {
+ old_analysis_timer: ?Compilation.Timer,
+ analysis_timer_decl: ?InternPool.TrackedInst.Index,
+ pub fn end(tus: TrackedUnitSema, zcu: *Zcu) void {
+ const comp = zcu.comp;
+ if (tus.old_name) |old_name| {
zcu.sema_prog_node.completeOne(); // we're just renaming, but it's effectively completion
zcu.cur_sema_prog_node.setName(&old_name);
} else {
zcu.cur_sema_prog_node.end();
zcu.cur_sema_prog_node = .none;
}
+ report_time: {
+ const sema_ns = zcu.cur_analysis_timer.?.finish() orelse break :report_time;
+ const zir_decl = tus.analysis_timer_decl orelse break :report_time;
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+ comp.time_report.?.stats.cpu_ns_sema += sema_ns;
+ const gop = comp.time_report.?.decl_sema_info.getOrPut(comp.gpa, zir_decl) catch |err| switch (err) {
+ error.OutOfMemory => {
+ comp.setAllocFailure();
+ break :report_time;
+ },
+ };
+ if (!gop.found_existing) gop.value_ptr.* = .{ .ns = 0, .count = 0 };
+ gop.value_ptr.ns += sema_ns;
+ gop.value_ptr.count += 1;
+ }
+ zcu.cur_analysis_timer = tus.old_analysis_timer;
+ if (zcu.cur_analysis_timer) |*t| t.@"resume"();
}
};
-pub fn startSemaProgNode(zcu: *Zcu, name: []const u8) SemaProgNode {
- if (zcu.cur_sema_prog_node.index != .none) {
+pub fn trackUnitSema(zcu: *Zcu, name: []const u8, zir_inst: ?InternPool.TrackedInst.Index) TrackedUnitSema {
+ if (zcu.cur_analysis_timer) |*t| t.pause();
+ const old_analysis_timer = zcu.cur_analysis_timer;
+ zcu.cur_analysis_timer = zcu.comp.startTimer();
+ const old_name: ?[std.Progress.Node.max_name_len]u8 = old_name: {
+ if (zcu.cur_sema_prog_node.index == .none) {
+ zcu.cur_sema_prog_node = zcu.sema_prog_node.start(name, 0);
+ break :old_name null;
+ }
const old_name = zcu.cur_sema_prog_node.getName();
zcu.cur_sema_prog_node.setName(name);
- return .{ .old_name = old_name };
- } else {
- zcu.cur_sema_prog_node = zcu.sema_prog_node.start(name, 0);
- return .{ .old_name = null };
- }
+ break :old_name old_name;
+ };
+ return .{
+ .old_name = old_name,
+ .old_analysis_timer = old_analysis_timer,
+ .analysis_timer_decl = zir_inst,
+ };
}
diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig
index 119b742a89ea..de4be438f507 100644
--- a/src/Zcu/PerThread.zig
+++ b/src/Zcu/PerThread.zig
@@ -215,12 +215,15 @@ pub fn updateFile(
};
defer cache_file.close();
+ // Under `--time-report`, ignore cache hits; do the work anyway for those juicy numbers.
+ const ignore_hit = comp.time_report != null;
+
const need_update = while (true) {
const result = switch (file.getMode()) {
inline else => |mode| try loadZirZoirCache(zcu, cache_file, stat, file, mode),
};
switch (result) {
- .success => {
+ .success => if (!ignore_hit) {
log.debug("AstGen cached success: {f}", .{file.path.fmt(comp)});
break false;
},
@@ -260,9 +263,16 @@ pub fn updateFile(
file.source = source;
+ var timer = comp.startTimer();
// Any potential AST errors are converted to ZIR errors when we run AstGen/ZonGen.
file.tree = try Ast.parse(gpa, source, file.getMode());
+ if (timer.finish()) |ns_parse| {
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+ comp.time_report.?.stats.cpu_ns_parse += ns_parse;
+ }
+ timer = comp.startTimer();
switch (file.getMode()) {
.zig => {
file.zir = try AstGen.generate(gpa, file.tree.?);
@@ -282,6 +292,11 @@ pub fn updateFile(
};
},
}
+ if (timer.finish()) |ns_astgen| {
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+ comp.time_report.?.stats.cpu_ns_astgen += ns_astgen;
+ }
log.debug("AstGen fresh success: {f}", .{file.path.fmt(comp)});
}
@@ -801,8 +816,11 @@ pub fn ensureComptimeUnitUpToDate(pt: Zcu.PerThread, cu_id: InternPool.ComptimeU
info.deps.clearRetainingCapacity();
}
- const unit_prog_node = zcu.startSemaProgNode("comptime");
- defer unit_prog_node.end(zcu);
+ const unit_tracking = zcu.trackUnitSema(
+ "comptime",
+ zcu.intern_pool.getComptimeUnit(cu_id).zir_index,
+ );
+ defer unit_tracking.end(zcu);
return pt.analyzeComptimeUnit(cu_id) catch |err| switch (err) {
error.AnalysisFail => {
@@ -981,8 +999,8 @@ pub fn ensureNavValUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu
info.deps.clearRetainingCapacity();
}
- const unit_prog_node = zcu.startSemaProgNode(nav.fqn.toSlice(ip));
- defer unit_prog_node.end(zcu);
+ const unit_tracking = zcu.trackUnitSema(nav.fqn.toSlice(ip), nav.srcInst(ip));
+ defer unit_tracking.end(zcu);
const invalidate_value: bool, const new_failed: bool = if (pt.analyzeNavVal(nav_id)) |result| res: {
break :res .{
@@ -1381,8 +1399,8 @@ pub fn ensureNavTypeUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zc
info.deps.clearRetainingCapacity();
}
- const unit_prog_node = zcu.startSemaProgNode(nav.fqn.toSlice(ip));
- defer unit_prog_node.end(zcu);
+ const unit_tracking = zcu.trackUnitSema(nav.fqn.toSlice(ip), nav.srcInst(ip));
+ defer unit_tracking.end(zcu);
const invalidate_type: bool, const new_failed: bool = if (pt.analyzeNavType(nav_id)) |result| res: {
break :res .{
@@ -1601,8 +1619,12 @@ pub fn ensureFuncBodyUpToDate(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
info.deps.clearRetainingCapacity();
}
- const func_prog_node = zcu.startSemaProgNode(ip.getNav(func.owner_nav).fqn.toSlice(ip));
- defer func_prog_node.end(zcu);
+ const owner_nav = ip.getNav(func.owner_nav);
+ const unit_tracking = zcu.trackUnitSema(
+ owner_nav.fqn.toSlice(ip),
+ owner_nav.srcInst(ip),
+ );
+ defer unit_tracking.end(zcu);
const ies_outdated, const new_failed = if (pt.analyzeFuncBody(func_index)) |result|
.{ prev_failed or result.ies_outdated, false }
@@ -1847,6 +1869,10 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
});
const struct_ty = try pt.createFileRootStruct(file_index, new_namespace_index, false);
errdefer zcu.intern_pool.remove(pt.tid, struct_ty);
+
+ if (zcu.comp.time_report) |*tr| {
+ tr.stats.n_imported_files += 1;
+ }
}
/// Called by AstGen worker threads when an import is seen. If `new_file` is returned, the caller is
@@ -2520,6 +2546,12 @@ pub fn scanNamespace(
const gpa = zcu.gpa;
const namespace = zcu.namespacePtr(namespace_index);
+ const tracked_unit = zcu.trackUnitSema(
+ Type.fromInterned(namespace.owner_type).containerTypeName(ip).toSlice(ip),
+ null,
+ );
+ defer tracked_unit.end(zcu);
+
// For incremental updates, `scanDecl` wants to look up existing decls by their ZIR index rather
// than their name. We'll build an efficient mapping now, then discard the current `decls`.
// We map to the `AnalUnit`, since not every declaration has a `Nav`.
@@ -2755,6 +2787,12 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE
func.setResolvedErrorSet(ip, .none);
}
+ if (zcu.comp.time_report) |*tr| {
+ if (func.generic_owner != .none) {
+ tr.stats.n_generic_instances += 1;
+ }
+ }
+
+ // This is the `Nav` corresponding to the `declaration` instruction which the function or its generic owner originates from.
const decl_nav = ip.getNav(if (func.generic_owner == .none)
func.owner_nav
@@ -4307,6 +4345,9 @@ pub fn addDependency(pt: Zcu.PerThread, unit: AnalUnit, dependee: InternPool.Dep
/// codegen thread, depending on whether the backend supports `Zcu.Feature.separate_thread`.
pub fn runCodegen(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air, out: *@import("../link.zig").ZcuTask.LinkFunc.SharedMir) void {
const zcu = pt.zcu;
+
+ var timer = zcu.comp.startTimer();
+
const success: bool = if (runCodegenInner(pt, func_index, air)) |mir| success: {
out.value = mir;
break :success true;
@@ -4327,6 +4368,25 @@ pub fn runCodegen(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air, ou
}
break :success false;
};
+
+ if (timer.finish()) |ns_codegen| report_time: {
+ const ip = &zcu.intern_pool;
+ const nav = ip.indexToKey(func_index).func.owner_nav;
+ const zir_decl = ip.getNav(nav).srcInst(ip);
+ zcu.comp.mutex.lock();
+ defer zcu.comp.mutex.unlock();
+ const tr = &zcu.comp.time_report.?;
+ tr.stats.cpu_ns_codegen += ns_codegen;
+ const gop = tr.decl_codegen_ns.getOrPut(zcu.gpa, zir_decl) catch |err| switch (err) {
+ error.OutOfMemory => {
+ zcu.comp.setAllocFailure();
+ break :report_time;
+ },
+ };
+ if (!gop.found_existing) gop.value_ptr.* = 0;
+ gop.value_ptr.* += ns_codegen;
+ }
+
// release `out.value` with this store; synchronizes with acquire loads in `link`
out.status.store(if (success) .ready else .failed, .release);
zcu.comp.link_task_queue.mirReady(zcu.comp, func_index, out);
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 5e522c3d73d0..6bec900aecaf 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -764,7 +764,7 @@ pub const Object = struct {
is_debug: bool,
is_small: bool,
- time_report: bool,
+ time_report: ?*Compilation.TimeReport,
sanitize_thread: bool,
fuzz: bool,
lto: std.zig.LtoMode,
@@ -1063,7 +1063,7 @@ pub const Object = struct {
var lowered_options: llvm.TargetMachine.EmitOptions = .{
.is_debug = options.is_debug,
.is_small = options.is_small,
- .time_report = options.time_report,
+ .time_report_out = null, // set below to make sure it's only set for a single `emitToFile`
.tsan = options.sanitize_thread,
.lto = switch (options.lto) {
.none => .None,
@@ -1118,6 +1118,11 @@ pub const Object = struct {
lowered_options.llvm_ir_filename = null;
}
+ var time_report_c_str: [*:0]u8 = undefined;
+ if (options.time_report != null) {
+ lowered_options.time_report_out = &time_report_c_str;
+ }
+
lowered_options.asm_filename = options.asm_path;
if (target_machine.emitToFile(module, &error_message, &lowered_options)) {
defer llvm.disposeMessage(error_message);
@@ -1125,6 +1130,12 @@ pub const Object = struct {
emit_asm_msg, emit_bin_msg, post_llvm_ir_msg, post_llvm_bc_msg, error_message,
});
}
+ if (options.time_report) |tr| {
+ defer std.c.free(time_report_c_str);
+ const time_report_data = std.mem.span(time_report_c_str);
+ assert(tr.llvm_pass_timings.len == 0);
+ tr.llvm_pass_timings = try comp.gpa.dupe(u8, time_report_data);
+ }
}
pub fn updateFunc(
diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig
index 7fa2a7da4356..89bdb4b3da54 100644
--- a/src/codegen/llvm/bindings.zig
+++ b/src/codegen/llvm/bindings.zig
@@ -88,7 +88,7 @@ pub const TargetMachine = opaque {
pub const EmitOptions = extern struct {
is_debug: bool,
is_small: bool,
- time_report: bool,
+ time_report_out: ?*[*:0]u8,
tsan: bool,
sancov: bool,
lto: LtoPhase,
diff --git a/src/link.zig b/src/link.zig
index 51bb9330f109..1b63e7793b5a 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -1302,6 +1302,14 @@ pub fn doPrelinkTask(comp: *Compilation, task: PrelinkTask) void {
comp.link_prog_node.completeOne();
return;
};
+
+ var timer = comp.startTimer();
+ defer if (timer.finish()) |ns| {
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+ comp.time_report.?.stats.cpu_ns_link += ns;
+ };
+
switch (task) {
.load_explicitly_provided => {
const prog_node = comp.link_prog_node.start("Parse Inputs", comp.link_inputs.len);
@@ -1428,6 +1436,9 @@ pub fn doZcuTask(comp: *Compilation, tid: usize, task: ZcuTask) void {
const ip = &zcu.intern_pool;
const pt: Zcu.PerThread = .activate(zcu, @enumFromInt(tid));
defer pt.deactivate();
+
+ var timer = comp.startTimer();
+
switch (task) {
.link_nav => |nav_index| {
const fqn_slice = ip.getNav(nav_index).fqn.toSlice(ip);
@@ -1502,6 +1513,28 @@ pub fn doZcuTask(comp: *Compilation, tid: usize, task: ZcuTask) void {
}
},
}
+
+ if (timer.finish()) |ns_link| report_time: {
+ const zir_decl: ?InternPool.TrackedInst.Index = switch (task) {
+ .link_type, .update_line_number => null,
+ .link_nav => |nav| ip.getNav(nav).srcInst(ip),
+ .link_func => |f| ip.getNav(ip.indexToKey(f.func).func.owner_nav).srcInst(ip),
+ };
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+ const tr = &zcu.comp.time_report.?;
+ tr.stats.cpu_ns_link += ns_link;
+ if (zir_decl) |inst| {
+ const gop = tr.decl_link_ns.getOrPut(zcu.gpa, inst) catch |err| switch (err) {
+ error.OutOfMemory => {
+ zcu.comp.setAllocFailure();
+ break :report_time;
+ },
+ };
+ if (!gop.found_existing) gop.value_ptr.* = 0;
+ gop.value_ptr.* += ns_link;
+ }
+ }
}
/// After the main pipeline is done, but before flush, the compilation may need to link one final
/// `Nav` into the binary: the `builtin.test_functions` value. Since the link thread isn't running
diff --git a/src/main.zig b/src/main.zig
index 9349899a561a..2a143c30aefe 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -484,6 +484,7 @@ const usage_build_generic =
\\ -fno-structured-cfg (SPIR-V) force SPIR-V kernels to not use structured control flow
\\ -mexec-model=[value] (WASI) Execution model
\\ -municode (Windows) Use wmain/wWinMain as entry point
+ \\ --time-report Send timing diagnostics to '--listen' clients
\\
\\Per-Module Compile Options:
\\ -target [name] -- see the targets command
@@ -678,7 +679,6 @@ const usage_build_generic =
\\
\\Debug Options (Zig Compiler Development):
\\ -fopt-bisect-limit=[limit] Only run [limit] first LLVM optimization passes
- \\ -ftime-report Print timing diagnostics
\\ -fstack-report Print stack size diagnostics
\\ --verbose-link Display linker invocations
\\ --verbose-cc Display C compiler invocations
@@ -1403,7 +1403,7 @@ fn buildOutputType(
try test_exec_args.append(arena, null);
} else if (mem.eql(u8, arg, "--test-no-exec")) {
test_no_exec = true;
- } else if (mem.eql(u8, arg, "-ftime-report")) {
+ } else if (mem.eql(u8, arg, "--time-report")) {
time_report = true;
} else if (mem.eql(u8, arg, "-fstack-report")) {
stack_report = true;
@@ -2899,6 +2899,10 @@ fn buildOutputType(
fatal("test-obj requires --test-no-exec", .{});
}
+ if (time_report and listen == .none) {
+ fatal("--time-report requires --listen", .{});
+ }
+
if (arg_mode == .translate_c and create_module.c_source_files.items.len != 1) {
fatal("translate-c expects exactly 1 source file (found {d})", .{create_module.c_source_files.items.len});
}
@@ -4208,6 +4212,84 @@ fn serveUpdateResults(s: *Server, comp: *Compilation) !void {
}
}
+ if (comp.time_report) |*tr| {
+ var decls_len: u32 = 0;
+
+ var file_name_bytes: std.ArrayListUnmanaged(u8) = .empty;
+ defer file_name_bytes.deinit(gpa);
+ var files: std.AutoArrayHashMapUnmanaged(Zcu.File.Index, void) = .empty;
+ defer files.deinit(gpa);
+ var decl_data: std.ArrayListUnmanaged(u8) = .empty;
+ defer decl_data.deinit(gpa);
+
+ // Each decl needs at least 34 bytes:
+ // * 2 for 1-byte name plus null terminator
+ // * 4 for `file`
+ // * 4 for `sema_count`
+ // * 8 for `sema_ns`
+ // * 8 for `codegen_ns`
+ // * 8 for `link_ns`
+ // Most, if not all, decls in `tr.decl_sema_info` are valid, so we have a good size estimate.
+ try decl_data.ensureUnusedCapacity(gpa, tr.decl_sema_info.count() * 34);
+
+ for (tr.decl_sema_info.keys(), tr.decl_sema_info.values()) |tracked_inst, sema_info| {
+ const resolved = tracked_inst.resolveFull(&comp.zcu.?.intern_pool) orelse continue;
+ const file = comp.zcu.?.fileByIndex(resolved.file);
+ const zir = file.zir orelse continue;
+ const decl_name = zir.nullTerminatedString(zir.getDeclaration(resolved.inst).name);
+
+ const gop = try files.getOrPut(gpa, resolved.file);
+ if (!gop.found_existing) try file_name_bytes.writer(gpa).print("{f}\x00", .{file.path.fmt(comp)});
+
+ const codegen_ns = tr.decl_codegen_ns.get(tracked_inst) orelse 0;
+ const link_ns = tr.decl_link_ns.get(tracked_inst) orelse 0;
+
+ decls_len += 1;
+
+ try decl_data.ensureUnusedCapacity(gpa, 33 + decl_name.len);
+ decl_data.appendSliceAssumeCapacity(decl_name);
+ decl_data.appendAssumeCapacity(0);
+
+ const out_file = decl_data.addManyAsArrayAssumeCapacity(4);
+ const out_sema_count = decl_data.addManyAsArrayAssumeCapacity(4);
+ const out_sema_ns = decl_data.addManyAsArrayAssumeCapacity(8);
+ const out_codegen_ns = decl_data.addManyAsArrayAssumeCapacity(8);
+ const out_link_ns = decl_data.addManyAsArrayAssumeCapacity(8);
+ std.mem.writeInt(u32, out_file, @intCast(gop.index), .little);
+ std.mem.writeInt(u32, out_sema_count, sema_info.count, .little);
+ std.mem.writeInt(u64, out_sema_ns, sema_info.ns, .little);
+ std.mem.writeInt(u64, out_codegen_ns, codegen_ns, .little);
+ std.mem.writeInt(u64, out_link_ns, link_ns, .little);
+ }
+
+ const header: std.zig.Server.Message.TimeReport = .{
+ .stats = tr.stats,
+ .llvm_pass_timings_len = @intCast(tr.llvm_pass_timings.len),
+ .files_len = @intCast(files.count()),
+ .decls_len = decls_len,
+ .flags = .{
+ .use_llvm = comp.zcu != null and comp.zcu.?.llvm_object != null,
+ },
+ };
+
+ var slices: [4][]const u8 = .{
+ @ptrCast(&header),
+ tr.llvm_pass_timings,
+ file_name_bytes.items,
+ decl_data.items,
+ };
+ try s.serveMessageHeader(.{
+ .tag = .time_report,
+ .bytes_len = len: {
+ var len: u32 = 0;
+ for (slices) |slice| len += @intCast(slice.len);
+ break :len len;
+ },
+ });
+ try s.out.writeVecAll(&slices);
+ try s.out.flush();
+ }
+
if (error_bundle.errorMessageCount() > 0) {
try s.serveErrorBundle(error_bundle);
return;
@@ -5277,7 +5359,9 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
var windows_libs: std.StringArrayHashMapUnmanaged(void) = .empty;
if (resolved_target.result.os.tag == .windows) {
- try windows_libs.put(arena, "advapi32", {});
+ try windows_libs.ensureUnusedCapacity(arena, 2);
+ windows_libs.putAssumeCapacity("advapi32", {});
+ windows_libs.putAssumeCapacity("ws2_32", {}); // for `--listen` (web interface)
}
const comp = Compilation.create(gpa, arena, .{
diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp
index c9a30bb167e5..65dae8edcdbb 100644
--- a/src/zig_llvm.cpp
+++ b/src/zig_llvm.cpp
@@ -220,7 +220,7 @@ static SanitizerCoverageOptions getSanCovOptions(ZigLLVMCoverageOptions z) {
ZIG_EXTERN_C bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machine_ref, LLVMModuleRef module_ref,
char **error_message, const ZigLLVMEmitOptions *options)
{
- TimePassesIsEnabled = options->time_report;
+ TimePassesIsEnabled = options->time_report_out != nullptr;
raw_fd_ostream *dest_asm_ptr = nullptr;
raw_fd_ostream *dest_bin_ptr = nullptr;
@@ -418,10 +418,17 @@ ZIG_EXTERN_C bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machi
WriteBitcodeToFile(llvm_module, *dest_bitcode);
}
- if (options->time_report) {
- TimerGroup::printAll(errs());
+ // This must only happen once we know we've succeeded and will be returning `false`, because
+ // this code `malloc`s memory which will become owned by the caller (in Zig code).
+ if (options->time_report_out != nullptr) {
+ std::string out_str;
+ auto os = raw_string_ostream(out_str);
+ TimerGroup::printAll(os);
+ TimerGroup::clearAll();
+ auto c_str = (char *)malloc(out_str.length() + 1);
+ strcpy(c_str, out_str.c_str());
+ *options->time_report_out = c_str;
}
-
return false;
}
diff --git a/src/zig_llvm.h b/src/zig_llvm.h
index db331512bd90..f1940bb3408f 100644
--- a/src/zig_llvm.h
+++ b/src/zig_llvm.h
@@ -66,7 +66,10 @@ enum ZigLLVMThinOrFullLTOPhase {
struct ZigLLVMEmitOptions {
bool is_debug;
bool is_small;
- bool time_report;
+ // If not null, and `ZigLLVMTargetMachineEmitToFile` returns `false` indicating success, this
+ // `char *` will be populated with a `malloc`-allocated string containing the time report data
+ // as a human-readable table (LLVM's pass timer output). The caller is responsible for freeing that memory.
+ char **time_report_out;
bool tsan;
bool sancov;
ZigLLVMThinOrFullLTOPhase lto;
diff --git a/tools/dump-cov.zig b/tools/dump-cov.zig
index 16df7e34793e..1e2444701183 100644
--- a/tools/dump-cov.zig
+++ b/tools/dump-cov.zig
@@ -5,7 +5,7 @@ const std = @import("std");
const fatal = std.process.fatal;
const Path = std.Build.Cache.Path;
const assert = std.debug.assert;
-const SeenPcsHeader = std.Build.Fuzz.abi.SeenPcsHeader;
+const SeenPcsHeader = std.Build.abi.fuzz.SeenPcsHeader;
pub fn main() !void {
var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init;