From d6e8ba3f97b778676bdb3c79b37afc8003b883ea Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 2 May 2024 18:27:53 -0700 Subject: [PATCH 01/60] start reworking std.Progress New design ideas: * One global instance, don't try to play nicely with other instances except via IPC. * One process owns the terminal and the other processes communicate via IPC. * Clear the whole terminal and use multiple lines. What's implemented so far: * Query the terminal for size. * Register a SIGWINCH handler. * Use a thread for redraws. To be done: * IPC * Handling single threaded targets * Porting to Windows * More intelligent display of the progress tree rather than only using one line. --- lib/std/Progress.zig | 553 +++++++++++++++++-------------------------- 1 file changed, 220 insertions(+), 333 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 19f90e86a91c..97bfa48b6cf8 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -1,10 +1,7 @@ //! This API is non-allocating, non-fallible, and thread-safe. +//! //! The tradeoff is that users of this API must provide the storage //! for each `Progress.Node`. -//! -//! Initialize the struct directly, overriding these fields as desired: -//! * `refresh_rate_ms` -//! * `initial_delay_ms` const std = @import("std"); const builtin = @import("builtin"); @@ -12,63 +9,64 @@ const windows = std.os.windows; const testing = std.testing; const assert = std.debug.assert; const Progress = @This(); +const posix = std.posix; /// `null` if the current node (and its children) should /// not print on update() -terminal: ?std.fs.File = undefined, +terminal: ?std.fs.File, /// Is this a windows API terminal (note: this is not the same as being run on windows /// because other terminals exist like MSYS/git-bash) -is_windows_terminal: bool = false, +is_windows_terminal: bool, /// Whether the terminal supports ANSI escape codes. -supports_ansi_escape_codes: bool = false, - -/// If the terminal is "dumb", don't print output. -/// This can be useful if you don't want to print all -/// the stages of code generation if there are a lot. -/// You should not use it if the user should see output -/// for example showing the user what tests run. -dont_print_on_dumb: bool = false, - -root: Node = undefined, - -/// Keeps track of how much time has passed since the beginning. -/// Used to compare with `initial_delay_ms` and `refresh_rate_ms`. -timer: ?std.time.Timer = null, - -/// When the previous refresh was written to the terminal. -/// Used to compare with `refresh_rate_ms`. -prev_refresh_timestamp: u64 = undefined, - -/// This buffer represents the maximum number of bytes written to the terminal -/// with each refresh. -output_buffer: [100]u8 = undefined, - -/// How many nanoseconds between writing updates to the terminal. -refresh_rate_ns: u64 = 50 * std.time.ns_per_ms, - -/// How many nanoseconds to keep the output hidden -initial_delay_ns: u64 = 500 * std.time.ns_per_ms, - -done: bool = true, - -/// Protects the `refresh` function, as well as `node.recently_updated_child`. -/// Without this, callsites would call `Node.end` and then free `Node` memory -/// while it was still being accessed by the `refresh` function. -update_mutex: std.Thread.Mutex = .{}, - -/// Keeps track of how many columns in the terminal have been output, so that -/// we can move the cursor back later. -columns_written: usize = undefined, +supports_ansi_escape_codes: bool, + +root: Node, + +/// Protects all the state shared between the update thread and the public API calls. 
+mutex: std.Thread.Mutex, +update_thread: ?std.Thread, + +/// Atomically set by SIGWINCH as well as the root done() function. +redraw_event: std.Thread.ResetEvent, +/// Ensure there is only 1 global Progress object. +initialized: bool, +/// Indicates a request to shut down and reset global state. +done: bool, + +refresh_rate_ns: u64, +initial_delay_ns: u64, + +rows: u16, +cols: u16, + +/// Accessed only by the update thread. +draw_buffer: []u8, + +pub const Options = struct { + /// User-provided buffer with static lifetime. + /// + /// Used to store the entire write buffer sent to the terminal. Progress output will be truncated if it + /// cannot fit into this buffer which will look bad but not cause any malfunctions. + /// + /// Must be at least 100 bytes. + draw_buffer: []u8, + /// How many nanoseconds between writing updates to the terminal. + refresh_rate_ns: u64 = 50 * std.time.ns_per_ms, + /// How many nanoseconds to keep the output hidden + initial_delay_ns: u64 = 500 * std.time.ns_per_ms, + /// If provided, causes the progress item to have a denominator. + /// 0 means unknown. + estimated_total_items: usize = 0, + root_name: []const u8 = "", +}; /// Represents one unit of progress. Each node can have children nodes, or /// one can use integers with `update`. pub const Node = struct { - context: *Progress, parent: ?*Node, name: []const u8, - unit: []const u8 = "", /// Must be handled atomically to be thread-safe. recently_updated_child: ?*Node = null, /// Must be handled atomically to be thread-safe. 0 means null. @@ -76,15 +74,15 @@ pub const Node = struct { /// Must be handled atomically to be thread-safe. unprotected_completed_items: usize, + pub const ListNode = std.DoublyLinkedList(void); + /// Create a new child progress node. Thread-safe. + /// /// Call `Node.end` when done. - /// TODO solve https://github.com/ziglang/zig/issues/2765 and then change this - /// API to set `self.parent.recently_updated_child` with the return value. - /// Until that is fixed you probably want to call `activate` on the return value. + /// /// Passing 0 for `estimated_total_items` means unknown. pub fn start(self: *Node, name: []const u8, estimated_total_items: usize) Node { - return Node{ - .context = self.context, + return .{ .parent = self, .name = name, .unprotected_estimated_total_items = estimated_total_items, @@ -94,66 +92,33 @@ pub const Node = struct { /// This is the same as calling `start` and then `end` on the returned `Node`. Thread-safe. pub fn completeOne(self: *Node) void { - if (self.parent) |parent| { - @atomicStore(?*Node, &parent.recently_updated_child, self, .release); - } _ = @atomicRmw(usize, &self.unprotected_completed_items, .Add, 1, .monotonic); - self.context.maybeRefresh(); + self.activate(); } /// Finish a started `Node`. Thread-safe. pub fn end(self: *Node) void { - self.context.maybeRefresh(); if (self.parent) |parent| { - { - self.context.update_mutex.lock(); - defer self.context.update_mutex.unlock(); - _ = @cmpxchgStrong(?*Node, &parent.recently_updated_child, self, null, .monotonic, .monotonic); - } parent.completeOne(); } else { - self.context.update_mutex.lock(); - defer self.context.update_mutex.unlock(); - self.context.done = true; - self.context.refreshWithHeldLock(); + { + global_progress.mutex.lock(); + defer global_progress.mutex.unlock(); + global_progress.done = true; + } + global_progress.redraw_event.set(); + if (global_progress.update_thread) |thread| thread.join(); } } /// Tell the parent node that this node is actively being worked on. Thread-safe. 
pub fn activate(self: *Node) void { - if (self.parent) |parent| { - @atomicStore(?*Node, &parent.recently_updated_child, self, .release); - self.context.maybeRefresh(); - } - } - - /// Thread-safe. - pub fn setName(self: *Node, name: []const u8) void { - const progress = self.context; - progress.update_mutex.lock(); - defer progress.update_mutex.unlock(); - self.name = name; - if (self.parent) |parent| { - @atomicStore(?*Node, &parent.recently_updated_child, self, .release); - if (parent.parent) |grand_parent| { - @atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .release); - } - if (progress.timer) |*timer| progress.maybeRefreshWithHeldLock(timer); - } - } - - /// Thread-safe. - pub fn setUnit(self: *Node, unit: []const u8) void { - const progress = self.context; - progress.update_mutex.lock(); - defer progress.update_mutex.unlock(); - self.unit = unit; - if (self.parent) |parent| { - @atomicStore(?*Node, &parent.recently_updated_child, self, .release); - if (parent.parent) |grand_parent| { - @atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .release); - } - if (progress.timer) |*timer| progress.maybeRefreshWithHeldLock(timer); + var parent = self.parent; + var child = self; + while (parent) |p| { + @atomicStore(?*Node, &p.recently_updated_child, child, .release); + child = p; + parent = p.parent; } } @@ -168,280 +133,202 @@ pub const Node = struct { } }; -/// Create a new progress node. +var global_progress: Progress = .{ + .terminal = null, + .is_windows_terminal = false, + .supports_ansi_escape_codes = false, + .root = undefined, + .mutex = .{}, + .update_thread = null, + .redraw_event = .{}, + .initialized = false, + .refresh_rate_ns = undefined, + .initial_delay_ns = undefined, + .rows = 0, + .cols = 0, + .draw_buffer = undefined, + .done = false, +}; + +/// Initializes a global Progress instance. +/// +/// Asserts there is only one global Progress instance. +/// /// Call `Node.end` when done. -/// TODO solve https://github.com/ziglang/zig/issues/2765 and then change this -/// API to return Progress rather than accept it as a parameter. -/// `estimated_total_items` value of 0 means unknown. 
-pub fn start(self: *Progress, name: []const u8, estimated_total_items: usize) *Node { +pub fn start(options: Options) *Node { + assert(!global_progress.initialized); const stderr = std.io.getStdErr(); - self.terminal = null; if (stderr.supportsAnsiEscapeCodes()) { - self.terminal = stderr; - self.supports_ansi_escape_codes = true; + global_progress.terminal = stderr; + global_progress.supports_ansi_escape_codes = true; } else if (builtin.os.tag == .windows and stderr.isTty()) { - self.is_windows_terminal = true; - self.terminal = stderr; + global_progress.is_windows_terminal = true; + global_progress.terminal = stderr; } else if (builtin.os.tag != .windows) { // we are in a "dumb" terminal like in acme or writing to a file - self.terminal = stderr; + global_progress.terminal = stderr; } - self.root = Node{ - .context = self, + global_progress.root = .{ .parent = null, - .name = name, - .unprotected_estimated_total_items = estimated_total_items, + .name = options.root_name, + .unprotected_estimated_total_items = options.estimated_total_items, .unprotected_completed_items = 0, }; - self.columns_written = 0; - self.prev_refresh_timestamp = 0; - self.timer = std.time.Timer.start() catch null; - self.done = false; - return &self.root; -} + global_progress.done = false; + global_progress.initialized = true; + + assert(options.draw_buffer.len >= 100); + global_progress.draw_buffer = options.draw_buffer; + global_progress.refresh_rate_ns = options.refresh_rate_ns; + global_progress.initial_delay_ns = options.initial_delay_ns; + + var act: posix.Sigaction = .{ + .handler = .{ .sigaction = handleSigWinch }, + .mask = posix.empty_sigset, + .flags = (posix.SA.SIGINFO | posix.SA.RESTART), + }; + posix.sigaction(posix.SIG.WINCH, &act, null) catch { + global_progress.terminal = null; + return &global_progress.root; + }; -/// Updates the terminal if enough time has passed since last update. Thread-safe. -pub fn maybeRefresh(self: *Progress) void { - if (self.timer) |*timer| { - if (!self.update_mutex.tryLock()) return; - defer self.update_mutex.unlock(); - maybeRefreshWithHeldLock(self, timer); + if (global_progress.terminal != null) { + if (std.Thread.spawn(.{}, updateThreadRun, .{})) |thread| { + global_progress.update_thread = thread; + } else |_| { + global_progress.terminal = null; + } } + + return &global_progress.root; } -fn maybeRefreshWithHeldLock(self: *Progress, timer: *std.time.Timer) void { - const now = timer.read(); - if (now < self.initial_delay_ns) return; - // TODO I have observed this to happen sometimes. I think we need to follow Rust's - // lead and guarantee monotonically increasing times in the std lib itself. - if (now < self.prev_refresh_timestamp) return; - if (now - self.prev_refresh_timestamp < self.refresh_rate_ns) return; - return self.refreshWithHeldLock(); +/// Returns whether a resize is needed to learn the terminal size. +fn wait(timeout_ns: u64) bool { + const resize_flag = if (global_progress.redraw_event.timedWait(timeout_ns)) |_| + true + else |err| switch (err) { + error.Timeout => false, + }; + global_progress.redraw_event.reset(); + return resize_flag or (global_progress.cols == 0); } -/// Updates the terminal and resets `self.next_refresh_timestamp`. Thread-safe. 
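
As a usage sketch (not part of the patch; the buffer size and the "building"/"compile" names are invented), driving the reworked API as it stands after this first commit looks roughly like this:

    const std = @import("std");

    // Options.draw_buffer requires static lifetime and at least 100 bytes.
    var draw_buffer: [1000]u8 = undefined;

    pub fn main() void {
        const root = std.Progress.start(.{
            .draw_buffer = &draw_buffer,
            .root_name = "building",
            .estimated_total_items = 2,
        });
        defer root.end();

        var compile = root.start("compile", 100);
        defer compile.end();
        compile.completeOne();
    }
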
-pub fn refresh(self: *Progress) void { - if (!self.update_mutex.tryLock()) return; - defer self.update_mutex.unlock(); +fn updateThreadRun() void { + { + const resize_flag = wait(global_progress.initial_delay_ns); + maybeUpdateSize(resize_flag); - return self.refreshWithHeldLock(); -} + const buffer = b: { + global_progress.mutex.lock(); + defer global_progress.mutex.unlock(); -fn clearWithHeldLock(p: *Progress, end_ptr: *usize) void { - const file = p.terminal orelse return; - var end = end_ptr.*; - if (p.columns_written > 0) { - // restore the cursor position by moving the cursor - // `columns_written` cells to the left, then clear the rest of the - // line - if (p.supports_ansi_escape_codes) { - end += (std.fmt.bufPrint(p.output_buffer[end..], "\x1b[{d}D", .{p.columns_written}) catch unreachable).len; - end += (std.fmt.bufPrint(p.output_buffer[end..], "\x1b[0K", .{}) catch unreachable).len; - } else if (builtin.os.tag == .windows) winapi: { - std.debug.assert(p.is_windows_terminal); - - var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined; - if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) != windows.TRUE) { - // stop trying to write to this file - p.terminal = null; - break :winapi; - } + if (global_progress.done) return clearTerminal(); - var cursor_pos = windows.COORD{ - .X = info.dwCursorPosition.X - @as(windows.SHORT, @intCast(p.columns_written)), - .Y = info.dwCursorPosition.Y, - }; - - if (cursor_pos.X < 0) - cursor_pos.X = 0; - - const fill_chars = @as(windows.DWORD, @intCast(info.dwSize.X - cursor_pos.X)); - - var written: windows.DWORD = undefined; - if (windows.kernel32.FillConsoleOutputAttribute( - file.handle, - info.wAttributes, - fill_chars, - cursor_pos, - &written, - ) != windows.TRUE) { - // stop trying to write to this file - p.terminal = null; - break :winapi; - } - if (windows.kernel32.FillConsoleOutputCharacterW( - file.handle, - ' ', - fill_chars, - cursor_pos, - &written, - ) != windows.TRUE) { - // stop trying to write to this file - p.terminal = null; - break :winapi; - } - if (windows.kernel32.SetConsoleCursorPosition(file.handle, cursor_pos) != windows.TRUE) { - // stop trying to write to this file - p.terminal = null; - break :winapi; - } - } else { - // we are in a "dumb" terminal like in acme or writing to a file - p.output_buffer[end] = '\n'; - end += 1; - } + break :b computeRedraw(); + }; + write(buffer); + } + + while (true) { + const resize_flag = wait(global_progress.refresh_rate_ns); + maybeUpdateSize(resize_flag); - p.columns_written = 0; + const buffer = b: { + global_progress.mutex.lock(); + defer global_progress.mutex.unlock(); + + if (global_progress.done) return clearTerminal(); + + break :b computeRedraw(); + }; + write(buffer); } - end_ptr.* = end; } -fn refreshWithHeldLock(self: *Progress) void { - const is_dumb = !self.supports_ansi_escape_codes and !self.is_windows_terminal; - if (is_dumb and self.dont_print_on_dumb) return; +const start_sync = "\x1b[?2026h"; +const clear = "\x1b[J"; +const save = "\x1b7"; +const restore = "\x1b8"; +const finish_sync = "\x1b[?2026l"; + +fn clearTerminal() void { + write(clear); +} + +fn computeRedraw() []u8 { + // The strategy is: keep the cursor at the beginning, and then with every redraw: + // erase, save, write, restore + + var i: usize = 0; + const buf = global_progress.draw_buffer; + + const prefix = start_sync ++ clear ++ save; + const suffix = restore ++ finish_sync; + + buf[0..prefix.len].* = prefix.*; + i = prefix.len; - const file = self.terminal orelse return; + // Walk 
the tree and write the progress output to the buffer. - var end: usize = 0; - clearWithHeldLock(self, &end); + var node: *Node = &global_progress.root; + while (true) { + const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .monotonic); + const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .monotonic); - if (!self.done) { - var need_ellipse = false; - var maybe_node: ?*Node = &self.root; - while (maybe_node) |node| { - if (need_ellipse) { - self.bufWrite(&end, "... ", .{}); + if (node.name.len != 0 or eti > 0) { + if (node.name.len != 0) { + i += (std.fmt.bufPrint(buf[i..], "{s}", .{node.name}) catch @panic("TODO")).len; } - need_ellipse = false; - const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .monotonic); - const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .monotonic); - const current_item = completed_items + 1; - if (node.name.len != 0 or eti > 0) { - if (node.name.len != 0) { - self.bufWrite(&end, "{s}", .{node.name}); - need_ellipse = true; - } - if (eti > 0) { - if (need_ellipse) self.bufWrite(&end, " ", .{}); - self.bufWrite(&end, "[{d}/{d}{s}] ", .{ current_item, eti, node.unit }); - need_ellipse = false; - } else if (completed_items != 0) { - if (need_ellipse) self.bufWrite(&end, " ", .{}); - self.bufWrite(&end, "[{d}{s}] ", .{ current_item, node.unit }); - need_ellipse = false; - } + if (eti > 0) { + i += (std.fmt.bufPrint(buf[i..], "[{d}/{d}] ", .{ completed_items, eti }) catch @panic("TODO")).len; + } else if (completed_items != 0) { + i += (std.fmt.bufPrint(buf[i..], "[{d}] ", .{completed_items}) catch @panic("TODO")).len; } - maybe_node = @atomicLoad(?*Node, &node.recently_updated_child, .acquire); } - if (need_ellipse) { - self.bufWrite(&end, "... ", .{}); - } - } - _ = file.write(self.output_buffer[0..end]) catch { - // stop trying to write to this file - self.terminal = null; - }; - if (self.timer) |*timer| { - self.prev_refresh_timestamp = timer.read(); + node = @atomicLoad(?*Node, &node.recently_updated_child, .acquire) orelse break; } -} -pub fn log(self: *Progress, comptime format: []const u8, args: anytype) void { - const file = self.terminal orelse { - std.debug.print(format, args); - return; - }; - self.refresh(); - file.writer().print(format, args) catch { - self.terminal = null; - return; - }; - self.columns_written = 0; -} + i = @min(global_progress.cols + prefix.len, i); -/// Allows the caller to freely write to stderr until unlock_stderr() is called. -/// During the lock, the progress information is cleared from the terminal. -pub fn lock_stderr(p: *Progress) void { - p.update_mutex.lock(); - if (p.terminal) |file| { - var end: usize = 0; - clearWithHeldLock(p, &end); - _ = file.write(p.output_buffer[0..end]) catch { - // stop trying to write to this file - p.terminal = null; - }; - } - std.debug.getStderrMutex().lock(); -} + buf[i..][0..suffix.len].* = suffix.*; + i += suffix.len; -pub fn unlock_stderr(p: *Progress) void { - std.debug.getStderrMutex().unlock(); - p.update_mutex.unlock(); + return buf[0..i]; } -fn bufWrite(self: *Progress, end: *usize, comptime format: []const u8, args: anytype) void { - if (std.fmt.bufPrint(self.output_buffer[end.*..], format, args)) |written| { - const amt = written.len; - end.* += amt; - self.columns_written += amt; - } else |err| switch (err) { - error.NoSpaceLeft => { - self.columns_written += self.output_buffer.len - end.*; - end.* = self.output_buffer.len; - const suffix = "... 
"; - @memcpy(self.output_buffer[self.output_buffer.len - suffix.len ..], suffix); - }, - } +fn write(buf: []const u8) void { + const tty = global_progress.terminal orelse return; + tty.writeAll(buf) catch { + global_progress.terminal = null; + }; } -test "basic functionality" { - var disable = true; - _ = &disable; - if (disable) { - // This test is disabled because it uses time.sleep() and is therefore slow. It also - // prints bogus progress data to stderr. - return error.SkipZigTest; - } - var progress = Progress{}; - const root_node = progress.start("", 100); - defer root_node.end(); +fn maybeUpdateSize(resize_flag: bool) void { + if (!resize_flag) return; - const speed_factor = std.time.ns_per_ms; - - const sub_task_names = [_][]const u8{ - "reticulating splines", - "adjusting shoes", - "climbing towers", - "pouring juice", + var winsize: posix.winsize = .{ + .ws_row = 0, + .ws_col = 0, + .ws_xpixel = 0, + .ws_ypixel = 0, }; - var next_sub_task: usize = 0; - var i: usize = 0; - while (i < 100) : (i += 1) { - var node = root_node.start(sub_task_names[next_sub_task], 5); - node.activate(); - next_sub_task = (next_sub_task + 1) % sub_task_names.len; - - node.completeOne(); - std.time.sleep(5 * speed_factor); - node.completeOne(); - node.completeOne(); - std.time.sleep(5 * speed_factor); - node.completeOne(); - node.completeOne(); - std.time.sleep(5 * speed_factor); - - node.end(); - - std.time.sleep(5 * speed_factor); - } - { - var node = root_node.start("this is a really long name designed to activate the truncation code. let's find out if it works", 0); - node.activate(); - std.time.sleep(10 * speed_factor); - progress.refresh(); - std.time.sleep(10 * speed_factor); - node.end(); + const fd = (global_progress.terminal orelse return).handle; + + const err = posix.system.ioctl(fd, posix.T.IOCGWINSZ, @intFromPtr(&winsize)); + if (posix.errno(err) == .SUCCESS) { + global_progress.rows = winsize.ws_row; + global_progress.cols = winsize.ws_col; + } else { + @panic("TODO: handle this failure"); } } + +fn handleSigWinch(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*anyopaque) callconv(.C) void { + _ = info; + _ = ctx_ptr; + assert(sig == posix.SIG.WINCH); + global_progress.redraw_event.set(); +} From e1e4de2776901a0acb7a28454c0fe080c5c13a5e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 22 May 2024 12:37:17 -0700 Subject: [PATCH 02/60] progress progress Move the mutex into the nodes Track the whole tree instead of only recently activated node --- lib/std/Progress.zig | 86 ++++++++++++++++++++++++-------------------- 1 file changed, 48 insertions(+), 38 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 97bfa48b6cf8..54d55e5ca0f9 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -24,8 +24,6 @@ supports_ansi_escape_codes: bool, root: Node, -/// Protects all the state shared between the update thread and the public API calls. -mutex: std.Thread.Mutex, update_thread: ?std.Thread, /// Atomically set by SIGWINCH as well as the root done() function. @@ -33,6 +31,7 @@ redraw_event: std.Thread.ResetEvent, /// Ensure there is only 1 global Progress object. initialized: bool, /// Indicates a request to shut down and reset global state. +/// Accessed atomically. done: bool, refresh_rate_ns: u64, @@ -65,10 +64,13 @@ pub const Options = struct { /// Represents one unit of progress. Each node can have children nodes, or /// one can use integers with `update`. 
pub const Node = struct { - parent: ?*Node, + mutex: std.Thread.Mutex, + /// Links to the parent and child nodes. + parent_list_node: std.DoublyLinkedList(void).Node, + /// Links to the prev and next sibling nodes. + sibling_list_node: std.DoublyLinkedList(void).Node, + name: []const u8, - /// Must be handled atomically to be thread-safe. - recently_updated_child: ?*Node = null, /// Must be handled atomically to be thread-safe. 0 means null. unprotected_estimated_total_items: usize, /// Must be handled atomically to be thread-safe. @@ -78,50 +80,57 @@ pub const Node = struct { /// Create a new child progress node. Thread-safe. /// - /// Call `Node.end` when done. + /// It is expected for the memory of the result to be stored in the + /// caller's stack and therefore is required to call `activate` immediately + /// on the result after initializing the memory location and `end` when done. /// /// Passing 0 for `estimated_total_items` means unknown. pub fn start(self: *Node, name: []const u8, estimated_total_items: usize) Node { return .{ - .parent = self, + .mutex = .{}, + .parent_list_node = .{ + .prev = &self.parent_list_node, + .next = null, + .data = {}, + }, + .sibling_list_node = .{ .data = {} }, .name = name, .unprotected_estimated_total_items = estimated_total_items, .unprotected_completed_items = 0, }; } + /// To be called exactly once after `start`. + pub fn activate(n: *Node) void { + const p = n.parent().?; + p.mutex.lock(); + defer p.mutex.unlock(); + assert(p.parent_list_node.next == null); + p.parent_list_node.next = &n.parent_list_node; + } + /// This is the same as calling `start` and then `end` on the returned `Node`. Thread-safe. pub fn completeOne(self: *Node) void { _ = @atomicRmw(usize, &self.unprotected_completed_items, .Add, 1, .monotonic); - self.activate(); } /// Finish a started `Node`. Thread-safe. - pub fn end(self: *Node) void { - if (self.parent) |parent| { - parent.completeOne(); + pub fn end(child: *Node) void { + if (child.parent()) |p| { + // Make sure the other thread doesn't access this memory that is + // about to be released. + child.mutex.lock(); + + const other = if (child.sibling_list_node.next) |n| n else child.sibling_list_node.prev; + _ = @cmpxchgStrong(std.DoublyLinkedList(void).Node, &p.parent_list_node.next, child, other, .seq_cst, .seq_cst); + p.completeOne(); } else { - { - global_progress.mutex.lock(); - defer global_progress.mutex.unlock(); - global_progress.done = true; - } + @atomicStore(bool, &global_progress.done, true, .seq_cst); global_progress.redraw_event.set(); if (global_progress.update_thread) |thread| thread.join(); } } - /// Tell the parent node that this node is actively being worked on. Thread-safe. - pub fn activate(self: *Node) void { - var parent = self.parent; - var child = self; - while (parent) |p| { - @atomicStore(?*Node, &p.recently_updated_child, child, .release); - child = p; - parent = p.parent; - } - } - /// Thread-safe. 0 means unknown. 
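
A sketch of the calling sequence this intermediate design expects (the function and node names are invented): the child node's storage lives in the caller's stack frame, `activate` links it into the parent exactly once, and `end` unlinks it before that frame is reused.

    fn compileAll(parent_node: *std.Progress.Node) void {
        var compile_node = parent_node.start("compile", 0); // storage is this stack frame
        compile_node.activate(); // required exactly once, immediately after start
        defer compile_node.end();

        // ... do the work, calling compile_node.completeOne() per finished item ...
    }
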
pub fn setEstimatedTotalItems(self: *Node, count: usize) void { @atomicStore(usize, &self.unprotected_estimated_total_items, count, .monotonic); @@ -131,6 +140,11 @@ pub const Node = struct { pub fn setCompletedItems(self: *Node, completed_items: usize) void { @atomicStore(usize, &self.unprotected_completed_items, completed_items, .monotonic); } + + fn parent(child: *Node) ?*Node { + const parent_node = child.parent_list_node.prev orelse return null; + return @fieldParentPtr("parent_list_node", parent_node); + } }; var global_progress: Progress = .{ @@ -138,7 +152,6 @@ var global_progress: Progress = .{ .is_windows_terminal = false, .supports_ansi_escape_codes = false, .root = undefined, - .mutex = .{}, .update_thread = null, .redraw_event = .{}, .initialized = false, @@ -169,7 +182,9 @@ pub fn start(options: Options) *Node { global_progress.terminal = stderr; } global_progress.root = .{ - .parent = null, + .mutex = .{}, + .parent_list_node = .{ .data = {} }, + .sibling_list_node = .{ .data = {} }, .name = options.root_name, .unprotected_estimated_total_items = options.estimated_total_items, .unprotected_completed_items = 0, @@ -220,10 +235,8 @@ fn updateThreadRun() void { maybeUpdateSize(resize_flag); const buffer = b: { - global_progress.mutex.lock(); - defer global_progress.mutex.unlock(); - - if (global_progress.done) return clearTerminal(); + if (@atomicLoad(bool, &global_progress.done, .seq_cst)) + return clearTerminal(); break :b computeRedraw(); }; @@ -235,10 +248,8 @@ fn updateThreadRun() void { maybeUpdateSize(resize_flag); const buffer = b: { - global_progress.mutex.lock(); - defer global_progress.mutex.unlock(); - - if (global_progress.done) return clearTerminal(); + if (@atomicLoad(bool, &global_progress.done, .seq_cst)) + return clearTerminal(); break :b computeRedraw(); }; @@ -270,7 +281,6 @@ fn computeRedraw() []u8 { i = prefix.len; // Walk the tree and write the progress output to the buffer. - var node: *Node = &global_progress.root; while (true) { const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .monotonic); From a3c9511ab9d56d4c06c612536a27b84d67ae415c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 22 May 2024 20:42:12 -0700 Subject: [PATCH 03/60] rework std.Progress again This time, we preallocate a fixed set of nodes and have the user-visible Node only be an index into them. This allows for lock-free management of the node storage. Only the parent indexes are stored, and the update thread makes a serialized copy of the state before trying to compute children lists. The update thread then walks the tree and outputs an entire tree of progress rather than only one line. There is a problem with clearing from the cursor to the end of the screen when the cursor is at the bottom of the terminal. --- lib/std/Progress.zig | 413 +++++++++++++++++++++++++++++++++---------- 1 file changed, 317 insertions(+), 96 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 54d55e5ca0f9..cac0839de37f 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -22,14 +22,10 @@ is_windows_terminal: bool, /// Whether the terminal supports ANSI escape codes. supports_ansi_escape_codes: bool, -root: Node, - update_thread: ?std.Thread, /// Atomically set by SIGWINCH as well as the root done() function. redraw_event: std.Thread.ResetEvent, -/// Ensure there is only 1 global Progress object. -initialized: bool, /// Indicates a request to shut down and reset global state. /// Accessed atomically. 
done: bool, @@ -43,13 +39,22 @@ cols: u16, /// Accessed only by the update thread. draw_buffer: []u8, +/// This is in a separate array from `node_storage` but with the same length so +/// that it can be iterated over efficiently without trashing too much of the +/// CPU cache. +node_parents: []Node.Parent, +node_storage: []Node.Storage, +node_freelist: []Node.OptionalIndex, +node_freelist_first: Node.OptionalIndex, +node_end_index: u32, + pub const Options = struct { /// User-provided buffer with static lifetime. /// /// Used to store the entire write buffer sent to the terminal. Progress output will be truncated if it /// cannot fit into this buffer which will look bad but not cause any malfunctions. /// - /// Must be at least 100 bytes. + /// Must be at least 200 bytes. draw_buffer: []u8, /// How many nanoseconds between writing updates to the terminal. refresh_rate_ns: u64 = 50 * std.time.ns_per_ms, @@ -64,66 +69,128 @@ pub const Options = struct { /// Represents one unit of progress. Each node can have children nodes, or /// one can use integers with `update`. pub const Node = struct { - mutex: std.Thread.Mutex, - /// Links to the parent and child nodes. - parent_list_node: std.DoublyLinkedList(void).Node, - /// Links to the prev and next sibling nodes. - sibling_list_node: std.DoublyLinkedList(void).Node, + index: OptionalIndex, + + pub const max_name_len = 38; - name: []const u8, - /// Must be handled atomically to be thread-safe. 0 means null. - unprotected_estimated_total_items: usize, - /// Must be handled atomically to be thread-safe. - unprotected_completed_items: usize, + const Storage = extern struct { + /// Little endian. + completed_count: u32, + /// 0 means unknown. + /// Little endian. + estimated_total_count: u32, + name: [max_name_len]u8, + }; - pub const ListNode = std.DoublyLinkedList(void); + const Parent = enum(u16) { + /// Unallocated storage. + unused = std.math.maxInt(u16) - 1, + /// Indicates root node. + none = std.math.maxInt(u16), + /// Index into `node_storage`. + _, + + fn unwrap(i: @This()) ?Index { + return switch (i) { + .unused, .none => return null, + else => @enumFromInt(@intFromEnum(i)), + }; + } + }; + + const OptionalIndex = enum(u16) { + none = std.math.maxInt(u16), + /// Index into `node_storage`. + _, + + fn unwrap(i: @This()) ?Index { + if (i == .none) return null; + return @enumFromInt(@intFromEnum(i)); + } + + fn toParent(i: @This()) Parent { + assert(@intFromEnum(i) != @intFromEnum(Parent.unused)); + return @enumFromInt(@intFromEnum(i)); + } + }; + + /// Index into `node_storage`. + const Index = enum(u16) { + _, + + fn toParent(i: @This()) Parent { + assert(@intFromEnum(i) != @intFromEnum(Parent.unused)); + assert(@intFromEnum(i) != @intFromEnum(Parent.none)); + return @enumFromInt(@intFromEnum(i)); + } + + fn toOptional(i: @This()) OptionalIndex { + return @enumFromInt(@intFromEnum(i)); + } + }; /// Create a new child progress node. Thread-safe. /// - /// It is expected for the memory of the result to be stored in the - /// caller's stack and therefore is required to call `activate` immediately - /// on the result after initializing the memory location and `end` when done. - /// /// Passing 0 for `estimated_total_items` means unknown. 
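
Condensed view of the three index-like types above (a restatement of their sentinel values, nothing new): a u16 slot doubles as an optional or parent reference by reserving the top values.

    // maxInt(u16)     => .none   (root has no parent; OptionalIndex's null)
    // maxInt(u16) - 1 => .unused (Parent only: the storage slot is unallocated)
    // anything else   => an Index into node_storage
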
- pub fn start(self: *Node, name: []const u8, estimated_total_items: usize) Node { - return .{ - .mutex = .{}, - .parent_list_node = .{ - .prev = &self.parent_list_node, - .next = null, - .data = {}, - }, - .sibling_list_node = .{ .data = {} }, - .name = name, - .unprotected_estimated_total_items = estimated_total_items, - .unprotected_completed_items = 0, - }; - } + pub fn start(node: Node, name: []const u8, estimated_total_items: usize) Node { + const node_index = node.index.unwrap() orelse return .{ .index = .none }; + const parent = node_index.toParent(); + + const freelist_head = &global_progress.node_freelist_first; + var opt_free_index = @atomicLoad(Node.OptionalIndex, freelist_head, .seq_cst); + while (opt_free_index.unwrap()) |free_index| { + const freelist_ptr = freelistByIndex(free_index); + opt_free_index = @cmpxchgWeak(Node.OptionalIndex, freelist_head, opt_free_index, freelist_ptr.*, .seq_cst, .seq_cst) orelse { + // We won the allocation race. + return init(free_index, parent, name, estimated_total_items); + }; + } + + const free_index = @atomicRmw(u32, &global_progress.node_end_index, .Add, 1, .monotonic); + if (free_index >= global_progress.node_storage.len) { + // Ran out of node storage memory. Progress for this node will not be tracked. + _ = @atomicRmw(u32, &global_progress.node_end_index, .Sub, 1, .monotonic); + return .{ .index = .none }; + } - /// To be called exactly once after `start`. - pub fn activate(n: *Node) void { - const p = n.parent().?; - p.mutex.lock(); - defer p.mutex.unlock(); - assert(p.parent_list_node.next == null); - p.parent_list_node.next = &n.parent_list_node; + return init(@enumFromInt(free_index), parent, name, estimated_total_items); } /// This is the same as calling `start` and then `end` on the returned `Node`. Thread-safe. - pub fn completeOne(self: *Node) void { - _ = @atomicRmw(usize, &self.unprotected_completed_items, .Add, 1, .monotonic); + pub fn completeOne(n: Node) void { + const index = n.index.unwrap() orelse return; + const storage = storageByIndex(index); + _ = @atomicRmw(u32, &storage.completed_count, .Add, 1, .monotonic); + } + + /// Thread-safe. + pub fn setCompletedItems(n: Node, completed_items: usize) void { + const index = n.index.unwrap() orelse return; + const storage = storageByIndex(index); + @atomicStore(u32, &storage.completed_count, std.math.lossyCast(u32, completed_items), .monotonic); + } + + /// Thread-safe. 0 means unknown. + pub fn setEstimatedTotalItems(n: Node, count: usize) void { + const index = n.index.unwrap() orelse return; + const storage = storageByIndex(index); + @atomicStore(u32, &storage.estimated_total_count, std.math.lossyCast(u32, count), .monotonic); } /// Finish a started `Node`. Thread-safe. - pub fn end(child: *Node) void { - if (child.parent()) |p| { - // Make sure the other thread doesn't access this memory that is - // about to be released. 
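
The "lock-free management of the node storage" promised in the commit message is a Treiber-stack freelist of indexes: `Node.start` above pops with a `@cmpxchgWeak` loop (falling back to bumping `node_end_index`), and `Node.end` below pushes the index back. The same pattern in isolation, with invented names and a fixed capacity of 16:

    const std = @import("std");

    var freelist_next: [16]u16 = undefined; // freelist_next[i] = free index following i
    var freelist_first: u16 = no_index; // head of the free stack
    const no_index = std.math.maxInt(u16);

    fn popFreeIndex() ?u16 {
        var first = @atomicLoad(u16, &freelist_first, .seq_cst);
        while (first != no_index) {
            // On failure @cmpxchgWeak returns the value it observed; retry with that.
            first = @cmpxchgWeak(u16, &freelist_first, first, freelist_next[first], .seq_cst, .seq_cst) orelse
                return first; // success: the caller now owns `first`
        }
        return null; // freelist empty; the real code bump-allocates instead
    }

    fn pushFreeIndex(index: u16) void {
        var first = @atomicLoad(u16, &freelist_first, .seq_cst);
        while (true) {
            freelist_next[index] = first;
            first = @cmpxchgWeak(u16, &freelist_first, first, index, .seq_cst, .seq_cst) orelse return;
        }
    }
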
- child.mutex.lock(); - - const other = if (child.sibling_list_node.next) |n| n else child.sibling_list_node.prev; - _ = @cmpxchgStrong(std.DoublyLinkedList(void).Node, &p.parent_list_node.next, child, other, .seq_cst, .seq_cst); - p.completeOne(); + pub fn end(n: Node) void { + const index = n.index.unwrap() orelse return; + const parent_ptr = parentByIndex(index); + if (parent_ptr.unwrap()) |parent_index| { + _ = @atomicRmw(u32, &storageByIndex(parent_index).completed_count, .Add, 1, .monotonic); + @atomicStore(Node.Parent, parent_ptr, .unused, .seq_cst); + + const freelist_head = &global_progress.node_freelist_first; + var first = @atomicLoad(Node.OptionalIndex, freelist_head, .seq_cst); + while (true) { + freelistByIndex(index).* = first; + first = @cmpxchgWeak(Node.OptionalIndex, freelist_head, first, index.toOptional(), .seq_cst, .seq_cst) orelse break; + } } else { @atomicStore(bool, &global_progress.done, true, .seq_cst); global_progress.redraw_event.set(); @@ -131,19 +198,35 @@ pub const Node = struct { } } - /// Thread-safe. 0 means unknown. - pub fn setEstimatedTotalItems(self: *Node, count: usize) void { - @atomicStore(usize, &self.unprotected_estimated_total_items, count, .monotonic); + fn storageByIndex(index: Node.Index) *Node.Storage { + return &global_progress.node_storage[@intFromEnum(index)]; } - /// Thread-safe. - pub fn setCompletedItems(self: *Node, completed_items: usize) void { - @atomicStore(usize, &self.unprotected_completed_items, completed_items, .monotonic); + fn parentByIndex(index: Node.Index) *Node.Parent { + return &global_progress.node_parents[@intFromEnum(index)]; + } + + fn freelistByIndex(index: Node.Index) *Node.OptionalIndex { + return &global_progress.node_freelist[@intFromEnum(index)]; } - fn parent(child: *Node) ?*Node { - const parent_node = child.parent_list_node.prev orelse return null; - return @fieldParentPtr("parent_list_node", parent_node); + fn init(free_index: Index, parent: Parent, name: []const u8, estimated_total_items: usize) Node { + assert(parent != .unused); + + const storage = storageByIndex(free_index); + storage.* = .{ + .completed_count = 0, + .estimated_total_count = std.math.lossyCast(u32, estimated_total_items), + .name = [1]u8{0} ** max_name_len, + }; + const name_len = @min(max_name_len, name.len); + @memcpy(storage.name[0..name_len], name[0..name_len]); + + const parent_ptr = parentByIndex(free_index); + assert(parent_ptr.* == .unused); + @atomicStore(Node.Parent, parent_ptr, parent, .release); + + return .{ .index = free_index.toOptional() }; } }; @@ -151,25 +234,36 @@ var global_progress: Progress = .{ .terminal = null, .is_windows_terminal = false, .supports_ansi_escape_codes = false, - .root = undefined, .update_thread = null, .redraw_event = .{}, - .initialized = false, .refresh_rate_ns = undefined, .initial_delay_ns = undefined, .rows = 0, .cols = 0, .draw_buffer = undefined, .done = false, + + // TODO: make these configurable and avoid including the globals in .data if unused + .node_parents = &node_parents_buffer, + .node_storage = &node_storage_buffer, + .node_freelist = &node_freelist_buffer, + .node_freelist_first = .none, + .node_end_index = 0, }; +const default_node_storage_buffer_len = 100; +var node_parents_buffer: [default_node_storage_buffer_len]Node.Parent = undefined; +var node_storage_buffer: [default_node_storage_buffer_len]Node.Storage = undefined; +var node_freelist_buffer: [default_node_storage_buffer_len]Node.OptionalIndex = undefined; + /// Initializes a global Progress instance. 
/// /// Asserts there is only one global Progress instance. /// /// Call `Node.end` when done. -pub fn start(options: Options) *Node { - assert(!global_progress.initialized); +pub fn start(options: Options) Node { + // Ensure there is only 1 global Progress object. + assert(global_progress.node_end_index == 0); const stderr = std.io.getStdErr(); if (stderr.supportsAnsiEscapeCodes()) { global_progress.terminal = stderr; @@ -181,18 +275,12 @@ pub fn start(options: Options) *Node { // we are in a "dumb" terminal like in acme or writing to a file global_progress.terminal = stderr; } - global_progress.root = .{ - .mutex = .{}, - .parent_list_node = .{ .data = {} }, - .sibling_list_node = .{ .data = {} }, - .name = options.root_name, - .unprotected_estimated_total_items = options.estimated_total_items, - .unprotected_completed_items = 0, - }; + @memset(global_progress.node_parents, .unused); + const root_node = Node.init(@enumFromInt(0), .none, options.root_name, options.estimated_total_items); global_progress.done = false; - global_progress.initialized = true; + global_progress.node_end_index = 1; - assert(options.draw_buffer.len >= 100); + assert(options.draw_buffer.len >= 200); global_progress.draw_buffer = options.draw_buffer; global_progress.refresh_rate_ns = options.refresh_rate_ns; global_progress.initial_delay_ns = options.initial_delay_ns; @@ -204,7 +292,7 @@ pub fn start(options: Options) *Node { }; posix.sigaction(posix.SIG.WINCH, &act, null) catch { global_progress.terminal = null; - return &global_progress.root; + return root_node; }; if (global_progress.terminal != null) { @@ -215,7 +303,7 @@ pub fn start(options: Options) *Node { } } - return &global_progress.root; + return root_node; } /// Returns whether a resize is needed to learn the terminal size. @@ -263,11 +351,85 @@ const save = "\x1b7"; const restore = "\x1b8"; const finish_sync = "\x1b[?2026l"; +const tree_tee = "\x1B\x28\x30\x74\x71\x1B\x28\x42 "; // ├─ +const tree_line = "\x1B\x28\x30\x78\x1B\x28\x42 "; // │ +const tree_langle = "\x1B\x28\x30\x6d\x71\x1B\x28\x42 "; // └─ + fn clearTerminal() void { write(clear); } +const Children = struct { + child: Node.OptionalIndex, + sibling: Node.OptionalIndex, +}; + fn computeRedraw() []u8 { + // TODO make this configurable + var serialized_node_parents_buffer: [default_node_storage_buffer_len]Node.Parent = undefined; + var serialized_node_storage_buffer: [default_node_storage_buffer_len]Node.Storage = undefined; + var serialized_node_map_buffer: [default_node_storage_buffer_len]Node.Index = undefined; + var serialized_len: usize = 0; + + // Iterate all of the nodes and construct a serializable copy of the state that can be examined + // without atomics. + const end_index = @atomicLoad(u32, &global_progress.node_end_index, .monotonic); + const node_parents = global_progress.node_parents[0..end_index]; + const node_storage = global_progress.node_storage[0..end_index]; + for (node_parents, node_storage, 0..) 
|*parent_ptr, *storage_ptr, i| { + var begin_parent = @atomicLoad(Node.Parent, parent_ptr, .seq_cst); + while (begin_parent != .unused) { + const dest_storage = &serialized_node_storage_buffer[serialized_len]; + @memcpy(&dest_storage.name, &storage_ptr.name); + dest_storage.completed_count = @atomicLoad(u32, &storage_ptr.completed_count, .monotonic); + dest_storage.estimated_total_count = @atomicLoad(u32, &storage_ptr.estimated_total_count, .monotonic); + + const end_parent = @atomicLoad(Node.Parent, parent_ptr, .seq_cst); + if (begin_parent == end_parent) { + serialized_node_parents_buffer[serialized_len] = begin_parent; + serialized_node_map_buffer[i] = @enumFromInt(serialized_len); + serialized_len += 1; + break; + } + + begin_parent = end_parent; + } + } + + // Now we can analyze our copy of the graph without atomics, reconstructing + // children lists which do not exist in the canonical data. These are + // needed for tree traversal below. + const serialized_node_parents = serialized_node_parents_buffer[0..serialized_len]; + const serialized_node_storage = serialized_node_storage_buffer[0..serialized_len]; + + // Remap parents to point inside serialized arrays. + for (serialized_node_parents) |*parent| { + parent.* = switch (parent.*) { + .unused => unreachable, + .none => .none, + _ => |p| serialized_node_map_buffer[@intFromEnum(p)].toParent(), + }; + } + + var children_buffer: [default_node_storage_buffer_len]Children = undefined; + const children = children_buffer[0..serialized_len]; + + @memset(children, .{ .child = .none, .sibling = .none }); + + for (serialized_node_parents, 0..) |parent, child_index_usize| { + const child_index: Node.Index = @enumFromInt(child_index_usize); + assert(parent != .unused); + const parent_index = parent.unwrap() orelse continue; + const children_node = &children[@intFromEnum(parent_index)]; + if (children_node.child.unwrap()) |existing_child_index| { + const existing_child = &children[@intFromEnum(existing_child_index)]; + existing_child.sibling = child_index.toOptional(); + children[@intFromEnum(child_index)].sibling = existing_child.sibling; + } else { + children_node.child = child_index.toOptional(); + } + } + // The strategy is: keep the cursor at the beginning, and then with every redraw: // erase, save, write, restore @@ -280,32 +442,91 @@ fn computeRedraw() []u8 { buf[0..prefix.len].* = prefix.*; i = prefix.len; - // Walk the tree and write the progress output to the buffer. 
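
A note on the copy loop at the top of computeRedraw: it behaves like a sequence lock with the parent slot as the sequence word. The update thread keeps a node's copy only when the parent it read before and after copying is the same non-`unused` value; `Node.init` publishes a node by storing its parent last, and `Node.end` retires it by storing `.unused` first, so a copy that raced with a concurrent free is retried (or skipped once the slot reads `.unused`) rather than rendered.
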
- var node: *Node = &global_progress.root; - while (true) { - const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .monotonic); - const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .monotonic); + const root_node_index: Node.Index = @enumFromInt(0); + i = computeNode(buf, i, serialized_node_storage, serialized_node_parents, children, root_node_index); - if (node.name.len != 0 or eti > 0) { - if (node.name.len != 0) { - i += (std.fmt.bufPrint(buf[i..], "{s}", .{node.name}) catch @panic("TODO")).len; - } - if (eti > 0) { - i += (std.fmt.bufPrint(buf[i..], "[{d}/{d}] ", .{ completed_items, eti }) catch @panic("TODO")).len; - } else if (completed_items != 0) { - i += (std.fmt.bufPrint(buf[i..], "[{d}] ", .{completed_items}) catch @panic("TODO")).len; - } + buf[i..][0..suffix.len].* = suffix.*; + i += suffix.len; + + return buf[0..i]; +} + +fn computePrefix( + buf: []u8, + start_i: usize, + serialized_node_storage: []const Node.Storage, + serialized_node_parents: []const Node.Parent, + children: []const Children, + node_index: Node.Index, +) usize { + var i = start_i; + const parent_index = serialized_node_parents[@intFromEnum(node_index)].unwrap() orelse return i; + if (serialized_node_parents[@intFromEnum(parent_index)] == .none) return i; + i = computePrefix(buf, i, serialized_node_storage, serialized_node_parents, children, parent_index); + if (children[@intFromEnum(parent_index)].sibling == .none) { + buf[i..][0..3].* = " ".*; + i += 3; + } else { + buf[i..][0..tree_line.len].* = tree_line.*; + i += tree_line.len; + } + return i; +} + +fn computeNode( + buf: []u8, + start_i: usize, + serialized_node_storage: []const Node.Storage, + serialized_node_parents: []const Node.Parent, + children: []const Children, + node_index: Node.Index, +) usize { + var i = start_i; + i = computePrefix(buf, i, serialized_node_storage, serialized_node_parents, children, node_index); + + const storage = &serialized_node_storage[@intFromEnum(node_index)]; + const estimated_total = storage.estimated_total_count; + const completed_items = storage.completed_count; + const name = if (std.mem.indexOfScalar(u8, &storage.name, 0)) |end| storage.name[0..end] else &storage.name; + const parent = serialized_node_parents[@intFromEnum(node_index)]; + + if (parent != .none) { + if (children[@intFromEnum(node_index)].sibling == .none) { + buf[i..][0..tree_langle.len].* = tree_langle.*; + i += tree_langle.len; + } else { + buf[i..][0..tree_tee.len].* = tree_tee.*; + i += tree_tee.len; } + } - node = @atomicLoad(?*Node, &node.recently_updated_child, .acquire) orelse break; + if (name.len != 0 or estimated_total > 0) { + if (estimated_total > 0) { + i += (std.fmt.bufPrint(buf[i..], "[{d}/{d}] ", .{ completed_items, estimated_total }) catch &.{}).len; + } else if (completed_items != 0) { + i += (std.fmt.bufPrint(buf[i..], "[{d}] ", .{completed_items}) catch &.{}).len; + } + if (name.len != 0) { + i += (std.fmt.bufPrint(buf[i..], "{s}", .{name}) catch &.{}).len; + } } - i = @min(global_progress.cols + prefix.len, i); + i = @min(global_progress.cols + start_i, i); + buf[i] = '\n'; + i += 1; - buf[i..][0..suffix.len].* = suffix.*; - i += suffix.len; + if (children[@intFromEnum(node_index)].child.unwrap()) |child| { + i = computeNode(buf, i, serialized_node_storage, serialized_node_parents, children, child); + } - return buf[0..i]; + { + var opt_sibling = children[@intFromEnum(node_index)].sibling; + while (opt_sibling.unwrap()) |sibling| { + i = computeNode(buf, i, serialized_node_storage, 
serialized_node_parents, children, sibling); + } + } + + return i; } fn write(buf: []const u8) void { From 66c3b6ac65fad662c121073500c3cf3b7c9fcb60 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 22 May 2024 22:01:15 -0700 Subject: [PATCH 04/60] fix terminal repainting the clear, save, restore thing doesn't work when the terminal is at the bottom --- lib/std/Progress.zig | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index cac0839de37f..5d2d776323de 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -35,6 +35,9 @@ initial_delay_ns: u64, rows: u16, cols: u16, +/// Needed because terminal escape codes require one to take scrolling into +/// account. +newline_count: u16, /// Accessed only by the update thread. draw_buffer: []u8, @@ -240,6 +243,7 @@ var global_progress: Progress = .{ .initial_delay_ns = undefined, .rows = 0, .cols = 0, + .newline_count = 0, .draw_buffer = undefined, .done = false, @@ -346,6 +350,7 @@ fn updateThreadRun() void { } const start_sync = "\x1b[?2026h"; +const up_one_line = "\x1bM"; const clear = "\x1b[J"; const save = "\x1b7"; const restore = "\x1b8"; @@ -431,22 +436,32 @@ fn computeRedraw() []u8 { } // The strategy is: keep the cursor at the beginning, and then with every redraw: - // erase, save, write, restore + // erase to end of screen, write, move cursor to beginning of line, move cursor up N lines var i: usize = 0; const buf = global_progress.draw_buffer; - const prefix = start_sync ++ clear ++ save; - const suffix = restore ++ finish_sync; + buf[i..][0..start_sync.len].* = start_sync.*; + i += start_sync.len; - buf[0..prefix.len].* = prefix.*; - i = prefix.len; + buf[0..clear.len].* = clear.*; + i = clear.len; const root_node_index: Node.Index = @enumFromInt(0); i = computeNode(buf, i, serialized_node_storage, serialized_node_parents, children, root_node_index); - buf[i..][0..suffix.len].* = suffix.*; - i += suffix.len; + if (buf[i - 1] == '\n') { + buf[i - 1] = '\r'; + const prev_nl_n = global_progress.newline_count - 1; + global_progress.newline_count = 0; + for (0..prev_nl_n) |_| { + buf[i..][0..up_one_line.len].* = up_one_line.*; + i += up_one_line.len; + } + } + + buf[i..][0..finish_sync.len].* = finish_sync.*; + i += finish_sync.len; return buf[0..i]; } @@ -514,6 +529,7 @@ fn computeNode( i = @min(global_progress.cols + start_i, i); buf[i] = '\n'; i += 1; + global_progress.newline_count += 1; if (children[@intFromEnum(node_index)].child.unwrap()) |child| { i = computeNode(buf, i, serialized_node_storage, serialized_node_parents, children, child); From 582acdf7212949913c0c902b354172502fb00e73 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 22 May 2024 22:09:22 -0700 Subject: [PATCH 05/60] keep the cursor at the end instead of beginning --- lib/std/Progress.zig | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 5d2d776323de..9f9554dad935 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -60,7 +60,7 @@ pub const Options = struct { /// Must be at least 200 bytes. draw_buffer: []u8, /// How many nanoseconds between writing updates to the terminal. - refresh_rate_ns: u64 = 50 * std.time.ns_per_ms, + refresh_rate_ns: u64 = 60 * std.time.ns_per_ms, /// How many nanoseconds to keep the output hidden initial_delay_ns: u64 = 500 * std.time.ns_per_ms, /// If provided, causes the progress item to have a denominator. 
@@ -435,8 +435,8 @@ fn computeRedraw() []u8 { } } - // The strategy is: keep the cursor at the beginning, and then with every redraw: - // erase to end of screen, write, move cursor to beginning of line, move cursor up N lines + // The strategy is: keep the cursor at the end, and then with every redraw: + // move cursor to beginning of line, move cursor up N lines, erase to end of screen, write var i: usize = 0; const buf = global_progress.draw_buffer; @@ -444,22 +444,26 @@ fn computeRedraw() []u8 { buf[i..][0..start_sync.len].* = start_sync.*; i += start_sync.len; - buf[0..clear.len].* = clear.*; - i = clear.len; - - const root_node_index: Node.Index = @enumFromInt(0); - i = computeNode(buf, i, serialized_node_storage, serialized_node_parents, children, root_node_index); - - if (buf[i - 1] == '\n') { - buf[i - 1] = '\r'; - const prev_nl_n = global_progress.newline_count - 1; + const prev_nl_n = global_progress.newline_count; + if (global_progress.newline_count > 0) { global_progress.newline_count = 0; + buf[i] = '\r'; + i += 1; for (0..prev_nl_n) |_| { buf[i..][0..up_one_line.len].* = up_one_line.*; i += up_one_line.len; } } + buf[i..][0..clear.len].* = clear.*; + i += clear.len; + + const root_node_index: Node.Index = @enumFromInt(0); + i = computeNode(buf, i, serialized_node_storage, serialized_node_parents, children, root_node_index); + + // Truncate trailing newline. + //if (buf[i - 1] == '\n') i -= 1; + buf[i..][0..finish_sync.len].* = finish_sync.*; i += finish_sync.len; From 67e08e7b3cbed652953b21fb50ffff337318ae16 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 22 May 2024 22:23:49 -0700 Subject: [PATCH 06/60] fix clearing and sibling iteration --- lib/std/Progress.zig | 36 ++++++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 8 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 9f9554dad935..0e625f4785d0 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -361,7 +361,30 @@ const tree_line = "\x1B\x28\x30\x78\x1B\x28\x42 "; // │ const tree_langle = "\x1B\x28\x30\x6d\x71\x1B\x28\x42 "; // └─ fn clearTerminal() void { - write(clear); + var i: usize = 0; + const buf = global_progress.draw_buffer; + + buf[i..][0..start_sync.len].* = start_sync.*; + i += start_sync.len; + + const prev_nl_n = global_progress.newline_count; + if (prev_nl_n > 0) { + global_progress.newline_count = 0; + buf[i] = '\r'; + i += 1; + for (0..prev_nl_n) |_| { + buf[i..][0..up_one_line.len].* = up_one_line.*; + i += up_one_line.len; + } + } + + buf[i..][0..clear.len].* = clear.*; + i += clear.len; + + buf[i..][0..finish_sync.len].* = finish_sync.*; + i += finish_sync.len; + + write(buf[0..i]); } const Children = struct { @@ -428,8 +451,8 @@ fn computeRedraw() []u8 { const children_node = &children[@intFromEnum(parent_index)]; if (children_node.child.unwrap()) |existing_child_index| { const existing_child = &children[@intFromEnum(existing_child_index)]; - existing_child.sibling = child_index.toOptional(); children[@intFromEnum(child_index)].sibling = existing_child.sibling; + existing_child.sibling = child_index.toOptional(); } else { children_node.child = child_index.toOptional(); } @@ -445,7 +468,7 @@ fn computeRedraw() []u8 { i += start_sync.len; const prev_nl_n = global_progress.newline_count; - if (global_progress.newline_count > 0) { + if (prev_nl_n > 0) { global_progress.newline_count = 0; buf[i] = '\r'; i += 1; @@ -539,11 +562,8 @@ fn computeNode( i = computeNode(buf, i, serialized_node_storage, serialized_node_parents, children, child); } - { 
- var opt_sibling = children[@intFromEnum(node_index)].sibling; - while (opt_sibling.unwrap()) |sibling| { - i = computeNode(buf, i, serialized_node_storage, serialized_node_parents, children, sibling); - } + if (children[@intFromEnum(node_index)].sibling.unwrap()) |sibling| { + i = computeNode(buf, i, serialized_node_storage, serialized_node_parents, children, sibling); } return i; From ed36470af1c71a254bbe535bf70220aa27370f89 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 23 May 2024 11:10:41 -0700 Subject: [PATCH 07/60] std.Progress: truncate trailing newline --- lib/std/Progress.zig | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 0e625f4785d0..3703b1f8d1e3 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -367,12 +367,23 @@ fn clearTerminal() void { buf[i..][0..start_sync.len].* = start_sync.*; i += start_sync.len; + i = computeClear(buf, i); + + buf[i..][0..finish_sync.len].* = finish_sync.*; + i += finish_sync.len; + + write(buf[0..i]); +} + +fn computeClear(buf: []u8, start_i: usize) usize { + var i = start_i; + const prev_nl_n = global_progress.newline_count; if (prev_nl_n > 0) { global_progress.newline_count = 0; buf[i] = '\r'; i += 1; - for (0..prev_nl_n) |_| { + for (1..prev_nl_n) |_| { buf[i..][0..up_one_line.len].* = up_one_line.*; i += up_one_line.len; } @@ -381,10 +392,7 @@ fn clearTerminal() void { buf[i..][0..clear.len].* = clear.*; i += clear.len; - buf[i..][0..finish_sync.len].* = finish_sync.*; - i += finish_sync.len; - - write(buf[0..i]); + return i; } const Children = struct { @@ -467,25 +475,13 @@ fn computeRedraw() []u8 { buf[i..][0..start_sync.len].* = start_sync.*; i += start_sync.len; - const prev_nl_n = global_progress.newline_count; - if (prev_nl_n > 0) { - global_progress.newline_count = 0; - buf[i] = '\r'; - i += 1; - for (0..prev_nl_n) |_| { - buf[i..][0..up_one_line.len].* = up_one_line.*; - i += up_one_line.len; - } - } - - buf[i..][0..clear.len].* = clear.*; - i += clear.len; + i = computeClear(buf, i); const root_node_index: Node.Index = @enumFromInt(0); i = computeNode(buf, i, serialized_node_storage, serialized_node_parents, children, root_node_index); // Truncate trailing newline. - //if (buf[i - 1] == '\n') i -= 1; + if (buf[i - 1] == '\n') i -= 1; buf[i..][0..finish_sync.len].* = finish_sync.*; i += finish_sync.len; From f07116404ae323efceb57cc48459f62e7a4d6f81 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 23 May 2024 14:10:03 -0700 Subject: [PATCH 08/60] std.Progress: child process sends updates via IPC --- lib/std/Progress.zig | 183 ++++++++++++++++++++++++++------------ lib/std/process.zig | 80 ++++++++++++++--- lib/std/process/Child.zig | 51 +++++++++-- 3 files changed, 241 insertions(+), 73 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 3703b1f8d1e3..199a976b6dc1 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -74,7 +74,7 @@ pub const Options = struct { pub const Node = struct { index: OptionalIndex, - pub const max_name_len = 38; + pub const max_name_len = 40; const Storage = extern struct { /// Little endian. @@ -268,17 +268,7 @@ var node_freelist_buffer: [default_node_storage_buffer_len]Node.OptionalIndex = pub fn start(options: Options) Node { // Ensure there is only 1 global Progress object. 
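
The child-side half of the IPC handshake is just below: when ZIG_PROGRESS names a file descriptor as a base-10 integer, the child skips terminal handling and spawns ipcThreadRun on that fd instead. The parent-side wiring lives in std/process/Child.zig (listed in the diffstat but not shown in this hunk); assuming it simply hands the child the write end of a pipe, a rough sketch of that side would be:

    const std = @import("std");

    /// Hypothetical helper, not the actual std.process.Child code: create the
    /// progress pipe and advertise its child-facing end via ZIG_PROGRESS.
    fn setUpProgressIpc(env_map: *std.process.EnvMap) ![2]std.posix.fd_t {
        const fds = try std.posix.pipe();
        var buf: [16]u8 = undefined;
        const value = try std.fmt.bufPrint(&buf, "{d}", .{fds[1]});
        try env_map.put("ZIG_PROGRESS", value);
        // The child inherits and writes node updates to fds[1];
        // the parent reads them from fds[0].
        return fds;
    }
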
assert(global_progress.node_end_index == 0); - const stderr = std.io.getStdErr(); - if (stderr.supportsAnsiEscapeCodes()) { - global_progress.terminal = stderr; - global_progress.supports_ansi_escape_codes = true; - } else if (builtin.os.tag == .windows and stderr.isTty()) { - global_progress.is_windows_terminal = true; - global_progress.terminal = stderr; - } else if (builtin.os.tag != .windows) { - // we are in a "dumb" terminal like in acme or writing to a file - global_progress.terminal = stderr; - } + @memset(global_progress.node_parents, .unused); const root_node = Node.init(@enumFromInt(0), .none, options.root_name, options.estimated_total_items); global_progress.done = false; @@ -289,22 +279,51 @@ pub fn start(options: Options) Node { global_progress.refresh_rate_ns = options.refresh_rate_ns; global_progress.initial_delay_ns = options.initial_delay_ns; - var act: posix.Sigaction = .{ - .handler = .{ .sigaction = handleSigWinch }, - .mask = posix.empty_sigset, - .flags = (posix.SA.SIGINFO | posix.SA.RESTART), - }; - posix.sigaction(posix.SIG.WINCH, &act, null) catch { - global_progress.terminal = null; - return root_node; - }; - - if (global_progress.terminal != null) { - if (std.Thread.spawn(.{}, updateThreadRun, .{})) |thread| { + if (std.process.parseEnvVarInt("ZIG_PROGRESS", u31, 10)) |ipc_fd| { + if (std.Thread.spawn(.{}, ipcThreadRun, .{ipc_fd})) |thread| { global_progress.update_thread = thread; - } else |_| { - global_progress.terminal = null; + } else |err| { + std.log.warn("failed to spawn IPC thread for communicating progress to parent: {s}", .{@errorName(err)}); + return .{ .index = .none }; } + } else |env_err| switch (env_err) { + error.EnvironmentVariableNotFound => { + const stderr = std.io.getStdErr(); + if (stderr.supportsAnsiEscapeCodes()) { + global_progress.terminal = stderr; + global_progress.supports_ansi_escape_codes = true; + } else if (builtin.os.tag == .windows and stderr.isTty()) { + global_progress.is_windows_terminal = true; + global_progress.terminal = stderr; + } else if (builtin.os.tag != .windows) { + // we are in a "dumb" terminal like in acme or writing to a file + global_progress.terminal = stderr; + } + + if (global_progress.terminal == null) { + return .{ .index = .none }; + } + + var act: posix.Sigaction = .{ + .handler = .{ .sigaction = handleSigWinch }, + .mask = posix.empty_sigset, + .flags = (posix.SA.SIGINFO | posix.SA.RESTART), + }; + posix.sigaction(posix.SIG.WINCH, &act, null) catch |err| { + std.log.warn("failed to install SIGWINCH signal handler for noticing terminal resizes: {s}", .{@errorName(err)}); + }; + + if (std.Thread.spawn(.{}, updateThreadRun, .{})) |thread| { + global_progress.update_thread = thread; + } else |err| { + std.log.warn("unable to spawn thread for printing progress to terminal: {s}", .{@errorName(err)}); + return .{ .index = .none }; + } + }, + else => |e| { + std.log.warn("invalid ZIG_PROGRESS file descriptor integer: {s}", .{@errorName(e)}); + return .{ .index = .none }; + }, } return root_node; @@ -326,12 +345,10 @@ fn updateThreadRun() void { const resize_flag = wait(global_progress.initial_delay_ns); maybeUpdateSize(resize_flag); - const buffer = b: { - if (@atomicLoad(bool, &global_progress.done, .seq_cst)) - return clearTerminal(); + if (@atomicLoad(bool, &global_progress.done, .seq_cst)) + return clearTerminal(); - break :b computeRedraw(); - }; + const buffer = computeRedraw(); write(buffer); } @@ -339,16 +356,36 @@ fn updateThreadRun() void { const resize_flag = wait(global_progress.refresh_rate_ns); 
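For context on the two code paths in start() above: a process either inherits ZIG_PROGRESS and becomes an IPC writer, or it owns the terminal and spawns the update thread; user code looks the same either way. A minimal usage sketch, assuming the Options fields (root_name, estimated_total_items) shown earlier in this series and the field defaults that the build runner change later in this series relies on:

    const std = @import("std");

    pub fn main() void {
        // One global instance per process; start() returns the root node.
        const root = std.Progress.start(.{ .root_name = "demo", .estimated_total_items = 1 });
        // Ending the root requests shutdown of the update/IPC thread.
        defer root.end();

        const sub = root.start("working", 100);
        defer sub.end();
        for (0..100) |_| sub.completeOne();
    }
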
maybeUpdateSize(resize_flag); - const buffer = b: { - if (@atomicLoad(bool, &global_progress.done, .seq_cst)) - return clearTerminal(); + if (@atomicLoad(bool, &global_progress.done, .seq_cst)) + return clearTerminal(); - break :b computeRedraw(); - }; + const buffer = computeRedraw(); write(buffer); } } +fn ipcThreadRun(fd: posix.fd_t) void { + { + _ = wait(global_progress.initial_delay_ns); + + if (@atomicLoad(bool, &global_progress.done, .seq_cst)) + return; + + const serialized = serialize(); + writeIpc(fd, serialized); + } + + while (true) { + _ = wait(global_progress.refresh_rate_ns); + + if (@atomicLoad(bool, &global_progress.done, .seq_cst)) + return clearTerminal(); + + const serialized = serialize(); + writeIpc(fd, serialized); + } +} + const start_sync = "\x1b[?2026h"; const up_one_line = "\x1bM"; const clear = "\x1b[J"; @@ -400,11 +437,17 @@ const Children = struct { sibling: Node.OptionalIndex, }; -fn computeRedraw() []u8 { - // TODO make this configurable - var serialized_node_parents_buffer: [default_node_storage_buffer_len]Node.Parent = undefined; - var serialized_node_storage_buffer: [default_node_storage_buffer_len]Node.Storage = undefined; - var serialized_node_map_buffer: [default_node_storage_buffer_len]Node.Index = undefined; +// TODO make this configurable +var serialized_node_parents_buffer: [default_node_storage_buffer_len]Node.Parent = undefined; +var serialized_node_storage_buffer: [default_node_storage_buffer_len]Node.Storage = undefined; +var serialized_node_map_buffer: [default_node_storage_buffer_len]Node.Index = undefined; + +const Serialized = struct { + parents: []Node.Parent, + storage: []Node.Storage, +}; + +fn serialize() Serialized { var serialized_len: usize = 0; // Iterate all of the nodes and construct a serializable copy of the state that can be examined @@ -447,12 +490,21 @@ fn computeRedraw() []u8 { }; } + return .{ + .parents = serialized_node_parents, + .storage = serialized_node_storage, + }; +} + +fn computeRedraw() []u8 { + const serialized = serialize(); + var children_buffer: [default_node_storage_buffer_len]Children = undefined; - const children = children_buffer[0..serialized_len]; + const children = children_buffer[0..serialized.parents.len]; @memset(children, .{ .child = .none, .sibling = .none }); - for (serialized_node_parents, 0..) |parent, child_index_usize| { + for (serialized.parents, 0..) |parent, child_index_usize| { const child_index: Node.Index = @enumFromInt(child_index_usize); assert(parent != .unused); const parent_index = parent.unwrap() orelse continue; @@ -478,7 +530,7 @@ fn computeRedraw() []u8 { i = computeClear(buf, i); const root_node_index: Node.Index = @enumFromInt(0); - i = computeNode(buf, i, serialized_node_storage, serialized_node_parents, children, root_node_index); + i = computeNode(buf, i, serialized, children, root_node_index); // Truncate trailing newline. 
if (buf[i - 1] == '\n') i -= 1; @@ -492,15 +544,14 @@ fn computeRedraw() []u8 { fn computePrefix( buf: []u8, start_i: usize, - serialized_node_storage: []const Node.Storage, - serialized_node_parents: []const Node.Parent, + serialized: Serialized, children: []const Children, node_index: Node.Index, ) usize { var i = start_i; - const parent_index = serialized_node_parents[@intFromEnum(node_index)].unwrap() orelse return i; - if (serialized_node_parents[@intFromEnum(parent_index)] == .none) return i; - i = computePrefix(buf, i, serialized_node_storage, serialized_node_parents, children, parent_index); + const parent_index = serialized.parents[@intFromEnum(node_index)].unwrap() orelse return i; + if (serialized.parents[@intFromEnum(parent_index)] == .none) return i; + i = computePrefix(buf, i, serialized, children, parent_index); if (children[@intFromEnum(parent_index)].sibling == .none) { buf[i..][0..3].* = " ".*; i += 3; @@ -514,19 +565,18 @@ fn computePrefix( fn computeNode( buf: []u8, start_i: usize, - serialized_node_storage: []const Node.Storage, - serialized_node_parents: []const Node.Parent, + serialized: Serialized, children: []const Children, node_index: Node.Index, ) usize { var i = start_i; - i = computePrefix(buf, i, serialized_node_storage, serialized_node_parents, children, node_index); + i = computePrefix(buf, i, serialized, children, node_index); - const storage = &serialized_node_storage[@intFromEnum(node_index)]; + const storage = &serialized.storage[@intFromEnum(node_index)]; const estimated_total = storage.estimated_total_count; const completed_items = storage.completed_count; const name = if (std.mem.indexOfScalar(u8, &storage.name, 0)) |end| storage.name[0..end] else &storage.name; - const parent = serialized_node_parents[@intFromEnum(node_index)]; + const parent = serialized.parents[@intFromEnum(node_index)]; if (parent != .none) { if (children[@intFromEnum(node_index)].sibling == .none) { @@ -555,11 +605,11 @@ fn computeNode( global_progress.newline_count += 1; if (children[@intFromEnum(node_index)].child.unwrap()) |child| { - i = computeNode(buf, i, serialized_node_storage, serialized_node_parents, children, child); + i = computeNode(buf, i, serialized, children, child); } if (children[@intFromEnum(node_index)].sibling.unwrap()) |sibling| { - i = computeNode(buf, i, serialized_node_storage, serialized_node_parents, children, sibling); + i = computeNode(buf, i, serialized, children, sibling); } return i; @@ -572,6 +622,27 @@ fn write(buf: []const u8) void { }; } +fn writeIpc(fd: posix.fd_t, serialized: Serialized) void { + assert(serialized.parents.len == serialized.storage.len); + const header = std.mem.asBytes(&serialized.parents.len); + const storage = std.mem.sliceAsBytes(serialized.storage); + const parents = std.mem.sliceAsBytes(serialized.parents); + + var vecs: [3]std.posix.iovec_const = .{ + .{ .base = header.ptr, .len = header.len }, + .{ .base = storage.ptr, .len = storage.len }, + .{ .base = parents.ptr, .len = parents.len }, + }; + + // TODO: if big endian, byteswap + // this is needed because the parent or child process might be running in qemu + + const file: std.fs.File = .{ .handle = fd }; + file.writevAll(&vecs) catch |err| { + std.log.warn("failed to send progress to parent process: {s}", .{@errorName(err)}); + }; +} + fn maybeUpdateSize(resize_flag: bool) void { if (!resize_flag) return; diff --git a/lib/std/process.zig b/lib/std/process.zig index 5bdee4971d43..ab83953d27fe 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -431,6 
+431,29 @@ pub fn hasEnvVarConstant(comptime key: []const u8) bool { } } +pub const ParseEnvVarIntError = std.fmt.ParseIntError || error{EnvironmentVariableNotFound}; + +/// Parses an environment variable as an integer. +/// +/// Since the key is comptime-known, no allocation is needed. +/// +/// On Windows, `key` must be valid UTF-8. +pub fn parseEnvVarInt(comptime key: []const u8, comptime I: type, base: u8) ParseEnvVarIntError!I { + if (native_os == .windows) { + const key_w = comptime std.unicode.utf8ToUtf16LeStringLiteral(key); + const text = getenvW(key_w) orelse return error.EnvironmentVariableNotFound; + // For this implementation perhaps std.fmt.parseInt can be expanded to be generic across + // []u8 and []u16 like how many std.mem functions work. + _ = text; + @compileError("TODO implement this"); + } else if (native_os == .wasi and !builtin.link_libc) { + @compileError("parseEnvVarInt is not supported for WASI without libc"); + } else { + const text = posix.getenv(key) orelse return error.EnvironmentVariableNotFound; + return std.fmt.parseInt(I, text, base); + } +} + pub const HasEnvVarError = error{ OutOfMemory, @@ -1790,24 +1813,61 @@ test raiseFileDescriptorLimit { raiseFileDescriptorLimit(); } -pub fn createNullDelimitedEnvMap(arena: mem.Allocator, env_map: *const EnvMap) ![:null]?[*:0]u8 { - const envp_count = env_map.count(); +pub const CreateEnvironOptions = struct { + env_map: ?*const EnvMap = null, + existing: ?[*:null]const ?[*:0]const u8 = null, + extra_usizes: []const ExtraUsize = &.{}, + + pub const ExtraUsize = struct { + name: []const u8, + value: usize, + }; +}; + +/// Creates a null-deliminated environment variable block in the format +/// expected by POSIX, by combining all the sources of key-value pairs together +/// from `options`. 
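A small aside on parseEnvVarInt above: it is what lets a child process probe for ZIG_PROGRESS without allocating. A hedged caller sketch that treats both an unset and a malformed value as "no IPC" (the real start() additionally logs a warning in the malformed case):

    const std = @import("std");

    fn inheritedProgressFd() ?u31 {
        return std.process.parseEnvVarInt("ZIG_PROGRESS", u31, 10) catch null;
    }
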
+pub fn createEnviron(arena: Allocator, options: CreateEnvironOptions) Allocator.Error![:null]?[*:0]u8 { + const envp_count = c: { + var count: usize = 0; + if (options.existing) |env| { + while (env[count]) |_| : (count += 1) {} + } + if (options.env_map) |env_map| { + count += env_map.count(); + } + count += options.extra_usizes.len; + break :c count; + }; const envp_buf = try arena.allocSentinel(?[*:0]u8, envp_count, null); - { + var i: usize = 0; + + if (options.existing) |env| { + while (env[i]) |line| : (i += 1) { + envp_buf[i] = try arena.dupeZ(u8, mem.span(line)); + } + } + + for (options.extra_usizes, envp_buf[i..][0..options.extra_usizes.len]) |extra_usize, *out| { + out.* = try std.fmt.allocPrintZ(arena, "{s}={d}", .{ extra_usize.name, extra_usize.value }); + } + i += options.extra_usizes.len; + + if (options.env_map) |env_map| { var it = env_map.iterator(); - var i: usize = 0; while (it.next()) |pair| : (i += 1) { - const env_buf = try arena.allocSentinel(u8, pair.key_ptr.len + pair.value_ptr.len + 1, 0); - @memcpy(env_buf[0..pair.key_ptr.len], pair.key_ptr.*); - env_buf[pair.key_ptr.len] = '='; - @memcpy(env_buf[pair.key_ptr.len + 1 ..][0..pair.value_ptr.len], pair.value_ptr.*); - envp_buf[i] = env_buf.ptr; + envp_buf[i] = try std.fmt.allocPrintZ(arena, "{s}={s}", .{ pair.key_ptr.*, pair.value_ptr.* }); } - assert(i == envp_count); } + + assert(i == envp_count); return envp_buf; } +pub fn createNullDelimitedEnvMap(arena: mem.Allocator, env_map: *const EnvMap) ![:null]?[*:0]u8 { + return createEnviron(arena, .{ .env_map = env_map }); +} + test createNullDelimitedEnvMap { const allocator = testing.allocator; var envmap = EnvMap.init(allocator); diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index 48ab67800e97..ef4a5c79c527 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -12,6 +12,7 @@ const EnvMap = std.process.EnvMap; const maxInt = std.math.maxInt; const assert = std.debug.assert; const native_os = builtin.os.tag; +const Allocator = std.mem.Allocator; const ChildProcess = @This(); pub const Id = switch (native_os) { @@ -92,6 +93,13 @@ request_resource_usage_statistics: bool = false, /// `spawn`. resource_usage_statistics: ResourceUsageStatistics = .{}, +/// When populated, a pipe will be created for the child process to +/// communicate progress back to the parent. The file descriptor of the +/// write end of the pipe will be specified in the `ZIG_PROGRESS` +/// environment variable inside the child process. The progress reported by +/// the child will be attached to this progress node in the parent process. +parent_progress_node: std.Progress.Node = .{ .index = .none }, + pub const ResourceUsageStatistics = struct { rusage: @TypeOf(rusage_init) = rusage_init, @@ -572,6 +580,16 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { if (any_ignore) posix.close(dev_null_fd); } + const prog_pipe: [2]posix.fd_t = p: { + if (self.parent_progress_node.index == .none) { + break :p .{ -1, -1 }; + } else { + // No CLOEXEC because the child needs access to this file descriptor. + break :p try posix.pipe2(.{}); + } + }; + errdefer destroyPipe(prog_pipe); + var arena_allocator = std.heap.ArenaAllocator.init(self.allocator); defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); @@ -588,16 +606,35 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { const argv_buf = try arena.allocSentinel(?[*:0]const u8, self.argv.len, null); for (self.argv, 0..) 
|arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr; - const envp = m: { + const envp: [*:null]const ?[*:0]const u8 = m: { + const extra_usizes: []const process.CreateEnvironOptions.ExtraUsize = if (prog_pipe[1] == -1) &.{} else &.{ + .{ .name = "ZIG_PROGRESS", .value = @intCast(prog_pipe[1]) }, + }; if (self.env_map) |env_map| { - const envp_buf = try process.createNullDelimitedEnvMap(arena, env_map); - break :m envp_buf.ptr; + break :m (try process.createEnviron(arena, .{ + .env_map = env_map, + .extra_usizes = extra_usizes, + })).ptr; } else if (builtin.link_libc) { - break :m std.c.environ; + if (extra_usizes.len == 0) { + break :m std.c.environ; + } else { + break :m (try process.createEnviron(arena, .{ + .existing = std.c.environ, + .extra_usizes = extra_usizes, + })).ptr; + } } else if (builtin.output_mode == .Exe) { // Then we have Zig start code and this works. - // TODO type-safety for null-termination of `os.environ`. - break :m @as([*:null]const ?[*:0]const u8, @ptrCast(std.os.environ.ptr)); + if (extra_usizes.len == 0) { + break :m @ptrCast(std.os.environ.ptr); + } else { + break :m (try process.createEnviron(arena, .{ + // TODO type-safety for null-termination of `os.environ`. + .existing = @ptrCast(std.os.environ.ptr), + .extra_usizes = extra_usizes, + })).ptr; + } } else { // TODO come up with a solution for this. @compileError("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process"); @@ -962,7 +999,7 @@ fn setUpChildIo(stdio: StdIo, pipe_fd: i32, std_fileno: i32, dev_null_fd: i32) ! } fn destroyPipe(pipe: [2]posix.fd_t) void { - posix.close(pipe[0]); + if (pipe[0] != -1) posix.close(pipe[0]); if (pipe[0] != pipe[1]) posix.close(pipe[1]); } From df46f5af690889508ebcb07b46c8579acaad06a2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 23 May 2024 17:01:06 -0700 Subject: [PATCH 09/60] std.Progress: include subtrees from child processes --- lib/std/Progress.zig | 137 ++++++++++++++++++++++++++++++++++---- lib/std/process/Child.zig | 29 ++++---- 2 files changed, 140 insertions(+), 26 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 199a976b6dc1..76ef51943d9f 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -83,6 +83,22 @@ pub const Node = struct { /// Little endian. estimated_total_count: u32, name: [max_name_len]u8, + + fn getIpcFd(s: Storage) ?posix.fd_t { + if (s.estimated_total_count != std.math.maxInt(u32)) + return null; + + return @bitCast(s.completed_count); + } + + fn setIpcFd(s: *Storage, fd: posix.fd_t) void { + s.estimated_total_count = std.math.maxInt(u32); + s.completed_count = @bitCast(fd); + } + + comptime { + assert((@sizeOf(Storage) % 4) == 0); + } }; const Parent = enum(u16) { @@ -201,6 +217,13 @@ pub const Node = struct { } } + /// Posix-only. Used by `std.process.Child`. + pub fn setIpcFd(node: Node, fd: posix.fd_t) void { + const index = node.index.unwrap() orelse return; + assert(fd != -1); + storageByIndex(index).setIpcFd(fd); + } + fn storageByIndex(index: Node.Index) *Node.Storage { return &global_progress.node_storage[@intFromEnum(index)]; } @@ -475,14 +498,8 @@ fn serialize() Serialized { } } - // Now we can analyze our copy of the graph without atomics, reconstructing - // children lists which do not exist in the canonical data. These are - // needed for tree traversal below. 
- const serialized_node_parents = serialized_node_parents_buffer[0..serialized_len]; - const serialized_node_storage = serialized_node_storage_buffer[0..serialized_len]; - // Remap parents to point inside serialized arrays. - for (serialized_node_parents) |*parent| { + for (serialized_node_parents_buffer[0..serialized_len]) |*parent| { parent.* = switch (parent.*) { .unused => unreachable, .none => .none, @@ -490,15 +507,99 @@ fn serialize() Serialized { }; } + // Find nodes which correspond to child processes. + var pipe_buf: [4096]u8 align(4) = undefined; + + for ( + serialized_node_parents_buffer[0..serialized_len], + serialized_node_storage_buffer[0..serialized_len], + 0.., + ) |main_parent, *main_storage, main_index| { + if (main_parent == .unused) continue; + const fd = main_storage.getIpcFd() orelse continue; + var bytes_read: usize = 0; + while (true) { + bytes_read += posix.read(fd, pipe_buf[bytes_read..]) catch |err| switch (err) { + error.WouldBlock => break, + else => |e| { + std.log.warn("failed to read child progress data: {s}", .{@errorName(e)}); + main_storage.completed_count = 0; + main_storage.estimated_total_count = 0; + continue; + }, + }; + } + // Ignore all but the last message on the pipe. + var input: []align(2) u8 = pipe_buf[0..bytes_read]; + if (input.len == 0) { + main_storage.completed_count = 0; + main_storage.estimated_total_count = 0; + continue; + } + + const storage, const parents = while (true) { + if (input.len < 4) { + std.log.warn("short read: {d} out of 4 header bytes", .{input.len}); + main_storage.completed_count = 0; + main_storage.estimated_total_count = 0; + continue; + } + const subtree_len = std.mem.readInt(u32, input[0..4], .little); + const expected_bytes = 4 + subtree_len * (@sizeOf(Node.Storage) + @sizeOf(Node.Parent)); + if (input.len < expected_bytes) { + std.log.warn("short read: {d} out of {d} ({d} nodes)", .{ input.len, expected_bytes, subtree_len }); + main_storage.completed_count = 0; + main_storage.estimated_total_count = 0; + continue; + } + if (input.len > expected_bytes) { + input = @alignCast(input[expected_bytes..]); + continue; + } + const storage_bytes = input[4..][0 .. subtree_len * @sizeOf(Node.Storage)]; + const parents_bytes = input[4 + storage_bytes.len ..][0 .. subtree_len * @sizeOf(Node.Parent)]; + break .{ + std.mem.bytesAsSlice(Node.Storage, storage_bytes), + std.mem.bytesAsSlice(Node.Parent, parents_bytes), + }; + }; + + // Mount the root here. + main_storage.* = storage[0]; + + // Copy the rest of the tree to the end. + @memcpy(serialized_node_storage_buffer[serialized_len..][0 .. storage.len - 1], storage[1..]); + + // Patch up parent pointers taking into account how the subtree is mounted. + serialized_node_parents_buffer[serialized_len] = .none; + + for (serialized_node_parents_buffer[serialized_len..][0 .. parents.len - 1], parents[1..]) |*dest, p| { + dest.* = switch (p) { + // Fix bad data so the rest of the code does not see `unused`. + .none, .unused => .none, + // Root node is being mounted here. + @as(Node.Parent, @enumFromInt(0)) => @enumFromInt(main_index), + // Other nodes mounted at the end. 
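To make the index remapping in this switch concrete: if a child reports three nodes with parents {none, 0, 0}, the graft point is parent-side index 7, and serialized_len is 10, then the child's root overwrites slot 7 while child nodes 1 and 2 land in slots 10 and 11 with parent 7. The arithmetic in isolation, as a sketch with illustrative names:

    const std = @import("std");

    fn remapParent(p: u16, main_index: u16, serialized_len: u16) u16 {
        return if (p == 0) main_index else serialized_len + p - 1;
    }

    test remapParent {
        try std.testing.expectEqual(@as(u16, 7), remapParent(0, 7, 10));
        try std.testing.expectEqual(@as(u16, 10), remapParent(1, 7, 10));
        try std.testing.expectEqual(@as(u16, 11), remapParent(2, 7, 10));
    }
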
+ _ => |off| @enumFromInt(serialized_len + @intFromEnum(off) - 1), + }; + } + + serialized_len += storage.len - 1; + } + return .{ - .parents = serialized_node_parents, - .storage = serialized_node_storage, + .parents = serialized_node_parents_buffer[0..serialized_len], + .storage = serialized_node_storage_buffer[0..serialized_len], }; } fn computeRedraw() []u8 { const serialized = serialize(); + // Now we can analyze our copy of the graph without atomics, reconstructing + // children lists which do not exist in the canonical data. These are + // needed for tree traversal below. + var children_buffer: [default_node_storage_buffer_len]Children = undefined; const children = children_buffer[0..serialized.parents.len]; @@ -624,7 +725,8 @@ fn write(buf: []const u8) void { fn writeIpc(fd: posix.fd_t, serialized: Serialized) void { assert(serialized.parents.len == serialized.storage.len); - const header = std.mem.asBytes(&serialized.parents.len); + const serialized_len: u32 = @intCast(serialized.parents.len); + const header = std.mem.asBytes(&serialized_len); const storage = std.mem.sliceAsBytes(serialized.storage); const parents = std.mem.sliceAsBytes(serialized.parents); @@ -637,10 +739,17 @@ fn writeIpc(fd: posix.fd_t, serialized: Serialized) void { // TODO: if big endian, byteswap // this is needed because the parent or child process might be running in qemu - const file: std.fs.File = .{ .handle = fd }; - file.writevAll(&vecs) catch |err| { - std.log.warn("failed to send progress to parent process: {s}", .{@errorName(err)}); - }; + // If this write would block we do not want to keep trying, but we need to + // know if a partial message was written. + if (posix.writev(fd, &vecs)) |written| { + const total = header.len + storage.len + parents.len; + if (written < total) { + std.log.warn("short write: {d} out of {d}", .{ written, total }); + } + } else |err| switch (err) { + error.WouldBlock => {}, + else => |e| std.log.warn("failed to send progress to parent process: {s}", .{@errorName(e)}), + } } fn maybeUpdateSize(resize_flag: bool) void { diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index ef4a5c79c527..09664876ddc2 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -98,7 +98,10 @@ resource_usage_statistics: ResourceUsageStatistics = .{}, /// write end of the pipe will be specified in the `ZIG_PROGRESS` /// environment variable inside the child process. The progress reported by /// the child will be attached to this progress node in the parent process. -parent_progress_node: std.Progress.Node = .{ .index = .none }, +/// +/// The child's progress tree will be grafted into the parent's progress tree, +/// by substituting this node with the child's root node. +progress_node: std.Progress.Node = .{ .index = .none }, pub const ResourceUsageStatistics = struct { rusage: @TypeOf(rusage_init) = rusage_init, @@ -581,11 +584,11 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { } const prog_pipe: [2]posix.fd_t = p: { - if (self.parent_progress_node.index == .none) { + if (self.progress_node.index == .none) { break :p .{ -1, -1 }; } else { // No CLOEXEC because the child needs access to this file descriptor. 
- break :p try posix.pipe2(.{}); + break :p try posix.pipe2(.{ .NONBLOCK = true }); } }; errdefer destroyPipe(prog_pipe); @@ -685,18 +688,18 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { // we are the parent const pid: i32 = @intCast(pid_result); - if (self.stdin_behavior == StdIo.Pipe) { - self.stdin = File{ .handle = stdin_pipe[1] }; + if (self.stdin_behavior == .Pipe) { + self.stdin = .{ .handle = stdin_pipe[1] }; } else { self.stdin = null; } - if (self.stdout_behavior == StdIo.Pipe) { - self.stdout = File{ .handle = stdout_pipe[0] }; + if (self.stdout_behavior == .Pipe) { + self.stdout = .{ .handle = stdout_pipe[0] }; } else { self.stdout = null; } - if (self.stderr_behavior == StdIo.Pipe) { - self.stderr = File{ .handle = stderr_pipe[0] }; + if (self.stderr_behavior == .Pipe) { + self.stderr = .{ .handle = stderr_pipe[0] }; } else { self.stderr = null; } @@ -705,15 +708,17 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { self.err_pipe = err_pipe; self.term = null; - if (self.stdin_behavior == StdIo.Pipe) { + if (self.stdin_behavior == .Pipe) { posix.close(stdin_pipe[0]); } - if (self.stdout_behavior == StdIo.Pipe) { + if (self.stdout_behavior == .Pipe) { posix.close(stdout_pipe[1]); } - if (self.stderr_behavior == StdIo.Pipe) { + if (self.stderr_behavior == .Pipe) { posix.close(stderr_pipe[1]); } + + self.progress_node.setIpcFd(prog_pipe[0]); } fn spawnWindows(self: *ChildProcess) SpawnError!void { From 3a768bd6ce9e891ad258b32fb2dd0ed912ef9e9b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 23 May 2024 18:45:04 -0700 Subject: [PATCH 10/60] std.Progress: save a copy of IPC data so that the previous message can be used when the pipe is empty. prevents flickering --- lib/std/Progress.zig | 90 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 80 insertions(+), 10 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 76ef51943d9f..4efb8bb194c8 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -472,6 +472,7 @@ const Serialized = struct { fn serialize() Serialized { var serialized_len: usize = 0; + var any_ipc = false; // Iterate all of the nodes and construct a serializable copy of the state that can be examined // without atomics. @@ -486,6 +487,8 @@ fn serialize() Serialized { dest_storage.completed_count = @atomicLoad(u32, &storage_ptr.completed_count, .monotonic); dest_storage.estimated_total_count = @atomicLoad(u32, &storage_ptr.estimated_total_count, .monotonic); + any_ipc = any_ipc or dest_storage.getIpcFd() != null; + const end_parent = @atomicLoad(Node.Parent, parent_ptr, .seq_cst); if (begin_parent == end_parent) { serialized_node_parents_buffer[serialized_len] = begin_parent; @@ -508,6 +511,32 @@ fn serialize() Serialized { } // Find nodes which correspond to child processes. 
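Stepping back to the parent-side API these commits build up: all a caller has to do is populate the new std.process.Child.progress_node field before spawn(); the pipe, the ZIG_PROGRESS variable, and setIpcFd() are handled internally. A hedged sketch, where the allocator and parent node are assumed to come from the caller:

    const std = @import("std");

    fn spawnWithProgress(gpa: std.mem.Allocator, parent: std.Progress.Node) !std.process.Child.Term {
        var child = std.process.Child.init(&.{ "zig", "version" }, gpa);
        // The child inherits ZIG_PROGRESS=<fd>; its own std.Progress.start()
        // then serializes updates over that pipe instead of drawing.
        child.progress_node = parent.start("zig version", 0);
        defer child.progress_node.end();
        try child.spawn();
        return child.wait();
    }
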
+ if (any_ipc) + serialized_len = serializeIpc(serialized_len); + + return .{ + .parents = serialized_node_parents_buffer[0..serialized_len], + .storage = serialized_node_storage_buffer[0..serialized_len], + }; +} + +var parents_copy: [default_node_storage_buffer_len]Node.Parent = undefined; +var storage_copy: [default_node_storage_buffer_len]Node.Storage = undefined; + +const SavedMetadata = extern struct { + start_index: u16, + nodes_len: u16, + main_index: u16, + flags: Flags, + + const Flags = enum(u16) { + saved = std.math.maxInt(u16), + _, + }; +}; + +fn serializeIpc(start_serialized_len: usize) usize { + var serialized_len = start_serialized_len; var pipe_buf: [4096]u8 align(4) = undefined; for ( @@ -532,24 +561,23 @@ fn serialize() Serialized { // Ignore all but the last message on the pipe. var input: []align(2) u8 = pipe_buf[0..bytes_read]; if (input.len == 0) { - main_storage.completed_count = 0; - main_storage.estimated_total_count = 0; + serialized_len = useSavedIpcData(serialized_len, main_storage, main_index); continue; } const storage, const parents = while (true) { if (input.len < 4) { std.log.warn("short read: {d} out of 4 header bytes", .{input.len}); - main_storage.completed_count = 0; - main_storage.estimated_total_count = 0; + // TODO keep track of the short read to trash odd bytes with the next read + serialized_len = useSavedIpcData(serialized_len, main_storage, main_index); continue; } const subtree_len = std.mem.readInt(u32, input[0..4], .little); const expected_bytes = 4 + subtree_len * (@sizeOf(Node.Storage) + @sizeOf(Node.Parent)); if (input.len < expected_bytes) { std.log.warn("short read: {d} out of {d} ({d} nodes)", .{ input.len, expected_bytes, subtree_len }); - main_storage.completed_count = 0; - main_storage.estimated_total_count = 0; + // TODO keep track of the short read to trash odd bytes with the next read + serialized_len = useSavedIpcData(serialized_len, main_storage, main_index); continue; } if (input.len > expected_bytes) { @@ -564,6 +592,14 @@ fn serialize() Serialized { }; }; + // Remember in case the pipe is empty on next update. + @as(*SavedMetadata, @ptrCast(&main_storage.name)).* = .{ + .start_index = @intCast(serialized_len), + .nodes_len = @intCast(parents.len), + .main_index = @intCast(main_index), + .flags = .saved, + }; + // Mount the root here. main_storage.* = storage[0]; @@ -580,6 +616,7 @@ fn serialize() Serialized { // Root node is being mounted here. @as(Node.Parent, @enumFromInt(0)) => @enumFromInt(main_index), // Other nodes mounted at the end. + // TODO check for bad data pointing outside the expected range _ => |off| @enumFromInt(serialized_len + @intFromEnum(off) - 1), }; } @@ -587,10 +624,43 @@ fn serialize() Serialized { serialized_len += storage.len - 1; } - return .{ - .parents = serialized_node_parents_buffer[0..serialized_len], - .storage = serialized_node_storage_buffer[0..serialized_len], - }; + // Save a copy in case any pipes are empty on the next update. 
+ @memcpy(parents_copy[0..serialized_len], serialized_node_parents_buffer[0..serialized_len]); + @memcpy(storage_copy[0..serialized_len], serialized_node_storage_buffer[0..serialized_len]); + + return serialized_len; +} + +fn useSavedIpcData(start_serialized_len: usize, main_storage: *Node.Storage, main_index: usize) usize { + const saved_metadata: *SavedMetadata = @ptrCast(&main_storage.name); + if (saved_metadata.flags != .saved) { + main_storage.completed_count = 0; + main_storage.estimated_total_count = 0; + return start_serialized_len; + } + + const start_index = saved_metadata.start_index; + const nodes_len = saved_metadata.nodes_len; + const old_main_index = saved_metadata.main_index; + + const parents = parents_copy[start_index..][0 .. nodes_len - 1]; + const storage = storage_copy[start_index..][0 .. nodes_len - 1]; + + main_storage.* = storage_copy[old_main_index]; + + @memcpy(serialized_node_storage_buffer[start_serialized_len..][0..storage.len], storage); + + for (serialized_node_parents_buffer[start_serialized_len..][0..parents.len], parents) |*dest, p| { + dest.* = switch (p) { + .none, .unused => .none, + _ => |prev| @enumFromInt(if (@intFromEnum(prev) == old_main_index) + main_index + else + @intFromEnum(prev) - start_index + start_serialized_len), + }; + } + + return start_serialized_len + storage.len; } fn computeRedraw() []u8 { From c01cfde6882840980cbf63593eafb5e1090b1ff0 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 23 May 2024 20:22:58 -0700 Subject: [PATCH 11/60] std.process.Child: fix ZIG_PROGRESS env var handling and properly dup2 the file descriptor to make it handle the case when other files are already open --- lib/std/process.zig | 140 +++++++++++++++++++++++++++++--------- lib/std/process/Child.zig | 64 +++++++++-------- 2 files changed, 138 insertions(+), 66 deletions(-) diff --git a/lib/std/process.zig b/lib/std/process.zig index ab83953d27fe..e9de2cf51791 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -1814,58 +1814,132 @@ test raiseFileDescriptorLimit { } pub const CreateEnvironOptions = struct { - env_map: ?*const EnvMap = null, - existing: ?[*:null]const ?[*:0]const u8 = null, - extra_usizes: []const ExtraUsize = &.{}, - - pub const ExtraUsize = struct { - name: []const u8, - value: usize, - }; + /// `null` means to leave the `ZIG_PROGRESS` environment variable unmodified. + /// If non-null, negative means to remove the environment variable, and >= 0 + /// means to provide it with the given integer. + zig_progress_fd: ?i32 = null, }; /// Creates a null-deliminated environment variable block in the format -/// expected by POSIX, by combining all the sources of key-value pairs together -/// from `options`. -pub fn createEnviron(arena: Allocator, options: CreateEnvironOptions) Allocator.Error![:null]?[*:0]u8 { - const envp_count = c: { - var count: usize = 0; - if (options.existing) |env| { - while (env[count]) |_| : (count += 1) {} - } - if (options.env_map) |env_map| { - count += env_map.count(); +/// expected by POSIX, from a hash map plus options. 
+pub fn createEnvironFromMap( + arena: Allocator, + map: *const EnvMap, + options: CreateEnvironOptions, +) Allocator.Error![:null]?[*:0]u8 { + const ZigProgressAction = enum { nothing, edit, delete, add }; + const zig_progress_action: ZigProgressAction = a: { + const fd = options.zig_progress_fd orelse break :a .nothing; + const contains = map.get("ZIG_PROGRESS") != null; + if (fd >= 0) { + break :a if (contains) .edit else .add; + } else { + if (contains) break :a .delete; } - count += options.extra_usizes.len; - break :c count; + break :a .nothing; }; + + const envp_count: usize = @intCast(@as(isize, map.count()) + @as(isize, switch (zig_progress_action) { + .add => 1, + .delete => -1, + .nothing, .edit => 0, + })); + const envp_buf = try arena.allocSentinel(?[*:0]u8, envp_count, null); var i: usize = 0; - if (options.existing) |env| { - while (env[i]) |line| : (i += 1) { - envp_buf[i] = try arena.dupeZ(u8, mem.span(line)); - } + if (zig_progress_action == .add) { + envp_buf[i] = try std.fmt.allocPrintZ(arena, "ZIG_PROGRESS={d}", .{options.zig_progress_fd.?}); + i += 1; } - for (options.extra_usizes, envp_buf[i..][0..options.extra_usizes.len]) |extra_usize, *out| { - out.* = try std.fmt.allocPrintZ(arena, "{s}={d}", .{ extra_usize.name, extra_usize.value }); - } - i += options.extra_usizes.len; + { + var it = map.iterator(); + while (it.next()) |pair| { + if (mem.eql(u8, pair.key_ptr.*, "ZIG_PROGRESS")) switch (zig_progress_action) { + .add => unreachable, + .delete => continue, + .edit => { + envp_buf[i] = try std.fmt.allocPrintZ(arena, "{s}={d}", .{ + pair.key_ptr.*, options.zig_progress_fd.?, + }); + i += 1; + continue; + }, + .nothing => {}, + }; - if (options.env_map) |env_map| { - var it = env_map.iterator(); - while (it.next()) |pair| : (i += 1) { envp_buf[i] = try std.fmt.allocPrintZ(arena, "{s}={s}", .{ pair.key_ptr.*, pair.value_ptr.* }); + i += 1; + } + } + + assert(i == envp_count); + return envp_buf; +} + +/// Creates a null-deliminated environment variable block in the format +/// expected by POSIX, from a hash map plus options. 
+pub fn createEnvironFromExisting( + arena: Allocator, + existing: [*:null]const ?[*:0]const u8, + options: CreateEnvironOptions, +) Allocator.Error![:null]?[*:0]u8 { + const existing_count, const contains_zig_progress = c: { + var count: usize = 0; + var contains = false; + while (existing[count]) |line| : (count += 1) { + contains = contains or mem.eql(u8, mem.sliceTo(line, '='), "ZIG_PROGRESS"); } + break :c .{ count, contains }; + }; + const ZigProgressAction = enum { nothing, edit, delete, add }; + const zig_progress_action: ZigProgressAction = a: { + const fd = options.zig_progress_fd orelse break :a .nothing; + if (fd >= 0) { + break :a if (contains_zig_progress) .edit else .add; + } else { + if (contains_zig_progress) break :a .delete; + } + break :a .nothing; + }; + + const envp_count: usize = @intCast(@as(isize, @intCast(existing_count)) + @as(isize, switch (zig_progress_action) { + .add => 1, + .delete => -1, + .nothing, .edit => 0, + })); + + const envp_buf = try arena.allocSentinel(?[*:0]u8, envp_count, null); + var i: usize = 0; + var existing_index: usize = 0; + + if (zig_progress_action == .add) { + envp_buf[i] = try std.fmt.allocPrintZ(arena, "ZIG_PROGRESS={d}", .{options.zig_progress_fd.?}); + i += 1; + } + + while (existing[existing_index]) |line| : (existing_index += 1) { + if (mem.eql(u8, mem.sliceTo(line, '='), "ZIG_PROGRESS")) switch (zig_progress_action) { + .add => unreachable, + .delete => continue, + .edit => { + envp_buf[i] = try std.fmt.allocPrintZ(arena, "ZIG_PROGRESS={d}", .{options.zig_progress_fd.?}); + i += 1; + continue; + }, + .nothing => {}, + }; + envp_buf[i] = try arena.dupeZ(u8, mem.span(line)); + i += 1; } assert(i == envp_count); return envp_buf; } -pub fn createNullDelimitedEnvMap(arena: mem.Allocator, env_map: *const EnvMap) ![:null]?[*:0]u8 { - return createEnviron(arena, .{ .env_map = env_map }); +pub fn createNullDelimitedEnvMap(arena: mem.Allocator, env_map: *const EnvMap) Allocator.Error![:null]?[*:0]u8 { + return createEnvironFromMap(arena, env_map, .{}); } test createNullDelimitedEnvMap { diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index 09664876ddc2..c1f935a852f0 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -216,9 +216,9 @@ pub fn init(argv: []const []const u8, allocator: mem.Allocator) ChildProcess { .stdin = null, .stdout = null, .stderr = null, - .stdin_behavior = StdIo.Inherit, - .stdout_behavior = StdIo.Inherit, - .stderr_behavior = StdIo.Inherit, + .stdin_behavior = .Inherit, + .stdout_behavior = .Inherit, + .stderr_behavior = .Inherit, .expand_arg0 = .no_expand, }; } @@ -549,22 +549,22 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { // turns out, we `dup2` everything anyway, so there's no need! 
const pipe_flags: posix.O = .{ .CLOEXEC = true }; - const stdin_pipe = if (self.stdin_behavior == StdIo.Pipe) try posix.pipe2(pipe_flags) else undefined; - errdefer if (self.stdin_behavior == StdIo.Pipe) { + const stdin_pipe = if (self.stdin_behavior == .Pipe) try posix.pipe2(pipe_flags) else undefined; + errdefer if (self.stdin_behavior == .Pipe) { destroyPipe(stdin_pipe); }; - const stdout_pipe = if (self.stdout_behavior == StdIo.Pipe) try posix.pipe2(pipe_flags) else undefined; - errdefer if (self.stdout_behavior == StdIo.Pipe) { + const stdout_pipe = if (self.stdout_behavior == .Pipe) try posix.pipe2(pipe_flags) else undefined; + errdefer if (self.stdout_behavior == .Pipe) { destroyPipe(stdout_pipe); }; - const stderr_pipe = if (self.stderr_behavior == StdIo.Pipe) try posix.pipe2(pipe_flags) else undefined; - errdefer if (self.stderr_behavior == StdIo.Pipe) { + const stderr_pipe = if (self.stderr_behavior == .Pipe) try posix.pipe2(pipe_flags) else undefined; + errdefer if (self.stderr_behavior == .Pipe) { destroyPipe(stderr_pipe); }; - const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore); + const any_ignore = (self.stdin_behavior == .Ignore or self.stdout_behavior == .Ignore or self.stderr_behavior == .Ignore); const dev_null_fd = if (any_ignore) posix.openZ("/dev/null", .{ .ACCMODE = .RDWR }, 0) catch |err| switch (err) { error.PathAlreadyExists => unreachable, @@ -609,35 +609,24 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { const argv_buf = try arena.allocSentinel(?[*:0]const u8, self.argv.len, null); for (self.argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr; + const prog_fileno = 3; + const envp: [*:null]const ?[*:0]const u8 = m: { - const extra_usizes: []const process.CreateEnvironOptions.ExtraUsize = if (prog_pipe[1] == -1) &.{} else &.{ - .{ .name = "ZIG_PROGRESS", .value = @intCast(prog_pipe[1]) }, - }; + const prog_fd: i32 = if (prog_pipe[1] == -1) -1 else prog_fileno; if (self.env_map) |env_map| { - break :m (try process.createEnviron(arena, .{ - .env_map = env_map, - .extra_usizes = extra_usizes, + break :m (try process.createEnvironFromMap(arena, env_map, .{ + .zig_progress_fd = prog_fd, })).ptr; } else if (builtin.link_libc) { - if (extra_usizes.len == 0) { - break :m std.c.environ; - } else { - break :m (try process.createEnviron(arena, .{ - .existing = std.c.environ, - .extra_usizes = extra_usizes, - })).ptr; - } + break :m (try process.createEnvironFromExisting(arena, std.c.environ, .{ + .zig_progress_fd = prog_fd, + })).ptr; } else if (builtin.output_mode == .Exe) { // Then we have Zig start code and this works. - if (extra_usizes.len == 0) { - break :m @ptrCast(std.os.environ.ptr); - } else { - break :m (try process.createEnviron(arena, .{ - // TODO type-safety for null-termination of `os.environ`. - .existing = @ptrCast(std.os.environ.ptr), - .extra_usizes = extra_usizes, - })).ptr; - } + // TODO type-safety for null-termination of `os.environ`. + break :m (try process.createEnvironFromExisting(arena, @ptrCast(std.os.environ.ptr), .{ + .zig_progress_fd = prog_fd, + })).ptr; } else { // TODO come up with a solution for this. 
@compileError("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process"); @@ -664,6 +653,12 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { setUpChildIo(self.stdin_behavior, stdin_pipe[0], posix.STDIN_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); setUpChildIo(self.stdout_behavior, stdout_pipe[1], posix.STDOUT_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); setUpChildIo(self.stderr_behavior, stderr_pipe[1], posix.STDERR_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); + if (prog_pipe[1] != -1) posix.dup2(prog_pipe[1], prog_fileno) catch |err| forkChildErrReport(err_pipe[1], err); + + if (prog_pipe[1] != -1) { + if (prog_pipe[0] != prog_fileno) posix.close(prog_pipe[0]); + if (prog_pipe[1] != prog_fileno) posix.close(prog_pipe[1]); + } if (self.cwd_dir) |cwd| { posix.fchdir(cwd.fd) catch |err| forkChildErrReport(err_pipe[1], err); @@ -718,6 +713,9 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { posix.close(stderr_pipe[1]); } + if (prog_pipe[1] != -1) { + posix.close(prog_pipe[1]); + } self.progress_node.setIpcFd(prog_pipe[0]); } From f6873c6b00544923d5699737651f2bc4fe29fd06 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 23 May 2024 21:04:43 -0700 Subject: [PATCH 12/60] std.Progress: fix using saved IPC data also fix handling of BrokenPipe also fix continuing wrong loop in error conditions --- lib/std/Progress.zig | 51 +++++++++++++++++++++++++++++++++----------- 1 file changed, 39 insertions(+), 12 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 4efb8bb194c8..b7ac1fa328f3 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -88,7 +88,14 @@ pub const Node = struct { if (s.estimated_total_count != std.math.maxInt(u32)) return null; - return @bitCast(s.completed_count); + const low: u16 = @truncate(s.completed_count); + return low; + } + + fn getMainStorageIndex(s: Storage) Node.Index { + assert(s.estimated_total_count == std.math.maxInt(u32)); + const i: u16 = @truncate(s.completed_count >> 16); + return @enumFromInt(i); } fn setIpcFd(s: *Storage, fd: posix.fd_t) void { @@ -387,7 +394,7 @@ fn updateThreadRun() void { } } -fn ipcThreadRun(fd: posix.fd_t) void { +fn ipcThreadRun(fd: posix.fd_t) anyerror!void { { _ = wait(global_progress.initial_delay_ns); @@ -395,7 +402,9 @@ fn ipcThreadRun(fd: posix.fd_t) void { return; const serialized = serialize(); - writeIpc(fd, serialized); + writeIpc(fd, serialized) catch |err| switch (err) { + error.BrokenPipe => return, + }; } while (true) { @@ -405,7 +414,9 @@ fn ipcThreadRun(fd: posix.fd_t) void { return clearTerminal(); const serialized = serialize(); - writeIpc(fd, serialized); + writeIpc(fd, serialized) catch |err| switch (err) { + error.BrokenPipe => return, + }; } } @@ -487,7 +498,10 @@ fn serialize() Serialized { dest_storage.completed_count = @atomicLoad(u32, &storage_ptr.completed_count, .monotonic); dest_storage.estimated_total_count = @atomicLoad(u32, &storage_ptr.estimated_total_count, .monotonic); - any_ipc = any_ipc or dest_storage.getIpcFd() != null; + if (dest_storage.getIpcFd() != null) { + any_ipc = true; + dest_storage.completed_count |= @as(u32, @intCast(i)) << 16; + } const end_parent = @atomicLoad(Node.Parent, parent_ptr, .seq_cst); if (begin_parent == end_parent) { @@ -539,7 +553,7 @@ fn serializeIpc(start_serialized_len: usize) usize { var serialized_len = start_serialized_len; var pipe_buf: [4096]u8 align(4) = 
undefined; - for ( + main_loop: for ( serialized_node_parents_buffer[0..serialized_len], serialized_node_storage_buffer[0..serialized_len], 0.., @@ -554,7 +568,7 @@ fn serializeIpc(start_serialized_len: usize) usize { std.log.warn("failed to read child progress data: {s}", .{@errorName(e)}); main_storage.completed_count = 0; main_storage.estimated_total_count = 0; - continue; + continue :main_loop; }, }; } @@ -570,7 +584,7 @@ fn serializeIpc(start_serialized_len: usize) usize { std.log.warn("short read: {d} out of 4 header bytes", .{input.len}); // TODO keep track of the short read to trash odd bytes with the next read serialized_len = useSavedIpcData(serialized_len, main_storage, main_index); - continue; + continue :main_loop; } const subtree_len = std.mem.readInt(u32, input[0..4], .little); const expected_bytes = 4 + subtree_len * (@sizeOf(Node.Storage) + @sizeOf(Node.Parent)); @@ -578,7 +592,7 @@ fn serializeIpc(start_serialized_len: usize) usize { std.log.warn("short read: {d} out of {d} ({d} nodes)", .{ input.len, expected_bytes, subtree_len }); // TODO keep track of the short read to trash odd bytes with the next read serialized_len = useSavedIpcData(serialized_len, main_storage, main_index); - continue; + continue :main_loop; } if (input.len > expected_bytes) { input = @alignCast(input[expected_bytes..]); @@ -593,7 +607,8 @@ fn serializeIpc(start_serialized_len: usize) usize { }; // Remember in case the pipe is empty on next update. - @as(*SavedMetadata, @ptrCast(&main_storage.name)).* = .{ + const real_storage: *Node.Storage = Node.storageByIndex(main_storage.getMainStorageIndex()); + @as(*SavedMetadata, @ptrCast(&real_storage.name)).* = .{ .start_index = @intCast(serialized_len), .nodes_len = @intCast(parents.len), .main_index = @intCast(main_index), @@ -643,6 +658,14 @@ fn useSavedIpcData(start_serialized_len: usize, main_storage: *Node.Storage, mai const nodes_len = saved_metadata.nodes_len; const old_main_index = saved_metadata.main_index; + const real_storage: *Node.Storage = Node.storageByIndex(main_storage.getMainStorageIndex()); + @as(*SavedMetadata, @ptrCast(&real_storage.name)).* = .{ + .start_index = @intCast(start_serialized_len), + .nodes_len = nodes_len, + .main_index = @intCast(main_index), + .flags = .saved, + }; + const parents = parents_copy[start_index..][0 .. nodes_len - 1]; const storage = storage_copy[start_index..][0 .. 
nodes_len - 1]; @@ -793,7 +816,7 @@ fn write(buf: []const u8) void { }; } -fn writeIpc(fd: posix.fd_t, serialized: Serialized) void { +fn writeIpc(fd: posix.fd_t, serialized: Serialized) error{BrokenPipe}!void { assert(serialized.parents.len == serialized.storage.len); const serialized_len: u32 = @intCast(serialized.parents.len); const header = std.mem.asBytes(&serialized_len); @@ -818,7 +841,11 @@ fn writeIpc(fd: posix.fd_t, serialized: Serialized) void { } } else |err| switch (err) { error.WouldBlock => {}, - else => |e| std.log.warn("failed to send progress to parent process: {s}", .{@errorName(e)}), + error.BrokenPipe => return error.BrokenPipe, + else => |e| { + std.log.warn("failed to send progress to parent process: {s}", .{@errorName(e)}); + return error.BrokenPipe; + }, } } From f97c2f28fdc3061bc7e30ccfcafaccbee77993b6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 24 May 2024 08:22:47 -0700 Subject: [PATCH 13/60] update the codebase for the new std.Progress API --- lib/compiler/aro/aro/Diagnostics.zig | 4 +- lib/compiler/build_runner.zig | 20 ++- lib/std/Build.zig | 10 +- lib/std/Build/Step.zig | 22 ++- lib/std/Build/Step/CheckFile.zig | 2 +- lib/std/Build/Step/CheckObject.zig | 2 +- lib/std/Build/Step/Compile.zig | 6 +- lib/std/Build/Step/ConfigHeader.zig | 2 +- lib/std/Build/Step/Fmt.zig | 2 +- lib/std/Build/Step/InstallArtifact.zig | 2 +- lib/std/Build/Step/InstallDir.zig | 2 +- lib/std/Build/Step/InstallFile.zig | 2 +- lib/std/Build/Step/ObjCopy.zig | 2 +- lib/std/Build/Step/Options.zig | 2 +- lib/std/Build/Step/RemoveDir.zig | 2 +- lib/std/Build/Step/Run.zig | 18 ++- lib/std/Build/Step/TranslateC.zig | 2 +- lib/std/Build/Step/WriteFile.zig | 2 +- lib/std/Progress.zig | 43 +++++- lib/std/debug.zig | 21 ++- lib/std/json/dynamic.zig | 4 +- lib/std/log.zig | 8 +- lib/std/zig/ErrorBundle.zig | 4 +- lib/std/zig/Server.zig | 2 - src/Compilation.zig | 101 +++++-------- src/Module.zig | 6 +- src/Package/Fetch.zig | 14 +- src/glibc.zig | 6 +- src/libcxx.zig | 4 +- src/libtsan.zig | 2 +- src/libunwind.zig | 2 +- src/link.zig | 8 +- src/link/C.zig | 7 +- src/link/Coff.zig | 7 +- src/link/Coff/lld.zig | 6 +- src/link/Elf.zig | 13 +- src/link/MachO.zig | 7 +- src/link/NvPtx.zig | 4 +- src/link/Plan9.zig | 7 +- src/link/SpirV.zig | 11 +- src/link/SpirV/deduplicate.zig | 5 +- src/link/SpirV/lower_invocation_globals.zig | 5 +- src/link/SpirV/prune_unused.zig | 5 +- src/link/Wasm.zig | 13 +- src/main.zig | 150 ++------------------ src/mingw.zig | 6 +- src/musl.zig | 2 +- src/wasi_libc.zig | 2 +- test/standalone/cmakedefine/build.zig | 2 +- 49 files changed, 226 insertions(+), 355 deletions(-) diff --git a/lib/compiler/aro/aro/Diagnostics.zig b/lib/compiler/aro/aro/Diagnostics.zig index f894403648b4..8f80e4393dc8 100644 --- a/lib/compiler/aro/aro/Diagnostics.zig +++ b/lib/compiler/aro/aro/Diagnostics.zig @@ -528,7 +528,7 @@ const MsgWriter = struct { config: std.io.tty.Config, fn init(config: std.io.tty.Config) MsgWriter { - std.debug.getStderrMutex().lock(); + std.debug.lockStdErr(); return .{ .w = std.io.bufferedWriter(std.io.getStdErr().writer()), .config = config, @@ -537,7 +537,7 @@ const MsgWriter = struct { pub fn deinit(m: *MsgWriter) void { m.w.flush() catch {}; - std.debug.getStderrMutex().unlock(); + std.debug.unlockStdErr(); } pub fn print(m: *MsgWriter, comptime fmt: []const u8, args: anytype) void { diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index 35dcbb882bda..d4c96fe08b59 100644 --- a/lib/compiler/build_runner.zig +++ 
b/lib/compiler/build_runner.zig @@ -289,8 +289,7 @@ pub fn main() !void { .windows_api => {}, } - var progress: std.Progress = .{ .dont_print_on_dumb = true }; - const main_progress_node = progress.start("", 0); + const main_progress_node = std.Progress.start(.{}); builder.debug_log_scopes = debug_log_scopes.items; builder.resolveInstallPrefix(install_prefix, dir_list); @@ -385,7 +384,7 @@ fn runStepNames( arena: std.mem.Allocator, b: *std.Build, step_names: []const []const u8, - parent_prog_node: *std.Progress.Node, + parent_prog_node: std.Progress.Node, thread_pool_options: std.Thread.Pool.Options, run: *Run, seed: u32, @@ -452,7 +451,7 @@ fn runStepNames( { defer parent_prog_node.end(); - var step_prog = parent_prog_node.start("steps", step_stack.count()); + const step_prog = parent_prog_node.start("steps", step_stack.count()); defer step_prog.end(); var wait_group: std.Thread.WaitGroup = .{}; @@ -467,7 +466,7 @@ fn runStepNames( if (step.state == .skipped_oom) continue; thread_pool.spawnWg(&wait_group, workerMakeOneStep, .{ - &wait_group, &thread_pool, b, step, &step_prog, run, + &wait_group, &thread_pool, b, step, step_prog, run, }); } } @@ -891,7 +890,7 @@ fn workerMakeOneStep( thread_pool: *std.Thread.Pool, b: *std.Build, s: *Step, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, run: *Run, ) void { // First, check the conditions for running this step. If they are not met, @@ -941,11 +940,10 @@ fn workerMakeOneStep( } } - var sub_prog_node = prog_node.start(s.name, 0); - sub_prog_node.activate(); + const sub_prog_node = prog_node.start(s.name, 0); defer sub_prog_node.end(); - const make_result = s.make(&sub_prog_node); + const make_result = s.make(sub_prog_node); // No matter the result, we want to display error/warning messages. 
const show_compile_errors = !run.prominent_compile_errors and @@ -954,8 +952,8 @@ fn workerMakeOneStep( const show_stderr = s.result_stderr.len > 0; if (show_error_msgs or show_compile_errors or show_stderr) { - sub_prog_node.context.lock_stderr(); - defer sub_prog_node.context.unlock_stderr(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); printErrorMessages(b, s, run) catch {}; } diff --git a/lib/std/Build.zig b/lib/std/Build.zig index a2e8a7c564e1..4443fa404c74 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -1059,7 +1059,7 @@ pub fn getUninstallStep(b: *Build) *Step { return &b.uninstall_tls.step; } -fn makeUninstall(uninstall_step: *Step, prog_node: *std.Progress.Node) anyerror!void { +fn makeUninstall(uninstall_step: *Step, prog_node: std.Progress.Node) anyerror!void { _ = prog_node; const uninstall_tls: *TopLevelStep = @fieldParentPtr("step", uninstall_step); const b: *Build = @fieldParentPtr("uninstall_tls", uninstall_tls); @@ -2281,10 +2281,10 @@ pub const LazyPath = union(enum) { .cwd_relative => |p| return src_builder.pathFromCwd(p), .generated => |gen| { var file_path: []const u8 = gen.file.step.owner.pathFromRoot(gen.file.path orelse { - std.debug.getStderrMutex().lock(); + std.debug.lockStdErr(); const stderr = std.io.getStdErr(); dumpBadGetPathHelp(gen.file.step, stderr, src_builder, asking_step) catch {}; - std.debug.getStderrMutex().unlock(); + std.debug.unlockStdErr(); @panic("misconfigured build script"); }); @@ -2351,8 +2351,8 @@ fn dumpBadDirnameHelp( comptime msg: []const u8, args: anytype, ) anyerror!void { - debug.getStderrMutex().lock(); - defer debug.getStderrMutex().unlock(); + debug.lockStdErr(); + defer debug.unlockStdErr(); const stderr = io.getStdErr(); const w = stderr.writer(); diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 01bea6c0cef5..a9657358435e 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -58,7 +58,7 @@ pub const TestResults = struct { } }; -pub const MakeFn = *const fn (step: *Step, prog_node: *std.Progress.Node) anyerror!void; +pub const MakeFn = *const fn (step: *Step, prog_node: std.Progress.Node) anyerror!void; pub const State = enum { precheck_unstarted, @@ -176,7 +176,7 @@ pub fn init(options: StepOptions) Step { /// If the Step's `make` function reports `error.MakeFailed`, it indicates they /// have already reported the error. Otherwise, we add a simple error report /// here. 
-pub fn make(s: *Step, prog_node: *std.Progress.Node) error{ MakeFailed, MakeSkipped }!void { +pub fn make(s: *Step, prog_node: std.Progress.Node) error{ MakeFailed, MakeSkipped }!void { const arena = s.owner.allocator; s.makeFn(s, prog_node) catch |err| switch (err) { @@ -217,7 +217,7 @@ pub fn getStackTrace(s: *Step) ?std.builtin.StackTrace { }; } -fn makeNoOp(step: *Step, prog_node: *std.Progress.Node) anyerror!void { +fn makeNoOp(step: *Step, prog_node: std.Progress.Node) anyerror!void { _ = prog_node; var all_cached = true; @@ -303,7 +303,7 @@ pub fn addError(step: *Step, comptime fmt: []const u8, args: anytype) error{OutO pub fn evalZigProcess( s: *Step, argv: []const []const u8, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) !?[]const u8 { assert(argv.len != 0); const b = s.owner; @@ -313,12 +313,16 @@ pub fn evalZigProcess( try handleChildProcUnsupported(s, null, argv); try handleVerbose(s.owner, null, argv); + const sub_prog_node = prog_node.start("", 0); + defer sub_prog_node.end(); + var child = std.process.Child.init(argv, arena); child.env_map = &b.graph.env_map; child.stdin_behavior = .Pipe; child.stdout_behavior = .Pipe; child.stderr_behavior = .Pipe; child.request_resource_usage_statistics = true; + child.progress_node = sub_prog_node; child.spawn() catch |err| return s.fail("unable to spawn {s}: {s}", .{ argv[0], @errorName(err), @@ -337,11 +341,6 @@ pub fn evalZigProcess( const Header = std.zig.Server.Message.Header; var result: ?[]const u8 = null; - var node_name: std.ArrayListUnmanaged(u8) = .{}; - defer node_name.deinit(gpa); - var sub_prog_node = prog_node.start("", 0); - defer sub_prog_node.end(); - const stdout = poller.fifo(.stdout); poll: while (true) { @@ -379,11 +378,6 @@ pub fn evalZigProcess( .extra = extra_array, }; }, - .progress => { - node_name.clearRetainingCapacity(); - try node_name.appendSlice(gpa, body); - sub_prog_node.setName(node_name.items); - }, .emit_bin_path => { const EbpHdr = std.zig.Server.Message.EmitBinPath; const ebp_hdr = @as(*align(1) const EbpHdr, @ptrCast(body)); diff --git a/lib/std/Build/Step/CheckFile.zig b/lib/std/Build/Step/CheckFile.zig index b3323f9e98b9..b7ce2ded6136 100644 --- a/lib/std/Build/Step/CheckFile.zig +++ b/lib/std/Build/Step/CheckFile.zig @@ -46,7 +46,7 @@ pub fn setName(check_file: *CheckFile, name: []const u8) void { check_file.step.name = name; } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const b = step.owner; const check_file: *CheckFile = @fieldParentPtr("step", step); diff --git a/lib/std/Build/Step/CheckObject.zig b/lib/std/Build/Step/CheckObject.zig index fa0ccc339d3d..84c9c62abb40 100644 --- a/lib/std/Build/Step/CheckObject.zig +++ b/lib/std/Build/Step/CheckObject.zig @@ -550,7 +550,7 @@ pub fn checkComputeCompare( check_object.checks.append(check) catch @panic("OOM"); } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const b = step.owner; const gpa = b.allocator; diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig index f660ef64a6b8..e27dd656198a 100644 --- a/lib/std/Build/Step/Compile.zig +++ b/lib/std/Build/Step/Compile.zig @@ -967,7 +967,7 @@ fn getGeneratedFilePath(compile: *Compile, comptime tag_name: []const u8, asking const maybe_path: ?*GeneratedFile = @field(compile, tag_name); const generated_file = maybe_path orelse { - std.debug.getStderrMutex().lock(); + 
std.debug.lockStdErr(); const stderr = std.io.getStdErr(); std.Build.dumpBadGetPathHelp(&compile.step, stderr, compile.step.owner, asking_step) catch {}; @@ -976,7 +976,7 @@ fn getGeneratedFilePath(compile: *Compile, comptime tag_name: []const u8, asking }; const path = generated_file.path orelse { - std.debug.getStderrMutex().lock(); + std.debug.lockStdErr(); const stderr = std.io.getStdErr(); std.Build.dumpBadGetPathHelp(&compile.step, stderr, compile.step.owner, asking_step) catch {}; @@ -987,7 +987,7 @@ fn getGeneratedFilePath(compile: *Compile, comptime tag_name: []const u8, asking return path; } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { const b = step.owner; const arena = b.allocator; const compile: *Compile = @fieldParentPtr("step", step); diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig index 4a0e64e8d054..212ea605ed19 100644 --- a/lib/std/Build/Step/ConfigHeader.zig +++ b/lib/std/Build/Step/ConfigHeader.zig @@ -164,7 +164,7 @@ fn putValue(config_header: *ConfigHeader, field_name: []const u8, comptime T: ty } } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const b = step.owner; const config_header: *ConfigHeader = @fieldParentPtr("step", step); diff --git a/lib/std/Build/Step/Fmt.zig b/lib/std/Build/Step/Fmt.zig index 3010d701b1ac..f346c6cc3949 100644 --- a/lib/std/Build/Step/Fmt.zig +++ b/lib/std/Build/Step/Fmt.zig @@ -36,7 +36,7 @@ pub fn create(owner: *std.Build, options: Options) *Fmt { return fmt; } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { // zig fmt is fast enough that no progress is needed. 
_ = prog_node; diff --git a/lib/std/Build/Step/InstallArtifact.zig b/lib/std/Build/Step/InstallArtifact.zig index c56bafcfb53d..bd1d5db4a977 100644 --- a/lib/std/Build/Step/InstallArtifact.zig +++ b/lib/std/Build/Step/InstallArtifact.zig @@ -115,7 +115,7 @@ pub fn create(owner: *std.Build, artifact: *Step.Compile, options: Options) *Ins return install_artifact; } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const install_artifact: *InstallArtifact = @fieldParentPtr("step", step); const b = step.owner; diff --git a/lib/std/Build/Step/InstallDir.zig b/lib/std/Build/Step/InstallDir.zig index 1722b975f70c..0a6edafb338e 100644 --- a/lib/std/Build/Step/InstallDir.zig +++ b/lib/std/Build/Step/InstallDir.zig @@ -56,7 +56,7 @@ pub fn create(owner: *std.Build, options: Options) *InstallDir { return install_dir; } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const b = step.owner; const install_dir: *InstallDir = @fieldParentPtr("step", step); diff --git a/lib/std/Build/Step/InstallFile.zig b/lib/std/Build/Step/InstallFile.zig index 6fa6d6bc9994..8202a9d79648 100644 --- a/lib/std/Build/Step/InstallFile.zig +++ b/lib/std/Build/Step/InstallFile.zig @@ -36,7 +36,7 @@ pub fn create( return install_file; } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const b = step.owner; const install_file: *InstallFile = @fieldParentPtr("step", step); diff --git a/lib/std/Build/Step/ObjCopy.zig b/lib/std/Build/Step/ObjCopy.zig index 515736dbc117..966764adcc1e 100644 --- a/lib/std/Build/Step/ObjCopy.zig +++ b/lib/std/Build/Step/ObjCopy.zig @@ -90,7 +90,7 @@ pub fn getOutputSeparatedDebug(objcopy: *const ObjCopy) ?std.Build.LazyPath { return if (objcopy.output_file_debug) |*file| .{ .generated = .{ .file = file } } else null; } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { const b = step.owner; const objcopy: *ObjCopy = @fieldParentPtr("step", step); diff --git a/lib/std/Build/Step/Options.zig b/lib/std/Build/Step/Options.zig index c4daed73fff6..2937cf70e1ee 100644 --- a/lib/std/Build/Step/Options.zig +++ b/lib/std/Build/Step/Options.zig @@ -410,7 +410,7 @@ pub fn getOutput(options: *Options) LazyPath { return .{ .generated = .{ .file = &options.generated_file } }; } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { // This step completes so quickly that no progress is necessary. _ = prog_node; diff --git a/lib/std/Build/Step/RemoveDir.zig b/lib/std/Build/Step/RemoveDir.zig index 64a3c72668ee..6483a684aaea 100644 --- a/lib/std/Build/Step/RemoveDir.zig +++ b/lib/std/Build/Step/RemoveDir.zig @@ -22,7 +22,7 @@ pub fn create(owner: *std.Build, dir_path: []const u8) *RemoveDir { return remove_dir; } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { // TODO update progress node while walking file system. // Should the standard library support this use case?? 
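// A sketch, not from the patch: one way the TODO above could be handled with
// the new by-value API -- give the walk its own child node and bump it once
// per entry. `removeAllWithProgress` is a hypothetical helper, `dir` is
// assumed to be opened with `.iterate = true`, `std` is the file's usual
// `@import("std")`, and deleting while iterating is simplified for brevity.
fn removeAllWithProgress(dir: std.fs.Dir, prog_node: std.Progress.Node) !void {
    const node = prog_node.start("remove dir", 0);
    defer node.end();
    var it = dir.iterate();
    while (try it.next()) |entry| {
        try dir.deleteTree(entry.name);
        node.completeOne();
    }
}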
_ = prog_node; diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index fec5b5ab679c..d49d0b3ce2a8 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -574,7 +574,7 @@ const IndexedOutput = struct { tag: @typeInfo(Arg).Union.tag_type.?, output: *Output, }; -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { const b = step.owner; const arena = b.allocator; const run: *Run = @fieldParentPtr("step", step); @@ -878,7 +878,7 @@ fn runCommand( argv: []const []const u8, has_side_effects: bool, output_dir_path: []const u8, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) !void { const step = &run.step; const b = step.owner; @@ -1195,7 +1195,7 @@ fn spawnChildAndCollect( run: *Run, argv: []const []const u8, has_side_effects: bool, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) !ChildProcResult { const b = run.step.owner; const arena = b.allocator; @@ -1235,6 +1235,10 @@ fn spawnChildAndCollect( child.stdin_behavior = .Pipe; } + if (run.stdio != .zig_test) { + child.progress_node = prog_node.start("", 0); + } + try child.spawn(); var timer = try std.time.Timer.start(); @@ -1264,7 +1268,7 @@ const StdIoResult = struct { fn evalZigTest( run: *Run, child: *std.process.Child, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) !StdIoResult { const gpa = run.step.owner.allocator; const arena = run.step.owner.allocator; @@ -1291,7 +1295,7 @@ fn evalZigTest( var metadata: ?TestMetadata = null; var sub_prog_node: ?std.Progress.Node = null; - defer if (sub_prog_node) |*n| n.end(); + defer if (sub_prog_node) |n| n.end(); poll: while (true) { while (stdout.readableLength() < @sizeOf(Header)) { @@ -1406,7 +1410,7 @@ const TestMetadata = struct { expected_panic_msgs: []const u32, string_bytes: []const u8, next_index: u32, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, fn testName(tm: TestMetadata, index: u32) []const u8 { return std.mem.sliceTo(tm.string_bytes[tm.names[index]..], 0); @@ -1421,7 +1425,7 @@ fn requestNextTest(in: fs.File, metadata: *TestMetadata, sub_prog_node: *?std.Pr if (metadata.expected_panic_msgs[i] != 0) continue; const name = metadata.testName(i); - if (sub_prog_node.*) |*n| n.end(); + if (sub_prog_node.*) |n| n.end(); sub_prog_node.* = metadata.prog_node.start(name, 0); try sendRunTestMessage(in, i); diff --git a/lib/std/Build/Step/TranslateC.zig b/lib/std/Build/Step/TranslateC.zig index cb1b48e3c061..e07744c2da15 100644 --- a/lib/std/Build/Step/TranslateC.zig +++ b/lib/std/Build/Step/TranslateC.zig @@ -116,7 +116,7 @@ pub fn defineCMacroRaw(translate_c: *TranslateC, name_and_value: []const u8) voi translate_c.c_macros.append(translate_c.step.owner.dupe(name_and_value)) catch @panic("OOM"); } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { const b = step.owner; const translate_c: *TranslateC = @fieldParentPtr("step", step); diff --git a/lib/std/Build/Step/WriteFile.zig b/lib/std/Build/Step/WriteFile.zig index 401c5b78ece2..0639573b8fea 100644 --- a/lib/std/Build/Step/WriteFile.zig +++ b/lib/std/Build/Step/WriteFile.zig @@ -198,7 +198,7 @@ fn maybeUpdateName(write_file: *WriteFile) void { } } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const b = step.owner; const write_file: *WriteFile = @fieldParentPtr("step", step); diff --git 
a/lib/std/Progress.zig b/lib/std/Progress.zig index b7ac1fa328f3..c5c1d17b9328 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -58,7 +58,7 @@ pub const Options = struct { /// cannot fit into this buffer which will look bad but not cause any malfunctions. /// /// Must be at least 200 bytes. - draw_buffer: []u8, + draw_buffer: []u8 = &default_draw_buffer, /// How many nanoseconds between writing updates to the terminal. refresh_rate_ns: u64 = 60 * std.time.ns_per_ms, /// How many nanoseconds to keep the output hidden @@ -67,6 +67,7 @@ pub const Options = struct { /// 0 means unknown. estimated_total_items: usize = 0, root_name: []const u8 = "", + disable_printing: bool = false, }; /// Represents one unit of progress. Each node can have children nodes, or @@ -203,6 +204,13 @@ pub const Node = struct { @atomicStore(u32, &storage.estimated_total_count, std.math.lossyCast(u32, count), .monotonic); } + /// Thread-safe. + pub fn increaseEstimatedTotalItems(n: Node, count: usize) void { + const index = n.index.unwrap() orelse return; + const storage = storageByIndex(index); + _ = @atomicRmw(u32, &storage.estimated_total_count, .Add, std.math.lossyCast(u32, count), .monotonic); + } + /// Finish a started `Node`. Thread-safe. pub fn end(n: Node) void { const index = n.index.unwrap() orelse return; @@ -290,6 +298,8 @@ var node_parents_buffer: [default_node_storage_buffer_len]Node.Parent = undefine var node_storage_buffer: [default_node_storage_buffer_len]Node.Storage = undefined; var node_freelist_buffer: [default_node_storage_buffer_len]Node.OptionalIndex = undefined; +var default_draw_buffer: [2000]u8 = undefined; + /// Initializes a global Progress instance. /// /// Asserts there is only one global Progress instance. @@ -318,6 +328,9 @@ pub fn start(options: Options) Node { } } else |env_err| switch (env_err) { error.EnvironmentVariableNotFound => { + if (options.disable_printing) { + return .{ .index = .none }; + } const stderr = std.io.getStdErr(); if (stderr.supportsAnsiEscapeCodes()) { global_progress.terminal = stderr; @@ -330,7 +343,7 @@ pub fn start(options: Options) Node { global_progress.terminal = stderr; } - if (global_progress.terminal == null) { + if (global_progress.terminal == null or !global_progress.supports_ansi_escape_codes) { return .{ .index = .none }; } @@ -379,7 +392,10 @@ fn updateThreadRun() void { return clearTerminal(); const buffer = computeRedraw(); - write(buffer); + if (stderr_mutex.tryLock()) { + defer stderr_mutex.unlock(); + write(buffer); + } } while (true) { @@ -390,10 +406,25 @@ fn updateThreadRun() void { return clearTerminal(); const buffer = computeRedraw(); - write(buffer); + if (stderr_mutex.tryLock()) { + defer stderr_mutex.unlock(); + write(buffer); + } } } +/// Allows the caller to freely write to stderr until `unlockStdErr` is called. +/// +/// During the lock, any `std.Progress` information is cleared from the terminal. 
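// A sketch, not from the patch: typical use of the new lock from code that
// wants to write to stderr directly. The progress display is cleared while
// the lock is held and redrawn on the next refresh tick; most callers go
// through the `std.debug.lockStdErr`/`unlockStdErr` wrappers added alongside
// this change, which delegate here. `reportFailure` is a hypothetical caller.
fn reportFailure(step_name: []const u8) void {
    std.debug.lockStdErr();
    defer std.debug.unlockStdErr();
    const stderr = std.io.getStdErr().writer();
    nosuspend stderr.print("step '{s}' failed\n", .{step_name}) catch return;
}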
+pub fn lockStdErr() void { + stderr_mutex.lock(); + clearTerminal(); +} + +pub fn unlockStdErr() void { + stderr_mutex.unlock(); +} + fn ipcThreadRun(fd: posix.fd_t) anyerror!void { { _ = wait(global_progress.initial_delay_ns); @@ -432,6 +463,8 @@ const tree_line = "\x1B\x28\x30\x78\x1B\x28\x42 "; // │ const tree_langle = "\x1B\x28\x30\x6d\x71\x1B\x28\x42 "; // └─ fn clearTerminal() void { + if (global_progress.newline_count == 0) return; + var i: usize = 0; const buf = global_progress.draw_buffer; @@ -876,3 +909,5 @@ fn handleSigWinch(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*anyopaque) assert(sig == posix.SIG.WINCH); global_progress.redraw_event.set(); } + +var stderr_mutex: std.Thread.Mutex = .{}; diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 980b027f0ac6..41439df5e6e2 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -77,19 +77,28 @@ const PdbOrDwarf = union(enum) { } }; -var stderr_mutex = std.Thread.Mutex{}; +/// Allows the caller to freely write to stderr until `unlockStdErr` is called. +/// +/// During the lock, any `std.Progress` information is cleared from the terminal. +pub fn lockStdErr() void { + std.Progress.lockStdErr(); +} + +pub fn unlockStdErr() void { + std.Progress.unlockStdErr(); +} /// Print to stderr, unbuffered, and silently returning on failure. Intended /// for use in "printf debugging." Use `std.log` functions for proper logging. pub fn print(comptime fmt: []const u8, args: anytype) void { - stderr_mutex.lock(); - defer stderr_mutex.unlock(); + lockStdErr(); + defer unlockStdErr(); const stderr = io.getStdErr().writer(); nosuspend stderr.print(fmt, args) catch return; } pub fn getStderrMutex() *std.Thread.Mutex { - return &stderr_mutex; + @compileError("deprecated. call std.debug.lockStdErr() and std.debug.unlockStdErr() instead which will integrate properly with std.Progress"); } /// TODO multithreaded awareness @@ -107,8 +116,8 @@ pub fn getSelfDebugInfo() !*DebugInfo { /// Tries to print a hexadecimal view of the bytes, unbuffered, and ignores any error returned. /// Obtains the stderr mutex while dumping. pub fn dump_hex(bytes: []const u8) void { - stderr_mutex.lock(); - defer stderr_mutex.unlock(); + lockStdErr(); + defer unlockStdErr(); dump_hex_fallible(bytes) catch {}; } diff --git a/lib/std/json/dynamic.zig b/lib/std/json/dynamic.zig index a56d37bf0ba5..a1849b0fed3e 100644 --- a/lib/std/json/dynamic.zig +++ b/lib/std/json/dynamic.zig @@ -52,8 +52,8 @@ pub const Value = union(enum) { } pub fn dump(self: Value) void { - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); const stderr = std.io.getStdErr().writer(); stringify(self, .{}, stderr) catch return; diff --git a/lib/std/log.zig b/lib/std/log.zig index 0562d09c51ce..b2c05112b029 100644 --- a/lib/std/log.zig +++ b/lib/std/log.zig @@ -45,8 +45,8 @@ //! const prefix = "[" ++ comptime level.asText() ++ "] " ++ scope_prefix; //! //! // Print the message to stderr, silently ignoring any errors -//! std.debug.getStderrMutex().lock(); -//! defer std.debug.getStderrMutex().unlock(); +//! std.debug.lockStdErr(); +//! defer std.debug.unlockStdErr(); //! const stderr = std.io.getStdErr().writer(); //! nosuspend stderr.print(prefix ++ format ++ "\n", args) catch return; //! 
} @@ -152,8 +152,8 @@ pub fn defaultLog( var bw = std.io.bufferedWriter(stderr); const writer = bw.writer(); - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); nosuspend { writer.print(level_txt ++ prefix2 ++ format ++ "\n", args) catch return; bw.flush() catch return; diff --git a/lib/std/zig/ErrorBundle.zig b/lib/std/zig/ErrorBundle.zig index 5a6651b24847..2d69ff901dbb 100644 --- a/lib/std/zig/ErrorBundle.zig +++ b/lib/std/zig/ErrorBundle.zig @@ -155,8 +155,8 @@ pub const RenderOptions = struct { }; pub fn renderToStdErr(eb: ErrorBundle, options: RenderOptions) void { - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); const stderr = std.io.getStdErr(); return renderToWriter(eb, options, stderr.writer()) catch return; } diff --git a/lib/std/zig/Server.zig b/lib/std/zig/Server.zig index 10e14a55fc68..7f8de00b4aeb 100644 --- a/lib/std/zig/Server.zig +++ b/lib/std/zig/Server.zig @@ -14,8 +14,6 @@ pub const Message = struct { zig_version, /// Body is an ErrorBundle. error_bundle, - /// Body is a UTF-8 string. - progress, /// Body is a EmitBinPath. emit_bin_path, /// Body is a TestMetadata diff --git a/src/Compilation.zig b/src/Compilation.zig index 03b981812e35..512ceeabbdc9 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1273,8 +1273,8 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil if (options.verbose_llvm_cpu_features) { if (options.root_mod.resolved_target.llvm_cpu_features) |cf| print: { const target = options.root_mod.resolved_target.result; - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); const stderr = std.io.getStdErr().writer(); nosuspend { stderr.print("compilation: {s}\n", .{options.root_name}) catch break :print; @@ -1934,7 +1934,7 @@ pub fn getTarget(self: Compilation) Target { /// Only legal to call when cache mode is incremental and a link file is present. pub fn hotCodeSwap( comp: *Compilation, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, pid: std.process.Child.Id, ) !void { const lf = comp.bin_file.?; @@ -1966,7 +1966,7 @@ fn cleanupAfterUpdate(comp: *Compilation) void { } /// Detect changes to source files, perform semantic analysis, and update the output files. -pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void { +pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { const tracy_trace = trace(@src()); defer tracy_trace.end(); @@ -2256,7 +2256,7 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void } } -fn flush(comp: *Compilation, arena: Allocator, prog_node: *std.Progress.Node) !void { +fn flush(comp: *Compilation, arena: Allocator, prog_node: std.Progress.Node) !void { if (comp.bin_file) |lf| { // This is needed before reading the error flags. 
lf.flush(arena, prog_node) catch |err| switch (err) { @@ -2566,13 +2566,11 @@ pub fn emitLlvmObject( default_emit: Emit, bin_emit_loc: ?EmitLoc, llvm_object: *LlvmObject, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) !void { if (build_options.only_c) @compileError("unreachable"); - var sub_prog_node = prog_node.start("LLVM Emit Object", 0); - sub_prog_node.activate(); - sub_prog_node.context.refresh(); + const sub_prog_node = prog_node.start("LLVM Emit Object", 0); defer sub_prog_node.end(); try llvm_object.emit(.{ @@ -3249,23 +3247,23 @@ pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void { pub fn performAllTheWork( comp: *Compilation, - main_progress_node: *std.Progress.Node, + main_progress_node: std.Progress.Node, ) error{ TimerUnsupported, OutOfMemory }!void { // Here we queue up all the AstGen tasks first, followed by C object compilation. // We wait until the AstGen tasks are all completed before proceeding to the // (at least for now) single-threaded main work queue. However, C object compilation // only needs to be finished by the end of this function. - var zir_prog_node = main_progress_node.start("AST Lowering", 0); + const zir_prog_node = main_progress_node.start("AST Lowering", 0); defer zir_prog_node.end(); - var wasm_prog_node = main_progress_node.start("Compile Autodocs", 0); + const wasm_prog_node = main_progress_node.start("Compile Autodocs", 0); defer wasm_prog_node.end(); - var c_obj_prog_node = main_progress_node.start("Compile C Objects", comp.c_source_files.len); + const c_obj_prog_node = main_progress_node.start("Compile C Objects", comp.c_source_files.len); defer c_obj_prog_node.end(); - var win32_resource_prog_node = main_progress_node.start("Compile Win32 Resources", comp.rc_source_files.len); + const win32_resource_prog_node = main_progress_node.start("Compile Win32 Resources", comp.rc_source_files.len); defer win32_resource_prog_node.end(); comp.work_queue_wait_group.reset(); @@ -3274,7 +3272,7 @@ pub fn performAllTheWork( if (!build_options.only_c and !build_options.only_core_functionality) { if (comp.docs_emit != null) { comp.thread_pool.spawnWg(&comp.work_queue_wait_group, workerDocsCopy, .{comp}); - comp.work_queue_wait_group.spawnManager(workerDocsWasm, .{ comp, &wasm_prog_node }); + comp.work_queue_wait_group.spawnManager(workerDocsWasm, .{ comp, wasm_prog_node }); } } @@ -3313,7 +3311,7 @@ pub fn performAllTheWork( while (comp.astgen_work_queue.readItem()) |file| { comp.thread_pool.spawnWg(&comp.astgen_wait_group, workerAstGenFile, .{ - comp, file, &zir_prog_node, &comp.astgen_wait_group, .root, + comp, file, zir_prog_node, &comp.astgen_wait_group, .root, }); } @@ -3325,14 +3323,14 @@ pub fn performAllTheWork( while (comp.c_object_work_queue.readItem()) |c_object| { comp.thread_pool.spawnWg(&comp.work_queue_wait_group, workerUpdateCObject, .{ - comp, c_object, &c_obj_prog_node, + comp, c_object, c_obj_prog_node, }); } if (!build_options.only_core_functionality) { while (comp.win32_resource_work_queue.readItem()) |win32_resource| { comp.thread_pool.spawnWg(&comp.work_queue_wait_group, workerUpdateWin32Resource, .{ - comp, win32_resource, &win32_resource_prog_node, + comp, win32_resource, win32_resource_prog_node, }); } } @@ -3342,7 +3340,6 @@ pub fn performAllTheWork( try reportMultiModuleErrors(mod); try mod.flushRetryableFailures(); mod.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); - mod.sema_prog_node.activate(); } defer if (comp.module) |mod| { mod.sema_prog_node.end(); @@ -3379,7 
+3376,7 @@ pub fn performAllTheWork( } } -fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !void { +fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void { switch (job) { .codegen_decl => |decl_index| { const module = comp.module.?; @@ -3803,7 +3800,7 @@ fn docsCopyModule(comp: *Compilation, module: *Package.Module, name: []const u8, } } -fn workerDocsWasm(comp: *Compilation, prog_node: *std.Progress.Node) void { +fn workerDocsWasm(comp: *Compilation, prog_node: std.Progress.Node) void { workerDocsWasmFallible(comp, prog_node) catch |err| { comp.lockAndSetMiscFailure(.docs_wasm, "unable to build autodocs: {s}", .{ @errorName(err), @@ -3811,7 +3808,7 @@ fn workerDocsWasm(comp: *Compilation, prog_node: *std.Progress.Node) void { }; } -fn workerDocsWasmFallible(comp: *Compilation, prog_node: *std.Progress.Node) anyerror!void { +fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) anyerror!void { const gpa = comp.gpa; var arena_allocator = std.heap.ArenaAllocator.init(gpa); @@ -3952,12 +3949,11 @@ const AstGenSrc = union(enum) { fn workerAstGenFile( comp: *Compilation, file: *Module.File, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, wg: *WaitGroup, src: AstGenSrc, ) void { - var child_prog_node = prog_node.start(file.sub_file_path, 0); - child_prog_node.activate(); + const child_prog_node = prog_node.start(file.sub_file_path, 0); defer child_prog_node.end(); const mod = comp.module.?; @@ -4265,7 +4261,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8, owner_mod: *Package.Module fn workerUpdateCObject( comp: *Compilation, c_object: *CObject, - progress_node: *std.Progress.Node, + progress_node: std.Progress.Node, ) void { comp.updateCObject(c_object, progress_node) catch |err| switch (err) { error.AnalysisFail => return, @@ -4282,7 +4278,7 @@ fn workerUpdateCObject( fn workerUpdateWin32Resource( comp: *Compilation, win32_resource: *Win32Resource, - progress_node: *std.Progress.Node, + progress_node: std.Progress.Node, ) void { comp.updateWin32Resource(win32_resource, progress_node) catch |err| switch (err) { error.AnalysisFail => return, @@ -4300,7 +4296,7 @@ fn buildCompilerRtOneShot( comp: *Compilation, output_mode: std.builtin.OutputMode, out: *?CRTFile, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) void { comp.buildOutputFromZig( "compiler_rt.zig", @@ -4427,7 +4423,7 @@ fn reportRetryableEmbedFileError( } } -fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.Progress.Node) !void { +fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Progress.Node) !void { if (comp.config.c_frontend == .aro) { return comp.failCObj(c_object, "aro does not support compiling C objects yet", .{}); } @@ -4467,9 +4463,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P const c_source_basename = std.fs.path.basename(c_object.src.src_path); - c_obj_prog_node.activate(); - var child_progress_node = c_obj_prog_node.start(c_source_basename, 0); - child_progress_node.activate(); + const child_progress_node = c_obj_prog_node.start(c_source_basename, 0); defer child_progress_node.end(); // Special case when doing build-obj for just one C file. 
When there are more than one object @@ -4731,7 +4725,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P }; } -fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32_resource_prog_node: *std.Progress.Node) !void { +fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32_resource_prog_node: std.Progress.Node) !void { if (!std.process.can_spawn) { return comp.failWin32Resource(win32_resource, "{s} does not support spawning a child process", .{@tagName(builtin.os.tag)}); } @@ -4763,9 +4757,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 _ = comp.failed_win32_resources.swapRemove(win32_resource); } - win32_resource_prog_node.activate(); - var child_progress_node = win32_resource_prog_node.start(src_basename, 0); - child_progress_node.activate(); + const child_progress_node = win32_resource_prog_node.start(src_basename, 0); defer child_progress_node.end(); var man = comp.obtainWin32ResourceCacheManifest(); @@ -4833,7 +4825,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 }); try argv.appendSlice(&.{ "--", in_rc_path, out_res_path }); - try spawnZigRc(comp, win32_resource, src_basename, arena, argv.items, &child_progress_node); + try spawnZigRc(comp, win32_resource, arena, argv.items, child_progress_node); break :blk digest; }; @@ -4901,7 +4893,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 try argv.appendSlice(rc_src.extra_flags); try argv.appendSlice(&.{ "--", rc_src.src_path, out_res_path }); - try spawnZigRc(comp, win32_resource, src_basename, arena, argv.items, &child_progress_node); + try spawnZigRc(comp, win32_resource, arena, argv.items, child_progress_node); // Read depfile and update cache manifest { @@ -4966,10 +4958,9 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 fn spawnZigRc( comp: *Compilation, win32_resource: *Win32Resource, - src_basename: []const u8, arena: Allocator, argv: []const []const u8, - child_progress_node: *std.Progress.Node, + child_progress_node: std.Progress.Node, ) !void { var node_name: std.ArrayListUnmanaged(u8) = .{}; defer node_name.deinit(arena); @@ -4978,6 +4969,7 @@ fn spawnZigRc( child.stdin_behavior = .Ignore; child.stdout_behavior = .Pipe; child.stderr_behavior = .Pipe; + child.progress_node = child_progress_node; child.spawn() catch |err| { return comp.failWin32Resource(win32_resource, "unable to spawn {s} rc: {s}", .{ argv[0], @errorName(err) }); @@ -5019,22 +5011,6 @@ fn spawnZigRc( }; return comp.failWin32ResourceWithOwnedBundle(win32_resource, error_bundle); }, - .progress => { - node_name.clearRetainingCapacity(); - // is a special string that indicates that the child - // process has reached resinator's main function - if (std.mem.eql(u8, body, "")) { - child_progress_node.setName(src_basename); - } - // Ignore 0-length strings since if multiple zig rc commands - // are executed at the same time, only one will send progress strings - // while the other(s) will send empty strings. - else if (body.len > 0) { - try node_name.appendSlice(arena, "build 'zig rc'... 
"); - try node_name.appendSlice(arena, body); - child_progress_node.setName(node_name.items); - } - }, else => {}, // ignore other messages } @@ -5937,8 +5913,8 @@ pub fn lockAndParseLldStderr(comp: *Compilation, prefix: []const u8, stderr: []c } pub fn dump_argv(argv: []const []const u8) void { - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); const stderr = std.io.getStdErr().writer(); for (argv[0 .. argv.len - 1]) |arg| { nosuspend stderr.print("{s} ", .{arg}) catch return; @@ -5989,11 +5965,10 @@ pub fn updateSubCompilation( parent_comp: *Compilation, sub_comp: *Compilation, misc_task: MiscTask, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) !void { { - var sub_node = prog_node.start(@tagName(misc_task), 0); - sub_node.activate(); + const sub_node = prog_node.start(@tagName(misc_task), 0); defer sub_node.end(); try sub_comp.update(prog_node); @@ -6024,7 +5999,7 @@ fn buildOutputFromZig( output_mode: std.builtin.OutputMode, out: *?CRTFile, misc_task_tag: MiscTask, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) !void { const tracy_trace = trace(@src()); defer tracy_trace.end(); @@ -6131,7 +6106,7 @@ pub fn build_crt_file( root_name: []const u8, output_mode: std.builtin.OutputMode, misc_task_tag: MiscTask, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, /// These elements have to get mutated to add the owner module after it is /// created within this function. c_source_files: []CSourceFile, diff --git a/src/Module.zig b/src/Module.zig index c571c851fe57..55b08701dc26 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -2991,8 +2991,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { try mod.deleteDeclExports(decl_index); } - var decl_prog_node = mod.sema_prog_node.start("", 0); - decl_prog_node.activate(); + const decl_prog_node = mod.sema_prog_node.start("", 0); defer decl_prog_node.end(); const sema_result: SemaDeclResult = blk: { @@ -5316,7 +5315,7 @@ fn handleUpdateExports( pub fn populateTestFunctions( mod: *Module, - main_progress_node: *std.Progress.Node, + main_progress_node: std.Progress.Node, ) !void { const gpa = mod.gpa; const ip = &mod.intern_pool; @@ -5333,7 +5332,6 @@ pub fn populateTestFunctions( // We have to call `ensureDeclAnalyzed` here in case `builtin.test_functions` // was not referenced by start code. mod.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); - mod.sema_prog_node.activate(); defer { mod.sema_prog_node.end(); mod.sema_prog_node = undefined; diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 97cbd36bd0b5..6528c2a53ffa 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -35,7 +35,7 @@ name_tok: std.zig.Ast.TokenIndex, lazy_status: LazyStatus, parent_package_root: Cache.Path, parent_manifest_ast: ?*const std.zig.Ast, -prog_node: *std.Progress.Node, +prog_node: std.Progress.Node, job_queue: *JobQueue, /// If true, don't add an error for a missing hash. This flag is not passed /// down to recursive dependencies. It's intended to be used only be the CLI. @@ -720,8 +720,7 @@ fn queueJobsForDeps(f: *Fetch) RunError!void { }; } - // job_queue mutex is locked so this is OK. 
- f.prog_node.unprotected_estimated_total_items += new_fetch_index; + f.prog_node.increaseEstimatedTotalItems(new_fetch_index); break :nf .{ new_fetches[0..new_fetch_index], prog_names[0..new_fetch_index] }; }; @@ -751,9 +750,8 @@ pub fn relativePathDigest( } pub fn workerRun(f: *Fetch, prog_name: []const u8) void { - var prog_node = f.prog_node.start(prog_name, 0); + const prog_node = f.prog_node.start(prog_name, 0); defer prog_node.end(); - prog_node.activate(); run(f) catch |err| switch (err) { error.OutOfMemory => f.oom_flag = true, @@ -1311,9 +1309,8 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource) anyerror!Unpac var index_file = try pack_dir.createFile("pkg.idx", .{ .read = true }); defer index_file.close(); { - var index_prog_node = f.prog_node.start("Index pack", 0); + const index_prog_node = f.prog_node.start("Index pack", 0); defer index_prog_node.end(); - index_prog_node.activate(); var index_buffered_writer = std.io.bufferedWriter(index_file.writer()); try git.indexPack(gpa, pack_file, index_buffered_writer.writer()); try index_buffered_writer.flush(); @@ -1321,9 +1318,8 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource) anyerror!Unpac } { - var checkout_prog_node = f.prog_node.start("Checkout", 0); + const checkout_prog_node = f.prog_node.start("Checkout", 0); defer checkout_prog_node.end(); - checkout_prog_node.activate(); var repository = try git.Repository.init(gpa, pack_file, index_file); defer repository.deinit(); var diagnostics: git.Diagnostics = .{ .allocator = arena }; diff --git a/src/glibc.zig b/src/glibc.zig index 5ec0442d6a8e..6474a23dce02 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -160,7 +160,7 @@ pub const CRTFile = enum { libc_nonshared_a, }; -pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progress.Node) !void { +pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: std.Progress.Node) !void { if (!build_options.have_llvm) { return error.ZigCompilerNotBuiltWithLLVMExtensions; } @@ -658,7 +658,7 @@ pub const BuiltSharedObjects = struct { const all_map_basename = "all.map"; -pub fn buildSharedObjects(comp: *Compilation, prog_node: *std.Progress.Node) !void { +pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !void { const tracy = trace(@src()); defer tracy.end(); @@ -1065,7 +1065,7 @@ fn buildSharedLib( bin_directory: Compilation.Directory, asm_file_basename: []const u8, lib: Lib, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) !void { const tracy = trace(@src()); defer tracy.end(); diff --git a/src/libcxx.zig b/src/libcxx.zig index b1b2014cb576..1c48f775271b 100644 --- a/src/libcxx.zig +++ b/src/libcxx.zig @@ -113,7 +113,7 @@ pub const BuildError = error{ ZigCompilerNotBuiltWithLLVMExtensions, }; -pub fn buildLibCXX(comp: *Compilation, prog_node: *std.Progress.Node) BuildError!void { +pub fn buildLibCXX(comp: *Compilation, prog_node: std.Progress.Node) BuildError!void { if (!build_options.have_llvm) { return error.ZigCompilerNotBuiltWithLLVMExtensions; } @@ -357,7 +357,7 @@ pub fn buildLibCXX(comp: *Compilation, prog_node: *std.Progress.Node) BuildError comp.libcxx_static_lib = try sub_compilation.toCrtFile(); } -pub fn buildLibCXXABI(comp: *Compilation, prog_node: *std.Progress.Node) BuildError!void { +pub fn buildLibCXXABI(comp: *Compilation, prog_node: std.Progress.Node) BuildError!void { if (!build_options.have_llvm) { return error.ZigCompilerNotBuiltWithLLVMExtensions; } diff --git a/src/libtsan.zig b/src/libtsan.zig index 
28dba65772fd..1aa32e6ff07a 100644 --- a/src/libtsan.zig +++ b/src/libtsan.zig @@ -13,7 +13,7 @@ pub const BuildError = error{ TSANUnsupportedCPUArchitecture, }; -pub fn buildTsan(comp: *Compilation, prog_node: *std.Progress.Node) BuildError!void { +pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!void { if (!build_options.have_llvm) { return error.ZigCompilerNotBuiltWithLLVMExtensions; } diff --git a/src/libunwind.zig b/src/libunwind.zig index 808ea298ab9c..77838768272b 100644 --- a/src/libunwind.zig +++ b/src/libunwind.zig @@ -14,7 +14,7 @@ pub const BuildError = error{ ZigCompilerNotBuiltWithLLVMExtensions, }; -pub fn buildStaticLib(comp: *Compilation, prog_node: *std.Progress.Node) BuildError!void { +pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildError!void { if (!build_options.have_llvm) { return error.ZigCompilerNotBuiltWithLLVMExtensions; } diff --git a/src/link.zig b/src/link.zig index c05b545a892e..ef09639dcffd 100644 --- a/src/link.zig +++ b/src/link.zig @@ -535,7 +535,7 @@ pub const File = struct { /// Commit pending changes and write headers. Takes into account final output mode /// and `use_lld`, not only `effectiveOutputMode`. /// `arena` has the lifetime of the call to `Compilation.update`. - pub fn flush(base: *File, arena: Allocator, prog_node: *std.Progress.Node) FlushError!void { + pub fn flush(base: *File, arena: Allocator, prog_node: std.Progress.Node) FlushError!void { if (build_options.only_c) { assert(base.tag == .c); return @as(*C, @fieldParentPtr("base", base)).flush(arena, prog_node); @@ -572,7 +572,7 @@ pub const File = struct { /// Commit pending changes and write headers. Works based on `effectiveOutputMode` /// rather than final output mode. - pub fn flushModule(base: *File, arena: Allocator, prog_node: *std.Progress.Node) FlushError!void { + pub fn flushModule(base: *File, arena: Allocator, prog_node: std.Progress.Node) FlushError!void { switch (base.tag) { inline else => |tag| { if (tag != .c and build_options.only_c) unreachable; @@ -688,7 +688,7 @@ pub const File = struct { } } - pub fn linkAsArchive(base: *File, arena: Allocator, prog_node: *std.Progress.Node) FlushError!void { + pub fn linkAsArchive(base: *File, arena: Allocator, prog_node: std.Progress.Node) FlushError!void { const tracy = trace(@src()); defer tracy.end(); @@ -966,7 +966,7 @@ pub const File = struct { base: File, arena: Allocator, llvm_object: *LlvmObject, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) !void { return base.comp.emitLlvmObject(arena, base.emit, .{ .directory = null, diff --git a/src/link/C.zig b/src/link/C.zig index 07814c9e7101..305af4015639 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -370,7 +370,7 @@ pub fn updateDeclLineNumber(self: *C, zcu: *Zcu, decl_index: InternPool.DeclInde _ = decl_index; } -pub fn flush(self: *C, arena: Allocator, prog_node: *std.Progress.Node) !void { +pub fn flush(self: *C, arena: Allocator, prog_node: std.Progress.Node) !void { return self.flushModule(arena, prog_node); } @@ -389,14 +389,13 @@ fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) { return defines; } -pub fn flushModule(self: *C, arena: Allocator, prog_node: *std.Progress.Node) !void { +pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !void { _ = arena; // Has the same lifetime as the call to Compilation.update. 
const tracy = trace(@src()); defer tracy.end(); - var sub_prog_node = prog_node.start("Flush Module", 0); - sub_prog_node.activate(); + const sub_prog_node = prog_node.start("Flush Module", 0); defer sub_prog_node.end(); const comp = self.base.comp; diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 31cfe1ca9d20..d24d69d913bd 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1702,7 +1702,7 @@ fn resolveGlobalSymbol(self: *Coff, current: SymbolWithLoc) !void { gop.value_ptr.* = current; } -pub fn flush(self: *Coff, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flush(self: *Coff, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { const comp = self.base.comp; const use_lld = build_options.have_llvm and comp.config.use_lld; if (use_lld) { @@ -1714,7 +1714,7 @@ pub fn flush(self: *Coff, arena: Allocator, prog_node: *std.Progress.Node) link. } } -pub fn flushModule(self: *Coff, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(self: *Coff, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1726,8 +1726,7 @@ pub fn flushModule(self: *Coff, arena: Allocator, prog_node: *std.Progress.Node) return; } - var sub_prog_node = prog_node.start("COFF Flush", 0); - sub_prog_node.activate(); + const sub_prog_node = prog_node.start("COFF Flush", 0); defer sub_prog_node.end(); const module = comp.module orelse return error.LinkingWithoutZigSourceUnimplemented; diff --git a/src/link/Coff/lld.zig b/src/link/Coff/lld.zig index 47753cbf0135..c2620c1fe930 100644 --- a/src/link/Coff/lld.zig +++ b/src/link/Coff/lld.zig @@ -16,7 +16,7 @@ const Allocator = mem.Allocator; const Coff = @import("../Coff.zig"); const Compilation = @import("../../Compilation.zig"); -pub fn linkWithLLD(self: *Coff, arena: Allocator, prog_node: *std.Progress.Node) !void { +pub fn linkWithLLD(self: *Coff, arena: Allocator, prog_node: std.Progress.Node) !void { const tracy = trace(@src()); defer tracy.end(); @@ -38,9 +38,7 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, prog_node: *std.Progress.Node) } } else null; - var sub_prog_node = prog_node.start("LLD Link", 0); - sub_prog_node.activate(); - sub_prog_node.context.refresh(); + const sub_prog_node = prog_node.start("LLD Link", 0); defer sub_prog_node.end(); const is_lib = comp.config.output_mode == .Lib; diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 8a3192f93ea3..eb27b4449ed8 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -1064,7 +1064,7 @@ pub fn markDirty(self: *Elf, shdr_index: u32) void { } } -pub fn flush(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flush(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { const use_lld = build_options.have_llvm and self.base.comp.config.use_lld; if (use_lld) { return self.linkWithLLD(arena, prog_node); @@ -1072,7 +1072,7 @@ pub fn flush(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) link.F try self.flushModule(arena, prog_node); } -pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1085,8 +1085,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) if (use_lld) return; } - var 
sub_prog_node = prog_node.start("ELF Flush", 0); - sub_prog_node.activate(); + const sub_prog_node = prog_node.start("ELF Flush", 0); defer sub_prog_node.end(); const target = comp.root_mod.resolved_target.result; @@ -2147,7 +2146,7 @@ fn scanRelocs(self: *Elf) !void { } } -fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) !void { +fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) !void { const tracy = trace(@src()); defer tracy.end(); @@ -2169,9 +2168,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) !voi } } else null; - var sub_prog_node = prog_node.start("LLD Link", 0); - sub_prog_node.activate(); - sub_prog_node.context.refresh(); + const sub_prog_node = prog_node.start("LLD Link", 0); defer sub_prog_node.end(); const output_mode = comp.config.output_mode; diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 7bf195f5f9cb..947a2665de6f 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -360,11 +360,11 @@ pub fn deinit(self: *MachO) void { self.unwind_records.deinit(gpa); } -pub fn flush(self: *MachO, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flush(self: *MachO, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { try self.flushModule(arena, prog_node); } -pub fn flushModule(self: *MachO, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(self: *MachO, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { const tracy = trace(@src()); defer tracy.end(); @@ -375,8 +375,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, prog_node: *std.Progress.Node try self.base.emitLlvmObject(arena, llvm_object, prog_node); } - var sub_prog_node = prog_node.start("MachO Flush", 0); - sub_prog_node.activate(); + const sub_prog_node = prog_node.start("MachO Flush", 0); defer sub_prog_node.end(); const directory = self.base.emit.directory; diff --git a/src/link/NvPtx.zig b/src/link/NvPtx.zig index 111b59fc3ba5..8e1ebc9726ee 100644 --- a/src/link/NvPtx.zig +++ b/src/link/NvPtx.zig @@ -106,11 +106,11 @@ pub fn freeDecl(self: *NvPtx, decl_index: InternPool.DeclIndex) void { return self.llvm_object.freeDecl(decl_index); } -pub fn flush(self: *NvPtx, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flush(self: *NvPtx, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { return self.flushModule(arena, prog_node); } -pub fn flushModule(self: *NvPtx, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(self: *NvPtx, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { if (build_options.skip_non_native) @panic("Attempted to compile for architecture that was disabled by build configuration"); diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index a45142a12ef4..328d669b585d 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -604,7 +604,7 @@ fn allocateGotIndex(self: *Plan9) usize { } } -pub fn flush(self: *Plan9, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flush(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { const comp = self.base.comp; const use_lld = build_options.have_llvm and comp.config.use_lld; assert(!use_lld); @@ -663,7 +663,7 @@ fn atomCount(self: *Plan9) usize { return data_decl_count + fn_decl_count + unnamed_const_count + lazy_atom_count + 
extern_atom_count + anon_atom_count; } -pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { if (build_options.skip_non_native and builtin.object_format != .plan9) { @panic("Attempted to compile for object format that was disabled by build configuration"); } @@ -677,8 +677,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: *std.Progress.Node const tracy = trace(@src()); defer tracy.end(); - var sub_prog_node = prog_node.start("Flush Module", 0); - sub_prog_node.activate(); + const sub_prog_node = prog_node.start("Flush Module", 0); defer sub_prog_node.end(); log.debug("flushModule", .{}); diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 27c905cc615e..0cc238f140af 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -193,11 +193,11 @@ pub fn freeDecl(self: *SpirV, decl_index: InternPool.DeclIndex) void { _ = decl_index; } -pub fn flush(self: *SpirV, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flush(self: *SpirV, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { return self.flushModule(arena, prog_node); } -pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } @@ -205,8 +205,7 @@ pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: *std.Progress.Node const tracy = trace(@src()); defer tracy.end(); - var sub_prog_node = prog_node.start("Flush Module", 0); - sub_prog_node.activate(); + const sub_prog_node = prog_node.start("Flush Module", 0); defer sub_prog_node.end(); const spv = &self.object.spv; @@ -253,7 +252,7 @@ pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: *std.Progress.Node const module = try spv.finalize(arena, target); errdefer arena.free(module); - const linked_module = self.linkModule(arena, module, &sub_prog_node) catch |err| switch (err) { + const linked_module = self.linkModule(arena, module, sub_prog_node) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, else => |other| { log.err("error while linking: {s}\n", .{@errorName(other)}); @@ -264,7 +263,7 @@ pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: *std.Progress.Node try self.base.file.?.writeAll(std.mem.sliceAsBytes(linked_module)); } -fn linkModule(self: *SpirV, a: Allocator, module: []Word, progress: *std.Progress.Node) ![]Word { +fn linkModule(self: *SpirV, a: Allocator, module: []Word, progress: std.Progress.Node) ![]Word { _ = self; const lower_invocation_globals = @import("SpirV/lower_invocation_globals.zig"); diff --git a/src/link/SpirV/deduplicate.zig b/src/link/SpirV/deduplicate.zig index 4cf5ebf65ac0..292ff0e86821 100644 --- a/src/link/SpirV/deduplicate.zig +++ b/src/link/SpirV/deduplicate.zig @@ -418,9 +418,8 @@ const EntityHashContext = struct { } }; -pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: *std.Progress.Node) !void { - var sub_node = progress.start("deduplicate", 0); - sub_node.activate(); +pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: std.Progress.Node) !void { + const sub_node = progress.start("deduplicate", 0); defer sub_node.end(); var 
arena = std.heap.ArenaAllocator.init(parser.a); diff --git a/src/link/SpirV/lower_invocation_globals.zig b/src/link/SpirV/lower_invocation_globals.zig index ee992112c8ee..edf16a7cd80f 100644 --- a/src/link/SpirV/lower_invocation_globals.zig +++ b/src/link/SpirV/lower_invocation_globals.zig @@ -682,9 +682,8 @@ const ModuleBuilder = struct { } }; -pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: *std.Progress.Node) !void { - var sub_node = progress.start("Lower invocation globals", 6); - sub_node.activate(); +pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: std.Progress.Node) !void { + const sub_node = progress.start("Lower invocation globals", 6); defer sub_node.end(); var arena = std.heap.ArenaAllocator.init(parser.a); diff --git a/src/link/SpirV/prune_unused.zig b/src/link/SpirV/prune_unused.zig index cefdaddd93c6..a604d62349f6 100644 --- a/src/link/SpirV/prune_unused.zig +++ b/src/link/SpirV/prune_unused.zig @@ -255,9 +255,8 @@ fn removeIdsFromMap(a: Allocator, map: anytype, info: ModuleInfo, alive_marker: } } -pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: *std.Progress.Node) !void { - var sub_node = progress.start("Prune unused IDs", 0); - sub_node.activate(); +pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: std.Progress.Node) !void { + const sub_node = progress.start("Prune unused IDs", 0); defer sub_node.end(); var arena = std.heap.ArenaAllocator.init(parser.a); diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index bf345813df2d..da6425326b76 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -2464,7 +2464,7 @@ fn appendDummySegment(wasm: *Wasm) !void { }); } -pub fn flush(wasm: *Wasm, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flush(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { const comp = wasm.base.comp; const use_lld = build_options.have_llvm and comp.config.use_lld; @@ -2475,7 +2475,7 @@ pub fn flush(wasm: *Wasm, arena: Allocator, prog_node: *std.Progress.Node) link. } /// Uses the in-house linker to link one or multiple object -and archive files into a WebAssembly binary. -pub fn flushModule(wasm: *Wasm, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { const tracy = trace(@src()); defer tracy.end(); @@ -2486,8 +2486,7 @@ pub fn flushModule(wasm: *Wasm, arena: Allocator, prog_node: *std.Progress.Node) if (use_lld) return; } - var sub_prog_node = prog_node.start("Wasm Flush", 0); - sub_prog_node.activate(); + const sub_prog_node = prog_node.start("Wasm Flush", 0); defer sub_prog_node.end(); const directory = wasm.base.emit.directory; // Just an alias to make it shorter to type. 
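// A sketch, not from the patch: the shape these call sites (and the main.zig
// and cmdFetch changes below) converge on -- one global root node created via
// `std.Progress.start` with `Options`, and by-value child nodes ended with
// `defer`. Assumes the usual `const std = @import("std");`; `step_names` and
// the loop body are placeholders.
fn runAll(step_names: []const []const u8) void {
    const root_node = std.Progress.start(.{
        .root_name = "Build",
        .estimated_total_items = step_names.len,
    });
    defer root_node.end();

    for (step_names) |name| {
        const step_node = root_node.start(name, 0);
        defer step_node.end();
        // ... do the real work for this step, passing `step_node` by value ...
    }
}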
@@ -3323,7 +3322,7 @@ fn emitImport(wasm: *Wasm, writer: anytype, import: types.Import) !void { } } -fn linkWithLLD(wasm: *Wasm, arena: Allocator, prog_node: *std.Progress.Node) !void { +fn linkWithLLD(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node) !void { const tracy = trace(@src()); defer tracy.end(); @@ -3350,9 +3349,7 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, prog_node: *std.Progress.Node) !vo } } else null; - var sub_prog_node = prog_node.start("LLD Link", 0); - sub_prog_node.activate(); - sub_prog_node.context.refresh(); + const sub_prog_node = prog_node.start("LLD Link", 0); defer sub_prog_node.end(); const is_obj = comp.config.output_mode == .Obj; diff --git a/src/main.zig b/src/main.zig index 099ceb27f90c..20f9bfba54a7 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4028,22 +4028,7 @@ fn serve( var child_pid: ?std.process.Child.Id = null; - var progress: std.Progress = .{ - .terminal = null, - .root = .{ - .context = undefined, - .parent = null, - .name = "", - .unprotected_estimated_total_items = 0, - .unprotected_completed_items = 0, - }, - .columns_written = 0, - .prev_refresh_timestamp = 0, - .timer = null, - .done = false, - }; - const main_progress_node = &progress.root; - main_progress_node.context = &progress; + const main_progress_node = std.Progress.start(.{}); while (true) { const hdr = try server.receiveMessage(); @@ -4051,7 +4036,6 @@ fn serve( switch (hdr.tag) { .exit => return cleanExit(), .update => { - assert(main_progress_node.recently_updated_child == null); tracy.frameMark(); if (arg_mode == .translate_c) { @@ -4075,21 +4059,7 @@ fn serve( try comp.makeBinFileWritable(); } - if (builtin.single_threaded) { - try comp.update(main_progress_node); - } else { - var reset: std.Thread.ResetEvent = .{}; - - var progress_thread = try std.Thread.spawn(.{}, progressThread, .{ - &progress, &server, &reset, - }); - defer { - reset.set(); - progress_thread.join(); - } - - try comp.update(main_progress_node); - } + try comp.update(main_progress_node); try comp.makeBinFileExecutable(); try serveUpdateResults(&server, comp); @@ -4116,7 +4086,6 @@ fn serve( }, .hot_update => { tracy.frameMark(); - assert(main_progress_node.recently_updated_child == null); if (child_pid) |pid| { try comp.hotCodeSwap(main_progress_node, pid); try serveUpdateResults(&server, comp); @@ -4146,63 +4115,6 @@ fn serve( } } -fn progressThread(progress: *std.Progress, server: *const Server, reset: *std.Thread.ResetEvent) void { - while (true) { - if (reset.timedWait(500 * std.time.ns_per_ms)) |_| { - // The Compilation update has completed. - return; - } else |err| switch (err) { - error.Timeout => {}, - } - - var buf: std.BoundedArray(u8, 160) = .{}; - - { - progress.update_mutex.lock(); - defer progress.update_mutex.unlock(); - - var need_ellipse = false; - var maybe_node: ?*std.Progress.Node = &progress.root; - while (maybe_node) |node| { - if (need_ellipse) { - buf.appendSlice("... 
") catch {}; - } - need_ellipse = false; - const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .monotonic); - const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .monotonic); - const current_item = completed_items + 1; - if (node.name.len != 0 or eti > 0) { - if (node.name.len != 0) { - buf.appendSlice(node.name) catch {}; - need_ellipse = true; - } - if (eti > 0) { - if (need_ellipse) buf.appendSlice(" ") catch {}; - buf.writer().print("[{d}/{d}] ", .{ current_item, eti }) catch {}; - need_ellipse = false; - } else if (completed_items != 0) { - if (need_ellipse) buf.appendSlice(" ") catch {}; - buf.writer().print("[{d}] ", .{current_item}) catch {}; - need_ellipse = false; - } - } - maybe_node = @atomicLoad(?*std.Progress.Node, &node.recently_updated_child, .acquire); - } - } - - const progress_string = buf.slice(); - - server.serveMessage(.{ - .tag = .progress, - .bytes_len = @as(u32, @intCast(progress_string.len)), - }, &.{ - progress_string, - }) catch |err| { - fatal("unable to write to client: {s}", .{@errorName(err)}); - }; - } -} - fn serveUpdateResults(s: *Server, comp: *Compilation) !void { const gpa = comp.gpa; var error_bundle = try comp.getAllErrorsAlloc(); @@ -4472,19 +4384,10 @@ fn runOrTestHotSwap( fn updateModule(comp: *Compilation, color: Color) !void { { // If the terminal is dumb, we dont want to show the user all the output. - var progress: std.Progress = .{ .dont_print_on_dumb = true }; - const main_progress_node = progress.start("", 0); + const main_progress_node = std.Progress.start(.{ + .disable_printing = color == .off, + }); defer main_progress_node.end(); - switch (color) { - .off => { - progress.terminal = null; - }, - .on => { - progress.terminal = std.io.getStdErr(); - progress.supports_ansi_escape_codes = true; - }, - .auto => {}, - } try comp.update(main_progress_node); } @@ -4736,8 +4639,6 @@ const usage_build = ; fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { - var progress: std.Progress = .{ .dont_print_on_dumb = true }; - var build_file: ?[]const u8 = null; var override_lib_dir: ?[]const u8 = try EnvVar.ZIG_LIB_DIR.get(arena); var override_global_cache_dir: ?[]const u8 = try EnvVar.ZIG_GLOBAL_CACHE_DIR.get(arena); @@ -5051,7 +4952,9 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { config, ); } else { - const root_prog_node = progress.start("Fetch Packages", 0); + const root_prog_node = std.Progress.start(.{ + .root_name = "Fetch Packages", + }); defer root_prog_node.end(); var job_queue: Package.Fetch.JobQueue = .{ @@ -5473,38 +5376,14 @@ fn jitCmd( }; defer comp.destroy(); - if (options.server and !builtin.single_threaded) { - var reset: std.Thread.ResetEvent = .{}; - var progress: std.Progress = .{ - .terminal = null, - .root = .{ - .context = undefined, - .parent = null, - .name = "", - .unprotected_estimated_total_items = 0, - .unprotected_completed_items = 0, - }, - .columns_written = 0, - .prev_refresh_timestamp = 0, - .timer = null, - .done = false, - }; - const main_progress_node = &progress.root; - main_progress_node.context = &progress; + if (options.server) { + const main_progress_node = std.Progress.start(.{}); var server = std.zig.Server{ .out = std.io.getStdOut(), .in = undefined, // won't be receiving messages .receive_fifo = undefined, // won't be receiving messages }; - var progress_thread = try std.Thread.spawn(.{}, progressThread, .{ - &progress, &server, &reset, - }); - defer { - reset.set(); - progress_thread.join(); - } 
- try comp.update(main_progress_node); var error_bundle = try comp.getAllErrorsAlloc(); @@ -6963,8 +6842,9 @@ fn cmdFetch( try http_client.initDefaultProxies(arena); - var progress: std.Progress = .{ .dont_print_on_dumb = true }; - const root_prog_node = progress.start("Fetch", 0); + var root_prog_node = std.Progress.start(.{ + .root_name = "Fetch", + }); defer root_prog_node.end(); var global_cache_directory: Compilation.Directory = l: { @@ -7028,8 +6908,8 @@ fn cmdFetch( const hex_digest = Package.Manifest.hexDigest(fetch.actual_hash); - progress.done = true; - progress.refresh(); + root_prog_node.end(); + root_prog_node = .{ .index = .none }; const name = switch (save) { .no => { diff --git a/src/mingw.zig b/src/mingw.zig index 803c0f936752..5aa79064ee2c 100644 --- a/src/mingw.zig +++ b/src/mingw.zig @@ -16,7 +16,7 @@ pub const CRTFile = enum { mingw32_lib, }; -pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progress.Node) !void { +pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: std.Progress.Node) !void { if (!build_options.have_llvm) { return error.ZigCompilerNotBuiltWithLLVMExtensions; } @@ -234,8 +234,8 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { const include_dir = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "mingw", "def-include" }); if (comp.verbose_cc) print: { - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); const stderr = std.io.getStdErr().writer(); nosuspend stderr.print("def file: {s}\n", .{def_file_path}) catch break :print; nosuspend stderr.print("include dir: {s}\n", .{include_dir}) catch break :print; diff --git a/src/musl.zig b/src/musl.zig index 3228faf27192..edeea9cca7d2 100644 --- a/src/musl.zig +++ b/src/musl.zig @@ -19,7 +19,7 @@ pub const CRTFile = enum { libc_so, }; -pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progress.Node) !void { +pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: std.Progress.Node) !void { if (!build_options.have_llvm) { return error.ZigCompilerNotBuiltWithLLVMExtensions; } diff --git a/src/wasi_libc.zig b/src/wasi_libc.zig index 122d45230b0b..57d93b6f567f 100644 --- a/src/wasi_libc.zig +++ b/src/wasi_libc.zig @@ -57,7 +57,7 @@ pub fn execModelCrtFileFullName(wasi_exec_model: std.builtin.WasiExecModel) []co }; } -pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progress.Node) !void { +pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: std.Progress.Node) !void { if (!build_options.have_llvm) { return error.ZigCompilerNotBuiltWithLLVMExtensions; } diff --git a/test/standalone/cmakedefine/build.zig b/test/standalone/cmakedefine/build.zig index d90441360fce..3c57523373e8 100644 --- a/test/standalone/cmakedefine/build.zig +++ b/test/standalone/cmakedefine/build.zig @@ -80,7 +80,7 @@ pub fn build(b: *std.Build) void { test_step.dependOn(&wrapper_header.step); } -fn compare_headers(step: *std.Build.Step, prog_node: *std.Progress.Node) !void { +fn compare_headers(step: *std.Build.Step, prog_node: std.Progress.Node) !void { _ = prog_node; const allocator = step.owner.allocator; const expected_fmt = "expected_{s}"; From 795c5791a962e4d4b46fa9fd0470fe8162d65521 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 24 May 2024 10:33:35 -0700 Subject: [PATCH 14/60] test runner: update to new std.Progress API --- lib/compiler/test_runner.zig | 31 
+++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig index 194e84b8eab0..dc82545e5497 100644 --- a/lib/compiler/test_runner.zig +++ b/lib/compiler/test_runner.zig @@ -129,12 +129,11 @@ fn mainTerminal() void { var ok_count: usize = 0; var skip_count: usize = 0; var fail_count: usize = 0; - var progress = std.Progress{ - .dont_print_on_dumb = true, - }; - const root_node = progress.start("Test", test_fn_list.len); - const have_tty = progress.terminal != null and - (progress.supports_ansi_escape_codes or progress.is_windows_terminal); + const root_node = std.Progress.start(.{ + .root_name = "Test", + .estimated_total_items = test_fn_list.len, + }); + const have_tty = std.io.getStdErr().isTty(); var async_frame_buffer: []align(builtin.target.stackAlignment()) u8 = undefined; // TODO this is on the next line (using `undefined` above) because otherwise zig incorrectly @@ -151,11 +150,9 @@ fn mainTerminal() void { } std.testing.log_level = .warn; - var test_node = root_node.start(test_fn.name, 0); - test_node.activate(); - progress.refresh(); + const test_node = root_node.start(test_fn.name, 0); if (!have_tty) { - std.debug.print("{d}/{d} {s}... ", .{ i + 1, test_fn_list.len, test_fn.name }); + std.debug.print("{d}/{d} {s}...", .{ i + 1, test_fn_list.len, test_fn.name }); } if (test_fn.func()) |_| { ok_count += 1; @@ -164,12 +161,22 @@ fn mainTerminal() void { } else |err| switch (err) { error.SkipZigTest => { skip_count += 1; - progress.log("SKIP\n", .{}); + if (have_tty) { + std.debug.print("{d}/{d} {s}...SKIP\n", .{ i + 1, test_fn_list.len, test_fn.name }); + } else { + std.debug.print("SKIP\n", .{}); + } test_node.end(); }, else => { fail_count += 1; - progress.log("FAIL ({s})\n", .{@errorName(err)}); + if (have_tty) { + std.debug.print("{d}/{d} {s}...FAIL ({s})\n", .{ + i + 1, test_fn_list.len, test_fn.name, @errorName(err), + }); + } else { + std.debug.print("FAIL ({s})\n", .{@errorName(err)}); + } if (@errorReturnTrace()) |trace| { std.debug.dumpStackTrace(trace.*); } From a486392ee4bfe6f2577aafa9c1d50a176b0de0ea Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 24 May 2024 10:33:53 -0700 Subject: [PATCH 15/60] std.Build.Step: don't create an empty progress node --- lib/std/Build/Step.zig | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index a9657358435e..b6aed17076ce 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -313,16 +313,13 @@ pub fn evalZigProcess( try handleChildProcUnsupported(s, null, argv); try handleVerbose(s.owner, null, argv); - const sub_prog_node = prog_node.start("", 0); - defer sub_prog_node.end(); - var child = std.process.Child.init(argv, arena); child.env_map = &b.graph.env_map; child.stdin_behavior = .Pipe; child.stdout_behavior = .Pipe; child.stderr_behavior = .Pipe; child.request_resource_usage_statistics = true; - child.progress_node = sub_prog_node; + child.progress_node = prog_node; child.spawn() catch |err| return s.fail("unable to spawn {s}: {s}", .{ argv[0], @errorName(err), From 1cf1cb6ae00561b81b4e0863cff43c3fdd28a4a0 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 24 May 2024 10:34:29 -0700 Subject: [PATCH 16/60] std.debug.Trace: follow the struct default field guidance --- lib/std/debug.zig | 12 +++++++++--- lib/std/zig.zig | 2 +- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 
41439df5e6e2..1073d6d3abd9 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -2759,13 +2759,19 @@ pub const Trace = ConfigurableTrace(2, 4, builtin.mode == .Debug); pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize, comptime is_enabled: bool) type { return struct { - addrs: [actual_size][stack_frame_count]usize = undefined, - notes: [actual_size][]const u8 = undefined, - index: Index = 0, + addrs: [actual_size][stack_frame_count]usize, + notes: [actual_size][]const u8, + index: Index, const actual_size = if (enabled) size else 0; const Index = if (enabled) usize else u0; + pub const init: @This() = .{ + .addrs = undefined, + .notes = undefined, + .index = 0, + }; + pub const enabled = is_enabled; pub const add = if (enabled) addNoInline else addNoOp; diff --git a/lib/std/zig.zig b/lib/std/zig.zig index 03921ba77312..26c06780820f 100644 --- a/lib/std/zig.zig +++ b/lib/std/zig.zig @@ -718,7 +718,7 @@ pub const LazySrcLoc = union(enum) { /// where in semantic analysis the value got set. pub const TracedOffset = struct { x: i32, - trace: std.debug.Trace = .{}, + trace: std.debug.Trace = std.debug.Trace.init, const want_tracing = false; }; From b27fdf92fb366aa7b28235822c7871ab9eae9859 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 24 May 2024 10:35:08 -0700 Subject: [PATCH 17/60] Compilation: only create progress nodes for work actually being done --- src/Compilation.zig | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 512ceeabbdc9..1ec7664e8efa 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3254,25 +3254,13 @@ pub fn performAllTheWork( // (at least for now) single-threaded main work queue. However, C object compilation // only needs to be finished by the end of this function. 
- const zir_prog_node = main_progress_node.start("AST Lowering", 0); - defer zir_prog_node.end(); - - const wasm_prog_node = main_progress_node.start("Compile Autodocs", 0); - defer wasm_prog_node.end(); - - const c_obj_prog_node = main_progress_node.start("Compile C Objects", comp.c_source_files.len); - defer c_obj_prog_node.end(); - - const win32_resource_prog_node = main_progress_node.start("Compile Win32 Resources", comp.rc_source_files.len); - defer win32_resource_prog_node.end(); - comp.work_queue_wait_group.reset(); defer comp.work_queue_wait_group.wait(); if (!build_options.only_c and !build_options.only_core_functionality) { if (comp.docs_emit != null) { comp.thread_pool.spawnWg(&comp.work_queue_wait_group, workerDocsCopy, .{comp}); - comp.work_queue_wait_group.spawnManager(workerDocsWasm, .{ comp, wasm_prog_node }); + comp.work_queue_wait_group.spawnManager(workerDocsWasm, .{ comp, main_progress_node }); } } @@ -3280,6 +3268,9 @@ pub fn performAllTheWork( const astgen_frame = tracy.namedFrame("astgen"); defer astgen_frame.end(); + const zir_prog_node = main_progress_node.start("AST Lowering", 0); + defer zir_prog_node.end(); + comp.astgen_wait_group.reset(); defer comp.astgen_wait_group.wait(); @@ -3323,14 +3314,14 @@ pub fn performAllTheWork( while (comp.c_object_work_queue.readItem()) |c_object| { comp.thread_pool.spawnWg(&comp.work_queue_wait_group, workerUpdateCObject, .{ - comp, c_object, c_obj_prog_node, + comp, c_object, main_progress_node, }); } if (!build_options.only_core_functionality) { while (comp.win32_resource_work_queue.readItem()) |win32_resource| { comp.thread_pool.spawnWg(&comp.work_queue_wait_group, workerUpdateWin32Resource, .{ - comp, win32_resource, win32_resource_prog_node, + comp, win32_resource, main_progress_node, }); } } @@ -3800,7 +3791,10 @@ fn docsCopyModule(comp: *Compilation, module: *Package.Module, name: []const u8, } } -fn workerDocsWasm(comp: *Compilation, prog_node: std.Progress.Node) void { +fn workerDocsWasm(comp: *Compilation, parent_prog_node: std.Progress.Node) void { + const prog_node = parent_prog_node.start("Compile Autodocs", 0); + defer prog_node.end(); + workerDocsWasmFallible(comp, prog_node) catch |err| { comp.lockAndSetMiscFailure(.docs_wasm, "unable to build autodocs: {s}", .{ @errorName(err), From 2233d95b0f5edce5b2e0105ef78847fabe86f4d6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 24 May 2024 10:35:33 -0700 Subject: [PATCH 18/60] main: avoid creating multiple std.Progress instances --- src/main.zig | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/src/main.zig b/src/main.zig index 20f9bfba54a7..3369fa4c21d4 100644 --- a/src/main.zig +++ b/src/main.zig @@ -3408,7 +3408,11 @@ fn buildOutputType( return cmdTranslateC(comp, arena, null); } - updateModule(comp, color) catch |err| switch (err) { + const root_prog_node = std.Progress.start(.{ + .disable_printing = (color == .off), + }); + + updateModule(comp, color, root_prog_node) catch |err| switch (err) { error.SemanticAnalyzeFail => { assert(listen == .none); saveState(comp, debug_incremental); @@ -4381,16 +4385,8 @@ fn runOrTestHotSwap( } } -fn updateModule(comp: *Compilation, color: Color) !void { - { - // If the terminal is dumb, we dont want to show the user all the output. 
- const main_progress_node = std.Progress.start(.{ - .disable_printing = color == .off, - }); - defer main_progress_node.end(); - - try comp.update(main_progress_node); - } +fn updateModule(comp: *Compilation, color: Color, prog_node: std.Progress.Node) !void { + try comp.update(prog_node); var errors = try comp.getAllErrorsAlloc(); defer errors.deinit(comp.gpa); @@ -4797,6 +4793,10 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { const work_around_btrfs_bug = native_os == .linux and EnvVar.ZIG_BTRFS_WORKAROUND.isSet(); const color: Color = .auto; + const root_prog_node = std.Progress.start(.{ + .disable_printing = (color == .off), + }); + defer root_prog_node.end(); const target_query: std.Target.Query = .{}; const resolved_target: Package.Module.ResolvedTarget = .{ @@ -4952,10 +4952,8 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { config, ); } else { - const root_prog_node = std.Progress.start(.{ - .root_name = "Fetch Packages", - }); - defer root_prog_node.end(); + const fetch_prog_node = root_prog_node.start("Fetch Packages", 0); + defer fetch_prog_node.end(); var job_queue: Package.Fetch.JobQueue = .{ .http_client = &http_client, @@ -4996,7 +4994,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { .lazy_status = .eager, .parent_package_root = build_mod.root, .parent_manifest_ast = null, - .prog_node = root_prog_node, + .prog_node = fetch_prog_node, .job_queue = &job_queue, .omit_missing_hash_error = true, .allow_missing_paths_field = false, @@ -5135,7 +5133,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { }; defer comp.destroy(); - updateModule(comp, color) catch |err| switch (err) { + updateModule(comp, color, root_prog_node) catch |err| switch (err) { error.SemanticAnalyzeFail => process.exit(2), else => |e| return e, }; @@ -5229,7 +5227,7 @@ const JitCmdOptions = struct { prepend_zig_exe_path: bool = false, depend_on_aro: bool = false, capture: ?*[]u8 = null, - /// Send progress and error bundles via std.zig.Server over stdout + /// Send error bundles via std.zig.Server over stdout server: bool = false, }; @@ -5240,6 +5238,9 @@ fn jitCmd( options: JitCmdOptions, ) !void { const color: Color = .auto; + const root_prog_node = std.Progress.start(.{ + .disable_printing = (color == .off), + }); const target_query: std.Target.Query = .{}; const resolved_target: Package.Module.ResolvedTarget = .{ @@ -5377,14 +5378,13 @@ fn jitCmd( defer comp.destroy(); if (options.server) { - const main_progress_node = std.Progress.start(.{}); var server = std.zig.Server{ .out = std.io.getStdOut(), .in = undefined, // won't be receiving messages .receive_fifo = undefined, // won't be receiving messages }; - try comp.update(main_progress_node); + try comp.update(root_prog_node); var error_bundle = try comp.getAllErrorsAlloc(); defer error_bundle.deinit(comp.gpa); @@ -5393,7 +5393,7 @@ fn jitCmd( process.exit(2); } } else { - updateModule(comp, color) catch |err| switch (err) { + updateModule(comp, color, root_prog_node) catch |err| switch (err) { error.SemanticAnalyzeFail => process.exit(2), else => |e| return e, }; From 70e39c1a20842a2e1579d5ff76845cd1121a907b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 24 May 2024 10:39:07 -0700 Subject: [PATCH 19/60] std.Progress: fixes * bump default statically allocated resources * debug help when multiple instances of std.Progress are initialized * only handle sigwinch on supported operating systems * handle when 
reading from the pipe returns 0 bytes * avoid printing more lines than rows --- lib/std/Progress.zig | 65 +++++++++++++++++++++++++++++++++----------- 1 file changed, 49 insertions(+), 16 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index c5c1d17b9328..69e0a9ecf706 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -293,12 +293,14 @@ var global_progress: Progress = .{ .node_end_index = 0, }; -const default_node_storage_buffer_len = 100; +const default_node_storage_buffer_len = 200; var node_parents_buffer: [default_node_storage_buffer_len]Node.Parent = undefined; var node_storage_buffer: [default_node_storage_buffer_len]Node.Storage = undefined; var node_freelist_buffer: [default_node_storage_buffer_len]Node.OptionalIndex = undefined; -var default_draw_buffer: [2000]u8 = undefined; +var default_draw_buffer: [4096]u8 = undefined; + +var debug_start_trace = std.debug.Trace.init; /// Initializes a global Progress instance. /// @@ -307,7 +309,11 @@ var default_draw_buffer: [2000]u8 = undefined; /// Call `Node.end` when done. pub fn start(options: Options) Node { // Ensure there is only 1 global Progress object. - assert(global_progress.node_end_index == 0); + if (global_progress.node_end_index != 0) { + debug_start_trace.dump(); + unreachable; + } + debug_start_trace.add("first initialized here"); @memset(global_progress.node_parents, .unused); const root_node = Node.init(@enumFromInt(0), .none, options.root_name, options.estimated_total_items); @@ -347,14 +353,16 @@ pub fn start(options: Options) Node { return .{ .index = .none }; } - var act: posix.Sigaction = .{ - .handler = .{ .sigaction = handleSigWinch }, - .mask = posix.empty_sigset, - .flags = (posix.SA.SIGINFO | posix.SA.RESTART), - }; - posix.sigaction(posix.SIG.WINCH, &act, null) catch |err| { - std.log.warn("failed to install SIGWINCH signal handler for noticing terminal resizes: {s}", .{@errorName(err)}); - }; + if (have_sigwinch) { + var act: posix.Sigaction = .{ + .handler = .{ .sigaction = handleSigWinch }, + .mask = posix.empty_sigset, + .flags = (posix.SA.SIGINFO | posix.SA.RESTART), + }; + posix.sigaction(posix.SIG.WINCH, &act, null) catch |err| { + std.log.warn("failed to install SIGWINCH signal handler for noticing terminal resizes: {s}", .{@errorName(err)}); + }; + } if (std.Thread.spawn(.{}, updateThreadRun, .{})) |thread| { global_progress.update_thread = thread; @@ -595,7 +603,7 @@ fn serializeIpc(start_serialized_len: usize) usize { const fd = main_storage.getIpcFd() orelse continue; var bytes_read: usize = 0; while (true) { - bytes_read += posix.read(fd, pipe_buf[bytes_read..]) catch |err| switch (err) { + const n = posix.read(fd, pipe_buf[bytes_read..]) catch |err| switch (err) { error.WouldBlock => break, else => |e| { std.log.warn("failed to read child progress data: {s}", .{@errorName(e)}); @@ -604,6 +612,8 @@ fn serializeIpc(start_serialized_len: usize) usize { continue :main_loop; }, }; + if (n == 0) break; + bytes_read += n; } // Ignore all but the last message on the pipe. 
var input: []align(2) u8 = pipe_buf[0..bytes_read]; @@ -831,12 +841,16 @@ fn computeNode( i += 1; global_progress.newline_count += 1; - if (children[@intFromEnum(node_index)].child.unwrap()) |child| { - i = computeNode(buf, i, serialized, children, child); + if (global_progress.newline_count < global_progress.rows) { + if (children[@intFromEnum(node_index)].child.unwrap()) |child| { + i = computeNode(buf, i, serialized, children, child); + } } - if (children[@intFromEnum(node_index)].sibling.unwrap()) |sibling| { - i = computeNode(buf, i, serialized, children, sibling); + if (global_progress.newline_count < global_progress.rows) { + if (children[@intFromEnum(node_index)].sibling.unwrap()) |sibling| { + i = computeNode(buf, i, serialized, children, sibling); + } } return i; @@ -910,4 +924,23 @@ fn handleSigWinch(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*anyopaque) global_progress.redraw_event.set(); } +const have_sigwinch = switch (builtin.os.tag) { + .linux, + .plan9, + .solaris, + .netbsd, + .openbsd, + .haiku, + .macos, + .ios, + .watchos, + .tvos, + .visionos, + .dragonfly, + .freebsd, + => true, + + else => false, +}; + var stderr_mutex: std.Thread.Mutex = .{}; From 352dc2c06a470d88c33dfbfdf3fdbb093097775c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 24 May 2024 10:51:14 -0700 Subject: [PATCH 20/60] compiler: show decl name in progress node --- src/Module.zig | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index 55b08701dc26..a31111897053 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -2942,11 +2942,12 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { const tracy = trace(@src()); defer tracy.end(); + const ip = &mod.intern_pool; const decl = mod.declPtr(decl_index); log.debug("ensureDeclAnalyzed '{d}' (name '{}')", .{ @intFromEnum(decl_index), - decl.name.fmt(&mod.intern_pool), + decl.name.fmt(ip), }); // Determine whether or not this Decl is outdated, i.e. requires re-analysis @@ -2991,9 +2992,6 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { try mod.deleteDeclExports(decl_index); } - const decl_prog_node = mod.sema_prog_node.start("", 0); - defer decl_prog_node.end(); - const sema_result: SemaDeclResult = blk: { if (decl.zir_decl_index == .none and !mod.declIsRoot(decl_index)) { // Anonymous decl. We don't semantically analyze these. 
@@ -3011,6 +3009,9 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { }; } + const decl_prog_node = mod.sema_prog_node.start(decl.name.toSlice(ip), 0); + defer decl_prog_node.end(); + break :blk mod.semaDecl(decl_index) catch |err| switch (err) { error.AnalysisFail => { if (decl.analysis == .in_progress) { From 275917345237b79761a0692dbc7d47080cd4c8b5 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 24 May 2024 11:58:57 -0700 Subject: [PATCH 21/60] zig build: lock stderr while executing the build runner --- src/main.zig | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/main.zig b/src/main.zig index 3369fa4c21d4..ac8a483d1732 100644 --- a/src/main.zig +++ b/src/main.zig @@ -5151,7 +5151,12 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { child.stdout_behavior = .Inherit; child.stderr_behavior = .Inherit; - const term = try child.spawnAndWait(); + const term = t: { + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); + break :t try child.spawnAndWait(); + }; + switch (term) { .Exited => |code| { if (code == 0) return cleanExit(); From 516366f78f00d7627b772c0fc8a41ba7747a4f61 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 24 May 2024 11:59:21 -0700 Subject: [PATCH 22/60] std.Progress: skip printing root node when it is empty --- lib/std/Progress.zig | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 69e0a9ecf706..36e2c9d40217 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -592,7 +592,7 @@ const SavedMetadata = extern struct { fn serializeIpc(start_serialized_len: usize) usize { var serialized_len = start_serialized_len; - var pipe_buf: [4096]u8 align(4) = undefined; + var pipe_buf: [2 * 4096]u8 align(4) = undefined; main_loop: for ( serialized_node_parents_buffer[0..serialized_len], @@ -836,10 +836,13 @@ fn computeNode( } } - i = @min(global_progress.cols + start_i, i); - buf[i] = '\n'; - i += 1; - global_progress.newline_count += 1; + const is_empty_root = @intFromEnum(node_index) == 0 and serialized.storage[0].name[0] == 0; + if (!is_empty_root) { + i = @min(global_progress.cols + start_i, i); + buf[i] = '\n'; + i += 1; + global_progress.newline_count += 1; + } if (global_progress.newline_count < global_progress.rows) { if (children[@intFromEnum(node_index)].child.unwrap()) |child| { From ca03c9c512f09baff0ea6e44ec665ec6310526c7 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 24 May 2024 15:03:20 -0700 Subject: [PATCH 23/60] std.Progress: fix race condition with IPC nodes It stored some metadata into the canonical node storage data but that is a race condition because another thread recycles those nodes. Also, keep the parent name for empty child root node names. 
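For reference, the replacement bookkeeping can be summarized as: the saved copy of a child's subtree is keyed by the child's pipe descriptor, which stays stable for the child's lifetime, instead of being stashed inside the canonical node storage, whose slots another thread may recycle between redraws. A condensed sketch of that idea (simplified names, not the exact declarations in the diff below):

const SavedIpcSubtree = struct {
    ipc_fd: u16, // stable key: the child's progress pipe descriptor
    start_index: u16, // where the child's nodes were copied on the previous redraw
    nodes_len: u16,
};

// Returns the subtree saved on the previous redraw for this pipe, or null if
// the child was not seen last time (for example, its pipe was empty or it is new).
fn findSaved(saved: []const SavedIpcSubtree, ipc_fd: u16) ?*const SavedIpcSubtree {
    for (saved) |*s| {
        if (s.ipc_fd == ipc_fd) return s;
    }
    return null;
}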
--- lib/std/Progress.zig | 89 ++++++++++++++++++++++++-------------------- 1 file changed, 49 insertions(+), 40 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 36e2c9d40217..216460e6b7a3 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -86,17 +86,7 @@ pub const Node = struct { name: [max_name_len]u8, fn getIpcFd(s: Storage) ?posix.fd_t { - if (s.estimated_total_count != std.math.maxInt(u32)) - return null; - - const low: u16 = @truncate(s.completed_count); - return low; - } - - fn getMainStorageIndex(s: Storage) Node.Index { - assert(s.estimated_total_count == std.math.maxInt(u32)); - const i: u16 = @truncate(s.completed_count >> 16); - return @enumFromInt(i); + return if (s.estimated_total_count != std.math.maxInt(u32)) null else @bitCast(s.completed_count); } fn setIpcFd(s: *Storage, fd: posix.fd_t) void { @@ -538,14 +528,9 @@ fn serialize() Serialized { @memcpy(&dest_storage.name, &storage_ptr.name); dest_storage.completed_count = @atomicLoad(u32, &storage_ptr.completed_count, .monotonic); dest_storage.estimated_total_count = @atomicLoad(u32, &storage_ptr.estimated_total_count, .monotonic); - - if (dest_storage.getIpcFd() != null) { - any_ipc = true; - dest_storage.completed_count |= @as(u32, @intCast(i)) << 16; - } - const end_parent = @atomicLoad(Node.Parent, parent_ptr, .seq_cst); if (begin_parent == end_parent) { + any_ipc = any_ipc or (dest_storage.getIpcFd() != null); serialized_node_parents_buffer[serialized_len] = begin_parent; serialized_node_map_buffer[i] = @enumFromInt(serialized_len); serialized_len += 1; @@ -577,23 +562,25 @@ fn serialize() Serialized { var parents_copy: [default_node_storage_buffer_len]Node.Parent = undefined; var storage_copy: [default_node_storage_buffer_len]Node.Storage = undefined; +var ipc_metadata_copy: [default_node_storage_buffer_len]SavedMetadata = undefined; -const SavedMetadata = extern struct { +var ipc_metadata: [default_node_storage_buffer_len]SavedMetadata = undefined; +var ipc_metadata_len: u16 = 0; + +const SavedMetadata = struct { + ipc_fd: u16, + main_index: u16, start_index: u16, nodes_len: u16, - main_index: u16, - flags: Flags, - - const Flags = enum(u16) { - saved = std.math.maxInt(u16), - _, - }; }; fn serializeIpc(start_serialized_len: usize) usize { var serialized_len = start_serialized_len; var pipe_buf: [2 * 4096]u8 align(4) = undefined; + const old_ipc_metadata = ipc_metadata_copy[0..ipc_metadata_len]; + ipc_metadata_len = 0; + main_loop: for ( serialized_node_parents_buffer[0..serialized_len], serialized_node_storage_buffer[0..serialized_len], @@ -618,7 +605,7 @@ fn serializeIpc(start_serialized_len: usize) usize { // Ignore all but the last message on the pipe. 
var input: []align(2) u8 = pipe_buf[0..bytes_read]; if (input.len == 0) { - serialized_len = useSavedIpcData(serialized_len, main_storage, main_index); + serialized_len = useSavedIpcData(serialized_len, main_storage, main_index, old_ipc_metadata); continue; } @@ -626,7 +613,7 @@ fn serializeIpc(start_serialized_len: usize) usize { if (input.len < 4) { std.log.warn("short read: {d} out of 4 header bytes", .{input.len}); // TODO keep track of the short read to trash odd bytes with the next read - serialized_len = useSavedIpcData(serialized_len, main_storage, main_index); + serialized_len = useSavedIpcData(serialized_len, main_storage, main_index, old_ipc_metadata); continue :main_loop; } const subtree_len = std.mem.readInt(u32, input[0..4], .little); @@ -634,7 +621,7 @@ fn serializeIpc(start_serialized_len: usize) usize { if (input.len < expected_bytes) { std.log.warn("short read: {d} out of {d} ({d} nodes)", .{ input.len, expected_bytes, subtree_len }); // TODO keep track of the short read to trash odd bytes with the next read - serialized_len = useSavedIpcData(serialized_len, main_storage, main_index); + serialized_len = useSavedIpcData(serialized_len, main_storage, main_index, old_ipc_metadata); continue :main_loop; } if (input.len > expected_bytes) { @@ -650,16 +637,16 @@ fn serializeIpc(start_serialized_len: usize) usize { }; // Remember in case the pipe is empty on next update. - const real_storage: *Node.Storage = Node.storageByIndex(main_storage.getMainStorageIndex()); - @as(*SavedMetadata, @ptrCast(&real_storage.name)).* = .{ + ipc_metadata[ipc_metadata_len] = .{ + .ipc_fd = @intCast(fd), .start_index = @intCast(serialized_len), .nodes_len = @intCast(parents.len), .main_index = @intCast(main_index), - .flags = .saved, }; + ipc_metadata_len += 1; // Mount the root here. - main_storage.* = storage[0]; + copyRoot(main_storage, &storage[0]); // Copy the rest of the tree to the end. @memcpy(serialized_node_storage_buffer[serialized_len..][0 .. storage.len - 1], storage[1..]); @@ -685,34 +672,56 @@ fn serializeIpc(start_serialized_len: usize) usize { // Save a copy in case any pipes are empty on the next update. 
@memcpy(parents_copy[0..serialized_len], serialized_node_parents_buffer[0..serialized_len]); @memcpy(storage_copy[0..serialized_len], serialized_node_storage_buffer[0..serialized_len]); + @memcpy(ipc_metadata_copy[0..ipc_metadata_len], ipc_metadata[0..ipc_metadata_len]); return serialized_len; } -fn useSavedIpcData(start_serialized_len: usize, main_storage: *Node.Storage, main_index: usize) usize { - const saved_metadata: *SavedMetadata = @ptrCast(&main_storage.name); - if (saved_metadata.flags != .saved) { +fn copyRoot(dest: *Node.Storage, src: *align(2) Node.Storage) void { + dest.* = .{ + .completed_count = src.completed_count, + .estimated_total_count = src.estimated_total_count, + .name = if (src.name[0] == 0) dest.name else src.name, + }; +} + +fn findOld(ipc_fd: posix.fd_t, old_metadata: []const SavedMetadata) ?*const SavedMetadata { + for (old_metadata) |*m| { + if (m.ipc_fd == ipc_fd) + return m; + } + return null; +} + +fn useSavedIpcData( + start_serialized_len: usize, + main_storage: *Node.Storage, + main_index: usize, + old_metadata: []const SavedMetadata, +) usize { + const ipc_fd = main_storage.getIpcFd().?; + const saved_metadata = findOld(ipc_fd, old_metadata) orelse { main_storage.completed_count = 0; main_storage.estimated_total_count = 0; return start_serialized_len; - } + }; const start_index = saved_metadata.start_index; const nodes_len = saved_metadata.nodes_len; const old_main_index = saved_metadata.main_index; - const real_storage: *Node.Storage = Node.storageByIndex(main_storage.getMainStorageIndex()); - @as(*SavedMetadata, @ptrCast(&real_storage.name)).* = .{ + ipc_metadata[ipc_metadata_len] = .{ + .ipc_fd = @intCast(ipc_fd), .start_index = @intCast(start_serialized_len), .nodes_len = nodes_len, .main_index = @intCast(main_index), - .flags = .saved, }; + ipc_metadata_len += 1; const parents = parents_copy[start_index..][0 .. nodes_len - 1]; const storage = storage_copy[start_index..][0 .. nodes_len - 1]; - main_storage.* = storage_copy[old_main_index]; + copyRoot(main_storage, &storage_copy[old_main_index]); @memcpy(serialized_node_storage_buffer[start_serialized_len..][0..storage.len], storage); From 9331da8fe3854c57446574a1afc75e6e0811f610 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 24 May 2024 15:14:30 -0700 Subject: [PATCH 24/60] std.Build.Step.Run: don't create empty progress node --- lib/std/Build/Step/Run.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index d49d0b3ce2a8..1e7708120fb7 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -1236,7 +1236,7 @@ fn spawnChildAndCollect( } if (run.stdio != .zig_test) { - child.progress_node = prog_node.start("", 0); + child.progress_node = prog_node; } try child.spawn(); From acdf988c24c51129ed3ad9929b6ed8482b9abe54 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 26 May 2024 01:50:54 +0100 Subject: [PATCH 25/60] std.process.Child: prevent racing children from inheriting progress pipes This fix is already in master branch for stdin, stdout, and stderr; this commit solves the same problem but for the progress pipe. Both fixes were originally included in one commit on this branch, however it was split it into two so that master branch could receive the fix before the progress branch is merged. 
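For context, CLOEXEC is compatible with the intended child still receiving the descriptor because dup2 does not propagate the close-on-exec flag: duplicating the write end onto the agreed fd number in the forked child keeps it open across exec for that child only, while any sibling forked concurrently by another thread drops the inherited original when it execs. A minimal sketch of the pattern, using the same std.posix calls as the diff below (helper names are hypothetical):

const std = @import("std");
const posix = std.posix;

// Parent side: the progress pipe is created non-blocking and close-on-exec.
fn createProgressPipe() ![2]posix.fd_t {
    return posix.pipe2(.{ .NONBLOCK = true, .CLOEXEC = true });
}

// Child side (after fork, before exec): move the write end onto the agreed
// fd number. The descriptor produced by dup2 has CLOEXEC cleared, so it
// survives exec in this child only.
fn installProgressFd(write_end: posix.fd_t, prog_fileno: posix.fd_t) !void {
    try posix.dup2(write_end, prog_fileno);
}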
--- lib/std/process/Child.zig | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index c1f935a852f0..1be56fc4892d 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -587,8 +587,8 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { if (self.progress_node.index == .none) { break :p .{ -1, -1 }; } else { - // No CLOEXEC because the child needs access to this file descriptor. - break :p try posix.pipe2(.{ .NONBLOCK = true }); + // We use CLOEXEC for the same reason as in `pipe_flags`. + break :p try posix.pipe2(.{ .NONBLOCK = true, .CLOEXEC = true }); } }; errdefer destroyPipe(prog_pipe); @@ -655,11 +655,6 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { setUpChildIo(self.stderr_behavior, stderr_pipe[1], posix.STDERR_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); if (prog_pipe[1] != -1) posix.dup2(prog_pipe[1], prog_fileno) catch |err| forkChildErrReport(err_pipe[1], err); - if (prog_pipe[1] != -1) { - if (prog_pipe[0] != prog_fileno) posix.close(prog_pipe[0]); - if (prog_pipe[1] != prog_fileno) posix.close(prog_pipe[1]); - } - if (self.cwd_dir) |cwd| { posix.fchdir(cwd.fd) catch |err| forkChildErrReport(err_pipe[1], err); } else if (self.cwd) |cwd| { From d403d8cb7a147856232430afe9af8562d59de38b Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 26 May 2024 01:58:26 +0100 Subject: [PATCH 26/60] Module: fix and improve progress reporting * correctly report time spent analyzing function bodies * print fully qualified decl names * also have a progress node for codegen The downside of these changes is that it's a bit flickerey, but the upside is that it's accurate; you can see what the compiler's doing! --- src/Compilation.zig | 3 +++ src/Module.zig | 20 ++++++++++++++++---- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 1ec7664e8efa..f7ea2434b369 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3331,10 +3331,13 @@ pub fn performAllTheWork( try reportMultiModuleErrors(mod); try mod.flushRetryableFailures(); mod.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); + mod.codegen_prog_node = main_progress_node.start("Code Generation", 0); } defer if (comp.module) |mod| { mod.sema_prog_node.end(); mod.sema_prog_node = undefined; + mod.codegen_prog_node.end(); + mod.codegen_prog_node = undefined; }; while (true) { diff --git a/src/Module.zig b/src/Module.zig index a31111897053..d38a233a774f 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -66,6 +66,7 @@ root_mod: *Package.Module, main_mod: *Package.Module, std_mod: *Package.Module, sema_prog_node: std.Progress.Node = undefined, +codegen_prog_node: std.Progress.Node = undefined, /// Used by AstGen worker to load and store ZIR cache. 
global_zir_cache: Compilation.Directory, @@ -3009,7 +3010,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { }; } - const decl_prog_node = mod.sema_prog_node.start(decl.name.toSlice(ip), 0); + const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0); defer decl_prog_node.end(); break :blk mod.semaDecl(decl_index) catch |err| switch (err) { @@ -3215,6 +3216,9 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In }; } + const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(ip), 0); + defer codegen_prog_node.end(); + if (comp.bin_file) |lf| { lf.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, @@ -4500,6 +4504,9 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato log.debug("finish func name '{}'", .{(decl.fullyQualifiedName(mod) catch break :blk).fmt(ip)}); } + const decl_prog_node = mod.sema_prog_ndoe.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0); + defer decl_prog_node.end(); + mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.Depender.wrap(.{ .func = func_index })); var comptime_err_ret_trace = std.ArrayList(SrcLoc).init(gpa); @@ -5333,9 +5340,12 @@ pub fn populateTestFunctions( // We have to call `ensureDeclAnalyzed` here in case `builtin.test_functions` // was not referenced by start code. mod.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); + mod.codegen_prog_node = main_progress_node.start("Code Generation", 0); defer { mod.sema_prog_node.end(); mod.sema_prog_node = undefined; + mod.codegen_prog_node.end(); + mod.codegen_prog_node = undefined; } try mod.ensureDeclAnalyzed(decl_index); } @@ -5445,15 +5455,18 @@ pub fn populateTestFunctions( pub fn linkerUpdateDecl(zcu: *Zcu, decl_index: Decl.Index) !void { const comp = zcu.comp; + const decl = zcu.declPtr(decl_index); + + const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(&zcu.intern_pool), 0); + defer codegen_prog_node.end(); + if (comp.bin_file) |lf| { lf.updateDecl(zcu, decl_index) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { - const decl = zcu.declPtr(decl_index); decl.analysis = .codegen_failure; }, else => { - const decl = zcu.declPtr(decl_index); const gpa = zcu.gpa; try zcu.failed_decls.ensureUnusedCapacity(gpa, 1); zcu.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( @@ -5471,7 +5484,6 @@ pub fn linkerUpdateDecl(zcu: *Zcu, decl_index: Decl.Index) !void { llvm_object.updateDecl(zcu, decl_index) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { - const decl = zcu.declPtr(decl_index); decl.analysis = .codegen_failure; }, }; From d77f5e7aaa94b66db4e3604f21c41b315743fb81 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 26 May 2024 07:07:44 -0400 Subject: [PATCH 27/60] Progress: fix compile errors on windows Works for `zig build-exe`, IPC still not implemented yet. 
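Most of these compile errors come down to posix.fd_t being an integer on POSIX targets but a pointer (a HANDLE) on Windows, so any code that packs a descriptor into a 32-bit field has to branch on the representation at comptime. A condensed sketch of the pattern used throughout the diff below (hypothetical helper; the real change additionally relies on Windows handle values being small enough to fit):

const std = @import("std");
const posix = std.posix;

// Pack a descriptor into a u32 regardless of how fd_t is represented.
fn fdToU32(fd: posix.fd_t) u32 {
    return switch (@typeInfo(posix.fd_t)) {
        .Int => @bitCast(fd),
        // Assumes the handle value fits in 32 bits.
        .Pointer => @intCast(@intFromPtr(fd)),
        else => @compileError("unsupported fd_t: " ++ @typeName(posix.fd_t)),
    };
}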
--- lib/std/Progress.zig | 79 ++++++++++++++++++++++++++++++++------------ lib/std/fmt.zig | 59 +++++++++++++++++++-------------- lib/std/io/tty.zig | 2 +- lib/std/process.zig | 5 +-- src/Module.zig | 2 +- 5 files changed, 96 insertions(+), 51 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 216460e6b7a3..af688994865b 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -86,12 +86,20 @@ pub const Node = struct { name: [max_name_len]u8, fn getIpcFd(s: Storage) ?posix.fd_t { - return if (s.estimated_total_count != std.math.maxInt(u32)) null else @bitCast(s.completed_count); + return if (s.estimated_total_count == std.math.maxInt(u32)) switch (@typeInfo(posix.fd_t)) { + .Int => @bitCast(s.completed_count), + .Pointer => @ptrFromInt(s.completed_count), + else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)), + } else null; } fn setIpcFd(s: *Storage, fd: posix.fd_t) void { s.estimated_total_count = std.math.maxInt(u32); - s.completed_count = @bitCast(fd); + s.completed_count = switch (@typeInfo(posix.fd_t)) { + .Int => @bitCast(fd), + .Pointer => @intFromPtr(fd), + else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)), + }; } comptime { @@ -316,12 +324,16 @@ pub fn start(options: Options) Node { global_progress.initial_delay_ns = options.initial_delay_ns; if (std.process.parseEnvVarInt("ZIG_PROGRESS", u31, 10)) |ipc_fd| { - if (std.Thread.spawn(.{}, ipcThreadRun, .{ipc_fd})) |thread| { - global_progress.update_thread = thread; - } else |err| { + global_progress.update_thread = std.Thread.spawn(.{}, ipcThreadRun, .{ + @as(posix.fd_t, switch (@typeInfo(posix.fd_t)) { + .Int => ipc_fd, + .Pointer => @ptrFromInt(ipc_fd), + else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)), + }), + }) catch |err| { std.log.warn("failed to spawn IPC thread for communicating progress to parent: {s}", .{@errorName(err)}); return .{ .index = .none }; - } + }; } else |env_err| switch (env_err) { error.EnvironmentVariableNotFound => { if (options.disable_printing) { @@ -572,6 +584,20 @@ const SavedMetadata = struct { main_index: u16, start_index: u16, nodes_len: u16, + + fn getIpcFd(metadata: SavedMetadata) posix.fd_t { + return if (builtin.os.tag == .windows) + @ptrFromInt(@as(usize, metadata.ipc_fd) << 2) + else + metadata.ipc_fd; + } + + fn setIpcFd(fd: posix.fd_t) u16 { + return @intCast(if (builtin.os.tag == .windows) + @shrExact(@intFromPtr(fd), 2) + else + fd); + } }; fn serializeIpc(start_serialized_len: usize) usize { @@ -638,7 +664,7 @@ fn serializeIpc(start_serialized_len: usize) usize { // Remember in case the pipe is empty on next update. 
ipc_metadata[ipc_metadata_len] = .{ - .ipc_fd = @intCast(fd), + .ipc_fd = SavedMetadata.setIpcFd(fd), .start_index = @intCast(serialized_len), .nodes_len = @intCast(parents.len), .main_index = @intCast(main_index), @@ -687,7 +713,7 @@ fn copyRoot(dest: *Node.Storage, src: *align(2) Node.Storage) void { fn findOld(ipc_fd: posix.fd_t, old_metadata: []const SavedMetadata) ?*const SavedMetadata { for (old_metadata) |*m| { - if (m.ipc_fd == ipc_fd) + if (m.getIpcFd() == ipc_fd) return m; } return null; @@ -711,7 +737,7 @@ fn useSavedIpcData( const old_main_index = saved_metadata.main_index; ipc_metadata[ipc_metadata_len] = .{ - .ipc_fd = @intCast(ipc_fd), + .ipc_fd = SavedMetadata.setIpcFd(ipc_fd), .start_index = @intCast(start_serialized_len), .nodes_len = nodes_len, .main_index = @intCast(main_index), @@ -911,21 +937,32 @@ fn writeIpc(fd: posix.fd_t, serialized: Serialized) error{BrokenPipe}!void { fn maybeUpdateSize(resize_flag: bool) void { if (!resize_flag) return; - var winsize: posix.winsize = .{ - .ws_row = 0, - .ws_col = 0, - .ws_xpixel = 0, - .ws_ypixel = 0, - }; - const fd = (global_progress.terminal orelse return).handle; - const err = posix.system.ioctl(fd, posix.T.IOCGWINSZ, @intFromPtr(&winsize)); - if (posix.errno(err) == .SUCCESS) { - global_progress.rows = winsize.ws_row; - global_progress.cols = winsize.ws_col; + if (builtin.os.tag == .windows) { + var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined; + + if (windows.kernel32.GetConsoleScreenBufferInfo(fd, &info) == windows.FALSE) { + @panic("TODO: handle this failure"); + } + + global_progress.rows = @intCast(info.dwSize.Y); + global_progress.cols = @intCast(info.dwSize.X); } else { - @panic("TODO: handle this failure"); + var winsize: posix.winsize = .{ + .ws_row = 0, + .ws_col = 0, + .ws_xpixel = 0, + .ws_ypixel = 0, + }; + + const err = posix.system.ioctl(fd, posix.T.IOCGWINSZ, @intFromPtr(&winsize)); + if (posix.errno(err) == .SUCCESS) { + global_progress.rows = winsize.ws_row; + global_progress.cols = winsize.ws_col; + } else { + @panic("TODO: handle this failure"); + } } } diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index 2d1f4402d647..1a7475e1c598 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -9,7 +9,7 @@ const assert = std.debug.assert; const mem = std.mem; const unicode = std.unicode; const meta = std.meta; -const lossyCast = std.math.lossyCast; +const lossyCast = math.lossyCast; const expectFmt = std.testing.expectFmt; pub const default_max_depth = 3; @@ -1494,10 +1494,20 @@ pub fn Formatter(comptime format_fn: anytype) type { /// Ignores '_' character in `buf`. /// See also `parseUnsigned`. pub fn parseInt(comptime T: type, buf: []const u8, base: u8) ParseIntError!T { + return parseIntWithGenericCharacter(T, u8, buf, base); +} + +/// Like `parseInt`, but with a generic `Character` type. 
+pub fn parseIntWithGenericCharacter( + comptime Result: type, + comptime Character: type, + buf: []const Character, + base: u8, +) ParseIntError!Result { if (buf.len == 0) return error.InvalidCharacter; - if (buf[0] == '+') return parseWithSign(T, buf[1..], base, .pos); - if (buf[0] == '-') return parseWithSign(T, buf[1..], base, .neg); - return parseWithSign(T, buf, base, .pos); + if (buf[0] == '+') return parseIntWithSign(Result, Character, buf[1..], base, .pos); + if (buf[0] == '-') return parseIntWithSign(Result, Character, buf[1..], base, .neg); + return parseIntWithSign(Result, Character, buf, base, .pos); } test parseInt { @@ -1560,12 +1570,13 @@ test parseInt { try std.testing.expectEqual(@as(i5, -16), try std.fmt.parseInt(i5, "-10", 16)); } -fn parseWithSign( - comptime T: type, - buf: []const u8, +fn parseIntWithSign( + comptime Result: type, + comptime Character: type, + buf: []const Character, base: u8, comptime sign: enum { pos, neg }, -) ParseIntError!T { +) ParseIntError!Result { if (buf.len == 0) return error.InvalidCharacter; var buf_base = base; @@ -1575,7 +1586,7 @@ fn parseWithSign( buf_base = 10; // Detect the base by looking at buf prefix. if (buf.len > 2 and buf[0] == '0') { - switch (std.ascii.toLower(buf[1])) { + if (math.cast(u8, buf[1])) |c| switch (std.ascii.toLower(c)) { 'b' => { buf_base = 2; buf_start = buf[2..]; @@ -1589,7 +1600,7 @@ fn parseWithSign( buf_start = buf[2..]; }, else => {}, - } + }; } } @@ -1598,33 +1609,33 @@ fn parseWithSign( .neg => math.sub, }; - // accumulate into U which is always 8 bits or larger. this prevents - // `buf_base` from overflowing T. - const info = @typeInfo(T); - const U = std.meta.Int(info.Int.signedness, @max(8, info.Int.bits)); - var x: U = 0; + // accumulate into Accumulate which is always 8 bits or larger. this prevents + // `buf_base` from overflowing Result. + const info = @typeInfo(Result); + const Accumulate = std.meta.Int(info.Int.signedness, @max(8, info.Int.bits)); + var accumulate: Accumulate = 0; if (buf_start[0] == '_' or buf_start[buf_start.len - 1] == '_') return error.InvalidCharacter; for (buf_start) |c| { if (c == '_') continue; - const digit = try charToDigit(c, buf_base); - if (x != 0) { - x = try math.mul(U, x, math.cast(U, buf_base) orelse return error.Overflow); + const digit = try charToDigit(math.cast(u8, c) orelse return error.InvalidCharacter, buf_base); + if (accumulate != 0) { + accumulate = try math.mul(Accumulate, accumulate, math.cast(Accumulate, buf_base) orelse return error.Overflow); } else if (sign == .neg) { // The first digit of a negative number. // Consider parsing "-4" as an i3. // This should work, but positive 4 overflows i3, so we can't cast the digit to T and subtract. - x = math.cast(U, -@as(i8, @intCast(digit))) orelse return error.Overflow; + accumulate = math.cast(Accumulate, -@as(i8, @intCast(digit))) orelse return error.Overflow; continue; } - x = try add(U, x, math.cast(U, digit) orelse return error.Overflow); + accumulate = try add(Accumulate, accumulate, math.cast(Accumulate, digit) orelse return error.Overflow); } - return if (T == U) - x + return if (Result == Accumulate) + accumulate else - math.cast(T, x) orelse return error.Overflow; + math.cast(Result, accumulate) orelse return error.Overflow; } /// Parses the string `buf` as unsigned representation in the specified base @@ -1639,7 +1650,7 @@ fn parseWithSign( /// Ignores '_' character in `buf`. /// See also `parseInt`. 
pub fn parseUnsigned(comptime T: type, buf: []const u8, base: u8) ParseIntError!T { - return parseWithSign(T, buf, base, .pos); + return parseIntWithSign(T, u8, buf, base, .pos); } test parseUnsigned { diff --git a/lib/std/io/tty.zig b/lib/std/io/tty.zig index baf54a1fdf86..cdeaba81c51e 100644 --- a/lib/std/io/tty.zig +++ b/lib/std/io/tty.zig @@ -24,7 +24,7 @@ pub fn detectConfig(file: File) Config { if (native_os == .windows and file.isTty()) { var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined; - if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) != windows.TRUE) { + if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) == windows.FALSE) { return if (force_color == true) .escape_codes else .no_color; } return .{ .windows_api = .{ diff --git a/lib/std/process.zig b/lib/std/process.zig index e9de2cf51791..7d1f817337c3 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -442,10 +442,7 @@ pub fn parseEnvVarInt(comptime key: []const u8, comptime I: type, base: u8) Pars if (native_os == .windows) { const key_w = comptime std.unicode.utf8ToUtf16LeStringLiteral(key); const text = getenvW(key_w) orelse return error.EnvironmentVariableNotFound; - // For this implementation perhaps std.fmt.parseInt can be expanded to be generic across - // []u8 and []u16 like how many std.mem functions work. - _ = text; - @compileError("TODO implement this"); + return std.fmt.parseIntWithGenericCharacter(I, u16, text, base); } else if (native_os == .wasi and !builtin.link_libc) { @compileError("parseEnvVarInt is not supported for WASI without libc"); } else { diff --git a/src/Module.zig b/src/Module.zig index d38a233a774f..bfd79e611752 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -4504,7 +4504,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato log.debug("finish func name '{}'", .{(decl.fullyQualifiedName(mod) catch break :blk).fmt(ip)}); } - const decl_prog_node = mod.sema_prog_ndoe.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0); + const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0); defer decl_prog_node.end(); mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.Depender.wrap(.{ .func = func_index })); From e8907f9e9ccf4baf661d1b0ab45157a9940e70e3 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 26 May 2024 09:24:30 -0700 Subject: [PATCH 28/60] std.Progress: correct the top level doc comments --- lib/std/Progress.zig | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index af688994865b..e85d2f1507e0 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -1,7 +1,4 @@ -//! This API is non-allocating, non-fallible, and thread-safe. -//! -//! The tradeoff is that users of this API must provide the storage -//! for each `Progress.Node`. +//! This API is non-allocating, non-fallible, thread-safe, and lock-free. 
const std = @import("std"); const builtin = @import("builtin"); From e2e61f329682c88e0a487e049245d865d2df9239 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 26 May 2024 11:10:45 -0700 Subject: [PATCH 29/60] std.process.Child: comptime assert to protect prog_fileno documenting my assumptions via comptime assertion --- lib/std/process/Child.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index 1be56fc4892d..a31afdc66d5e 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -610,6 +610,7 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { for (self.argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr; const prog_fileno = 3; + comptime assert(@max(posix.STDIN_FILENO, posix.STDOUT_FILENO, posix.STDERR_FILENO) + 1 == prog_fileno); const envp: [*:null]const ?[*:0]const u8 = m: { const prog_fd: i32 = if (prog_pipe[1] == -1) -1 else prog_fileno; From e8201734677ffcbe45dac12f2439f2de8e38710d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 26 May 2024 12:07:49 -0700 Subject: [PATCH 30/60] Compilation: fix sub-compilations given wrong progress node --- src/Compilation.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index f7ea2434b369..507cbfc6d55a 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -5968,7 +5968,7 @@ pub fn updateSubCompilation( const sub_node = prog_node.start(@tagName(misc_task), 0); defer sub_node.end(); - try sub_comp.update(prog_node); + try sub_comp.update(sub_node); } // Look for compilation errors in this sub compilation From eb718ceffa0fd656a169a1dd2794e6e99bf5bcaa Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 26 May 2024 12:08:15 -0700 Subject: [PATCH 31/60] std.process: fix compilation on 32-bit targets --- lib/std/process.zig | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/lib/std/process.zig b/lib/std/process.zig index 7d1f817337c3..5fb36f991d3a 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -1836,11 +1836,15 @@ pub fn createEnvironFromMap( break :a .nothing; }; - const envp_count: usize = @intCast(@as(isize, map.count()) + @as(isize, switch (zig_progress_action) { - .add => 1, - .delete => -1, - .nothing, .edit => 0, - })); + const envp_count: usize = c: { + var count: usize = map.count(); + switch (zig_progress_action) { + .add => count += 1, + .delete => count -= 1, + .nothing, .edit => {}, + } + break :c count; + }; const envp_buf = try arena.allocSentinel(?[*:0]u8, envp_count, null); var i: usize = 0; @@ -1901,11 +1905,15 @@ pub fn createEnvironFromExisting( break :a .nothing; }; - const envp_count: usize = @intCast(@as(isize, @intCast(existing_count)) + @as(isize, switch (zig_progress_action) { - .add => 1, - .delete => -1, - .nothing, .edit => 0, - })); + const envp_count: usize = c: { + var count: usize = existing_count; + switch (zig_progress_action) { + .add => count += 1, + .delete => count -= 1, + .nothing, .edit => {}, + } + break :c count; + }; const envp_buf = try arena.allocSentinel(?[*:0]u8, envp_count, null); var i: usize = 0; From d3b03ed64b9c3a4a963e6e5bef536c61c0e3e3c0 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 26 May 2024 12:20:50 -0700 Subject: [PATCH 32/60] frontend: fix use of undefined progress node This was causing a crash when running `zig test`. 
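By this point in the series the caller-facing surface is essentially settled: one process-global root obtained from std.Progress.start, child nodes created with Node.start and finished with Node.end. A minimal usage sketch, mirroring the updated callers above (hypothetical program, not part of the series):

const std = @import("std");

pub fn main() void {
    // One global progress instance per process; output is produced by the
    // update thread, or forwarded to a parent process when ZIG_PROGRESS is set.
    const root = std.Progress.start(.{
        .root_name = "demo",
        .estimated_total_items = 2,
    });
    defer root.end();

    const step1 = root.start("first step", 0);
    // ... do some work ...
    step1.end();

    const step2 = root.start("second step", 0);
    // ... do some more work ...
    step2.end();
}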
--- src/Module.zig | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index bfd79e611752..ef410fad4e33 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5340,15 +5340,13 @@ pub fn populateTestFunctions( // We have to call `ensureDeclAnalyzed` here in case `builtin.test_functions` // was not referenced by start code. mod.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); - mod.codegen_prog_node = main_progress_node.start("Code Generation", 0); defer { mod.sema_prog_node.end(); mod.sema_prog_node = undefined; - mod.codegen_prog_node.end(); - mod.codegen_prog_node = undefined; } try mod.ensureDeclAnalyzed(decl_index); } + const decl = mod.declPtr(decl_index); const test_fn_ty = decl.typeOf(mod).slicePtrFieldType(mod).childType(mod); @@ -5449,7 +5447,15 @@ pub fn populateTestFunctions( decl.val = new_val; decl.has_tv = true; } - try mod.linkerUpdateDecl(decl_index); + { + mod.codegen_prog_node = main_progress_node.start("Code Generation", 0); + defer { + mod.codegen_prog_node.end(); + mod.codegen_prog_node = undefined; + } + + try mod.linkerUpdateDecl(decl_index); + } } pub fn linkerUpdateDecl(zcu: *Zcu, decl_index: Decl.Index) !void { From 7fe72d560d94fc379165ab46c9e568e6e684aa41 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 26 May 2024 14:50:50 -0700 Subject: [PATCH 33/60] std.Progress: move global preallocations to thread memory Instead of making static buffers configurable, let's pick strong defaults and then use the update thread's stack memory to store the preallocations. The thread uses a fairly shallow stack so this memory is otherwise unused. This also makes the data section of the executable smaller since it runtime allocates the memory when a `std.Progress` instance is allocated, and in the case that the process is not connected to a terminal, it never allocates the memory. --- lib/std/Progress.zig | 105 ++++++++++++++++++++++++------------------- 1 file changed, 60 insertions(+), 45 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index e85d2f1507e0..6bb6c0252b16 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -280,7 +280,6 @@ var global_progress: Progress = .{ .draw_buffer = undefined, .done = false, - // TODO: make these configurable and avoid including the globals in .data if unused .node_parents = &node_parents_buffer, .node_storage = &node_storage_buffer, .node_freelist = &node_freelist_buffer, @@ -288,10 +287,10 @@ var global_progress: Progress = .{ .node_end_index = 0, }; -const default_node_storage_buffer_len = 200; -var node_parents_buffer: [default_node_storage_buffer_len]Node.Parent = undefined; -var node_storage_buffer: [default_node_storage_buffer_len]Node.Storage = undefined; -var node_freelist_buffer: [default_node_storage_buffer_len]Node.OptionalIndex = undefined; +const node_storage_buffer_len = 200; +var node_parents_buffer: [node_storage_buffer_len]Node.Parent = undefined; +var node_storage_buffer: [node_storage_buffer_len]Node.Storage = undefined; +var node_freelist_buffer: [node_storage_buffer_len]Node.OptionalIndex = undefined; var default_draw_buffer: [4096]u8 = undefined; @@ -391,14 +390,21 @@ fn wait(timeout_ns: u64) bool { } fn updateThreadRun() void { + // Store this data in the thread so that it does not need to be part of the + // linker data of the main executable. 
+ var serialized_buffer: Serialized.Buffer = undefined; + { const resize_flag = wait(global_progress.initial_delay_ns); maybeUpdateSize(resize_flag); - if (@atomicLoad(bool, &global_progress.done, .seq_cst)) + if (@atomicLoad(bool, &global_progress.done, .seq_cst)) { + stderr_mutex.lock(); + defer stderr_mutex.unlock(); return clearTerminal(); + } - const buffer = computeRedraw(); + const buffer = computeRedraw(&serialized_buffer); if (stderr_mutex.tryLock()) { defer stderr_mutex.unlock(); write(buffer); @@ -409,10 +415,13 @@ fn updateThreadRun() void { const resize_flag = wait(global_progress.refresh_rate_ns); maybeUpdateSize(resize_flag); - if (@atomicLoad(bool, &global_progress.done, .seq_cst)) + if (@atomicLoad(bool, &global_progress.done, .seq_cst)) { + stderr_mutex.lock(); + defer stderr_mutex.unlock(); return clearTerminal(); + } - const buffer = computeRedraw(); + const buffer = computeRedraw(&serialized_buffer); if (stderr_mutex.tryLock()) { defer stderr_mutex.unlock(); write(buffer); @@ -433,13 +442,17 @@ pub fn unlockStdErr() void { } fn ipcThreadRun(fd: posix.fd_t) anyerror!void { + // Store this data in the thread so that it does not need to be part of the + // linker data of the main executable. + var serialized_buffer: Serialized.Buffer = undefined; + { _ = wait(global_progress.initial_delay_ns); if (@atomicLoad(bool, &global_progress.done, .seq_cst)) return; - const serialized = serialize(); + const serialized = serialize(&serialized_buffer); writeIpc(fd, serialized) catch |err| switch (err) { error.BrokenPipe => return, }; @@ -451,7 +464,7 @@ fn ipcThreadRun(fd: posix.fd_t) anyerror!void { if (@atomicLoad(bool, &global_progress.done, .seq_cst)) return clearTerminal(); - const serialized = serialize(); + const serialized = serialize(&serialized_buffer); writeIpc(fd, serialized) catch |err| switch (err) { error.BrokenPipe => return, }; @@ -511,17 +524,18 @@ const Children = struct { sibling: Node.OptionalIndex, }; -// TODO make this configurable -var serialized_node_parents_buffer: [default_node_storage_buffer_len]Node.Parent = undefined; -var serialized_node_storage_buffer: [default_node_storage_buffer_len]Node.Storage = undefined; -var serialized_node_map_buffer: [default_node_storage_buffer_len]Node.Index = undefined; - const Serialized = struct { parents: []Node.Parent, storage: []Node.Storage, + + const Buffer = struct { + parents: [node_storage_buffer_len]Node.Parent, + storage: [node_storage_buffer_len]Node.Storage, + map: [node_storage_buffer_len]Node.Index, + }; }; -fn serialize() Serialized { +fn serialize(serialized_buffer: *Serialized.Buffer) Serialized { var serialized_len: usize = 0; var any_ipc = false; @@ -533,15 +547,15 @@ fn serialize() Serialized { for (node_parents, node_storage, 0..) 
|*parent_ptr, *storage_ptr, i| { var begin_parent = @atomicLoad(Node.Parent, parent_ptr, .seq_cst); while (begin_parent != .unused) { - const dest_storage = &serialized_node_storage_buffer[serialized_len]; + const dest_storage = &serialized_buffer.storage[serialized_len]; @memcpy(&dest_storage.name, &storage_ptr.name); dest_storage.completed_count = @atomicLoad(u32, &storage_ptr.completed_count, .monotonic); dest_storage.estimated_total_count = @atomicLoad(u32, &storage_ptr.estimated_total_count, .monotonic); const end_parent = @atomicLoad(Node.Parent, parent_ptr, .seq_cst); if (begin_parent == end_parent) { any_ipc = any_ipc or (dest_storage.getIpcFd() != null); - serialized_node_parents_buffer[serialized_len] = begin_parent; - serialized_node_map_buffer[i] = @enumFromInt(serialized_len); + serialized_buffer.parents[serialized_len] = begin_parent; + serialized_buffer.map[i] = @enumFromInt(serialized_len); serialized_len += 1; break; } @@ -551,29 +565,29 @@ fn serialize() Serialized { } // Remap parents to point inside serialized arrays. - for (serialized_node_parents_buffer[0..serialized_len]) |*parent| { + for (serialized_buffer.parents[0..serialized_len]) |*parent| { parent.* = switch (parent.*) { .unused => unreachable, .none => .none, - _ => |p| serialized_node_map_buffer[@intFromEnum(p)].toParent(), + _ => |p| serialized_buffer.map[@intFromEnum(p)].toParent(), }; } // Find nodes which correspond to child processes. if (any_ipc) - serialized_len = serializeIpc(serialized_len); + serialized_len = serializeIpc(serialized_len, serialized_buffer); return .{ - .parents = serialized_node_parents_buffer[0..serialized_len], - .storage = serialized_node_storage_buffer[0..serialized_len], + .parents = serialized_buffer.parents[0..serialized_len], + .storage = serialized_buffer.storage[0..serialized_len], }; } -var parents_copy: [default_node_storage_buffer_len]Node.Parent = undefined; -var storage_copy: [default_node_storage_buffer_len]Node.Storage = undefined; -var ipc_metadata_copy: [default_node_storage_buffer_len]SavedMetadata = undefined; +var parents_copy: [node_storage_buffer_len]Node.Parent = undefined; +var storage_copy: [node_storage_buffer_len]Node.Storage = undefined; +var ipc_metadata_copy: [node_storage_buffer_len]SavedMetadata = undefined; -var ipc_metadata: [default_node_storage_buffer_len]SavedMetadata = undefined; +var ipc_metadata: [node_storage_buffer_len]SavedMetadata = undefined; var ipc_metadata_len: u16 = 0; const SavedMetadata = struct { @@ -597,7 +611,7 @@ const SavedMetadata = struct { } }; -fn serializeIpc(start_serialized_len: usize) usize { +fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buffer) usize { var serialized_len = start_serialized_len; var pipe_buf: [2 * 4096]u8 align(4) = undefined; @@ -605,8 +619,8 @@ fn serializeIpc(start_serialized_len: usize) usize { ipc_metadata_len = 0; main_loop: for ( - serialized_node_parents_buffer[0..serialized_len], - serialized_node_storage_buffer[0..serialized_len], + serialized_buffer.parents[0..serialized_len], + serialized_buffer.storage[0..serialized_len], 0.., ) |main_parent, *main_storage, main_index| { if (main_parent == .unused) continue; @@ -628,7 +642,7 @@ fn serializeIpc(start_serialized_len: usize) usize { // Ignore all but the last message on the pipe. 
var input: []align(2) u8 = pipe_buf[0..bytes_read]; if (input.len == 0) { - serialized_len = useSavedIpcData(serialized_len, main_storage, main_index, old_ipc_metadata); + serialized_len = useSavedIpcData(serialized_len, serialized_buffer, main_storage, main_index, old_ipc_metadata); continue; } @@ -636,7 +650,7 @@ fn serializeIpc(start_serialized_len: usize) usize { if (input.len < 4) { std.log.warn("short read: {d} out of 4 header bytes", .{input.len}); // TODO keep track of the short read to trash odd bytes with the next read - serialized_len = useSavedIpcData(serialized_len, main_storage, main_index, old_ipc_metadata); + serialized_len = useSavedIpcData(serialized_len, serialized_buffer, main_storage, main_index, old_ipc_metadata); continue :main_loop; } const subtree_len = std.mem.readInt(u32, input[0..4], .little); @@ -644,7 +658,7 @@ fn serializeIpc(start_serialized_len: usize) usize { if (input.len < expected_bytes) { std.log.warn("short read: {d} out of {d} ({d} nodes)", .{ input.len, expected_bytes, subtree_len }); // TODO keep track of the short read to trash odd bytes with the next read - serialized_len = useSavedIpcData(serialized_len, main_storage, main_index, old_ipc_metadata); + serialized_len = useSavedIpcData(serialized_len, serialized_buffer, main_storage, main_index, old_ipc_metadata); continue :main_loop; } if (input.len > expected_bytes) { @@ -672,12 +686,12 @@ fn serializeIpc(start_serialized_len: usize) usize { copyRoot(main_storage, &storage[0]); // Copy the rest of the tree to the end. - @memcpy(serialized_node_storage_buffer[serialized_len..][0 .. storage.len - 1], storage[1..]); + @memcpy(serialized_buffer.storage[serialized_len..][0 .. storage.len - 1], storage[1..]); // Patch up parent pointers taking into account how the subtree is mounted. - serialized_node_parents_buffer[serialized_len] = .none; + serialized_buffer.parents[serialized_len] = .none; - for (serialized_node_parents_buffer[serialized_len..][0 .. parents.len - 1], parents[1..]) |*dest, p| { + for (serialized_buffer.parents[serialized_len..][0 .. parents.len - 1], parents[1..]) |*dest, p| { dest.* = switch (p) { // Fix bad data so the rest of the code does not see `unused`. .none, .unused => .none, @@ -693,8 +707,8 @@ fn serializeIpc(start_serialized_len: usize) usize { } // Save a copy in case any pipes are empty on the next update. 
- @memcpy(parents_copy[0..serialized_len], serialized_node_parents_buffer[0..serialized_len]); - @memcpy(storage_copy[0..serialized_len], serialized_node_storage_buffer[0..serialized_len]); + @memcpy(parents_copy[0..serialized_len], serialized_buffer.parents[0..serialized_len]); + @memcpy(storage_copy[0..serialized_len], serialized_buffer.storage[0..serialized_len]); @memcpy(ipc_metadata_copy[0..ipc_metadata_len], ipc_metadata[0..ipc_metadata_len]); return serialized_len; @@ -718,6 +732,7 @@ fn findOld(ipc_fd: posix.fd_t, old_metadata: []const SavedMetadata) ?*const Save fn useSavedIpcData( start_serialized_len: usize, + serialized_buffer: *Serialized.Buffer, main_storage: *Node.Storage, main_index: usize, old_metadata: []const SavedMetadata, @@ -746,9 +761,9 @@ fn useSavedIpcData( copyRoot(main_storage, &storage_copy[old_main_index]); - @memcpy(serialized_node_storage_buffer[start_serialized_len..][0..storage.len], storage); + @memcpy(serialized_buffer.storage[start_serialized_len..][0..storage.len], storage); - for (serialized_node_parents_buffer[start_serialized_len..][0..parents.len], parents) |*dest, p| { + for (serialized_buffer.parents[start_serialized_len..][0..parents.len], parents) |*dest, p| { dest.* = switch (p) { .none, .unused => .none, _ => |prev| @enumFromInt(if (@intFromEnum(prev) == old_main_index) @@ -761,14 +776,14 @@ fn useSavedIpcData( return start_serialized_len + storage.len; } -fn computeRedraw() []u8 { - const serialized = serialize(); +fn computeRedraw(serialized_buffer: *Serialized.Buffer) []u8 { + const serialized = serialize(serialized_buffer); // Now we can analyze our copy of the graph without atomics, reconstructing // children lists which do not exist in the canonical data. These are // needed for tree traversal below. 
- var children_buffer: [default_node_storage_buffer_len]Children = undefined; + var children_buffer: [node_storage_buffer_len]Children = undefined; const children = children_buffer[0..serialized.parents.len]; @memset(children, .{ .child = .none, .sibling = .none }); From 807b613f71bf97d8169b6ce3ed7ad0c0dc8bb402 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 26 May 2024 15:17:10 -0700 Subject: [PATCH 34/60] std.Progress: move more global preallocations to thread memory Same idea as previous commit --- lib/std/Progress.zig | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 6bb6c0252b16..8f3be0227282 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -532,6 +532,12 @@ const Serialized = struct { parents: [node_storage_buffer_len]Node.Parent, storage: [node_storage_buffer_len]Node.Storage, map: [node_storage_buffer_len]Node.Index, + + parents_copy: [node_storage_buffer_len]Node.Parent, + storage_copy: [node_storage_buffer_len]Node.Storage, + ipc_metadata_copy: [node_storage_buffer_len]SavedMetadata, + + ipc_metadata: [node_storage_buffer_len]SavedMetadata, }; }; @@ -583,11 +589,6 @@ fn serialize(serialized_buffer: *Serialized.Buffer) Serialized { }; } -var parents_copy: [node_storage_buffer_len]Node.Parent = undefined; -var storage_copy: [node_storage_buffer_len]Node.Storage = undefined; -var ipc_metadata_copy: [node_storage_buffer_len]SavedMetadata = undefined; - -var ipc_metadata: [node_storage_buffer_len]SavedMetadata = undefined; var ipc_metadata_len: u16 = 0; const SavedMetadata = struct { @@ -612,6 +613,9 @@ const SavedMetadata = struct { }; fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buffer) usize { + const ipc_metadata_copy = &serialized_buffer.ipc_metadata_copy; + const ipc_metadata = &serialized_buffer.ipc_metadata; + var serialized_len = start_serialized_len; var pipe_buf: [2 * 4096]u8 align(4) = undefined; @@ -707,8 +711,8 @@ fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buff } // Save a copy in case any pipes are empty on the next update. - @memcpy(parents_copy[0..serialized_len], serialized_buffer.parents[0..serialized_len]); - @memcpy(storage_copy[0..serialized_len], serialized_buffer.storage[0..serialized_len]); + @memcpy(serialized_buffer.parents_copy[0..serialized_len], serialized_buffer.parents[0..serialized_len]); + @memcpy(serialized_buffer.storage_copy[0..serialized_len], serialized_buffer.storage[0..serialized_len]); @memcpy(ipc_metadata_copy[0..ipc_metadata_len], ipc_metadata[0..ipc_metadata_len]); return serialized_len; @@ -737,6 +741,10 @@ fn useSavedIpcData( main_index: usize, old_metadata: []const SavedMetadata, ) usize { + const parents_copy = &serialized_buffer.parents_copy; + const storage_copy = &serialized_buffer.storage_copy; + const ipc_metadata = &serialized_buffer.ipc_metadata; + const ipc_fd = main_storage.getIpcFd().?; const saved_metadata = findOld(ipc_fd, old_metadata) orelse { main_storage.completed_count = 0; From 52ed54d1e7a396f4508829af45c6953f1a827a1c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 26 May 2024 16:05:38 -0700 Subject: [PATCH 35/60] std.Progress: truncate IPC data exceeding preallocated buffers This accomplishes 2 things simultaneously: 1. Don't trust child process data; if the data is outside the expected range, ignore the data. 2. If there is too much data to fit in the preallocated buffers, drop the data. 
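Concretely: cap the number of copied nodes by the space left in the
preallocated arrays, then refuse to trust any parent index that points
outside the range that was actually kept. A standalone sketch of that
clamp-and-validate step (hypothetical `appendUntrusted` helper and
types, not the std.Progress code below):

    const std = @import("std");

    fn appendUntrusted(dest: []?u8, dest_len: usize, src: []const u8) usize {
        // Drop anything that does not fit in the remaining capacity.
        const kept = @min(src.len, dest.len - dest_len);
        for (dest[dest_len..][0..kept], src[0..kept]) |*out, raw| {
            // Only indices inside the kept range are trusted;
            // everything else is treated as "no parent".
            out.* = if (raw < kept) raw else null;
        }
        return dest_len + kept;
    }

    test appendUntrusted {
        var buf: [4]?u8 = undefined;
        const len = appendUntrusted(&buf, 2, &.{ 0, 9, 1 });
        try std.testing.expectEqual(@as(usize, 4), len); // third item dropped
        try std.testing.expectEqual(@as(?u8, 0), buf[2]); // in range, kept
        try std.testing.expectEqual(@as(?u8, null), buf[3]); // out of range, ignored
    }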
--- lib/std/Progress.zig | 45 ++++++++++++++++++++++++++------------------ 1 file changed, 27 insertions(+), 18 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 8f3be0227282..921b0ce25186 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -593,9 +593,9 @@ var ipc_metadata_len: u16 = 0; const SavedMetadata = struct { ipc_fd: u16, - main_index: u16, - start_index: u16, - nodes_len: u16, + main_index: u8, + start_index: u8, + nodes_len: u8, fn getIpcFd(metadata: SavedMetadata) posix.fd_t { return if (builtin.os.tag == .windows) @@ -677,11 +677,13 @@ fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buff }; }; + const nodes_len: u8 = @intCast(@min(parents.len - 1, serialized_buffer.storage.len - serialized_len)); + // Remember in case the pipe is empty on next update. ipc_metadata[ipc_metadata_len] = .{ .ipc_fd = SavedMetadata.setIpcFd(fd), .start_index = @intCast(serialized_len), - .nodes_len = @intCast(parents.len), + .nodes_len = nodes_len, .main_index = @intCast(main_index), }; ipc_metadata_len += 1; @@ -690,24 +692,26 @@ fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buff copyRoot(main_storage, &storage[0]); // Copy the rest of the tree to the end. - @memcpy(serialized_buffer.storage[serialized_len..][0 .. storage.len - 1], storage[1..]); + @memcpy(serialized_buffer.storage[serialized_len..][0..nodes_len], storage[1..][0..nodes_len]); // Patch up parent pointers taking into account how the subtree is mounted. - serialized_buffer.parents[serialized_len] = .none; - - for (serialized_buffer.parents[serialized_len..][0 .. parents.len - 1], parents[1..]) |*dest, p| { + for (serialized_buffer.parents[serialized_len..][0..nodes_len], parents[1..][0..nodes_len]) |*dest, p| { dest.* = switch (p) { // Fix bad data so the rest of the code does not see `unused`. .none, .unused => .none, // Root node is being mounted here. @as(Node.Parent, @enumFromInt(0)) => @enumFromInt(main_index), // Other nodes mounted at the end. - // TODO check for bad data pointing outside the expected range - _ => |off| @enumFromInt(serialized_len + @intFromEnum(off) - 1), + // Don't trust child data; if the data is outside the expected range, ignore the data. + // This also handles the case when data was truncated. + _ => |off| if (@intFromEnum(off) > nodes_len) + .none + else + @enumFromInt(serialized_len + @intFromEnum(off) - 1), }; } - serialized_len += storage.len - 1; + serialized_len += nodes_len; } // Save a copy in case any pipes are empty on the next update. @@ -753,7 +757,7 @@ fn useSavedIpcData( }; const start_index = saved_metadata.start_index; - const nodes_len = saved_metadata.nodes_len; + const nodes_len = @min(saved_metadata.nodes_len, serialized_buffer.storage.len - start_serialized_len); const old_main_index = saved_metadata.main_index; ipc_metadata[ipc_metadata_len] = .{ @@ -764,8 +768,8 @@ fn useSavedIpcData( }; ipc_metadata_len += 1; - const parents = parents_copy[start_index..][0 .. nodes_len - 1]; - const storage = storage_copy[start_index..][0 .. 
nodes_len - 1]; + const parents = parents_copy[start_index..][0..nodes_len]; + const storage = storage_copy[start_index..][0..nodes_len]; copyRoot(main_storage, &storage_copy[old_main_index]); @@ -774,10 +778,15 @@ fn useSavedIpcData( for (serialized_buffer.parents[start_serialized_len..][0..parents.len], parents) |*dest, p| { dest.* = switch (p) { .none, .unused => .none, - _ => |prev| @enumFromInt(if (@intFromEnum(prev) == old_main_index) - main_index - else - @intFromEnum(prev) - start_index + start_serialized_len), + _ => |prev| d: { + if (@intFromEnum(prev) == old_main_index) { + break :d @enumFromInt(main_index); + } else if (@intFromEnum(prev) > nodes_len) { + break :d .none; + } else { + break :d @enumFromInt(@intFromEnum(prev) - start_index + start_serialized_len); + } + }, }; } From 11f894702b7c06b87f6e94eff719d9f83eaeeddf Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 08:38:02 -0700 Subject: [PATCH 36/60] std.Progress: avoid scrolling the PS1 off the terminal --- lib/std/Progress.zig | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 921b0ce25186..bb3d1cc2a264 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -908,13 +908,13 @@ fn computeNode( global_progress.newline_count += 1; } - if (global_progress.newline_count < global_progress.rows) { + if (global_progress.withinRowLimit()) { if (children[@intFromEnum(node_index)].child.unwrap()) |child| { i = computeNode(buf, i, serialized, children, child); } } - if (global_progress.newline_count < global_progress.rows) { + if (global_progress.withinRowLimit()) { if (children[@intFromEnum(node_index)].sibling.unwrap()) |sibling| { i = computeNode(buf, i, serialized, children, sibling); } @@ -923,6 +923,11 @@ fn computeNode( return i; } +fn withinRowLimit(p: *Progress) bool { + // The +1 here is so that the PS1 is not scrolled off the top of the terminal. + return p.newline_count + 1 < p.rows; +} + fn write(buf: []const u8) void { const tty = global_progress.terminal orelse return; tty.writeAll(buf) catch { From ea7d8ec14752575a68733742be2ee7a583eea49e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 09:06:41 -0700 Subject: [PATCH 37/60] std.Progress: smaller type for parents and robustify Switch Node.Parent, Node.Index, and Node.OptionalIndex to be backed by u8 rather than u16. This works fine since we use 200 as the preallocated node buffer. This has the nice property that scanning the entire parents array for allocated nodes fits in 4 cache lines, even if we bumped the 200 up to 254 (leaving room for the two special states). The thread that reads progress updates from the pipe now handles short reads by ignoring messages that are sent in multiple reads. When checking the terminal size, if there is a failure, fall back to a conservative guess of 80x25 rather than panicking. A debug message is also emitted which would be displayed only in a debug build. --- lib/std/Progress.zig | 64 ++++++++++++++++++++++++++------------------ 1 file changed, 38 insertions(+), 26 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index bb3d1cc2a264..fea09168baaf 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -104,11 +104,11 @@ pub const Node = struct { } }; - const Parent = enum(u16) { + const Parent = enum(u8) { /// Unallocated storage. - unused = std.math.maxInt(u16) - 1, + unused = std.math.maxInt(u8) - 1, /// Indicates root node. 
- none = std.math.maxInt(u16), + none = std.math.maxInt(u8), /// Index into `node_storage`. _, @@ -120,8 +120,8 @@ pub const Node = struct { } }; - const OptionalIndex = enum(u16) { - none = std.math.maxInt(u16), + const OptionalIndex = enum(u8) { + none = std.math.maxInt(u8), /// Index into `node_storage`. _, @@ -137,7 +137,7 @@ pub const Node = struct { }; /// Index into `node_storage`. - const Index = enum(u16) { + const Index = enum(u8) { _, fn toParent(i: @This()) Parent { @@ -589,8 +589,6 @@ fn serialize(serialized_buffer: *Serialized.Buffer) Serialized { }; } -var ipc_metadata_len: u16 = 0; - const SavedMetadata = struct { ipc_fd: u16, main_index: u8, @@ -612,6 +610,9 @@ const SavedMetadata = struct { } }; +var ipc_metadata_len: u8 = 0; +var remaining_read_trash_bytes: usize = 0; + fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buffer) usize { const ipc_metadata_copy = &serialized_buffer.ipc_metadata_copy; const ipc_metadata = &serialized_buffer.ipc_metadata; @@ -641,36 +642,43 @@ fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buff }, }; if (n == 0) break; + if (remaining_read_trash_bytes > 0) { + assert(bytes_read == 0); + if (remaining_read_trash_bytes >= n) { + remaining_read_trash_bytes -= n; + continue; + } + const src = pipe_buf[remaining_read_trash_bytes..n]; + std.mem.copyForwards(u8, &pipe_buf, src); + remaining_read_trash_bytes = 0; + bytes_read = src.len; + continue; + } bytes_read += n; } // Ignore all but the last message on the pipe. - var input: []align(2) u8 = pipe_buf[0..bytes_read]; + var input: []u8 = pipe_buf[0..bytes_read]; if (input.len == 0) { serialized_len = useSavedIpcData(serialized_len, serialized_buffer, main_storage, main_index, old_ipc_metadata); continue; } const storage, const parents = while (true) { - if (input.len < 4) { - std.log.warn("short read: {d} out of 4 header bytes", .{input.len}); - // TODO keep track of the short read to trash odd bytes with the next read - serialized_len = useSavedIpcData(serialized_len, serialized_buffer, main_storage, main_index, old_ipc_metadata); - continue :main_loop; - } - const subtree_len = std.mem.readInt(u32, input[0..4], .little); - const expected_bytes = 4 + subtree_len * (@sizeOf(Node.Storage) + @sizeOf(Node.Parent)); + const subtree_len: usize = input[0]; + const expected_bytes = 1 + subtree_len * (@sizeOf(Node.Storage) + @sizeOf(Node.Parent)); if (input.len < expected_bytes) { - std.log.warn("short read: {d} out of {d} ({d} nodes)", .{ input.len, expected_bytes, subtree_len }); - // TODO keep track of the short read to trash odd bytes with the next read + // Ignore short reads. We'll handle the next full message when it comes instead. + assert(remaining_read_trash_bytes == 0); + remaining_read_trash_bytes = expected_bytes - input.len; serialized_len = useSavedIpcData(serialized_len, serialized_buffer, main_storage, main_index, old_ipc_metadata); continue :main_loop; } if (input.len > expected_bytes) { - input = @alignCast(input[expected_bytes..]); + input = input[expected_bytes..]; continue; } - const storage_bytes = input[4..][0 .. subtree_len * @sizeOf(Node.Storage)]; - const parents_bytes = input[4 + storage_bytes.len ..][0 .. subtree_len * @sizeOf(Node.Parent)]; + const storage_bytes = input[1..][0 .. subtree_len * @sizeOf(Node.Storage)]; + const parents_bytes = input[1 + storage_bytes.len ..][0 .. 
subtree_len * @sizeOf(Node.Parent)]; break .{ std.mem.bytesAsSlice(Node.Storage, storage_bytes), std.mem.bytesAsSlice(Node.Parent, parents_bytes), @@ -722,7 +730,7 @@ fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buff return serialized_len; } -fn copyRoot(dest: *Node.Storage, src: *align(2) Node.Storage) void { +fn copyRoot(dest: *Node.Storage, src: *align(1) Node.Storage) void { dest.* = .{ .completed_count = src.completed_count, .estimated_total_count = src.estimated_total_count, @@ -937,7 +945,7 @@ fn write(buf: []const u8) void { fn writeIpc(fd: posix.fd_t, serialized: Serialized) error{BrokenPipe}!void { assert(serialized.parents.len == serialized.storage.len); - const serialized_len: u32 = @intCast(serialized.parents.len); + const serialized_len: u8 = @intCast(serialized.parents.len); const header = std.mem.asBytes(&serialized_len); const storage = std.mem.sliceAsBytes(serialized.storage); const parents = std.mem.sliceAsBytes(serialized.parents); @@ -977,7 +985,9 @@ fn maybeUpdateSize(resize_flag: bool) void { var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined; if (windows.kernel32.GetConsoleScreenBufferInfo(fd, &info) == windows.FALSE) { - @panic("TODO: handle this failure"); + std.log.debug("failed to determine terminal size; using conservative guess 80x25", .{}); + global_progress.rows = 25; + global_progress.cols = 80; } global_progress.rows = @intCast(info.dwSize.Y); @@ -995,7 +1005,9 @@ fn maybeUpdateSize(resize_flag: bool) void { global_progress.rows = winsize.ws_row; global_progress.cols = winsize.ws_col; } else { - @panic("TODO: handle this failure"); + std.log.debug("failed to determine terminal size; using conservative guess 80x25", .{}); + global_progress.rows = 25; + global_progress.cols = 80; } } } From 0ca2b4e0f1b34122aada3c649ce3a627f3377ca3 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 09:14:04 -0700 Subject: [PATCH 38/60] std.Progress: use std.log.debug rather than warn when the errors could possibly be spammed many times --- lib/std/Progress.zig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index fea09168baaf..679b44a5db08 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -635,7 +635,7 @@ fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buff const n = posix.read(fd, pipe_buf[bytes_read..]) catch |err| switch (err) { error.WouldBlock => break, else => |e| { - std.log.warn("failed to read child progress data: {s}", .{@errorName(e)}); + std.log.debug("failed to read child progress data: {s}", .{@errorName(e)}); main_storage.completed_count = 0; main_storage.estimated_total_count = 0; continue :main_loop; @@ -964,13 +964,13 @@ fn writeIpc(fd: posix.fd_t, serialized: Serialized) error{BrokenPipe}!void { if (posix.writev(fd, &vecs)) |written| { const total = header.len + storage.len + parents.len; if (written < total) { - std.log.warn("short write: {d} out of {d}", .{ written, total }); + std.log.debug("short write: {d} out of {d}", .{ written, total }); } } else |err| switch (err) { error.WouldBlock => {}, error.BrokenPipe => return error.BrokenPipe, else => |e| { - std.log.warn("failed to send progress to parent process: {s}", .{@errorName(e)}); + std.log.debug("failed to send progress to parent process: {s}", .{@errorName(e)}); return error.BrokenPipe; }, } From 849693f07c882fad369e557940583b8ac9d1c648 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 09:36:45 -0700 Subject: [PATCH 
39/60] zig build: give a root progress name Now it's more clear when zig is building the build script vs building the actual project. --- src/main.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/src/main.zig b/src/main.zig index ac8a483d1732..3ee90671b2c1 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4795,6 +4795,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { const color: Color = .auto; const root_prog_node = std.Progress.start(.{ .disable_printing = (color == .off), + .root_name = "Compile Build Script", }); defer root_prog_node.end(); From 52ffdec74b5854bc842107f40f9fa31b40cf5432 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 09:48:13 -0700 Subject: [PATCH 40/60] std.Progress: keep cursor on newline Don't truncate trailing newline. This better handles stray writes to stderr that are not std.Progress-aware, such as from non-zig child processes. This commit also makes `Node.start` and `Node.end` bail out early with a comptime branch when it is known the target will not be spawning an update thread. --- lib/std/Progress.zig | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 679b44a5db08..4a40aed1d2c5 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -155,6 +155,10 @@ pub const Node = struct { /// /// Passing 0 for `estimated_total_items` means unknown. pub fn start(node: Node, name: []const u8, estimated_total_items: usize) Node { + if (noop_impl) { + assert(node.index == .none); + return .{ .index = .none }; + } const node_index = node.index.unwrap() orelse return .{ .index = .none }; const parent = node_index.toParent(); @@ -208,6 +212,10 @@ pub const Node = struct { /// Finish a started `Node`. Thread-safe. pub fn end(n: Node) void { + if (noop_impl) { + assert(n.index == .none); + return; + } const index = n.index.unwrap() orelse return; const parent_ptr = parentByIndex(index); if (parent_ptr.unwrap()) |parent_index| { @@ -296,6 +304,11 @@ var default_draw_buffer: [4096]u8 = undefined; var debug_start_trace = std.debug.Trace.init; +const noop_impl = builtin.single_threaded or switch (builtin.os.tag) { + .wasi, .freestanding => true, + else => false, +}; + /// Initializes a global Progress instance. /// /// Asserts there is only one global Progress instance. @@ -319,6 +332,9 @@ pub fn start(options: Options) Node { global_progress.refresh_rate_ns = options.refresh_rate_ns; global_progress.initial_delay_ns = options.initial_delay_ns; + if (noop_impl) + return .{ .index = .none }; + if (std.process.parseEnvVarInt("ZIG_PROGRESS", u31, 10)) |ipc_fd| { global_progress.update_thread = std.Thread.spawn(.{}, ipcThreadRun, .{ @as(posix.fd_t, switch (@typeInfo(posix.fd_t)) { @@ -507,7 +523,7 @@ fn computeClear(buf: []u8, start_i: usize) usize { global_progress.newline_count = 0; buf[i] = '\r'; i += 1; - for (1..prev_nl_n) |_| { + for (0..prev_nl_n) |_| { buf[i..][0..up_one_line.len].* = up_one_line.*; i += up_one_line.len; } @@ -841,9 +857,6 @@ fn computeRedraw(serialized_buffer: *Serialized.Buffer) []u8 { const root_node_index: Node.Index = @enumFromInt(0); i = computeNode(buf, i, serialized, children, root_node_index); - // Truncate trailing newline. - if (buf[i - 1] == '\n') i -= 1; - buf[i..][0..finish_sync.len].* = finish_sync.*; i += finish_sync.len; @@ -932,8 +945,10 @@ fn computeNode( } fn withinRowLimit(p: *Progress) bool { - // The +1 here is so that the PS1 is not scrolled off the top of the terminal. 
- return p.newline_count + 1 < p.rows; + // The +2 here is so that the PS1 is not scrolled off the top of the terminal. + // one because we keep the cursor on the next line + // one more to account for the PS1 + return p.newline_count + 2 < p.rows; } fn write(buf: []const u8) void { From 6145819c0ba00924b37bab78200aeab6306c1672 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 10:04:00 -0700 Subject: [PATCH 41/60] std.Progress: handle when terminal write buffer too small --- lib/std/Progress.zig | 37 ++++++++++++++++++++++++------------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 4a40aed1d2c5..a28c129ba700 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -875,15 +875,23 @@ fn computePrefix( if (serialized.parents[@intFromEnum(parent_index)] == .none) return i; i = computePrefix(buf, i, serialized, children, parent_index); if (children[@intFromEnum(parent_index)].sibling == .none) { - buf[i..][0..3].* = " ".*; - i += 3; + const prefix = " "; + const upper_bound_len = prefix.len + line_upper_bound_len; + if (i + upper_bound_len > buf.len) return buf.len; + buf[i..][0..prefix.len].* = prefix.*; + i += prefix.len; } else { + const upper_bound_len = tree_line.len + line_upper_bound_len; + if (i + upper_bound_len > buf.len) return buf.len; buf[i..][0..tree_line.len].* = tree_line.*; i += tree_line.len; } return i; } +const line_upper_bound_len = @max(tree_tee.len, tree_langle.len) + "[4294967296/4294967296] ".len + + Node.max_name_len + finish_sync.len; + fn computeNode( buf: []u8, start_i: usize, @@ -894,6 +902,9 @@ fn computeNode( var i = start_i; i = computePrefix(buf, i, serialized, children, node_index); + if (i + line_upper_bound_len > buf.len) + return start_i; + const storage = &serialized.storage[@intFromEnum(node_index)]; const estimated_total = storage.estimated_total_count; const completed_items = storage.completed_count; @@ -910,19 +921,19 @@ fn computeNode( } } - if (name.len != 0 or estimated_total > 0) { - if (estimated_total > 0) { - i += (std.fmt.bufPrint(buf[i..], "[{d}/{d}] ", .{ completed_items, estimated_total }) catch &.{}).len; - } else if (completed_items != 0) { - i += (std.fmt.bufPrint(buf[i..], "[{d}] ", .{completed_items}) catch &.{}).len; - } - if (name.len != 0) { - i += (std.fmt.bufPrint(buf[i..], "{s}", .{name}) catch &.{}).len; - } - } - const is_empty_root = @intFromEnum(node_index) == 0 and serialized.storage[0].name[0] == 0; if (!is_empty_root) { + if (name.len != 0 or estimated_total > 0) { + if (estimated_total > 0) { + i += (std.fmt.bufPrint(buf[i..], "[{d}/{d}] ", .{ completed_items, estimated_total }) catch &.{}).len; + } else if (completed_items != 0) { + i += (std.fmt.bufPrint(buf[i..], "[{d}] ", .{completed_items}) catch &.{}).len; + } + if (name.len != 0) { + i += (std.fmt.bufPrint(buf[i..], "{s}", .{name}) catch &.{}).len; + } + } + i = @min(global_progress.cols + start_i, i); buf[i] = '\n'; i += 1; From dc3a192ae841706bb965218c68b0085bbec2b35e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 10:29:39 -0700 Subject: [PATCH 42/60] std.Progress: count newlines more accurately Split newline_count into written_newline_count and accumulated_newline_count. This handle the case when the tryLock() fails to obtain the lock, because in such case there would not be any newlines written to the terminal but the system would incorrectly think there were. Now, written_newline_count is only adjusted when the write() call succeeds. 
Furthermore, write() call failure is handled by exiting the update thread. --- lib/std/Progress.zig | 37 +++++++++++++++++++++---------------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index a28c129ba700..412245d3e6f3 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -32,9 +32,11 @@ initial_delay_ns: u64, rows: u16, cols: u16, -/// Needed because terminal escape codes require one to take scrolling into -/// account. -newline_count: u16, +/// Tracks the number of newlines that have been actually written to the terminal. +written_newline_count: u16, +/// Tracks the number of newlines that will be written to the terminal if the +/// draw buffer is sent. +accumulated_newline_count: u16, /// Accessed only by the update thread. draw_buffer: []u8, @@ -284,7 +286,8 @@ var global_progress: Progress = .{ .initial_delay_ns = undefined, .rows = 0, .cols = 0, - .newline_count = 0, + .written_newline_count = 0, + .accumulated_newline_count = 0, .draw_buffer = undefined, .done = false, @@ -423,7 +426,7 @@ fn updateThreadRun() void { const buffer = computeRedraw(&serialized_buffer); if (stderr_mutex.tryLock()) { defer stderr_mutex.unlock(); - write(buffer); + write(buffer) catch return; } } @@ -440,7 +443,7 @@ fn updateThreadRun() void { const buffer = computeRedraw(&serialized_buffer); if (stderr_mutex.tryLock()) { defer stderr_mutex.unlock(); - write(buffer); + write(buffer) catch return; } } } @@ -499,7 +502,7 @@ const tree_line = "\x1B\x28\x30\x78\x1B\x28\x42 "; // │ const tree_langle = "\x1B\x28\x30\x6d\x71\x1B\x28\x42 "; // └─ fn clearTerminal() void { - if (global_progress.newline_count == 0) return; + if (global_progress.written_newline_count == 0) return; var i: usize = 0; const buf = global_progress.draw_buffer; @@ -512,15 +515,17 @@ fn clearTerminal() void { buf[i..][0..finish_sync.len].* = finish_sync.*; i += finish_sync.len; - write(buf[0..i]); + global_progress.accumulated_newline_count = 0; + write(buf[0..i]) catch { + global_progress.terminal = null; + }; } fn computeClear(buf: []u8, start_i: usize) usize { var i = start_i; - const prev_nl_n = global_progress.newline_count; + const prev_nl_n = global_progress.written_newline_count; if (prev_nl_n > 0) { - global_progress.newline_count = 0; buf[i] = '\r'; i += 1; for (0..prev_nl_n) |_| { @@ -854,6 +859,7 @@ fn computeRedraw(serialized_buffer: *Serialized.Buffer) []u8 { i = computeClear(buf, i); + global_progress.accumulated_newline_count = 0; const root_node_index: Node.Index = @enumFromInt(0); i = computeNode(buf, i, serialized, children, root_node_index); @@ -937,7 +943,7 @@ fn computeNode( i = @min(global_progress.cols + start_i, i); buf[i] = '\n'; i += 1; - global_progress.newline_count += 1; + global_progress.accumulated_newline_count += 1; } if (global_progress.withinRowLimit()) { @@ -959,14 +965,13 @@ fn withinRowLimit(p: *Progress) bool { // The +2 here is so that the PS1 is not scrolled off the top of the terminal. 
// one because we keep the cursor on the next line // one more to account for the PS1 - return p.newline_count + 2 < p.rows; + return p.accumulated_newline_count + 2 < p.rows; } -fn write(buf: []const u8) void { +fn write(buf: []const u8) anyerror!void { const tty = global_progress.terminal orelse return; - tty.writeAll(buf) catch { - global_progress.terminal = null; - }; + try tty.writeAll(buf); + global_progress.written_newline_count = global_progress.accumulated_newline_count; } fn writeIpc(fd: posix.fd_t, serialized: Serialized) error{BrokenPipe}!void { From 64c6a5092cc8910eb597e52b52e744de5e54ba7f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 10:47:53 -0700 Subject: [PATCH 43/60] std.Progress: elide root node if empty when the root progress node has a zero length name, the sub-tree is flattened one layer, reducing visual noise, as well as bytes written to the terminal. --- lib/std/Progress.zig | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 412245d3e6f3..12874125e2f6 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -879,6 +879,11 @@ fn computePrefix( var i = start_i; const parent_index = serialized.parents[@intFromEnum(node_index)].unwrap() orelse return i; if (serialized.parents[@intFromEnum(parent_index)] == .none) return i; + if (@intFromEnum(serialized.parents[@intFromEnum(parent_index)]) == 0 and + serialized.storage[0].name[0] == 0) + { + return i; + } i = computePrefix(buf, i, serialized, children, parent_index); if (children[@intFromEnum(parent_index)].sibling == .none) { const prefix = " "; @@ -917,7 +922,10 @@ fn computeNode( const name = if (std.mem.indexOfScalar(u8, &storage.name, 0)) |end| storage.name[0..end] else &storage.name; const parent = serialized.parents[@intFromEnum(node_index)]; - if (parent != .none) { + if (parent != .none) p: { + if (@intFromEnum(parent) == 0 and serialized.storage[0].name[0] == 0) { + break :p; + } if (children[@intFromEnum(node_index)].sibling == .none) { buf[i..][0..tree_langle.len].* = tree_langle.*; i += tree_langle.len; From c9587d3e4013011b703ded68aefafba41550bbd7 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 10:48:50 -0700 Subject: [PATCH 44/60] CLI: add missing call to root progress node end() cleans up unwanted "LLVM Emit Object" being left on the screen --- src/main.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/src/main.zig b/src/main.zig index 3ee90671b2c1..a1a726055208 100644 --- a/src/main.zig +++ b/src/main.zig @@ -3411,6 +3411,7 @@ fn buildOutputType( const root_prog_node = std.Progress.start(.{ .disable_printing = (color == .off), }); + defer root_prog_node.end(); updateModule(comp, color, root_prog_node) catch |err| switch (err) { error.SemanticAnalyzeFail => { From 45b505c93070cb0815e81c9e909e9ce840569af3 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 10:49:26 -0700 Subject: [PATCH 45/60] std.process.cleanExit: lock stderr before exiting This makes it so that any other threads which are writing to stderr have a chance to finish before the process terminates. It also clears the terminal in case any progress has been written to stderr, while still accomplishing the goal of not waiting until the update thread exits. 
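The convention this leans on is that every direct stderr write takes
the shared lock first, and a path that is about to terminate the
process may take the lock without releasing it, so in-flight writers
(including the progress redraw) finish before the process goes away.
A rough sketch of that convention (hypothetical `warnThenExitClean`
helper, not code from this patch):

    const std = @import("std");

    fn warnThenExitClean(msg: []const u8) noreturn {
        {
            // Ordinary writes take and release the lock.
            std.debug.lockStdErr();
            defer std.debug.unlockStdErr();
            std.io.getStdErr().writeAll(msg) catch {};
        }
        // An exiting path holds the lock across exit; it is never
        // released because the process is about to go away anyway.
        std.debug.lockStdErr();
        std.process.exit(0);
    }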
--- lib/std/process.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/std/process.zig b/lib/std/process.zig index 5fb36f991d3a..e55ce7ff4148 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -1760,6 +1760,7 @@ pub fn cleanExit() void { if (builtin.mode == .Debug) { return; } else { + std.debug.lockStdErr(); exit(0); } } From 03073d6c7bf5f7cd1daf8941fcf72665563b46d4 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 11:20:18 -0700 Subject: [PATCH 46/60] build runner: use "configure" for the progress name --- lib/compiler/build_runner.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index d4c96fe08b59..8aa21df1d99c 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -294,7 +294,7 @@ pub fn main() !void { builder.debug_log_scopes = debug_log_scopes.items; builder.resolveInstallPrefix(install_prefix, dir_list); { - var prog_node = main_progress_node.start("user build.zig logic", 0); + var prog_node = main_progress_node.start("Configure", 0); defer prog_node.end(); try builder.runBuild(root); } From 0937992a14e9557da7d753f3c38070fe57583e33 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 11:20:42 -0700 Subject: [PATCH 47/60] resinator: update to new progress API --- lib/compiler/resinator/cli.zig | 4 ++-- lib/compiler/resinator/errors.zig | 4 ++-- lib/compiler/resinator/main.zig | 6 ------ 3 files changed, 4 insertions(+), 10 deletions(-) diff --git a/lib/compiler/resinator/cli.zig b/lib/compiler/resinator/cli.zig index deee1ed54a3b..32b4389997de 100644 --- a/lib/compiler/resinator/cli.zig +++ b/lib/compiler/resinator/cli.zig @@ -108,8 +108,8 @@ pub const Diagnostics = struct { } pub fn renderToStdErr(self: *Diagnostics, args: []const []const u8, config: std.io.tty.Config) void { - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); const stderr = std.io.getStdErr().writer(); self.renderToWriter(args, stderr, config) catch return; } diff --git a/lib/compiler/resinator/errors.zig b/lib/compiler/resinator/errors.zig index 909824c594ce..5cfa766ac124 100644 --- a/lib/compiler/resinator/errors.zig +++ b/lib/compiler/resinator/errors.zig @@ -60,8 +60,8 @@ pub const Diagnostics = struct { } pub fn renderToStdErr(self: *Diagnostics, cwd: std.fs.Dir, source: []const u8, tty_config: std.io.tty.Config, source_mappings: ?SourceMappings) void { - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); const stderr = std.io.getStdErr().writer(); for (self.errors.items) |err_details| { renderErrorMessage(self.allocator, stderr, tty_config, cwd, err_details, source, self.strings.items, source_mappings) catch return; diff --git a/lib/compiler/resinator/main.zig b/lib/compiler/resinator/main.zig index 3dd59857588e..e056e80252e1 100644 --- a/lib/compiler/resinator/main.zig +++ b/lib/compiler/resinator/main.zig @@ -50,12 +50,6 @@ pub fn main() !void { }, }; - if (zig_integration) { - // Send progress with a special string to indicate that the building of the - // resinator binary is finished and we've moved on to actually compiling the .rc file - try error_handler.server.serveStringMessage(.progress, ""); - } - var options = options: { var cli_diagnostics = cli.Diagnostics.init(allocator); defer cli_diagnostics.deinit(); From a5e4fe5487d2ac8d8bafceb6edba8ae2d87aa86c Mon Sep 17 
00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 11:21:04 -0700 Subject: [PATCH 48/60] std.Build.Step.Run: account for new environment variable Introduces `disable_zig_progress` which prevents the build runner from assigning the child process a progress node. This is needed for the empty_env test which requires the environment to be completely empty. --- lib/std/Build/Step/Run.zig | 8 +++++++- test/standalone/empty_env/build.zig | 1 + 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index 1e7708120fb7..1ecc3334c480 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -23,6 +23,11 @@ cwd: ?Build.LazyPath, /// Override this field to modify the environment, or use setEnvironmentVariable env_map: ?*EnvMap, +/// When `true` prevents `ZIG_PROGRESS` environment variable from being passed +/// to the child process, which otherwise would be used for the child to send +/// progress updates to the parent. +disable_zig_progress: bool, + /// Configures whether the Run step is considered to have side-effects, and also /// whether the Run step will inherit stdio streams, forwarding them to the /// parent process, in which case will require a global lock to prevent other @@ -152,6 +157,7 @@ pub fn create(owner: *std.Build, name: []const u8) *Run { .argv = .{}, .cwd = null, .env_map = null, + .disable_zig_progress = false, .stdio = .infer_from_args, .stdin = .none, .extra_file_dependencies = &.{}, @@ -1235,7 +1241,7 @@ fn spawnChildAndCollect( child.stdin_behavior = .Pipe; } - if (run.stdio != .zig_test) { + if (run.stdio != .zig_test and !run.disable_zig_progress) { child.progress_node = prog_node; } diff --git a/test/standalone/empty_env/build.zig b/test/standalone/empty_env/build.zig index b8e488f830e5..344e8047bd12 100644 --- a/test/standalone/empty_env/build.zig +++ b/test/standalone/empty_env/build.zig @@ -21,6 +21,7 @@ pub fn build(b: *std.Build) void { const run = b.addRunArtifact(main); run.clearEnvironment(); + run.disable_zig_progress = true; test_step.dependOn(&run.step); } From eea7e5e554ff12b4b72a8aa8877d059e7c055bc3 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 11:21:59 -0700 Subject: [PATCH 49/60] std.Progress: adjust the timings a little bit Slightly slower refresh rate. It's still updating very quickly. Lower the initial delay so that CLI applications feel more responsive. Even though the application is doing work for the full 500ms until something is displayed on the screen, it feels nicer to get the progress earlier. --- lib/std/Progress.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 12874125e2f6..90dfbfba9805 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -59,9 +59,9 @@ pub const Options = struct { /// Must be at least 200 bytes. draw_buffer: []u8 = &default_draw_buffer, /// How many nanoseconds between writing updates to the terminal. - refresh_rate_ns: u64 = 60 * std.time.ns_per_ms, + refresh_rate_ns: u64 = 80 * std.time.ns_per_ms, /// How many nanoseconds to keep the output hidden - initial_delay_ns: u64 = 500 * std.time.ns_per_ms, + initial_delay_ns: u64 = 200 * std.time.ns_per_ms, /// If provided, causes the progress item to have a denominator. /// 0 means unknown. 
estimated_total_items: usize = 0, From dcf9cae2568a7422ab7885404da63fafa31b00fc Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 12:06:29 -0700 Subject: [PATCH 50/60] std.Progress: handle big-endian targets We cannot rely on host endianness because the parent or child process may be executing inside QEMU. --- lib/std/Progress.zig | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 90dfbfba9805..bfdf0f743676 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -7,6 +7,7 @@ const testing = std.testing; const assert = std.debug.assert; const Progress = @This(); const posix = std.posix; +const is_big_endian = builtin.cpu.arch.endian() == .big; /// `null` if the current node (and its children) should /// not print on update() @@ -101,6 +102,11 @@ pub const Node = struct { }; } + fn byteSwap(s: *Storage) void { + s.completed_count = @byteSwap(s.completed_count); + s.estimated_total_count = @byteSwap(s.estimated_total_count); + } + comptime { assert((@sizeOf(Storage) % 4) == 0); } @@ -719,9 +725,14 @@ fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buff // Mount the root here. copyRoot(main_storage, &storage[0]); + if (is_big_endian) main_storage.byteSwap(); // Copy the rest of the tree to the end. - @memcpy(serialized_buffer.storage[serialized_len..][0..nodes_len], storage[1..][0..nodes_len]); + const storage_dest = serialized_buffer.storage[serialized_len..][0..nodes_len]; + @memcpy(storage_dest, storage[1..][0..nodes_len]); + + // Always little-endian over the pipe. + if (is_big_endian) for (storage_dest) |*s| s.byteSwap(); // Patch up parent pointers taking into account how the subtree is mounted. for (serialized_buffer.parents[serialized_len..][0..nodes_len], parents[1..][0..nodes_len]) |*dest, p| { @@ -983,6 +994,10 @@ fn write(buf: []const u8) anyerror!void { } fn writeIpc(fd: posix.fd_t, serialized: Serialized) error{BrokenPipe}!void { + // Byteswap if necessary to ensure little endian over the pipe. This is + // needed because the parent or child process might be running in qemu. + if (is_big_endian) for (serialized.storage) |*s| s.byteSwap(); + assert(serialized.parents.len == serialized.storage.len); const serialized_len: u8 = @intCast(serialized.parents.len); const header = std.mem.asBytes(&serialized_len); @@ -995,9 +1010,6 @@ fn writeIpc(fd: posix.fd_t, serialized: Serialized) error{BrokenPipe}!void { .{ .base = parents.ptr, .len = parents.len }, }; - // TODO: if big endian, byteswap - // this is needed because the parent or child process might be running in qemu - // If this write would block we do not want to keep trying, but we need to // know if a partial message was written. 
if (posix.writev(fd, &vecs)) |written| { From 2367a1ff846ef2f146d4f7e449044970d399046b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 12:19:20 -0700 Subject: [PATCH 51/60] std.Progress: handle short writes --- lib/std/Progress.zig | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index bfdf0f743676..7653324187a5 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -993,6 +993,8 @@ fn write(buf: []const u8) anyerror!void { global_progress.written_newline_count = global_progress.accumulated_newline_count; } +var remaining_write_trash_bytes: usize = 0; + fn writeIpc(fd: posix.fd_t, serialized: Serialized) error{BrokenPipe}!void { // Byteswap if necessary to ensure little endian over the pipe. This is // needed because the parent or child process might be running in qemu. @@ -1004,18 +1006,35 @@ fn writeIpc(fd: posix.fd_t, serialized: Serialized) error{BrokenPipe}!void { const storage = std.mem.sliceAsBytes(serialized.storage); const parents = std.mem.sliceAsBytes(serialized.parents); - var vecs: [3]std.posix.iovec_const = .{ + var vecs: [3]posix.iovec_const = .{ .{ .base = header.ptr, .len = header.len }, .{ .base = storage.ptr, .len = storage.len }, .{ .base = parents.ptr, .len = parents.len }, }; + while (remaining_write_trash_bytes > 0) { + // We do this in a separate write call to give a better chance for the + // writev below to be in a single packet. + const n = @min(parents.len, remaining_write_trash_bytes); + if (posix.write(fd, parents[0..n])) |written| { + remaining_write_trash_bytes -= written; + continue; + } else |err| switch (err) { + error.WouldBlock => return, + error.BrokenPipe => return error.BrokenPipe, + else => |e| { + std.log.debug("failed to send progress to parent process: {s}", .{@errorName(e)}); + return error.BrokenPipe; + }, + } + } + // If this write would block we do not want to keep trying, but we need to // know if a partial message was written. if (posix.writev(fd, &vecs)) |written| { const total = header.len + storage.len + parents.len; if (written < total) { - std.log.debug("short write: {d} out of {d}", .{ written, total }); + remaining_write_trash_bytes = total - written; } } else |err| switch (err) { error.WouldBlock => {}, From 44389253c2c3bd47c2c741aa6c804d18d1642ee7 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 13:51:16 -0700 Subject: [PATCH 52/60] fix zig translate-c creating root progress node twice --- lib/std/Progress.zig | 8 ++++---- src/main.zig | 28 +++++++++++++++++++--------- 2 files changed, 23 insertions(+), 13 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 7653324187a5..ba1f4bc18117 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -128,12 +128,12 @@ pub const Node = struct { } }; - const OptionalIndex = enum(u8) { + pub const OptionalIndex = enum(u8) { none = std.math.maxInt(u8), /// Index into `node_storage`. _, - fn unwrap(i: @This()) ?Index { + pub fn unwrap(i: @This()) ?Index { if (i == .none) return null; return @enumFromInt(@intFromEnum(i)); } @@ -145,7 +145,7 @@ pub const Node = struct { }; /// Index into `node_storage`. 
- const Index = enum(u8) { + pub const Index = enum(u8) { _, fn toParent(i: @This()) Parent { @@ -154,7 +154,7 @@ pub const Node = struct { return @enumFromInt(@intFromEnum(i)); } - fn toOptional(i: @This()) OptionalIndex { + pub fn toOptional(i: @This()) OptionalIndex { return @enumFromInt(@intFromEnum(i)); } }; diff --git a/src/main.zig b/src/main.zig index a1a726055208..987a7cec5a9a 100644 --- a/src/main.zig +++ b/src/main.zig @@ -3404,15 +3404,15 @@ fn buildOutputType( }, } - if (arg_mode == .translate_c) { - return cmdTranslateC(comp, arena, null); - } - const root_prog_node = std.Progress.start(.{ .disable_printing = (color == .off), }); defer root_prog_node.end(); + if (arg_mode == .translate_c) { + return cmdTranslateC(comp, arena, null, root_prog_node); + } + updateModule(comp, color, root_prog_node) catch |err| switch (err) { error.SemanticAnalyzeFail => { assert(listen == .none); @@ -4048,7 +4048,7 @@ fn serve( defer arena_instance.deinit(); const arena = arena_instance.allocator(); var output: Compilation.CImportResult = undefined; - try cmdTranslateC(comp, arena, &output); + try cmdTranslateC(comp, arena, &output, main_progress_node); defer output.deinit(gpa); if (output.errors.errorMessageCount() != 0) { try server.serveErrorBundle(output.errors); @@ -4398,7 +4398,12 @@ fn updateModule(comp: *Compilation, color: Color, prog_node: std.Progress.Node) } } -fn cmdTranslateC(comp: *Compilation, arena: Allocator, fancy_output: ?*Compilation.CImportResult) !void { +fn cmdTranslateC( + comp: *Compilation, + arena: Allocator, + fancy_output: ?*Compilation.CImportResult, + prog_node: std.Progress.Node, +) !void { if (build_options.only_core_functionality) @panic("@translate-c is not available in a zig2.c build"); const color: Color = .auto; assert(comp.c_source_files.len == 1); @@ -4459,6 +4464,7 @@ fn cmdTranslateC(comp: *Compilation, arena: Allocator, fancy_output: ?*Compilati .root_src_path = "aro_translate_c.zig", .depend_on_aro = true, .capture = &stdout, + .progress_node = prog_node, }); break :f stdout; }, @@ -5236,6 +5242,7 @@ const JitCmdOptions = struct { capture: ?*[]u8 = null, /// Send error bundles via std.zig.Server over stdout server: bool = false, + progress_node: std.Progress.Node = .{ .index = .none }, }; fn jitCmd( @@ -5245,9 +5252,12 @@ fn jitCmd( options: JitCmdOptions, ) !void { const color: Color = .auto; - const root_prog_node = std.Progress.start(.{ - .disable_printing = (color == .off), - }); + const root_prog_node = if (options.progress_node.index != .none) + options.progress_node + else + std.Progress.start(.{ + .disable_printing = (color == .off), + }); const target_query: std.Target.Query = .{}; const resolved_target: Package.Module.ResolvedTarget = .{ From bb1f4d2bdafe669ef251d93c0aa13a9cbaf2ecf2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 15:21:19 -0700 Subject: [PATCH 53/60] translate-c tests: skip_foreign_checks=true --- test/src/Cases.zig | 2 +- test/src/RunTranslatedC.zig | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/test/src/Cases.zig b/test/src/Cases.zig index 2138a6aa2546..72574bd97ea0 100644 --- a/test/src/Cases.zig +++ b/test/src/Cases.zig @@ -561,7 +561,7 @@ pub fn lowerToTranslateCSteps( for (self.translate.items) |case| switch (case.kind) { .run => |output| { if (translate_c_options.skip_run_translated_c) continue; - const annotated_case_name = b.fmt("run-translated-c {s}", .{case.name}); + const annotated_case_name = b.fmt("run-translated-c {s}", .{case.name}); for (test_filters) |test_filter| { 
if (std.mem.indexOf(u8, annotated_case_name, test_filter)) |_| break; } else if (test_filters.len > 0) continue; diff --git a/test/src/RunTranslatedC.zig b/test/src/RunTranslatedC.zig index 8414bd15ac91..74d119276884 100644 --- a/test/src/RunTranslatedC.zig +++ b/test/src/RunTranslatedC.zig @@ -91,6 +91,7 @@ pub fn addCase(self: *RunTranslatedCContext, case: *const TestCase) void { run.expectStdErrEqual(""); } run.expectStdOutEqual(case.expected_stdout); + run.skip_foreign_checks = true; self.step.dependOn(&run.step); } From aca7feb8fac2fae8f0b79a2cfac2a248bcd8451b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 15:22:18 -0700 Subject: [PATCH 54/60] std.Progress: fix race condition with setIpcFd The update thread was sometimes reading the special state and then incorrectly getting 0 for the file descriptor, making it hang since it tried to read from stdin. --- lib/std/Progress.zig | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index ba1f4bc18117..72ec219c932b 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -85,6 +85,7 @@ pub const Node = struct { estimated_total_count: u32, name: [max_name_len]u8, + /// Not thread-safe. fn getIpcFd(s: Storage) ?posix.fd_t { return if (s.estimated_total_count == std.math.maxInt(u32)) switch (@typeInfo(posix.fd_t)) { .Int => @bitCast(s.completed_count), @@ -93,15 +94,21 @@ pub const Node = struct { } else null; } + /// Thread-safe. fn setIpcFd(s: *Storage, fd: posix.fd_t) void { - s.estimated_total_count = std.math.maxInt(u32); - s.completed_count = switch (@typeInfo(posix.fd_t)) { + const integer: u32 = switch (@typeInfo(posix.fd_t)) { .Int => @bitCast(fd), .Pointer => @intFromPtr(fd), else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)), }; + // `estimated_total_count` max int indicates the special state that + // causes `completed_count` to be treated as a file descriptor, so + // the order here matters. + @atomicStore(u32, &s.completed_count, integer, .seq_cst); + @atomicStore(u32, &s.estimated_total_count, std.math.maxInt(u32), .seq_cst); } + /// Not thread-safe. fn byteSwap(s: *Storage) void { s.completed_count = @byteSwap(s.completed_count); s.estimated_total_count = @byteSwap(s.estimated_total_count); @@ -208,7 +215,9 @@ pub const Node = struct { pub fn setEstimatedTotalItems(n: Node, count: usize) void { const index = n.index.unwrap() orelse return; const storage = storageByIndex(index); - @atomicStore(u32, &storage.estimated_total_count, std.math.lossyCast(u32, count), .monotonic); + // Avoid u32 max int which is used to indicate a special state. + const saturated = @min(std.math.maxInt(u32) - 1, count); + @atomicStore(u32, &storage.estimated_total_count, saturated, .monotonic); } /// Thread-safe. @@ -243,10 +252,13 @@ pub const Node = struct { } } - /// Posix-only. Used by `std.process.Child`. + /// Posix-only. Used by `std.process.Child`. Thread-safe. 
pub fn setIpcFd(node: Node, fd: posix.fd_t) void { const index = node.index.unwrap() orelse return; - assert(fd != -1); + assert(fd >= 0); + assert(fd != posix.STDOUT_FILENO); + assert(fd != posix.STDIN_FILENO); + assert(fd != posix.STDERR_FILENO); storageByIndex(index).setIpcFd(fd); } @@ -582,8 +594,8 @@ fn serialize(serialized_buffer: *Serialized.Buffer) Serialized { while (begin_parent != .unused) { const dest_storage = &serialized_buffer.storage[serialized_len]; @memcpy(&dest_storage.name, &storage_ptr.name); - dest_storage.completed_count = @atomicLoad(u32, &storage_ptr.completed_count, .monotonic); - dest_storage.estimated_total_count = @atomicLoad(u32, &storage_ptr.estimated_total_count, .monotonic); + dest_storage.completed_count = @atomicLoad(u32, &storage_ptr.completed_count, .seq_cst); + dest_storage.estimated_total_count = @atomicLoad(u32, &storage_ptr.estimated_total_count, .seq_cst); const end_parent = @atomicLoad(Node.Parent, parent_ptr, .seq_cst); if (begin_parent == end_parent) { any_ipc = any_ipc or (dest_storage.getIpcFd() != null); From b7889f262a5bee642460eb33b1ae7f2b1f87864c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 15:41:47 -0700 Subject: [PATCH 55/60] zig build: respect --color argument `--color off` now disables the CLI progress bar both in the parent process and the build runner process. --- lib/compiler/build_runner.zig | 6 ++++-- src/main.zig | 11 ++++++++++- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index 8aa21df1d99c..86ad68133ac3 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -289,7 +289,9 @@ pub fn main() !void { .windows_api => {}, } - const main_progress_node = std.Progress.start(.{}); + const main_progress_node = std.Progress.start(.{ + .disable_printing = (color == .off), + }); builder.debug_log_scopes = debug_log_scopes.items; builder.resolveInstallPrefix(install_prefix, dir_list); @@ -1223,7 +1225,7 @@ fn cleanExit() void { process.exit(0); } -const Color = enum { auto, off, on }; +const Color = std.zig.Color; const Summary = enum { all, new, failures, none }; fn get_tty_conf(color: Color, stderr: File) std.io.tty.Config { diff --git a/src/main.zig b/src/main.zig index 987a7cec5a9a..cec15a087f17 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4702,6 +4702,8 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { const results_tmp_file_nonce = Package.Manifest.hex64(std.crypto.random.int(u64)); try child_argv.append("-Z" ++ results_tmp_file_nonce); + var color: Color = .auto; + { var i: usize = 0; while (i < args.len) : (i += 1) { @@ -4786,6 +4788,14 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { verbose_cimport = true; } else if (mem.eql(u8, arg, "--verbose-llvm-cpu-features")) { verbose_llvm_cpu_features = true; + } else if (mem.eql(u8, arg, "--color")) { + if (i + 1 >= args.len) fatal("expected [auto|on|off] after {s}", .{arg}); + i += 1; + color = std.meta.stringToEnum(Color, args[i]) orelse { + fatal("expected [auto|on|off] after {s}, found '{s}'", .{ arg, args[i] }); + }; + try child_argv.appendSlice(&.{ arg, args[i] }); + continue; } else if (mem.eql(u8, arg, "--seed")) { if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); i += 1; @@ -4799,7 +4809,6 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { const work_around_btrfs_bug = native_os == .linux and EnvVar.ZIG_BTRFS_WORKAROUND.isSet(); - 
const color: Color = .auto; const root_prog_node = std.Progress.start(.{ .disable_printing = (color == .off), .root_name = "Compile Build Script", From 947a3a1be92e0d5ddc5ad263d9434b31e8c170db Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 15:55:32 -0700 Subject: [PATCH 56/60] std.process.Child: fix spawning child proc with new cwd fd Before this fix, the dup2 of the progress pipe was clobbering the cwd fd, causing the fchdir to return ENOTDIR in between fork() and exec(). --- lib/std/process/Child.zig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index a31afdc66d5e..0599763c67e0 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -654,7 +654,6 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { setUpChildIo(self.stdin_behavior, stdin_pipe[0], posix.STDIN_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); setUpChildIo(self.stdout_behavior, stdout_pipe[1], posix.STDOUT_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); setUpChildIo(self.stderr_behavior, stderr_pipe[1], posix.STDERR_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); - if (prog_pipe[1] != -1) posix.dup2(prog_pipe[1], prog_fileno) catch |err| forkChildErrReport(err_pipe[1], err); if (self.cwd_dir) |cwd| { posix.fchdir(cwd.fd) catch |err| forkChildErrReport(err_pipe[1], err); @@ -662,6 +661,10 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { posix.chdir(cwd) catch |err| forkChildErrReport(err_pipe[1], err); } + // Must happen after fchdir above, the cwd file descriptor might be + // equal to prog_fileno and be clobbered by this dup2 call. + if (prog_pipe[1] != -1) posix.dup2(prog_pipe[1], prog_fileno) catch |err| forkChildErrReport(err_pipe[1], err); + if (self.gid) |gid| { posix.setregid(gid, gid) catch |err| forkChildErrReport(err_pipe[1], err); } From 5bdfe22092716d0e590979a678a3f5bf715439e9 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 20:53:25 -0700 Subject: [PATCH 57/60] CLI: actually fix translate-c creating root progress node twice 7281cc1d839da6e84bb76fadb2c1eafc22a82df7 did not solve the problem because even when Node.index is none, it still counts as initializing the global Progress object. Just use a normal zig optional, and all is good. 
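
For reference, a minimal sketch of the optional-based pattern (the `Options`/`run`
names and the ownership `defer` are illustrative, not the exact main.zig code):

    const std = @import("std");

    const Options = struct {
        /// `null` means the caller did not provide a node. Only in that case
        /// does the callee call `std.Progress.start`, which is what initializes
        /// the global Progress object.
        progress_node: ?std.Progress.Node = null,
    };

    fn run(options: Options) void {
        const owns_root = options.progress_node == null;
        const root_prog_node = options.progress_node orelse std.Progress.start(.{});
        // Only end a node created here; a caller-provided node is ended by the
        // caller (illustrative ownership policy; see the jitCmd diff below).
        defer if (owns_root) root_prog_node.end();

        // ... do the actual work under root_prog_node ...
    }

Unlike the previous `.index = .none` sentinel, the optional distinguishes "the
caller never started a root node" from "the caller started one with printing
disabled".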
--- src/main.zig | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/main.zig b/src/main.zig index cec15a087f17..22349dd36a27 100644 --- a/src/main.zig +++ b/src/main.zig @@ -5251,7 +5251,7 @@ const JitCmdOptions = struct { capture: ?*[]u8 = null, /// Send error bundles via std.zig.Server over stdout server: bool = false, - progress_node: std.Progress.Node = .{ .index = .none }, + progress_node: ?std.Progress.Node = null, }; fn jitCmd( @@ -5261,12 +5261,9 @@ fn jitCmd( options: JitCmdOptions, ) !void { const color: Color = .auto; - const root_prog_node = if (options.progress_node.index != .none) - options.progress_node - else - std.Progress.start(.{ - .disable_printing = (color == .off), - }); + const root_prog_node = if (options.progress_node) |node| node else std.Progress.start(.{ + .disable_printing = (color == .off), + }); const target_query: std.Target.Query = .{}; const resolved_target: Package.Module.ResolvedTarget = .{ From 65a0e14e4f1dee68906b4a380cdb1d8517fd88ea Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 May 2024 20:54:35 -0700 Subject: [PATCH 58/60] std.Progress: relax some of the atomic orderings Generates better machine code, particularly on ARM --- lib/std/Progress.zig | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 72ec219c932b..f04f5b916163 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -104,8 +104,8 @@ pub const Node = struct { // `estimated_total_count` max int indicates the special state that // causes `completed_count` to be treated as a file descriptor, so // the order here matters. - @atomicStore(u32, &s.completed_count, integer, .seq_cst); - @atomicStore(u32, &s.estimated_total_count, std.math.maxInt(u32), .seq_cst); + @atomicStore(u32, &s.completed_count, integer, .monotonic); + @atomicStore(u32, &s.estimated_total_count, std.math.maxInt(u32), .release); } /// Not thread-safe. @@ -590,13 +590,13 @@ fn serialize(serialized_buffer: *Serialized.Buffer) Serialized { const node_parents = global_progress.node_parents[0..end_index]; const node_storage = global_progress.node_storage[0..end_index]; for (node_parents, node_storage, 0..) 
|*parent_ptr, *storage_ptr, i| { - var begin_parent = @atomicLoad(Node.Parent, parent_ptr, .seq_cst); + var begin_parent = @atomicLoad(Node.Parent, parent_ptr, .acquire); while (begin_parent != .unused) { const dest_storage = &serialized_buffer.storage[serialized_len]; @memcpy(&dest_storage.name, &storage_ptr.name); - dest_storage.completed_count = @atomicLoad(u32, &storage_ptr.completed_count, .seq_cst); - dest_storage.estimated_total_count = @atomicLoad(u32, &storage_ptr.estimated_total_count, .seq_cst); - const end_parent = @atomicLoad(Node.Parent, parent_ptr, .seq_cst); + dest_storage.estimated_total_count = @atomicLoad(u32, &storage_ptr.estimated_total_count, .acquire); + dest_storage.completed_count = @atomicLoad(u32, &storage_ptr.completed_count, .monotonic); + const end_parent = @atomicLoad(Node.Parent, parent_ptr, .acquire); if (begin_parent == end_parent) { any_ipc = any_ipc or (dest_storage.getIpcFd() != null); serialized_buffer.parents[serialized_len] = begin_parent; From 40afac40b8d9f274d63448a11f9f4259a1f68528 Mon Sep 17 00:00:00 2001 From: Ryan Liptak Date: Tue, 28 May 2024 03:40:18 -0700 Subject: [PATCH 59/60] std.Progress: Use Windows console API calls when ANSI escape codes are not supported --- lib/std/Progress.zig | 197 +++++++++++++++++++++++++++++--- lib/std/os/windows/kernel32.zig | 9 ++ 2 files changed, 188 insertions(+), 18 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index f04f5b916163..2e7c70f9d62e 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -16,6 +16,8 @@ terminal: ?std.fs.File, /// Is this a windows API terminal (note: this is not the same as being run on windows /// because other terminals exist like MSYS/git-bash) is_windows_terminal: bool, +/// The output code page of the console (only set if the console is a Windows API terminal) +console_code_page: if (builtin.os.tag == .windows) windows.UINT else void, /// Whether the terminal supports ANSI escape codes. 
supports_ansi_escape_codes: bool, @@ -297,6 +299,7 @@ pub const Node = struct { var global_progress: Progress = .{ .terminal = null, .is_windows_terminal = false, + .console_code_page = if (builtin.os.tag == .windows) undefined else {}, .supports_ansi_escape_codes = false, .update_thread = null, .redraw_event = .{}, @@ -378,13 +381,15 @@ pub fn start(options: Options) Node { global_progress.supports_ansi_escape_codes = true; } else if (builtin.os.tag == .windows and stderr.isTty()) { global_progress.is_windows_terminal = true; + global_progress.console_code_page = windows.kernel32.GetConsoleOutputCP(); global_progress.terminal = stderr; } else if (builtin.os.tag != .windows) { // we are in a "dumb" terminal like in acme or writing to a file global_progress.terminal = stderr; } - if (global_progress.terminal == null or !global_progress.supports_ansi_escape_codes) { + const can_clear_terminal = global_progress.supports_ansi_escape_codes or global_progress.is_windows_terminal; + if (global_progress.terminal == null or !can_clear_terminal) { return .{ .index = .none }; } @@ -515,11 +520,90 @@ const save = "\x1b7"; const restore = "\x1b8"; const finish_sync = "\x1b[?2026l"; -const tree_tee = "\x1B\x28\x30\x74\x71\x1B\x28\x42 "; // ├─ -const tree_line = "\x1B\x28\x30\x78\x1B\x28\x42 "; // │ -const tree_langle = "\x1B\x28\x30\x6d\x71\x1B\x28\x42 "; // └─ +const TreeSymbol = enum { + /// ├─ + tee, + /// │ + line, + /// └─ + langle, + + const Encoding = enum { + ansi_escapes, + code_page_437, + utf8, + ascii, + }; + + /// The escape sequence representation as a string literal + fn escapeSeq(symbol: TreeSymbol) *const [9:0]u8 { + return switch (symbol) { + .tee => "\x1B\x28\x30\x74\x71\x1B\x28\x42 ", + .line => "\x1B\x28\x30\x78\x1B\x28\x42 ", + .langle => "\x1B\x28\x30\x6d\x71\x1B\x28\x42 ", + }; + } + + fn bytes(symbol: TreeSymbol, encoding: Encoding) []const u8 { + return switch (encoding) { + .ansi_escapes => escapeSeq(symbol), + .code_page_437 => switch (symbol) { + .tee => "\xC3\xC4 ", + .line => "\xB3 ", + .langle => "\xC0\xC4 ", + }, + .utf8 => switch (symbol) { + .tee => "├─ ", + .line => "│ ", + .langle => "└─ ", + }, + .ascii => switch (symbol) { + .tee => "|- ", + .line => "| ", + .langle => "+- ", + }, + }; + } + + fn maxByteLen(symbol: TreeSymbol) usize { + var max: usize = 0; + inline for (@typeInfo(Encoding).Enum.fields) |field| { + const len = symbol.bytes(@field(Encoding, field.name)).len; + if (len > max) max = len; + } + return max; + } +}; + +fn appendTreeSymbol(comptime symbol: TreeSymbol, buf: []u8, start_i: usize) usize { + if (builtin.os.tag == .windows and global_progress.is_windows_terminal) { + const bytes = switch (global_progress.console_code_page) { + // Code page 437 is the default code page and contains the box drawing symbols + 437 => symbol.bytes(.code_page_437), + // UTF-8 + 65001 => symbol.bytes(.utf8), + // Fall back to ASCII approximation + else => symbol.bytes(.ascii), + }; + @memcpy(buf[start_i..][0..bytes.len], bytes); + return start_i + bytes.len; + } + + // Drawing the tree is disabled when ansi escape codes are not supported + assert(global_progress.supports_ansi_escape_codes); + + const bytes = symbol.escapeSeq(); + buf[start_i..][0..bytes.len].* = bytes.*; + return start_i + bytes.len; +} fn clearTerminal() void { + if (builtin.os.tag == .windows and global_progress.is_windows_terminal) { + return clearTerminalWindowsApi() catch { + global_progress.terminal = null; + }; + } + if (global_progress.written_newline_count == 0) return; var i: usize = 0; @@ 
-558,6 +642,64 @@ fn computeClear(buf: []u8, start_i: usize) usize { return i; } +/// U+25BA or ► +const windows_api_start_marker = 0x25BA; + +fn clearTerminalWindowsApi() error{Unexpected}!void { + // This uses a 'marker' strategy. The idea is: + // - Always write a marker (in this case U+25BA or ►) at the beginning of the progress + // - Get the current cursor position (at the end of the progress) + // - Subtract the number of lines written to get the expected start of the progress + // - Check to see if the first character at the start of the progress is the marker + // - If it's not the marker, keep checking the line before until we find it + // - Clear the screen from that position down, and set the cursor position to the start + // + // This strategy works even if there is line wrapping, and can handle the window + // being resized/scrolled arbitrarily. + // + // Notes: + // - Ideally, the marker would be a zero-width character, but the Windows console + // doesn't seem to support rendering zero-width characters (they show up as a space) + // - This same marker idea could technically be done with an attribute instead + // (https://learn.microsoft.com/en-us/windows/console/console-screen-buffers#character-attributes) + // but it must be a valid attribute and it actually needs to apply to the first + // character in order to be readable via ReadConsoleOutputAttribute. It doesn't seem + // like any of the available attributes are invisible/benign. + const prev_nl_n = global_progress.written_newline_count; + if (prev_nl_n > 0) { + const handle = (global_progress.terminal orelse return).handle; + const screen_area = @as(windows.DWORD, global_progress.cols) * global_progress.rows; + + var console_info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined; + if (windows.kernel32.GetConsoleScreenBufferInfo(handle, &console_info) == 0) { + return error.Unexpected; + } + const cursor_pos = console_info.dwCursorPosition; + const expected_y = cursor_pos.Y - @as(i16, @intCast(prev_nl_n)); + var start_pos = windows.COORD{ .X = 0, .Y = expected_y }; + while (start_pos.Y >= 0) { + var wchar: [1]u16 = undefined; + var num_console_chars_read: windows.DWORD = undefined; + if (windows.kernel32.ReadConsoleOutputCharacterW(handle, &wchar, wchar.len, start_pos, &num_console_chars_read) == 0) { + return error.Unexpected; + } + + if (wchar[0] == windows_api_start_marker) break; + start_pos.Y -= 1; + } else { + // If we couldn't find the marker, then just assume that no lines wrapped + start_pos = .{ .X = 0, .Y = expected_y }; + } + var num_chars_written: windows.DWORD = undefined; + if (windows.kernel32.FillConsoleOutputCharacterW(handle, ' ', screen_area, start_pos, &num_chars_written) == 0) { + return error.Unexpected; + } + if (windows.kernel32.SetConsoleCursorPosition(handle, start_pos) == 0) { + return error.Unexpected; + } + } +} + const Children = struct { child: Node.OptionalIndex, sibling: Node.OptionalIndex, @@ -877,17 +1019,35 @@ fn computeRedraw(serialized_buffer: *Serialized.Buffer) []u8 { var i: usize = 0; const buf = global_progress.draw_buffer; - buf[i..][0..start_sync.len].* = start_sync.*; - i += start_sync.len; + if (global_progress.supports_ansi_escape_codes) { + buf[i..][0..start_sync.len].* = start_sync.*; + i += start_sync.len; - i = computeClear(buf, i); + i = computeClear(buf, i); + } else if (builtin.os.tag == .windows and global_progress.is_windows_terminal) { + clearTerminalWindowsApi() catch { + global_progress.terminal = null; + return buf[0..0]; + }; + + // Write the marker that we 
will use to find the beginning of the progress when clearing. + // Note: This doesn't have to use WriteConsoleW, but doing so avoids dealing with the code page. + var num_chars_written: windows.DWORD = undefined; + const handle = (global_progress.terminal orelse return buf[0..0]).handle; + if (windows.kernel32.WriteConsoleW(handle, &[_]u16{windows_api_start_marker}, 1, &num_chars_written, null) == 0) { + global_progress.terminal = null; + return buf[0..0]; + } + } global_progress.accumulated_newline_count = 0; const root_node_index: Node.Index = @enumFromInt(0); i = computeNode(buf, i, serialized, children, root_node_index); - buf[i..][0..finish_sync.len].* = finish_sync.*; - i += finish_sync.len; + if (global_progress.supports_ansi_escape_codes) { + buf[i..][0..finish_sync.len].* = finish_sync.*; + i += finish_sync.len; + } return buf[0..i]; } @@ -915,15 +1075,14 @@ fn computePrefix( buf[i..][0..prefix.len].* = prefix.*; i += prefix.len; } else { - const upper_bound_len = tree_line.len + line_upper_bound_len; + const upper_bound_len = TreeSymbol.line.maxByteLen() + line_upper_bound_len; if (i + upper_bound_len > buf.len) return buf.len; - buf[i..][0..tree_line.len].* = tree_line.*; - i += tree_line.len; + i = appendTreeSymbol(.line, buf, i); } return i; } -const line_upper_bound_len = @max(tree_tee.len, tree_langle.len) + "[4294967296/4294967296] ".len + +const line_upper_bound_len = @max(TreeSymbol.tee.maxByteLen(), TreeSymbol.langle.maxByteLen()) + "[4294967296/4294967296] ".len + Node.max_name_len + finish_sync.len; fn computeNode( @@ -950,11 +1109,9 @@ fn computeNode( break :p; } if (children[@intFromEnum(node_index)].sibling == .none) { - buf[i..][0..tree_langle.len].* = tree_langle.*; - i += tree_langle.len; + i = appendTreeSymbol(.langle, buf, i); } else { - buf[i..][0..tree_tee.len].* = tree_tee.*; - i += tree_tee.len; + i = appendTreeSymbol(.tee, buf, i); } } @@ -1072,7 +1229,11 @@ fn maybeUpdateSize(resize_flag: bool) void { global_progress.cols = 80; } - global_progress.rows = @intCast(info.dwSize.Y); + // In the old Windows console, dwSize.Y is the line count of the entire + // scrollback buffer, so we use this instead so that we always get the + // size of the screen. 
+ const screen_height = info.srWindow.Bottom - info.srWindow.Top; + global_progress.rows = @intCast(screen_height); global_progress.cols = @intCast(info.dwSize.X); } else { var winsize: posix.winsize = .{ diff --git a/lib/std/os/windows/kernel32.zig b/lib/std/os/windows/kernel32.zig index a5c295487861..3d72b6f1a26e 100644 --- a/lib/std/os/windows/kernel32.zig +++ b/lib/std/os/windows/kernel32.zig @@ -175,6 +175,15 @@ pub extern "kernel32" fn FillConsoleOutputCharacterW(hConsoleOutput: HANDLE, cCh pub extern "kernel32" fn FillConsoleOutputAttribute(hConsoleOutput: HANDLE, wAttribute: WORD, nLength: DWORD, dwWriteCoord: COORD, lpNumberOfAttrsWritten: *DWORD) callconv(WINAPI) BOOL; pub extern "kernel32" fn SetConsoleCursorPosition(hConsoleOutput: HANDLE, dwCursorPosition: COORD) callconv(WINAPI) BOOL; +pub extern "kernel32" fn WriteConsoleW(hConsoleOutput: HANDLE, lpBuffer: [*]const u16, nNumberOfCharsToWrite: DWORD, lpNumberOfCharsWritten: ?*DWORD, lpReserved: ?LPVOID) callconv(WINAPI) BOOL; +pub extern "kernel32" fn ReadConsoleOutputCharacterW( + hConsoleOutput: windows.HANDLE, + lpCharacter: [*]u16, + nLength: windows.DWORD, + dwReadCoord: windows.COORD, + lpNumberOfCharsRead: *windows.DWORD, +) callconv(windows.WINAPI) windows.BOOL; + pub extern "kernel32" fn GetCurrentDirectoryW(nBufferLength: DWORD, lpBuffer: ?[*]WCHAR) callconv(WINAPI) DWORD; pub extern "kernel32" fn GetCurrentThread() callconv(WINAPI) HANDLE; From 3a3d2187f986066859cfb793fb7ee1cae4dfea08 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 28 May 2024 12:31:10 -0700 Subject: [PATCH 60/60] std.Progress: better Windows support * Merge a bunch of related state together into TerminalMode. Windows sometimes follows the same path as posix via ansi_escape_codes, sometimes not. * Use a different thread entry point for Windows API but share the same entry point on Windows when the terminal is in ansi_escape_codes mode. * Only clear the terminal when the stderr lock is held. * Don't try to clear the terminal when nothing has been written yet. * Don't try to clear the terminal in IPC mode. * Fix size detection logic bug under error conditions. --- lib/std/Progress.zig | 216 +++++++++++++++++++++++-------------------- 1 file changed, 115 insertions(+), 101 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 2e7c70f9d62e..59cc559d5d98 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -8,19 +8,13 @@ const assert = std.debug.assert; const Progress = @This(); const posix = std.posix; const is_big_endian = builtin.cpu.arch.endian() == .big; +const is_windows = builtin.os.tag == .windows; /// `null` if the current node (and its children) should /// not print on update() -terminal: ?std.fs.File, +terminal: std.fs.File, -/// Is this a windows API terminal (note: this is not the same as being run on windows -/// because other terminals exist like MSYS/git-bash) -is_windows_terminal: bool, -/// The output code page of the console (only set if the console is a Windows API terminal) -console_code_page: if (builtin.os.tag == .windows) windows.UINT else void, - -/// Whether the terminal supports ANSI escape codes. -supports_ansi_escape_codes: bool, +terminal_mode: TerminalMode, update_thread: ?std.Thread, @@ -53,6 +47,19 @@ node_freelist: []Node.OptionalIndex, node_freelist_first: Node.OptionalIndex, node_end_index: u32, +pub const TerminalMode = union(enum) { + off, + ansi_escape_codes, + /// This is not the same as being run on windows because other terminals + /// exist like MSYS/git-bash. 
+ windows_api: if (is_windows) WindowsApi else void, + + pub const WindowsApi = struct { + /// The output code page of the console. + code_page: windows.UINT, + }; +}; + pub const Options = struct { /// User-provided buffer with static lifetime. /// @@ -297,10 +304,8 @@ pub const Node = struct { }; var global_progress: Progress = .{ - .terminal = null, - .is_windows_terminal = false, - .console_code_page = if (builtin.os.tag == .windows) undefined else {}, - .supports_ansi_escape_codes = false, + .terminal = undefined, + .terminal_mode = .off, .update_thread = null, .redraw_event = .{}, .refresh_rate_ns = undefined, @@ -376,20 +381,16 @@ pub fn start(options: Options) Node { return .{ .index = .none }; } const stderr = std.io.getStdErr(); + global_progress.terminal = stderr; if (stderr.supportsAnsiEscapeCodes()) { - global_progress.terminal = stderr; - global_progress.supports_ansi_escape_codes = true; - } else if (builtin.os.tag == .windows and stderr.isTty()) { - global_progress.is_windows_terminal = true; - global_progress.console_code_page = windows.kernel32.GetConsoleOutputCP(); - global_progress.terminal = stderr; - } else if (builtin.os.tag != .windows) { - // we are in a "dumb" terminal like in acme or writing to a file - global_progress.terminal = stderr; + global_progress.terminal_mode = .ansi_escape_codes; + } else if (is_windows and stderr.isTty()) { + global_progress.terminal_mode = TerminalMode{ .windows_api = .{ + .code_page = windows.kernel32.GetConsoleOutputCP(), + } }; } - const can_clear_terminal = global_progress.supports_ansi_escape_codes or global_progress.is_windows_terminal; - if (global_progress.terminal == null or !can_clear_terminal) { + if (global_progress.terminal_mode == .off) { return .{ .index = .none }; } @@ -404,7 +405,11 @@ pub fn start(options: Options) Node { }; } - if (std.Thread.spawn(.{}, updateThreadRun, .{})) |thread| { + if (switch (global_progress.terminal_mode) { + .off => unreachable, // handled a few lines above + .ansi_escape_codes => std.Thread.spawn(.{}, updateThreadRun, .{}), + .windows_api => if (is_windows) std.Thread.spawn(.{}, windowsApiUpdateThreadRun, .{}) else unreachable, + }) |thread| { global_progress.update_thread = thread; } else |err| { std.log.warn("unable to spawn thread for printing progress to terminal: {s}", .{@errorName(err)}); @@ -438,13 +443,42 @@ fn updateThreadRun() void { { const resize_flag = wait(global_progress.initial_delay_ns); + if (@atomicLoad(bool, &global_progress.done, .seq_cst)) return; maybeUpdateSize(resize_flag); + const buffer = computeRedraw(&serialized_buffer); + if (stderr_mutex.tryLock()) { + defer stderr_mutex.unlock(); + write(buffer) catch return; + } + } + + while (true) { + const resize_flag = wait(global_progress.refresh_rate_ns); + if (@atomicLoad(bool, &global_progress.done, .seq_cst)) { stderr_mutex.lock(); defer stderr_mutex.unlock(); - return clearTerminal(); + return clearWrittenWithEscapeCodes() catch {}; + } + + maybeUpdateSize(resize_flag); + + const buffer = computeRedraw(&serialized_buffer); + if (stderr_mutex.tryLock()) { + defer stderr_mutex.unlock(); + write(buffer) catch return; } + } +} + +fn windowsApiUpdateThreadRun() void { + var serialized_buffer: Serialized.Buffer = undefined; + + { + const resize_flag = wait(global_progress.initial_delay_ns); + if (@atomicLoad(bool, &global_progress.done, .seq_cst)) return; + maybeUpdateSize(resize_flag); const buffer = computeRedraw(&serialized_buffer); if (stderr_mutex.tryLock()) { @@ -455,17 +489,19 @@ fn updateThreadRun() void { 
while (true) { const resize_flag = wait(global_progress.refresh_rate_ns); - maybeUpdateSize(resize_flag); if (@atomicLoad(bool, &global_progress.done, .seq_cst)) { stderr_mutex.lock(); defer stderr_mutex.unlock(); - return clearTerminal(); + return clearWrittenWindowsApi() catch {}; } + maybeUpdateSize(resize_flag); + const buffer = computeRedraw(&serialized_buffer); if (stderr_mutex.tryLock()) { defer stderr_mutex.unlock(); + clearWrittenWindowsApi() catch return; write(buffer) catch return; } } @@ -476,7 +512,7 @@ fn updateThreadRun() void { /// During the lock, any `std.Progress` information is cleared from the terminal. pub fn lockStdErr() void { stderr_mutex.lock(); - clearTerminal(); + clearWrittenWithEscapeCodes() catch {}; } pub fn unlockStdErr() void { @@ -504,7 +540,7 @@ fn ipcThreadRun(fd: posix.fd_t) anyerror!void { _ = wait(global_progress.refresh_rate_ns); if (@atomicLoad(bool, &global_progress.done, .seq_cst)) - return clearTerminal(); + return; const serialized = serialize(&serialized_buffer); writeIpc(fd, serialized) catch |err| switch (err) { @@ -569,41 +605,36 @@ const TreeSymbol = enum { var max: usize = 0; inline for (@typeInfo(Encoding).Enum.fields) |field| { const len = symbol.bytes(@field(Encoding, field.name)).len; - if (len > max) max = len; + max = @max(max, len); } return max; } }; -fn appendTreeSymbol(comptime symbol: TreeSymbol, buf: []u8, start_i: usize) usize { - if (builtin.os.tag == .windows and global_progress.is_windows_terminal) { - const bytes = switch (global_progress.console_code_page) { - // Code page 437 is the default code page and contains the box drawing symbols - 437 => symbol.bytes(.code_page_437), - // UTF-8 - 65001 => symbol.bytes(.utf8), - // Fall back to ASCII approximation - else => symbol.bytes(.ascii), - }; - @memcpy(buf[start_i..][0..bytes.len], bytes); - return start_i + bytes.len; +fn appendTreeSymbol(symbol: TreeSymbol, buf: []u8, start_i: usize) usize { + switch (global_progress.terminal_mode) { + .off => unreachable, + .ansi_escape_codes => { + const bytes = symbol.escapeSeq(); + buf[start_i..][0..bytes.len].* = bytes.*; + return start_i + bytes.len; + }, + .windows_api => |windows_api| { + const bytes = if (!is_windows) unreachable else switch (windows_api.code_page) { + // Code page 437 is the default code page and contains the box drawing symbols + 437 => symbol.bytes(.code_page_437), + // UTF-8 + 65001 => symbol.bytes(.utf8), + // Fall back to ASCII approximation + else => symbol.bytes(.ascii), + }; + @memcpy(buf[start_i..][0..bytes.len], bytes); + return start_i + bytes.len; + }, } - - // Drawing the tree is disabled when ansi escape codes are not supported - assert(global_progress.supports_ansi_escape_codes); - - const bytes = symbol.escapeSeq(); - buf[start_i..][0..bytes.len].* = bytes.*; - return start_i + bytes.len; } -fn clearTerminal() void { - if (builtin.os.tag == .windows and global_progress.is_windows_terminal) { - return clearTerminalWindowsApi() catch { - global_progress.terminal = null; - }; - } - +fn clearWrittenWithEscapeCodes() anyerror!void { if (global_progress.written_newline_count == 0) return; var i: usize = 0; @@ -618,9 +649,7 @@ fn clearTerminal() void { i += finish_sync.len; global_progress.accumulated_newline_count = 0; - write(buf[0..i]) catch { - global_progress.terminal = null; - }; + try write(buf[0..i]); } fn computeClear(buf: []u8, start_i: usize) usize { @@ -645,7 +674,7 @@ fn computeClear(buf: []u8, start_i: usize) usize { /// U+25BA or ► const windows_api_start_marker = 0x25BA; -fn 
clearTerminalWindowsApi() error{Unexpected}!void { +fn clearWrittenWindowsApi() error{Unexpected}!void { // This uses a 'marker' strategy. The idea is: // - Always write a marker (in this case U+25BA or ►) at the beginning of the progress // - Get the current cursor position (at the end of the progress) @@ -667,7 +696,7 @@ fn clearTerminalWindowsApi() error{Unexpected}!void { // like any of the available attributes are invisible/benign. const prev_nl_n = global_progress.written_newline_count; if (prev_nl_n > 0) { - const handle = (global_progress.terminal orelse return).handle; + const handle = global_progress.terminal.handle; const screen_area = @as(windows.DWORD, global_progress.cols) * global_progress.rows; var console_info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined; @@ -777,14 +806,14 @@ const SavedMetadata = struct { nodes_len: u8, fn getIpcFd(metadata: SavedMetadata) posix.fd_t { - return if (builtin.os.tag == .windows) + return if (is_windows) @ptrFromInt(@as(usize, metadata.ipc_fd) << 2) else metadata.ipc_fd; } fn setIpcFd(fd: posix.fd_t) u16 { - return @intCast(if (builtin.os.tag == .windows) + return @intCast(if (is_windows) @shrExact(@intFromPtr(fd), 2) else fd); @@ -1019,35 +1048,21 @@ fn computeRedraw(serialized_buffer: *Serialized.Buffer) []u8 { var i: usize = 0; const buf = global_progress.draw_buffer; - if (global_progress.supports_ansi_escape_codes) { - buf[i..][0..start_sync.len].* = start_sync.*; - i += start_sync.len; - - i = computeClear(buf, i); - } else if (builtin.os.tag == .windows and global_progress.is_windows_terminal) { - clearTerminalWindowsApi() catch { - global_progress.terminal = null; - return buf[0..0]; - }; + buf[i..][0..start_sync.len].* = start_sync.*; + i += start_sync.len; - // Write the marker that we will use to find the beginning of the progress when clearing. - // Note: This doesn't have to use WriteConsoleW, but doing so avoids dealing with the code page. 
- var num_chars_written: windows.DWORD = undefined; - const handle = (global_progress.terminal orelse return buf[0..0]).handle; - if (windows.kernel32.WriteConsoleW(handle, &[_]u16{windows_api_start_marker}, 1, &num_chars_written, null) == 0) { - global_progress.terminal = null; - return buf[0..0]; - } + switch (global_progress.terminal_mode) { + .off => unreachable, + .ansi_escape_codes => i = computeClear(buf, i), + .windows_api => if (!is_windows) unreachable, } global_progress.accumulated_newline_count = 0; const root_node_index: Node.Index = @enumFromInt(0); i = computeNode(buf, i, serialized, children, root_node_index); - if (global_progress.supports_ansi_escape_codes) { - buf[i..][0..finish_sync.len].* = finish_sync.*; - i += finish_sync.len; - } + buf[i..][0..finish_sync.len].* = finish_sync.*; + i += finish_sync.len; return buf[0..i]; } @@ -1075,15 +1090,15 @@ fn computePrefix( buf[i..][0..prefix.len].* = prefix.*; i += prefix.len; } else { - const upper_bound_len = TreeSymbol.line.maxByteLen() + line_upper_bound_len; + const upper_bound_len = comptime (TreeSymbol.line.maxByteLen() + line_upper_bound_len); if (i + upper_bound_len > buf.len) return buf.len; i = appendTreeSymbol(.line, buf, i); } return i; } -const line_upper_bound_len = @max(TreeSymbol.tee.maxByteLen(), TreeSymbol.langle.maxByteLen()) + "[4294967296/4294967296] ".len + - Node.max_name_len + finish_sync.len; +const line_upper_bound_len = @max(TreeSymbol.tee.maxByteLen(), TreeSymbol.langle.maxByteLen()) + + "[4294967296/4294967296] ".len + Node.max_name_len + finish_sync.len; fn computeNode( buf: []u8, @@ -1157,8 +1172,7 @@ fn withinRowLimit(p: *Progress) bool { } fn write(buf: []const u8) anyerror!void { - const tty = global_progress.terminal orelse return; - try tty.writeAll(buf); + try global_progress.terminal.writeAll(buf); global_progress.written_newline_count = global_progress.accumulated_newline_count; } @@ -1218,23 +1232,23 @@ fn writeIpc(fd: posix.fd_t, serialized: Serialized) error{BrokenPipe}!void { fn maybeUpdateSize(resize_flag: bool) void { if (!resize_flag) return; - const fd = (global_progress.terminal orelse return).handle; + const fd = global_progress.terminal.handle; - if (builtin.os.tag == .windows) { + if (is_windows) { var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined; - if (windows.kernel32.GetConsoleScreenBufferInfo(fd, &info) == windows.FALSE) { + if (windows.kernel32.GetConsoleScreenBufferInfo(fd, &info) != windows.FALSE) { + // In the old Windows console, dwSize.Y is the line count of the + // entire scrollback buffer, so we use this instead so that we + // always get the size of the screen. + const screen_height = info.srWindow.Bottom - info.srWindow.Top; + global_progress.rows = @intCast(screen_height); + global_progress.cols = @intCast(info.dwSize.X); + } else { std.log.debug("failed to determine terminal size; using conservative guess 80x25", .{}); global_progress.rows = 25; global_progress.cols = 80; } - - // In the old Windows console, dwSize.Y is the line count of the entire - // scrollback buffer, so we use this instead so that we always get the - // size of the screen. - const screen_height = info.srWindow.Bottom - info.srWindow.Top; - global_progress.rows = @intCast(screen_height); - global_progress.cols = @intCast(info.dwSize.X); } else { var winsize: posix.winsize = .{ .ws_row = 0,