diff --git a/build.zig b/build.zig index 1d44c249cc51..5dcf8709e5bd 100644 --- a/build.zig +++ b/build.zig @@ -45,7 +45,7 @@ pub fn build(b: *std.Build) !void { }); const docgen_cmd = b.addRunArtifact(docgen_exe); - docgen_cmd.addArgs(&.{ "--zig", b.zig_exe }); + docgen_cmd.addArgs(&.{ "--zig", b.graph.zig_exe }); if (b.zig_lib_dir) |p| { docgen_cmd.addArg("--zig-lib-dir"); docgen_cmd.addDirectoryArg(p); @@ -884,7 +884,7 @@ fn findConfigH(b: *std.Build, config_h_path_option: ?[]const u8) ?[]const u8 { } } - var check_dir = fs.path.dirname(b.zig_exe).?; + var check_dir = fs.path.dirname(b.graph.zig_exe).?; while (true) { var dir = fs.cwd().openDir(check_dir, .{}) catch unreachable; defer dir.close(); diff --git a/deps/aro/build/GenerateDef.zig b/deps/aro/build/GenerateDef.zig index 5c0f8d4b18bd..c6e8615299f0 100644 --- a/deps/aro/build/GenerateDef.zig +++ b/deps/aro/build/GenerateDef.zig @@ -53,7 +53,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { const self = @fieldParentPtr(GenerateDef, "step", step); const arena = b.allocator; - var man = b.cache.obtain(); + var man = b.graph.cache.obtain(); defer man.deinit(); // Random bytes to make GenerateDef unique. Refresh this with new diff --git a/lib/build_runner.zig b/lib/build_runner.zig index 54186685d6c4..5c1daaaf426a 100644 --- a/lib/build_runner.zig +++ b/lib/build_runner.zig @@ -46,11 +46,6 @@ pub fn main() !void { return error.InvalidArgs; }; - const host: std.Build.ResolvedTarget = .{ - .query = .{}, - .result = try std.zig.system.resolveTargetQuery(.{}), - }; - const build_root_directory: std.Build.Cache.Directory = .{ .path = build_root, .handle = try std.fs.cwd().openDir(build_root, .{}), @@ -66,27 +61,29 @@ pub fn main() !void { .handle = try std.fs.cwd().makeOpenPath(global_cache_root, .{}), }; - var cache: std.Build.Cache = .{ - .gpa = arena, - .manifest_dir = try local_cache_directory.handle.makeOpenPath("h", .{}), + var graph: std.Build.Graph = .{ + .arena = arena, + .cache = .{ + .gpa = arena, + .manifest_dir = try local_cache_directory.handle.makeOpenPath("h", .{}), + }, + .zig_exe = zig_exe, + .env_map = try process.getEnvMap(arena), + .global_cache_root = global_cache_directory, }; - cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() }); - cache.addPrefix(build_root_directory); - cache.addPrefix(local_cache_directory); - cache.addPrefix(global_cache_directory); - cache.hash.addBytes(builtin.zig_version_string); + + graph.cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() }); + graph.cache.addPrefix(build_root_directory); + graph.cache.addPrefix(local_cache_directory); + graph.cache.addPrefix(global_cache_directory); + graph.cache.hash.addBytes(builtin.zig_version_string); const builder = try std.Build.create( - arena, - zig_exe, + &graph, build_root_directory, local_cache_directory, - global_cache_directory, - host, - &cache, dependencies.root_deps, ); - defer builder.destroy(); var targets = ArrayList([]const u8).init(arena); var debug_log_scopes = ArrayList([]const u8).init(arena); @@ -100,64 +97,67 @@ pub fn main() !void { var color: Color = .auto; var seed: u32 = 0; var prominent_compile_errors: bool = false; - - const stderr_stream = io.getStdErr().writer(); - const stdout_stream = io.getStdOut().writer(); + var help_menu: bool = false; + var steps_menu: bool = false; + var output_tmp_nonce: ?[16]u8 = null; while (nextArg(args, &arg_idx)) |arg| { - if (mem.startsWith(u8, arg, "-D")) { + if (mem.startsWith(u8, arg, "-Z")) { + if (arg.len != 18) fatalWithHint("bad argument: '{s}'", .{arg}); + 
output_tmp_nonce = arg[2..18].*; + } else if (mem.startsWith(u8, arg, "-D")) { const option_contents = arg[2..]; - if (option_contents.len == 0) { - std.debug.print("Expected option name after '-D'\n\n", .{}); - usageAndErr(builder, false, stderr_stream); - } + if (option_contents.len == 0) + fatalWithHint("expected option name after '-D'", .{}); if (mem.indexOfScalar(u8, option_contents, '=')) |name_end| { const option_name = option_contents[0..name_end]; const option_value = option_contents[name_end + 1 ..]; if (try builder.addUserInputOption(option_name, option_value)) - usageAndErr(builder, false, stderr_stream); + fatal(" access the help menu with 'zig build -h'", .{}); } else { if (try builder.addUserInputFlag(option_contents)) - usageAndErr(builder, false, stderr_stream); + fatal(" access the help menu with 'zig build -h'", .{}); } } else if (mem.startsWith(u8, arg, "-")) { if (mem.eql(u8, arg, "--verbose")) { builder.verbose = true; } else if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { - return usage(builder, false, stdout_stream); + help_menu = true; } else if (mem.eql(u8, arg, "-p") or mem.eql(u8, arg, "--prefix")) { - install_prefix = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; + install_prefix = nextArgOrFatal(args, &arg_idx); } else if (mem.eql(u8, arg, "-l") or mem.eql(u8, arg, "--list-steps")) { - return steps(builder, false, stdout_stream); - } else if (mem.eql(u8, arg, "--prefix-lib-dir")) { - dir_list.lib_dir = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); + steps_menu = true; + } else if (mem.startsWith(u8, arg, "-fsys=")) { + const name = arg["-fsys=".len..]; + graph.system_library_options.put(arena, name, .user_enabled) catch @panic("OOM"); + } else if (mem.startsWith(u8, arg, "-fno-sys=")) { + const name = arg["-fno-sys=".len..]; + graph.system_library_options.put(arena, name, .user_disabled) catch @panic("OOM"); + } else if (mem.eql(u8, arg, "--release")) { + builder.release_mode = .any; + } else if (mem.startsWith(u8, arg, "--release=")) { + const text = arg["--release=".len..]; + builder.release_mode = std.meta.stringToEnum(std.Build.ReleaseMode, text) orelse { + fatalWithHint("expected [off|any|fast|safe|small] in '{s}', found '{s}'", .{ + arg, text, + }); }; + } else if (mem.eql(u8, arg, "--host-target")) { + graph.host_query_options.arch_os_abi = nextArgOrFatal(args, &arg_idx); + } else if (mem.eql(u8, arg, "--host-cpu")) { + graph.host_query_options.cpu_features = nextArgOrFatal(args, &arg_idx); + } else if (mem.eql(u8, arg, "--host-dynamic-linker")) { + graph.host_query_options.dynamic_linker = nextArgOrFatal(args, &arg_idx); + } else if (mem.eql(u8, arg, "--prefix-lib-dir")) { + dir_list.lib_dir = nextArgOrFatal(args, &arg_idx); } else if (mem.eql(u8, arg, "--prefix-exe-dir")) { - dir_list.exe_dir = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; + dir_list.exe_dir = nextArgOrFatal(args, &arg_idx); } else if (mem.eql(u8, arg, "--prefix-include-dir")) { - dir_list.include_dir = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; + dir_list.include_dir = nextArgOrFatal(args, &arg_idx); } else if (mem.eql(u8, arg, "--sysroot")) { - const sysroot = nextArg(args, 
&arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; - builder.sysroot = sysroot; + builder.sysroot = nextArgOrFatal(args, &arg_idx); } else if (mem.eql(u8, arg, "--maxrss")) { - const max_rss_text = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; + const max_rss_text = nextArgOrFatal(args, &arg_idx); max_rss = std.fmt.parseIntSizeSuffix(max_rss_text, 10) catch |err| { std.debug.print("invalid byte size: '{s}': {s}\n", .{ max_rss_text, @errorName(err), @@ -167,66 +167,50 @@ pub fn main() !void { } else if (mem.eql(u8, arg, "--skip-oom-steps")) { skip_oom_steps = true; } else if (mem.eql(u8, arg, "--search-prefix")) { - const search_prefix = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; + const search_prefix = nextArgOrFatal(args, &arg_idx); builder.addSearchPrefix(search_prefix); } else if (mem.eql(u8, arg, "--libc")) { - const libc_file = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; - builder.libc_file = libc_file; + builder.libc_file = nextArgOrFatal(args, &arg_idx); } else if (mem.eql(u8, arg, "--color")) { - const next_arg = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected [auto|on|off] after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; + const next_arg = nextArg(args, &arg_idx) orelse + fatalWithHint("expected [auto|on|off] after '{s}'", .{arg}); color = std.meta.stringToEnum(Color, next_arg) orelse { - std.debug.print("Expected [auto|on|off] after {s}, found '{s}'\n\n", .{ arg, next_arg }); - usageAndErr(builder, false, stderr_stream); + fatalWithHint("expected [auto|on|off] after '{s}', found '{s}'", .{ + arg, next_arg, + }); }; } else if (mem.eql(u8, arg, "--summary")) { - const next_arg = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected [all|failures|none] after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; + const next_arg = nextArg(args, &arg_idx) orelse + fatalWithHint("expected [all|failures|none] after '{s}'", .{arg}); summary = std.meta.stringToEnum(Summary, next_arg) orelse { - std.debug.print("Expected [all|failures|none] after {s}, found '{s}'\n\n", .{ arg, next_arg }); - usageAndErr(builder, false, stderr_stream); + fatalWithHint("expected [all|failures|none] after '{s}', found '{s}'", .{ + arg, next_arg, + }); }; } else if (mem.eql(u8, arg, "--zig-lib-dir")) { - builder.zig_lib_dir = .{ .cwd_relative = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - } }; + builder.zig_lib_dir = .{ .cwd_relative = nextArgOrFatal(args, &arg_idx) }; } else if (mem.eql(u8, arg, "--seed")) { - const next_arg = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected u32 after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; + const next_arg = nextArg(args, &arg_idx) orelse + fatalWithHint("expected u32 after '{s}'", .{arg}); seed = std.fmt.parseUnsigned(u32, next_arg, 0) catch |err| { - std.debug.print("unable to parse seed '{s}' as 32-bit integer: {s}\n", .{ + fatal("unable to parse seed '{s}' as 32-bit integer: {s}\n", .{ next_arg, @errorName(err), }); - process.exit(1); }; } else if (mem.eql(u8, arg, "--debug-log")) { - const 
next_arg = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; + const next_arg = nextArgOrFatal(args, &arg_idx); try debug_log_scopes.append(next_arg); } else if (mem.eql(u8, arg, "--debug-pkg-config")) { builder.debug_pkg_config = true; } else if (mem.eql(u8, arg, "--debug-compile-errors")) { builder.debug_compile_errors = true; + } else if (mem.eql(u8, arg, "--system")) { + // The usage text shows another argument after this parameter + // but it is handled by the parent process. The build runner + // only sees this flag. + graph.system_package_mode = true; } else if (mem.eql(u8, arg, "--glibc-runtimes")) { - builder.glibc_runtimes_dir = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; + builder.glibc_runtimes_dir = nextArgOrFatal(args, &arg_idx); } else if (mem.eql(u8, arg, "--verbose-link")) { builder.verbose_link = true; } else if (mem.eql(u8, arg, "--verbose-air")) { @@ -292,19 +276,26 @@ pub fn main() !void { builder.args = argsRest(args, arg_idx); break; } else { - std.debug.print("Unrecognized argument: {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); + fatalWithHint("unrecognized argument: '{s}'", .{arg}); } } else { try targets.append(arg); } } + const host_query = std.Build.parseTargetQuery(graph.host_query_options) catch |err| switch (err) { + error.ParseFailed => process.exit(1), + }; + builder.host = .{ + .query = .{}, + .result = try std.zig.system.resolveTargetQuery(host_query), + }; + const stderr = std.io.getStdErr(); const ttyconf = get_tty_conf(color, stderr); switch (ttyconf) { - .no_color => try builder.env_map.put("NO_COLOR", "1"), - .escape_codes => try builder.env_map.put("YES_COLOR", "1"), + .no_color => try graph.env_map.put("NO_COLOR", "1"), + .escape_codes => try graph.env_map.put("YES_COLOR", "1"), .windows_api => {}, } @@ -319,8 +310,39 @@ pub fn main() !void { try builder.runBuild(root); } - if (builder.validateUserInputDidItFail()) - usageAndErr(builder, true, stderr_stream); + if (graph.needed_lazy_dependencies.entries.len != 0) { + var buffer: std.ArrayListUnmanaged(u8) = .{}; + for (graph.needed_lazy_dependencies.keys()) |k| { + try buffer.appendSlice(arena, k); + try buffer.append(arena, '\n'); + } + const s = std.fs.path.sep_str; + const tmp_sub_path = "tmp" ++ s ++ (output_tmp_nonce orelse fatal("missing -Z arg", .{})); + local_cache_directory.handle.writeFile2(.{ + .sub_path = tmp_sub_path, + .data = buffer.items, + .flags = .{ .exclusive = true }, + }) catch |err| { + fatal("unable to write configuration results to '{}{s}': {s}", .{ + local_cache_directory, tmp_sub_path, @errorName(err), + }); + }; + process.exit(3); // Indicate configure phase failed with meaningful stdout. + } + + if (builder.validateUserInputDidItFail()) { + fatal(" access the help menu with 'zig build -h'", .{}); + } + + validateSystemLibraryOptions(builder); + + const stdout_writer = io.getStdOut().writer(); + + if (help_menu) + return usage(builder, stdout_writer); + + if (steps_menu) + return steps(builder, stdout_writer); var run: Run = .{ .max_rss = max_rss, @@ -389,7 +411,7 @@ fn runStepNames( for (0..step_names.len) |i| { const step_name = step_names[step_names.len - i - 1]; const s = b.top_level_steps.get(step_name) orelse { - std.debug.print("no step named '{s}'. 
Access the help menu with 'zig build -h'\n", .{step_name}); + std.debug.print("no step named '{s}'\n access the help menu with 'zig build -h'\n", .{step_name}); process.exit(1); }; step_stack.putAssumeCapacity(&s.step, {}); @@ -1037,13 +1059,7 @@ fn printErrorMessages(b: *std.Build, failing_step: *Step, run: *const Run) !void } } -fn steps(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !void { - // run the build script to collect the options - if (!already_ran_build) { - builder.resolveInstallPrefix(null, .{}); - try builder.runBuild(root); - } - +fn steps(builder: *std.Build, out_stream: anytype) !void { const allocator = builder.allocator; for (builder.top_level_steps.values()) |top_level_step| { const name = if (&top_level_step.step == builder.default_step) @@ -1054,33 +1070,25 @@ fn steps(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !voi } } -fn usage(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !void { - // run the build script to collect the options - if (!already_ran_build) { - builder.resolveInstallPrefix(null, .{}); - try builder.runBuild(root); - } - +fn usage(b: *std.Build, out_stream: anytype) !void { try out_stream.print( - \\ \\Usage: {s} build [steps] [options] \\ \\Steps: \\ - , .{builder.zig_exe}); - try steps(builder, true, out_stream); + , .{b.graph.zig_exe}); + try steps(b, out_stream); try out_stream.writeAll( \\ \\General Options: - \\ -p, --prefix [path] Override default install prefix - \\ --prefix-lib-dir [path] Override default library directory path - \\ --prefix-exe-dir [path] Override default executable directory path - \\ --prefix-include-dir [path] Override default include directory path + \\ -p, --prefix [path] Where to install files (default: zig-out) + \\ --prefix-lib-dir [path] Where to install libraries + \\ --prefix-exe-dir [path] Where to install executables + \\ --prefix-include-dir [path] Where to install C header files \\ - \\ --sysroot [path] Set the system root directory (usually /) - \\ --search-prefix [path] Add a path to look for binaries, libraries, headers - \\ --libc [file] Provide a file which specifies libc paths + \\ --release[=mode] Request release mode, optionally specifying a + \\ preferred optimization mode: fast, safe, small \\ \\ -fdarling, -fno-darling Integration with system-installed Darling to \\ execute macOS programs on Linux hosts @@ -1116,16 +1124,15 @@ fn usage(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !voi \\ ); - const allocator = builder.allocator; - if (builder.available_options_list.items.len == 0) { + const arena = b.allocator; + if (b.available_options_list.items.len == 0) { try out_stream.print(" (none)\n", .{}); } else { - for (builder.available_options_list.items) |option| { - const name = try fmt.allocPrint(allocator, " -D{s}=[{s}]", .{ + for (b.available_options_list.items) |option| { + const name = try fmt.allocPrint(arena, " -D{s}=[{s}]", .{ option.name, @tagName(option.type_id), }); - defer allocator.free(name); try out_stream.print("{s:<30} {s}\n", .{ name, option.description }); if (option.enum_options) |enum_options| { const padding = " " ** 33; @@ -1137,6 +1144,37 @@ fn usage(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !voi } } + try out_stream.writeAll( + \\ + \\System Integration Options: + \\ --search-prefix [path] Add a path to look for binaries, libraries, headers + \\ --sysroot [path] Set the system root directory (usually /) + \\ --libc [file] Provide a file which specifies libc paths + 
\\ + \\ --host-target [triple] Use the provided target as the host + \\ --host-cpu [cpu] Use the provided CPU as the host + \\ --host-dynamic-linker [path] Use the provided dynamic linker as the host + \\ + \\ --system [pkgdir] Disable package fetching; enable all integrations + \\ -fsys=[name] Enable a system integration + \\ -fno-sys=[name] Disable a system integration + \\ + \\ Available System Integrations: Enabled: + \\ + ); + if (b.graph.system_library_options.entries.len == 0) { + try out_stream.writeAll(" (none) -\n"); + } else { + for (b.graph.system_library_options.keys(), b.graph.system_library_options.values()) |k, v| { + const status = switch (v) { + .declared_enabled => "yes", + .declared_disabled => "no", + .user_enabled, .user_disabled => unreachable, // already emitted error + }; + try out_stream.print(" {s:<43} {s}\n", .{ k, status }); + } + } + try out_stream.writeAll( \\ \\Advanced Options: @@ -1161,17 +1199,19 @@ fn usage(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !voi ); } -fn usageAndErr(builder: *std.Build, already_ran_build: bool, out_stream: anytype) noreturn { - usage(builder, already_ran_build, out_stream) catch {}; - process.exit(1); -} - fn nextArg(args: [][:0]const u8, idx: *usize) ?[:0]const u8 { if (idx.* >= args.len) return null; defer idx.* += 1; return args[idx.*]; } +fn nextArgOrFatal(args: [][:0]const u8, idx: *usize) [:0]const u8 { + return nextArg(args, idx) orelse { + std.debug.print("expected argument after '{s}'\n access the help menu with 'zig build -h'\n", .{args[idx.*]}); + process.exit(1); + }; +} + fn argsRest(args: [][:0]const u8, idx: usize) ?[][:0]const u8 { if (idx >= args.len) return null; return args[idx..]; @@ -1202,3 +1242,32 @@ fn renderOptions(ttyconf: std.io.tty.Config) std.zig.ErrorBundle.RenderOptions { .include_reference_trace = ttyconf != .no_color, }; } + +fn fatalWithHint(comptime f: []const u8, args: anytype) noreturn { + std.debug.print(f ++ "\n access the help menu with 'zig build -h'\n", args); + process.exit(1); +} + +fn fatal(comptime f: []const u8, args: anytype) noreturn { + std.debug.print(f ++ "\n", args); + process.exit(1); +} + +fn validateSystemLibraryOptions(b: *std.Build) void { + var bad = false; + for (b.graph.system_library_options.keys(), b.graph.system_library_options.values()) |k, v| { + switch (v) { + .user_disabled, .user_enabled => { + // The user tried to enable or disable a system library integration, but + // the build script did not recognize that option. + std.debug.print("system library name not recognized by build script: '{s}'\n", .{k}); + bad = true; + }, + .declared_disabled, .declared_enabled => {}, + } + } + if (bad) { + std.debug.print(" access the help menu with 'zig build -h'\n", .{}); + process.exit(1); + } +} diff --git a/lib/std/Build.zig b/lib/std/Build.zig index 59dee7d89db2..d962a82d89c4 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -22,6 +22,8 @@ pub const Cache = @import("Build/Cache.zig"); pub const Step = @import("Build/Step.zig"); pub const Module = @import("Build/Module.zig"); +/// Shared state among all Build instances. 
+graph: *Graph, install_tls: TopLevelStep, uninstall_tls: TopLevelStep, allocator: Allocator, @@ -38,9 +40,7 @@ verbose_cimport: bool, verbose_llvm_cpu_features: bool, reference_trace: ?u32 = null, invalid_user_input: bool, -zig_exe: [:0]const u8, default_step: *Step, -env_map: *EnvMap, top_level_steps: std.StringArrayHashMapUnmanaged(*TopLevelStep), install_prefix: []const u8, dest_dir: ?[]const u8, @@ -49,14 +49,12 @@ exe_dir: []const u8, h_dir: []const u8, install_path: []const u8, sysroot: ?[]const u8 = null, -search_prefixes: ArrayList([]const u8), +search_prefixes: std.ArrayListUnmanaged([]const u8), libc_file: ?[]const u8 = null, installed_files: ArrayList(InstalledFile), /// Path to the directory containing build.zig. build_root: Cache.Directory, cache_root: Cache.Directory, -global_cache_root: Cache.Directory, -cache: *Cache, zig_lib_dir: ?LazyPath, pkg_config_pkg_list: ?(PkgConfigError![]const PkgConfigPkg) = null, args: ?[][]const u8 = null, @@ -98,8 +96,47 @@ initialized_deps: *InitializedDepMap, /// A mapping from dependency names to package hashes. available_deps: AvailableDeps, +release_mode: ReleaseMode, + +pub const ReleaseMode = enum { + off, + any, + fast, + safe, + small, +}; + +/// Shared state among all Build instances. +/// Settings that are here rather than in Build are not configurable per-package. +pub const Graph = struct { + arena: Allocator, + system_library_options: std.StringArrayHashMapUnmanaged(SystemLibraryMode) = .{}, + system_package_mode: bool = false, + cache: Cache, + zig_exe: [:0]const u8, + env_map: EnvMap, + global_cache_root: Cache.Directory, + host_query_options: std.Target.Query.ParseOptions = .{}, + needed_lazy_dependencies: std.StringArrayHashMapUnmanaged(void) = .{}, +}; + const AvailableDeps = []const struct { []const u8, []const u8 }; +const SystemLibraryMode = enum { + /// User asked for the library to be disabled. + /// The build runner has not confirmed whether the setting is recognized yet. + user_disabled, + /// User asked for the library to be enabled. + /// The build runner has not confirmed whether the setting is recognized yet. + user_enabled, + /// The build runner has confirmed that this setting is recognized. + /// System integration with this library has been resolved to off. + declared_disabled, + /// The build runner has confirmed that this setting is recognized. + /// System integration with this library has been resolved to on. 
+ declared_enabled, +}; + const InitializedDepMap = std.HashMap(InitializedDepKey, *Dependency, InitializedDepContext, std.hash_map.default_max_load_percentage); const InitializedDepKey = struct { build_root_string: []const u8, @@ -208,28 +245,20 @@ pub const DirList = struct { }; pub fn create( - allocator: Allocator, - zig_exe: [:0]const u8, + graph: *Graph, build_root: Cache.Directory, cache_root: Cache.Directory, - global_cache_root: Cache.Directory, - host: ResolvedTarget, - cache: *Cache, available_deps: AvailableDeps, ) !*Build { - const env_map = try allocator.create(EnvMap); - env_map.* = try process.getEnvMap(allocator); + const arena = graph.arena; + const initialized_deps = try arena.create(InitializedDepMap); + initialized_deps.* = InitializedDepMap.initContext(arena, .{ .allocator = arena }); - const initialized_deps = try allocator.create(InitializedDepMap); - initialized_deps.* = InitializedDepMap.initContext(allocator, .{ .allocator = allocator }); - - const self = try allocator.create(Build); + const self = try arena.create(Build); self.* = .{ - .zig_exe = zig_exe, + .graph = graph, .build_root = build_root, .cache_root = cache_root, - .global_cache_root = global_cache_root, - .cache = cache, .verbose = false, .verbose_link = false, .verbose_cc = false, @@ -239,20 +268,19 @@ pub fn create( .verbose_cimport = false, .verbose_llvm_cpu_features = false, .invalid_user_input = false, - .allocator = allocator, - .user_input_options = UserInputOptionsMap.init(allocator), - .available_options_map = AvailableOptionsMap.init(allocator), - .available_options_list = ArrayList(AvailableOption).init(allocator), + .allocator = arena, + .user_input_options = UserInputOptionsMap.init(arena), + .available_options_map = AvailableOptionsMap.init(arena), + .available_options_list = ArrayList(AvailableOption).init(arena), .top_level_steps = .{}, .default_step = undefined, - .env_map = env_map, - .search_prefixes = ArrayList([]const u8).init(allocator), + .search_prefixes = .{}, .install_prefix = undefined, .lib_dir = undefined, .exe_dir = undefined, .h_dir = undefined, - .dest_dir = env_map.get("DESTDIR"), - .installed_files = ArrayList(InstalledFile).init(allocator), + .dest_dir = graph.env_map.get("DESTDIR"), + .installed_files = ArrayList(InstalledFile).init(arena), .install_tls = .{ .step = Step.init(.{ .id = .top_level, @@ -273,14 +301,15 @@ pub fn create( .zig_lib_dir = null, .install_path = undefined, .args = null, - .host = host, - .modules = std.StringArrayHashMap(*Module).init(allocator), - .named_writefiles = std.StringArrayHashMap(*Step.WriteFile).init(allocator), + .host = undefined, + .modules = std.StringArrayHashMap(*Module).init(arena), + .named_writefiles = std.StringArrayHashMap(*Step.WriteFile).init(arena), .initialized_deps = initialized_deps, .available_deps = available_deps, + .release_mode = .off, }; - try self.top_level_steps.put(allocator, self.install_tls.step.name, &self.install_tls); - try self.top_level_steps.put(allocator, self.uninstall_tls.step.name, &self.uninstall_tls); + try self.top_level_steps.put(arena, self.install_tls.step.name, &self.install_tls); + try self.top_level_steps.put(arena, self.uninstall_tls.step.name, &self.uninstall_tls); self.default_step = &self.install_tls.step; return self; } @@ -297,10 +326,17 @@ fn createChild( return child; } -fn createChildOnly(parent: *Build, dep_name: []const u8, build_root: Cache.Directory, pkg_deps: AvailableDeps, user_input_options: UserInputOptionsMap) !*Build { +fn createChildOnly( + parent: *Build, + 
dep_name: []const u8, + build_root: Cache.Directory, + pkg_deps: AvailableDeps, + user_input_options: UserInputOptionsMap, +) !*Build { const allocator = parent.allocator; const child = try allocator.create(Build); child.* = .{ + .graph = parent.graph, .allocator = allocator, .install_tls = .{ .step = Step.init(.{ @@ -332,9 +368,7 @@ fn createChildOnly(parent: *Build, dep_name: []const u8, build_root: Cache.Direc .verbose_llvm_cpu_features = parent.verbose_llvm_cpu_features, .reference_trace = parent.reference_trace, .invalid_user_input = false, - .zig_exe = parent.zig_exe, .default_step = undefined, - .env_map = parent.env_map, .top_level_steps = .{}, .install_prefix = undefined, .dest_dir = parent.dest_dir, @@ -348,8 +382,6 @@ fn createChildOnly(parent: *Build, dep_name: []const u8, build_root: Cache.Direc .installed_files = ArrayList(InstalledFile).init(allocator), .build_root = build_root, .cache_root = parent.cache_root, - .global_cache_root = parent.global_cache_root, - .cache = parent.cache, .zig_lib_dir = parent.zig_lib_dir, .debug_log_scopes = parent.debug_log_scopes, .debug_compile_errors = parent.debug_compile_errors, @@ -366,6 +398,7 @@ fn createChildOnly(parent: *Build, dep_name: []const u8, build_root: Cache.Direc .named_writefiles = std.StringArrayHashMap(*Step.WriteFile).init(allocator), .initialized_deps = parent.initialized_deps, .available_deps = pkg_deps, + .release_mode = parent.release_mode, }; try child.top_level_steps.put(allocator, child.install_tls.step.name, &child.install_tls); try child.top_level_steps.put(allocator, child.uninstall_tls.step.name, &child.uninstall_tls); @@ -543,7 +576,7 @@ fn hashUserInputOptionsMap(allocator: Allocator, user_input_options: UserInputOp fn determineAndApplyInstallPrefix(b: *Build) !void { // Create an installation directory local to this package. This will be used when // dependant packages require a standard prefix, such as include directories for C headers. - var hash = b.cache.hash; + var hash = b.graph.cache.hash; // Random bytes to make unique. Refresh this with new random bytes when // implementation is modified in a non-backwards-compatible way. hash.add(@as(u32, 0xd8cb0055)); @@ -558,12 +591,6 @@ fn determineAndApplyInstallPrefix(b: *Build) !void { b.resolveInstallPrefix(install_prefix, .{}); } -pub fn destroy(b: *Build) void { - b.env_map.deinit(); - b.top_level_steps.deinit(b.allocator); - b.allocator.destroy(b); -} - /// This function is intended to be called by lib/build_runner.zig, not a build.zig file. 
pub fn resolveInstallPrefix(self: *Build, install_prefix: ?[]const u8, dir_list: DirList) void { if (self.dest_dir) |dest_dir| { @@ -1216,20 +1243,33 @@ pub const StandardOptimizeOptionOptions = struct { preferred_optimize_mode: ?std.builtin.OptimizeMode = null, }; -pub fn standardOptimizeOption(self: *Build, options: StandardOptimizeOptionOptions) std.builtin.OptimizeMode { +pub fn standardOptimizeOption(b: *Build, options: StandardOptimizeOptionOptions) std.builtin.OptimizeMode { if (options.preferred_optimize_mode) |mode| { - if (self.option(bool, "release", "optimize for end users") orelse false) { + if (b.option(bool, "release", "optimize for end users") orelse (b.release_mode != .off)) { return mode; } else { return .Debug; } - } else { - return self.option( - std.builtin.OptimizeMode, - "optimize", - "Prioritize performance, safety, or binary size (-O flag)", - ) orelse .Debug; } + + if (b.option( + std.builtin.OptimizeMode, + "optimize", + "Prioritize performance, safety, or binary size", + )) |mode| { + return mode; + } + + return switch (b.release_mode) { + .off => .Debug, + .any => { + std.debug.print("the project does not declare a preferred optimization mode. choose: --release=fast, --release=safe, or --release=small\n", .{}); + process.exit(1); + }, + .fast => .ReleaseFast, + .safe => .ReleaseSafe, + .small => .ReleaseSmall, + }; } pub const StandardTargetOptionsArgs = struct { @@ -1244,67 +1284,83 @@ pub fn standardTargetOptions(b: *Build, args: StandardTargetOptionsArgs) Resolve return b.resolveTargetQuery(query); } -/// Exposes standard `zig build` options for choosing a target. -pub fn standardTargetOptionsQueryOnly(b: *Build, args: StandardTargetOptionsArgs) Target.Query { - const maybe_triple = b.option( - []const u8, - "target", - "The CPU architecture, OS, and ABI to build for", - ); - const mcpu = b.option([]const u8, "cpu", "Target CPU features to add or subtract"); - - if (maybe_triple == null and mcpu == null) { - return args.default_target; - } - - const triple = maybe_triple orelse "native"; - +pub fn parseTargetQuery(options: std.Target.Query.ParseOptions) error{ParseFailed}!std.Target.Query { var diags: Target.Query.ParseOptions.Diagnostics = .{}; - const selected_target = Target.Query.parse(.{ - .arch_os_abi = triple, - .cpu_features = mcpu, - .diagnostics = &diags, - }) catch |err| switch (err) { + var opts_copy = options; + opts_copy.diagnostics = &diags; + return std.Target.Query.parse(options) catch |err| switch (err) { error.UnknownCpuModel => { - log.err("Unknown CPU: '{s}'\nAvailable CPUs for architecture '{s}':", .{ - diags.cpu_name.?, - @tagName(diags.arch.?), + std.debug.print("unknown CPU: '{s}'\navailable CPUs for architecture '{s}':\n", .{ + diags.cpu_name.?, @tagName(diags.arch.?), }); for (diags.arch.?.allCpuModels()) |cpu| { - log.err(" {s}", .{cpu.name}); + std.debug.print(" {s}\n", .{cpu.name}); } - b.markInvalidUserInput(); - return args.default_target; + return error.ParseFailed; }, error.UnknownCpuFeature => { - log.err( - \\Unknown CPU feature: '{s}' - \\Available CPU features for architecture '{s}': + std.debug.print( + \\unknown CPU feature: '{s}' + \\available CPU features for architecture '{s}': \\ , .{ diags.unknown_feature_name.?, @tagName(diags.arch.?), }); for (diags.arch.?.allFeaturesList()) |feature| { - log.err(" {s}: {s}", .{ feature.name, feature.description }); + std.debug.print(" {s}: {s}\n", .{ feature.name, feature.description }); } - b.markInvalidUserInput(); - return args.default_target; + return error.ParseFailed; }, 
error.UnknownOperatingSystem => { - log.err( - \\Unknown OS: '{s}' - \\Available operating systems: + std.debug.print( + \\unknown OS: '{s}' + \\available operating systems: \\ , .{diags.os_name.?}); inline for (std.meta.fields(Target.Os.Tag)) |field| { - log.err(" {s}", .{field.name}); + std.debug.print(" {s}\n", .{field.name}); } - b.markInvalidUserInput(); - return args.default_target; + return error.ParseFailed; }, else => |e| { - log.err("Unable to parse target '{s}': {s}\n", .{ triple, @errorName(e) }); + std.debug.print("unable to parse target '{s}': {s}\n", .{ + options.arch_os_abi, @errorName(e), + }); + return error.ParseFailed; + }, + }; +} + +/// Exposes standard `zig build` options for choosing a target. +pub fn standardTargetOptionsQueryOnly(b: *Build, args: StandardTargetOptionsArgs) Target.Query { + const maybe_triple = b.option( + []const u8, + "target", + "The CPU architecture, OS, and ABI to build for", + ); + const mcpu = b.option( + []const u8, + "cpu", + "Target CPU features to add or subtract", + ); + const dynamic_linker = b.option( + []const u8, + "dynamic-linker", + "Path to interpreter on the target system", + ); + + if (maybe_triple == null and mcpu == null and dynamic_linker == null) + return args.default_target; + + const triple = maybe_triple orelse "native"; + + const selected_target = parseTargetQuery(.{ + .arch_os_abi = triple, + .cpu_features = mcpu, + .dynamic_linker = dynamic_linker, + }) catch |err| switch (err) { + error.ParseFailed => { b.markInvalidUserInput(); return args.default_target; }, @@ -1367,7 +1423,7 @@ pub fn addUserInputOption(self: *Build, name_raw: []const u8, value_raw: []const }); }, .flag => { - log.warn("Option '-D{s}={s}' conflicts with flag '-D{s}'.", .{ name, value, name }); + log.warn("option '-D{s}={s}' conflicts with flag '-D{s}'.", .{ name, value, name }); return true; }, .map => |*map| { @@ -1427,17 +1483,17 @@ fn markInvalidUserInput(self: *Build) void { self.invalid_user_input = true; } -pub fn validateUserInputDidItFail(self: *Build) bool { - // make sure all args are used - var it = self.user_input_options.iterator(); +pub fn validateUserInputDidItFail(b: *Build) bool { + // Make sure all args are used. 
+ var it = b.user_input_options.iterator(); while (it.next()) |entry| { if (!entry.value_ptr.used) { - log.err("Invalid option: -D{s}", .{entry.key_ptr.*}); - self.markInvalidUserInput(); + log.err("invalid option: -D{s}", .{entry.key_ptr.*}); + b.markInvalidUserInput(); } } - return self.invalid_user_input; + return b.invalid_user_input; } fn allocPrintCmd(ally: Allocator, opt_cwd: ?[]const u8, argv: []const []const u8) ![]u8 { @@ -1593,7 +1649,7 @@ pub fn findProgram(self: *Build, names: []const []const u8, paths: []const []con return fs.realpathAlloc(self.allocator, full_path) catch continue; } } - if (self.env_map.get("PATH")) |PATH| { + if (self.graph.env_map.get("PATH")) |PATH| { for (names) |name| { if (fs.path.isAbsolute(name)) { return name; @@ -1639,7 +1695,7 @@ pub fn runAllowFail( child.stdin_behavior = .Ignore; child.stdout_behavior = .Pipe; child.stderr_behavior = stderr_behavior; - child.env_map = self.env_map; + child.env_map = &self.graph.env_map; try child.spawn(); @@ -1685,8 +1741,8 @@ pub fn run(b: *Build, argv: []const []const u8) []u8 { }; } -pub fn addSearchPrefix(self: *Build, search_prefix: []const u8) void { - self.search_prefixes.append(self.dupePath(search_prefix)) catch @panic("OOM"); +pub fn addSearchPrefix(b: *Build, search_prefix: []const u8) void { + b.search_prefixes.append(b.allocator, b.dupePath(search_prefix)) catch @panic("OOM"); } pub fn getInstallPath(self: *Build, dir: InstallDir, dest_rel_path: []const u8) []const u8 { @@ -1747,21 +1803,63 @@ pub const Dependency = struct { } }; -pub fn dependency(b: *Build, name: []const u8, args: anytype) *Dependency { +fn findPkgHashOrFatal(b: *Build, name: []const u8) []const u8 { + for (b.available_deps) |dep| { + if (mem.eql(u8, dep[0], name)) return dep[1]; + } + + const full_path = b.pathFromRoot("build.zig.zon"); + std.debug.panic("no dependency named '{s}' in '{s}'. All packages used in build.zig must be declared in this file", .{ name, full_path }); +} + +fn markNeededLazyDep(b: *Build, pkg_hash: []const u8) void { + b.graph.needed_lazy_dependencies.put(b.graph.arena, pkg_hash, {}) catch @panic("OOM"); +} + +/// When this function is called, it means that the current build does, in +/// fact, require this dependency. If the dependency is already fetched, it +/// proceeds in the same manner as `dependency`. However if the dependency was +/// not fetched, then when the build script is finished running, the build will +/// not proceed to the make phase. Instead, the parent process will +/// additionally fetch all the lazy dependencies that were actually required by +/// running the build script, rebuild the build script, and then run it again. +/// In other words, if this function returns `null` it means that the only +/// purpose of completing the configure phase is to find out all the other lazy +/// dependencies that are also required. +/// It is allowed to use this function for non-lazy dependencies, in which case +/// it will never return `null`. This allows toggling laziness via +/// build.zig.zon without changing build.zig logic. +pub fn lazyDependency(b: *Build, name: []const u8, args: anytype) ?*Dependency { const build_runner = @import("root"); const deps = build_runner.dependencies; + const pkg_hash = findPkgHashOrFatal(b, name); - const pkg_hash = for (b.available_deps) |dep| { - if (mem.eql(u8, dep[0], name)) break dep[1]; - } else { - const full_path = b.pathFromRoot("build.zig.zon"); - std.debug.print("no dependency named '{s}' in '{s}'. 
All packages used in build.zig must be declared in this file.\n", .{ name, full_path }); - process.exit(1); - }; + inline for (@typeInfo(deps.packages).Struct.decls) |decl| { + if (mem.eql(u8, decl.name, pkg_hash)) { + const pkg = @field(deps.packages, decl.name); + const available = !@hasDecl(pkg, "available") or pkg.available; + if (!available) { + markNeededLazyDep(b, pkg_hash); + return null; + } + return dependencyInner(b, name, pkg.build_root, if (@hasDecl(pkg, "build_zig")) pkg.build_zig else null, pkg.deps, args); + } + } + + unreachable; // Bad @dependencies source +} + +pub fn dependency(b: *Build, name: []const u8, args: anytype) *Dependency { + const build_runner = @import("root"); + const deps = build_runner.dependencies; + const pkg_hash = findPkgHashOrFatal(b, name); inline for (@typeInfo(deps.packages).Struct.decls) |decl| { if (mem.eql(u8, decl.name, pkg_hash)) { const pkg = @field(deps.packages, decl.name); + if (@hasDecl(pkg, "available")) { + std.debug.panic("dependency '{s}{s}' is marked as lazy in build.zig.zon which means it must use the lazyDependency function instead", .{ b.dep_prefix, name }); + } return dependencyInner(b, name, pkg.build_root, if (@hasDecl(pkg, "build_zig")) pkg.build_zig else null, pkg.deps, args); } } @@ -2281,9 +2379,14 @@ pub const ResolvedTarget = struct { /// Converts a target query into a fully resolved target that can be passed to /// various parts of the API. pub fn resolveTargetQuery(b: *Build, query: Target.Query) ResolvedTarget { - // This context will likely be required in the future when the target is - // resolved via a WASI API or via the build protocol. - _ = b; + if (query.isNative()) { + var adjusted = b.host; + if (query.ofmt) |ofmt| { + adjusted.query.ofmt = ofmt; + adjusted.result.ofmt = ofmt; + } + return adjusted; + } return .{ .query = query, @@ -2296,6 +2399,40 @@ pub fn wantSharedLibSymLinks(target: Target) bool { return target.os.tag != .windows; } +pub const SystemIntegrationOptionConfig = struct { + /// If left as null, then the default will depend on system_package_mode. 
+ default: ?bool = null, +}; + +pub fn systemIntegrationOption( + b: *Build, + name: []const u8, + config: SystemIntegrationOptionConfig, +) bool { + const gop = b.graph.system_library_options.getOrPut(b.allocator, name) catch @panic("OOM"); + if (gop.found_existing) switch (gop.value_ptr.*) { + .user_disabled => { + gop.value_ptr.* = .declared_disabled; + return false; + }, + .user_enabled => { + gop.value_ptr.* = .declared_enabled; + return true; + }, + .declared_disabled => return false, + .declared_enabled => return true, + } else { + gop.key_ptr.* = b.dupe(name); + if (config.default orelse b.graph.system_package_mode) { + gop.value_ptr.* = .declared_enabled; + return true; + } else { + gop.value_ptr.* = .declared_disabled; + return false; + } + } +} + test { _ = Cache; _ = Step; diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index fade29db1589..f67cba6c3fb8 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -314,7 +314,7 @@ pub fn evalZigProcess( try handleVerbose(s.owner, null, argv); var child = std.ChildProcess.init(argv, arena); - child.env_map = b.env_map; + child.env_map = &b.graph.env_map; child.stdin_behavior = .Pipe; child.stdout_behavior = .Pipe; child.stderr_behavior = .Pipe; diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig index 68e68b28adb5..0c438069f861 100644 --- a/lib/std/Build/Step/Compile.zig +++ b/lib/std/Build/Step/Compile.zig @@ -923,7 +923,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { var zig_args = ArrayList([]const u8).init(arena); defer zig_args.deinit(); - try zig_args.append(b.zig_exe); + try zig_args.append(b.graph.zig_exe); const cmd = switch (self.kind) { .lib => "build-lib", @@ -933,6 +933,16 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { }; try zig_args.append(cmd); + if (!mem.eql(u8, b.graph.host_query_options.arch_os_abi, "native")) { + try zig_args.appendSlice(&.{ "--host-target", b.graph.host_query_options.arch_os_abi }); + } + if (b.graph.host_query_options.cpu_features) |cpu| { + try zig_args.appendSlice(&.{ "--host-cpu", cpu }); + } + if (b.graph.host_query_options.dynamic_linker) |dl| { + try zig_args.appendSlice(&.{ "--host-dynamic-linker", dl }); + } + if (b.reference_trace) |some| { try zig_args.append(try std.fmt.allocPrint(arena, "-freference-trace={d}", .{some})); } @@ -1393,7 +1403,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { try zig_args.append(b.cache_root.path orelse "."); try zig_args.append("--global-cache-dir"); - try zig_args.append(b.global_cache_root.path orelse "."); + try zig_args.append(b.graph.global_cache_root.path orelse "."); try zig_args.append("--name"); try zig_args.append(self.name); diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig index 55c7cf387a7c..9c2f5d0826c5 100644 --- a/lib/std/Build/Step/ConfigHeader.zig +++ b/lib/std/Build/Step/ConfigHeader.zig @@ -171,7 +171,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { const gpa = b.allocator; const arena = b.allocator; - var man = b.cache.obtain(); + var man = b.graph.cache.obtain(); defer man.deinit(); // Random bytes to make ConfigHeader unique. 
Refresh this with new diff --git a/lib/std/Build/Step/Fmt.zig b/lib/std/Build/Step/Fmt.zig index 8e8cc51c0d4a..b9828e34ddb8 100644 --- a/lib/std/Build/Step/Fmt.zig +++ b/lib/std/Build/Step/Fmt.zig @@ -52,7 +52,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { var argv: std.ArrayListUnmanaged([]const u8) = .{}; try argv.ensureUnusedCapacity(arena, 2 + 1 + self.paths.len + 2 * self.exclude_paths.len); - argv.appendAssumeCapacity(b.zig_exe); + argv.appendAssumeCapacity(b.graph.zig_exe); argv.appendAssumeCapacity("fmt"); if (self.check) { diff --git a/lib/std/Build/Step/ObjCopy.zig b/lib/std/Build/Step/ObjCopy.zig index 6691107bf5d6..e10c5ce9a93b 100644 --- a/lib/std/Build/Step/ObjCopy.zig +++ b/lib/std/Build/Step/ObjCopy.zig @@ -94,7 +94,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { const b = step.owner; const self = @fieldParentPtr(ObjCopy, "step", step); - var man = b.cache.obtain(); + var man = b.graph.cache.obtain(); defer man.deinit(); // Random bytes to make ObjCopy unique. Refresh this with new random @@ -133,7 +133,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { }; var argv = std.ArrayList([]const u8).init(b.allocator); - try argv.appendSlice(&.{ b.zig_exe, "objcopy" }); + try argv.appendSlice(&.{ b.graph.zig_exe, "objcopy" }); if (self.only_section) |only_section| { try argv.appendSlice(&.{ "-j", only_section }); diff --git a/lib/std/Build/Step/Options.zig b/lib/std/Build/Step/Options.zig index beaaf4e3a063..8dfe73570397 100644 --- a/lib/std/Build/Step/Options.zig +++ b/lib/std/Build/Step/Options.zig @@ -222,7 +222,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { const basename = "options.zig"; // Hash contents to file name. - var hash = b.cache.hash; + var hash = b.graph.cache.hash; // Random bytes to make unique. Refresh this with new random bytes when // implementation is modified in a non-backwards-compatible way. 
hash.add(@as(u32, 0xad95e922)); @@ -301,27 +301,28 @@ test Options { var arena = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena.deinit(); - const host: std.Build.ResolvedTarget = .{ - .query = .{}, - .result = try std.zig.system.resolveTargetQuery(.{}), - }; - - var cache: std.Build.Cache = .{ - .gpa = arena.allocator(), - .manifest_dir = std.fs.cwd(), + var graph: std.Build.Graph = .{ + .arena = arena.allocator(), + .cache = .{ + .gpa = arena.allocator(), + .manifest_dir = std.fs.cwd(), + }, + .zig_exe = "test", + .env_map = std.process.EnvMap.init(arena.allocator()), + .global_cache_root = .{ .path = "test", .handle = std.fs.cwd() }, }; var builder = try std.Build.create( - arena.allocator(), - "test", + &graph, .{ .path = "test", .handle = std.fs.cwd() }, .{ .path = "test", .handle = std.fs.cwd() }, - .{ .path = "test", .handle = std.fs.cwd() }, - host, - &cache, &.{}, ); - defer builder.destroy(); + + builder.host = .{ + .query = .{}, + .result = try std.zig.system.resolveTargetQuery(.{}), + }; const options = builder.addOptions(); diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index de8f9816ab58..3df3d9ee537b 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -463,7 +463,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { var argv_list = ArrayList([]const u8).init(arena); var output_placeholders = ArrayList(IndexedOutput).init(arena); - var man = b.cache.obtain(); + var man = b.graph.cache.obtain(); defer man.deinit(); for (self.argv.items) |arg| { @@ -1036,7 +1036,7 @@ fn spawnChildAndCollect( child.cwd = b.build_root.path; child.cwd_dir = b.build_root.handle; } - child.env_map = self.env_map orelse b.env_map; + child.env_map = self.env_map orelse &b.graph.env_map; child.request_resource_usage_statistics = true; child.stdin_behavior = switch (self.stdio) { diff --git a/lib/std/Build/Step/TranslateC.zig b/lib/std/Build/Step/TranslateC.zig index c55c5889ef7d..223b00499326 100644 --- a/lib/std/Build/Step/TranslateC.zig +++ b/lib/std/Build/Step/TranslateC.zig @@ -121,7 +121,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { const self = @fieldParentPtr(TranslateC, "step", step); var argv_list = std.ArrayList([]const u8).init(b.allocator); - try argv_list.append(b.zig_exe); + try argv_list.append(b.graph.zig_exe); try argv_list.append("translate-c"); if (self.link_libc) { try argv_list.append("-lc"); diff --git a/lib/std/Build/Step/WriteFile.zig b/lib/std/Build/Step/WriteFile.zig index 7fcf06249f38..aab40a8a5e09 100644 --- a/lib/std/Build/Step/WriteFile.zig +++ b/lib/std/Build/Step/WriteFile.zig @@ -190,7 +190,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { // If, for example, a hard-coded path was used as the location to put WriteFile // files, then two WriteFiles executing in parallel might clobber each other. - var man = b.cache.obtain(); + var man = b.graph.cache.obtain(); defer man.deinit(); // Random bytes to make WriteFile unique. 
Refresh this with diff --git a/lib/std/Target/Query.zig b/lib/std/Target/Query.zig index 10130e03bba6..e54ac0c3c679 100644 --- a/lib/std/Target/Query.zig +++ b/lib/std/Target/Query.zig @@ -468,7 +468,7 @@ pub fn zigTriple(self: Query, allocator: Allocator) Allocator.Error![]u8 { } if (self.glibc_version) |v| { - const name = @tagName(self.abi orelse builtin.target.abi); + const name = if (self.abi) |abi| @tagName(abi) else "gnu"; try result.ensureUnusedCapacity(name.len + 2); result.appendAssumeCapacity('-'); result.appendSliceAssumeCapacity(name); diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig index 0a0c06ff8943..8b15b7d63b8a 100644 --- a/lib/std/child_process.zig +++ b/lib/std/child_process.zig @@ -298,7 +298,9 @@ pub const ChildProcess = struct { // we could make this work with multiple allocators but YAGNI if (stdout.allocator.ptr != stderr.allocator.ptr or stdout.allocator.vtable != stderr.allocator.vtable) - @panic("ChildProcess.collectOutput only supports 1 allocator"); + { + unreachable; // ChildProcess.collectOutput only supports 1 allocator + } var poller = std.io.poll(stdout.allocator, enum { stdout, stderr }, .{ .stdout = child.stdout.?, diff --git a/src/Compilation.zig b/src/Compilation.zig index beb2c8cf9970..e837004779f3 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -4530,6 +4530,9 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P log.err("{}: failed to parse clang diagnostics: {s}", .{ err, stderr }); return comp.failCObj(c_object, "clang exited with code {d}", .{code}); }; + zig_cache_tmp_dir.deleteFile(out_diag_path) catch |err| { + log.warn("failed to delete '{s}': {s}", .{ out_diag_path, @errorName(err) }); + }; return comp.failCObjWithOwnedDiagBundle(c_object, bundle); } }, diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 58fdb68278ef..fb9d7c823c0c 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -31,6 +31,8 @@ arena: std.heap.ArenaAllocator, location: Location, location_tok: std.zig.Ast.TokenIndex, hash_tok: std.zig.Ast.TokenIndex, +name_tok: std.zig.Ast.TokenIndex, +lazy_status: LazyStatus, parent_package_root: Package.Path, parent_manifest_ast: ?*const std.zig.Ast, prog_node: *std.Progress.Node, @@ -64,6 +66,15 @@ oom_flag: bool, /// the root source file. module: ?*Package.Module, +pub const LazyStatus = enum { + /// Not lazy. + eager, + /// Lazy, found. + available, + /// Lazy, not found. + unavailable, +}; + /// Contains shared state among all `Fetch` tasks. pub const JobQueue = struct { mutex: std.Thread.Mutex = .{}, @@ -80,14 +91,27 @@ pub const JobQueue = struct { thread_pool: *ThreadPool, wait_group: WaitGroup = .{}, global_cache: Cache.Directory, + /// If true then, no fetching occurs, and: + /// * The `global_cache` directory is assumed to be the direct parent + /// directory of on-disk packages rather than having the "p/" directory + /// prefix inside of it. + /// * An error occurs if any non-lazy packages are not already present in + /// the package cache directory. + /// * Missing hash field causes an error, and no fetching occurs so it does + /// not print the correct hash like usual. + read_only: bool, recursive: bool, /// Dumps hash information to stdout which can be used to troubleshoot why /// two hashes of the same package do not match. /// If this is true, `recursive` must be false. debug_hash: bool, work_around_btrfs_bug: bool, + /// Set of hashes that will be additionally fetched even if they are marked + /// as lazy. 
+ unlazy_set: UnlazySet = .{}, pub const Table = std.AutoArrayHashMapUnmanaged(Manifest.MultiHashHexDigest, *Fetch); + pub const UnlazySet = std.AutoArrayHashMapUnmanaged(Manifest.MultiHashHexDigest, void); pub fn deinit(jq: *JobQueue) void { if (jq.all_fetches.items.len == 0) return; @@ -141,11 +165,37 @@ pub const JobQueue = struct { // The first one is a dummy package for the current project. continue; } + try buf.writer().print( \\ pub const {} = struct {{ + \\ + , .{std.zig.fmtId(&hash)}); + + lazy: { + switch (fetch.lazy_status) { + .eager => break :lazy, + .available => { + try buf.appendSlice( + \\ pub const available = true; + \\ + ); + break :lazy; + }, + .unavailable => { + try buf.appendSlice( + \\ pub const available = false; + \\ }; + \\ + ); + continue; + }, + } + } + + try buf.writer().print( \\ pub const build_root = "{q}"; \\ - , .{ std.zig.fmtId(&hash), fetch.package_root }); + , .{fetch.package_root}); if (fetch.has_build_zig) { try buf.writer().print( @@ -270,7 +320,8 @@ pub fn run(f: *Fetch) RunError!void { // We want to fail unless the resolved relative path has a // prefix of "p/$hash/". const digest_len = @typeInfo(Manifest.MultiHashHexDigest).Array.len; - const expected_prefix = f.parent_package_root.sub_path[0 .. "p/".len + digest_len]; + const prefix_len: usize = if (f.job_queue.read_only) 0 else "p/".len; + const expected_prefix = f.parent_package_root.sub_path[0 .. prefix_len + digest_len]; if (!std.mem.startsWith(u8, pkg_root.sub_path, expected_prefix)) { return f.fail( f.location_tok, @@ -311,8 +362,11 @@ pub fn run(f: *Fetch) RunError!void { const s = fs.path.sep_str; if (remote.hash) |expected_hash| { - const pkg_sub_path = "p" ++ s ++ expected_hash; + const prefixed_pkg_sub_path = "p" ++ s ++ expected_hash; + const prefix_len: usize = if (f.job_queue.read_only) "p/".len else 0; + const pkg_sub_path = prefixed_pkg_sub_path[prefix_len..]; if (cache_root.handle.access(pkg_sub_path, .{})) |_| { + assert(f.lazy_status != .unavailable); f.package_root = .{ .root_dir = cache_root, .sub_path = try arena.dupe(u8, pkg_sub_path), @@ -322,7 +376,22 @@ pub fn run(f: *Fetch) RunError!void { if (!f.job_queue.recursive) return; return queueJobsForDeps(f); } else |err| switch (err) { - error.FileNotFound => {}, + error.FileNotFound => { + switch (f.lazy_status) { + .eager => {}, + .available => if (!f.job_queue.unlazy_set.contains(expected_hash)) { + f.lazy_status = .unavailable; + return; + }, + .unavailable => unreachable, + } + if (f.job_queue.read_only) return f.fail( + f.name_tok, + try eb.printString("package not found at '{}{s}'", .{ + cache_root, pkg_sub_path, + }), + ); + }, else => |e| { try eb.addRootErrorMessage(.{ .msg = try eb.printString("unable to open global package cache directory '{}{s}': {s}", .{ @@ -332,6 +401,12 @@ pub fn run(f: *Fetch) RunError!void { return error.FetchFailed; }, } + } else { + try eb.addRootErrorMessage(.{ + .msg = try eb.addString("dependency is missing hash field"), + .src_loc = try f.srcLoc(f.location_tok), + }); + return error.FetchFailed; } // Fetch and unpack the remote into a temporary directory. 
@@ -602,6 +677,8 @@ fn queueJobsForDeps(f: *Fetch) RunError!void { .location = location, .location_tok = dep.location_tok, .hash_tok = dep.hash_tok, + .name_tok = dep.name_tok, + .lazy_status = if (dep.lazy) .available else .eager, .parent_package_root = f.package_root, .parent_manifest_ast = &f.manifest_ast, .prog_node = f.prog_node, diff --git a/src/Package/Manifest.zig b/src/Package/Manifest.zig index e8b954fb10ea..589be91357fa 100644 --- a/src/Package/Manifest.zig +++ b/src/Package/Manifest.zig @@ -12,6 +12,8 @@ pub const Dependency = struct { hash: ?[]const u8, hash_tok: Ast.TokenIndex, node: Ast.Node.Index, + name_tok: Ast.TokenIndex, + lazy: bool, pub const Location = union(enum) { url: []const u8, @@ -303,11 +305,14 @@ const Parse = struct { .hash = null, .hash_tok = 0, .node = node, + .name_tok = 0, + .lazy = false, }; var has_location = false; for (struct_init.ast.fields) |field_init| { const name_token = ast.firstToken(field_init) - 2; + dep.name_tok = name_token; const field_name = try identifierTokenString(p, name_token); // We could get fancy with reflection and comptime logic here but doing // things manually provides an opportunity to do any additional verification @@ -342,6 +347,11 @@ const Parse = struct { else => |e| return e, }; dep.hash_tok = main_tokens[field_init]; + } else if (mem.eql(u8, field_name, "lazy")) { + dep.lazy = parseBool(p, field_init) catch |err| switch (err) { + error.ParseFailure => continue, + else => |e| return e, + }; } else { // Ignore unknown fields so that we can add fields in future zig // versions without breaking older zig versions. @@ -374,6 +384,24 @@ const Parse = struct { } } + fn parseBool(p: *Parse, node: Ast.Node.Index) !bool { + const ast = p.ast; + const node_tags = ast.nodes.items(.tag); + const main_tokens = ast.nodes.items(.main_token); + if (node_tags[node] != .identifier) { + return fail(p, main_tokens[node], "expected identifier", .{}); + } + const ident_token = main_tokens[node]; + const token_bytes = ast.tokenSlice(ident_token); + if (mem.eql(u8, token_bytes, "true")) { + return true; + } else if (mem.eql(u8, token_bytes, "false")) { + return false; + } else { + return fail(p, ident_token, "expected boolean", .{}); + } + } + fn parseString(p: *Parse, node: Ast.Node.Index) ![]const u8 { const ast = p.ast; const node_tags = ast.nodes.items(.tag); diff --git a/src/main.zig b/src/main.zig index e7956669418e..a1b4a098db8a 100644 --- a/src/main.zig +++ b/src/main.zig @@ -969,6 +969,9 @@ fn buildOutputType( .libc_paths_file = try EnvVar.ZIG_LIBC.get(arena), .link_objects = .{}, .native_system_include_paths = &.{}, + .host_triple = null, + .host_cpu = null, + .host_dynamic_linker = null, }; // before arg parsing, check for the NO_COLOR environment variable @@ -1262,6 +1265,12 @@ fn buildOutputType( mod_opts.optimize_mode = parseOptimizeMode(arg["-O".len..]); } else if (mem.eql(u8, arg, "--dynamic-linker")) { create_module.dynamic_linker = args_iter.nextOrFatal(); + } else if (mem.eql(u8, arg, "--host-target")) { + create_module.host_triple = args_iter.nextOrFatal(); + } else if (mem.eql(u8, arg, "--host-cpu")) { + create_module.host_cpu = args_iter.nextOrFatal(); + } else if (mem.eql(u8, arg, "--host-dynamic-linker")) { + create_module.host_dynamic_linker = args_iter.nextOrFatal(); } else if (mem.eql(u8, arg, "--sysroot")) { const next_arg = args_iter.nextOrFatal(); create_module.sysroot = next_arg; @@ -3455,6 +3464,9 @@ const CreateModule = struct { each_lib_rpath: ?bool, libc_paths_file: ?[]const u8, link_objects: 
std.ArrayListUnmanaged(Compilation.LinkObject), + host_triple: ?[]const u8, + host_cpu: ?[]const u8, + host_dynamic_linker: ?[]const u8, }; fn createModule( @@ -3539,7 +3551,15 @@ fn createModule( } const target_query = parseTargetQueryOrReportFatalError(arena, target_parse_options); - const target = resolveTargetQueryOrFatal(target_query); + const adjusted_target_query = a: { + if (!target_query.isNative()) break :a target_query; + if (create_module.host_triple) |triple| target_parse_options.arch_os_abi = triple; + if (create_module.host_cpu) |cpu| target_parse_options.cpu_features = cpu; + if (create_module.host_dynamic_linker) |dl| target_parse_options.dynamic_linker = dl; + break :a parseTargetQueryOrReportFatalError(arena, target_parse_options); + }; + + const target = resolveTargetQueryOrFatal(adjusted_target_query); break :t .{ .result = target, .is_native_os = target_query.isNativeOs(), @@ -5130,476 +5150,576 @@ pub const usage_build = ; pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { - const work_around_btrfs_bug = builtin.os.tag == .linux and - EnvVar.ZIG_BTRFS_WORKAROUND.isSet(); - const color: Color = .auto; + var progress: std.Progress = .{ .dont_print_on_dumb = true }; - // We want to release all the locks before executing the child process, so we make a nice - // big block here to ensure the cleanup gets run when we extract out our argv. - const child_argv = argv: { - const self_exe_path = try introspect.findZigExePath(arena); + var build_file: ?[]const u8 = null; + var override_lib_dir: ?[]const u8 = try EnvVar.ZIG_LIB_DIR.get(arena); + var override_global_cache_dir: ?[]const u8 = try EnvVar.ZIG_GLOBAL_CACHE_DIR.get(arena); + var override_local_cache_dir: ?[]const u8 = try EnvVar.ZIG_LOCAL_CACHE_DIR.get(arena); + var override_build_runner: ?[]const u8 = try EnvVar.ZIG_BUILD_RUNNER.get(arena); + var child_argv = std.ArrayList([]const u8).init(arena); + var reference_trace: ?u32 = null; + var debug_compile_errors = false; + var verbose_link = (builtin.os.tag != .wasi or builtin.link_libc) and + EnvVar.ZIG_VERBOSE_LINK.isSet(); + var verbose_cc = (builtin.os.tag != .wasi or builtin.link_libc) and + EnvVar.ZIG_VERBOSE_CC.isSet(); + var verbose_air = false; + var verbose_intern_pool = false; + var verbose_generic_instances = false; + var verbose_llvm_ir: ?[]const u8 = null; + var verbose_llvm_bc: ?[]const u8 = null; + var verbose_cimport = false; + var verbose_llvm_cpu_features = false; + var fetch_only = false; + var system_pkg_dir_path: ?[]const u8 = null; - var build_file: ?[]const u8 = null; - var override_lib_dir: ?[]const u8 = try EnvVar.ZIG_LIB_DIR.get(arena); - var override_global_cache_dir: ?[]const u8 = try EnvVar.ZIG_GLOBAL_CACHE_DIR.get(arena); - var override_local_cache_dir: ?[]const u8 = try EnvVar.ZIG_LOCAL_CACHE_DIR.get(arena); - var override_build_runner: ?[]const u8 = try EnvVar.ZIG_BUILD_RUNNER.get(arena); - var child_argv = std.ArrayList([]const u8).init(arena); - var reference_trace: ?u32 = null; - var debug_compile_errors = false; - var verbose_link = (builtin.os.tag != .wasi or builtin.link_libc) and - EnvVar.ZIG_VERBOSE_LINK.isSet(); - var verbose_cc = (builtin.os.tag != .wasi or builtin.link_libc) and - EnvVar.ZIG_VERBOSE_CC.isSet(); - var verbose_air = false; - var verbose_intern_pool = false; - var verbose_generic_instances = false; - var verbose_llvm_ir: ?[]const u8 = null; - var verbose_llvm_bc: ?[]const u8 = null; - var verbose_cimport = false; - var verbose_llvm_cpu_features = false; - var fetch_only = false; - - 
const argv_index_exe = child_argv.items.len; - _ = try child_argv.addOne(); - - try child_argv.append(self_exe_path); - - const argv_index_build_file = child_argv.items.len; - _ = try child_argv.addOne(); - - const argv_index_cache_dir = child_argv.items.len; - _ = try child_argv.addOne(); - - const argv_index_global_cache_dir = child_argv.items.len; - _ = try child_argv.addOne(); - - try child_argv.appendSlice(&.{ - "--seed", - try std.fmt.allocPrint(arena, "0x{x}", .{std.crypto.random.int(u32)}), - }); - const argv_index_seed = child_argv.items.len - 1; + const argv_index_exe = child_argv.items.len; + _ = try child_argv.addOne(); - { - var i: usize = 0; - while (i < args.len) : (i += 1) { - const arg = args[i]; - if (mem.startsWith(u8, arg, "-")) { - if (mem.eql(u8, arg, "--build-file")) { - if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); - i += 1; - build_file = args[i]; - continue; - } else if (mem.eql(u8, arg, "--zig-lib-dir")) { - if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); - i += 1; - override_lib_dir = args[i]; - try child_argv.appendSlice(&.{ arg, args[i] }); - continue; - } else if (mem.eql(u8, arg, "--build-runner")) { - if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); - i += 1; - override_build_runner = args[i]; - continue; - } else if (mem.eql(u8, arg, "--cache-dir")) { - if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); - i += 1; - override_local_cache_dir = args[i]; - continue; - } else if (mem.eql(u8, arg, "--global-cache-dir")) { - if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); - i += 1; - override_global_cache_dir = args[i]; - continue; - } else if (mem.eql(u8, arg, "-freference-trace")) { - reference_trace = 256; - } else if (mem.eql(u8, arg, "--fetch")) { - fetch_only = true; - } else if (mem.startsWith(u8, arg, "-freference-trace=")) { - const num = arg["-freference-trace=".len..]; - reference_trace = std.fmt.parseUnsigned(u32, num, 10) catch |err| { - fatal("unable to parse reference_trace count '{s}': {s}", .{ num, @errorName(err) }); - }; - } else if (mem.eql(u8, arg, "-fno-reference-trace")) { - reference_trace = null; - } else if (mem.eql(u8, arg, "--debug-log")) { - if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); - try child_argv.appendSlice(args[i .. i + 2]); - i += 1; - if (!build_options.enable_logging) { - warn("Zig was compiled without logging enabled (-Dlog). --debug-log has no effect.", .{}); - } else { - try log_scopes.append(arena, args[i]); - } - continue; - } else if (mem.eql(u8, arg, "--debug-compile-errors")) { - if (!crash_report.is_enabled) { - warn("Zig was compiled in a release mode. 
--debug-compile-errors has no effect.", .{}); - } else { - debug_compile_errors = true; - } - } else if (mem.eql(u8, arg, "--verbose-link")) { - verbose_link = true; - } else if (mem.eql(u8, arg, "--verbose-cc")) { - verbose_cc = true; - } else if (mem.eql(u8, arg, "--verbose-air")) { - verbose_air = true; - } else if (mem.eql(u8, arg, "--verbose-intern-pool")) { - verbose_intern_pool = true; - } else if (mem.eql(u8, arg, "--verbose-generic-instances")) { - verbose_generic_instances = true; - } else if (mem.eql(u8, arg, "--verbose-llvm-ir")) { - verbose_llvm_ir = "-"; - } else if (mem.startsWith(u8, arg, "--verbose-llvm-ir=")) { - verbose_llvm_ir = arg["--verbose-llvm-ir=".len..]; - } else if (mem.startsWith(u8, arg, "--verbose-llvm-bc=")) { - verbose_llvm_bc = arg["--verbose-llvm-bc=".len..]; - } else if (mem.eql(u8, arg, "--verbose-cimport")) { - verbose_cimport = true; - } else if (mem.eql(u8, arg, "--verbose-llvm-cpu-features")) { - verbose_llvm_cpu_features = true; - } else if (mem.eql(u8, arg, "--seed")) { - if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); - i += 1; - child_argv.items[argv_index_seed] = args[i]; - continue; + const self_exe_path = try introspect.findZigExePath(arena); + try child_argv.append(self_exe_path); + + const argv_index_build_file = child_argv.items.len; + _ = try child_argv.addOne(); + + const argv_index_cache_dir = child_argv.items.len; + _ = try child_argv.addOne(); + + const argv_index_global_cache_dir = child_argv.items.len; + _ = try child_argv.addOne(); + + try child_argv.appendSlice(&.{ + "--seed", + try std.fmt.allocPrint(arena, "0x{x}", .{std.crypto.random.int(u32)}), + }); + const argv_index_seed = child_argv.items.len - 1; + + // This parent process needs a way to obtain results from the configuration + // phase of the child process. In the future, the make phase will be + // executed in a separate process from the configure phase, and we can then + // use stdout from the configuration phase for this purpose. + // + // However, currently, both phases are in the same process, and Run Step + // provides an API for making the spawned subprocesses inherit stdout and stderr, + // which means these streams are not available for passing metadata back + // to the parent. + // + // Until make and configure phases are separated into different processes, + // the strategy is to choose a temporary file name ahead of time, and then + // read this file in the parent to obtain the results, in case the child + // exits with code 3.
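The comment above describes only the parent side of this handshake; the child side lives in lib/build_runner.zig and is not shown in this excerpt. As a rough, hypothetical sketch of what that side amounts to (the helper name and error handling are invented; the zig-cache/tmp/<nonce> path, one-hash-per-line format, and exit code 3 are taken from the surrounding code):

    const std = @import("std");

    // Hypothetical sketch of the build-runner side: write the multihashes of
    // missing lazy dependencies into zig-cache/tmp/<nonce>, then the runner
    // exits with code 3 so the parent fetches them and re-runs configuration.
    fn writeMissingLazyHashes(
        local_cache_dir: std.fs.Dir,
        nonce: []const u8, // the 16-character value received via the -Z flag
        missing_hashes: []const []const u8,
    ) !void {
        var path_buf: [64]u8 = undefined;
        const sub_path = try std.fmt.bufPrint(&path_buf, "tmp" ++ std.fs.path.sep_str ++ "{s}", .{nonce});
        try local_cache_dir.makePath("tmp");
        var file = try local_cache_dir.createFile(sub_path, .{});
        defer file.close();
        for (missing_hashes) |hash| {
            try file.writeAll(hash); // one hash per line, matching the splitScalar(u8, ..., '\n') reader in cmdBuild
            try file.writeAll("\n");
        }
    }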
+ const results_tmp_file_nonce = Package.Manifest.hex64(std.crypto.random.int(u64)); + try child_argv.append("-Z" ++ results_tmp_file_nonce); + + { + var i: usize = 0; + while (i < args.len) : (i += 1) { + const arg = args[i]; + if (mem.startsWith(u8, arg, "-")) { + if (mem.eql(u8, arg, "--build-file")) { + if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); + i += 1; + build_file = args[i]; + continue; + } else if (mem.eql(u8, arg, "--zig-lib-dir")) { + if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); + i += 1; + override_lib_dir = args[i]; + try child_argv.appendSlice(&.{ arg, args[i] }); + continue; + } else if (mem.eql(u8, arg, "--build-runner")) { + if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); + i += 1; + override_build_runner = args[i]; + continue; + } else if (mem.eql(u8, arg, "--cache-dir")) { + if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); + i += 1; + override_local_cache_dir = args[i]; + continue; + } else if (mem.eql(u8, arg, "--global-cache-dir")) { + if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); + i += 1; + override_global_cache_dir = args[i]; + continue; + } else if (mem.eql(u8, arg, "-freference-trace")) { + reference_trace = 256; + } else if (mem.eql(u8, arg, "--fetch")) { + fetch_only = true; + } else if (mem.eql(u8, arg, "--system")) { + if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); + i += 1; + system_pkg_dir_path = args[i]; + try child_argv.append("--system"); + continue; + } else if (mem.startsWith(u8, arg, "-freference-trace=")) { + const num = arg["-freference-trace=".len..]; + reference_trace = std.fmt.parseUnsigned(u32, num, 10) catch |err| { + fatal("unable to parse reference_trace count '{s}': {s}", .{ num, @errorName(err) }); + }; + } else if (mem.eql(u8, arg, "-fno-reference-trace")) { + reference_trace = null; + } else if (mem.eql(u8, arg, "--debug-log")) { + if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); + try child_argv.appendSlice(args[i .. i + 2]); + i += 1; + if (!build_options.enable_logging) { + warn("Zig was compiled without logging enabled (-Dlog). --debug-log has no effect.", .{}); + } else { + try log_scopes.append(arena, args[i]); + } + continue; + } else if (mem.eql(u8, arg, "--debug-compile-errors")) { + if (!crash_report.is_enabled) { + warn("Zig was compiled in a release mode. 
--debug-compile-errors has no effect.", .{}); + } else { + debug_compile_errors = true; } + } else if (mem.eql(u8, arg, "--verbose-link")) { + verbose_link = true; + } else if (mem.eql(u8, arg, "--verbose-cc")) { + verbose_cc = true; + } else if (mem.eql(u8, arg, "--verbose-air")) { + verbose_air = true; + } else if (mem.eql(u8, arg, "--verbose-intern-pool")) { + verbose_intern_pool = true; + } else if (mem.eql(u8, arg, "--verbose-generic-instances")) { + verbose_generic_instances = true; + } else if (mem.eql(u8, arg, "--verbose-llvm-ir")) { + verbose_llvm_ir = "-"; + } else if (mem.startsWith(u8, arg, "--verbose-llvm-ir=")) { + verbose_llvm_ir = arg["--verbose-llvm-ir=".len..]; + } else if (mem.startsWith(u8, arg, "--verbose-llvm-bc=")) { + verbose_llvm_bc = arg["--verbose-llvm-bc=".len..]; + } else if (mem.eql(u8, arg, "--verbose-cimport")) { + verbose_cimport = true; + } else if (mem.eql(u8, arg, "--verbose-llvm-cpu-features")) { + verbose_llvm_cpu_features = true; + } else if (mem.eql(u8, arg, "--seed")) { + if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); + i += 1; + child_argv.items[argv_index_seed] = args[i]; + continue; } - try child_argv.append(arg); } + try child_argv.append(arg); } + } - var zig_lib_directory: Compilation.Directory = if (override_lib_dir) |lib_dir| .{ - .path = lib_dir, - .handle = fs.cwd().openDir(lib_dir, .{}) catch |err| { - fatal("unable to open zig lib directory from 'zig-lib-dir' argument: '{s}': {s}", .{ lib_dir, @errorName(err) }); - }, - } else introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| { - fatal("unable to find zig installation directory '{s}': {s}", .{ self_exe_path, @errorName(err) }); - }; - defer zig_lib_directory.handle.close(); + const work_around_btrfs_bug = builtin.os.tag == .linux and + EnvVar.ZIG_BTRFS_WORKAROUND.isSet(); + const color: Color = .auto; - var cleanup_build_dir: ?fs.Dir = null; - defer if (cleanup_build_dir) |*dir| dir.close(); + const target_query: std.Target.Query = .{}; + const resolved_target: Package.Module.ResolvedTarget = .{ + .result = resolveTargetQueryOrFatal(target_query), + .is_native_os = true, + .is_native_abi = true, + }; - const cwd_path = try process.getCwdAlloc(arena); - const build_root = try findBuildRoot(arena, .{ - .cwd_path = cwd_path, - .build_file = build_file, - }); - child_argv.items[argv_index_build_file] = build_root.directory.path orelse cwd_path; + const exe_basename = try std.zig.binNameAlloc(arena, .{ + .root_name = "build", + .target = resolved_target.result, + .output_mode = .Exe, + }); + const emit_bin: Compilation.EmitLoc = .{ + .directory = null, // Use the local zig-cache. 
+ .basename = exe_basename, + }; - var global_cache_directory: Compilation.Directory = l: { - const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena); - break :l .{ - .handle = try fs.cwd().makeOpenPath(p, .{}), - .path = p, - }; + gimmeMoreOfThoseSweetSweetFileDescriptors(); + + var zig_lib_directory: Compilation.Directory = if (override_lib_dir) |lib_dir| .{ + .path = lib_dir, + .handle = fs.cwd().openDir(lib_dir, .{}) catch |err| { + fatal("unable to open zig lib directory from 'zig-lib-dir' argument: '{s}': {s}", .{ lib_dir, @errorName(err) }); + }, + } else introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| { + fatal("unable to find zig installation directory '{s}': {s}", .{ self_exe_path, @errorName(err) }); + }; + defer zig_lib_directory.handle.close(); + + const cwd_path = try process.getCwdAlloc(arena); + const build_root = try findBuildRoot(arena, .{ + .cwd_path = cwd_path, + .build_file = build_file, + }); + child_argv.items[argv_index_build_file] = build_root.directory.path orelse cwd_path; + + var global_cache_directory: Compilation.Directory = l: { + const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena); + break :l .{ + .handle = try fs.cwd().makeOpenPath(p, .{}), + .path = p, }; - defer global_cache_directory.handle.close(); + }; + defer global_cache_directory.handle.close(); - child_argv.items[argv_index_global_cache_dir] = global_cache_directory.path orelse cwd_path; + child_argv.items[argv_index_global_cache_dir] = global_cache_directory.path orelse cwd_path; - var local_cache_directory: Compilation.Directory = l: { - if (override_local_cache_dir) |local_cache_dir_path| { - break :l .{ - .handle = try fs.cwd().makeOpenPath(local_cache_dir_path, .{}), - .path = local_cache_dir_path, - }; - } - const cache_dir_path = try build_root.directory.join(arena, &[_][]const u8{"zig-cache"}); + var local_cache_directory: Compilation.Directory = l: { + if (override_local_cache_dir) |local_cache_dir_path| { break :l .{ - .handle = try build_root.directory.handle.makeOpenPath("zig-cache", .{}), - .path = cache_dir_path, + .handle = try fs.cwd().makeOpenPath(local_cache_dir_path, .{}), + .path = local_cache_dir_path, }; + } + const cache_dir_path = try build_root.directory.join(arena, &[_][]const u8{"zig-cache"}); + break :l .{ + .handle = try build_root.directory.handle.makeOpenPath("zig-cache", .{}), + .path = cache_dir_path, }; - defer local_cache_directory.handle.close(); + }; + defer local_cache_directory.handle.close(); - child_argv.items[argv_index_cache_dir] = local_cache_directory.path orelse cwd_path; + child_argv.items[argv_index_cache_dir] = local_cache_directory.path orelse cwd_path; - gimmeMoreOfThoseSweetSweetFileDescriptors(); + var thread_pool: ThreadPool = undefined; + try thread_pool.init(.{ .allocator = gpa }); + defer thread_pool.deinit(); - const target_query: std.Target.Query = .{}; - const resolved_target: Package.Module.ResolvedTarget = .{ - .result = resolveTargetQueryOrFatal(target_query), - .is_native_os = true, - .is_native_abi = true, - }; + // Dummy http client that is not actually used when only_core_functionality is enabled. + // Prevents bootstrap from depending on a bunch of unnecessary stuff. 
+ const HttpClient = if (build_options.only_core_functionality) struct { + allocator: Allocator, + fn deinit(self: *@This()) void { + _ = self; + } + } else std.http.Client; - const exe_basename = try std.zig.binNameAlloc(arena, .{ - .root_name = "build", - .target = resolved_target.result, - .output_mode = .Exe, - }); - const emit_bin: Compilation.EmitLoc = .{ - .directory = null, // Use the local zig-cache. - .basename = exe_basename, - }; - var thread_pool: ThreadPool = undefined; - try thread_pool.init(.{ .allocator = gpa }); - defer thread_pool.deinit(); + var http_client: HttpClient = .{ .allocator = gpa }; + defer http_client.deinit(); - const main_mod_paths: Package.Module.CreateOptions.Paths = if (override_build_runner) |runner| .{ - .root = .{ - .root_dir = Cache.Directory.cwd(), - .sub_path = fs.path.dirname(runner) orelse "", - }, - .root_src_path = fs.path.basename(runner), - } else .{ - .root = .{ .root_dir = zig_lib_directory }, - .root_src_path = "build_runner.zig", - }; + var unlazy_set: Package.Fetch.JobQueue.UnlazySet = .{}; - const config = try Compilation.Config.resolve(.{ - .output_mode = .Exe, - .resolved_target = resolved_target, - .have_zcu = true, - .emit_bin = true, - .is_test = false, - }); + // This loop is re-evaluated when the build script exits with an indication that it + // could not continue due to missing lazy dependencies. + while (true) { + // We want to release all the locks before executing the child process, so we make a nice + // big block here to ensure the cleanup gets run when we extract out our argv. + { + const main_mod_paths: Package.Module.CreateOptions.Paths = if (override_build_runner) |runner| .{ + .root = .{ + .root_dir = Cache.Directory.cwd(), + .sub_path = fs.path.dirname(runner) orelse "", + }, + .root_src_path = fs.path.basename(runner), + } else .{ + .root = .{ .root_dir = zig_lib_directory }, + .root_src_path = "build_runner.zig", + }; - const root_mod = try Package.Module.create(arena, .{ - .global_cache_directory = global_cache_directory, - .paths = main_mod_paths, - .fully_qualified_name = "root", - .cc_argv = &.{}, - .inherited = .{ + const config = try Compilation.Config.resolve(.{ + .output_mode = .Exe, .resolved_target = resolved_target, - }, - .global = config, - .parent = null, - .builtin_mod = null, - }); + .have_zcu = true, + .emit_bin = true, + .is_test = false, + }); - const builtin_mod = root_mod.getBuiltinDependency(); + const root_mod = try Package.Module.create(arena, .{ + .global_cache_directory = global_cache_directory, + .paths = main_mod_paths, + .fully_qualified_name = "root", + .cc_argv = &.{}, + .inherited = .{ + .resolved_target = resolved_target, + }, + .global = config, + .parent = null, + .builtin_mod = null, + }); - const build_mod = try Package.Module.create(arena, .{ - .global_cache_directory = global_cache_directory, - .paths = .{ - .root = .{ .root_dir = build_root.directory }, - .root_src_path = build_root.build_zig_basename, - }, - .fully_qualified_name = "root.@build", - .cc_argv = &.{}, - .inherited = .{}, - .global = config, - .parent = root_mod, - .builtin_mod = builtin_mod, - }); - if (build_options.only_core_functionality) { - try createEmptyDependenciesModule( - arena, - root_mod, - global_cache_directory, - local_cache_directory, - builtin_mod, - config, - ); - } else { - var http_client: std.http.Client = .{ .allocator = gpa }; - defer http_client.deinit(); + const builtin_mod = root_mod.getBuiltinDependency(); - try http_client.loadDefaultProxies(); + const build_mod = try 
Package.Module.create(arena, .{ + .global_cache_directory = global_cache_directory, + .paths = .{ + .root = .{ .root_dir = build_root.directory }, + .root_src_path = build_root.build_zig_basename, + }, + .fully_qualified_name = "root.@build", + .cc_argv = &.{}, + .inherited = .{}, + .global = config, + .parent = root_mod, + .builtin_mod = builtin_mod, + }); - var progress: std.Progress = .{ .dont_print_on_dumb = true }; - const root_prog_node = progress.start("Fetch Packages", 0); - defer root_prog_node.end(); + var cleanup_build_dir: ?fs.Dir = null; + defer if (cleanup_build_dir) |*dir| dir.close(); + + if (build_options.only_core_functionality) { + try createEmptyDependenciesModule( + arena, + root_mod, + global_cache_directory, + local_cache_directory, + builtin_mod, + config, + ); + } else { + const root_prog_node = progress.start("Fetch Packages", 0); + defer root_prog_node.end(); + + var job_queue: Package.Fetch.JobQueue = .{ + .http_client = &http_client, + .thread_pool = &thread_pool, + .global_cache = global_cache_directory, + .read_only = false, + .recursive = true, + .debug_hash = false, + .work_around_btrfs_bug = work_around_btrfs_bug, + .unlazy_set = unlazy_set, + }; + defer job_queue.deinit(); + + if (system_pkg_dir_path) |p| { + job_queue.global_cache = .{ + .path = p, + .handle = fs.cwd().openDir(p, .{}) catch |err| { + fatal("unable to open system package directory '{s}': {s}", .{ + p, @errorName(err), + }); + }, + }; + job_queue.read_only = true; + cleanup_build_dir = job_queue.global_cache.handle; + } else { + try http_client.loadDefaultProxies(); + } - var job_queue: Package.Fetch.JobQueue = .{ - .http_client = &http_client, - .thread_pool = &thread_pool, - .global_cache = global_cache_directory, - .recursive = true, - .debug_hash = false, - .work_around_btrfs_bug = work_around_btrfs_bug, - }; - defer job_queue.deinit(); - - try job_queue.all_fetches.ensureUnusedCapacity(gpa, 1); - try job_queue.table.ensureUnusedCapacity(gpa, 1); - - var fetch: Package.Fetch = .{ - .arena = std.heap.ArenaAllocator.init(gpa), - .location = .{ .relative_path = build_mod.root }, - .location_tok = 0, - .hash_tok = 0, - .parent_package_root = build_mod.root, - .parent_manifest_ast = null, - .prog_node = root_prog_node, - .job_queue = &job_queue, - .omit_missing_hash_error = true, - .allow_missing_paths_field = false, - - .package_root = undefined, - .error_bundle = undefined, - .manifest = null, - .manifest_ast = undefined, - .actual_hash = undefined, - .has_build_zig = true, - .oom_flag = false, - - .module = build_mod, - }; - job_queue.all_fetches.appendAssumeCapacity(&fetch); + try job_queue.all_fetches.ensureUnusedCapacity(gpa, 1); + try job_queue.table.ensureUnusedCapacity(gpa, 1); + + var fetch: Package.Fetch = .{ + .arena = std.heap.ArenaAllocator.init(gpa), + .location = .{ .relative_path = build_mod.root }, + .location_tok = 0, + .hash_tok = 0, + .name_tok = 0, + .lazy_status = .eager, + .parent_package_root = build_mod.root, + .parent_manifest_ast = null, + .prog_node = root_prog_node, + .job_queue = &job_queue, + .omit_missing_hash_error = true, + .allow_missing_paths_field = false, + + .package_root = undefined, + .error_bundle = undefined, + .manifest = null, + .manifest_ast = undefined, + .actual_hash = undefined, + .has_build_zig = true, + .oom_flag = false, + + .module = build_mod, + }; + job_queue.all_fetches.appendAssumeCapacity(&fetch); - job_queue.table.putAssumeCapacityNoClobber( - Package.Fetch.relativePathDigest(build_mod.root, global_cache_directory), - &fetch, - ); 
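One detail worth calling out in the --system branch above: the job queue's global_cache is redirected to the user-provided directory, read_only is set, and loadDefaultProxies is skipped, so nothing can be fetched over the network. Combined with the prefix handling earlier in Fetch.run, packages are then expected to sit directly at <dir>/<multihash> rather than under a p/ subdirectory as in the regular global cache. A minimal sketch of the check this reduces to (the helper name is made up for illustration):

    const std = @import("std");

    /// In --system mode this lookup is the only way a remote dependency can be
    /// satisfied, since network fetching is disabled.
    fn systemPackagePresent(system_dir: std.fs.Dir, multihash_hex: []const u8) bool {
        system_dir.access(multihash_hex, .{}) catch return false;
        return true;
    }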
+ job_queue.table.putAssumeCapacityNoClobber( + Package.Fetch.relativePathDigest(build_mod.root, global_cache_directory), + &fetch, + ); - job_queue.wait_group.start(); - try job_queue.thread_pool.spawn(Package.Fetch.workerRun, .{ &fetch, "root" }); - job_queue.wait_group.wait(); + job_queue.wait_group.start(); + try job_queue.thread_pool.spawn(Package.Fetch.workerRun, .{ &fetch, "root" }); + job_queue.wait_group.wait(); - try job_queue.consolidateErrors(); + try job_queue.consolidateErrors(); - if (fetch.error_bundle.root_list.items.len > 0) { - var errors = try fetch.error_bundle.toOwnedBundle(""); - errors.renderToStdErr(renderOptions(color)); - process.exit(1); - } + if (fetch.error_bundle.root_list.items.len > 0) { + var errors = try fetch.error_bundle.toOwnedBundle(""); + errors.renderToStdErr(renderOptions(color)); + process.exit(1); + } - if (fetch_only) return cleanExit(); - - var source_buf = std.ArrayList(u8).init(gpa); - defer source_buf.deinit(); - try job_queue.createDependenciesSource(&source_buf); - const deps_mod = try createDependenciesModule( - arena, - source_buf.items, - root_mod, - global_cache_directory, - local_cache_directory, - builtin_mod, - config, - ); + if (fetch_only) return cleanExit(); + + var source_buf = std.ArrayList(u8).init(gpa); + defer source_buf.deinit(); + try job_queue.createDependenciesSource(&source_buf); + const deps_mod = try createDependenciesModule( + arena, + source_buf.items, + root_mod, + global_cache_directory, + local_cache_directory, + builtin_mod, + config, + ); - { - // We need a Module for each package's build.zig. - const hashes = job_queue.table.keys(); - const fetches = job_queue.table.values(); - try deps_mod.deps.ensureUnusedCapacity(arena, @intCast(hashes.len)); - for (hashes, fetches) |hash, f| { - if (f == &fetch) { - // The first one is a dummy package for the current project. - continue; + { + // We need a Module for each package's build.zig. + const hashes = job_queue.table.keys(); + const fetches = job_queue.table.values(); + try deps_mod.deps.ensureUnusedCapacity(arena, @intCast(hashes.len)); + for (hashes, fetches) |hash, f| { + if (f == &fetch) { + // The first one is a dummy package for the current project. + continue; + } + if (!f.has_build_zig) + continue; + const m = try Package.Module.create(arena, .{ + .global_cache_directory = global_cache_directory, + .paths = .{ + .root = try f.package_root.clone(arena), + .root_src_path = Package.build_zig_basename, + }, + .fully_qualified_name = try std.fmt.allocPrint( + arena, + "root.@dependencies.{s}", + .{&hash}, + ), + .cc_argv = &.{}, + .inherited = .{}, + .global = config, + .parent = root_mod, + .builtin_mod = builtin_mod, + }); + const hash_cloned = try arena.dupe(u8, &hash); + deps_mod.deps.putAssumeCapacityNoClobber(hash_cloned, m); + f.module = m; } - if (!f.has_build_zig) - continue; - const m = try Package.Module.create(arena, .{ - .global_cache_directory = global_cache_directory, - .paths = .{ - .root = try f.package_root.clone(arena), - .root_src_path = Package.build_zig_basename, - }, - .fully_qualified_name = try std.fmt.allocPrint( - arena, - "root.@dependencies.{s}", - .{&hash}, - ), - .cc_argv = &.{}, - .inherited = .{}, - .global = config, - .parent = root_mod, - .builtin_mod = builtin_mod, - }); - const hash_cloned = try arena.dupe(u8, &hash); - deps_mod.deps.putAssumeCapacityNoClobber(hash_cloned, m); - f.module = m; - } - // Each build.zig module needs access to each of its - // dependencies' build.zig modules by name. 
- for (fetches) |f| { - const mod = f.module orelse continue; - const man = f.manifest orelse continue; - const dep_names = man.dependencies.keys(); - try mod.deps.ensureUnusedCapacity(arena, @intCast(dep_names.len)); - for (dep_names, man.dependencies.values()) |name, dep| { - const dep_digest = Package.Fetch.depDigest( - f.package_root, - global_cache_directory, - dep, - ) orelse continue; - const dep_mod = job_queue.table.get(dep_digest).?.module orelse continue; - const name_cloned = try arena.dupe(u8, name); - mod.deps.putAssumeCapacityNoClobber(name_cloned, dep_mod); + // Each build.zig module needs access to each of its + // dependencies' build.zig modules by name. + for (fetches) |f| { + const mod = f.module orelse continue; + const man = f.manifest orelse continue; + const dep_names = man.dependencies.keys(); + try mod.deps.ensureUnusedCapacity(arena, @intCast(dep_names.len)); + for (dep_names, man.dependencies.values()) |name, dep| { + const dep_digest = Package.Fetch.depDigest( + f.package_root, + global_cache_directory, + dep, + ) orelse continue; + const dep_mod = job_queue.table.get(dep_digest).?.module orelse continue; + const name_cloned = try arena.dupe(u8, name); + mod.deps.putAssumeCapacityNoClobber(name_cloned, dep_mod); + } } } } - } - try root_mod.deps.put(arena, "@build", build_mod); + try root_mod.deps.put(arena, "@build", build_mod); - const comp = Compilation.create(gpa, arena, .{ - .zig_lib_directory = zig_lib_directory, - .local_cache_directory = local_cache_directory, - .global_cache_directory = global_cache_directory, - .root_name = "build", - .config = config, - .root_mod = root_mod, - .main_mod = build_mod, - .emit_bin = emit_bin, - .emit_h = null, - .self_exe_path = self_exe_path, - .thread_pool = &thread_pool, - .verbose_cc = verbose_cc, - .verbose_link = verbose_link, - .verbose_air = verbose_air, - .verbose_intern_pool = verbose_intern_pool, - .verbose_generic_instances = verbose_generic_instances, - .verbose_llvm_ir = verbose_llvm_ir, - .verbose_llvm_bc = verbose_llvm_bc, - .verbose_cimport = verbose_cimport, - .verbose_llvm_cpu_features = verbose_llvm_cpu_features, - .cache_mode = .whole, - .reference_trace = reference_trace, - .debug_compile_errors = debug_compile_errors, - }) catch |err| { - fatal("unable to create compilation: {s}", .{@errorName(err)}); - }; - defer comp.destroy(); + const comp = Compilation.create(gpa, arena, .{ + .zig_lib_directory = zig_lib_directory, + .local_cache_directory = local_cache_directory, + .global_cache_directory = global_cache_directory, + .root_name = "build", + .config = config, + .root_mod = root_mod, + .main_mod = build_mod, + .emit_bin = emit_bin, + .emit_h = null, + .self_exe_path = self_exe_path, + .thread_pool = &thread_pool, + .verbose_cc = verbose_cc, + .verbose_link = verbose_link, + .verbose_air = verbose_air, + .verbose_intern_pool = verbose_intern_pool, + .verbose_generic_instances = verbose_generic_instances, + .verbose_llvm_ir = verbose_llvm_ir, + .verbose_llvm_bc = verbose_llvm_bc, + .verbose_cimport = verbose_cimport, + .verbose_llvm_cpu_features = verbose_llvm_cpu_features, + .cache_mode = .whole, + .reference_trace = reference_trace, + .debug_compile_errors = debug_compile_errors, + }) catch |err| { + fatal("unable to create compilation: {s}", .{@errorName(err)}); + }; + defer comp.destroy(); - updateModule(comp, color) catch |err| switch (err) { - error.SemanticAnalyzeFail => process.exit(2), - else => |e| return e, - }; + updateModule(comp, color) catch |err| switch (err) { + 
error.SemanticAnalyzeFail => process.exit(2), + else => |e| return e, + }; - // Since incremental compilation isn't done yet, we use cache_mode = whole - // above, and thus the output file is already closed. - //try comp.makeBinFileExecutable(); - child_argv.items[argv_index_exe] = - try local_cache_directory.join(arena, &.{comp.cache_use.whole.bin_sub_path.?}); + // Since incremental compilation isn't done yet, we use cache_mode = whole + // above, and thus the output file is already closed. + //try comp.makeBinFileExecutable(); + child_argv.items[argv_index_exe] = + try local_cache_directory.join(arena, &.{comp.cache_use.whole.bin_sub_path.?}); + } - break :argv child_argv.items; - }; + if (process.can_spawn) { + var child = std.ChildProcess.init(child_argv.items, gpa); + child.stdin_behavior = .Inherit; + child.stdout_behavior = .Inherit; + child.stderr_behavior = .Inherit; - if (process.can_spawn) { - var child = std.ChildProcess.init(child_argv, gpa); - child.stdin_behavior = .Inherit; - child.stdout_behavior = .Inherit; - child.stderr_behavior = .Inherit; + const term = try child.spawnAndWait(); + switch (term) { + .Exited => |code| { + if (code == 0) return cleanExit(); + // Indicates that the build runner has reported compile errors + // and this parent process does not need to report any further + // diagnostics. + if (code == 2) process.exit(2); + + if (code == 3) { + if (build_options.only_core_functionality) process.exit(3); + // Indicates the configure phase failed due to missing lazy + // dependencies and stdout contains the hashes of the ones + // that are missing. + const s = fs.path.sep_str; + const tmp_sub_path = "tmp" ++ s ++ results_tmp_file_nonce; + const stdout = local_cache_directory.handle.readFileAlloc(arena, tmp_sub_path, 50 * 1024 * 1024) catch |err| { + fatal("unable to read results of configure phase from '{}{s}': {s}", .{ + local_cache_directory, tmp_sub_path, @errorName(err), + }); + }; + local_cache_directory.handle.deleteFile(tmp_sub_path) catch {}; + + var it = mem.splitScalar(u8, stdout, '\n'); + var any_errors = false; + while (it.next()) |hash| { + if (hash.len == 0) continue; + const digest_len = @typeInfo(Package.Manifest.MultiHashHexDigest).Array.len; + if (hash.len != digest_len) { + std.log.err("invalid digest (length {d} instead of {d}): '{s}'", .{ + hash.len, digest_len, hash, + }); + any_errors = true; + continue; + } + try unlazy_set.put(arena, hash[0..digest_len].*, {}); + } + if (any_errors) process.exit(3); + if (system_pkg_dir_path) |p| { + // In this mode, the system needs to provide these packages; they + // cannot be fetched by Zig. + for (unlazy_set.keys()) |hash| { + std.log.err("lazy dependency package not found: {s}" ++ s ++ "{s}", .{ + p, hash, + }); + } + std.log.info("remote package fetching disabled due to --system mode", .{}); + std.log.info("dependencies might be avoidable depending on build configuration", .{}); + process.exit(3); + } + continue; + } - const term = try child.spawnAndWait(); - switch (term) { - .Exited => |code| { - if (code == 0) return cleanExit(); - // Indicates that the build runner has reported compile errors - // and this parent process does not need to report any further - // diagnostics. 
- if (code == 2) process.exit(2); - - const cmd = try std.mem.join(arena, " ", child_argv); - fatal("the following build command failed with exit code {d}:\n{s}", .{ code, cmd }); - }, - else => { - const cmd = try std.mem.join(arena, " ", child_argv); - fatal("the following build command crashed:\n{s}", .{cmd}); - }, + const cmd = try std.mem.join(arena, " ", child_argv.items); + fatal("the following build command failed with exit code {d}:\n{s}", .{ code, cmd }); + }, + else => { + const cmd = try std.mem.join(arena, " ", child_argv.items); + fatal("the following build command crashed:\n{s}", .{cmd}); + }, + } + } else { + const cmd = try std.mem.join(arena, " ", child_argv.items); + fatal("the following command cannot be executed ({s} does not support spawning a child process):\n{s}", .{ @tagName(builtin.os.tag), cmd }); } - } else { - const cmd = try std.mem.join(arena, " ", child_argv); - fatal("the following command cannot be executed ({s} does not support spawning a child process):\n{s}", .{ @tagName(builtin.os.tag), cmd }); } } @@ -7343,6 +7463,7 @@ fn cmdFetch( .thread_pool = &thread_pool, .global_cache = global_cache_directory, .recursive = false, + .read_only = false, .debug_hash = debug_hash, .work_around_btrfs_bug = work_around_btrfs_bug, }; @@ -7353,6 +7474,8 @@ fn cmdFetch( .location = .{ .path_or_url = path_or_url }, .location_tok = 0, .hash_tok = 0, + .name_tok = 0, + .lazy_status = .eager, .parent_package_root = undefined, .parent_manifest_ast = null, .prog_node = root_prog_node, diff --git a/test/src/Cases.zig b/test/src/Cases.zig index 041d537625cb..614cf690a9e5 100644 --- a/test/src/Cases.zig +++ b/test/src/Cases.zig @@ -562,7 +562,7 @@ pub fn lowerToBuildSteps( run.setName(incr_case.base_path); run.addArgs(&.{ case_base_path_with_dir, - b.zig_exe, + b.graph.zig_exe, }); run.expectStdOutEqual(""); parent_step.dependOn(&run.step); @@ -653,7 +653,7 @@ pub fn lowerToBuildSteps( break :no_exec; } const run_c = b.addSystemCommand(&.{ - b.zig_exe, + b.graph.zig_exe, "run", "-cflags", "-Ilib", diff --git a/test/tests.zig b/test/tests.zig index 9dcc77b14130..a749719c83e7 100644 --- a/test/tests.zig +++ b/test/tests.zig @@ -796,7 +796,7 @@ pub fn addCliTests(b: *std.Build) *Step { { // Test `zig init`. 
const tmp_path = b.makeTempPath(); - const init_exe = b.addSystemCommand(&.{ b.zig_exe, "init" }); + const init_exe = b.addSystemCommand(&.{ b.graph.zig_exe, "init" }); init_exe.setCwd(.{ .cwd_relative = tmp_path }); init_exe.setName("zig init"); init_exe.expectStdOutEqual(""); @@ -810,20 +810,20 @@ pub fn addCliTests(b: *std.Build) *Step { const bad_out_arg = "-femit-bin=does" ++ s ++ "not" ++ s ++ "exist" ++ s ++ "foo.exe"; const ok_src_arg = "src" ++ s ++ "main.zig"; const expected = "error: unable to open output directory 'does" ++ s ++ "not" ++ s ++ "exist': FileNotFound\n"; - const run_bad = b.addSystemCommand(&.{ b.zig_exe, "build-exe", ok_src_arg, bad_out_arg }); + const run_bad = b.addSystemCommand(&.{ b.graph.zig_exe, "build-exe", ok_src_arg, bad_out_arg }); run_bad.setName("zig build-exe error message for bad -femit-bin arg"); run_bad.expectExitCode(1); run_bad.expectStdErrEqual(expected); run_bad.expectStdOutEqual(""); run_bad.step.dependOn(&init_exe.step); - const run_test = b.addSystemCommand(&.{ b.zig_exe, "build", "test" }); + const run_test = b.addSystemCommand(&.{ b.graph.zig_exe, "build", "test" }); run_test.setCwd(.{ .cwd_relative = tmp_path }); run_test.setName("zig build test"); run_test.expectStdOutEqual(""); run_test.step.dependOn(&init_exe.step); - const run_run = b.addSystemCommand(&.{ b.zig_exe, "build", "run" }); + const run_run = b.addSystemCommand(&.{ b.graph.zig_exe, "build", "run" }); run_run.setCwd(.{ .cwd_relative = tmp_path }); run_run.setName("zig build run"); run_run.expectStdOutEqual("Run `zig build test` to run the tests.\n"); @@ -857,7 +857,7 @@ pub fn addCliTests(b: *std.Build) *Step { // This is intended to be the exact CLI usage used by godbolt.org. const run = b.addSystemCommand(&.{ - b.zig_exe, "build-obj", + b.graph.zig_exe, "build-obj", "--cache-dir", tmp_path, "--name", "example", "-fno-emit-bin", "-fno-emit-h", @@ -900,7 +900,7 @@ pub fn addCliTests(b: *std.Build) *Step { subdir.writeFile("fmt3.zig", unformatted_code) catch @panic("unhandled"); // Test zig fmt affecting only the appropriate files. - const run1 = b.addSystemCommand(&.{ b.zig_exe, "fmt", "fmt1.zig" }); + const run1 = b.addSystemCommand(&.{ b.graph.zig_exe, "fmt", "fmt1.zig" }); run1.setName("run zig fmt one file"); run1.setCwd(.{ .cwd_relative = tmp_path }); run1.has_side_effects = true; @@ -908,7 +908,7 @@ pub fn addCliTests(b: *std.Build) *Step { run1.expectStdOutEqual("fmt1.zig\n"); // Test excluding files and directories from a run - const run2 = b.addSystemCommand(&.{ b.zig_exe, "fmt", "--exclude", "fmt2.zig", "--exclude", "subdir", "." }); + const run2 = b.addSystemCommand(&.{ b.graph.zig_exe, "fmt", "--exclude", "fmt2.zig", "--exclude", "subdir", "." }); run2.setName("run zig fmt on directory with exclusions"); run2.setCwd(.{ .cwd_relative = tmp_path }); run2.has_side_effects = true; @@ -916,7 +916,7 @@ pub fn addCliTests(b: *std.Build) *Step { run2.step.dependOn(&run1.step); // Test excluding non-existent file - const run3 = b.addSystemCommand(&.{ b.zig_exe, "fmt", "--exclude", "fmt2.zig", "--exclude", "nonexistent.zig", "." }); + const run3 = b.addSystemCommand(&.{ b.graph.zig_exe, "fmt", "--exclude", "fmt2.zig", "--exclude", "nonexistent.zig", "." 
}); run3.setName("run zig fmt on directory with non-existent exclusion"); run3.setCwd(.{ .cwd_relative = tmp_path }); run3.has_side_effects = true; @@ -924,7 +924,7 @@ pub fn addCliTests(b: *std.Build) *Step { run3.step.dependOn(&run2.step); // running it on the dir, only the new file should be changed - const run4 = b.addSystemCommand(&.{ b.zig_exe, "fmt", "." }); + const run4 = b.addSystemCommand(&.{ b.graph.zig_exe, "fmt", "." }); run4.setName("run zig fmt the directory"); run4.setCwd(.{ .cwd_relative = tmp_path }); run4.has_side_effects = true; @@ -932,7 +932,7 @@ pub fn addCliTests(b: *std.Build) *Step { run4.step.dependOn(&run3.step); // both files have been formatted, nothing should change now - const run5 = b.addSystemCommand(&.{ b.zig_exe, "fmt", "." }); + const run5 = b.addSystemCommand(&.{ b.graph.zig_exe, "fmt", "." }); run5.setName("run zig fmt with nothing to do"); run5.setCwd(.{ .cwd_relative = tmp_path }); run5.has_side_effects = true; @@ -946,7 +946,7 @@ pub fn addCliTests(b: *std.Build) *Step { write6.step.dependOn(&run5.step); // Test `zig fmt` handling UTF-16 decoding. - const run6 = b.addSystemCommand(&.{ b.zig_exe, "fmt", "." }); + const run6 = b.addSystemCommand(&.{ b.graph.zig_exe, "fmt", "." }); run6.setName("run zig fmt convert UTF-16 to UTF-8"); run6.setCwd(.{ .cwd_relative = tmp_path }); run6.has_side_effects = true;
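Finally, for context on how the lazy-dependency plumbing in this diff is meant to be used from a project: the lazy field parsed out of build.zig.zon drives the available flag in the generated @dependencies module, and the build-system side (not part of this excerpt) exposes that to build scripts. A hedged sketch, assuming the std.Build.lazyDependency API that accompanies this feature and using invented package and module names:

    // build.zig.zon
    .{
        .name = "app",
        .version = "0.0.0",
        .dependencies = .{
            .big_test_corpus = .{
                .url = "https://example.com/big_test_corpus.tar.gz",
                .hash = "1220aaaa", // illustrative; a real multihash digest goes here
                .lazy = true,
            },
        },
        .paths = .{""},
    }

    // build.zig
    const std = @import("std");

    pub fn build(b: *std.Build) void {
        const exe = b.addExecutable(.{
            .name = "app",
            .root_source_file = .{ .path = "src/main.zig" },
            .target = b.standardTargetOptions(.{}),
            .optimize = b.standardOptimizeOption(.{}),
        });
        // lazyDependency returns null while the package has not been fetched yet;
        // in that case the build runner records the missing hash and exits with
        // code 3, and the parent zig build process fetches it and reruns the
        // configure phase, as handled in cmdBuild above.
        if (b.lazyDependency("big_test_corpus", .{})) |dep| {
            exe.root_module.addImport("corpus", dep.module("corpus"));
        }
        b.installArtifact(exe);
    }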