diff --git a/CMakeLists.txt b/CMakeLists.txt
index 081872fbdfbc..c14a9383534f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -564,7 +564,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/clang_options_data.zig"
"${CMAKE_SOURCE_DIR}/src/codegen.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/c.zig"
- "${CMAKE_SOURCE_DIR}/src/codegen/c/type.zig"
+ "${CMAKE_SOURCE_DIR}/src/codegen/c/Type.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/llvm.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/llvm/bindings.zig"
"${CMAKE_SOURCE_DIR}/src/glibc.zig"
diff --git a/build.zig b/build.zig
index f661bd6887be..36bb1e0f73e6 100644
--- a/build.zig
+++ b/build.zig
@@ -16,9 +16,7 @@ pub fn build(b: *std.Build) !void {
const only_c = b.option(bool, "only-c", "Translate the Zig compiler to C code, with only the C backend enabled") orelse false;
const target = t: {
var default_target: std.zig.CrossTarget = .{};
- if (only_c) {
- default_target.ofmt = .c;
- }
+ default_target.ofmt = b.option(std.Target.ObjectFormat, "ofmt", "Object format to target") orelse if (only_c) .c else null;
break :t b.standardTargetOptions(.{ .default_target = default_target });
};
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 4ad6b3f866d9..f49457403633 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -3107,7 +3107,7 @@ test "struct namespaced variable" {
// struct field order is determined by the compiler for optimal performance.
// however, you can still calculate a struct base pointer given a field pointer:
fn setYBasedOnX(x: *f32, y: f32) void {
- const point = @fieldParentPtr(Point, "x", x);
+ const point: *Point = @fieldParentPtr("x", x);
point.y = y;
}
test "field parent pointer" {
@@ -8757,8 +8757,7 @@ test "decl access by string" {
{#header_close#}
{#header_open|@fieldParentPtr#}
-
- {#syntax#}@fieldParentPtr(comptime ParentType: type, comptime field_name: []const u8,
- field_ptr: *T) *ParentType{#endsyntax#}
+ {#syntax#}@fieldParentPtr(comptime field_name: []const u8, field_ptr: *T) anytype{#endsyntax#}
Given a pointer to a field, returns the base pointer of a struct.
diff --git a/lib/compiler/aro/aro/pragmas/gcc.zig b/lib/compiler/aro/aro/pragmas/gcc.zig
index 83a4a134a68b..91ab750b4c81 100644
--- a/lib/compiler/aro/aro/pragmas/gcc.zig
+++ b/lib/compiler/aro/aro/pragmas/gcc.zig
@@ -37,18 +37,18 @@ const Directive = enum {
};
fn beforePreprocess(pragma: *Pragma, comp: *Compilation) void {
- var self = @fieldParentPtr(GCC, "pragma", pragma);
+ var self: *GCC = @fieldParentPtr("pragma", pragma);
self.original_options = comp.diagnostics.options;
}
fn beforeParse(pragma: *Pragma, comp: *Compilation) void {
- var self = @fieldParentPtr(GCC, "pragma", pragma);
+ var self: *GCC = @fieldParentPtr("pragma", pragma);
comp.diagnostics.options = self.original_options;
self.options_stack.items.len = 0;
}
fn afterParse(pragma: *Pragma, comp: *Compilation) void {
- var self = @fieldParentPtr(GCC, "pragma", pragma);
+ var self: *GCC = @fieldParentPtr("pragma", pragma);
comp.diagnostics.options = self.original_options;
self.options_stack.items.len = 0;
}
@@ -60,7 +60,7 @@ pub fn init(allocator: mem.Allocator) !*Pragma {
}
fn deinit(pragma: *Pragma, comp: *Compilation) void {
- var self = @fieldParentPtr(GCC, "pragma", pragma);
+ var self: *GCC = @fieldParentPtr("pragma", pragma);
self.options_stack.deinit(comp.gpa);
comp.gpa.destroy(self);
}
@@ -108,7 +108,7 @@ fn diagnosticHandler(self: *GCC, pp: *Preprocessor, start_idx: TokenIndex) Pragm
}
fn preprocessorHandler(pragma: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Pragma.Error!void {
- var self = @fieldParentPtr(GCC, "pragma", pragma);
+ var self: *GCC = @fieldParentPtr("pragma", pragma);
const directive_tok = pp.tokens.get(start_idx + 1);
if (directive_tok.id == .nl) return;
@@ -174,7 +174,7 @@ fn preprocessorHandler(pragma: *Pragma, pp: *Preprocessor, start_idx: TokenIndex
}
fn parserHandler(pragma: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation.Error!void {
- var self = @fieldParentPtr(GCC, "pragma", pragma);
+ var self: *GCC = @fieldParentPtr("pragma", pragma);
const directive_tok = p.pp.tokens.get(start_idx + 1);
if (directive_tok.id == .nl) return;
const name = p.pp.expandedSlice(directive_tok);
diff --git a/lib/compiler/aro/aro/pragmas/message.zig b/lib/compiler/aro/aro/pragmas/message.zig
index a42b5a087422..a364c6d8c12a 100644
--- a/lib/compiler/aro/aro/pragmas/message.zig
+++ b/lib/compiler/aro/aro/pragmas/message.zig
@@ -22,7 +22,7 @@ pub fn init(allocator: mem.Allocator) !*Pragma {
}
fn deinit(pragma: *Pragma, comp: *Compilation) void {
- const self = @fieldParentPtr(Message, "pragma", pragma);
+ const self: *Message = @fieldParentPtr("pragma", pragma);
comp.gpa.destroy(self);
}
diff --git a/lib/compiler/aro/aro/pragmas/once.zig b/lib/compiler/aro/aro/pragmas/once.zig
index 790e5e129cea..21d6c9854efc 100644
--- a/lib/compiler/aro/aro/pragmas/once.zig
+++ b/lib/compiler/aro/aro/pragmas/once.zig
@@ -27,18 +27,18 @@ pub fn init(allocator: mem.Allocator) !*Pragma {
}
fn afterParse(pragma: *Pragma, _: *Compilation) void {
- var self = @fieldParentPtr(Once, "pragma", pragma);
+ var self: *Once = @fieldParentPtr("pragma", pragma);
self.pragma_once.clearRetainingCapacity();
}
fn deinit(pragma: *Pragma, comp: *Compilation) void {
- var self = @fieldParentPtr(Once, "pragma", pragma);
+ var self: *Once = @fieldParentPtr("pragma", pragma);
self.pragma_once.deinit();
comp.gpa.destroy(self);
}
fn preprocessorHandler(pragma: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Pragma.Error!void {
- var self = @fieldParentPtr(Once, "pragma", pragma);
+ var self: *Once = @fieldParentPtr("pragma", pragma);
const name_tok = pp.tokens.get(start_idx);
const next = pp.tokens.get(start_idx + 1);
if (next.id != .nl) {
diff --git a/lib/compiler/aro/aro/pragmas/pack.zig b/lib/compiler/aro/aro/pragmas/pack.zig
index 61306e8849c4..81d1dbc59a1c 100644
--- a/lib/compiler/aro/aro/pragmas/pack.zig
+++ b/lib/compiler/aro/aro/pragmas/pack.zig
@@ -24,13 +24,13 @@ pub fn init(allocator: mem.Allocator) !*Pragma {
}
fn deinit(pragma: *Pragma, comp: *Compilation) void {
- var self = @fieldParentPtr(Pack, "pragma", pragma);
+ var self: *Pack = @fieldParentPtr("pragma", pragma);
self.stack.deinit(comp.gpa);
comp.gpa.destroy(self);
}
fn parserHandler(pragma: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation.Error!void {
- var pack = @fieldParentPtr(Pack, "pragma", pragma);
+ var pack: *Pack = @fieldParentPtr("pragma", pragma);
var idx = start_idx + 1;
const l_paren = p.pp.tokens.get(idx);
if (l_paren.id != .l_paren) {
diff --git a/lib/compiler/aro/backend/Object.zig b/lib/compiler/aro/backend/Object.zig
index db880099051a..b42ad4bdcbb7 100644
--- a/lib/compiler/aro/backend/Object.zig
+++ b/lib/compiler/aro/backend/Object.zig
@@ -16,7 +16,7 @@ pub fn create(gpa: Allocator, target: std.Target) !*Object {
pub fn deinit(obj: *Object) void {
switch (obj.format) {
- .elf => @fieldParentPtr(Elf, "obj", obj).deinit(),
+ .elf => @as(*Elf, @fieldParentPtr("obj", obj)).deinit(),
else => unreachable,
}
}
@@ -32,7 +32,7 @@ pub const Section = union(enum) {
pub fn getSection(obj: *Object, section: Section) !*std.ArrayList(u8) {
switch (obj.format) {
- .elf => return @fieldParentPtr(Elf, "obj", obj).getSection(section),
+ .elf => return @as(*Elf, @fieldParentPtr("obj", obj)).getSection(section),
else => unreachable,
}
}
@@ -53,21 +53,21 @@ pub fn declareSymbol(
size: u64,
) ![]const u8 {
switch (obj.format) {
- .elf => return @fieldParentPtr(Elf, "obj", obj).declareSymbol(section, name, linkage, @"type", offset, size),
+ .elf => return @as(*Elf, @fieldParentPtr("obj", obj)).declareSymbol(section, name, linkage, @"type", offset, size),
else => unreachable,
}
}
pub fn addRelocation(obj: *Object, name: []const u8, section: Section, address: u64, addend: i64) !void {
switch (obj.format) {
- .elf => return @fieldParentPtr(Elf, "obj", obj).addRelocation(name, section, address, addend),
+ .elf => return @as(*Elf, @fieldParentPtr("obj", obj)).addRelocation(name, section, address, addend),
else => unreachable,
}
}
pub fn finish(obj: *Object, file: std.fs.File) !void {
switch (obj.format) {
- .elf => return @fieldParentPtr(Elf, "obj", obj).finish(file),
+ .elf => return @as(*Elf, @fieldParentPtr("obj", obj)).finish(file),
else => unreachable,
}
}
diff --git a/lib/compiler/aro_translate_c.zig b/lib/compiler/aro_translate_c.zig
index cf0c39354b41..1c481bf1a78b 100644
--- a/lib/compiler/aro_translate_c.zig
+++ b/lib/compiler/aro_translate_c.zig
@@ -1098,13 +1098,13 @@ pub fn ScopeExtra(comptime ScopeExtraContext: type, comptime ScopeExtraType: typ
}
};
- pub fn findBlockScope(inner: *ScopeExtraScope, c: *ScopeExtraContext) !*ScopeExtraScope.Block {
+ pub fn findBlockScope(inner: *ScopeExtraScope, c: *ScopeExtraContext) !*Block {
var scope = inner;
while (true) {
switch (scope.id) {
.root => unreachable,
- .block => return @fieldParentPtr(Block, "base", scope),
- .condition => return @fieldParentPtr(Condition, "base", scope).getBlockScope(c),
+ .block => return @fieldParentPtr("base", scope),
+ .condition => return @as(*Condition, @fieldParentPtr("base", scope)).getBlockScope(c),
else => scope = scope.parent.?,
}
}
@@ -1116,7 +1116,7 @@ pub fn ScopeExtra(comptime ScopeExtraContext: type, comptime ScopeExtraType: typ
switch (scope.id) {
.root => unreachable,
.block => {
- const block = @fieldParentPtr(Block, "base", scope);
+ const block: *Block = @fieldParentPtr("base", scope);
if (block.return_type) |ty| return ty;
scope = scope.parent.?;
},
@@ -1128,15 +1128,15 @@ pub fn ScopeExtra(comptime ScopeExtraContext: type, comptime ScopeExtraType: typ
pub fn getAlias(scope: *ScopeExtraScope, name: []const u8) []const u8 {
return switch (scope.id) {
.root => return name,
- .block => @fieldParentPtr(Block, "base", scope).getAlias(name),
+ .block => @as(*Block, @fieldParentPtr("base", scope)).getAlias(name),
.loop, .do_loop, .condition => scope.parent.?.getAlias(name),
};
}
pub fn contains(scope: *ScopeExtraScope, name: []const u8) bool {
return switch (scope.id) {
- .root => @fieldParentPtr(Root, "base", scope).contains(name),
- .block => @fieldParentPtr(Block, "base", scope).contains(name),
+ .root => @as(*Root, @fieldParentPtr("base", scope)).contains(name),
+ .block => @as(*Block, @fieldParentPtr("base", scope)).contains(name),
.loop, .do_loop, .condition => scope.parent.?.contains(name),
};
}
@@ -1158,11 +1158,11 @@ pub fn ScopeExtra(comptime ScopeExtraContext: type, comptime ScopeExtraType: typ
while (true) {
switch (scope.id) {
.root => {
- const root = @fieldParentPtr(Root, "base", scope);
+ const root: *Root = @fieldParentPtr("base", scope);
return root.nodes.append(node);
},
.block => {
- const block = @fieldParentPtr(Block, "base", scope);
+ const block: *Block = @fieldParentPtr("base", scope);
return block.statements.append(node);
},
else => scope = scope.parent.?,
@@ -1184,7 +1184,7 @@ pub fn ScopeExtra(comptime ScopeExtraContext: type, comptime ScopeExtraType: typ
switch (scope.id) {
.root => return,
.block => {
- const block = @fieldParentPtr(Block, "base", scope);
+ const block: *Block = @fieldParentPtr("base", scope);
if (block.variable_discards.get(name)) |discard| {
discard.data.should_skip = true;
return;
diff --git a/lib/compiler/aro_translate_c/ast.zig b/lib/compiler/aro_translate_c/ast.zig
index b63d9fbc3274..722fed4f1838 100644
--- a/lib/compiler/aro_translate_c/ast.zig
+++ b/lib/compiler/aro_translate_c/ast.zig
@@ -409,7 +409,7 @@ pub const Node = extern union {
return null;
if (self.ptr_otherwise.tag == t)
- return @fieldParentPtr(t.Type(), "base", self.ptr_otherwise);
+ return @alignCast(@fieldParentPtr("base", self.ptr_otherwise));
return null;
}
@@ -1220,7 +1220,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
});
},
.pub_var_simple, .var_simple => {
- const payload = @fieldParentPtr(Payload.SimpleVarDecl, "base", node.ptr_otherwise).data;
+ const payload = @as(*Payload.SimpleVarDecl, @alignCast(@fieldParentPtr("base", node.ptr_otherwise))).data;
if (node.tag() == .pub_var_simple) _ = try c.addToken(.keyword_pub, "pub");
const const_tok = try c.addToken(.keyword_const, "const");
_ = try c.addIdentifier(payload.name);
@@ -1293,7 +1293,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
},
.var_decl => return renderVar(c, node),
.arg_redecl, .alias => {
- const payload = @fieldParentPtr(Payload.ArgRedecl, "base", node.ptr_otherwise).data;
+ const payload = @as(*Payload.ArgRedecl, @alignCast(@fieldParentPtr("base", node.ptr_otherwise))).data;
if (node.tag() == .alias) _ = try c.addToken(.keyword_pub, "pub");
const mut_tok = if (node.tag() == .alias)
try c.addToken(.keyword_const, "const")
@@ -1492,7 +1492,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
});
},
.c_pointer, .single_pointer => {
- const payload = @fieldParentPtr(Payload.Pointer, "base", node.ptr_otherwise).data;
+ const payload = @as(*Payload.Pointer, @alignCast(@fieldParentPtr("base", node.ptr_otherwise))).data;
const asterisk = if (node.tag() == .single_pointer)
try c.addToken(.asterisk, "*")
@@ -2085,7 +2085,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
}
fn renderRecord(c: *Context, node: Node) !NodeIndex {
- const payload = @fieldParentPtr(Payload.Record, "base", node.ptr_otherwise).data;
+ const payload = @as(*Payload.Record, @alignCast(@fieldParentPtr("base", node.ptr_otherwise))).data;
if (payload.layout == .@"packed")
_ = try c.addToken(.keyword_packed, "packed")
else if (payload.layout == .@"extern")
@@ -2487,7 +2487,7 @@ fn renderNodeGrouped(c: *Context, node: Node) !NodeIndex {
}
fn renderPrefixOp(c: *Context, node: Node, tag: std.zig.Ast.Node.Tag, tok_tag: TokenTag, bytes: []const u8) !NodeIndex {
- const payload = @fieldParentPtr(Payload.UnOp, "base", node.ptr_otherwise).data;
+ const payload = @as(*Payload.UnOp, @alignCast(@fieldParentPtr("base", node.ptr_otherwise))).data;
return c.addNode(.{
.tag = tag,
.main_token = try c.addToken(tok_tag, bytes),
@@ -2499,7 +2499,7 @@ fn renderPrefixOp(c: *Context, node: Node, tag: std.zig.Ast.Node.Tag, tok_tag: T
}
fn renderBinOpGrouped(c: *Context, node: Node, tag: std.zig.Ast.Node.Tag, tok_tag: TokenTag, bytes: []const u8) !NodeIndex {
- const payload = @fieldParentPtr(Payload.BinOp, "base", node.ptr_otherwise).data;
+ const payload = @as(*Payload.BinOp, @alignCast(@fieldParentPtr("base", node.ptr_otherwise))).data;
const lhs = try renderNodeGrouped(c, payload.lhs);
return c.addNode(.{
.tag = tag,
@@ -2512,7 +2512,7 @@ fn renderBinOpGrouped(c: *Context, node: Node, tag: std.zig.Ast.Node.Tag, tok_ta
}
fn renderBinOp(c: *Context, node: Node, tag: std.zig.Ast.Node.Tag, tok_tag: TokenTag, bytes: []const u8) !NodeIndex {
- const payload = @fieldParentPtr(Payload.BinOp, "base", node.ptr_otherwise).data;
+ const payload = @as(*Payload.BinOp, @alignCast(@fieldParentPtr("base", node.ptr_otherwise))).data;
const lhs = try renderNode(c, payload.lhs);
return c.addNode(.{
.tag = tag,
diff --git a/lib/compiler/resinator/ast.zig b/lib/compiler/resinator/ast.zig
index e6f6c030c08c..d55d91e52c64 100644
--- a/lib/compiler/resinator/ast.zig
+++ b/lib/compiler/resinator/ast.zig
@@ -19,7 +19,7 @@ pub const Tree = struct {
}
pub fn root(self: *Tree) *Node.Root {
- return @fieldParentPtr(Node.Root, "base", self.node);
+ return @alignCast(@fieldParentPtr("base", self.node));
}
pub fn dump(self: *Tree, writer: anytype) @TypeOf(writer).Error!void {
@@ -174,7 +174,7 @@ pub const Node = struct {
pub fn cast(base: *Node, comptime id: Id) ?*id.Type() {
if (base.id == id) {
- return @fieldParentPtr(id.Type(), "base", base);
+ return @alignCast(@fieldParentPtr("base", base));
}
return null;
}
@@ -461,7 +461,7 @@ pub const Node = struct {
pub fn isNumberExpression(node: *const Node) bool {
switch (node.id) {
.literal => {
- const literal = @fieldParentPtr(Node.Literal, "base", node);
+ const literal: *const Node.Literal = @alignCast(@fieldParentPtr("base", node));
return switch (literal.token.id) {
.number => true,
else => false,
@@ -475,7 +475,7 @@ pub const Node = struct {
pub fn isStringLiteral(node: *const Node) bool {
switch (node.id) {
.literal => {
- const literal = @fieldParentPtr(Node.Literal, "base", node);
+ const literal: *const Node.Literal = @alignCast(@fieldParentPtr("base", node));
return switch (literal.token.id) {
.quoted_ascii_string, .quoted_wide_string => true,
else => false,
@@ -489,105 +489,103 @@ pub const Node = struct {
switch (node.id) {
.root => unreachable,
.resource_external => {
- const casted = @fieldParentPtr(Node.ResourceExternal, "base", node);
+ const casted: *const Node.ResourceExternal = @alignCast(@fieldParentPtr("base", node));
return casted.id;
},
.resource_raw_data => {
- const casted = @fieldParentPtr(Node.ResourceRawData, "base", node);
+ const casted: *const Node.ResourceRawData = @alignCast(@fieldParentPtr("base", node));
return casted.id;
},
.literal => {
- const casted = @fieldParentPtr(Node.Literal, "base", node);
+ const casted: *const Node.Literal = @alignCast(@fieldParentPtr("base", node));
return casted.token;
},
.binary_expression => {
- const casted = @fieldParentPtr(Node.BinaryExpression, "base", node);
+ const casted: *const Node.BinaryExpression = @alignCast(@fieldParentPtr("base", node));
return casted.left.getFirstToken();
},
.grouped_expression => {
- const casted = @fieldParentPtr(Node.GroupedExpression, "base", node);
+ const casted: *const Node.GroupedExpression = @alignCast(@fieldParentPtr("base", node));
return casted.open_token;
},
.not_expression => {
- const casted = @fieldParentPtr(Node.NotExpression, "base", node);
+ const casted: *const Node.NotExpression = @alignCast(@fieldParentPtr("base", node));
return casted.not_token;
},
.accelerators => {
- const casted = @fieldParentPtr(Node.Accelerators, "base", node);
+ const casted: *const Node.Accelerators = @alignCast(@fieldParentPtr("base", node));
return casted.id;
},
.accelerator => {
- const casted = @fieldParentPtr(Node.Accelerator, "base", node);
+ const casted: *const Node.Accelerator = @alignCast(@fieldParentPtr("base", node));
return casted.event.getFirstToken();
},
.dialog => {
- const casted = @fieldParentPtr(Node.Dialog, "base", node);
+ const casted: *const Node.Dialog = @alignCast(@fieldParentPtr("base", node));
return casted.id;
},
.control_statement => {
- const casted = @fieldParentPtr(Node.ControlStatement, "base", node);
+ const casted: *const Node.ControlStatement = @alignCast(@fieldParentPtr("base", node));
return casted.type;
},
.toolbar => {
- const casted = @fieldParentPtr(Node.Toolbar, "base", node);
+ const casted: *const Node.Toolbar = @alignCast(@fieldParentPtr("base", node));
return casted.id;
},
.menu => {
- const casted = @fieldParentPtr(Node.Menu, "base", node);
+ const casted: *const Node.Menu = @alignCast(@fieldParentPtr("base", node));
return casted.id;
},
inline .menu_item, .menu_item_separator, .menu_item_ex => |menu_item_type| {
- const node_type = menu_item_type.Type();
- const casted = @fieldParentPtr(node_type, "base", node);
+ const casted: *const menu_item_type.Type() = @alignCast(@fieldParentPtr("base", node));
return casted.menuitem;
},
inline .popup, .popup_ex => |popup_type| {
- const node_type = popup_type.Type();
- const casted = @fieldParentPtr(node_type, "base", node);
+ const casted: *const popup_type.Type() = @alignCast(@fieldParentPtr("base", node));
return casted.popup;
},
.version_info => {
- const casted = @fieldParentPtr(Node.VersionInfo, "base", node);
+ const casted: *const Node.VersionInfo = @alignCast(@fieldParentPtr("base", node));
return casted.id;
},
.version_statement => {
- const casted = @fieldParentPtr(Node.VersionStatement, "base", node);
+ const casted: *const Node.VersionStatement = @alignCast(@fieldParentPtr("base", node));
return casted.type;
},
.block => {
- const casted = @fieldParentPtr(Node.Block, "base", node);
+ const casted: *const Node.Block = @alignCast(@fieldParentPtr("base", node));
return casted.identifier;
},
.block_value => {
- const casted = @fieldParentPtr(Node.BlockValue, "base", node);
+ const casted: *const Node.BlockValue = @alignCast(@fieldParentPtr("base", node));
return casted.identifier;
},
.block_value_value => {
- const casted = @fieldParentPtr(Node.BlockValueValue, "base", node);
+ const casted: *const Node.BlockValueValue = @alignCast(@fieldParentPtr("base", node));
return casted.expression.getFirstToken();
},
.string_table => {
- const casted = @fieldParentPtr(Node.StringTable, "base", node);
+ const casted: *const Node.StringTable = @alignCast(@fieldParentPtr("base", node));
return casted.type;
},
.string_table_string => {
- const casted = @fieldParentPtr(Node.StringTableString, "base", node);
+ const casted: *const Node.StringTableString = @alignCast(@fieldParentPtr("base", node));
return casted.id.getFirstToken();
},
.language_statement => {
- const casted = @fieldParentPtr(Node.LanguageStatement, "base", node);
+ const casted: *const Node.LanguageStatement = @alignCast(@fieldParentPtr("base", node));
return casted.language_token;
},
.font_statement => {
- const casted = @fieldParentPtr(Node.FontStatement, "base", node);
+ const casted: *const Node.FontStatement = @alignCast(@fieldParentPtr("base", node));
return casted.identifier;
},
.simple_statement => {
- const casted = @fieldParentPtr(Node.SimpleStatement, "base", node);
+ const casted: *const Node.SimpleStatement = @alignCast(@fieldParentPtr("base", node));
return casted.identifier;
},
.invalid => {
- const casted = @fieldParentPtr(Node.Invalid, "base", node);
+ const casted: *const Node.Invalid = @alignCast(@fieldParentPtr("base", node));
return casted.context[0];
},
}
@@ -597,44 +595,44 @@ pub const Node = struct {
switch (node.id) {
.root => unreachable,
.resource_external => {
- const casted = @fieldParentPtr(Node.ResourceExternal, "base", node);
+ const casted: *const Node.ResourceExternal = @alignCast(@fieldParentPtr("base", node));
return casted.filename.getLastToken();
},
.resource_raw_data => {
- const casted = @fieldParentPtr(Node.ResourceRawData, "base", node);
+ const casted: *const Node.ResourceRawData = @alignCast(@fieldParentPtr("base", node));
return casted.end_token;
},
.literal => {
- const casted = @fieldParentPtr(Node.Literal, "base", node);
+ const casted: *const Node.Literal = @alignCast(@fieldParentPtr("base", node));
return casted.token;
},
.binary_expression => {
- const casted = @fieldParentPtr(Node.BinaryExpression, "base", node);
+ const casted: *const Node.BinaryExpression = @alignCast(@fieldParentPtr("base", node));
return casted.right.getLastToken();
},
.grouped_expression => {
- const casted = @fieldParentPtr(Node.GroupedExpression, "base", node);
+ const casted: *const Node.GroupedExpression = @alignCast(@fieldParentPtr("base", node));
return casted.close_token;
},
.not_expression => {
- const casted = @fieldParentPtr(Node.NotExpression, "base", node);
+ const casted: *const Node.NotExpression = @alignCast(@fieldParentPtr("base", node));
return casted.number_token;
},
.accelerators => {
- const casted = @fieldParentPtr(Node.Accelerators, "base", node);
+ const casted: *const Node.Accelerators = @alignCast(@fieldParentPtr("base", node));
return casted.end_token;
},
.accelerator => {
- const casted = @fieldParentPtr(Node.Accelerator, "base", node);
+ const casted: *const Node.Accelerator = @alignCast(@fieldParentPtr("base", node));
if (casted.type_and_options.len > 0) return casted.type_and_options[casted.type_and_options.len - 1];
return casted.idvalue.getLastToken();
},
.dialog => {
- const casted = @fieldParentPtr(Node.Dialog, "base", node);
+ const casted: *const Node.Dialog = @alignCast(@fieldParentPtr("base", node));
return casted.end_token;
},
.control_statement => {
- const casted = @fieldParentPtr(Node.ControlStatement, "base", node);
+ const casted: *const Node.ControlStatement = @alignCast(@fieldParentPtr("base", node));
if (casted.extra_data_end) |token| return token;
if (casted.help_id) |help_id_node| return help_id_node.getLastToken();
if (casted.exstyle) |exstyle_node| return exstyle_node.getLastToken();
@@ -647,80 +645,79 @@ pub const Node = struct {
return casted.height.getLastToken();
},
.toolbar => {
- const casted = @fieldParentPtr(Node.Toolbar, "base", node);
+ const casted: *const Node.Toolbar = @alignCast(@fieldParentPtr("base", node));
return casted.end_token;
},
.menu => {
- const casted = @fieldParentPtr(Node.Menu, "base", node);
+ const casted: *const Node.Menu = @alignCast(@fieldParentPtr("base", node));
return casted.end_token;
},
.menu_item => {
- const casted = @fieldParentPtr(Node.MenuItem, "base", node);
+ const casted: *const Node.MenuItem = @alignCast(@fieldParentPtr("base", node));
if (casted.option_list.len > 0) return casted.option_list[casted.option_list.len - 1];
return casted.result.getLastToken();
},
.menu_item_separator => {
- const casted = @fieldParentPtr(Node.MenuItemSeparator, "base", node);
+ const casted: *const Node.MenuItemSeparator = @alignCast(@fieldParentPtr("base", node));
return casted.separator;
},
.menu_item_ex => {
- const casted = @fieldParentPtr(Node.MenuItemEx, "base", node);
+ const casted: *const Node.MenuItemEx = @alignCast(@fieldParentPtr("base", node));
if (casted.state) |state_node| return state_node.getLastToken();
if (casted.type) |type_node| return type_node.getLastToken();
if (casted.id) |id_node| return id_node.getLastToken();
return casted.text;
},
inline .popup, .popup_ex => |popup_type| {
- const node_type = popup_type.Type();
- const casted = @fieldParentPtr(node_type, "base", node);
+ const casted: *const popup_type.Type() = @alignCast(@fieldParentPtr("base", node));
return casted.end_token;
},
.version_info => {
- const casted = @fieldParentPtr(Node.VersionInfo, "base", node);
+ const casted: *const Node.VersionInfo = @alignCast(@fieldParentPtr("base", node));
return casted.end_token;
},
.version_statement => {
- const casted = @fieldParentPtr(Node.VersionStatement, "base", node);
+ const casted: *const Node.VersionStatement = @alignCast(@fieldParentPtr("base", node));
return casted.parts[casted.parts.len - 1].getLastToken();
},
.block => {
- const casted = @fieldParentPtr(Node.Block, "base", node);
+ const casted: *const Node.Block = @alignCast(@fieldParentPtr("base", node));
return casted.end_token;
},
.block_value => {
- const casted = @fieldParentPtr(Node.BlockValue, "base", node);
+ const casted: *const Node.BlockValue = @alignCast(@fieldParentPtr("base", node));
if (casted.values.len > 0) return casted.values[casted.values.len - 1].getLastToken();
return casted.key;
},
.block_value_value => {
- const casted = @fieldParentPtr(Node.BlockValueValue, "base", node);
+ const casted: *const Node.BlockValueValue = @alignCast(@fieldParentPtr("base", node));
return casted.expression.getLastToken();
},
.string_table => {
- const casted = @fieldParentPtr(Node.StringTable, "base", node);
+ const casted: *const Node.StringTable = @alignCast(@fieldParentPtr("base", node));
return casted.end_token;
},
.string_table_string => {
- const casted = @fieldParentPtr(Node.StringTableString, "base", node);
+ const casted: *const Node.StringTableString = @alignCast(@fieldParentPtr("base", node));
return casted.string;
},
.language_statement => {
- const casted = @fieldParentPtr(Node.LanguageStatement, "base", node);
+ const casted: *const Node.LanguageStatement = @alignCast(@fieldParentPtr("base", node));
return casted.sublanguage_id.getLastToken();
},
.font_statement => {
- const casted = @fieldParentPtr(Node.FontStatement, "base", node);
+ const casted: *const Node.FontStatement = @alignCast(@fieldParentPtr("base", node));
if (casted.char_set) |char_set_node| return char_set_node.getLastToken();
if (casted.italic) |italic_node| return italic_node.getLastToken();
if (casted.weight) |weight_node| return weight_node.getLastToken();
return casted.typeface;
},
.simple_statement => {
- const casted = @fieldParentPtr(Node.SimpleStatement, "base", node);
+ const casted: *const Node.SimpleStatement = @alignCast(@fieldParentPtr("base", node));
return casted.value.getLastToken();
},
.invalid => {
- const casted = @fieldParentPtr(Node.Invalid, "base", node);
+ const casted: *const Node.Invalid = @alignCast(@fieldParentPtr("base", node));
return casted.context[casted.context.len - 1];
},
}
@@ -737,31 +734,31 @@ pub const Node = struct {
switch (node.id) {
.root => {
try writer.writeAll("\n");
- const root = @fieldParentPtr(Node.Root, "base", node);
+ const root: *Node.Root = @alignCast(@fieldParentPtr("base", node));
for (root.body) |body_node| {
try body_node.dump(tree, writer, indent + 1);
}
},
.resource_external => {
- const resource = @fieldParentPtr(Node.ResourceExternal, "base", node);
+ const resource: *Node.ResourceExternal = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} {s} [{d} common_resource_attributes]\n", .{ resource.id.slice(tree.source), resource.type.slice(tree.source), resource.common_resource_attributes.len });
try resource.filename.dump(tree, writer, indent + 1);
},
.resource_raw_data => {
- const resource = @fieldParentPtr(Node.ResourceRawData, "base", node);
+ const resource: *Node.ResourceRawData = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} {s} [{d} common_resource_attributes] raw data: {}\n", .{ resource.id.slice(tree.source), resource.type.slice(tree.source), resource.common_resource_attributes.len, resource.raw_data.len });
for (resource.raw_data) |data_expression| {
try data_expression.dump(tree, writer, indent + 1);
}
},
.literal => {
- const literal = @fieldParentPtr(Node.Literal, "base", node);
+ const literal: *Node.Literal = @alignCast(@fieldParentPtr("base", node));
try writer.writeAll(" ");
try writer.writeAll(literal.token.slice(tree.source));
try writer.writeAll("\n");
},
.binary_expression => {
- const binary = @fieldParentPtr(Node.BinaryExpression, "base", node);
+ const binary: *Node.BinaryExpression = @alignCast(@fieldParentPtr("base", node));
try writer.writeAll(" ");
try writer.writeAll(binary.operator.slice(tree.source));
try writer.writeAll("\n");
@@ -769,7 +766,7 @@ pub const Node = struct {
try binary.right.dump(tree, writer, indent + 1);
},
.grouped_expression => {
- const grouped = @fieldParentPtr(Node.GroupedExpression, "base", node);
+ const grouped: *Node.GroupedExpression = @alignCast(@fieldParentPtr("base", node));
try writer.writeAll("\n");
try writer.writeByteNTimes(' ', indent);
try writer.writeAll(grouped.open_token.slice(tree.source));
@@ -780,7 +777,7 @@ pub const Node = struct {
try writer.writeAll("\n");
},
.not_expression => {
- const not = @fieldParentPtr(Node.NotExpression, "base", node);
+ const not: *Node.NotExpression = @alignCast(@fieldParentPtr("base", node));
try writer.writeAll(" ");
try writer.writeAll(not.not_token.slice(tree.source));
try writer.writeAll(" ");
@@ -788,7 +785,7 @@ pub const Node = struct {
try writer.writeAll("\n");
},
.accelerators => {
- const accelerators = @fieldParentPtr(Node.Accelerators, "base", node);
+ const accelerators: *Node.Accelerators = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} {s} [{d} common_resource_attributes]\n", .{ accelerators.id.slice(tree.source), accelerators.type.slice(tree.source), accelerators.common_resource_attributes.len });
for (accelerators.optional_statements) |statement| {
try statement.dump(tree, writer, indent + 1);
@@ -804,7 +801,7 @@ pub const Node = struct {
try writer.writeAll("\n");
},
.accelerator => {
- const accelerator = @fieldParentPtr(Node.Accelerator, "base", node);
+ const accelerator: *Node.Accelerator = @alignCast(@fieldParentPtr("base", node));
for (accelerator.type_and_options, 0..) |option, i| {
if (i != 0) try writer.writeAll(",");
try writer.writeByte(' ');
@@ -815,7 +812,7 @@ pub const Node = struct {
try accelerator.idvalue.dump(tree, writer, indent + 1);
},
.dialog => {
- const dialog = @fieldParentPtr(Node.Dialog, "base", node);
+ const dialog: *Node.Dialog = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} {s} [{d} common_resource_attributes]\n", .{ dialog.id.slice(tree.source), dialog.type.slice(tree.source), dialog.common_resource_attributes.len });
inline for (.{ "x", "y", "width", "height" }) |arg| {
try writer.writeByteNTimes(' ', indent + 1);
@@ -841,7 +838,7 @@ pub const Node = struct {
try writer.writeAll("\n");
},
.control_statement => {
- const control = @fieldParentPtr(Node.ControlStatement, "base", node);
+ const control: *Node.ControlStatement = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s}", .{control.type.slice(tree.source)});
if (control.text) |text| {
try writer.print(" text: {s}", .{text.slice(tree.source)});
@@ -877,7 +874,7 @@ pub const Node = struct {
}
},
.toolbar => {
- const toolbar = @fieldParentPtr(Node.Toolbar, "base", node);
+ const toolbar: *Node.Toolbar = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} {s} [{d} common_resource_attributes]\n", .{ toolbar.id.slice(tree.source), toolbar.type.slice(tree.source), toolbar.common_resource_attributes.len });
inline for (.{ "button_width", "button_height" }) |arg| {
try writer.writeByteNTimes(' ', indent + 1);
@@ -895,7 +892,7 @@ pub const Node = struct {
try writer.writeAll("\n");
},
.menu => {
- const menu = @fieldParentPtr(Node.Menu, "base", node);
+ const menu: *Node.Menu = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} {s} [{d} common_resource_attributes]\n", .{ menu.id.slice(tree.source), menu.type.slice(tree.source), menu.common_resource_attributes.len });
for (menu.optional_statements) |statement| {
try statement.dump(tree, writer, indent + 1);
@@ -916,16 +913,16 @@ pub const Node = struct {
try writer.writeAll("\n");
},
.menu_item => {
- const menu_item = @fieldParentPtr(Node.MenuItem, "base", node);
+ const menu_item: *Node.MenuItem = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} {s} [{d} options]\n", .{ menu_item.menuitem.slice(tree.source), menu_item.text.slice(tree.source), menu_item.option_list.len });
try menu_item.result.dump(tree, writer, indent + 1);
},
.menu_item_separator => {
- const menu_item = @fieldParentPtr(Node.MenuItemSeparator, "base", node);
+ const menu_item: *Node.MenuItemSeparator = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} {s}\n", .{ menu_item.menuitem.slice(tree.source), menu_item.separator.slice(tree.source) });
},
.menu_item_ex => {
- const menu_item = @fieldParentPtr(Node.MenuItemEx, "base", node);
+ const menu_item: *Node.MenuItemEx = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} {s}\n", .{ menu_item.menuitem.slice(tree.source), menu_item.text.slice(tree.source) });
inline for (.{ "id", "type", "state" }) |arg| {
if (@field(menu_item, arg)) |val_node| {
@@ -936,7 +933,7 @@ pub const Node = struct {
}
},
.popup => {
- const popup = @fieldParentPtr(Node.Popup, "base", node);
+ const popup: *Node.Popup = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} {s} [{d} options]\n", .{ popup.popup.slice(tree.source), popup.text.slice(tree.source), popup.option_list.len });
try writer.writeByteNTimes(' ', indent);
try writer.writeAll(popup.begin_token.slice(tree.source));
@@ -949,7 +946,7 @@ pub const Node = struct {
try writer.writeAll("\n");
},
.popup_ex => {
- const popup = @fieldParentPtr(Node.PopupEx, "base", node);
+ const popup: *Node.PopupEx = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} {s}\n", .{ popup.popup.slice(tree.source), popup.text.slice(tree.source) });
inline for (.{ "id", "type", "state", "help_id" }) |arg| {
if (@field(popup, arg)) |val_node| {
@@ -969,7 +966,7 @@ pub const Node = struct {
try writer.writeAll("\n");
},
.version_info => {
- const version_info = @fieldParentPtr(Node.VersionInfo, "base", node);
+ const version_info: *Node.VersionInfo = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} {s} [{d} common_resource_attributes]\n", .{ version_info.id.slice(tree.source), version_info.versioninfo.slice(tree.source), version_info.common_resource_attributes.len });
for (version_info.fixed_info) |fixed_info| {
try fixed_info.dump(tree, writer, indent + 1);
@@ -985,14 +982,14 @@ pub const Node = struct {
try writer.writeAll("\n");
},
.version_statement => {
- const version_statement = @fieldParentPtr(Node.VersionStatement, "base", node);
+ const version_statement: *Node.VersionStatement = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s}\n", .{version_statement.type.slice(tree.source)});
for (version_statement.parts) |part| {
try part.dump(tree, writer, indent + 1);
}
},
.block => {
- const block = @fieldParentPtr(Node.Block, "base", node);
+ const block: *Node.Block = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} {s}\n", .{ block.identifier.slice(tree.source), block.key.slice(tree.source) });
for (block.values) |value| {
try value.dump(tree, writer, indent + 1);
@@ -1008,14 +1005,14 @@ pub const Node = struct {
try writer.writeAll("\n");
},
.block_value => {
- const block_value = @fieldParentPtr(Node.BlockValue, "base", node);
+ const block_value: *Node.BlockValue = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} {s}\n", .{ block_value.identifier.slice(tree.source), block_value.key.slice(tree.source) });
for (block_value.values) |value| {
try value.dump(tree, writer, indent + 1);
}
},
.block_value_value => {
- const block_value = @fieldParentPtr(Node.BlockValueValue, "base", node);
+ const block_value: *Node.BlockValueValue = @alignCast(@fieldParentPtr("base", node));
if (block_value.trailing_comma) {
try writer.writeAll(" ,");
}
@@ -1023,7 +1020,7 @@ pub const Node = struct {
try block_value.expression.dump(tree, writer, indent + 1);
},
.string_table => {
- const string_table = @fieldParentPtr(Node.StringTable, "base", node);
+ const string_table: *Node.StringTable = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} [{d} common_resource_attributes]\n", .{ string_table.type.slice(tree.source), string_table.common_resource_attributes.len });
for (string_table.optional_statements) |statement| {
try statement.dump(tree, writer, indent + 1);
@@ -1040,19 +1037,19 @@ pub const Node = struct {
},
.string_table_string => {
try writer.writeAll("\n");
- const string = @fieldParentPtr(Node.StringTableString, "base", node);
+ const string: *Node.StringTableString = @alignCast(@fieldParentPtr("base", node));
try string.id.dump(tree, writer, indent + 1);
try writer.writeByteNTimes(' ', indent + 1);
try writer.print("{s}\n", .{string.string.slice(tree.source)});
},
.language_statement => {
- const language = @fieldParentPtr(Node.LanguageStatement, "base", node);
+ const language: *Node.LanguageStatement = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s}\n", .{language.language_token.slice(tree.source)});
try language.primary_language_id.dump(tree, writer, indent + 1);
try language.sublanguage_id.dump(tree, writer, indent + 1);
},
.font_statement => {
- const font = @fieldParentPtr(Node.FontStatement, "base", node);
+ const font: *Node.FontStatement = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} typeface: {s}\n", .{ font.identifier.slice(tree.source), font.typeface.slice(tree.source) });
try writer.writeByteNTimes(' ', indent + 1);
try writer.writeAll("point_size:\n");
@@ -1066,12 +1063,12 @@ pub const Node = struct {
}
},
.simple_statement => {
- const statement = @fieldParentPtr(Node.SimpleStatement, "base", node);
+ const statement: *Node.SimpleStatement = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s}\n", .{statement.identifier.slice(tree.source)});
try statement.value.dump(tree, writer, indent + 1);
},
.invalid => {
- const invalid = @fieldParentPtr(Node.Invalid, "base", node);
+ const invalid: *Node.Invalid = @alignCast(@fieldParentPtr("base", node));
try writer.print(" context.len: {}\n", .{invalid.context.len});
for (invalid.context) |context_token| {
try writer.writeByteNTimes(' ', indent + 1);
diff --git a/lib/compiler/resinator/compile.zig b/lib/compiler/resinator/compile.zig
index ebe741e79b0c..25f3a5f5d3bb 100644
--- a/lib/compiler/resinator/compile.zig
+++ b/lib/compiler/resinator/compile.zig
@@ -229,34 +229,34 @@ pub const Compiler = struct {
pub fn writeNode(self: *Compiler, node: *Node, writer: anytype) !void {
switch (node.id) {
.root => unreachable, // writeRoot should be called directly instead
- .resource_external => try self.writeResourceExternal(@fieldParentPtr(Node.ResourceExternal, "base", node), writer),
- .resource_raw_data => try self.writeResourceRawData(@fieldParentPtr(Node.ResourceRawData, "base", node), writer),
+ .resource_external => try self.writeResourceExternal(@alignCast(@fieldParentPtr("base", node)), writer),
+ .resource_raw_data => try self.writeResourceRawData(@alignCast(@fieldParentPtr("base", node)), writer),
.literal => unreachable, // this is context dependent and should be handled by its parent
.binary_expression => unreachable,
.grouped_expression => unreachable,
.not_expression => unreachable,
.invalid => {}, // no-op, currently only used for dangling literals at EOF
- .accelerators => try self.writeAccelerators(@fieldParentPtr(Node.Accelerators, "base", node), writer),
+ .accelerators => try self.writeAccelerators(@alignCast(@fieldParentPtr("base", node)), writer),
.accelerator => unreachable, // handled by writeAccelerators
- .dialog => try self.writeDialog(@fieldParentPtr(Node.Dialog, "base", node), writer),
+ .dialog => try self.writeDialog(@alignCast(@fieldParentPtr("base", node)), writer),
.control_statement => unreachable,
- .toolbar => try self.writeToolbar(@fieldParentPtr(Node.Toolbar, "base", node), writer),
- .menu => try self.writeMenu(@fieldParentPtr(Node.Menu, "base", node), writer),
+ .toolbar => try self.writeToolbar(@alignCast(@fieldParentPtr("base", node)), writer),
+ .menu => try self.writeMenu(@alignCast(@fieldParentPtr("base", node)), writer),
.menu_item => unreachable,
.menu_item_separator => unreachable,
.menu_item_ex => unreachable,
.popup => unreachable,
.popup_ex => unreachable,
- .version_info => try self.writeVersionInfo(@fieldParentPtr(Node.VersionInfo, "base", node), writer),
+ .version_info => try self.writeVersionInfo(@alignCast(@fieldParentPtr("base", node)), writer),
.version_statement => unreachable,
.block => unreachable,
.block_value => unreachable,
.block_value_value => unreachable,
- .string_table => try self.writeStringTable(@fieldParentPtr(Node.StringTable, "base", node)),
+ .string_table => try self.writeStringTable(@alignCast(@fieldParentPtr("base", node))),
.string_table_string => unreachable, // handled by writeStringTable
- .language_statement => self.writeLanguageStatement(@fieldParentPtr(Node.LanguageStatement, "base", node)),
+ .language_statement => self.writeLanguageStatement(@alignCast(@fieldParentPtr("base", node))),
.font_statement => unreachable,
- .simple_statement => self.writeTopLevelSimpleStatement(@fieldParentPtr(Node.SimpleStatement, "base", node)),
+ .simple_statement => self.writeTopLevelSimpleStatement(@alignCast(@fieldParentPtr("base", node))),
}
}
@@ -1289,7 +1289,7 @@ pub const Compiler = struct {
return evaluateNumberExpression(node, self.source, self.input_code_pages).asWord();
} else {
std.debug.assert(node.isStringLiteral());
- const literal = @fieldParentPtr(Node.Literal, "base", node);
+ const literal: *Node.Literal = @alignCast(@fieldParentPtr("base", node));
const bytes = SourceBytes{
.slice = literal.token.slice(self.source),
.code_page = self.input_code_pages.getForToken(literal.token),
@@ -1342,7 +1342,7 @@ pub const Compiler = struct {
/// the writer within this function could return error.NoSpaceLeft
pub fn writeAcceleratorsData(self: *Compiler, node: *Node.Accelerators, data_writer: anytype) !void {
for (node.accelerators, 0..) |accel_node, i| {
- const accelerator = @fieldParentPtr(Node.Accelerator, "base", accel_node);
+ const accelerator: *Node.Accelerator = @alignCast(@fieldParentPtr("base", accel_node));
var modifiers = res.AcceleratorModifiers{};
for (accelerator.type_and_options) |type_or_option| {
const modifier = rc.AcceleratorTypeAndOptions.map.get(type_or_option.slice(self.source)).?;
@@ -1426,7 +1426,7 @@ pub const Compiler = struct {
for (node.optional_statements) |optional_statement| {
switch (optional_statement.id) {
.simple_statement => {
- const simple_statement = @fieldParentPtr(Node.SimpleStatement, "base", optional_statement);
+ const simple_statement: *Node.SimpleStatement = @alignCast(@fieldParentPtr("base", optional_statement));
const statement_identifier = simple_statement.identifier;
const statement_type = rc.OptionalStatements.dialog_map.get(statement_identifier.slice(self.source)) orelse continue;
switch (statement_type) {
@@ -1440,7 +1440,7 @@ pub const Compiler = struct {
},
.caption => {
std.debug.assert(simple_statement.value.id == .literal);
- const literal_node = @fieldParentPtr(Node.Literal, "base", simple_statement.value);
+ const literal_node: *Node.Literal = @alignCast(@fieldParentPtr("base", simple_statement.value));
optional_statement_values.caption = literal_node.token;
},
.class => {
@@ -1466,7 +1466,7 @@ pub const Compiler = struct {
optional_statement_values.class = NameOrOrdinal{ .ordinal = class_ordinal.asWord() };
} else {
std.debug.assert(simple_statement.value.isStringLiteral());
- const literal_node = @fieldParentPtr(Node.Literal, "base", simple_statement.value);
+ const literal_node: *Node.Literal = @alignCast(@fieldParentPtr("base", simple_statement.value));
const parsed = try self.parseQuotedStringAsWideString(literal_node.token);
optional_statement_values.class = NameOrOrdinal{ .name = parsed };
}
@@ -1492,7 +1492,7 @@ pub const Compiler = struct {
}
std.debug.assert(simple_statement.value.id == .literal);
- const literal_node = @fieldParentPtr(Node.Literal, "base", simple_statement.value);
+ const literal_node: *Node.Literal = @alignCast(@fieldParentPtr("base", simple_statement.value));
const token_slice = literal_node.token.slice(self.source);
const bytes = SourceBytes{
@@ -1542,7 +1542,7 @@ pub const Compiler = struct {
}
},
.font_statement => {
- const font = @fieldParentPtr(Node.FontStatement, "base", optional_statement);
+ const font: *Node.FontStatement = @alignCast(@fieldParentPtr("base", optional_statement));
if (optional_statement_values.font != null) {
optional_statement_values.font.?.node = font;
} else {
@@ -1581,7 +1581,7 @@ pub const Compiler = struct {
// Multiple CLASS parameters are specified and any of them are treated as a number, then
// the last CLASS is always treated as a number no matter what
if (last_class_would_be_forced_ordinal and optional_statement_values.class.? == .name) {
- const literal_node = @fieldParentPtr(Node.Literal, "base", last_class.value);
+ const literal_node: *Node.Literal = @alignCast(@fieldParentPtr("base", last_class.value));
const ordinal_value = res.ForcedOrdinal.fromUtf16Le(optional_statement_values.class.?.name);
try self.addErrorDetails(.{
@@ -1611,7 +1611,7 @@ pub const Compiler = struct {
// 2. Multiple MENU parameters are specified and any of them are treated as a number, then
// the last MENU is always treated as a number no matter what
if ((last_menu_would_be_forced_ordinal or last_menu_has_digit_as_first_char) and optional_statement_values.menu.? == .name) {
- const literal_node = @fieldParentPtr(Node.Literal, "base", last_menu.value);
+ const literal_node: *Node.Literal = @alignCast(@fieldParentPtr("base", last_menu.value));
const token_slice = literal_node.token.slice(self.source);
const bytes = SourceBytes{
.slice = token_slice,
@@ -1658,7 +1658,7 @@ pub const Compiler = struct {
// between resinator and the Win32 RC compiler, we only emit a hint instead of
// a warning.
if (last_menu_did_uppercase) {
- const literal_node = @fieldParentPtr(Node.Literal, "base", last_menu.value);
+ const literal_node: *Node.Literal = @alignCast(@fieldParentPtr("base", last_menu.value));
try self.addErrorDetails(.{
.err = .dialog_menu_id_was_uppercased,
.type = .hint,
@@ -1704,7 +1704,7 @@ pub const Compiler = struct {
defer controls_by_id.deinit();
for (node.controls) |control_node| {
- const control = @fieldParentPtr(Node.ControlStatement, "base", control_node);
+ const control: *Node.ControlStatement = @alignCast(@fieldParentPtr("base", control_node));
self.writeDialogControl(
control,
@@ -1940,7 +1940,7 @@ pub const Compiler = struct {
// And then write out the ordinal using a proper a NameOrOrdinal encoding.
try ordinal.write(data_writer);
} else if (class_node.isStringLiteral()) {
- const literal_node = @fieldParentPtr(Node.Literal, "base", class_node);
+ const literal_node: *Node.Literal = @alignCast(@fieldParentPtr("base", class_node));
const parsed = try self.parseQuotedStringAsWideString(literal_node.token);
defer self.allocator.free(parsed);
if (rc.ControlClass.fromWideString(parsed)) |control_class| {
@@ -1955,7 +1955,7 @@ pub const Compiler = struct {
try name.write(data_writer);
}
} else {
- const literal_node = @fieldParentPtr(Node.Literal, "base", class_node);
+ const literal_node: *Node.Literal = @alignCast(@fieldParentPtr("base", class_node));
const literal_slice = literal_node.token.slice(self.source);
// This succeeding is guaranteed by the parser
const control_class = rc.ControlClass.map.get(literal_slice) orelse unreachable;
@@ -2178,7 +2178,7 @@ pub const Compiler = struct {
try writer.writeInt(u16, 0, .little); // null-terminated UTF-16 text
},
.menu_item => {
- const menu_item = @fieldParentPtr(Node.MenuItem, "base", node);
+ const menu_item: *Node.MenuItem = @alignCast(@fieldParentPtr("base", node));
var flags = res.MenuItemFlags{};
for (menu_item.option_list) |option_token| {
// This failing would be a bug in the parser
@@ -2196,7 +2196,7 @@ pub const Compiler = struct {
try writer.writeAll(std.mem.sliceAsBytes(text[0 .. text.len + 1]));
},
.popup => {
- const popup = @fieldParentPtr(Node.Popup, "base", node);
+ const popup: *Node.Popup = @alignCast(@fieldParentPtr("base", node));
var flags = res.MenuItemFlags{ .value = res.MF.POPUP };
for (popup.option_list) |option_token| {
// This failing would be a bug in the parser
@@ -2216,7 +2216,7 @@ pub const Compiler = struct {
}
},
inline .menu_item_ex, .popup_ex => |node_type| {
- const menu_item = @fieldParentPtr(node_type.Type(), "base", node);
+ const menu_item: *node_type.Type() = @alignCast(@fieldParentPtr("base", node));
if (menu_item.type) |flags| {
const value = evaluateNumberExpression(flags, self.source, self.input_code_pages);
@@ -2295,7 +2295,7 @@ pub const Compiler = struct {
for (node.fixed_info) |fixed_info| {
switch (fixed_info.id) {
.version_statement => {
- const version_statement = @fieldParentPtr(Node.VersionStatement, "base", fixed_info);
+ const version_statement: *Node.VersionStatement = @alignCast(@fieldParentPtr("base", fixed_info));
const version_type = rc.VersionInfo.map.get(version_statement.type.slice(self.source)).?;
// Ensure that all parts are cleared for each version, to properly account for
@@ -2345,7 +2345,7 @@ pub const Compiler = struct {
}
},
.simple_statement => {
- const statement = @fieldParentPtr(Node.SimpleStatement, "base", fixed_info);
+ const statement: *Node.SimpleStatement = @alignCast(@fieldParentPtr("base", fixed_info));
const statement_type = rc.VersionInfo.map.get(statement.identifier.slice(self.source)).?;
const value = evaluateNumberExpression(statement.value, self.source, self.input_code_pages);
switch (statement_type) {
@@ -2416,7 +2416,7 @@ pub const Compiler = struct {
switch (node.id) {
inline .block, .block_value => |node_type| {
- const block_or_value = @fieldParentPtr(node_type.Type(), "base", node);
+ const block_or_value: *node_type.Type() = @alignCast(@fieldParentPtr("base", node));
const parsed_key = try self.parseQuotedStringAsWideString(block_or_value.key);
defer self.allocator.free(parsed_key);
@@ -2506,7 +2506,7 @@ pub const Compiler = struct {
const language = getLanguageFromOptionalStatements(node.optional_statements, self.source, self.input_code_pages) orelse self.state.language;
for (node.strings) |string_node| {
- const string = @fieldParentPtr(Node.StringTableString, "base", string_node);
+ const string: *Node.StringTableString = @alignCast(@fieldParentPtr("base", string_node));
const string_id_data = try self.evaluateDataExpression(string.id);
const string_id = string_id_data.number.asWord();
@@ -2795,11 +2795,11 @@ pub const Compiler = struct {
fn applyToOptionalStatements(language: *res.Language, version: *u32, characteristics: *u32, statements: []*Node, source: []const u8, code_page_lookup: *const CodePageLookup) void {
for (statements) |node| switch (node.id) {
.language_statement => {
- const language_statement = @fieldParentPtr(Node.LanguageStatement, "base", node);
+ const language_statement: *Node.LanguageStatement = @alignCast(@fieldParentPtr("base", node));
language.* = languageFromLanguageStatement(language_statement, source, code_page_lookup);
},
.simple_statement => {
- const simple_statement = @fieldParentPtr(Node.SimpleStatement, "base", node);
+ const simple_statement: *Node.SimpleStatement = @alignCast(@fieldParentPtr("base", node));
const statement_type = rc.OptionalStatements.map.get(simple_statement.identifier.slice(source)) orelse continue;
const result = Compiler.evaluateNumberExpression(simple_statement.value, source, code_page_lookup);
switch (statement_type) {
@@ -2824,7 +2824,7 @@ pub const Compiler = struct {
pub fn getLanguageFromOptionalStatements(statements: []*Node, source: []const u8, code_page_lookup: *const CodePageLookup) ?res.Language {
for (statements) |node| switch (node.id) {
.language_statement => {
- const language_statement = @fieldParentPtr(Node.LanguageStatement, "base", node);
+ const language_statement: *Node.LanguageStatement = @alignCast(@fieldParentPtr("base", node));
return languageFromLanguageStatement(language_statement, source, code_page_lookup);
},
else => continue,
diff --git a/lib/compiler/resinator/parse.zig b/lib/compiler/resinator/parse.zig
index 3426c389b162..e6fe228dcc49 100644
--- a/lib/compiler/resinator/parse.zig
+++ b/lib/compiler/resinator/parse.zig
@@ -889,7 +889,7 @@ pub const Parser = struct {
if (control == .control) {
class = try self.parseExpression(.{});
if (class.?.id == .literal) {
- const class_literal = @fieldParentPtr(Node.Literal, "base", class.?);
+ const class_literal: *Node.Literal = @alignCast(@fieldParentPtr("base", class.?));
const is_invalid_control_class = class_literal.token.id == .literal and !rc.ControlClass.map.has(class_literal.token.slice(self.lexer.buffer));
if (is_invalid_control_class) {
return self.addErrorDetailsAndFail(.{
diff --git a/lib/docs/wasm/Walk.zig b/lib/docs/wasm/Walk.zig
index 486f4c32d260..3f7ccc4bac3b 100644
--- a/lib/docs/wasm/Walk.zig
+++ b/lib/docs/wasm/Walk.zig
@@ -48,7 +48,7 @@ pub const File = struct {
pub fn field_count(file: *const File, node: Ast.Node.Index) u32 {
const scope = file.scopes.get(node) orelse return 0;
if (scope.tag != .namespace) return 0;
- const namespace = @fieldParentPtr(Scope.Namespace, "base", scope);
+ const namespace: *Scope.Namespace = @alignCast(@fieldParentPtr("base", scope));
return namespace.field_count;
}
@@ -439,11 +439,11 @@ pub const Scope = struct {
while (true) switch (it.tag) {
.top => unreachable,
.local => {
- const local = @fieldParentPtr(Local, "base", it);
+ const local: *Local = @alignCast(@fieldParentPtr("base", it));
it = local.parent;
},
.namespace => {
- const namespace = @fieldParentPtr(Namespace, "base", it);
+ const namespace: *Namespace = @alignCast(@fieldParentPtr("base", it));
return namespace.decl_index;
},
};
@@ -453,7 +453,7 @@ pub const Scope = struct {
switch (scope.tag) {
.top, .local => return null,
.namespace => {
- const namespace = @fieldParentPtr(Namespace, "base", scope);
+ const namespace: *Namespace = @alignCast(@fieldParentPtr("base", scope));
return namespace.names.get(name);
},
}
@@ -465,7 +465,7 @@ pub const Scope = struct {
while (true) switch (it.tag) {
.top => break,
.local => {
- const local = @fieldParentPtr(Local, "base", it);
+ const local: *Local = @alignCast(@fieldParentPtr("base", it));
const name_token = main_tokens[local.var_node] + 1;
const ident_name = ast.tokenSlice(name_token);
if (std.mem.eql(u8, ident_name, name)) {
@@ -474,7 +474,7 @@ pub const Scope = struct {
it = local.parent;
},
.namespace => {
- const namespace = @fieldParentPtr(Namespace, "base", it);
+ const namespace: *Namespace = @alignCast(@fieldParentPtr("base", it));
if (namespace.names.get(name)) |node| {
return node;
}
diff --git a/lib/std/Build.zig b/lib/std/Build.zig
index f850f733953e..5db70d5491e2 100644
--- a/lib/std/Build.zig
+++ b/lib/std/Build.zig
@@ -1062,8 +1062,8 @@ pub fn getUninstallStep(self: *Build) *Step {
fn makeUninstall(uninstall_step: *Step, prog_node: *std.Progress.Node) anyerror!void {
_ = prog_node;
- const uninstall_tls = @fieldParentPtr(TopLevelStep, "step", uninstall_step);
- const self = @fieldParentPtr(Build, "uninstall_tls", uninstall_tls);
+ const uninstall_tls: *TopLevelStep = @fieldParentPtr("step", uninstall_step);
+ const self: *Build = @fieldParentPtr("uninstall_tls", uninstall_tls);
for (self.installed_files.items) |installed_file| {
const full_path = self.getInstallPath(installed_file.dir, installed_file.path);
diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig
index 48af9e54d6fa..d88288748c41 100644
--- a/lib/std/Build/Step.zig
+++ b/lib/std/Build/Step.zig
@@ -231,7 +231,7 @@ fn makeNoOp(step: *Step, prog_node: *std.Progress.Node) anyerror!void {
pub fn cast(step: *Step, comptime T: type) ?*T {
if (step.id == T.base_id) {
- return @fieldParentPtr(T, "step", step);
+ return @fieldParentPtr("step", step);
}
return null;
}
diff --git a/lib/std/Build/Step/CheckFile.zig b/lib/std/Build/Step/CheckFile.zig
index d072865922fb..19d697a3b65c 100644
--- a/lib/std/Build/Step/CheckFile.zig
+++ b/lib/std/Build/Step/CheckFile.zig
@@ -49,7 +49,7 @@ pub fn setName(self: *CheckFile, name: []const u8) void {
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
_ = prog_node;
const b = step.owner;
- const self = @fieldParentPtr(CheckFile, "step", step);
+ const self: *CheckFile = @fieldParentPtr("step", step);
const src_path = self.source.getPath(b);
const contents = fs.cwd().readFileAlloc(b.allocator, src_path, self.max_bytes) catch |err| {
diff --git a/lib/std/Build/Step/CheckObject.zig b/lib/std/Build/Step/CheckObject.zig
index c5eb1f776bbd..c39a487649bc 100644
--- a/lib/std/Build/Step/CheckObject.zig
+++ b/lib/std/Build/Step/CheckObject.zig
@@ -530,7 +530,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
_ = prog_node;
const b = step.owner;
const gpa = b.allocator;
- const self = @fieldParentPtr(CheckObject, "step", step);
+ const self: *CheckObject = @fieldParentPtr("step", step);
const src_path = self.source.getPath(b);
const contents = fs.cwd().readFileAllocOptions(
diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig
index 9c1354fd3034..0c37769e9cf2 100644
--- a/lib/std/Build/Step/Compile.zig
+++ b/lib/std/Build/Step/Compile.zig
@@ -918,7 +918,7 @@ fn getGeneratedFilePath(self: *Compile, comptime tag_name: []const u8, asking_st
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
const b = step.owner;
const arena = b.allocator;
- const self = @fieldParentPtr(Compile, "step", step);
+ const self: *Compile = @fieldParentPtr("step", step);
var zig_args = ArrayList([]const u8).init(arena);
defer zig_args.deinit();
diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig
index 9c2f5d0826c5..46631cac24a4 100644
--- a/lib/std/Build/Step/ConfigHeader.zig
+++ b/lib/std/Build/Step/ConfigHeader.zig
@@ -167,7 +167,7 @@ fn putValue(self: *ConfigHeader, field_name: []const u8, comptime T: type, v: T)
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
_ = prog_node;
const b = step.owner;
- const self = @fieldParentPtr(ConfigHeader, "step", step);
+ const self: *ConfigHeader = @fieldParentPtr("step", step);
const gpa = b.allocator;
const arena = b.allocator;
diff --git a/lib/std/Build/Step/Fmt.zig b/lib/std/Build/Step/Fmt.zig
index 4ff9d4e58931..a39a0057ff14 100644
--- a/lib/std/Build/Step/Fmt.zig
+++ b/lib/std/Build/Step/Fmt.zig
@@ -47,7 +47,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
const b = step.owner;
const arena = b.allocator;
- const self = @fieldParentPtr(Fmt, "step", step);
+ const self: *Fmt = @fieldParentPtr("step", step);
var argv: std.ArrayListUnmanaged([]const u8) = .{};
try argv.ensureUnusedCapacity(arena, 2 + 1 + self.paths.len + 2 * self.exclude_paths.len);
diff --git a/lib/std/Build/Step/InstallArtifact.zig b/lib/std/Build/Step/InstallArtifact.zig
index b9c3acfbc912..3ef69f597512 100644
--- a/lib/std/Build/Step/InstallArtifact.zig
+++ b/lib/std/Build/Step/InstallArtifact.zig
@@ -121,7 +121,7 @@ pub fn create(owner: *std.Build, artifact: *Step.Compile, options: Options) *Ins
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
_ = prog_node;
- const self = @fieldParentPtr(InstallArtifact, "step", step);
+ const self: *InstallArtifact = @fieldParentPtr("step", step);
const dest_builder = step.owner;
const cwd = fs.cwd();
diff --git a/lib/std/Build/Step/InstallDir.zig b/lib/std/Build/Step/InstallDir.zig
index 7472dba2a740..96ab9205316f 100644
--- a/lib/std/Build/Step/InstallDir.zig
+++ b/lib/std/Build/Step/InstallDir.zig
@@ -63,7 +63,7 @@ pub fn create(owner: *std.Build, options: Options) *InstallDirStep {
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
_ = prog_node;
- const self = @fieldParentPtr(InstallDirStep, "step", step);
+ const self: *InstallDirStep = @fieldParentPtr("step", step);
const dest_builder = self.dest_builder;
const arena = dest_builder.allocator;
const dest_prefix = dest_builder.getInstallPath(self.options.install_dir, self.options.install_subdir);
diff --git a/lib/std/Build/Step/InstallFile.zig b/lib/std/Build/Step/InstallFile.zig
index f7487de5985f..ca5a986fd16a 100644
--- a/lib/std/Build/Step/InstallFile.zig
+++ b/lib/std/Build/Step/InstallFile.zig
@@ -43,7 +43,7 @@ pub fn create(
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
_ = prog_node;
const src_builder = step.owner;
- const self = @fieldParentPtr(InstallFile, "step", step);
+ const self: *InstallFile = @fieldParentPtr("step", step);
const dest_builder = self.dest_builder;
const full_src_path = self.source.getPath2(src_builder, step);
const full_dest_path = dest_builder.getInstallPath(self.dir, self.dest_rel_path);
diff --git a/lib/std/Build/Step/ObjCopy.zig b/lib/std/Build/Step/ObjCopy.zig
index 5ad123f89ff3..ddd56833c04b 100644
--- a/lib/std/Build/Step/ObjCopy.zig
+++ b/lib/std/Build/Step/ObjCopy.zig
@@ -92,7 +92,7 @@ pub fn getOutputSeparatedDebug(self: *const ObjCopy) ?std.Build.LazyPath {
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
const b = step.owner;
- const self = @fieldParentPtr(ObjCopy, "step", step);
+ const self: *ObjCopy = @fieldParentPtr("step", step);
var man = b.graph.cache.obtain();
defer man.deinit();
diff --git a/lib/std/Build/Step/Options.zig b/lib/std/Build/Step/Options.zig
index 436c7fb8e7c5..1d0e34a254b7 100644
--- a/lib/std/Build/Step/Options.zig
+++ b/lib/std/Build/Step/Options.zig
@@ -415,7 +415,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
_ = prog_node;
const b = step.owner;
- const self = @fieldParentPtr(Options, "step", step);
+ const self: *Options = @fieldParentPtr("step", step);
for (self.args.items) |item| {
self.addOption(
diff --git a/lib/std/Build/Step/RemoveDir.zig b/lib/std/Build/Step/RemoveDir.zig
index 7a2a56771a6d..de1d786e41b1 100644
--- a/lib/std/Build/Step/RemoveDir.zig
+++ b/lib/std/Build/Step/RemoveDir.zig
@@ -28,7 +28,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
_ = prog_node;
const b = step.owner;
- const self = @fieldParentPtr(RemoveDir, "step", step);
+ const self: *RemoveDir = @fieldParentPtr("step", step);
b.build_root.handle.deleteTree(self.dir_path) catch |err| {
if (b.build_root.path) |base| {
diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig
index cb1ed7e7dc76..b3550624fc6d 100644
--- a/lib/std/Build/Step/Run.zig
+++ b/lib/std/Build/Step/Run.zig
@@ -497,7 +497,7 @@ const IndexedOutput = struct {
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
const b = step.owner;
const arena = b.allocator;
- const self = @fieldParentPtr(Run, "step", step);
+ const self: *Run = @fieldParentPtr("step", step);
const has_side_effects = self.hasSideEffects();
var argv_list = ArrayList([]const u8).init(arena);
diff --git a/lib/std/Build/Step/TranslateC.zig b/lib/std/Build/Step/TranslateC.zig
index 7d69ffa8b018..ac421a67cad5 100644
--- a/lib/std/Build/Step/TranslateC.zig
+++ b/lib/std/Build/Step/TranslateC.zig
@@ -118,7 +118,7 @@ pub fn defineCMacroRaw(self: *TranslateC, name_and_value: []const u8) void {
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
const b = step.owner;
- const self = @fieldParentPtr(TranslateC, "step", step);
+ const self: *TranslateC = @fieldParentPtr("step", step);
var argv_list = std.ArrayList([]const u8).init(b.allocator);
try argv_list.append(b.graph.zig_exe);
diff --git a/lib/std/Build/Step/WriteFile.zig b/lib/std/Build/Step/WriteFile.zig
index aab40a8a5e09..d0ac68377ac1 100644
--- a/lib/std/Build/Step/WriteFile.zig
+++ b/lib/std/Build/Step/WriteFile.zig
@@ -141,7 +141,7 @@ fn maybeUpdateName(wf: *WriteFile) void {
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
_ = prog_node;
const b = step.owner;
- const wf = @fieldParentPtr(WriteFile, "step", step);
+ const wf: *WriteFile = @fieldParentPtr("step", step);
// Writing to source files is kind of an extra capability of this
// WriteFile - arguably it should be a different step. But anyway here
diff --git a/lib/std/Thread/Futex.zig b/lib/std/Thread/Futex.zig
index 39afe249d19e..4bbe1f629389 100644
--- a/lib/std/Thread/Futex.zig
+++ b/lib/std/Thread/Futex.zig
@@ -644,7 +644,7 @@ const PosixImpl = struct {
};
// There's a wait queue on the address; get the queue head and tail.
- const head = @fieldParentPtr(Waiter, "node", entry_node);
+ const head: *Waiter = @fieldParentPtr("node", entry_node);
const tail = head.tail orelse unreachable;
// Push the waiter to the tail by replacing it and linking to the previous tail.
@@ -656,7 +656,7 @@ const PosixImpl = struct {
fn remove(treap: *Treap, address: usize, max_waiters: usize) WaitList {
// Find the wait queue associated with this address and get the head/tail if any.
var entry = treap.getEntryFor(address);
- var queue_head = if (entry.node) |node| @fieldParentPtr(Waiter, "node", node) else null;
+ var queue_head: ?*Waiter = if (entry.node) |node| @fieldParentPtr("node", node) else null;
const queue_tail = if (queue_head) |head| head.tail else null;
// Once we're done updating the head, fix it's tail pointer and update the treap's queue head as well.
@@ -699,7 +699,7 @@ const PosixImpl = struct {
};
// The queue head and tail must exist if we're removing a queued waiter.
- const head = @fieldParentPtr(Waiter, "node", entry.node orelse unreachable);
+ const head: *Waiter = @fieldParentPtr("node", entry.node orelse unreachable);
const tail = head.tail orelse unreachable;
// A waiter with a previous link is never the head of the queue.
diff --git a/lib/std/Thread/Pool.zig b/lib/std/Thread/Pool.zig
index 3694f94be42a..a96b4255e200 100644
--- a/lib/std/Thread/Pool.zig
+++ b/lib/std/Thread/Pool.zig
@@ -88,8 +88,8 @@ pub fn spawn(pool: *Pool, comptime func: anytype, args: anytype) !void {
run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } },
fn runFn(runnable: *Runnable) void {
- const run_node = @fieldParentPtr(RunQueue.Node, "data", runnable);
- const closure = @fieldParentPtr(@This(), "run_node", run_node);
+ const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable);
+ const closure: *@This() = @fieldParentPtr("run_node", run_node);
@call(.auto, func, closure.arguments);
// The thread pool's allocator is protected by the mutex.
diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig
index 9827ef649365..dfc0fd56ea00 100644
--- a/lib/std/c/darwin.zig
+++ b/lib/std/c/darwin.zig
@@ -1150,8 +1150,8 @@ pub const siginfo_t = extern struct {
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with function name.
pub const Sigaction = extern struct {
- pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
- pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+ pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+ pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
handler: extern union {
handler: ?handler_fn,
diff --git a/lib/std/c/dragonfly.zig b/lib/std/c/dragonfly.zig
index 0026549202a8..cc72aaa072eb 100644
--- a/lib/std/c/dragonfly.zig
+++ b/lib/std/c/dragonfly.zig
@@ -690,8 +690,8 @@ pub const empty_sigset = sigset_t{ .__bits = [_]c_uint{0} ** _SIG_WORDS };
pub const sig_atomic_t = c_int;
pub const Sigaction = extern struct {
- pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
- pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+ pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+ pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
/// signal handler
handler: extern union {
@@ -702,7 +702,7 @@ pub const Sigaction = extern struct {
mask: sigset_t,
};
-pub const sig_t = *const fn (c_int) callconv(.C) void;
+pub const sig_t = *const fn (i32) callconv(.C) void;
pub const SOCK = struct {
pub const STREAM = 1;
diff --git a/lib/std/c/freebsd.zig b/lib/std/c/freebsd.zig
index 5e2b6bd3155a..a60f5de525ee 100644
--- a/lib/std/c/freebsd.zig
+++ b/lib/std/c/freebsd.zig
@@ -1171,8 +1171,8 @@ const NSIG = 32;
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = extern struct {
- pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
- pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+ pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+ pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
/// signal handler
handler: extern union {
diff --git a/lib/std/c/haiku.zig b/lib/std/c/haiku.zig
index f4c928c79c48..d75dd3bf0066 100644
--- a/lib/std/c/haiku.zig
+++ b/lib/std/c/haiku.zig
@@ -501,7 +501,7 @@ pub const siginfo_t = extern struct {
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = extern struct {
pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
- pub const sigaction_fn = *const fn (c_int, *allowzero anyopaque, ?*anyopaque) callconv(.C) void;
+ pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
/// signal handler
handler: extern union {
diff --git a/lib/std/c/netbsd.zig b/lib/std/c/netbsd.zig
index 61be065eaa64..3ec6de59b285 100644
--- a/lib/std/c/netbsd.zig
+++ b/lib/std/c/netbsd.zig
@@ -864,8 +864,8 @@ pub const SIG = struct {
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = extern struct {
- pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
- pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+ pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+ pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
/// signal handler
handler: extern union {
diff --git a/lib/std/c/openbsd.zig b/lib/std/c/openbsd.zig
index 98a93ac86fa2..bb82168ca359 100644
--- a/lib/std/c/openbsd.zig
+++ b/lib/std/c/openbsd.zig
@@ -842,8 +842,8 @@ pub const SIG = struct {
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = extern struct {
- pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
- pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+ pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+ pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
/// signal handler
handler: extern union {
diff --git a/lib/std/c/solaris.zig b/lib/std/c/solaris.zig
index c49751229972..4f08c32b0374 100644
--- a/lib/std/c/solaris.zig
+++ b/lib/std/c/solaris.zig
@@ -874,8 +874,8 @@ pub const SIG = struct {
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = extern struct {
- pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
- pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+ pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+ pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
/// signal options
flags: c_uint,
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 824adc026164..d1d6201b807e 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -2570,7 +2570,7 @@ fn resetSegfaultHandler() void {
updateSegfaultHandler(&act) catch {};
}
-fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*const anyopaque) callconv(.C) noreturn {
+fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*anyopaque) callconv(.C) noreturn {
// Reset to the default handler so that if a segfault happens in this handler it will crash
// the process. Also when this handler returns, the original instruction will be repeated
// and the resulting segfault will crash the process rather than continually dump stack traces.
diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig
index 0e70b839b4e0..2cec73f281d7 100644
--- a/lib/std/http/Client.zig
+++ b/lib/std/http/Client.zig
@@ -108,7 +108,7 @@ pub const ConnectionPool = struct {
pool.mutex.lock();
defer pool.mutex.unlock();
- const node = @fieldParentPtr(Node, "data", connection);
+ const node: *Node = @fieldParentPtr("data", connection);
pool.used.remove(node);
diff --git a/lib/std/os/emscripten.zig b/lib/std/os/emscripten.zig
index 76435de7e33d..95d550d7264f 100644
--- a/lib/std/os/emscripten.zig
+++ b/lib/std/os/emscripten.zig
@@ -695,8 +695,8 @@ pub const SIG = struct {
};
pub const Sigaction = extern struct {
- pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
- pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+ pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+ pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
handler: extern union {
handler: ?handler_fn,
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index bcafa9fff3b5..69cef35e986c 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -4301,7 +4301,7 @@ pub const all_mask: sigset_t = [_]u32{0xffffffff} ** @typeInfo(sigset_t).Array.l
pub const app_mask: sigset_t = [2]u32{ 0xfffffffc, 0x7fffffff } ++ [_]u32{0xffffffff} ** 30;
const k_sigaction_funcs = struct {
- const handler = ?*align(1) const fn (c_int) callconv(.C) void;
+ const handler = ?*align(1) const fn (i32) callconv(.C) void;
const restorer = *const fn () callconv(.C) void;
};
@@ -4328,8 +4328,8 @@ pub const k_sigaction = switch (native_arch) {
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = extern struct {
- pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
- pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+ pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+ pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
handler: extern union {
handler: ?handler_fn,
diff --git a/lib/std/os/plan9.zig b/lib/std/os/plan9.zig
index d44f228f3155..377b6d8c09af 100644
--- a/lib/std/os/plan9.zig
+++ b/lib/std/os/plan9.zig
@@ -186,8 +186,8 @@ pub const empty_sigset = 0;
pub const siginfo_t = c_long;
// TODO plan9 doesn't have sigaction_fn. Sigaction is not a union, but we incude it here to be compatible.
pub const Sigaction = extern struct {
- pub const handler_fn = *const fn (c_int) callconv(.C) void;
- pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+ pub const handler_fn = *const fn (i32) callconv(.C) void;
+ pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
handler: extern union {
handler: ?handler_fn,
diff --git a/lib/std/start.zig b/lib/std/start.zig
index 3a2b0714f745..bcd39a27bf40 100644
--- a/lib/std/start.zig
+++ b/lib/std/start.zig
@@ -597,4 +597,4 @@ fn maybeIgnoreSigpipe() void {
}
}
-fn noopSigHandler(_: c_int) callconv(.C) void {}
+fn noopSigHandler(_: i32) callconv(.C) void {}
diff --git a/lib/std/zig.zig b/lib/std/zig.zig
index 0b2b29eebfa3..2101530a7fdb 100644
--- a/lib/std/zig.zig
+++ b/lib/std/zig.zig
@@ -1021,4 +1021,5 @@ test {
_ = string_literal;
_ = system;
_ = target;
+ _ = c_translation;
}
diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig
index 2cab0fe7ca91..a52007eabf24 100644
--- a/lib/std/zig/AstGen.zig
+++ b/lib/std/zig/AstGen.zig
@@ -316,8 +316,7 @@ const ResultInfo = struct {
};
/// Find the result type for a cast builtin given the result location.
- /// If the location does not have a known result type, emits an error on
- /// the given node.
+ /// If the location does not have a known result type, returns `null`.
fn resultType(rl: Loc, gz: *GenZir, node: Ast.Node.Index) !?Zir.Inst.Ref {
return switch (rl) {
.discard, .none, .ref, .inferred_ptr, .destructure => null,
@@ -330,6 +329,9 @@ const ResultInfo = struct {
};
}
+ /// Find the result type for a cast builtin given the result location.
+ /// If the location does not have a known result type, emits an error on
+ /// the given node.
fn resultTypeForCast(rl: Loc, gz: *GenZir, node: Ast.Node.Index, builtin_name: []const u8) !Zir.Inst.Ref {
const astgen = gz.astgen;
if (try rl.resultType(gz, node)) |ty| return ty;
@@ -2786,7 +2788,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.atomic_load,
.atomic_rmw,
.mul_add,
- .field_parent_ptr,
.max,
.min,
.c_import,
@@ -8853,6 +8854,7 @@ fn ptrCast(
const node_datas = tree.nodes.items(.data);
const node_tags = tree.nodes.items(.tag);
+ const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?;
var flags: Zir.Inst.FullPtrCastFlags = .{};
// Note that all pointer cast builtins have one parameter, so we only need
@@ -8870,36 +8872,62 @@ fn ptrCast(
}
if (node_datas[node].lhs == 0) break; // 0 args
- if (node_datas[node].rhs != 0) break; // 2 args
const builtin_token = main_tokens[node];
const builtin_name = tree.tokenSlice(builtin_token);
const info = BuiltinFn.list.get(builtin_name) orelse break;
- if (info.param_count != 1) break;
+ if (node_datas[node].rhs == 0) {
+ // 1 arg
+ if (info.param_count != 1) break;
+
+ switch (info.tag) {
+ else => break,
+ inline .ptr_cast,
+ .align_cast,
+ .addrspace_cast,
+ .const_cast,
+ .volatile_cast,
+ => |tag| {
+ if (@field(flags, @tagName(tag))) {
+ return astgen.failNode(node, "redundant {s}", .{builtin_name});
+ }
+ @field(flags, @tagName(tag)) = true;
+ },
+ }
- switch (info.tag) {
- else => break,
- inline .ptr_cast,
- .align_cast,
- .addrspace_cast,
- .const_cast,
- .volatile_cast,
- => |tag| {
- if (@field(flags, @tagName(tag))) {
- return astgen.failNode(node, "redundant {s}", .{builtin_name});
- }
- @field(flags, @tagName(tag)) = true;
- },
+ node = node_datas[node].lhs;
+ } else {
+ // 2 args
+ if (info.param_count != 2) break;
+
+ switch (info.tag) {
+ else => break,
+ .field_parent_ptr => {
+ if (flags.ptr_cast) break;
+
+ const flags_int: FlagsInt = @bitCast(flags);
+ const cursor = maybeAdvanceSourceCursorToMainToken(gz, root_node);
+ const parent_ptr_type = try ri.rl.resultTypeForCast(gz, root_node, "@fieldParentPtr");
+ const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, node_datas[node].lhs);
+ const field_ptr = try expr(gz, scope, .{ .rl = .none }, node_datas[node].rhs);
+ try emitDbgStmt(gz, cursor);
+ const result = try gz.addExtendedPayloadSmall(.field_parent_ptr, flags_int, Zir.Inst.FieldParentPtr{
+ .src_node = gz.nodeIndexToRelative(node),
+ .parent_ptr_type = parent_ptr_type,
+ .field_name = field_name,
+ .field_ptr = field_ptr,
+ });
+ return rvalue(gz, ri, result, root_node);
+ },
+ }
}
-
- node = node_datas[node].lhs;
}
- const flags_i: u5 = @bitCast(flags);
- assert(flags_i != 0);
+ const flags_int: FlagsInt = @bitCast(flags);
+ assert(flags_int != 0);
const ptr_only: Zir.Inst.FullPtrCastFlags = .{ .ptr_cast = true };
- if (flags_i == @as(u5, @bitCast(ptr_only))) {
+ if (flags_int == @as(FlagsInt, @bitCast(ptr_only))) {
// Special case: simpler representation
return typeCast(gz, scope, ri, root_node, node, .ptr_cast, "@ptrCast");
}
@@ -8908,12 +8936,12 @@ fn ptrCast(
.const_cast = true,
.volatile_cast = true,
};
- if ((flags_i & ~@as(u5, @bitCast(no_result_ty_flags))) == 0) {
+ if ((flags_int & ~@as(FlagsInt, @bitCast(no_result_ty_flags))) == 0) {
// Result type not needed
const cursor = maybeAdvanceSourceCursorToMainToken(gz, root_node);
const operand = try expr(gz, scope, .{ .rl = .none }, node);
try emitDbgStmt(gz, cursor);
- const result = try gz.addExtendedPayloadSmall(.ptr_cast_no_dest, flags_i, Zir.Inst.UnNode{
+ const result = try gz.addExtendedPayloadSmall(.ptr_cast_no_dest, flags_int, Zir.Inst.UnNode{
.node = gz.nodeIndexToRelative(root_node),
.operand = operand,
});
@@ -8926,7 +8954,7 @@ fn ptrCast(
const result_type = try ri.rl.resultTypeForCast(gz, root_node, flags.needResultTypeBuiltinName());
const operand = try expr(gz, scope, .{ .rl = .none }, node);
try emitDbgStmt(gz, cursor);
- const result = try gz.addExtendedPayloadSmall(.ptr_cast_full, flags_i, Zir.Inst.BinNode{
+ const result = try gz.addExtendedPayloadSmall(.ptr_cast_full, flags_int, Zir.Inst.BinNode{
.node = gz.nodeIndexToRelative(root_node),
.lhs = result_type,
.rhs = operand,
@@ -9379,7 +9407,7 @@ fn builtinCall(
try emitDbgNode(gz, node);
const result = try gz.addExtendedPayload(.error_cast, Zir.Inst.BinNode{
- .lhs = try ri.rl.resultTypeForCast(gz, node, "@errorCast"),
+ .lhs = try ri.rl.resultTypeForCast(gz, node, builtin_name),
.rhs = try expr(gz, scope, .{ .rl = .none }, params[0]),
.node = gz.nodeIndexToRelative(node),
});
@@ -9452,7 +9480,7 @@ fn builtinCall(
},
.splat => {
- const result_type = try ri.rl.resultTypeForCast(gz, node, "@splat");
+ const result_type = try ri.rl.resultTypeForCast(gz, node, builtin_name);
const elem_type = try gz.addUnNode(.vector_elem_type, result_type, node);
const scalar = try expr(gz, scope, .{ .rl = .{ .ty = elem_type } }, params[0]);
const result = try gz.addPlNode(.splat, node, Zir.Inst.Bin{
@@ -9537,12 +9565,13 @@ fn builtinCall(
return rvalue(gz, ri, result, node);
},
.field_parent_ptr => {
- const parent_type = try typeExpr(gz, scope, params[0]);
- const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1]);
- const result = try gz.addPlNode(.field_parent_ptr, node, Zir.Inst.FieldParentPtr{
- .parent_type = parent_type,
+ const parent_ptr_type = try ri.rl.resultTypeForCast(gz, node, builtin_name);
+ const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[0]);
+ const result = try gz.addExtendedPayloadSmall(.field_parent_ptr, 0, Zir.Inst.FieldParentPtr{
+ .src_node = gz.nodeIndexToRelative(node),
+ .parent_ptr_type = parent_ptr_type,
.field_name = field_name,
- .field_ptr = try expr(gz, scope, .{ .rl = .none }, params[2]),
+ .field_ptr = try expr(gz, scope, .{ .rl = .none }, params[1]),
});
return rvalue(gz, ri, result, node);
},
@@ -11686,20 +11715,20 @@ const Scope = struct {
fn cast(base: *Scope, comptime T: type) ?*T {
if (T == Defer) {
switch (base.tag) {
- .defer_normal, .defer_error => return @fieldParentPtr(T, "base", base),
+ .defer_normal, .defer_error => return @alignCast(@fieldParentPtr("base", base)),
else => return null,
}
}
if (T == Namespace) {
switch (base.tag) {
- .namespace => return @fieldParentPtr(T, "base", base),
+ .namespace => return @alignCast(@fieldParentPtr("base", base)),
else => return null,
}
}
if (base.tag != T.base_tag)
return null;
- return @fieldParentPtr(T, "base", base);
+ return @alignCast(@fieldParentPtr("base", base));
}
fn parent(base: *Scope) ?*Scope {
diff --git a/lib/std/zig/AstRlAnnotate.zig b/lib/std/zig/AstRlAnnotate.zig
index 7e75b5c14810..4a1203ca09fc 100644
--- a/lib/std/zig/AstRlAnnotate.zig
+++ b/lib/std/zig/AstRlAnnotate.zig
@@ -911,6 +911,7 @@ fn builtinCall(astrl: *AstRlAnnotate, block: ?*Block, ri: ResultInfo, node: Ast.
.work_item_id,
.work_group_size,
.work_group_id,
+ .field_parent_ptr,
=> {
_ = try astrl.expr(args[0], block, ResultInfo.type_only);
return false;
@@ -976,7 +977,6 @@ fn builtinCall(astrl: *AstRlAnnotate, block: ?*Block, ri: ResultInfo, node: Ast.
},
.bit_offset_of,
.offset_of,
- .field_parent_ptr,
.has_decl,
.has_field,
.field,
diff --git a/lib/std/zig/BuiltinFn.zig b/lib/std/zig/BuiltinFn.zig
index decb3cf7fd8e..11d6a17303c8 100644
--- a/lib/std/zig/BuiltinFn.zig
+++ b/lib/std/zig/BuiltinFn.zig
@@ -504,7 +504,7 @@ pub const list = list: {
"@fieldParentPtr",
.{
.tag = .field_parent_ptr,
- .param_count = 3,
+ .param_count = 2,
},
},
.{
diff --git a/lib/std/zig/Zir.zig b/lib/std/zig/Zir.zig
index 8aa4c0c8c53e..db082b7f8e6f 100644
--- a/lib/std/zig/Zir.zig
+++ b/lib/std/zig/Zir.zig
@@ -940,9 +940,6 @@ pub const Inst = struct {
/// The addend communicates the type of the builtin.
/// The mulends need to be coerced to the same type.
mul_add,
- /// Implements the `@fieldParentPtr` builtin.
- /// Uses the `pl_node` union field with payload `FieldParentPtr`.
- field_parent_ptr,
/// Implements the `@memcpy` builtin.
/// Uses the `pl_node` union field with payload `Bin`.
memcpy,
@@ -1230,7 +1227,6 @@ pub const Inst = struct {
.atomic_store,
.mul_add,
.builtin_call,
- .field_parent_ptr,
.max,
.memcpy,
.memset,
@@ -1522,7 +1518,6 @@ pub const Inst = struct {
.atomic_rmw,
.mul_add,
.builtin_call,
- .field_parent_ptr,
.max,
.min,
.c_import,
@@ -1794,7 +1789,6 @@ pub const Inst = struct {
.atomic_store = .pl_node,
.mul_add = .pl_node,
.builtin_call = .pl_node,
- .field_parent_ptr = .pl_node,
.max = .pl_node,
.memcpy = .pl_node,
.memset = .pl_node,
@@ -2064,6 +2058,12 @@ pub const Inst = struct {
/// with a specific value. For instance, this is used for the capture of an `errdefer`.
/// This should never appear in a body.
value_placeholder,
+ /// Implements the `@fieldParentPtr` builtin.
+ /// `operand` is payload index to `FieldParentPtr`.
+ /// `small` contains `FullPtrCastFlags`.
+ /// Guaranteed to not have the `ptr_cast` flag.
+ /// The `FieldParentPtr` payload is stored in `extra` at index `operand`.
+ field_parent_ptr,
pub const InstData = struct {
opcode: Extended,
@@ -3363,9 +3363,14 @@ pub const Inst = struct {
};
pub const FieldParentPtr = struct {
- parent_type: Ref,
+ src_node: i32,
+ parent_ptr_type: Ref,
field_name: Ref,
field_ptr: Ref,
+
+ pub fn src(self: FieldParentPtr) LazySrcLoc {
+ return LazySrcLoc.nodeOffset(self.src_node);
+ }
};
pub const Shuffle = struct {
diff --git a/lib/std/zig/c_translation.zig b/lib/std/zig/c_translation.zig
index 337149e97d7c..504f134b4158 100644
--- a/lib/std/zig/c_translation.zig
+++ b/lib/std/zig/c_translation.zig
@@ -414,7 +414,7 @@ pub const Macros = struct {
}
pub fn WL_CONTAINER_OF(ptr: anytype, sample: anytype, comptime member: []const u8) @TypeOf(sample) {
- return @fieldParentPtr(@TypeOf(sample.*), member, ptr);
+ return @fieldParentPtr(member, ptr);
}
/// A 2-argument function-like macro defined as #define FOO(A, B) (A)(B)
diff --git a/lib/zig.h b/lib/zig.h
index 7a1c69575a24..ec7508670d36 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -130,22 +130,18 @@ typedef char bool;
#define zig_restrict
#endif
-#if __STDC_VERSION__ >= 201112L
-#define zig_align(alignment) _Alignas(alignment)
-#elif zig_has_attribute(aligned)
-#define zig_align(alignment) __attribute__((aligned(alignment)))
+#if zig_has_attribute(aligned)
+#define zig_under_align(alignment) __attribute__((aligned(alignment)))
#elif _MSC_VER
-#define zig_align(alignment) __declspec(align(alignment))
+#define zig_under_align(alignment) __declspec(align(alignment))
#else
-#define zig_align zig_align_unavailable
+#define zig_under_align zig_align_unavailable
#endif
-#if zig_has_attribute(aligned)
-#define zig_under_align(alignment) __attribute__((aligned(alignment)))
-#elif _MSC_VER
-#define zig_under_align(alignment) zig_align(alignment)
+#if __STDC_VERSION__ >= 201112L
+#define zig_align(alignment) _Alignas(alignment)
#else
-#define zig_align zig_align_unavailable
+#define zig_align(alignment) zig_under_align(alignment)
#endif
#if zig_has_attribute(aligned)
@@ -165,11 +161,14 @@ typedef char bool;
#endif
#if zig_has_attribute(section)
-#define zig_linksection(name, def, ...) def __attribute__((section(name)))
+#define zig_linksection(name) __attribute__((section(name)))
+#define zig_linksection_fn zig_linksection
#elif _MSC_VER
-#define zig_linksection(name, def, ...) __pragma(section(name, __VA_ARGS__)) __declspec(allocate(name)) def
+#define zig_linksection(name) __pragma(section(name, read, write)) __declspec(allocate(name))
+#define zig_linksection_fn(name) __pragma(section(name, read, execute)) __declspec(code_seg(name))
#else
-#define zig_linksection(name, def, ...) zig_linksection_unavailable
+#define zig_linksection(name) zig_linksection_unavailable
+#define zig_linksection_fn zig_linksection
#endif
#if zig_has_builtin(unreachable) || defined(zig_gnuc)
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 5a6e02d400d4..c533f2fae79d 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -3451,19 +3451,24 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
var dg: c_codegen.DeclGen = .{
.gpa = gpa,
- .module = module,
+ .zcu = module,
+ .mod = module.namespacePtr(decl.src_namespace).file_scope.mod,
.error_msg = null,
.pass = .{ .decl = decl_index },
.is_naked_fn = false,
.fwd_decl = fwd_decl.toManaged(gpa),
- .ctypes = .{},
+ .ctype_pool = c_codegen.CType.Pool.empty,
+ .scratch = .{},
.anon_decl_deps = .{},
.aligned_anon_decls = .{},
};
defer {
- dg.ctypes.deinit(gpa);
- dg.fwd_decl.deinit();
+ fwd_decl.* = dg.fwd_decl.moveToUnmanaged();
+ fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len);
+ dg.ctype_pool.deinit(gpa);
+ dg.scratch.deinit(gpa);
}
+ try dg.ctype_pool.init(gpa);
c_codegen.genHeader(&dg) catch |err| switch (err) {
error.AnalysisFail => {
@@ -3472,9 +3477,6 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
},
else => |e| return e,
};
-
- fwd_decl.* = dg.fwd_decl.moveToUnmanaged();
- fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len);
},
}
},
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 63d29d376080..4edc32e86c1f 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -712,7 +712,7 @@ pub const Key = union(enum) {
pub fn fieldName(
self: AnonStructType,
ip: *const InternPool,
- index: u32,
+ index: usize,
) OptionalNullTerminatedString {
if (self.names.len == 0)
return .none;
@@ -3879,20 +3879,13 @@ pub const Alignment = enum(u6) {
none = std.math.maxInt(u6),
_,
- pub fn toByteUnitsOptional(a: Alignment) ?u64 {
+ pub fn toByteUnits(a: Alignment) ?u64 {
return switch (a) {
.none => null,
else => @as(u64, 1) << @intFromEnum(a),
};
}
- pub fn toByteUnits(a: Alignment, default: u64) u64 {
- return switch (a) {
- .none => default,
- else => @as(u64, 1) << @intFromEnum(a),
- };
- }
-
pub fn fromByteUnits(n: u64) Alignment {
if (n == 0) return .none;
assert(std.math.isPowerOfTwo(n));
@@ -5170,48 +5163,55 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.ptr => |ptr| {
const ptr_type = ip.indexToKey(ptr.ty).ptr_type;
assert(ptr_type.flags.size != .Slice);
- switch (ptr.addr) {
- .decl => |decl| ip.items.appendAssumeCapacity(.{
+ ip.items.appendAssumeCapacity(switch (ptr.addr) {
+ .decl => |decl| .{
.tag = .ptr_decl,
.data = try ip.addExtra(gpa, PtrDecl{
.ty = ptr.ty,
.decl = decl,
}),
- }),
- .comptime_alloc => |alloc_index| ip.items.appendAssumeCapacity(.{
+ },
+ .comptime_alloc => |alloc_index| .{
.tag = .ptr_comptime_alloc,
.data = try ip.addExtra(gpa, PtrComptimeAlloc{
.ty = ptr.ty,
.index = alloc_index,
}),
- }),
- .anon_decl => |anon_decl| ip.items.appendAssumeCapacity(
- if (ptrsHaveSameAlignment(ip, ptr.ty, ptr_type, anon_decl.orig_ty)) .{
+ },
+ .anon_decl => |anon_decl| if (ptrsHaveSameAlignment(ip, ptr.ty, ptr_type, anon_decl.orig_ty)) item: {
+ if (ptr.ty != anon_decl.orig_ty) {
+ _ = ip.map.pop();
+ var new_key = key;
+ new_key.ptr.addr.anon_decl.orig_ty = ptr.ty;
+ const new_gop = try ip.map.getOrPutAdapted(gpa, new_key, adapter);
+ if (new_gop.found_existing) return @enumFromInt(new_gop.index);
+ }
+ break :item .{
.tag = .ptr_anon_decl,
.data = try ip.addExtra(gpa, PtrAnonDecl{
.ty = ptr.ty,
.val = anon_decl.val,
}),
- } else .{
- .tag = .ptr_anon_decl_aligned,
- .data = try ip.addExtra(gpa, PtrAnonDeclAligned{
- .ty = ptr.ty,
- .val = anon_decl.val,
- .orig_ty = anon_decl.orig_ty,
- }),
- },
- ),
- .comptime_field => |field_val| {
+ };
+ } else .{
+ .tag = .ptr_anon_decl_aligned,
+ .data = try ip.addExtra(gpa, PtrAnonDeclAligned{
+ .ty = ptr.ty,
+ .val = anon_decl.val,
+ .orig_ty = anon_decl.orig_ty,
+ }),
+ },
+ .comptime_field => |field_val| item: {
assert(field_val != .none);
- ip.items.appendAssumeCapacity(.{
+ break :item .{
.tag = .ptr_comptime_field,
.data = try ip.addExtra(gpa, PtrComptimeField{
.ty = ptr.ty,
.field_val = field_val,
}),
- });
+ };
},
- .int, .eu_payload, .opt_payload => |base| {
+ .int, .eu_payload, .opt_payload => |base| item: {
switch (ptr.addr) {
.int => assert(ip.typeOf(base) == .usize_type),
.eu_payload => assert(ip.indexToKey(
@@ -5222,7 +5222,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
) == .opt_type),
else => unreachable,
}
- ip.items.appendAssumeCapacity(.{
+ break :item .{
.tag = switch (ptr.addr) {
.int => .ptr_int,
.eu_payload => .ptr_eu_payload,
@@ -5233,9 +5233,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.ty = ptr.ty,
.base = base,
}),
- });
+ };
},
- .elem, .field => |base_index| {
+ .elem, .field => |base_index| item: {
const base_ptr_type = ip.indexToKey(ip.typeOf(base_index.base)).ptr_type;
switch (ptr.addr) {
.elem => assert(base_ptr_type.flags.size == .Many),
@@ -5272,7 +5272,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
} });
assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing);
try ip.items.ensureUnusedCapacity(gpa, 1);
- ip.items.appendAssumeCapacity(.{
+ break :item .{
.tag = switch (ptr.addr) {
.elem => .ptr_elem,
.field => .ptr_field,
@@ -5283,9 +5283,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.base = base_index.base,
.index = index_index,
}),
- });
+ };
},
- }
+ });
},
.opt => |opt| {
diff --git a/src/Module.zig b/src/Module.zig
index d4a4522441cd..0399a2f85b53 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -299,7 +299,7 @@ const ValueArena = struct {
/// and must live until the matching call to release().
pub fn acquire(self: *ValueArena, child_allocator: Allocator, out_arena_allocator: *std.heap.ArenaAllocator) Allocator {
if (self.state_acquired) |state_acquired| {
- return @fieldParentPtr(std.heap.ArenaAllocator, "state", state_acquired).allocator();
+ return @as(*std.heap.ArenaAllocator, @fieldParentPtr("state", state_acquired)).allocator();
}
out_arena_allocator.* = self.state.promote(child_allocator);
@@ -309,7 +309,7 @@ const ValueArena = struct {
/// Releases the allocator acquired by `acquire. `arena_allocator` must match the one passed to `acquire`.
pub fn release(self: *ValueArena, arena_allocator: *std.heap.ArenaAllocator) void {
- if (@fieldParentPtr(std.heap.ArenaAllocator, "state", self.state_acquired.?) == arena_allocator) {
+ if (@as(*std.heap.ArenaAllocator, @fieldParentPtr("state", self.state_acquired.?)) == arena_allocator) {
self.state = self.state_acquired.?.*;
self.state_acquired = null;
}
@@ -5846,7 +5846,7 @@ pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 {
return @as(u16, @intCast(big.bitCountTwosComp()));
},
.lazy_align => |lazy_ty| {
- return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiAlignment(mod).toByteUnits(0)) + @intFromBool(sign);
+ return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiAlignment(mod).toByteUnits() orelse 0) + @intFromBool(sign);
},
.lazy_size => |lazy_ty| {
return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiSize(mod)) + @intFromBool(sign);
diff --git a/src/Sema.zig b/src/Sema.zig
index 7db354334a58..3c999e507e8d 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1131,7 +1131,6 @@ fn analyzeBodyInner(
.atomic_rmw => try sema.zirAtomicRmw(block, inst),
.mul_add => try sema.zirMulAdd(block, inst),
.builtin_call => try sema.zirBuiltinCall(block, inst),
- .field_parent_ptr => try sema.zirFieldParentPtr(block, inst),
.@"resume" => try sema.zirResume(block, inst),
.@"await" => try sema.zirAwait(block, inst),
.for_len => try sema.zirForLen(block, inst),
@@ -1296,6 +1295,7 @@ fn analyzeBodyInner(
continue;
},
.value_placeholder => unreachable, // never appears in a body
+ .field_parent_ptr => try sema.zirFieldParentPtr(block, extended),
};
},
@@ -6508,7 +6508,7 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
const alignment = try sema.resolveAlign(block, operand_src, extra.operand);
if (alignment.order(Alignment.fromNonzeroByteUnits(256)).compare(.gt)) {
return sema.fail(block, src, "attempt to @setAlignStack({d}); maximum is 256", .{
- alignment.toByteUnitsOptional().?,
+ alignment.toByteUnits().?,
});
}
@@ -17699,19 +17699,20 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.ty = new_decl_ty.toIntern(),
.storage = .{ .elems = param_vals },
} });
- const ptr_ty = (try sema.ptrType(.{
+ const slice_ty = (try sema.ptrType(.{
.child = param_info_ty.toIntern(),
.flags = .{
.size = .Slice,
.is_const = true,
},
})).toIntern();
+ const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern();
break :v try mod.intern(.{ .slice = .{
- .ty = ptr_ty,
+ .ty = slice_ty,
.ptr = try mod.intern(.{ .ptr = .{
- .ty = Type.fromInterned(ptr_ty).slicePtrFieldType(mod).toIntern(),
+ .ty = manyptr_ty,
.addr = .{ .anon_decl = .{
- .orig_ty = ptr_ty,
+ .orig_ty = manyptr_ty,
.val = new_decl_val,
} },
} }),
@@ -17804,7 +17805,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
},
.Pointer => {
const info = ty.ptrInfo(mod);
- const alignment = if (info.flags.alignment.toByteUnitsOptional()) |alignment|
+ const alignment = if (info.flags.alignment.toByteUnits()) |alignment|
try mod.intValue(Type.comptime_int, alignment)
else
try Type.fromInterned(info.child).lazyAbiAlignment(mod);
@@ -18031,12 +18032,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.ty = array_errors_ty.toIntern(),
.storage = .{ .elems = vals },
} });
+ const manyptr_errors_ty = slice_errors_ty.slicePtrFieldType(mod).toIntern();
break :v try mod.intern(.{ .slice = .{
.ty = slice_errors_ty.toIntern(),
.ptr = try mod.intern(.{ .ptr = .{
- .ty = slice_errors_ty.slicePtrFieldType(mod).toIntern(),
+ .ty = manyptr_errors_ty,
.addr = .{ .anon_decl = .{
- .orig_ty = slice_errors_ty.toIntern(),
+ .orig_ty = manyptr_errors_ty,
.val = new_decl_val,
} },
} }),
@@ -18155,20 +18157,21 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.ty = fields_array_ty.toIntern(),
.storage = .{ .elems = enum_field_vals },
} });
- const ptr_ty = (try sema.ptrType(.{
+ const slice_ty = (try sema.ptrType(.{
.child = enum_field_ty.toIntern(),
.flags = .{
.size = .Slice,
.is_const = true,
},
})).toIntern();
+ const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern();
break :v try mod.intern(.{ .slice = .{
- .ty = ptr_ty,
+ .ty = slice_ty,
.ptr = try mod.intern(.{ .ptr = .{
- .ty = Type.fromInterned(ptr_ty).slicePtrFieldType(mod).toIntern(),
+ .ty = manyptr_ty,
.addr = .{ .anon_decl = .{
.val = new_decl_val,
- .orig_ty = ptr_ty,
+ .orig_ty = manyptr_ty,
} },
} }),
.len = (try mod.intValue(Type.usize, enum_field_vals.len)).toIntern(),
@@ -18279,7 +18282,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// type: type,
field_ty,
// alignment: comptime_int,
- (try mod.intValue(Type.comptime_int, alignment.toByteUnits(0))).toIntern(),
+ (try mod.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(),
};
field_val.* = try mod.intern(.{ .aggregate = .{
.ty = union_field_ty.toIntern(),
@@ -18296,19 +18299,20 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.ty = array_fields_ty.toIntern(),
.storage = .{ .elems = union_field_vals },
} });
- const ptr_ty = (try sema.ptrType(.{
+ const slice_ty = (try sema.ptrType(.{
.child = union_field_ty.toIntern(),
.flags = .{
.size = .Slice,
.is_const = true,
},
})).toIntern();
+ const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern();
break :v try mod.intern(.{ .slice = .{
- .ty = ptr_ty,
+ .ty = slice_ty,
.ptr = try mod.intern(.{ .ptr = .{
- .ty = Type.fromInterned(ptr_ty).slicePtrFieldType(mod).toIntern(),
+ .ty = manyptr_ty,
.addr = .{ .anon_decl = .{
- .orig_ty = ptr_ty,
+ .orig_ty = manyptr_ty,
.val = new_decl_val,
} },
} }),
@@ -18436,7 +18440,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_comptime: bool,
Value.makeBool(is_comptime).toIntern(),
// alignment: comptime_int,
- (try mod.intValue(Type.comptime_int, Type.fromInterned(field_ty).abiAlignment(mod).toByteUnits(0))).toIntern(),
+ (try mod.intValue(Type.comptime_int, Type.fromInterned(field_ty).abiAlignment(mod).toByteUnits() orelse 0)).toIntern(),
};
struct_field_val.* = try mod.intern(.{ .aggregate = .{
.ty = struct_field_ty.toIntern(),
@@ -18505,7 +18509,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_comptime: bool,
Value.makeBool(field_is_comptime).toIntern(),
// alignment: comptime_int,
- (try mod.intValue(Type.comptime_int, alignment.toByteUnits(0))).toIntern(),
+ (try mod.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(),
};
field_val.* = try mod.intern(.{ .aggregate = .{
.ty = struct_field_ty.toIntern(),
@@ -18523,19 +18527,20 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.ty = array_fields_ty.toIntern(),
.storage = .{ .elems = struct_field_vals },
} });
- const ptr_ty = (try sema.ptrType(.{
+ const slice_ty = (try sema.ptrType(.{
.child = struct_field_ty.toIntern(),
.flags = .{
.size = .Slice,
.is_const = true,
},
})).toIntern();
+ const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern();
break :v try mod.intern(.{ .slice = .{
- .ty = ptr_ty,
+ .ty = slice_ty,
.ptr = try mod.intern(.{ .ptr = .{
- .ty = Type.fromInterned(ptr_ty).slicePtrFieldType(mod).toIntern(),
+ .ty = manyptr_ty,
.addr = .{ .anon_decl = .{
- .orig_ty = ptr_ty,
+ .orig_ty = manyptr_ty,
.val = new_decl_val,
} },
} }),
@@ -18661,19 +18666,20 @@ fn typeInfoDecls(
.ty = array_decl_ty.toIntern(),
.storage = .{ .elems = decl_vals.items },
} });
- const ptr_ty = (try sema.ptrType(.{
+ const slice_ty = (try sema.ptrType(.{
.child = declaration_ty.toIntern(),
.flags = .{
.size = .Slice,
.is_const = true,
},
})).toIntern();
+ const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern();
return try mod.intern(.{ .slice = .{
- .ty = ptr_ty,
+ .ty = slice_ty,
.ptr = try mod.intern(.{ .ptr = .{
- .ty = Type.fromInterned(ptr_ty).slicePtrFieldType(mod).toIntern(),
+ .ty = manyptr_ty,
.addr = .{ .anon_decl = .{
- .orig_ty = ptr_ty,
+ .orig_ty = manyptr_ty,
.val = new_decl_val,
} },
} }),
@@ -19803,8 +19809,18 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
break :blk @intCast(host_size);
} else 0;
- if (host_size != 0 and bit_offset >= host_size * 8) {
- return sema.fail(block, bitoffset_src, "bit offset starts after end of host integer", .{});
+ if (host_size != 0) {
+ if (bit_offset >= host_size * 8) {
+ return sema.fail(block, bitoffset_src, "packed type '{}' at bit offset {} starts {} bits after the end of a {} byte host integer", .{
+ elem_ty.fmt(mod), bit_offset, bit_offset - host_size * 8, host_size,
+ });
+ }
+ const elem_bit_size = try elem_ty.bitSizeAdvanced(mod, sema);
+ if (elem_bit_size > host_size * 8 - bit_offset) {
+ return sema.fail(block, bitoffset_src, "packed type '{}' at bit offset {} ends {} bits after the end of a {} byte host integer", .{
+ elem_ty.fmt(mod), bit_offset, elem_bit_size - (host_size * 8 - bit_offset), host_size,
+ });
+ }
}
if (elem_ty.zigTypeTag(mod) == .Fn) {
@@ -22552,7 +22568,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null);
}
if (ptr_align.compare(.gt, .@"1")) {
- const align_bytes_minus_1 = ptr_align.toByteUnitsOptional().? - 1;
+ const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1;
const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1);
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
@@ -22572,7 +22588,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null);
}
if (ptr_align.compare(.gt, .@"1")) {
- const align_bytes_minus_1 = ptr_align.toByteUnitsOptional().? - 1;
+ const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1;
const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
const remainder = try block.addBinOp(.bit_and, elem_coerced, align_minus_1);
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
@@ -22741,10 +22757,8 @@ fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
}
fn zirPtrCastFull(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
- const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(
- @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?,
- @truncate(extended.small),
- ));
+ const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?;
+ const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(FlagsInt, @truncate(extended.small)));
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const operand_src: LazySrcLoc = .{ .node_offset_ptrcast_operand = extra.node };
@@ -22757,6 +22771,7 @@ fn zirPtrCastFull(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDa
operand,
operand_src,
dest_ty,
+ flags.needResultTypeBuiltinName(),
);
}
@@ -22775,6 +22790,7 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
operand,
operand_src,
dest_ty,
+ "@ptrCast",
);
}
@@ -22786,6 +22802,7 @@ fn ptrCastFull(
operand: Air.Inst.Ref,
operand_src: LazySrcLoc,
dest_ty: Type,
+ operation: []const u8,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
@@ -22818,7 +22835,7 @@ fn ptrCastFull(
};
const dest_elem_size = Type.fromInterned(dest_info.child).abiSize(mod);
if (src_elem_size != dest_elem_size) {
- return sema.fail(block, src, "TODO: implement @ptrCast between slices changing the length", .{});
+ return sema.fail(block, src, "TODO: implement {s} between slices changing the length", .{operation});
}
}
@@ -22967,13 +22984,13 @@ fn ptrCastFull(
if (!flags.align_cast) {
if (dest_align.compare(.gt, src_align)) {
return sema.failWithOwnedErrorMsg(block, msg: {
- const msg = try sema.errMsg(block, src, "cast increases pointer alignment", .{});
+ const msg = try sema.errMsg(block, src, "{s} increases pointer alignment", .{operation});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, operand_src, msg, "'{}' has alignment '{d}'", .{
- operand_ty.fmt(mod), src_align.toByteUnits(0),
+ operand_ty.fmt(mod), src_align.toByteUnits() orelse 0,
});
try sema.errNote(block, src, msg, "'{}' has alignment '{d}'", .{
- dest_ty.fmt(mod), dest_align.toByteUnits(0),
+ dest_ty.fmt(mod), dest_align.toByteUnits() orelse 0,
});
try sema.errNote(block, src, msg, "use @alignCast to assert pointer alignment", .{});
break :msg msg;
@@ -22984,7 +23001,7 @@ fn ptrCastFull(
if (!flags.addrspace_cast) {
if (src_info.flags.address_space != dest_info.flags.address_space) {
return sema.failWithOwnedErrorMsg(block, msg: {
- const msg = try sema.errMsg(block, src, "cast changes pointer address space", .{});
+ const msg = try sema.errMsg(block, src, "{s} changes pointer address space", .{operation});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, operand_src, msg, "'{}' has address space '{s}'", .{
operand_ty.fmt(mod), @tagName(src_info.flags.address_space),
@@ -23014,7 +23031,7 @@ fn ptrCastFull(
if (!flags.const_cast) {
if (src_info.flags.is_const and !dest_info.flags.is_const) {
return sema.failWithOwnedErrorMsg(block, msg: {
- const msg = try sema.errMsg(block, src, "cast discards const qualifier", .{});
+ const msg = try sema.errMsg(block, src, "{s} discards const qualifier", .{operation});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "use @constCast to discard const qualifier", .{});
break :msg msg;
@@ -23025,7 +23042,7 @@ fn ptrCastFull(
if (!flags.volatile_cast) {
if (src_info.flags.is_volatile and !dest_info.flags.is_volatile) {
return sema.failWithOwnedErrorMsg(block, msg: {
- const msg = try sema.errMsg(block, src, "cast discards volatile qualifier", .{});
+ const msg = try sema.errMsg(block, src, "{s} discards volatile qualifier", .{operation});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "use @volatileCast to discard volatile qualifier", .{});
break :msg msg;
@@ -23067,7 +23084,7 @@ fn ptrCastFull(
if (!dest_align.check(addr)) {
return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{
addr,
- dest_align.toByteUnitsOptional().?,
+ dest_align.toByteUnits().?,
});
}
}
@@ -23110,7 +23127,7 @@ fn ptrCastFull(
dest_align.compare(.gt, src_align) and
try sema.typeHasRuntimeBits(Type.fromInterned(dest_info.child)))
{
- const align_bytes_minus_1 = dest_align.toByteUnitsOptional().? - 1;
+ const align_bytes_minus_1 = dest_align.toByteUnits().? - 1;
const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
const ptr_int = try block.addUnOp(.int_from_ptr, ptr);
const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1);
@@ -23171,10 +23188,8 @@ fn ptrCastFull(
fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const mod = sema.mod;
- const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(
- @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?,
- @truncate(extended.small),
- ));
+ const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?;
+ const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(FlagsInt, @truncate(extended.small)));
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const operand_src: LazySrcLoc = .{ .node_offset_ptrcast_operand = extra.node };
@@ -24843,107 +24858,151 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
);
}
-fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
- const extra = sema.code.extraData(Zir.Inst.FieldParentPtr, inst_data.payload_index).data;
- const src = inst_data.src();
- const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
- const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
- const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
-
- const parent_ty = try sema.resolveType(block, ty_src, extra.parent_type);
- const field_name = try sema.resolveConstStringIntern(block, name_src, extra.field_name, .{
- .needed_comptime_reason = "field name must be comptime-known",
- });
- const field_ptr = try sema.resolveInst(extra.field_ptr);
- const field_ptr_ty = sema.typeOf(field_ptr);
+fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const ip = &mod.intern_pool;
- if (parent_ty.zigTypeTag(mod) != .Struct and parent_ty.zigTypeTag(mod) != .Union) {
- return sema.fail(block, ty_src, "expected struct or union type, found '{}'", .{parent_ty.fmt(sema.mod)});
+ const extra = sema.code.extraData(Zir.Inst.FieldParentPtr, extended.operand).data;
+ const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?;
+ const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(FlagsInt, @truncate(extended.small)));
+ assert(!flags.ptr_cast);
+ const inst_src = extra.src();
+ const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.src_node };
+ const field_ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.src_node };
+
+ const parent_ptr_ty = try sema.resolveDestType(block, inst_src, extra.parent_ptr_type, .remove_eu, "@fieldParentPtr");
+ try sema.checkPtrType(block, inst_src, parent_ptr_ty, true);
+ const parent_ptr_info = parent_ptr_ty.ptrInfo(mod);
+ if (parent_ptr_info.flags.size != .One) {
+ return sema.fail(block, inst_src, "expected single pointer type, found '{}'", .{parent_ptr_ty.fmt(sema.mod)});
+ }
+ const parent_ty = Type.fromInterned(parent_ptr_info.child);
+ switch (parent_ty.zigTypeTag(mod)) {
+ .Struct, .Union => {},
+ else => return sema.fail(block, inst_src, "expected pointer to struct or union type, found '{}'", .{parent_ptr_ty.fmt(sema.mod)}),
}
try sema.resolveTypeLayout(parent_ty);
+ const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, .{
+ .needed_comptime_reason = "field name must be comptime-known",
+ });
const field_index = switch (parent_ty.zigTypeTag(mod)) {
.Struct => blk: {
if (parent_ty.isTuple(mod)) {
if (ip.stringEqlSlice(field_name, "len")) {
- return sema.fail(block, src, "cannot get @fieldParentPtr of 'len' field of tuple", .{});
+ return sema.fail(block, inst_src, "cannot get @fieldParentPtr of 'len' field of tuple", .{});
}
- break :blk try sema.tupleFieldIndex(block, parent_ty, field_name, name_src);
+ break :blk try sema.tupleFieldIndex(block, parent_ty, field_name, field_name_src);
} else {
- break :blk try sema.structFieldIndex(block, parent_ty, field_name, name_src);
+ break :blk try sema.structFieldIndex(block, parent_ty, field_name, field_name_src);
}
},
- .Union => try sema.unionFieldIndex(block, parent_ty, field_name, name_src),
+ .Union => try sema.unionFieldIndex(block, parent_ty, field_name, field_name_src),
else => unreachable,
};
-
if (parent_ty.zigTypeTag(mod) == .Struct and parent_ty.structFieldIsComptime(field_index, mod)) {
- return sema.fail(block, src, "cannot get @fieldParentPtr of a comptime field", .{});
+ return sema.fail(block, field_name_src, "cannot get @fieldParentPtr of a comptime field", .{});
}
- try sema.checkPtrOperand(block, ptr_src, field_ptr_ty);
- const field_ptr_ty_info = field_ptr_ty.ptrInfo(mod);
+ const field_ptr = try sema.resolveInst(extra.field_ptr);
+ const field_ptr_ty = sema.typeOf(field_ptr);
+ try sema.checkPtrOperand(block, field_ptr_src, field_ptr_ty);
+ const field_ptr_info = field_ptr_ty.ptrInfo(mod);
- var ptr_ty_data: InternPool.Key.PtrType = .{
- .child = parent_ty.structFieldType(field_index, mod).toIntern(),
+ var actual_parent_ptr_info: InternPool.Key.PtrType = .{
+ .child = parent_ty.toIntern(),
.flags = .{
- .address_space = field_ptr_ty_info.flags.address_space,
- .is_const = field_ptr_ty_info.flags.is_const,
+ .alignment = try parent_ptr_ty.ptrAlignmentAdvanced(mod, sema),
+ .is_const = field_ptr_info.flags.is_const,
+ .is_volatile = field_ptr_info.flags.is_volatile,
+ .is_allowzero = field_ptr_info.flags.is_allowzero,
+ .address_space = field_ptr_info.flags.address_space,
},
+ .packed_offset = parent_ptr_info.packed_offset,
};
+ const field_ty = parent_ty.structFieldType(field_index, mod);
+ var actual_field_ptr_info: InternPool.Key.PtrType = .{
+ .child = field_ty.toIntern(),
+ .flags = .{
+ .alignment = try field_ptr_ty.ptrAlignmentAdvanced(mod, sema),
+ .is_const = field_ptr_info.flags.is_const,
+ .is_volatile = field_ptr_info.flags.is_volatile,
+ .is_allowzero = field_ptr_info.flags.is_allowzero,
+ .address_space = field_ptr_info.flags.address_space,
+ },
+ .packed_offset = field_ptr_info.packed_offset,
+ };
+ switch (parent_ty.containerLayout(mod)) {
+ .auto => {
+ actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(
+ if (mod.typeToStruct(parent_ty)) |struct_obj| try sema.structFieldAlignment(
+ struct_obj.fieldAlign(ip, field_index),
+ field_ty,
+ struct_obj.layout,
+ ) else if (mod.typeToUnion(parent_ty)) |union_obj|
+ try sema.unionFieldAlignment(union_obj, field_index)
+ else
+ actual_field_ptr_info.flags.alignment,
+ );
- if (parent_ty.containerLayout(mod) == .@"packed") {
- return sema.fail(block, src, "TODO handle packed structs/unions with @fieldParentPtr", .{});
- } else {
- ptr_ty_data.flags.alignment = blk: {
- if (mod.typeToStruct(parent_ty)) |struct_type| {
- break :blk struct_type.fieldAlign(ip, field_index);
- } else if (mod.typeToUnion(parent_ty)) |union_obj| {
- break :blk union_obj.fieldAlign(ip, field_index);
- } else {
- break :blk .none;
- }
- };
- }
-
- const actual_field_ptr_ty = try sema.ptrType(ptr_ty_data);
- const casted_field_ptr = try sema.coerce(block, actual_field_ptr_ty, field_ptr, ptr_src);
+ actual_parent_ptr_info.packed_offset = .{ .bit_offset = 0, .host_size = 0 };
+ actual_field_ptr_info.packed_offset = .{ .bit_offset = 0, .host_size = 0 };
+ },
+ .@"extern" => {
+ const field_offset = parent_ty.structFieldOffset(field_index, mod);
+ actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(if (field_offset > 0)
+ Alignment.fromLog2Units(@ctz(field_offset))
+ else
+ actual_field_ptr_info.flags.alignment);
- ptr_ty_data.child = parent_ty.toIntern();
- const result_ptr = try sema.ptrType(ptr_ty_data);
+ actual_parent_ptr_info.packed_offset = .{ .bit_offset = 0, .host_size = 0 };
+ actual_field_ptr_info.packed_offset = .{ .bit_offset = 0, .host_size = 0 };
+ },
+ .@"packed" => {
+ const byte_offset = std.math.divExact(u32, @abs(@as(i32, actual_parent_ptr_info.packed_offset.bit_offset) +
+ (if (mod.typeToStruct(parent_ty)) |struct_obj| mod.structPackedFieldBitOffset(struct_obj, field_index) else 0) -
+ actual_field_ptr_info.packed_offset.bit_offset), 8) catch
+ return sema.fail(block, inst_src, "pointer bit-offset mismatch", .{});
+ actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(if (byte_offset > 0)
+ Alignment.fromLog2Units(@ctz(byte_offset))
+ else
+ actual_field_ptr_info.flags.alignment);
+ },
+ }
- if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| {
+ const actual_field_ptr_ty = try sema.ptrType(actual_field_ptr_info);
+ const casted_field_ptr = try sema.coerce(block, actual_field_ptr_ty, field_ptr, field_ptr_src);
+ const actual_parent_ptr_ty = try sema.ptrType(actual_parent_ptr_info);
+ const result = if (try sema.resolveDefinedValue(block, field_ptr_src, casted_field_ptr)) |field_ptr_val| result: {
const field = switch (ip.indexToKey(field_ptr_val.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
.field => |field| field,
else => null,
},
else => null,
- } orelse return sema.fail(block, ptr_src, "pointer value not based on parent struct", .{});
+ } orelse return sema.fail(block, field_ptr_src, "pointer value not based on parent struct", .{});
if (field.index != field_index) {
- return sema.fail(block, src, "field '{}' has index '{d}' but pointer value is index '{d}' of struct '{}'", .{
+ return sema.fail(block, inst_src, "field '{}' has index '{d}' but pointer value is index '{d}' of struct '{}'", .{
field_name.fmt(ip), field_index, field.index, parent_ty.fmt(sema.mod),
});
}
- return Air.internedToRef(field.base);
- }
-
- try sema.requireRuntimeBlock(block, src, ptr_src);
- try sema.queueFullTypeResolution(result_ptr);
- return block.addInst(.{
- .tag = .field_parent_ptr,
- .data = .{ .ty_pl = .{
- .ty = Air.internedToRef(result_ptr.toIntern()),
- .payload = try block.sema.addExtra(Air.FieldParentPtr{
- .field_ptr = casted_field_ptr,
- .field_index = @intCast(field_index),
- }),
- } },
- });
+ break :result try sema.coerce(block, actual_parent_ptr_ty, Air.internedToRef(field.base), inst_src);
+ } else result: {
+ try sema.requireRuntimeBlock(block, inst_src, field_ptr_src);
+ try sema.queueFullTypeResolution(parent_ty);
+ break :result try block.addInst(.{
+ .tag = .field_parent_ptr,
+ .data = .{ .ty_pl = .{
+ .ty = Air.internedToRef(actual_parent_ptr_ty.toIntern()),
+ .payload = try block.sema.addExtra(Air.FieldParentPtr{
+ .field_ptr = casted_field_ptr,
+ .field_index = @intCast(field_index),
+ }),
+ } },
+ });
+ };
+ return sema.ptrCastFull(block, flags, inst_src, result, inst_src, parent_ptr_ty, "@fieldParentPtr");
}
fn zirMinMax(
@@ -27837,7 +27896,7 @@ fn structFieldPtrByIndex(
const elem_size_bits = Type.fromInterned(ptr_ty_data.child).bitSize(mod);
if (elem_size_bytes * 8 == elem_size_bits) {
const byte_offset = ptr_ty_data.packed_offset.bit_offset / 8;
- const new_align: Alignment = @enumFromInt(@ctz(byte_offset | parent_align.toByteUnitsOptional().?));
+ const new_align: Alignment = @enumFromInt(@ctz(byte_offset | parent_align.toByteUnits().?));
assert(new_align != .none);
ptr_ty_data.flags.alignment = new_align;
ptr_ty_data.packed_offset = .{ .host_size = 0, .bit_offset = 0 };
@@ -29132,7 +29191,7 @@ fn coerceExtra(
.addr = .{ .int = if (dest_info.flags.alignment != .none)
(try mod.intValue(
Type.usize,
- dest_info.flags.alignment.toByteUnitsOptional().?,
+ dest_info.flags.alignment.toByteUnits().?,
)).toIntern()
else
try mod.intern_pool.getCoercedInts(
@@ -29800,7 +29859,7 @@ const InMemoryCoercionResult = union(enum) {
},
.ptr_alignment => |pair| {
try sema.errNote(block, src, msg, "pointer alignment '{d}' cannot cast into pointer alignment '{d}'", .{
- pair.actual.toByteUnits(0), pair.wanted.toByteUnits(0),
+ pair.actual.toByteUnits() orelse 0, pair.wanted.toByteUnits() orelse 0,
});
break;
},
@@ -36066,7 +36125,7 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
// alignment is greater.
var size: u64 = 0;
var padding: u32 = 0;
- if (tag_align.compare(.gte, max_align)) {
+ if (tag_align.order(max_align).compare(.gte)) {
// {Tag, Payload}
size += tag_size;
size = max_align.forward(size);
@@ -36077,7 +36136,10 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
} else {
// {Payload, Tag}
size += max_size;
- size = tag_align.forward(size);
+ size = switch (mod.getTarget().ofmt) {
+ .c => max_align,
+ else => tag_align,
+ }.forward(size);
size += tag_size;
const prev_size = size;
size = max_align.forward(size);
diff --git a/src/Value.zig b/src/Value.zig
index f8f23667e252..7a9775e198b8 100644
--- a/src/Value.zig
+++ b/src/Value.zig
@@ -176,7 +176,7 @@ pub fn toBigIntAdvanced(
if (opt_sema) |sema| try sema.resolveTypeLayout(Type.fromInterned(ty));
const x = switch (int.storage) {
else => unreachable,
- .lazy_align => Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0),
+ .lazy_align => Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0,
.lazy_size => Type.fromInterned(ty).abiSize(mod),
};
return BigIntMutable.init(&space.limbs, x).toConst();
@@ -237,9 +237,9 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64
.u64 => |x| x,
.i64 => |x| std.math.cast(u64, x),
.lazy_align => |ty| if (opt_sema) |sema|
- (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0)
+ (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits() orelse 0
else
- Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0),
+ Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0,
.lazy_size => |ty| if (opt_sema) |sema|
(try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar
else
@@ -289,7 +289,7 @@ pub fn toSignedInt(val: Value, mod: *Module) i64 {
.big_int => |big_int| big_int.to(i64) catch unreachable,
.i64 => |x| x,
.u64 => |x| @intCast(x),
- .lazy_align => |ty| @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0)),
+ .lazy_align => |ty| @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0),
.lazy_size => |ty| @intCast(Type.fromInterned(ty).abiSize(mod)),
},
else => unreachable,
@@ -497,7 +497,7 @@ pub fn writeToPackedMemory(
inline .u64, .i64 => |int| std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian),
.big_int => |bigint| bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian),
.lazy_align => |lazy_align| {
- const num = Type.fromInterned(lazy_align).abiAlignment(mod).toByteUnits(0);
+ const num = Type.fromInterned(lazy_align).abiAlignment(mod).toByteUnits() orelse 0;
std.mem.writeVarPackedInt(buffer, bit_offset, bits, num, endian);
},
.lazy_size => |lazy_size| {
@@ -890,7 +890,7 @@ pub fn toFloat(val: Value, comptime T: type, mod: *Module) T {
}
return @floatFromInt(x);
},
- .lazy_align => |ty| @floatFromInt(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0)),
+ .lazy_align => |ty| @floatFromInt(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0),
.lazy_size => |ty| @floatFromInt(Type.fromInterned(ty).abiSize(mod)),
},
.float => |float| switch (float.storage) {
@@ -1529,9 +1529,9 @@ pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*
},
inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, mod),
.lazy_align => |ty| if (opt_sema) |sema| {
- return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0), float_ty, mod);
+ return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits() orelse 0, float_ty, mod);
} else {
- return floatFromIntInner(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0), float_ty, mod);
+ return floatFromIntInner(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0, float_ty, mod);
},
.lazy_size => |ty| if (opt_sema) |sema| {
return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod);
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 6dc231672406..022f2f9bee7e 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1296,7 +1296,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
// subtract it from the current stack pointer
try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } });
// Get negative stack aligment
- try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment.toByteUnitsOptional().?)) * -1 } });
+ try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment.toByteUnits().?)) * -1 } });
// Bitwise-and the value to get the new stack pointer to ensure the pointers are aligned with the abi alignment
try prologue.append(.{ .tag = .i32_and, .data = .{ .tag = {} } });
// store the current stack pointer as the bottom, which will be used to calculate all stack pointer offsets
@@ -2107,7 +2107,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
});
try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
.offset = operand.offset(),
- .alignment = @intCast(scalar_type.abiAlignment(mod).toByteUnitsOptional().?),
+ .alignment = @intCast(scalar_type.abiAlignment(mod).toByteUnits().?),
});
},
else => try func.emitWValue(operand),
@@ -2384,7 +2384,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_store),
offset + lhs.offset(),
- @intCast(ty.abiAlignment(mod).toByteUnits(0)),
+ @intCast(ty.abiAlignment(mod).toByteUnits() orelse 0),
});
return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
},
@@ -2440,7 +2440,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
Mir.Inst.Tag.fromOpcode(opcode),
.{
.offset = offset + lhs.offset(),
- .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+ .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
},
);
}
@@ -2500,7 +2500,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_load),
offset + operand.offset(),
- @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+ @intCast(ty.abiAlignment(mod).toByteUnits().?),
});
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
return WValue{ .stack = {} };
@@ -2518,7 +2518,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
Mir.Inst.Tag.fromOpcode(opcode),
.{
.offset = offset + operand.offset(),
- .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+ .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
},
);
@@ -3456,7 +3456,7 @@ fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 {
.i64 => |x| @as(i32, @intCast(x)),
.u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))),
.big_int => unreachable,
- .lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0))))),
+ .lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0)))),
.lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiSize(mod))))),
};
}
@@ -4204,7 +4204,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try func.addMemArg(.i32_load16_u, .{
.offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))),
- .alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnitsOptional().?),
+ .alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnits().?),
});
}
@@ -5141,7 +5141,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
opcode,
operand.offset(),
- @intCast(elem_ty.abiAlignment(mod).toByteUnitsOptional().?),
+ @intCast(elem_ty.abiAlignment(mod).toByteUnits().?),
});
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
try func.addLabel(.local_set, result.local.value);
@@ -6552,7 +6552,7 @@ fn lowerTry(
const err_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)));
try func.addMemArg(.i32_load16_u, .{
.offset = err_union.offset() + err_offset,
- .alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnitsOptional().?),
+ .alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnits().?),
});
}
try func.addTag(.i32_eqz);
@@ -7499,7 +7499,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
}, .{
.offset = ptr_operand.offset(),
- .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+ .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
});
try func.addLabel(.local_tee, val_local.local.value);
_ = try func.cmp(.stack, expected_val, ty, .eq);
@@ -7561,7 +7561,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr);
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+ .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
});
} else {
_ = try func.load(ptr, ty, 0);
@@ -7622,7 +7622,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
.{
.offset = ptr.offset(),
- .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+ .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
},
);
const select_res = try func.allocLocal(ty);
@@ -7682,7 +7682,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
};
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+ .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
});
const result = try WValue.toLocal(.stack, func, ty);
return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand });
@@ -7781,7 +7781,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.lowerToStack(operand);
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+ .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
});
} else {
try func.store(ptr, operand, ty, 0);
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 1b584bfe53c1..7a90eacf54bc 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -7920,17 +7920,14 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32
const mod = self.bin_file.comp.module.?;
const ptr_field_ty = self.typeOfIndex(inst);
const ptr_container_ty = self.typeOf(operand);
- const ptr_container_ty_info = ptr_container_ty.ptrInfo(mod);
const container_ty = ptr_container_ty.childType(mod);
- const field_offset: i32 = if (mod.typeToPackedStruct(container_ty)) |struct_obj|
- if (ptr_field_ty.ptrInfo(mod).packed_offset.host_size == 0)
- @divExact(mod.structPackedFieldBitOffset(struct_obj, index) +
- ptr_container_ty_info.packed_offset.bit_offset, 8)
- else
- 0
- else
- @intCast(container_ty.structFieldOffset(index, mod));
+ const field_off: i32 = switch (container_ty.containerLayout(mod)) {
+ .auto, .@"extern" => @intCast(container_ty.structFieldOffset(index, mod)),
+ .@"packed" => @divExact(@as(i32, ptr_container_ty.ptrInfo(mod).packed_offset.bit_offset) +
+ (if (mod.typeToStruct(container_ty)) |struct_obj| mod.structPackedFieldBitOffset(struct_obj, index) else 0) -
+ ptr_field_ty.ptrInfo(mod).packed_offset.bit_offset, 8),
+ };
const src_mcv = try self.resolveInst(operand);
const dst_mcv = if (switch (src_mcv) {
@@ -7938,7 +7935,7 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32
.register, .register_offset => self.reuseOperand(inst, operand, 0, src_mcv),
else => false,
}) src_mcv else try self.copyToRegisterWithInstTracking(inst, ptr_field_ty, src_mcv);
- return dst_mcv.offset(field_offset);
+ return dst_mcv.offset(field_off);
}
fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
@@ -7958,11 +7955,8 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const src_mcv = try self.resolveInst(operand);
const field_off: u32 = switch (container_ty.containerLayout(mod)) {
- .auto, .@"extern" => @intCast(container_ty.structFieldOffset(index, mod) * 8),
- .@"packed" => if (mod.typeToStruct(container_ty)) |struct_type|
- mod.structPackedFieldBitOffset(struct_type, index)
- else
- 0,
+ .auto, .@"extern" => @intCast(container_ty.structFieldOffset(extra.field_index, mod) * 8),
+ .@"packed" => if (mod.typeToStruct(container_ty)) |struct_obj| mod.structPackedFieldBitOffset(struct_obj, extra.field_index) else 0,
};
switch (src_mcv) {
@@ -8239,7 +8233,12 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
const inst_ty = self.typeOfIndex(inst);
const parent_ty = inst_ty.childType(mod);
- const field_offset: i32 = @intCast(parent_ty.structFieldOffset(extra.field_index, mod));
+ const field_off: i32 = switch (parent_ty.containerLayout(mod)) {
+ .auto, .@"extern" => @intCast(parent_ty.structFieldOffset(extra.field_index, mod)),
+ .@"packed" => @divExact(@as(i32, inst_ty.ptrInfo(mod).packed_offset.bit_offset) +
+ (if (mod.typeToStruct(parent_ty)) |struct_obj| mod.structPackedFieldBitOffset(struct_obj, extra.field_index) else 0) -
+ self.typeOf(extra.field_ptr).ptrInfo(mod).packed_offset.bit_offset, 8),
+ };
const src_mcv = try self.resolveInst(extra.field_ptr);
const dst_mcv = if (src_mcv.isRegisterOffset() and
@@ -8247,7 +8246,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
src_mcv
else
try self.copyToRegisterWithInstTracking(inst, inst_ty, src_mcv);
- const result = dst_mcv.offset(-field_offset);
+ const result = dst_mcv.offset(-field_off);
return self.finishAir(inst, result, .{ extra.field_ptr, .none, .none });
}
@@ -17950,7 +17949,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
.Struct => {
const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, mod));
if (result_ty.containerLayout(mod) == .@"packed") {
- const struct_type = mod.typeToStruct(result_ty).?;
+ const struct_obj = mod.typeToStruct(result_ty).?;
try self.genInlineMemset(
.{ .lea_frame = .{ .index = frame_index } },
.{ .immediate = 0 },
@@ -17971,7 +17970,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
}
const elem_abi_size: u32 = @intCast(elem_ty.abiSize(mod));
const elem_abi_bits = elem_abi_size * 8;
- const elem_off = mod.structPackedFieldBitOffset(struct_type, elem_i);
+ const elem_off = mod.structPackedFieldBitOffset(struct_obj, elem_i);
const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size);
const elem_bit_off = elem_off % elem_abi_bits;
const elem_mcv = try self.resolveInst(elem);
@@ -18959,7 +18958,7 @@ fn resolveCallingConventionValues(
const param_size: u31 = @intCast(ty.abiSize(mod));
const param_align: u31 =
- @intCast(@max(ty.abiAlignment(mod).toByteUnitsOptional().?, 8));
+ @intCast(@max(ty.abiAlignment(mod).toByteUnits().?, 8));
result.stack_byte_count =
mem.alignForward(u31, result.stack_byte_count, param_align);
arg.* = .{ .load_frame = .{
@@ -19003,7 +19002,7 @@ fn resolveCallingConventionValues(
continue;
}
const param_size: u31 = @intCast(ty.abiSize(mod));
- const param_align: u31 = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?);
+ const param_align: u31 = @intCast(ty.abiAlignment(mod).toByteUnits().?);
result.stack_byte_count =
mem.alignForward(u31, result.stack_byte_count, param_align);
arg.* = .{ .load_frame = .{
@@ -19096,7 +19095,7 @@ fn splitType(self: *Self, ty: Type) ![2]Type {
.integer => switch (part_i) {
0 => Type.u64,
1 => part: {
- const elem_size = ty.abiAlignment(mod).minStrict(.@"8").toByteUnitsOptional().?;
+ const elem_size = ty.abiAlignment(mod).minStrict(.@"8").toByteUnits().?;
const elem_ty = try mod.intType(.unsigned, @intCast(elem_size * 8));
break :part switch (@divExact(ty.abiSize(mod) - 8, elem_size)) {
1 => elem_ty,
diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig
index b909f9794aab..e4c2a39d18c4 100644
--- a/src/arch/x86_64/Encoding.zig
+++ b/src/arch/x86_64/Encoding.zig
@@ -848,9 +848,8 @@ const mnemonic_to_encodings_map = init: {
const final_storage = data_storage;
var final_map: [mnemonic_count][]const Data = .{&.{}} ** mnemonic_count;
storage_i = 0;
- for (&final_map, mnemonic_map) |*value, wip_value| {
- value.ptr = final_storage[storage_i..].ptr;
- value.len = wip_value.len;
+ for (&final_map, mnemonic_map) |*final_value, value| {
+ final_value.* = final_storage[storage_i..][0..value.len];
storage_i += value.len;
}
break :init final_map;
diff --git a/src/codegen.zig b/src/codegen.zig
index 004cf7a7be74..76be8be974c1 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -548,7 +548,7 @@ pub fn generateSymbol(
}
const size = struct_type.size(ip).*;
- const alignment = struct_type.flagsPtr(ip).alignment.toByteUnitsOptional().?;
+ const alignment = struct_type.flagsPtr(ip).alignment.toByteUnits().?;
const padding = math.cast(
usize,
@@ -893,12 +893,12 @@ fn genDeclRef(
// TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
if (ty.castPtrToFn(zcu)) |fn_ty| {
if (zcu.typeToFunc(fn_ty).?.is_generic) {
- return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(zcu).toByteUnitsOptional().? });
+ return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(zcu).toByteUnits().? });
}
} else if (ty.zigTypeTag(zcu) == .Pointer) {
const elem_ty = ty.elemType2(zcu);
if (!elem_ty.hasRuntimeBits(zcu)) {
- return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(zcu).toByteUnitsOptional().? });
+ return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(zcu).toByteUnits().? });
}
}
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 7ae5c87ee540..883f3f7c7765 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -5,12 +5,13 @@ const mem = std.mem;
const log = std.log.scoped(.c);
const link = @import("../link.zig");
-const Module = @import("../Module.zig");
+const Zcu = @import("../Module.zig");
+const Module = @import("../Package/Module.zig");
const Compilation = @import("../Compilation.zig");
const Value = @import("../Value.zig");
const Type = @import("../type.zig").Type;
const C = link.File.C;
-const Decl = Module.Decl;
+const Decl = Zcu.Decl;
const trace = @import("../tracy.zig").trace;
const LazySrcLoc = std.zig.LazySrcLoc;
const Air = @import("../Air.zig");
@@ -21,7 +22,7 @@ const Alignment = InternPool.Alignment;
const BigIntLimb = std.math.big.Limb;
const BigInt = std.math.big.int;
-pub const CType = @import("c/type.zig").CType;
+pub const CType = @import("c/Type.zig");
pub const CValue = union(enum) {
none: void,
@@ -30,7 +31,7 @@ pub const CValue = union(enum) {
/// Address of a local.
local_ref: LocalIndex,
/// A constant instruction, to be rendered inline.
- constant: InternPool.Index,
+ constant: Value,
/// Index into the parameters
arg: usize,
/// The array field of a parameter
@@ -61,7 +62,7 @@ pub const LazyFnKey = union(enum) {
never_inline: InternPool.DeclIndex,
};
pub const LazyFnValue = struct {
- fn_name: []const u8,
+ fn_name: CType.String,
data: Data,
pub const Data = union {
@@ -72,18 +73,20 @@ pub const LazyFnValue = struct {
};
pub const LazyFnMap = std.AutoArrayHashMapUnmanaged(LazyFnKey, LazyFnValue);
-const LoopDepth = u16;
const Local = struct {
- cty_idx: CType.Index,
- alignas: CType.AlignAs,
+ ctype: CType,
+ flags: packed struct(u32) {
+ alignas: CType.AlignAs,
+ _: u20 = undefined,
+ },
pub fn getType(local: Local) LocalType {
- return .{ .cty_idx = local.cty_idx, .alignas = local.alignas };
+ return .{ .ctype = local.ctype, .alignas = local.flags.alignas };
}
};
const LocalIndex = u16;
-const LocalType = struct { cty_idx: CType.Index, alignas: CType.AlignAs };
+const LocalType = struct { ctype: CType, alignas: CType.AlignAs };
const LocalsList = std.AutoArrayHashMapUnmanaged(LocalIndex, void);
const LocalsMap = std.AutoArrayHashMapUnmanaged(LocalType, LocalsList);
@@ -190,6 +193,7 @@ const reserved_idents = std.ComptimeStringMap(void, .{
.{ "switch", {} },
.{ "thread_local", {} },
.{ "typedef", {} },
+ .{ "typeof", {} },
.{ "uint16_t", {} },
.{ "uint32_t", {} },
.{ "uint64_t", {} },
@@ -300,30 +304,32 @@ pub const Function = struct {
const gop = try f.value_map.getOrPut(ref);
if (gop.found_existing) return gop.value_ptr.*;
- const mod = f.object.dg.module;
- const val = (try f.air.value(ref, mod)).?;
+ const zcu = f.object.dg.zcu;
+ const val = (try f.air.value(ref, zcu)).?;
const ty = f.typeOf(ref);
- const result: CValue = if (lowersToArray(ty, mod)) result: {
+ const result: CValue = if (lowersToArray(ty, zcu)) result: {
const writer = f.object.codeHeaderWriter();
- const alignment: Alignment = .none;
- const decl_c_value = try f.allocLocalValue(ty, alignment);
+ const decl_c_value = try f.allocLocalValue(.{
+ .ctype = try f.ctypeFromType(ty, .complete),
+ .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(zcu)),
+ });
const gpa = f.object.dg.gpa;
try f.allocs.put(gpa, decl_c_value.new_local, false);
try writer.writeAll("static ");
- try f.object.dg.renderTypeAndName(writer, ty, decl_c_value, Const, alignment, .complete);
+ try f.object.dg.renderTypeAndName(writer, ty, decl_c_value, Const, .none, .complete);
try writer.writeAll(" = ");
- try f.object.dg.renderValue(writer, ty, val, .StaticInitializer);
+ try f.object.dg.renderValue(writer, val, .StaticInitializer);
try writer.writeAll(";\n ");
break :result decl_c_value;
- } else .{ .constant = val.toIntern() };
+ } else .{ .constant = val };
gop.value_ptr.* = result;
return result;
}
fn wantSafety(f: *Function) bool {
- return switch (f.object.dg.module.optimizeMode()) {
+ return switch (f.object.dg.zcu.optimizeMode()) {
.Debug, .ReleaseSafe => true,
.ReleaseFast, .ReleaseSmall => false,
};
@@ -332,159 +338,174 @@ pub const Function = struct {
/// Skips the reuse logic. This function should be used for any persistent allocation, i.e.
/// those which go into `allocs`. This function does not add the resulting local into `allocs`;
/// that responsibility lies with the caller.
- fn allocLocalValue(f: *Function, ty: Type, alignment: Alignment) !CValue {
- const mod = f.object.dg.module;
- const gpa = f.object.dg.gpa;
- try f.locals.append(gpa, .{
- .cty_idx = try f.typeToIndex(ty, .complete),
- .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)),
+ fn allocLocalValue(f: *Function, local_type: LocalType) !CValue {
+ try f.locals.ensureUnusedCapacity(f.object.dg.gpa, 1);
+ defer f.locals.appendAssumeCapacity(.{
+ .ctype = local_type.ctype,
+ .flags = .{ .alignas = local_type.alignas },
});
- return .{ .new_local = @intCast(f.locals.items.len - 1) };
+ return .{ .new_local = @intCast(f.locals.items.len) };
}
fn allocLocal(f: *Function, inst: ?Air.Inst.Index, ty: Type) !CValue {
- const result = try f.allocAlignedLocal(ty, .{}, .none);
- if (inst) |i| {
- log.debug("%{d}: allocating t{d}", .{ i, result.new_local });
- } else {
- log.debug("allocating t{d}", .{result.new_local});
- }
- return result;
+ return f.allocAlignedLocal(inst, .{
+ .ctype = try f.ctypeFromType(ty, .complete),
+ .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(f.object.dg.zcu)),
+ });
}
/// Only allocates the local; does not print anything. Will attempt to re-use locals, so should
/// not be used for persistent locals (i.e. those in `allocs`).
- fn allocAlignedLocal(f: *Function, ty: Type, _: CQualifiers, alignment: Alignment) !CValue {
- const mod = f.object.dg.module;
- if (f.free_locals_map.getPtr(.{
- .cty_idx = try f.typeToIndex(ty, .complete),
- .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)),
- })) |locals_list| {
- if (locals_list.popOrNull()) |local_entry| {
- return .{ .new_local = local_entry.key };
+ fn allocAlignedLocal(f: *Function, inst: ?Air.Inst.Index, local_type: LocalType) !CValue {
+ const result: CValue = result: {
+ if (f.free_locals_map.getPtr(local_type)) |locals_list| {
+ if (locals_list.popOrNull()) |local_entry| {
+ break :result .{ .new_local = local_entry.key };
+ }
}
+ break :result try f.allocLocalValue(local_type);
+ };
+ if (inst) |i| {
+ log.debug("%{d}: allocating t{d}", .{ i, result.new_local });
+ } else {
+ log.debug("allocating t{d}", .{result.new_local});
}
-
- return try f.allocLocalValue(ty, alignment);
+ return result;
}
fn writeCValue(f: *Function, w: anytype, c_value: CValue, location: ValueRenderLocation) !void {
switch (c_value) {
- .constant => |val| try f.object.dg.renderValue(
- w,
- Type.fromInterned(f.object.dg.module.intern_pool.typeOf(val)),
- Value.fromInterned(val),
- location,
- ),
- .undef => |ty| try f.object.dg.renderValue(w, ty, Value.undef, location),
+ .none => unreachable,
+ .new_local, .local => |i| try w.print("t{d}", .{i}),
+ .local_ref => |i| {
+ const local = &f.locals.items[i];
+ if (local.flags.alignas.abiOrder().compare(.lt)) {
+ const gpa = f.object.dg.gpa;
+ const mod = f.object.dg.mod;
+ const ctype_pool = &f.object.dg.ctype_pool;
+
+ try w.writeByte('(');
+ try f.renderCType(w, try ctype_pool.getPointer(gpa, .{
+ .elem_ctype = try ctype_pool.fromIntInfo(gpa, .{
+ .signedness = .unsigned,
+ .bits = @min(
+ local.flags.alignas.toByteUnits(),
+ mod.resolved_target.result.maxIntAlignment(),
+ ) * 8,
+ }, mod, .forward),
+ }));
+ try w.writeByte(')');
+ }
+ try w.print("&t{d}", .{i});
+ },
+ .constant => |val| try f.object.dg.renderValue(w, val, location),
+ .arg => |i| try w.print("a{d}", .{i}),
+ .arg_array => |i| try f.writeCValueMember(w, .{ .arg = i }, .{ .identifier = "array" }),
+ .undef => |ty| try f.object.dg.renderUndefValue(w, ty, location),
else => try f.object.dg.writeCValue(w, c_value),
}
}
fn writeCValueDeref(f: *Function, w: anytype, c_value: CValue) !void {
switch (c_value) {
- .constant => |val| {
+ .none => unreachable,
+ .new_local, .local, .constant => {
try w.writeAll("(*");
- try f.object.dg.renderValue(
- w,
- Type.fromInterned(f.object.dg.module.intern_pool.typeOf(val)),
- Value.fromInterned(val),
- .Other,
- );
+ try f.writeCValue(w, c_value, .Other);
+ try w.writeByte(')');
+ },
+ .local_ref => |i| try w.print("t{d}", .{i}),
+ .arg => |i| try w.print("(*a{d})", .{i}),
+ .arg_array => |i| {
+ try w.writeAll("(*");
+ try f.writeCValueMember(w, .{ .arg = i }, .{ .identifier = "array" });
try w.writeByte(')');
},
else => try f.object.dg.writeCValueDeref(w, c_value),
}
}
- fn writeCValueMember(f: *Function, w: anytype, c_value: CValue, member: CValue) !void {
+ fn writeCValueMember(
+ f: *Function,
+ writer: anytype,
+ c_value: CValue,
+ member: CValue,
+ ) error{ OutOfMemory, AnalysisFail }!void {
switch (c_value) {
- .constant => |val| {
- try f.object.dg.renderValue(
- w,
- Type.fromInterned(f.object.dg.module.intern_pool.typeOf(val)),
- Value.fromInterned(val),
- .Other,
- );
- try w.writeByte('.');
- try f.writeCValue(w, member, .Other);
+ .new_local, .local, .local_ref, .constant, .arg, .arg_array => {
+ try f.writeCValue(writer, c_value, .Other);
+ try writer.writeByte('.');
+ try f.writeCValue(writer, member, .Other);
},
- else => try f.object.dg.writeCValueMember(w, c_value, member),
+ else => return f.object.dg.writeCValueMember(writer, c_value, member),
}
}
- fn writeCValueDerefMember(f: *Function, w: anytype, c_value: CValue, member: CValue) !void {
+ fn writeCValueDerefMember(f: *Function, writer: anytype, c_value: CValue, member: CValue) !void {
switch (c_value) {
- .constant => |val| {
- try w.writeByte('(');
- try f.object.dg.renderValue(
- w,
- Type.fromInterned(f.object.dg.module.intern_pool.typeOf(val)),
- Value.fromInterned(val),
- .Other,
- );
- try w.writeAll(")->");
- try f.writeCValue(w, member, .Other);
+ .new_local, .local, .arg, .arg_array => {
+ try f.writeCValue(writer, c_value, .Other);
+ try writer.writeAll("->");
+ },
+ .constant => {
+ try writer.writeByte('(');
+ try f.writeCValue(writer, c_value, .Other);
+ try writer.writeAll(")->");
},
- else => try f.object.dg.writeCValueDerefMember(w, c_value, member),
+ .local_ref => {
+ try f.writeCValueDeref(writer, c_value);
+ try writer.writeByte('.');
+ },
+ else => return f.object.dg.writeCValueDerefMember(writer, c_value, member),
}
+ try f.writeCValue(writer, member, .Other);
}
fn fail(f: *Function, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
return f.object.dg.fail(format, args);
}
- fn indexToCType(f: *Function, idx: CType.Index) CType {
- return f.object.dg.indexToCType(idx);
+ fn ctypeFromType(f: *Function, ty: Type, kind: CType.Kind) !CType {
+ return f.object.dg.ctypeFromType(ty, kind);
}
- fn typeToIndex(f: *Function, ty: Type, kind: CType.Kind) !CType.Index {
- return f.object.dg.typeToIndex(ty, kind);
+ fn byteSize(f: *Function, ctype: CType) u64 {
+ return f.object.dg.byteSize(ctype);
}
- fn typeToCType(f: *Function, ty: Type, kind: CType.Kind) !CType {
- return f.object.dg.typeToCType(ty, kind);
+ fn renderType(f: *Function, w: anytype, ctype: Type) !void {
+ return f.object.dg.renderType(w, ctype);
}
- fn byteSize(f: *Function, cty: CType) u64 {
- return f.object.dg.byteSize(cty);
- }
-
- fn renderType(f: *Function, w: anytype, t: Type) !void {
- return f.object.dg.renderType(w, t);
- }
-
- fn renderCType(f: *Function, w: anytype, t: CType.Index) !void {
- return f.object.dg.renderCType(w, t);
+ fn renderCType(f: *Function, w: anytype, ctype: CType) !void {
+ return f.object.dg.renderCType(w, ctype);
}
fn renderIntCast(f: *Function, w: anytype, dest_ty: Type, src: CValue, v: Vectorize, src_ty: Type, location: ValueRenderLocation) !void {
return f.object.dg.renderIntCast(w, dest_ty, .{ .c_value = .{ .f = f, .value = src, .v = v } }, src_ty, location);
}
- fn fmtIntLiteral(f: *Function, ty: Type, val: Value) !std.fmt.Formatter(formatIntLiteral) {
- return f.object.dg.fmtIntLiteral(ty, val, .Other);
+ fn fmtIntLiteral(f: *Function, val: Value) !std.fmt.Formatter(formatIntLiteral) {
+ return f.object.dg.fmtIntLiteral(val, .Other);
}
fn getLazyFnName(f: *Function, key: LazyFnKey, data: LazyFnValue.Data) ![]const u8 {
const gpa = f.object.dg.gpa;
+ const zcu = f.object.dg.zcu;
+ const ctype_pool = &f.object.dg.ctype_pool;
+
const gop = try f.lazy_fns.getOrPut(gpa, key);
if (!gop.found_existing) {
errdefer _ = f.lazy_fns.pop();
- var promoted = f.object.dg.ctypes.promote(gpa);
- defer f.object.dg.ctypes.demote(promoted);
- const arena = promoted.arena.allocator();
- const mod = f.object.dg.module;
-
gop.value_ptr.* = .{
.fn_name = switch (key) {
.tag_name,
.never_tail,
.never_inline,
- => |owner_decl| try std.fmt.allocPrint(arena, "zig_{s}_{}__{d}", .{
+ => |owner_decl| try ctype_pool.fmt(gpa, "zig_{s}_{}__{d}", .{
@tagName(key),
- fmtIdent(mod.intern_pool.stringToSlice(mod.declPtr(owner_decl).name)),
+ fmtIdent(zcu.intern_pool.stringToSlice(zcu.declPtr(owner_decl).name)),
@intFromEnum(owner_decl),
}),
},
@@ -495,7 +516,7 @@ pub const Function = struct {
},
};
}
- return gop.value_ptr.fn_name;
+ return gop.value_ptr.fn_name.slice(ctype_pool);
}
pub fn deinit(f: *Function) void {
@@ -506,21 +527,20 @@ pub const Function = struct {
f.blocks.deinit(gpa);
f.value_map.deinit();
f.lazy_fns.deinit(gpa);
- f.object.dg.ctypes.deinit(gpa);
}
fn typeOf(f: *Function, inst: Air.Inst.Ref) Type {
- const mod = f.object.dg.module;
- return f.air.typeOf(inst, &mod.intern_pool);
+ const zcu = f.object.dg.zcu;
+ return f.air.typeOf(inst, &zcu.intern_pool);
}
fn typeOfIndex(f: *Function, inst: Air.Inst.Index) Type {
- const mod = f.object.dg.module;
- return f.air.typeOfIndex(inst, &mod.intern_pool);
+ const zcu = f.object.dg.zcu;
+ return f.air.typeOfIndex(inst, &zcu.intern_pool);
}
};
-/// This data is available when outputting .c code for a `Module`.
+/// This data is available when outputting .c code for a `Zcu`.
/// It is not available when generating .h file.
pub const Object = struct {
dg: DeclGen,
@@ -542,13 +562,15 @@ pub const Object = struct {
/// This data is available both when outputting .c code and when outputting an .h file.
pub const DeclGen = struct {
gpa: mem.Allocator,
- module: *Module,
+ zcu: *Zcu,
+ mod: *Module,
pass: Pass,
is_naked_fn: bool,
/// This is a borrowed reference from `link.C`.
fwd_decl: std.ArrayList(u8),
- error_msg: ?*Module.ErrorMsg,
- ctypes: CType.Store,
+ error_msg: ?*Zcu.ErrorMsg,
+ ctype_pool: CType.Pool,
+ scratch: std.ArrayListUnmanaged(u32),
/// Keeps track of anonymous decls that need to be rendered before this
/// (named) Decl in the output C code.
anon_decl_deps: std.AutoArrayHashMapUnmanaged(InternPool.Index, C.DeclBlock),
@@ -566,75 +588,71 @@ pub const DeclGen = struct {
fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
@setCold(true);
- const mod = dg.module;
+ const zcu = dg.zcu;
const decl_index = dg.pass.decl;
- const decl = mod.declPtr(decl_index);
- const src_loc = decl.srcLoc(mod);
- dg.error_msg = try Module.ErrorMsg.create(dg.gpa, src_loc, format, args);
+ const decl = zcu.declPtr(decl_index);
+ const src_loc = decl.srcLoc(zcu);
+ dg.error_msg = try Zcu.ErrorMsg.create(dg.gpa, src_loc, format, args);
return error.AnalysisFail;
}
fn renderAnonDeclValue(
dg: *DeclGen,
writer: anytype,
- ty: Type,
ptr_val: Value,
anon_decl: InternPool.Key.Ptr.Addr.AnonDecl,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
- const mod = dg.module;
- const ip = &mod.intern_pool;
- const decl_val = anon_decl.val;
- const decl_ty = Type.fromInterned(ip.typeOf(decl_val));
+ const zcu = dg.zcu;
+ const ip = &zcu.intern_pool;
+ const ctype_pool = &dg.ctype_pool;
+ const decl_val = Value.fromInterned(anon_decl.val);
+ const decl_ty = decl_val.typeOf(zcu);
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
- if (ty.isPtrAtRuntime(mod) and !decl_ty.isFnOrHasRuntimeBits(mod)) {
- return dg.writeCValue(writer, .{ .undef = ty });
+ const ptr_ty = ptr_val.typeOf(zcu);
+ if (ptr_ty.isPtrAtRuntime(zcu) and !decl_ty.isFnOrHasRuntimeBits(zcu)) {
+ return dg.writeCValue(writer, .{ .undef = ptr_ty });
}
// Chase function values in order to be able to reference the original function.
- if (Value.fromInterned(decl_val).getFunction(mod)) |func| {
- _ = func;
- _ = ptr_val;
- _ = location;
- @panic("TODO");
- }
- if (Value.fromInterned(decl_val).getExternFunc(mod)) |extern_func| {
- _ = extern_func;
- _ = ptr_val;
- _ = location;
- @panic("TODO");
- }
+ if (decl_val.getFunction(zcu)) |func|
+ return dg.renderDeclValue(writer, ptr_val, func.owner_decl, location);
+ if (decl_val.getExternFunc(zcu)) |extern_func|
+ return dg.renderDeclValue(writer, ptr_val, extern_func.decl, location);
- assert(Value.fromInterned(decl_val).getVariable(mod) == null);
+ assert(decl_val.getVariable(zcu) == null);
// We shouldn't cast C function pointers as this is UB (when you call
// them). The analysis until now should ensure that the C function
// pointers are compatible. If they are not, then there is a bug
// somewhere and we should let the C compiler tell us about it.
- const need_typecast = if (ty.castPtrToFn(mod)) |_| false else !ty.childType(mod).eql(decl_ty, mod);
- if (need_typecast) {
+ const elem_ctype = (try dg.ctypeFromType(ptr_ty, .complete)).info(ctype_pool).pointer.elem_ctype;
+ const decl_ctype = try dg.ctypeFromType(decl_ty, .complete);
+ const need_cast = !elem_ctype.eql(decl_ctype) and
+ (elem_ctype.info(ctype_pool) != .function or decl_ctype.info(ctype_pool) != .function);
+ if (need_cast) {
try writer.writeAll("((");
- try dg.renderType(writer, ty);
+ try dg.renderType(writer, ptr_ty);
try writer.writeByte(')');
}
try writer.writeByte('&');
try renderAnonDeclName(writer, decl_val);
- if (need_typecast) try writer.writeByte(')');
+ if (need_cast) try writer.writeByte(')');
// Indicate that the anon decl should be rendered to the output so that
// our reference above is not undefined.
const ptr_type = ip.indexToKey(anon_decl.orig_ty).ptr_type;
- const gop = try dg.anon_decl_deps.getOrPut(dg.gpa, decl_val);
+ const gop = try dg.anon_decl_deps.getOrPut(dg.gpa, anon_decl.val);
if (!gop.found_existing) gop.value_ptr.* = .{};
// Only insert an alignment entry if the alignment is greater than ABI
// alignment. If there is already an entry, keep the greater alignment.
const explicit_alignment = ptr_type.flags.alignment;
if (explicit_alignment != .none) {
- const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(mod);
- if (explicit_alignment.compareStrict(.gt, abi_alignment)) {
- const aligned_gop = try dg.aligned_anon_decls.getOrPut(dg.gpa, decl_val);
+ const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(zcu);
+ if (explicit_alignment.order(abi_alignment).compare(.gt)) {
+ const aligned_gop = try dg.aligned_anon_decls.getOrPut(dg.gpa, anon_decl.val);
aligned_gop.value_ptr.* = if (aligned_gop.found_existing)
aligned_gop.value_ptr.maxStrict(explicit_alignment)
else
@@ -646,41 +664,46 @@ pub const DeclGen = struct {
fn renderDeclValue(
dg: *DeclGen,
writer: anytype,
- ty: Type,
val: Value,
decl_index: InternPool.DeclIndex,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
- const mod = dg.module;
- const decl = mod.declPtr(decl_index);
+ const zcu = dg.zcu;
+ const ctype_pool = &dg.ctype_pool;
+ const decl = zcu.declPtr(decl_index);
assert(decl.has_tv);
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
- if (ty.isPtrAtRuntime(mod) and !decl.typeOf(mod).isFnOrHasRuntimeBits(mod)) {
+ const ty = val.typeOf(zcu);
+ const decl_ty = decl.typeOf(zcu);
+ if (ty.isPtrAtRuntime(zcu) and !decl_ty.isFnOrHasRuntimeBits(zcu)) {
return dg.writeCValue(writer, .{ .undef = ty });
}
// Chase function values in order to be able to reference the original function.
- if (decl.val.getFunction(mod)) |func| if (func.owner_decl != decl_index)
- return dg.renderDeclValue(writer, ty, val, func.owner_decl, location);
- if (decl.val.getExternFunc(mod)) |extern_func| if (extern_func.decl != decl_index)
- return dg.renderDeclValue(writer, ty, val, extern_func.decl, location);
+ if (decl.val.getFunction(zcu)) |func| if (func.owner_decl != decl_index)
+ return dg.renderDeclValue(writer, val, func.owner_decl, location);
+ if (decl.val.getExternFunc(zcu)) |extern_func| if (extern_func.decl != decl_index)
+ return dg.renderDeclValue(writer, val, extern_func.decl, location);
- if (decl.val.getVariable(mod)) |variable| try dg.renderFwdDecl(decl_index, variable, .tentative);
+ if (decl.val.getVariable(zcu)) |variable| try dg.renderFwdDecl(decl_index, variable, .tentative);
// We shouldn't cast C function pointers as this is UB (when you call
// them). The analysis until now should ensure that the C function
// pointers are compatible. If they are not, then there is a bug
// somewhere and we should let the C compiler tell us about it.
- const need_typecast = if (ty.castPtrToFn(mod)) |_| false else !ty.childType(mod).eql(decl.typeOf(mod), mod);
- if (need_typecast) {
+ const elem_ctype = (try dg.ctypeFromType(ty, .complete)).info(ctype_pool).pointer.elem_ctype;
+ const decl_ctype = try dg.ctypeFromType(decl_ty, .complete);
+ const need_cast = !elem_ctype.eql(decl_ctype) and
+ (elem_ctype.info(ctype_pool) != .function or decl_ctype.info(ctype_pool) != .function);
+ if (need_cast) {
try writer.writeAll("((");
try dg.renderType(writer, ty);
try writer.writeByte(')');
}
try writer.writeByte('&');
try dg.renderDeclName(writer, decl_index, 0);
- if (need_typecast) try writer.writeByte(')');
+ if (need_cast) try writer.writeByte(')');
}
/// Renders a "parent" pointer by recursing to the root decl/variable
@@ -691,33 +714,34 @@ pub const DeclGen = struct {
ptr_val: InternPool.Index,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
- const mod = dg.module;
- const ptr_ty = Type.fromInterned(mod.intern_pool.typeOf(ptr_val));
- const ptr_cty = try dg.typeToIndex(ptr_ty, .complete);
- const ptr = mod.intern_pool.indexToKey(ptr_val).ptr;
+ const zcu = dg.zcu;
+ const ip = &zcu.intern_pool;
+ const ptr_ty = Type.fromInterned(ip.typeOf(ptr_val));
+ const ptr_ctype = try dg.ctypeFromType(ptr_ty, .complete);
+ const ptr_child_ctype = ptr_ctype.info(&dg.ctype_pool).pointer.elem_ctype;
+ const ptr = ip.indexToKey(ptr_val).ptr;
switch (ptr.addr) {
- .decl => |d| try dg.renderDeclValue(writer, ptr_ty, Value.fromInterned(ptr_val), d, location),
- .anon_decl => |anon_decl| try dg.renderAnonDeclValue(writer, ptr_ty, Value.fromInterned(ptr_val), anon_decl, location),
+ .decl => |d| try dg.renderDeclValue(writer, Value.fromInterned(ptr_val), d, location),
+ .anon_decl => |anon_decl| try dg.renderAnonDeclValue(writer, Value.fromInterned(ptr_val), anon_decl, location),
.int => |int| {
try writer.writeByte('(');
- try dg.renderCType(writer, ptr_cty);
- try writer.print("){x}", .{try dg.fmtIntLiteral(Type.usize, Value.fromInterned(int), .Other)});
+ try dg.renderCType(writer, ptr_ctype);
+ try writer.print("){x}", .{try dg.fmtIntLiteral(Value.fromInterned(int), .Other)});
},
.eu_payload, .opt_payload => |base| {
- const ptr_base_ty = Type.fromInterned(mod.intern_pool.typeOf(base));
- const base_ty = ptr_base_ty.childType(mod);
+ const ptr_base_ty = Type.fromInterned(ip.typeOf(base));
+ const base_ty = ptr_base_ty.childType(zcu);
// Ensure complete type definition is visible before accessing fields.
- _ = try dg.typeToIndex(base_ty, .complete);
+ _ = try dg.ctypeFromType(base_ty, .complete);
const payload_ty = switch (ptr.addr) {
- .eu_payload => base_ty.errorUnionPayload(mod),
- .opt_payload => base_ty.optionalChild(mod),
+ .eu_payload => base_ty.errorUnionPayload(zcu),
+ .opt_payload => base_ty.optionalChild(zcu),
else => unreachable,
};
- const ptr_payload_ty = try mod.adjustPtrTypeChild(ptr_base_ty, payload_ty);
- const ptr_payload_cty = try dg.typeToIndex(ptr_payload_ty, .complete);
- if (ptr_cty != ptr_payload_cty) {
+ const payload_ctype = try dg.ctypeFromType(payload_ty, .forward);
+ if (!ptr_child_ctype.eql(payload_ctype)) {
try writer.writeByte('(');
- try dg.renderCType(writer, ptr_cty);
+ try dg.renderCType(writer, ptr_ctype);
try writer.writeByte(')');
}
try writer.writeAll("&(");
@@ -725,70 +749,90 @@ pub const DeclGen = struct {
try writer.writeAll(")->payload");
},
.elem => |elem| {
- const ptr_base_ty = Type.fromInterned(mod.intern_pool.typeOf(elem.base));
- const elem_ty = ptr_base_ty.elemType2(mod);
- const ptr_elem_ty = try mod.adjustPtrTypeChild(ptr_base_ty, elem_ty);
- const ptr_elem_cty = try dg.typeToIndex(ptr_elem_ty, .complete);
- if (ptr_cty != ptr_elem_cty) {
+ const ptr_base_ty = Type.fromInterned(ip.typeOf(elem.base));
+ const elem_ty = ptr_base_ty.elemType2(zcu);
+ const elem_ctype = try dg.ctypeFromType(elem_ty, .forward);
+ if (!ptr_child_ctype.eql(elem_ctype)) {
try writer.writeByte('(');
- try dg.renderCType(writer, ptr_cty);
+ try dg.renderCType(writer, ptr_ctype);
try writer.writeByte(')');
}
try writer.writeAll("&(");
- if (mod.intern_pool.indexToKey(ptr_base_ty.toIntern()).ptr_type.flags.size == .One)
+ if (ip.indexToKey(ptr_base_ty.toIntern()).ptr_type.flags.size == .One)
try writer.writeByte('*');
try dg.renderParentPtr(writer, elem.base, location);
try writer.print(")[{d}]", .{elem.index});
},
.field => |field| {
- const ptr_base_ty = Type.fromInterned(mod.intern_pool.typeOf(field.base));
- const base_ty = ptr_base_ty.childType(mod);
- // Ensure complete type definition is visible before accessing fields.
- _ = try dg.typeToIndex(base_ty, .complete);
- const field_ty = switch (mod.intern_pool.indexToKey(base_ty.toIntern())) {
- .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(@as(usize, @intCast(field.index)), mod),
- .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
- .One, .Many, .C => unreachable,
- .Slice => switch (field.index) {
- Value.slice_ptr_index => base_ty.slicePtrFieldType(mod),
- Value.slice_len_index => Type.usize,
- else => unreachable,
- },
+ const ptr_base_ty = Type.fromInterned(ip.typeOf(field.base));
+ const base_ty = ptr_base_ty.childType(zcu);
+ // Ensure complete type definition is available before accessing fields.
+ _ = try dg.ctypeFromType(base_ty, .complete);
+ switch (fieldLocation(ptr_base_ty, ptr_ty, @as(u32, @intCast(field.index)), zcu)) {
+ .begin => {
+ const ptr_base_ctype = try dg.ctypeFromType(ptr_base_ty, .complete);
+ if (!ptr_ctype.eql(ptr_base_ctype)) {
+ try writer.writeByte('(');
+ try dg.renderCType(writer, ptr_ctype);
+ try writer.writeByte(')');
+ }
+ try dg.renderParentPtr(writer, field.base, location);
},
- else => unreachable,
- };
- const ptr_field_ty = try mod.adjustPtrTypeChild(ptr_base_ty, field_ty);
- const ptr_field_cty = try dg.typeToIndex(ptr_field_ty, .complete);
- if (ptr_cty != ptr_field_cty) {
- try writer.writeByte('(');
- try dg.renderCType(writer, ptr_cty);
- try writer.writeByte(')');
- }
- switch (fieldLocation(ptr_base_ty, ptr_ty, @as(u32, @intCast(field.index)), mod)) {
- .begin => try dg.renderParentPtr(writer, field.base, location),
.field => |name| {
+ const field_ty = switch (ip.indexToKey(base_ty.toIntern())) {
+ .anon_struct_type,
+ .struct_type,
+ .union_type,
+ => base_ty.structFieldType(@as(usize, @intCast(field.index)), zcu),
+ .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
+ .One, .Many, .C => unreachable,
+ .Slice => switch (field.index) {
+ Value.slice_ptr_index => base_ty.slicePtrFieldType(zcu),
+ Value.slice_len_index => Type.usize,
+ else => unreachable,
+ },
+ },
+ else => unreachable,
+ };
+ const field_ctype = try dg.ctypeFromType(field_ty, .forward);
+ if (!ptr_child_ctype.eql(field_ctype)) {
+ try writer.writeByte('(');
+ try dg.renderCType(writer, ptr_ctype);
+ try writer.writeByte(')');
+ }
try writer.writeAll("&(");
try dg.renderParentPtr(writer, field.base, location);
try writer.writeAll(")->");
try dg.writeCValue(writer, name);
},
.byte_offset => |byte_offset| {
- const u8_ptr_ty = try mod.adjustPtrTypeChild(ptr_ty, Type.u8);
- const byte_offset_val = try mod.intValue(Type.usize, byte_offset);
+ const u8_ptr_ty = try zcu.adjustPtrTypeChild(ptr_ty, Type.u8);
+ const u8_ptr_ctype = try dg.ctypeFromType(u8_ptr_ty, .complete);
+ if (!ptr_ctype.eql(u8_ptr_ctype)) {
+ try writer.writeByte('(');
+ try dg.renderCType(writer, ptr_ctype);
+ try writer.writeByte(')');
+ }
try writer.writeAll("((");
- try dg.renderType(writer, u8_ptr_ty);
+ try dg.renderCType(writer, u8_ptr_ctype);
try writer.writeByte(')');
try dg.renderParentPtr(writer, field.base, location);
try writer.print(" + {})", .{
- try dg.fmtIntLiteral(Type.usize, byte_offset_val, .Other),
+ try dg.fmtIntLiteral(try zcu.intValue(Type.usize, byte_offset), .Other),
});
},
.end => {
+ const ptr_base_ctype = try dg.ctypeFromType(ptr_base_ty, .complete);
+ if (!ptr_ctype.eql(ptr_base_ctype)) {
+ try writer.writeByte('(');
+ try dg.renderCType(writer, ptr_ctype);
+ try writer.writeByte(')');
+ }
try writer.writeAll("((");
try dg.renderParentPtr(writer, field.base, location);
try writer.print(") + {})", .{
- try dg.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1), .Other),
+ try dg.fmtIntLiteral(try zcu.intValue(Type.usize, 1), .Other),
});
},
}
@@ -800,215 +844,21 @@ pub const DeclGen = struct {
fn renderValue(
dg: *DeclGen,
writer: anytype,
- ty: Type,
val: Value,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
- const mod = dg.module;
- const ip = &mod.intern_pool;
+ const zcu = dg.zcu;
+ const ip = &zcu.intern_pool;
+ const target = &dg.mod.resolved_target.result;
- const target = mod.getTarget();
const initializer_type: ValueRenderLocation = switch (location) {
.StaticInitializer => .StaticInitializer,
else => .Initializer,
};
- const safety_on = switch (mod.optimizeMode()) {
- .Debug, .ReleaseSafe => true,
- .ReleaseFast, .ReleaseSmall => false,
- };
-
- if (val.isUndefDeep(mod)) {
- switch (ty.zigTypeTag(mod)) {
- .Bool => {
- if (safety_on) {
- return writer.writeAll("0xaa");
- } else {
- return writer.writeAll("false");
- }
- },
- .Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, val, location)}),
- .Float => {
- const bits = ty.floatBits(target);
- // All unsigned ints matching float types are pre-allocated.
- const repr_ty = mod.intType(.unsigned, bits) catch unreachable;
-
- try writer.writeAll("zig_make_");
- try dg.renderTypeForBuiltinFnName(writer, ty);
- try writer.writeByte('(');
- switch (bits) {
- 16 => try writer.print("{x}", .{@as(f16, @bitCast(undefPattern(i16)))}),
- 32 => try writer.print("{x}", .{@as(f32, @bitCast(undefPattern(i32)))}),
- 64 => try writer.print("{x}", .{@as(f64, @bitCast(undefPattern(i64)))}),
- 80 => try writer.print("{x}", .{@as(f80, @bitCast(undefPattern(i80)))}),
- 128 => try writer.print("{x}", .{@as(f128, @bitCast(undefPattern(i128)))}),
- else => unreachable,
- }
- try writer.writeAll(", ");
- try dg.renderValue(writer, repr_ty, Value.undef, .FunctionArgument);
- return writer.writeByte(')');
- },
- .Pointer => if (ty.isSlice(mod)) {
- if (!location.isInitializer()) {
- try writer.writeByte('(');
- try dg.renderType(writer, ty);
- try writer.writeByte(')');
- }
-
- try writer.writeAll("{(");
- const ptr_ty = ty.slicePtrFieldType(mod);
- try dg.renderType(writer, ptr_ty);
- return writer.print("){x}, {0x}}}", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
- } else {
- try writer.writeAll("((");
- try dg.renderType(writer, ty);
- return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
- },
- .Optional => {
- const payload_ty = ty.optionalChild(mod);
-
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- return dg.renderValue(writer, Type.bool, val, location);
- }
-
- if (ty.optionalReprIsPayload(mod)) {
- return dg.renderValue(writer, payload_ty, val, location);
- }
-
- if (!location.isInitializer()) {
- try writer.writeByte('(');
- try dg.renderType(writer, ty);
- try writer.writeByte(')');
- }
-
- try writer.writeAll("{ .payload = ");
- try dg.renderValue(writer, payload_ty, val, initializer_type);
- try writer.writeAll(", .is_null = ");
- try dg.renderValue(writer, Type.bool, val, initializer_type);
- return writer.writeAll(" }");
- },
- .Struct => switch (ty.containerLayout(mod)) {
- .auto, .@"extern" => {
- if (!location.isInitializer()) {
- try writer.writeByte('(');
- try dg.renderType(writer, ty);
- try writer.writeByte(')');
- }
-
- try writer.writeByte('{');
- var empty = true;
- for (0..ty.structFieldCount(mod)) |field_index| {
- if (ty.structFieldIsComptime(field_index, mod)) continue;
- const field_ty = ty.structFieldType(field_index, mod);
- if (!field_ty.hasRuntimeBits(mod)) continue;
-
- if (!empty) try writer.writeByte(',');
- try dg.renderValue(writer, field_ty, val, initializer_type);
-
- empty = false;
- }
-
- return writer.writeByte('}');
- },
- .@"packed" => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, Value.undef, .Other)}),
- },
- .Union => {
- if (!location.isInitializer()) {
- try writer.writeByte('(');
- try dg.renderType(writer, ty);
- try writer.writeByte(')');
- }
-
- try writer.writeByte('{');
- if (ty.unionTagTypeSafety(mod)) |tag_ty| {
- const layout = ty.unionGetLayout(mod);
- if (layout.tag_size != 0) {
- try writer.writeAll(" .tag = ");
- try dg.renderValue(writer, tag_ty, val, initializer_type);
- }
- if (ty.unionHasAllZeroBitFieldTypes(mod)) return try writer.writeByte('}');
- if (layout.tag_size != 0) try writer.writeByte(',');
- try writer.writeAll(" .payload = {");
- }
- const union_obj = mod.typeToUnion(ty).?;
- for (0..union_obj.field_types.len) |field_index| {
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBits(mod)) continue;
- try dg.renderValue(writer, field_ty, val, initializer_type);
- break;
- }
- if (ty.unionTagTypeSafety(mod)) |_| try writer.writeByte('}');
- return writer.writeByte('}');
- },
- .ErrorUnion => {
- const payload_ty = ty.errorUnionPayload(mod);
- const error_ty = ty.errorUnionSet(mod);
-
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- return dg.renderValue(writer, error_ty, val, location);
- }
-
- if (!location.isInitializer()) {
- try writer.writeByte('(');
- try dg.renderType(writer, ty);
- try writer.writeByte(')');
- }
-
- try writer.writeAll("{ .payload = ");
- try dg.renderValue(writer, payload_ty, val, initializer_type);
- try writer.writeAll(", .error = ");
- try dg.renderValue(writer, error_ty, val, initializer_type);
- return writer.writeAll(" }");
- },
- .Array, .Vector => {
- const ai = ty.arrayInfo(mod);
- if (ai.elem_type.eql(Type.u8, mod)) {
- const c_len = ty.arrayLenIncludingSentinel(mod);
- var literal = stringLiteral(writer, c_len);
- try literal.start();
- var index: u64 = 0;
- while (index < c_len) : (index += 1)
- try literal.writeChar(0xaa);
- return literal.end();
- } else {
- if (!location.isInitializer()) {
- try writer.writeByte('(');
- try dg.renderType(writer, ty);
- try writer.writeByte(')');
- }
-
- try writer.writeByte('{');
- const c_len = ty.arrayLenIncludingSentinel(mod);
- var index: u64 = 0;
- while (index < c_len) : (index += 1) {
- if (index > 0) try writer.writeAll(", ");
- try dg.renderValue(writer, ty.childType(mod), val, initializer_type);
- }
- return writer.writeByte('}');
- }
- },
- .ComptimeInt,
- .ComptimeFloat,
- .Type,
- .EnumLiteral,
- .Void,
- .NoReturn,
- .Undefined,
- .Null,
- .Opaque,
- => unreachable,
-
- .Fn,
- .Frame,
- .AnyFrame,
- => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{
- @tagName(tag),
- }),
- }
- unreachable;
- }
-
- switch (ip.indexToKey(val.ip_index)) {
+ const ty = val.typeOf(zcu);
+ if (val.isUndefDeep(zcu)) return dg.renderUndefValue(writer, ty, location);
+ switch (ip.indexToKey(val.toIntern())) {
// types, not values
.int_type,
.ptr_type,
@@ -1050,26 +900,28 @@ pub const DeclGen = struct {
.empty_enum_value,
=> unreachable, // non-runtime values
.int => |int| switch (int.storage) {
- .u64, .i64, .big_int => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}),
+ .u64, .i64, .big_int => try writer.print("{}", .{try dg.fmtIntLiteral(val, location)}),
.lazy_align, .lazy_size => {
try writer.writeAll("((");
try dg.renderType(writer, ty);
- return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
+ try writer.print("){x})", .{try dg.fmtIntLiteral(
+ try zcu.intValue(Type.usize, val.toUnsignedInt(zcu)),
+ .Other,
+ )});
},
},
.err => |err| try writer.print("zig_error_{}", .{
fmtIdent(ip.stringToSlice(err.name)),
}),
.error_union => |error_union| {
- const payload_ty = ty.errorUnionPayload(mod);
- const error_ty = ty.errorUnionSet(mod);
- const err_int_ty = try mod.errorIntType();
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ const payload_ty = ty.errorUnionPayload(zcu);
+ const error_ty = ty.errorUnionSet(zcu);
+ const err_int_ty = try zcu.errorIntType();
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
switch (error_union.val) {
.err_name => |err_name| return dg.renderValue(
writer,
- error_ty,
- Value.fromInterned((try mod.intern(.{ .err = .{
+ Value.fromInterned((try zcu.intern(.{ .err = .{
.ty = error_ty.toIntern(),
.name = err_name,
} }))),
@@ -1077,8 +929,7 @@ pub const DeclGen = struct {
),
.payload => return dg.renderValue(
writer,
- err_int_ty,
- try mod.intValue(err_int_ty, 0),
+ try zcu.intValue(err_int_ty, 0),
location,
),
}
@@ -1093,9 +944,8 @@ pub const DeclGen = struct {
try writer.writeAll("{ .payload = ");
try dg.renderValue(
writer,
- payload_ty,
Value.fromInterned(switch (error_union.val) {
- .err_name => try mod.intern(.{ .undef = payload_ty.ip_index }),
+ .err_name => (try zcu.undefValue(payload_ty)).toIntern(),
.payload => |payload| payload,
}),
initializer_type,
@@ -1104,8 +954,7 @@ pub const DeclGen = struct {
switch (error_union.val) {
.err_name => |err_name| try dg.renderValue(
writer,
- error_ty,
- Value.fromInterned((try mod.intern(.{ .err = .{
+ Value.fromInterned((try zcu.intern(.{ .err = .{
.ty = error_ty.toIntern(),
.name = err_name,
} }))),
@@ -1113,24 +962,23 @@ pub const DeclGen = struct {
),
.payload => try dg.renderValue(
writer,
- err_int_ty,
- try mod.intValue(err_int_ty, 0),
+ try zcu.intValue(err_int_ty, 0),
location,
),
}
try writer.writeAll(" }");
},
- .enum_tag => {
- const enum_tag = ip.indexToKey(val.ip_index).enum_tag;
- const int_tag_ty = ip.typeOf(enum_tag.int);
- try dg.renderValue(writer, Type.fromInterned(int_tag_ty), Value.fromInterned(enum_tag.int), location);
- },
+ .enum_tag => |enum_tag| try dg.renderValue(
+ writer,
+ Value.fromInterned(enum_tag.int),
+ location,
+ ),
.float => {
- const bits = ty.floatBits(target);
- const f128_val = val.toFloat(f128, mod);
+ const bits = ty.floatBits(target.*);
+ const f128_val = val.toFloat(f128, zcu);
// All unsigned ints matching float types are pre-allocated.
- const repr_ty = mod.intType(.unsigned, bits) catch unreachable;
+ const repr_ty = zcu.intType(.unsigned, bits) catch unreachable;
assert(bits <= 128);
var repr_val_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined;
@@ -1141,26 +989,24 @@ pub const DeclGen = struct {
};
switch (bits) {
- 16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, mod)))),
- 32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, mod)))),
- 64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, mod)))),
- 80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, mod)))),
+ 16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, zcu)))),
+ 32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, zcu)))),
+ 64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, zcu)))),
+ 80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, zcu)))),
128 => repr_val_big.set(@as(u128, @bitCast(f128_val))),
else => unreachable,
}
- const repr_val = try mod.intValue_big(repr_ty, repr_val_big.toConst());
-
var empty = true;
if (std.math.isFinite(f128_val)) {
try writer.writeAll("zig_make_");
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
switch (bits) {
- 16 => try writer.print("{x}", .{val.toFloat(f16, mod)}),
- 32 => try writer.print("{x}", .{val.toFloat(f32, mod)}),
- 64 => try writer.print("{x}", .{val.toFloat(f64, mod)}),
- 80 => try writer.print("{x}", .{val.toFloat(f80, mod)}),
+ 16 => try writer.print("{x}", .{val.toFloat(f16, zcu)}),
+ 32 => try writer.print("{x}", .{val.toFloat(f32, zcu)}),
+ 64 => try writer.print("{x}", .{val.toFloat(f64, zcu)}),
+ 80 => try writer.print("{x}", .{val.toFloat(f80, zcu)}),
128 => try writer.print("{x}", .{f128_val}),
else => unreachable,
}
@@ -1200,17 +1046,20 @@ pub const DeclGen = struct {
if (std.math.isNan(f128_val)) switch (bits) {
// We only actually need to pass the significand, but it will get
// properly masked anyway, so just pass the whole value.
- 16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, mod)))}),
- 32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, mod)))}),
- 64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, mod)))}),
- 80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, mod)))}),
+ 16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, zcu)))}),
+ 32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, zcu)))}),
+ 64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, zcu)))}),
+ 80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, zcu)))}),
128 => try writer.print("\"0x{x}\"", .{@as(u128, @bitCast(f128_val))}),
else => unreachable,
};
try writer.writeAll(", ");
empty = false;
}
- try writer.print("{x}", .{try dg.fmtIntLiteral(repr_ty, repr_val, location)});
+ try writer.print("{x}", .{try dg.fmtIntLiteral(
+ try zcu.intValue_big(repr_ty, repr_val_big.toConst()),
+ location,
+ )});
if (!empty) try writer.writeByte(')');
},
.slice => |slice| {
@@ -1220,42 +1069,39 @@ pub const DeclGen = struct {
try writer.writeByte(')');
}
try writer.writeByte('{');
- try dg.renderValue(writer, ty.slicePtrFieldType(mod), Value.fromInterned(slice.ptr), initializer_type);
+ try dg.renderValue(writer, Value.fromInterned(slice.ptr), initializer_type);
try writer.writeAll(", ");
- try dg.renderValue(writer, Type.usize, Value.fromInterned(slice.len), initializer_type);
+ try dg.renderValue(writer, Value.fromInterned(slice.len), initializer_type);
try writer.writeByte('}');
},
.ptr => |ptr| switch (ptr.addr) {
- .decl => |d| try dg.renderDeclValue(writer, ty, val, d, location),
- .anon_decl => |decl_val| try dg.renderAnonDeclValue(writer, ty, val, decl_val, location),
+ .decl => |d| try dg.renderDeclValue(writer, val, d, location),
+ .anon_decl => |decl_val| try dg.renderAnonDeclValue(writer, val, decl_val, location),
.int => |int| {
try writer.writeAll("((");
try dg.renderType(writer, ty);
- try writer.print("){x})", .{
- try dg.fmtIntLiteral(Type.usize, Value.fromInterned(int), location),
- });
+ try writer.print("){x})", .{try dg.fmtIntLiteral(Value.fromInterned(int), location)});
},
.eu_payload,
.opt_payload,
.elem,
.field,
- => try dg.renderParentPtr(writer, val.ip_index, location),
+ => try dg.renderParentPtr(writer, val.toIntern(), location),
.comptime_field, .comptime_alloc => unreachable,
},
.opt => |opt| {
- const payload_ty = ty.optionalChild(mod);
+ const payload_ty = ty.optionalChild(zcu);
const is_null_val = Value.makeBool(opt.val == .none);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
- return dg.renderValue(writer, Type.bool, is_null_val, location);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu))
+ return dg.renderValue(writer, is_null_val, location);
- if (ty.optionalReprIsPayload(mod)) return dg.renderValue(
+ if (ty.optionalReprIsPayload(zcu)) return dg.renderValue(
writer,
- payload_ty,
switch (opt.val) {
- .none => switch (payload_ty.zigTypeTag(mod)) {
- .ErrorSet => try mod.intValue(try mod.errorIntType(), 0),
- .Pointer => try mod.getCoerced(val, payload_ty),
+ .none => switch (payload_ty.zigTypeTag(zcu)) {
+ .ErrorSet => try zcu.intValue(try zcu.errorIntType(), 0),
+ .Pointer => try zcu.getCoerced(val, payload_ty),
else => unreachable,
},
else => |payload| Value.fromInterned(payload),
@@ -1270,15 +1116,19 @@ pub const DeclGen = struct {
}
try writer.writeAll("{ .payload = ");
- try dg.renderValue(writer, payload_ty, Value.fromInterned(switch (opt.val) {
- .none => try mod.intern(.{ .undef = payload_ty.ip_index }),
- else => |payload| payload,
- }), initializer_type);
+ switch (opt.val) {
+ .none => try dg.renderUndefValue(writer, payload_ty, initializer_type),
+ else => |payload| try dg.renderValue(
+ writer,
+ Value.fromInterned(payload),
+ initializer_type,
+ ),
+ }
try writer.writeAll(", .is_null = ");
- try dg.renderValue(writer, Type.bool, is_null_val, initializer_type);
+ try dg.renderValue(writer, is_null_val, initializer_type);
try writer.writeAll(" }");
},
- .aggregate => switch (ip.indexToKey(ty.ip_index)) {
+ .aggregate => switch (ip.indexToKey(ty.toIntern())) {
.array_type, .vector_type => {
if (location == .FunctionArgument) {
try writer.writeByte('(');
@@ -1287,21 +1137,21 @@ pub const DeclGen = struct {
}
// Fall back to generic implementation.
- const ai = ty.arrayInfo(mod);
- if (ai.elem_type.eql(Type.u8, mod)) {
- var literal = stringLiteral(writer, ty.arrayLenIncludingSentinel(mod));
+ const ai = ty.arrayInfo(zcu);
+ if (ai.elem_type.eql(Type.u8, zcu)) {
+ var literal = stringLiteral(writer, ty.arrayLenIncludingSentinel(zcu));
try literal.start();
var index: usize = 0;
while (index < ai.len) : (index += 1) {
- const elem_val = try val.elemValue(mod, index);
- const elem_val_u8: u8 = if (elem_val.isUndef(mod))
+ const elem_val = try val.elemValue(zcu, index);
+ const elem_val_u8: u8 = if (elem_val.isUndef(zcu))
undefPattern(u8)
else
- @intCast(elem_val.toUnsignedInt(mod));
+ @intCast(elem_val.toUnsignedInt(zcu));
try literal.writeChar(elem_val_u8);
}
if (ai.sentinel) |s| {
- const s_u8: u8 = @intCast(s.toUnsignedInt(mod));
+ const s_u8: u8 = @intCast(s.toUnsignedInt(zcu));
if (s_u8 != 0) try literal.writeChar(s_u8);
}
try literal.end();
@@ -1310,12 +1160,12 @@ pub const DeclGen = struct {
var index: usize = 0;
while (index < ai.len) : (index += 1) {
if (index != 0) try writer.writeByte(',');
- const elem_val = try val.elemValue(mod, index);
- try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type);
+ const elem_val = try val.elemValue(zcu, index);
+ try dg.renderValue(writer, elem_val, initializer_type);
}
if (ai.sentinel) |s| {
if (index != 0) try writer.writeByte(',');
- try dg.renderValue(writer, ai.elem_type, s, initializer_type);
+ try dg.renderValue(writer, s, initializer_type);
}
try writer.writeByte('}');
}
@@ -1333,27 +1183,29 @@ pub const DeclGen = struct {
const comptime_val = tuple.values.get(ip)[field_index];
if (comptime_val != .none) continue;
const field_ty = Type.fromInterned(tuple.types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (!empty) try writer.writeByte(',');
- const field_val = Value.fromInterned(switch (ip.indexToKey(val.ip_index).aggregate.storage) {
- .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
- .ty = field_ty.toIntern(),
- .storage = .{ .u64 = bytes[field_index] },
- } }),
- .elems => |elems| elems[field_index],
- .repeated_elem => |elem| elem,
- });
- try dg.renderValue(writer, field_ty, field_val, initializer_type);
+ const field_val = Value.fromInterned(
+ switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
+ .bytes => |bytes| try ip.get(zcu.gpa, .{ .int = .{
+ .ty = field_ty.toIntern(),
+ .storage = .{ .u64 = bytes[field_index] },
+ } }),
+ .elems => |elems| elems[field_index],
+ .repeated_elem => |elem| elem,
+ },
+ );
+ try dg.renderValue(writer, field_val, initializer_type);
empty = false;
}
try writer.writeByte('}');
},
.struct_type => {
- const struct_type = ip.loadStructType(ty.toIntern());
- switch (struct_type.layout) {
+ const loaded_struct = ip.loadStructType(ty.toIntern());
+ switch (loaded_struct.layout) {
.auto, .@"extern" => {
if (!location.isInitializer()) {
try writer.writeByte('(');
@@ -1362,47 +1214,46 @@ pub const DeclGen = struct {
}
try writer.writeByte('{');
- var empty = true;
- for (0..struct_type.field_types.len) |field_index| {
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
- if (struct_type.fieldIsComptime(ip, field_index)) continue;
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
-
- if (!empty) try writer.writeByte(',');
- const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
- .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
+ var field_it = loaded_struct.iterateRuntimeOrder(ip);
+ var need_comma = false;
+ while (field_it.next()) |field_index| {
+ const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+
+ if (need_comma) try writer.writeByte(',');
+ need_comma = true;
+ const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
+ .bytes => |bytes| try ip.get(zcu.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes[field_index] },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
};
- try dg.renderValue(writer, field_ty, Value.fromInterned(field_val), initializer_type);
-
- empty = false;
+ try dg.renderValue(writer, Value.fromInterned(field_val), initializer_type);
}
try writer.writeByte('}');
},
.@"packed" => {
- const int_info = ty.intInfo(mod);
+ const int_info = ty.intInfo(zcu);
const bits = Type.smallestUnsignedBits(int_info.bits - 1);
- const bit_offset_ty = try mod.intType(.unsigned, bits);
+ const bit_offset_ty = try zcu.intType(.unsigned, bits);
var bit_offset: u64 = 0;
var eff_num_fields: usize = 0;
- for (0..struct_type.field_types.len) |field_index| {
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ for (0..loaded_struct.field_types.len) |field_index| {
+ const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
eff_num_fields += 1;
}
if (eff_num_fields == 0) {
try writer.writeByte('(');
- try dg.renderValue(writer, ty, Value.undef, initializer_type);
+ try dg.renderUndefValue(writer, ty, initializer_type);
try writer.writeByte(')');
- } else if (ty.bitSize(mod) > 64) {
+ } else if (ty.bitSize(zcu) > 64) {
// zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
var num_or = eff_num_fields - 1;
while (num_or > 0) : (num_or -= 1) {
@@ -1413,12 +1264,12 @@ pub const DeclGen = struct {
var eff_index: usize = 0;
var needs_closing_paren = false;
- for (0..struct_type.field_types.len) |field_index| {
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ for (0..loaded_struct.field_types.len) |field_index| {
+ const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
- const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
- .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
+ const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
+ .bytes => |bytes| try ip.get(zcu.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes[field_index] },
} }),
@@ -1432,8 +1283,7 @@ pub const DeclGen = struct {
try writer.writeByte('(');
try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument);
try writer.writeAll(", ");
- const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
- try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
+ try dg.renderValue(writer, try zcu.intValue(bit_offset_ty, bit_offset), .FunctionArgument);
try writer.writeByte(')');
} else {
try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument);
@@ -1442,7 +1292,7 @@ pub const DeclGen = struct {
if (needs_closing_paren) try writer.writeByte(')');
if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
- bit_offset += field_ty.bitSize(mod);
+ bit_offset += field_ty.bitSize(zcu);
needs_closing_paren = true;
eff_index += 1;
}
@@ -1450,17 +1300,17 @@ pub const DeclGen = struct {
try writer.writeByte('(');
// a << a_off | b << b_off | c << c_off
var empty = true;
- for (0..struct_type.field_types.len) |field_index| {
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ for (0..loaded_struct.field_types.len) |field_index| {
+ const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (!empty) try writer.writeAll(" | ");
try writer.writeByte('(');
try dg.renderType(writer, ty);
try writer.writeByte(')');
- const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
- .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
+ const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
+ .bytes => |bytes| try ip.get(zcu.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes[field_index] },
} }),
@@ -1469,15 +1319,14 @@ pub const DeclGen = struct {
};
if (bit_offset != 0) {
- try dg.renderValue(writer, field_ty, Value.fromInterned(field_val), .Other);
+ try dg.renderValue(writer, Value.fromInterned(field_val), .Other);
try writer.writeAll(" << ");
- const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
- try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
+ try dg.renderValue(writer, try zcu.intValue(bit_offset_ty, bit_offset), .FunctionArgument);
} else {
- try dg.renderValue(writer, field_ty, Value.fromInterned(field_val), .Other);
+ try dg.renderValue(writer, Value.fromInterned(field_val), .Other);
}
- bit_offset += field_ty.bitSize(mod);
+ bit_offset += field_ty.bitSize(zcu);
empty = false;
}
try writer.writeByte(')');
@@ -1488,30 +1337,30 @@ pub const DeclGen = struct {
else => unreachable,
},
.un => |un| {
- const union_obj = mod.typeToUnion(ty).?;
+ const loaded_union = ip.loadUnionType(ty.toIntern());
if (un.tag == .none) {
- const backing_ty = try ty.unionBackingType(mod);
- switch (union_obj.getLayout(ip)) {
+ const backing_ty = try ty.unionBackingType(zcu);
+ switch (loaded_union.getLayout(ip)) {
.@"packed" => {
if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderType(writer, backing_ty);
try writer.writeByte(')');
}
- try dg.renderValue(writer, backing_ty, Value.fromInterned(un.val), initializer_type);
+ try dg.renderValue(writer, Value.fromInterned(un.val), initializer_type);
},
.@"extern" => {
if (location == .StaticInitializer) {
return dg.fail("TODO: C backend: implement extern union backing type rendering in static initializers", .{});
}
- const ptr_ty = try mod.singleConstPtrType(ty);
+ const ptr_ty = try zcu.singleConstPtrType(ty);
try writer.writeAll("*((");
try dg.renderType(writer, ptr_ty);
try writer.writeAll(")(");
try dg.renderType(writer, backing_ty);
try writer.writeAll("){");
- try dg.renderValue(writer, backing_ty, Value.fromInterned(un.val), initializer_type);
+ try dg.renderValue(writer, Value.fromInterned(un.val), initializer_type);
try writer.writeAll("})");
},
else => unreachable,
@@ -1523,21 +1372,21 @@ pub const DeclGen = struct {
try writer.writeByte(')');
}
- const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
- const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
- if (union_obj.getLayout(ip) == .@"packed") {
- if (field_ty.hasRuntimeBits(mod)) {
- if (field_ty.isPtrAtRuntime(mod)) {
+ const field_index = zcu.unionTagFieldIndex(loaded_union, Value.fromInterned(un.tag)).?;
+ const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
+ const field_name = loaded_union.loadTagType(ip).names.get(ip)[field_index];
+ if (loaded_union.getLayout(ip) == .@"packed") {
+ if (field_ty.hasRuntimeBits(zcu)) {
+ if (field_ty.isPtrAtRuntime(zcu)) {
try writer.writeByte('(');
try dg.renderType(writer, ty);
try writer.writeByte(')');
- } else if (field_ty.zigTypeTag(mod) == .Float) {
+ } else if (field_ty.zigTypeTag(zcu) == .Float) {
try writer.writeByte('(');
try dg.renderType(writer, ty);
try writer.writeByte(')');
}
- try dg.renderValue(writer, field_ty, Value.fromInterned(un.val), initializer_type);
+ try dg.renderValue(writer, Value.fromInterned(un.val), initializer_type);
} else {
try writer.writeAll("0");
}
@@ -1545,33 +1394,291 @@ pub const DeclGen = struct {
}
try writer.writeByte('{');
- if (ty.unionTagTypeSafety(mod)) |tag_ty| {
- const layout = mod.getUnionLayout(union_obj);
+ if (ty.unionTagTypeSafety(zcu)) |_| {
+ const layout = zcu.getUnionLayout(loaded_union);
if (layout.tag_size != 0) {
try writer.writeAll(" .tag = ");
- try dg.renderValue(writer, tag_ty, Value.fromInterned(un.tag), initializer_type);
+ try dg.renderValue(writer, Value.fromInterned(un.tag), initializer_type);
}
- if (ty.unionHasAllZeroBitFieldTypes(mod)) return try writer.writeByte('}');
+ if (ty.unionHasAllZeroBitFieldTypes(zcu)) return try writer.writeByte('}');
if (layout.tag_size != 0) try writer.writeByte(',');
try writer.writeAll(" .payload = {");
}
- if (field_ty.hasRuntimeBits(mod)) {
+ if (field_ty.hasRuntimeBits(zcu)) {
try writer.print(" .{ } = ", .{fmtIdent(ip.stringToSlice(field_name))});
- try dg.renderValue(writer, field_ty, Value.fromInterned(un.val), initializer_type);
+ try dg.renderValue(writer, Value.fromInterned(un.val), initializer_type);
try writer.writeByte(' ');
- } else for (0..union_obj.field_types.len) |this_field_index| {
- const this_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[this_field_index]);
- if (!this_field_ty.hasRuntimeBits(mod)) continue;
- try dg.renderValue(writer, this_field_ty, Value.undef, initializer_type);
+ } else for (0..loaded_union.field_types.len) |this_field_index| {
+ const this_field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[this_field_index]);
+ if (!this_field_ty.hasRuntimeBits(zcu)) continue;
+ try dg.renderUndefValue(writer, this_field_ty, initializer_type);
break;
}
- if (ty.unionTagTypeSafety(mod)) |_| try writer.writeByte('}');
+ if (ty.unionTagTypeSafety(zcu)) |_| try writer.writeByte('}');
try writer.writeByte('}');
}
},
}
}
+ fn renderUndefValue(
+ dg: *DeclGen,
+ writer: anytype,
+ ty: Type,
+ location: ValueRenderLocation,
+ ) error{ OutOfMemory, AnalysisFail }!void {
+ const zcu = dg.zcu;
+ const ip = &zcu.intern_pool;
+ const target = &dg.mod.resolved_target.result;
+
+ const initializer_type: ValueRenderLocation = switch (location) {
+ .StaticInitializer => .StaticInitializer,
+ else => .Initializer,
+ };
+
+ const safety_on = switch (zcu.optimizeMode()) {
+ .Debug, .ReleaseSafe => true,
+ .ReleaseFast, .ReleaseSmall => false,
+ };
+
+ switch (ty.toIntern()) {
+ .c_longdouble_type,
+ .f16_type,
+ .f32_type,
+ .f64_type,
+ .f80_type,
+ .f128_type,
+ => {
+ const bits = ty.floatBits(target.*);
+ // All unsigned ints matching float types are pre-allocated.
+ const repr_ty = zcu.intType(.unsigned, bits) catch unreachable;
+
+ try writer.writeAll("zig_make_");
+ try dg.renderTypeForBuiltinFnName(writer, ty);
+ try writer.writeByte('(');
+ switch (bits) {
+ 16 => try writer.print("{x}", .{@as(f16, @bitCast(undefPattern(i16)))}),
+ 32 => try writer.print("{x}", .{@as(f32, @bitCast(undefPattern(i32)))}),
+ 64 => try writer.print("{x}", .{@as(f64, @bitCast(undefPattern(i64)))}),
+ 80 => try writer.print("{x}", .{@as(f80, @bitCast(undefPattern(i80)))}),
+ 128 => try writer.print("{x}", .{@as(f128, @bitCast(undefPattern(i128)))}),
+ else => unreachable,
+ }
+ try writer.writeAll(", ");
+ try dg.renderUndefValue(writer, repr_ty, .FunctionArgument);
+ return writer.writeByte(')');
+ },
+ .bool_type => try writer.writeAll(if (safety_on) "0xaa" else "false"),
+ else => switch (ip.indexToKey(ty.toIntern())) {
+ .simple_type,
+ .int_type,
+ .enum_type,
+ .error_set_type,
+ .inferred_error_set_type,
+ => return writer.print("{x}", .{
+ try dg.fmtIntLiteral(try zcu.undefValue(ty), location),
+ }),
+ .ptr_type => if (ty.isSlice(zcu)) {
+ if (!location.isInitializer()) {
+ try writer.writeByte('(');
+ try dg.renderType(writer, ty);
+ try writer.writeByte(')');
+ }
+
+ try writer.writeAll("{(");
+ const ptr_ty = ty.slicePtrFieldType(zcu);
+ try dg.renderType(writer, ptr_ty);
+ return writer.print("){x}, {0x}}}", .{
+ try dg.fmtIntLiteral(try zcu.undefValue(Type.usize), .Other),
+ });
+ } else {
+ try writer.writeAll("((");
+ try dg.renderType(writer, ty);
+ return writer.print("){x})", .{
+ try dg.fmtIntLiteral(try zcu.undefValue(Type.usize), .Other),
+ });
+ },
+ .opt_type => {
+ const payload_ty = ty.optionalChild(zcu);
+
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ return dg.renderUndefValue(writer, Type.bool, location);
+ }
+
+ if (ty.optionalReprIsPayload(zcu)) {
+ return dg.renderUndefValue(writer, payload_ty, location);
+ }
+
+ if (!location.isInitializer()) {
+ try writer.writeByte('(');
+ try dg.renderType(writer, ty);
+ try writer.writeByte(')');
+ }
+
+ try writer.writeAll("{ .payload = ");
+ try dg.renderUndefValue(writer, payload_ty, initializer_type);
+ try writer.writeAll(", .is_null = ");
+ try dg.renderUndefValue(writer, Type.bool, initializer_type);
+ return writer.writeAll(" }");
+ },
+ .struct_type => {
+ const loaded_struct = ip.loadStructType(ty.toIntern());
+ switch (loaded_struct.layout) {
+ .auto, .@"extern" => {
+ if (!location.isInitializer()) {
+ try writer.writeByte('(');
+ try dg.renderType(writer, ty);
+ try writer.writeByte(')');
+ }
+
+ try writer.writeByte('{');
+ var field_it = loaded_struct.iterateRuntimeOrder(ip);
+ var need_comma = false;
+ while (field_it.next()) |field_index| {
+ const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+
+ if (need_comma) try writer.writeByte(',');
+ need_comma = true;
+ try dg.renderUndefValue(writer, field_ty, initializer_type);
+ }
+ return writer.writeByte('}');
+ },
+ .@"packed" => return writer.print("{x}", .{
+ try dg.fmtIntLiteral(try zcu.undefValue(ty), .Other),
+ }),
+ }
+ },
+ .anon_struct_type => |anon_struct_info| {
+ if (!location.isInitializer()) {
+ try writer.writeByte('(');
+ try dg.renderType(writer, ty);
+ try writer.writeByte(')');
+ }
+
+ try writer.writeByte('{');
+ var need_comma = false;
+ for (0..anon_struct_info.types.len) |field_index| {
+ if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
+ const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+
+ if (need_comma) try writer.writeByte(',');
+ need_comma = true;
+ try dg.renderUndefValue(writer, field_ty, initializer_type);
+ }
+ return writer.writeByte('}');
+ },
+ .union_type => {
+ const loaded_union = ip.loadUnionType(ty.toIntern());
+ switch (loaded_union.getLayout(ip)) {
+ .auto, .@"extern" => {
+ if (!location.isInitializer()) {
+ try writer.writeByte('(');
+ try dg.renderType(writer, ty);
+ try writer.writeByte(')');
+ }
+
+ try writer.writeByte('{');
+ if (ty.unionTagTypeSafety(zcu)) |tag_ty| {
+ const layout = ty.unionGetLayout(zcu);
+ if (layout.tag_size != 0) {
+ try writer.writeAll(" .tag = ");
+ try dg.renderUndefValue(writer, tag_ty, initializer_type);
+ }
+ if (ty.unionHasAllZeroBitFieldTypes(zcu)) return try writer.writeByte('}');
+ if (layout.tag_size != 0) try writer.writeByte(',');
+ try writer.writeAll(" .payload = {");
+ }
+ for (0..loaded_union.field_types.len) |field_index| {
+ const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
+ if (!field_ty.hasRuntimeBits(zcu)) continue;
+ try dg.renderUndefValue(writer, field_ty, initializer_type);
+ break;
+ }
+ if (ty.unionTagTypeSafety(zcu)) |_| try writer.writeByte('}');
+ return writer.writeByte('}');
+ },
+ .@"packed" => return writer.print("{x}", .{
+ try dg.fmtIntLiteral(try zcu.undefValue(ty), .Other),
+ }),
+ }
+ },
+ .error_union_type => {
+ const payload_ty = ty.errorUnionPayload(zcu);
+ const error_ty = ty.errorUnionSet(zcu);
+
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ return dg.renderUndefValue(writer, error_ty, location);
+ }
+
+ if (!location.isInitializer()) {
+ try writer.writeByte('(');
+ try dg.renderType(writer, ty);
+ try writer.writeByte(')');
+ }
+
+ try writer.writeAll("{ .payload = ");
+ try dg.renderUndefValue(writer, payload_ty, initializer_type);
+ try writer.writeAll(", .error = ");
+ try dg.renderUndefValue(writer, error_ty, initializer_type);
+ return writer.writeAll(" }");
+ },
+ .array_type, .vector_type => {
+ const ai = ty.arrayInfo(zcu);
+ if (ai.elem_type.eql(Type.u8, zcu)) {
+ const c_len = ty.arrayLenIncludingSentinel(zcu);
+ var literal = stringLiteral(writer, c_len);
+ try literal.start();
+ var index: u64 = 0;
+ while (index < c_len) : (index += 1)
+ try literal.writeChar(0xaa);
+ return literal.end();
+ } else {
+ if (!location.isInitializer()) {
+ try writer.writeByte('(');
+ try dg.renderType(writer, ty);
+ try writer.writeByte(')');
+ }
+
+ try writer.writeByte('{');
+ const c_len = ty.arrayLenIncludingSentinel(zcu);
+ var index: u64 = 0;
+ while (index < c_len) : (index += 1) {
+ if (index > 0) try writer.writeAll(", ");
+ try dg.renderUndefValue(writer, ty.childType(zcu), initializer_type);
+ }
+ return writer.writeByte('}');
+ }
+ },
+ .anyframe_type,
+ .opaque_type,
+ .func_type,
+ => unreachable,
+
+ .undef,
+ .simple_value,
+ .variable,
+ .extern_func,
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .empty_enum_value,
+ .float,
+ .ptr,
+ .slice,
+ .opt,
+ .aggregate,
+ .un,
+ .memoized_call,
+ => unreachable,
+ },
+ }
+ }
+
fn renderFunctionSignature(
dg: *DeclGen,
w: anytype,
@@ -1582,15 +1689,14 @@ pub const DeclGen = struct {
ident: []const u8,
},
) !void {
- const store = &dg.ctypes.set;
- const mod = dg.module;
- const ip = &mod.intern_pool;
+ const zcu = dg.zcu;
+ const ip = &zcu.intern_pool;
- const fn_decl = mod.declPtr(fn_decl_index);
- const fn_ty = fn_decl.typeOf(mod);
- const fn_cty_idx = try dg.typeToIndex(fn_ty, kind);
+ const fn_decl = zcu.declPtr(fn_decl_index);
+ const fn_ty = fn_decl.typeOf(zcu);
+ const fn_ctype = try dg.ctypeFromType(fn_ty, kind);
- const fn_info = mod.typeToFunc(fn_ty).?;
+ const fn_info = zcu.typeToFunc(fn_ty).?;
if (fn_info.cc == .Naked) {
switch (kind) {
.forward => try w.writeAll("zig_naked_decl "),
@@ -1598,11 +1704,11 @@ pub const DeclGen = struct {
else => unreachable,
}
}
- if (fn_decl.val.getFunction(mod)) |func| if (func.analysis(ip).is_cold)
+ if (fn_decl.val.getFunction(zcu)) |func| if (func.analysis(ip).is_cold)
try w.writeAll("zig_cold ");
if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn ");
- var trailing = try renderTypePrefix(dg.pass, store.*, mod, w, fn_cty_idx, .suffix, .{});
+ var trailing = try renderTypePrefix(dg.pass, &dg.ctype_pool, zcu, w, fn_ctype, .suffix, .{});
if (toCallingConvention(fn_info.cc)) |call_conv| {
try w.print("{}zig_callconv({s})", .{ trailing, call_conv });
@@ -1611,7 +1717,7 @@ pub const DeclGen = struct {
switch (kind) {
.forward => {},
- .complete => if (fn_decl.alignment.toByteUnitsOptional()) |a| {
+ .complete => if (fn_decl.alignment.toByteUnits()) |a| {
try w.print("{}zig_align_fn({})", .{ trailing, a });
trailing = .maybe_space;
},
@@ -1628,10 +1734,10 @@ pub const DeclGen = struct {
try renderTypeSuffix(
dg.pass,
- store.*,
- mod,
+ &dg.ctype_pool,
+ zcu,
w,
- fn_cty_idx,
+ fn_ctype,
.suffix,
CQualifiers.init(.{ .@"const" = switch (kind) {
.forward => false,
@@ -1642,16 +1748,16 @@ pub const DeclGen = struct {
switch (kind) {
.forward => {
- if (fn_decl.alignment.toByteUnitsOptional()) |a| {
+ if (fn_decl.alignment.toByteUnits()) |a| {
try w.print(" zig_align_fn({})", .{a});
}
switch (name) {
.export_index => |export_index| mangled: {
- const maybe_exports = mod.decl_exports.get(fn_decl_index);
+ const maybe_exports = zcu.decl_exports.get(fn_decl_index);
const external_name = ip.stringToSlice(
if (maybe_exports) |exports|
exports.items[export_index].opts.name
- else if (fn_decl.isExtern(mod))
+ else if (fn_decl.isExtern(zcu))
fn_decl.name
else
break :mangled,
@@ -1689,20 +1795,13 @@ pub const DeclGen = struct {
}
}
- fn indexToCType(dg: *DeclGen, idx: CType.Index) CType {
- return dg.ctypes.indexToCType(idx);
+ fn ctypeFromType(dg: *DeclGen, ty: Type, kind: CType.Kind) !CType {
+ defer std.debug.assert(dg.scratch.items.len == 0);
+ return dg.ctype_pool.fromType(dg.gpa, &dg.scratch, ty, dg.zcu, dg.mod, kind);
}
- fn typeToIndex(dg: *DeclGen, ty: Type, kind: CType.Kind) !CType.Index {
- return dg.ctypes.typeToIndex(dg.gpa, ty, dg.module, kind);
- }
-
- fn typeToCType(dg: *DeclGen, ty: Type, kind: CType.Kind) !CType {
- return dg.ctypes.typeToCType(dg.gpa, ty, dg.module, kind);
- }
-
- fn byteSize(dg: *DeclGen, cty: CType) u64 {
- return cty.byteSize(dg.ctypes.set, dg.module.getTarget());
+ fn byteSize(dg: *DeclGen, ctype: CType) u64 {
+ return ctype.byteSize(&dg.ctype_pool, dg.mod);
}
/// Renders a type as a single identifier, generating intermediate typedefs
@@ -1717,14 +1816,12 @@ pub const DeclGen = struct {
/// | `renderType` | "uint8_t *" | "uint8_t *[10]" |
///
fn renderType(dg: *DeclGen, w: anytype, t: Type) error{ OutOfMemory, AnalysisFail }!void {
- try dg.renderCType(w, try dg.typeToIndex(t, .complete));
+ try dg.renderCType(w, try dg.ctypeFromType(t, .complete));
}
- fn renderCType(dg: *DeclGen, w: anytype, idx: CType.Index) error{ OutOfMemory, AnalysisFail }!void {
- const store = &dg.ctypes.set;
- const mod = dg.module;
- _ = try renderTypePrefix(dg.pass, store.*, mod, w, idx, .suffix, .{});
- try renderTypeSuffix(dg.pass, store.*, mod, w, idx, .suffix, .{});
+ fn renderCType(dg: *DeclGen, w: anytype, ctype: CType) error{ OutOfMemory, AnalysisFail }!void {
+ _ = try renderTypePrefix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, .{});
+ try renderTypeSuffix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, .{});
}
const IntCastContext = union(enum) {
@@ -1737,15 +1834,13 @@ pub const DeclGen = struct {
value: Value,
},
- pub fn writeValue(self: *const IntCastContext, dg: *DeclGen, w: anytype, value_ty: Type, location: ValueRenderLocation) !void {
+ pub fn writeValue(self: *const IntCastContext, dg: *DeclGen, w: anytype, location: ValueRenderLocation) !void {
switch (self.*) {
.c_value => |v| {
try v.f.writeCValue(w, v.value, location);
try v.v.elem(v.f, w);
},
- .value => |v| {
- try dg.renderValue(w, value_ty, v.value, location);
- },
+ .value => |v| try dg.renderValue(w, v.value, location),
}
}
};
@@ -1764,18 +1859,18 @@ pub const DeclGen = struct {
/// | > 64 bit integer | < 64 bit integer | zig_make_(0, src)
/// | > 64 bit integer | > 64 bit integer | zig_make_(zig_hi_(src), zig_lo_(src))
fn renderIntCast(dg: *DeclGen, w: anytype, dest_ty: Type, context: IntCastContext, src_ty: Type, location: ValueRenderLocation) !void {
- const mod = dg.module;
- const dest_bits = dest_ty.bitSize(mod);
- const dest_int_info = dest_ty.intInfo(mod);
+ const zcu = dg.zcu;
+ const dest_bits = dest_ty.bitSize(zcu);
+ const dest_int_info = dest_ty.intInfo(zcu);
- const src_is_ptr = src_ty.isPtrAtRuntime(mod);
+ const src_is_ptr = src_ty.isPtrAtRuntime(zcu);
const src_eff_ty: Type = if (src_is_ptr) switch (dest_int_info.signedness) {
.unsigned => Type.usize,
.signed => Type.isize,
} else src_ty;
- const src_bits = src_eff_ty.bitSize(mod);
- const src_int_info = if (src_eff_ty.isAbiInt(mod)) src_eff_ty.intInfo(mod) else null;
+ const src_bits = src_eff_ty.bitSize(zcu);
+ const src_int_info = if (src_eff_ty.isAbiInt(zcu)) src_eff_ty.intInfo(zcu) else null;
if (dest_bits <= 64 and src_bits <= 64) {
const needs_cast = src_int_info == null or
(toCIntBits(dest_int_info.bits) != toCIntBits(src_int_info.?.bits) or
@@ -1791,7 +1886,7 @@ pub const DeclGen = struct {
try dg.renderType(w, src_eff_ty);
try w.writeByte(')');
}
- try context.writeValue(dg, w, src_ty, location);
+ try context.writeValue(dg, w, location);
} else if (dest_bits <= 64 and src_bits > 64) {
assert(!src_is_ptr);
if (dest_bits < 64) {
@@ -1802,7 +1897,7 @@ pub const DeclGen = struct {
try w.writeAll("zig_lo_");
try dg.renderTypeForBuiltinFnName(w, src_eff_ty);
try w.writeByte('(');
- try context.writeValue(dg, w, src_ty, .FunctionArgument);
+ try context.writeValue(dg, w, .FunctionArgument);
try w.writeByte(')');
} else if (dest_bits > 64 and src_bits <= 64) {
try w.writeAll("zig_make_");
@@ -1813,7 +1908,7 @@ pub const DeclGen = struct {
try dg.renderType(w, src_eff_ty);
try w.writeByte(')');
}
- try context.writeValue(dg, w, src_ty, .FunctionArgument);
+ try context.writeValue(dg, w, .FunctionArgument);
try w.writeByte(')');
} else {
assert(!src_is_ptr);
@@ -1822,11 +1917,11 @@ pub const DeclGen = struct {
try w.writeAll("(zig_hi_");
try dg.renderTypeForBuiltinFnName(w, src_eff_ty);
try w.writeByte('(');
- try context.writeValue(dg, w, src_ty, .FunctionArgument);
+ try context.writeValue(dg, w, .FunctionArgument);
try w.writeAll("), zig_lo_");
try dg.renderTypeForBuiltinFnName(w, src_eff_ty);
try w.writeByte('(');
- try context.writeValue(dg, w, src_ty, .FunctionArgument);
+ try context.writeValue(dg, w, .FunctionArgument);
try w.writeAll("))");
}
}
@@ -1848,61 +1943,73 @@ pub const DeclGen = struct {
alignment: Alignment,
kind: CType.Kind,
) error{ OutOfMemory, AnalysisFail }!void {
- const mod = dg.module;
- const alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod));
- try dg.renderCTypeAndName(w, try dg.typeToIndex(ty, kind), name, qualifiers, alignas);
+ try dg.renderCTypeAndName(
+ w,
+ try dg.ctypeFromType(ty, kind),
+ name,
+ qualifiers,
+ CType.AlignAs.fromAlignment(.{
+ .@"align" = alignment,
+ .abi = ty.abiAlignment(dg.zcu),
+ }),
+ );
}
fn renderCTypeAndName(
dg: *DeclGen,
w: anytype,
- cty_idx: CType.Index,
+ ctype: CType,
name: CValue,
qualifiers: CQualifiers,
alignas: CType.AlignAs,
) error{ OutOfMemory, AnalysisFail }!void {
- const store = &dg.ctypes.set;
- const mod = dg.module;
-
switch (alignas.abiOrder()) {
.lt => try w.print("zig_under_align({}) ", .{alignas.toByteUnits()}),
.eq => {},
.gt => try w.print("zig_align({}) ", .{alignas.toByteUnits()}),
}
- const trailing = try renderTypePrefix(dg.pass, store.*, mod, w, cty_idx, .suffix, qualifiers);
- try w.print("{}", .{trailing});
- try dg.writeCValue(w, name);
- try renderTypeSuffix(dg.pass, store.*, mod, w, cty_idx, .suffix, .{});
+ try w.print("{}", .{
+ try renderTypePrefix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, qualifiers),
+ });
+ try dg.writeName(w, name);
+ try renderTypeSuffix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, .{});
}
fn declIsGlobal(dg: *DeclGen, val: Value) bool {
- const mod = dg.module;
- return switch (mod.intern_pool.indexToKey(val.ip_index)) {
- .variable => |variable| mod.decl_exports.contains(variable.decl),
+ const zcu = dg.zcu;
+ return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
+ .variable => |variable| zcu.decl_exports.contains(variable.decl),
.extern_func => true,
- .func => |func| mod.decl_exports.contains(func.owner_decl),
+ .func => |func| zcu.decl_exports.contains(func.owner_decl),
else => unreachable,
};
}
+ fn writeName(dg: *DeclGen, w: anytype, c_value: CValue) !void {
+ switch (c_value) {
+ .new_local, .local => |i| try w.print("t{d}", .{i}),
+ .constant => |val| try renderAnonDeclName(w, val),
+ .decl => |decl| try dg.renderDeclName(w, decl, 0),
+ .identifier => |ident| try w.print("{ }", .{fmtIdent(ident)}),
+ else => unreachable,
+ }
+ }
+
fn writeCValue(dg: *DeclGen, w: anytype, c_value: CValue) !void {
switch (c_value) {
- .none => unreachable,
- .local, .new_local => |i| return w.print("t{d}", .{i}),
- .local_ref => |i| return w.print("&t{d}", .{i}),
- .constant => |val| return renderAnonDeclName(w, val),
- .arg => |i| return w.print("a{d}", .{i}),
- .arg_array => |i| return dg.writeCValueMember(w, .{ .arg = i }, .{ .identifier = "array" }),
- .field => |i| return w.print("f{d}", .{i}),
- .decl => |decl| return dg.renderDeclName(w, decl, 0),
+ .none, .new_local, .local, .local_ref => unreachable,
+ .constant => |val| try renderAnonDeclName(w, val),
+ .arg, .arg_array => unreachable,
+ .field => |i| try w.print("f{d}", .{i}),
+ .decl => |decl| try dg.renderDeclName(w, decl, 0),
.decl_ref => |decl| {
try w.writeByte('&');
- return dg.renderDeclName(w, decl, 0);
+ try dg.renderDeclName(w, decl, 0);
},
- .undef => |ty| return dg.renderValue(w, ty, Value.undef, .Other),
- .identifier => |ident| return w.print("{ }", .{fmtIdent(ident)}),
- .payload_identifier => |ident| return w.print("{ }.{ }", .{
+ .undef => |ty| try dg.renderUndefValue(w, ty, .Other),
+ .identifier => |ident| try w.print("{ }", .{fmtIdent(ident)}),
+ .payload_identifier => |ident| try w.print("{ }.{ }", .{
fmtIdent("payload"),
fmtIdent(ident),
}),
@@ -1911,26 +2018,17 @@ pub const DeclGen = struct {
fn writeCValueDeref(dg: *DeclGen, w: anytype, c_value: CValue) !void {
switch (c_value) {
- .none => unreachable,
- .local, .new_local => |i| return w.print("(*t{d})", .{i}),
- .local_ref => |i| return w.print("t{d}", .{i}),
- .constant => unreachable,
- .arg => |i| return w.print("(*a{d})", .{i}),
- .arg_array => |i| {
- try w.writeAll("(*");
- try dg.writeCValueMember(w, .{ .arg = i }, .{ .identifier = "array" });
- return w.writeByte(')');
- },
- .field => |i| return w.print("f{d}", .{i}),
+ .none, .new_local, .local, .local_ref, .constant, .arg, .arg_array => unreachable,
+ .field => |i| try w.print("f{d}", .{i}),
.decl => |decl| {
try w.writeAll("(*");
try dg.renderDeclName(w, decl, 0);
- return w.writeByte(')');
+ try w.writeByte(')');
},
- .decl_ref => |decl| return dg.renderDeclName(w, decl, 0),
+ .decl_ref => |decl| try dg.renderDeclName(w, decl, 0),
.undef => unreachable,
- .identifier => |ident| return w.print("(*{ })", .{fmtIdent(ident)}),
- .payload_identifier => |ident| return w.print("(*{ }.{ })", .{
+ .identifier => |ident| try w.print("(*{ })", .{fmtIdent(ident)}),
+ .payload_identifier => |ident| try w.print("(*{ }.{ })", .{
fmtIdent("payload"),
fmtIdent(ident),
}),
@@ -1950,12 +2048,12 @@ pub const DeclGen = struct {
fn writeCValueDerefMember(dg: *DeclGen, writer: anytype, c_value: CValue, member: CValue) !void {
switch (c_value) {
- .none, .constant, .field, .undef => unreachable,
- .new_local, .local, .arg, .arg_array, .decl, .identifier, .payload_identifier => {
+ .none, .new_local, .local, .local_ref, .constant, .field, .undef, .arg, .arg_array => unreachable,
+ .decl, .identifier, .payload_identifier => {
try dg.writeCValue(writer, c_value);
try writer.writeAll("->");
},
- .local_ref, .decl_ref => {
+ .decl_ref => {
try dg.writeCValueDeref(writer, c_value);
try writer.writeByte('.');
},
@@ -1969,11 +2067,12 @@ pub const DeclGen = struct {
variable: InternPool.Key.Variable,
fwd_kind: enum { tentative, final },
) !void {
- const decl = dg.module.declPtr(decl_index);
+ const zcu = dg.zcu;
+ const decl = zcu.declPtr(decl_index);
const fwd = dg.fwdDeclWriter();
const is_global = variable.is_extern or dg.declIsGlobal(decl.val);
try fwd.writeAll(if (is_global) "zig_extern " else "static ");
- const maybe_exports = dg.module.decl_exports.get(decl_index);
+ const maybe_exports = zcu.decl_exports.get(decl_index);
const export_weak_linkage = if (maybe_exports) |exports|
exports.items[0].opts.linkage == .weak
else
@@ -1982,14 +2081,14 @@ pub const DeclGen = struct {
if (variable.is_threadlocal) try fwd.writeAll("zig_threadlocal ");
try dg.renderTypeAndName(
fwd,
- decl.typeOf(dg.module),
+ decl.typeOf(zcu),
.{ .decl = decl_index },
CQualifiers.init(.{ .@"const" = variable.is_const }),
decl.alignment,
.complete,
);
mangled: {
- const external_name = dg.module.intern_pool.stringToSlice(if (maybe_exports) |exports|
+ const external_name = zcu.intern_pool.stringToSlice(if (maybe_exports) |exports|
exports.items[0].opts.name
else if (variable.is_extern)
decl.name
@@ -2007,23 +2106,23 @@ pub const DeclGen = struct {
}
fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: InternPool.DeclIndex, export_index: u32) !void {
- const mod = dg.module;
- const decl = mod.declPtr(decl_index);
+ const zcu = dg.zcu;
+ const decl = zcu.declPtr(decl_index);
- if (mod.decl_exports.get(decl_index)) |exports| {
+ if (zcu.decl_exports.get(decl_index)) |exports| {
try writer.print("{ }", .{
- fmtIdent(mod.intern_pool.stringToSlice(exports.items[export_index].opts.name)),
+ fmtIdent(zcu.intern_pool.stringToSlice(exports.items[export_index].opts.name)),
});
- } else if (decl.getExternDecl(mod).unwrap()) |extern_decl_index| {
+ } else if (decl.getExternDecl(zcu).unwrap()) |extern_decl_index| {
try writer.print("{ }", .{
- fmtIdent(mod.intern_pool.stringToSlice(mod.declPtr(extern_decl_index).name)),
+ fmtIdent(zcu.intern_pool.stringToSlice(zcu.declPtr(extern_decl_index).name)),
});
} else {
// MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case),
// expand to 3x the length of its input, but let's cut it off at a much shorter limit.
var name: [100]u8 = undefined;
var name_stream = std.io.fixedBufferStream(&name);
- decl.renderFullyQualifiedName(mod, name_stream.writer()) catch |err| switch (err) {
+ decl.renderFullyQualifiedName(zcu, name_stream.writer()) catch |err| switch (err) {
error.NoSpaceLeft => {},
};
try writer.print("{}__{d}", .{
@@ -2033,77 +2132,71 @@ pub const DeclGen = struct {
}
}
- fn renderAnonDeclName(writer: anytype, anon_decl_val: InternPool.Index) !void {
- return writer.print("__anon_{d}", .{@intFromEnum(anon_decl_val)});
+ fn renderAnonDeclName(writer: anytype, anon_decl_val: Value) !void {
+ try writer.print("__anon_{d}", .{@intFromEnum(anon_decl_val.toIntern())});
}
fn renderTypeForBuiltinFnName(dg: *DeclGen, writer: anytype, ty: Type) !void {
- try dg.renderCTypeForBuiltinFnName(writer, try dg.typeToCType(ty, .complete));
+ try dg.renderCTypeForBuiltinFnName(writer, try dg.ctypeFromType(ty, .complete));
}
- fn renderCTypeForBuiltinFnName(dg: *DeclGen, writer: anytype, cty: CType) !void {
- switch (cty.tag()) {
- else => try writer.print("{c}{d}", .{
- if (cty.isBool())
+ fn renderCTypeForBuiltinFnName(dg: *DeclGen, writer: anytype, ctype: CType) !void {
+ switch (ctype.info(&dg.ctype_pool)) {
+ else => |ctype_info| try writer.print("{c}{d}", .{
+ if (ctype.isBool())
signAbbrev(.unsigned)
- else if (cty.isInteger())
- signAbbrev(cty.signedness(dg.module.getTarget()))
- else if (cty.isFloat())
+ else if (ctype.isInteger())
+ signAbbrev(ctype.signedness(dg.mod))
+ else if (ctype.isFloat())
@as(u8, 'f')
- else if (cty.isPointer())
+ else if (ctype_info == .pointer)
@as(u8, 'p')
else
- return dg.fail("TODO: CBE: implement renderTypeForBuiltinFnName for type {}", .{
- cty.tag(),
- }),
- if (cty.isFloat()) cty.floatActiveBits(dg.module.getTarget()) else dg.byteSize(cty) * 8,
+ return dg.fail("TODO: CBE: implement renderTypeForBuiltinFnName for {s} type", .{@tagName(ctype_info)}),
+ if (ctype.isFloat()) ctype.floatActiveBits(dg.mod) else dg.byteSize(ctype) * 8,
}),
.array => try writer.writeAll("big"),
}
}
fn renderBuiltinInfo(dg: *DeclGen, writer: anytype, ty: Type, info: BuiltinInfo) !void {
- const cty = try dg.typeToCType(ty, .complete);
- const is_big = cty.tag() == .array;
-
+ const ctype = try dg.ctypeFromType(ty, .complete);
+ const is_big = ctype.info(&dg.ctype_pool) == .array;
switch (info) {
.none => if (!is_big) return,
.bits => {},
}
- const mod = dg.module;
- const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{
+ const zcu = dg.zcu;
+ const int_info = if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else std.builtin.Type.Int{
.signedness = .unsigned,
- .bits = @as(u16, @intCast(ty.bitSize(mod))),
+ .bits = @as(u16, @intCast(ty.bitSize(zcu))),
};
if (is_big) try writer.print(", {}", .{int_info.signedness == .signed});
-
- const bits_ty = if (is_big) Type.u16 else Type.u8;
try writer.print(", {}", .{try dg.fmtIntLiteral(
- bits_ty,
- try mod.intValue(bits_ty, int_info.bits),
+ try zcu.intValue(if (is_big) Type.u16 else Type.u8, int_info.bits),
.FunctionArgument,
)});
}
fn fmtIntLiteral(
dg: *DeclGen,
- ty: Type,
val: Value,
loc: ValueRenderLocation,
) !std.fmt.Formatter(formatIntLiteral) {
- const mod = dg.module;
+ const zcu = dg.zcu;
const kind: CType.Kind = switch (loc) {
.FunctionArgument => .parameter,
.Initializer, .Other => .complete,
.StaticInitializer => .global,
};
+ const ty = val.typeOf(zcu);
return std.fmt.Formatter(formatIntLiteral){ .data = .{
.dg = dg,
- .int_info = ty.intInfo(mod),
+ .int_info = ty.intInfo(zcu),
.kind = kind,
- .cty = try dg.typeToCType(ty, kind),
+ .ctype = try dg.ctypeFromType(ty, kind),
.val = val,
} };
}
@@ -2132,122 +2225,74 @@ const RenderCTypeTrailing = enum {
}
}
};
-fn renderTypeName(
- mod: *Module,
+fn renderAlignedTypeName(w: anytype, ctype: CType) !void {
+ try w.print("anon__aligned_{d}", .{@intFromEnum(ctype.index)});
+}
+fn renderFwdDeclTypeName(
+ zcu: *Zcu,
w: anytype,
- idx: CType.Index,
- cty: CType,
+ ctype: CType,
+ fwd_decl: CType.Info.FwdDecl,
attributes: []const u8,
) !void {
- switch (cty.tag()) {
- else => unreachable,
-
- .fwd_anon_struct,
- .fwd_anon_union,
- => |tag| try w.print("{s} {s}anon__lazy_{d}", .{
- @tagName(tag)["fwd_anon_".len..],
- attributes,
- idx,
+ try w.print("{s} {s}", .{ @tagName(fwd_decl.tag), attributes });
+ switch (fwd_decl.name) {
+ .anon => try w.print("anon__lazy_{d}", .{@intFromEnum(ctype.index)}),
+ .owner_decl => |owner_decl| try w.print("{}__{d}", .{
+ fmtIdent(zcu.intern_pool.stringToSlice(zcu.declPtr(owner_decl).name)),
+ @intFromEnum(owner_decl),
}),
-
- .fwd_struct,
- .fwd_union,
- => |tag| {
- const owner_decl = cty.cast(CType.Payload.FwdDecl).?.data;
- try w.print("{s} {s}{}__{d}", .{
- @tagName(tag)["fwd_".len..],
- attributes,
- fmtIdent(mod.intern_pool.stringToSlice(mod.declPtr(owner_decl).name)),
- @intFromEnum(owner_decl),
- });
- },
}
}
fn renderTypePrefix(
pass: DeclGen.Pass,
- store: CType.Store.Set,
- mod: *Module,
+ ctype_pool: *const CType.Pool,
+ zcu: *Zcu,
w: anytype,
- idx: CType.Index,
+ ctype: CType,
parent_fix: CTypeFix,
qualifiers: CQualifiers,
) @TypeOf(w).Error!RenderCTypeTrailing {
var trailing = RenderCTypeTrailing.maybe_space;
+ switch (ctype.info(ctype_pool)) {
+ .basic => |basic_info| try w.writeAll(@tagName(basic_info)),
- const cty = store.indexToCType(idx);
- switch (cty.tag()) {
- .void,
- .char,
- .@"signed char",
- .short,
- .int,
- .long,
- .@"long long",
- ._Bool,
- .@"unsigned char",
- .@"unsigned short",
- .@"unsigned int",
- .@"unsigned long",
- .@"unsigned long long",
- .float,
- .double,
- .@"long double",
- .bool,
- .size_t,
- .ptrdiff_t,
- .uint8_t,
- .int8_t,
- .uint16_t,
- .int16_t,
- .uint32_t,
- .int32_t,
- .uint64_t,
- .int64_t,
- .uintptr_t,
- .intptr_t,
- .zig_u128,
- .zig_i128,
- .zig_f16,
- .zig_f32,
- .zig_f64,
- .zig_f80,
- .zig_f128,
- .zig_c_longdouble,
- => |tag| try w.writeAll(@tagName(tag)),
-
- .pointer,
- .pointer_const,
- .pointer_volatile,
- .pointer_const_volatile,
- => |tag| {
- const child_idx = cty.cast(CType.Payload.Child).?.data;
- const child_trailing = try renderTypePrefix(
+ .pointer => |pointer_info| {
+ try w.print("{}*", .{try renderTypePrefix(
pass,
- store,
- mod,
+ ctype_pool,
+ zcu,
w,
- child_idx,
+ pointer_info.elem_ctype,
.prefix,
- CQualifiers.init(.{ .@"const" = switch (tag) {
- .pointer, .pointer_volatile => false,
- .pointer_const, .pointer_const_volatile => true,
- else => unreachable,
- }, .@"volatile" = switch (tag) {
- .pointer, .pointer_const => false,
- .pointer_volatile, .pointer_const_volatile => true,
- else => unreachable,
- } }),
- );
- try w.print("{}*", .{child_trailing});
+ CQualifiers.init(.{
+ .@"const" = pointer_info.@"const",
+ .@"volatile" = pointer_info.@"volatile",
+ }),
+ )});
trailing = .no_space;
},
- .array,
- .vector,
- => {
- const child_idx = cty.cast(CType.Payload.Sequence).?.data.elem_type;
- const child_trailing =
- try renderTypePrefix(pass, store, mod, w, child_idx, .suffix, qualifiers);
+ .aligned => switch (pass) {
+ .decl => |decl_index| try w.print("decl__{d}_{d}", .{
+ @intFromEnum(decl_index), @intFromEnum(ctype.index),
+ }),
+ .anon => |anon_decl| try w.print("anon__{d}_{d}", .{
+ @intFromEnum(anon_decl), @intFromEnum(ctype.index),
+ }),
+ .flush => try renderAlignedTypeName(w, ctype),
+ },
+
+ .array, .vector => |sequence_info| {
+ const child_trailing = try renderTypePrefix(
+ pass,
+ ctype_pool,
+ zcu,
+ w,
+ sequence_info.elem_ctype,
+ .suffix,
+ qualifiers,
+ );
switch (parent_fix) {
.prefix => {
try w.print("{}(", .{child_trailing});
@@ -2257,56 +2302,46 @@ fn renderTypePrefix(
}
},
- .fwd_anon_struct,
- .fwd_anon_union,
- => switch (pass) {
- .decl => |decl_index| try w.print("decl__{d}_{d}", .{ @intFromEnum(decl_index), idx }),
- .anon => |anon_decl| try w.print("anon__{d}_{d}", .{ @intFromEnum(anon_decl), idx }),
- .flush => try renderTypeName(mod, w, idx, cty, ""),
+ .fwd_decl => |fwd_decl_info| switch (fwd_decl_info.name) {
+ .anon => switch (pass) {
+ .decl => |decl_index| try w.print("decl__{d}_{d}", .{
+ @intFromEnum(decl_index), @intFromEnum(ctype.index),
+ }),
+ .anon => |anon_decl| try w.print("anon__{d}_{d}", .{
+ @intFromEnum(anon_decl), @intFromEnum(ctype.index),
+ }),
+ .flush => try renderFwdDeclTypeName(zcu, w, ctype, fwd_decl_info, ""),
+ },
+ .owner_decl => try renderFwdDeclTypeName(zcu, w, ctype, fwd_decl_info, ""),
},
- .fwd_struct,
- .fwd_union,
- => try renderTypeName(mod, w, idx, cty, ""),
-
- .unnamed_struct,
- .unnamed_union,
- .packed_unnamed_struct,
- .packed_unnamed_union,
- => |tag| {
- try w.print("{s} {s}", .{
- @tagName(tag)["unnamed_".len..],
- if (cty.isPacked()) "zig_packed(" else "",
- });
- try renderAggregateFields(mod, w, store, cty, 1);
- if (cty.isPacked()) try w.writeByte(')');
+ .aggregate => |aggregate_info| switch (aggregate_info.name) {
+ .anon => {
+ try w.print("{s} {s}", .{
+ @tagName(aggregate_info.tag),
+ if (aggregate_info.@"packed") "zig_packed(" else "",
+ });
+ try renderFields(zcu, w, ctype_pool, aggregate_info, 1);
+ if (aggregate_info.@"packed") try w.writeByte(')');
+ },
+ .fwd_decl => |fwd_decl| return renderTypePrefix(
+ pass,
+ ctype_pool,
+ zcu,
+ w,
+ fwd_decl,
+ parent_fix,
+ qualifiers,
+ ),
},
- .anon_struct,
- .anon_union,
- .@"struct",
- .@"union",
- .packed_struct,
- .packed_union,
- => return renderTypePrefix(
- pass,
- store,
- mod,
- w,
- cty.cast(CType.Payload.Aggregate).?.data.fwd_decl,
- parent_fix,
- qualifiers,
- ),
-
- .function,
- .varargs_function,
- => {
+ .function => |function_info| {
const child_trailing = try renderTypePrefix(
pass,
- store,
- mod,
+ ctype_pool,
+ zcu,
w,
- cty.cast(CType.Payload.Function).?.data.return_type,
+ function_info.return_ctype,
.suffix,
.{},
);
@@ -2319,170 +2354,107 @@ fn renderTypePrefix(
}
},
}
-
var qualifier_it = qualifiers.iterator();
while (qualifier_it.next()) |qualifier| {
try w.print("{}{s}", .{ trailing, @tagName(qualifier) });
trailing = .maybe_space;
}
-
return trailing;
}
fn renderTypeSuffix(
pass: DeclGen.Pass,
- store: CType.Store.Set,
- mod: *Module,
+ ctype_pool: *const CType.Pool,
+ zcu: *Zcu,
w: anytype,
- idx: CType.Index,
+ ctype: CType,
parent_fix: CTypeFix,
qualifiers: CQualifiers,
) @TypeOf(w).Error!void {
- const cty = store.indexToCType(idx);
- switch (cty.tag()) {
- .void,
- .char,
- .@"signed char",
- .short,
- .int,
- .long,
- .@"long long",
- ._Bool,
- .@"unsigned char",
- .@"unsigned short",
- .@"unsigned int",
- .@"unsigned long",
- .@"unsigned long long",
- .float,
- .double,
- .@"long double",
- .bool,
- .size_t,
- .ptrdiff_t,
- .uint8_t,
- .int8_t,
- .uint16_t,
- .int16_t,
- .uint32_t,
- .int32_t,
- .uint64_t,
- .int64_t,
- .uintptr_t,
- .intptr_t,
- .zig_u128,
- .zig_i128,
- .zig_f16,
- .zig_f32,
- .zig_f64,
- .zig_f80,
- .zig_f128,
- .zig_c_longdouble,
- => {},
-
- .pointer,
- .pointer_const,
- .pointer_volatile,
- .pointer_const_volatile,
- => try renderTypeSuffix(
+ switch (ctype.info(ctype_pool)) {
+ .basic, .aligned, .fwd_decl, .aggregate => {},
+ .pointer => |pointer_info| try renderTypeSuffix(
pass,
- store,
- mod,
+ ctype_pool,
+ zcu,
w,
- cty.cast(CType.Payload.Child).?.data,
+ pointer_info.elem_ctype,
.prefix,
.{},
),
-
- .array,
- .vector,
- => {
+ .array, .vector => |sequence_info| {
switch (parent_fix) {
.prefix => try w.writeByte(')'),
.suffix => {},
}
- try w.print("[{}]", .{cty.cast(CType.Payload.Sequence).?.data.len});
- try renderTypeSuffix(
- pass,
- store,
- mod,
- w,
- cty.cast(CType.Payload.Sequence).?.data.elem_type,
- .suffix,
- .{},
- );
+ try w.print("[{}]", .{sequence_info.len});
+ try renderTypeSuffix(pass, ctype_pool, zcu, w, sequence_info.elem_ctype, .suffix, .{});
},
-
- .fwd_anon_struct,
- .fwd_anon_union,
- .fwd_struct,
- .fwd_union,
- .unnamed_struct,
- .unnamed_union,
- .packed_unnamed_struct,
- .packed_unnamed_union,
- .anon_struct,
- .anon_union,
- .@"struct",
- .@"union",
- .packed_struct,
- .packed_union,
- => {},
-
- .function,
- .varargs_function,
- => |tag| {
+ .function => |function_info| {
switch (parent_fix) {
.prefix => try w.writeByte(')'),
.suffix => {},
}
- const data = cty.cast(CType.Payload.Function).?.data;
-
try w.writeByte('(');
var need_comma = false;
- for (data.param_types, 0..) |param_type, param_i| {
+ for (0..function_info.param_ctypes.len) |param_index| {
+ const param_type = function_info.param_ctypes.at(param_index, ctype_pool);
if (need_comma) try w.writeAll(", ");
need_comma = true;
const trailing =
- try renderTypePrefix(pass, store, mod, w, param_type, .suffix, qualifiers);
- if (qualifiers.contains(.@"const")) try w.print("{}a{d}", .{ trailing, param_i });
- try renderTypeSuffix(pass, store, mod, w, param_type, .suffix, .{});
+ try renderTypePrefix(pass, ctype_pool, zcu, w, param_type, .suffix, qualifiers);
+ if (qualifiers.contains(.@"const")) try w.print("{}a{d}", .{ trailing, param_index });
+ try renderTypeSuffix(pass, ctype_pool, zcu, w, param_type, .suffix, .{});
}
- switch (tag) {
- .function => {},
- .varargs_function => {
- if (need_comma) try w.writeAll(", ");
- need_comma = true;
- try w.writeAll("...");
- },
- else => unreachable,
+ if (function_info.varargs) {
+ if (need_comma) try w.writeAll(", ");
+ need_comma = true;
+ try w.writeAll("...");
}
if (!need_comma) try w.writeAll("void");
try w.writeByte(')');
- try renderTypeSuffix(pass, store, mod, w, data.return_type, .suffix, .{});
+ try renderTypeSuffix(pass, ctype_pool, zcu, w, function_info.return_ctype, .suffix, .{});
},
}
}
-fn renderAggregateFields(
- mod: *Module,
+fn renderFields(
+ zcu: *Zcu,
writer: anytype,
- store: CType.Store.Set,
- cty: CType,
+ ctype_pool: *const CType.Pool,
+ aggregate_info: CType.Info.Aggregate,
indent: usize,
) !void {
try writer.writeAll("{\n");
- const fields = cty.fields();
- for (fields) |field| {
+ for (0..aggregate_info.fields.len) |field_index| {
+ const field_info = aggregate_info.fields.at(field_index, ctype_pool);
try writer.writeByteNTimes(' ', indent + 1);
- switch (field.alignas.abiOrder()) {
- .lt => try writer.print("zig_under_align({}) ", .{field.alignas.toByteUnits()}),
- .eq => {},
- .gt => try writer.print("zig_align({}) ", .{field.alignas.toByteUnits()}),
+ switch (field_info.alignas.abiOrder()) {
+ .lt => {
+ std.debug.assert(aggregate_info.@"packed");
+ if (field_info.alignas.@"align" != .@"1") try writer.print("zig_under_align({}) ", .{
+ field_info.alignas.toByteUnits(),
+ });
+ },
+ .eq => if (aggregate_info.@"packed" and field_info.alignas.@"align" != .@"1")
+ try writer.print("zig_align({}) ", .{field_info.alignas.toByteUnits()}),
+ .gt => {
+ std.debug.assert(field_info.alignas.@"align" != .@"1");
+ try writer.print("zig_align({}) ", .{field_info.alignas.toByteUnits()});
+ },
}
- const trailing = try renderTypePrefix(.flush, store, mod, writer, field.type, .suffix, .{});
- try writer.print("{}{ }", .{ trailing, fmtIdent(mem.span(field.name)) });
- try renderTypeSuffix(.flush, store, mod, writer, field.type, .suffix, .{});
+ const trailing = try renderTypePrefix(
+ .flush,
+ ctype_pool,
+ zcu,
+ writer,
+ field_info.ctype,
+ .suffix,
+ .{},
+ );
+ try writer.print("{}{ }", .{ trailing, fmtIdent(field_info.name.slice(ctype_pool)) });
+ try renderTypeSuffix(.flush, ctype_pool, zcu, writer, field_info.ctype, .suffix, .{});
try writer.writeAll(";\n");
}
try writer.writeByteNTimes(' ', indent);
@@ -2490,106 +2462,112 @@ fn renderAggregateFields(
}
pub fn genTypeDecl(
- mod: *Module,
+ zcu: *Zcu,
writer: anytype,
- global_store: CType.Store.Set,
- global_idx: CType.Index,
+ global_ctype_pool: *const CType.Pool,
+ global_ctype: CType,
pass: DeclGen.Pass,
- decl_store: CType.Store.Set,
- decl_idx: CType.Index,
+ decl_ctype_pool: *const CType.Pool,
+ decl_ctype: CType,
found_existing: bool,
) !void {
- const global_cty = global_store.indexToCType(global_idx);
- switch (global_cty.tag()) {
- .fwd_anon_struct => if (pass != .flush) {
- try writer.writeAll("typedef ");
- _ = try renderTypePrefix(.flush, global_store, mod, writer, global_idx, .suffix, .{});
- try writer.writeByte(' ');
- _ = try renderTypePrefix(pass, decl_store, mod, writer, decl_idx, .suffix, .{});
- try writer.writeAll(";\n");
- },
-
- .fwd_struct,
- .fwd_union,
- .anon_struct,
- .anon_union,
- .@"struct",
- .@"union",
- .packed_struct,
- .packed_union,
- => |tag| if (!found_existing) {
- switch (tag) {
- .fwd_struct,
- .fwd_union,
- => {
- const owner_decl = global_cty.cast(CType.Payload.FwdDecl).?.data;
- _ = try renderTypePrefix(
- .flush,
- global_store,
- mod,
- writer,
- global_idx,
- .suffix,
- .{},
- );
- try writer.writeAll("; /* ");
- try mod.declPtr(owner_decl).renderFullyQualifiedName(mod, writer);
- try writer.writeAll(" */\n");
- },
-
- .anon_struct,
- .anon_union,
- .@"struct",
- .@"union",
- .packed_struct,
- .packed_union,
- => {
- const fwd_idx = global_cty.cast(CType.Payload.Aggregate).?.data.fwd_decl;
- try renderTypeName(
- mod,
- writer,
- fwd_idx,
- global_store.indexToCType(fwd_idx),
- if (global_cty.isPacked()) "zig_packed(" else "",
- );
+ switch (global_ctype.info(global_ctype_pool)) {
+ .basic, .pointer, .array, .vector, .function => {},
+ .aligned => |aligned_info| {
+ if (!found_existing) {
+ std.debug.assert(aligned_info.alignas.abiOrder().compare(.lt));
+ try writer.print("typedef zig_under_align({d}) ", .{aligned_info.alignas.toByteUnits()});
+ try writer.print("{}", .{try renderTypePrefix(
+ .flush,
+ global_ctype_pool,
+ zcu,
+ writer,
+ aligned_info.ctype,
+ .suffix,
+ .{},
+ )});
+ try renderAlignedTypeName(writer, global_ctype);
+ try renderTypeSuffix(.flush, global_ctype_pool, zcu, writer, aligned_info.ctype, .suffix, .{});
+ try writer.writeAll(";\n");
+ }
+ switch (pass) {
+ .decl, .anon => {
+ try writer.writeAll("typedef ");
+ _ = try renderTypePrefix(.flush, global_ctype_pool, zcu, writer, global_ctype, .suffix, .{});
try writer.writeByte(' ');
- try renderAggregateFields(mod, writer, global_store, global_cty, 0);
- if (global_cty.isPacked()) try writer.writeByte(')');
+ _ = try renderTypePrefix(pass, decl_ctype_pool, zcu, writer, decl_ctype, .suffix, .{});
try writer.writeAll(";\n");
},
-
- else => unreachable,
+ .flush => {},
}
},
-
- else => {},
+ .fwd_decl => |fwd_decl_info| switch (fwd_decl_info.name) {
+ .anon => switch (pass) {
+ .decl, .anon => {
+ try writer.writeAll("typedef ");
+ _ = try renderTypePrefix(.flush, global_ctype_pool, zcu, writer, global_ctype, .suffix, .{});
+ try writer.writeByte(' ');
+ _ = try renderTypePrefix(pass, decl_ctype_pool, zcu, writer, decl_ctype, .suffix, .{});
+ try writer.writeAll(";\n");
+ },
+ .flush => {},
+ },
+ .owner_decl => |owner_decl_index| if (!found_existing) {
+ _ = try renderTypePrefix(.flush, global_ctype_pool, zcu, writer, global_ctype, .suffix, .{});
+ try writer.writeByte(';');
+ const owner_decl = zcu.declPtr(owner_decl_index);
+ const owner_mod = zcu.namespacePtr(owner_decl.src_namespace).file_scope.mod;
+ if (!owner_mod.strip) {
+ try writer.writeAll(" /* ");
+ try owner_decl.renderFullyQualifiedName(zcu, writer);
+ try writer.writeAll(" */");
+ }
+ try writer.writeByte('\n');
+ },
+ },
+ .aggregate => |aggregate_info| switch (aggregate_info.name) {
+ .anon => {},
+ .fwd_decl => |fwd_decl| if (!found_existing) {
+ try renderFwdDeclTypeName(
+ zcu,
+ writer,
+ fwd_decl,
+ fwd_decl.info(global_ctype_pool).fwd_decl,
+ if (aggregate_info.@"packed") "zig_packed(" else "",
+ );
+ try writer.writeByte(' ');
+ try renderFields(zcu, writer, global_ctype_pool, aggregate_info, 0);
+ if (aggregate_info.@"packed") try writer.writeByte(')');
+ try writer.writeAll(";\n");
+ },
+ },
}
}
-pub fn genGlobalAsm(mod: *Module, writer: anytype) !void {
- for (mod.global_assembly.values()) |asm_source| {
+pub fn genGlobalAsm(zcu: *Zcu, writer: anytype) !void {
+ for (zcu.global_assembly.values()) |asm_source| {
try writer.print("__asm({s});\n", .{fmtStringLiteral(asm_source, null)});
}
}
pub fn genErrDecls(o: *Object) !void {
- const mod = o.dg.module;
- const ip = &mod.intern_pool;
+ const zcu = o.dg.zcu;
+ const ip = &zcu.intern_pool;
const writer = o.writer();
var max_name_len: usize = 0;
// do not generate an invalid empty enum when the global error set is empty
- if (mod.global_error_set.keys().len > 1) {
+ if (zcu.global_error_set.keys().len > 1) {
try writer.writeAll("enum {\n");
o.indent_writer.pushIndent();
- for (mod.global_error_set.keys()[1..], 1..) |name_nts, value| {
+ for (zcu.global_error_set.keys()[1..], 1..) |name_nts, value| {
const name = ip.stringToSlice(name_nts);
max_name_len = @max(name.len, max_name_len);
- const err_val = try mod.intern(.{ .err = .{
+ const err_val = try zcu.intern(.{ .err = .{
.ty = .anyerror_type,
.name = name_nts,
} });
- try o.dg.renderValue(writer, Type.anyerror, Value.fromInterned(err_val), .Other);
+ try o.dg.renderValue(writer, Value.fromInterned(err_val), .Other);
try writer.print(" = {d}u,\n", .{value});
}
o.indent_writer.popIndent();
@@ -2601,44 +2579,56 @@ pub fn genErrDecls(o: *Object) !void {
defer o.dg.gpa.free(name_buf);
@memcpy(name_buf[0..name_prefix.len], name_prefix);
- for (mod.global_error_set.keys()) |name_ip| {
+ for (zcu.global_error_set.keys()) |name_ip| {
const name = ip.stringToSlice(name_ip);
@memcpy(name_buf[name_prefix.len..][0..name.len], name);
const identifier = name_buf[0 .. name_prefix.len + name.len];
- const name_ty = try mod.arrayType(.{
+ const name_ty = try zcu.arrayType(.{
.len = name.len,
.child = .u8_type,
.sentinel = .zero_u8,
});
- const name_val = try mod.intern(.{ .aggregate = .{
+ const name_val = try zcu.intern(.{ .aggregate = .{
.ty = name_ty.toIntern(),
.storage = .{ .bytes = name },
} });
try writer.writeAll("static ");
- try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, Const, .none, .complete);
+ try o.dg.renderTypeAndName(
+ writer,
+ name_ty,
+ .{ .identifier = identifier },
+ Const,
+ .none,
+ .complete,
+ );
try writer.writeAll(" = ");
- try o.dg.renderValue(writer, name_ty, Value.fromInterned(name_val), .StaticInitializer);
+ try o.dg.renderValue(writer, Value.fromInterned(name_val), .StaticInitializer);
try writer.writeAll(";\n");
}
- const name_array_ty = try mod.arrayType(.{
- .len = mod.global_error_set.count(),
+ const name_array_ty = try zcu.arrayType(.{
+ .len = zcu.global_error_set.count(),
.child = .slice_const_u8_sentinel_0_type,
});
try writer.writeAll("static ");
- try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = array_identifier }, Const, .none, .complete);
+ try o.dg.renderTypeAndName(
+ writer,
+ name_array_ty,
+ .{ .identifier = array_identifier },
+ Const,
+ .none,
+ .complete,
+ );
try writer.writeAll(" = {");
- for (mod.global_error_set.keys(), 0..) |name_nts, value| {
+ for (zcu.global_error_set.keys(), 0..) |name_nts, value| {
const name = ip.stringToSlice(name_nts);
if (value != 0) try writer.writeByte(',');
-
- const len_val = try mod.intValue(Type.usize, name.len);
-
try writer.print("{{" ++ name_prefix ++ "{}, {}}}", .{
- fmtIdent(name), try o.dg.fmtIntLiteral(Type.usize, len_val, .StaticInitializer),
+ fmtIdent(name),
+ try o.dg.fmtIntLiteral(try zcu.intValue(Type.usize, name.len), .StaticInitializer),
});
}
try writer.writeAll("};\n");
@@ -2648,16 +2638,16 @@ fn genExports(o: *Object) !void {
const tracy = trace(@src());
defer tracy.end();
- const mod = o.dg.module;
- const ip = &mod.intern_pool;
+ const zcu = o.dg.zcu;
+ const ip = &zcu.intern_pool;
const decl_index = switch (o.dg.pass) {
.decl => |decl| decl,
.anon, .flush => return,
};
- const decl = mod.declPtr(decl_index);
+ const decl = zcu.declPtr(decl_index);
const fwd = o.dg.fwdDeclWriter();
- const exports = mod.decl_exports.get(decl_index) orelse return;
+ const exports = zcu.decl_exports.get(decl_index) orelse return;
if (exports.items.len < 2) return;
const is_variable_const = switch (ip.indexToKey(decl.val.toIntern())) {
@@ -2685,7 +2675,7 @@ fn genExports(o: *Object) !void {
const export_name = ip.stringToSlice(@"export".opts.name);
try o.dg.renderTypeAndName(
fwd,
- decl.typeOf(mod),
+ decl.typeOf(zcu),
.{ .identifier = export_name },
CQualifiers.init(.{ .@"const" = is_variable_const }),
decl.alignment,
@@ -2707,13 +2697,13 @@ fn genExports(o: *Object) !void {
}
}
-pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
- const mod = o.dg.module;
- const ip = &mod.intern_pool;
+pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFnMap.Entry) !void {
+ const zcu = o.dg.zcu;
+ const ip = &zcu.intern_pool;
+ const ctype_pool = &o.dg.ctype_pool;
const w = o.writer();
const key = lazy_fn.key_ptr.*;
const val = lazy_fn.value_ptr;
- const fn_name = val.fn_name;
switch (key) {
.tag_name => {
const enum_ty = val.data.tag_name;
@@ -2723,52 +2713,51 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
try w.writeAll("static ");
try o.dg.renderType(w, name_slice_ty);
try w.writeByte(' ');
- try w.writeAll(fn_name);
+ try w.writeAll(val.fn_name.slice(lazy_ctype_pool));
try w.writeByte('(');
try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, .none, .complete);
try w.writeAll(") {\n switch (tag) {\n");
- const tag_names = enum_ty.enumFields(mod);
+ const tag_names = enum_ty.enumFields(zcu);
for (0..tag_names.len) |tag_index| {
const tag_name = ip.stringToSlice(tag_names.get(ip)[tag_index]);
- const tag_val = try mod.enumValueFieldIndex(enum_ty, @intCast(tag_index));
+ const tag_val = try zcu.enumValueFieldIndex(enum_ty, @intCast(tag_index));
- const int_val = try tag_val.intFromEnum(enum_ty, mod);
-
- const name_ty = try mod.arrayType(.{
+ const name_ty = try zcu.arrayType(.{
.len = tag_name.len,
.child = .u8_type,
.sentinel = .zero_u8,
});
- const name_val = try mod.intern(.{ .aggregate = .{
+ const name_val = try zcu.intern(.{ .aggregate = .{
.ty = name_ty.toIntern(),
.storage = .{ .bytes = tag_name },
} });
- const len_val = try mod.intValue(Type.usize, tag_name.len);
try w.print(" case {}: {{\n static ", .{
- try o.dg.fmtIntLiteral(enum_ty, int_val, .Other),
+ try o.dg.fmtIntLiteral(try tag_val.intFromEnum(enum_ty, zcu), .Other),
});
try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, .none, .complete);
try w.writeAll(" = ");
- try o.dg.renderValue(w, name_ty, Value.fromInterned(name_val), .Initializer);
+ try o.dg.renderValue(w, Value.fromInterned(name_val), .Initializer);
try w.writeAll(";\n return (");
try o.dg.renderType(w, name_slice_ty);
try w.print("){{{}, {}}};\n", .{
- fmtIdent("name"), try o.dg.fmtIntLiteral(Type.usize, len_val, .Other),
+ fmtIdent("name"),
+ try o.dg.fmtIntLiteral(try zcu.intValue(Type.usize, tag_name.len), .Other),
});
try w.writeAll(" }\n");
}
try w.writeAll(" }\n while (");
- try o.dg.renderValue(w, Type.bool, Value.true, .Other);
+ try o.dg.renderValue(w, Value.true, .Other);
try w.writeAll(") ");
_ = try airBreakpoint(w);
try w.writeAll("}\n");
},
.never_tail, .never_inline => |fn_decl_index| {
- const fn_decl = mod.declPtr(fn_decl_index);
- const fn_cty = try o.dg.typeToCType(fn_decl.typeOf(mod), .complete);
- const fn_info = fn_cty.cast(CType.Payload.Function).?.data;
+ const fn_decl = zcu.declPtr(fn_decl_index);
+ const fn_ctype = try o.dg.ctypeFromType(fn_decl.typeOf(zcu), .complete);
+ const fn_info = fn_ctype.info(ctype_pool).function;
+ const fn_name = val.fn_name.slice(lazy_ctype_pool);
const fwd_decl_writer = o.dg.fwdDeclWriter();
try fwd_decl_writer.print("static zig_{s} ", .{@tagName(key)});
@@ -2781,11 +2770,13 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
try fwd_decl_writer.writeAll(";\n");
try w.print("static zig_{s} ", .{@tagName(key)});
- try o.dg.renderFunctionSignature(w, fn_decl_index, .complete, .{ .ident = fn_name });
+ try o.dg.renderFunctionSignature(w, fn_decl_index, .complete, .{
+ .ident = fn_name,
+ });
try w.writeAll(" {\n return ");
try o.dg.renderDeclName(w, fn_decl_index, 0);
try w.writeByte('(');
- for (0..fn_info.param_types.len) |arg| {
+ for (0..fn_info.param_ctypes.len) |arg| {
if (arg > 0) try w.writeAll(", ");
try o.dg.writeCValue(w, .{ .arg = arg });
}
@@ -2799,10 +2790,10 @@ pub fn genFunc(f: *Function) !void {
defer tracy.end();
const o = &f.object;
- const mod = o.dg.module;
+ const zcu = o.dg.zcu;
const gpa = o.dg.gpa;
const decl_index = o.dg.pass.decl;
- const decl = mod.declPtr(decl_index);
+ const decl = zcu.declPtr(decl_index);
o.code_header = std.ArrayList(u8).init(gpa);
defer o.code_header.deinit();
@@ -2811,7 +2802,7 @@ pub fn genFunc(f: *Function) !void {
const fwd_decl_writer = o.dg.fwdDeclWriter();
try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static ");
- if (mod.decl_exports.get(decl_index)) |exports|
+ if (zcu.decl_exports.get(decl_index)) |exports|
if (exports.items[0].opts.linkage == .weak) try fwd_decl_writer.writeAll("zig_weak_linkage_fn ");
try o.dg.renderFunctionSignature(fwd_decl_writer, decl_index, .forward, .{ .export_index = 0 });
try fwd_decl_writer.writeAll(";\n");
@@ -2819,6 +2810,8 @@ pub fn genFunc(f: *Function) !void {
try o.indent_writer.insertNewline();
if (!is_global) try o.writer().writeAll("static ");
+ if (zcu.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
+ try o.writer().print("zig_linksection_fn({s}) ", .{fmtStringLiteral(s, null)});
try o.dg.renderFunctionSignature(o.writer(), decl_index, .complete, .{ .export_index = 0 });
try o.writer().writeByte(' ');
@@ -2867,7 +2860,7 @@ pub fn genFunc(f: *Function) !void {
for (free_locals.values()) |list| {
for (list.keys()) |local_index| {
const local = f.locals.items[local_index];
- try o.dg.renderCTypeAndName(w, local.cty_idx, .{ .local = local_index }, .{}, local.alignas);
+ try o.dg.renderCTypeAndName(w, local.ctype, .{ .local = local_index }, .{}, local.flags.alignas);
try w.writeAll(";\n ");
}
}
@@ -2884,43 +2877,41 @@ pub fn genDecl(o: *Object) !void {
const tracy = trace(@src());
defer tracy.end();
- const mod = o.dg.module;
+ const zcu = o.dg.zcu;
const decl_index = o.dg.pass.decl;
- const decl = mod.declPtr(decl_index);
- const decl_val = decl.val;
- const decl_ty = decl_val.typeOf(mod);
+ const decl = zcu.declPtr(decl_index);
+ const decl_ty = decl.typeOf(zcu);
- if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return;
- if (decl_val.getExternFunc(mod)) |_| {
+ if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return;
+ if (decl.val.getExternFunc(zcu)) |_| {
const fwd_decl_writer = o.dg.fwdDeclWriter();
try fwd_decl_writer.writeAll("zig_extern ");
try o.dg.renderFunctionSignature(fwd_decl_writer, decl_index, .forward, .{ .export_index = 0 });
try fwd_decl_writer.writeAll(";\n");
try genExports(o);
- } else if (decl_val.getVariable(mod)) |variable| {
+ } else if (decl.val.getVariable(zcu)) |variable| {
try o.dg.renderFwdDecl(decl_index, variable, .final);
try genExports(o);
if (variable.is_extern) return;
- const is_global = variable.is_extern or o.dg.declIsGlobal(decl_val);
+ const is_global = variable.is_extern or o.dg.declIsGlobal(decl.val);
const w = o.writer();
if (!is_global) try w.writeAll("static ");
if (variable.is_weak_linkage) try w.writeAll("zig_weak_linkage ");
if (variable.is_threadlocal) try w.writeAll("zig_threadlocal ");
- if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
- try w.print("zig_linksection(\"{s}\", ", .{s});
+ if (zcu.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
+ try w.print("zig_linksection({s}) ", .{fmtStringLiteral(s, null)});
const decl_c_value = .{ .decl = decl_index };
try o.dg.renderTypeAndName(w, decl_ty, decl_c_value, .{}, decl.alignment, .complete);
- if (decl.@"linksection" != .none) try w.writeAll(", read, write)");
try w.writeAll(" = ");
- try o.dg.renderValue(w, decl_ty, Value.fromInterned(variable.init), .StaticInitializer);
+ try o.dg.renderValue(w, Value.fromInterned(variable.init), .StaticInitializer);
try w.writeByte(';');
try o.indent_writer.insertNewline();
} else {
- const is_global = o.dg.module.decl_exports.contains(decl_index);
+ const is_global = o.dg.zcu.decl_exports.contains(decl_index);
const decl_c_value = .{ .decl = decl_index };
- try genDeclValue(o, decl_val, is_global, decl_c_value, decl.alignment, decl.@"linksection");
+ try genDeclValue(o, decl.val, is_global, decl_c_value, decl.alignment, decl.@"linksection");
}
}
@@ -2930,19 +2921,19 @@ pub fn genDeclValue(
is_global: bool,
decl_c_value: CValue,
alignment: Alignment,
- link_section: InternPool.OptionalNullTerminatedString,
+ @"linksection": InternPool.OptionalNullTerminatedString,
) !void {
- const mod = o.dg.module;
+ const zcu = o.dg.zcu;
const fwd_decl_writer = o.dg.fwdDeclWriter();
- const ty = val.typeOf(mod);
+ const ty = val.typeOf(zcu);
try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static ");
try o.dg.renderTypeAndName(fwd_decl_writer, ty, decl_c_value, Const, alignment, .complete);
switch (o.dg.pass) {
.decl => |decl_index| {
- if (mod.decl_exports.get(decl_index)) |exports| {
- const export_name = mod.intern_pool.stringToSlice(exports.items[0].opts.name);
+ if (zcu.decl_exports.get(decl_index)) |exports| {
+ const export_name = zcu.intern_pool.stringToSlice(exports.items[0].opts.name);
if (isMangledIdent(export_name, true)) {
try fwd_decl_writer.print(" zig_mangled_final({ }, {s})", .{
fmtIdent(export_name), fmtStringLiteral(export_name, null),
@@ -2958,13 +2949,11 @@ pub fn genDeclValue(
const w = o.writer();
if (!is_global) try w.writeAll("static ");
-
- if (mod.intern_pool.stringToSliceUnwrap(link_section)) |s|
- try w.print("zig_linksection(\"{s}\", ", .{s});
+ if (zcu.intern_pool.stringToSliceUnwrap(@"linksection")) |s|
+ try w.print("zig_linksection({s}) ", .{fmtStringLiteral(s, null)});
try o.dg.renderTypeAndName(w, ty, decl_c_value, Const, alignment, .complete);
- if (link_section != .none) try w.writeAll(", read)");
try w.writeAll(" = ");
- try o.dg.renderValue(w, ty, val, .StaticInitializer);
+ try o.dg.renderValue(w, val, .StaticInitializer);
try w.writeAll(";\n");
}
@@ -2972,12 +2961,12 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void {
const tracy = trace(@src());
defer tracy.end();
- const mod = dg.module;
+ const zcu = dg.zcu;
const decl_index = dg.pass.decl;
- const decl = mod.declPtr(decl_index);
+ const decl = zcu.declPtr(decl_index);
const writer = dg.fwdDeclWriter();
- switch (decl.val.typeOf(mod).zigTypeTag(mod)) {
+ switch (decl.typeOf(zcu).zigTypeTag(zcu)) {
.Fn => if (dg.declIsGlobal(decl.val)) {
try writer.writeAll("zig_extern ");
try dg.renderFunctionSignature(writer, dg.pass.decl, .complete, .{ .export_index = 0 });
@@ -3060,8 +3049,8 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con
}
fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void {
- const mod = f.object.dg.module;
- const ip = &mod.intern_pool;
+ const zcu = f.object.dg.zcu;
+ const ip = &zcu.intern_pool;
const air_tags = f.air.instructions.items(.tag);
for (body) |inst| {
@@ -3096,10 +3085,10 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.div_trunc, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .none),
.rem => blk: {
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const lhs_scalar_ty = f.typeOf(bin_op.lhs).scalarType(mod);
+ const lhs_scalar_ty = f.typeOf(bin_op.lhs).scalarType(zcu);
// For binary operations @TypeOf(lhs)==@TypeOf(rhs),
// so we only check one.
- break :blk if (lhs_scalar_ty.isInt(mod))
+ break :blk if (lhs_scalar_ty.isInt(zcu))
try airBinOp(f, inst, "%", "rem", .none)
else
try airBinFloatOp(f, inst, "fmod");
@@ -3359,10 +3348,10 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [
}
fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const inst_ty = f.typeOfIndex(inst);
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@@ -3385,14 +3374,13 @@ fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
const inst_ty = f.typeOfIndex(inst);
const ptr_ty = f.typeOf(bin_op.lhs);
- const elem_ty = ptr_ty.childType(mod);
- const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod);
+ const elem_has_bits = ptr_ty.elemType2(zcu).hasRuntimeBitsIgnoreComptime(zcu);
const ptr = try f.resolveInst(bin_op.lhs);
const index = try f.resolveInst(bin_op.rhs);
@@ -3407,7 +3395,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
try f.renderType(writer, inst_ty);
try writer.writeByte(')');
if (elem_has_bits) try writer.writeByte('&');
- if (elem_has_bits and ptr_ty.ptrSize(mod) == .One) {
+ if (elem_has_bits and ptr_ty.ptrSize(zcu) == .One) {
// It's a pointer to an array, so we need to de-reference.
try f.writeCValueDeref(writer, ptr);
} else try f.writeCValue(writer, ptr, .Other);
@@ -3421,10 +3409,10 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const inst_ty = f.typeOfIndex(inst);
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@@ -3447,14 +3435,14 @@ fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
const inst_ty = f.typeOfIndex(inst);
const slice_ty = f.typeOf(bin_op.lhs);
- const elem_ty = slice_ty.elemType2(mod);
- const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod);
+ const elem_ty = slice_ty.elemType2(zcu);
+ const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(zcu);
const slice = try f.resolveInst(bin_op.lhs);
const index = try f.resolveInst(bin_op.rhs);
@@ -3477,10 +3465,10 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const inst_ty = f.typeOfIndex(inst);
- if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@@ -3503,47 +3491,53 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const inst_ty = f.typeOfIndex(inst);
- const elem_type = inst_ty.childType(mod);
- if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty };
-
- const local = try f.allocLocalValue(
- elem_type,
- inst_ty.ptrAlignment(mod),
- );
+ const elem_ty = inst_ty.childType(zcu);
+ if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty };
+
+ const local = try f.allocLocalValue(.{
+ .ctype = try f.ctypeFromType(elem_ty, .complete),
+ .alignas = CType.AlignAs.fromAlignment(.{
+ .@"align" = inst_ty.ptrInfo(zcu).flags.alignment,
+ .abi = elem_ty.abiAlignment(zcu),
+ }),
+ });
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
- const gpa = f.object.dg.module.gpa;
+ const gpa = f.object.dg.zcu.gpa;
try f.allocs.put(gpa, local.new_local, true);
return .{ .local_ref = local.new_local };
}
fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const inst_ty = f.typeOfIndex(inst);
- const elem_ty = inst_ty.childType(mod);
- if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty };
-
- const local = try f.allocLocalValue(
- elem_ty,
- inst_ty.ptrAlignment(mod),
- );
+ const elem_ty = inst_ty.childType(zcu);
+ if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty };
+
+ const local = try f.allocLocalValue(.{
+ .ctype = try f.ctypeFromType(elem_ty, .complete),
+ .alignas = CType.AlignAs.fromAlignment(.{
+ .@"align" = inst_ty.ptrInfo(zcu).flags.alignment,
+ .abi = elem_ty.abiAlignment(zcu),
+ }),
+ });
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
- const gpa = f.object.dg.module.gpa;
+ const gpa = f.object.dg.zcu.gpa;
try f.allocs.put(gpa, local.new_local, true);
return .{ .local_ref = local.new_local };
}
fn airArg(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.typeOfIndex(inst);
- const inst_cty = try f.typeToIndex(inst_ty, .parameter);
+ const inst_ctype = try f.ctypeFromType(inst_ty, .parameter);
const i = f.next_arg_index;
f.next_arg_index += 1;
- const result: CValue = if (inst_cty != try f.typeToIndex(inst_ty, .complete))
- .{ .arg_array = i }
+ const result: CValue = if (inst_ctype.eql(try f.ctypeFromType(inst_ty, .complete)))
+ .{ .arg = i }
else
- .{ .arg = i };
+ .{ .arg_array = i };
if (f.liveness.isUnused(inst)) {
const writer = f.object.writer();
@@ -3559,15 +3553,15 @@ fn airArg(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ptr_ty = f.typeOf(ty_op.operand);
- const ptr_scalar_ty = ptr_ty.scalarType(mod);
- const ptr_info = ptr_scalar_ty.ptrInfo(mod);
+ const ptr_scalar_ty = ptr_ty.scalarType(zcu);
+ const ptr_info = ptr_scalar_ty.ptrInfo(zcu);
const src_ty = Type.fromInterned(ptr_info.child);
- if (!src_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!src_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try reap(f, inst, &.{ty_op.operand});
return .none;
}
@@ -3577,10 +3571,10 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ty_op.operand});
const is_aligned = if (ptr_info.flags.alignment != .none)
- ptr_info.flags.alignment.compare(.gte, src_ty.abiAlignment(mod))
+ ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte)
else
true;
- const is_array = lowersToArray(src_ty, mod);
+ const is_array = lowersToArray(src_ty, zcu);
const need_memcpy = !is_aligned or is_array;
const writer = f.object.writer();
@@ -3600,12 +3594,12 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("))");
} else if (ptr_info.packed_offset.host_size > 0 and ptr_info.flags.vector_index == .none) {
const host_bits: u16 = ptr_info.packed_offset.host_size * 8;
- const host_ty = try mod.intType(.unsigned, host_bits);
+ const host_ty = try zcu.intType(.unsigned, host_bits);
- const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
- const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
+ const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
+ const bit_offset_val = try zcu.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
- const field_ty = try mod.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(mod))));
+ const field_ty = try zcu.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(zcu))));
try f.writeCValue(writer, local, .Other);
try v.elem(f, writer);
@@ -3616,9 +3610,9 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("((");
try f.renderType(writer, field_ty);
try writer.writeByte(')');
- const cant_cast = host_ty.isInt(mod) and host_ty.bitSize(mod) > 64;
+ const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64;
if (cant_cast) {
- if (field_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+ if (field_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_lo_");
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
@@ -3628,7 +3622,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte('(');
try f.writeCValueDeref(writer, operand);
try v.elem(f, writer);
- try writer.print(", {})", .{try f.fmtIntLiteral(bit_offset_ty, bit_offset_val)});
+ try writer.print(", {})", .{try f.fmtIntLiteral(bit_offset_val)});
if (cant_cast) try writer.writeByte(')');
try f.object.dg.renderBuiltinInfo(writer, field_ty, .bits);
try writer.writeByte(')');
@@ -3646,24 +3640,27 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const writer = f.object.writer();
const op_inst = un_op.toIndex();
const op_ty = f.typeOf(un_op);
- const ret_ty = if (is_ptr) op_ty.childType(mod) else op_ty;
- const lowered_ret_ty = try lowerFnRetTy(ret_ty, mod);
+ const ret_ty = if (is_ptr) op_ty.childType(zcu) else op_ty;
+ const ret_ctype = try f.ctypeFromType(ret_ty, .parameter);
if (op_inst != null and f.air.instructions.items(.tag)[@intFromEnum(op_inst.?)] == .call_always_tail) {
try reap(f, inst, &.{un_op});
_ = try airCall(f, op_inst.?, .always_tail);
- } else if (lowered_ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ } else if (ret_ctype.index != .void) {
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
var deref = is_ptr;
- const is_array = lowersToArray(ret_ty, mod);
+ const is_array = lowersToArray(ret_ty, zcu);
const ret_val = if (is_array) ret_val: {
- const array_local = try f.allocLocal(inst, lowered_ret_ty);
+ const array_local = try f.allocAlignedLocal(inst, .{
+ .ctype = ret_ctype,
+ .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(f.object.dg.zcu)),
+ });
try writer.writeAll("memcpy(");
try f.writeCValueMember(writer, array_local, .{ .identifier = "array" });
try writer.writeAll(", ");
@@ -3696,16 +3693,16 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
}
fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const inst_ty = f.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType(mod);
+ const inst_scalar_ty = inst_ty.scalarType(zcu);
const operand_ty = f.typeOf(ty_op.operand);
- const scalar_ty = operand_ty.scalarType(mod);
+ const scalar_ty = operand_ty.scalarType(zcu);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -3722,20 +3719,20 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const inst_ty = f.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType(mod);
- const dest_int_info = inst_scalar_ty.intInfo(mod);
+ const inst_scalar_ty = inst_ty.scalarType(zcu);
+ const dest_int_info = inst_scalar_ty.intInfo(zcu);
const dest_bits = dest_int_info.bits;
const dest_c_bits = toCIntBits(dest_int_info.bits) orelse
return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
const operand_ty = f.typeOf(ty_op.operand);
- const scalar_ty = operand_ty.scalarType(mod);
- const scalar_int_info = scalar_ty.intInfo(mod);
+ const scalar_ty = operand_ty.scalarType(zcu);
+ const scalar_int_info = scalar_ty.intInfo(zcu);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -3763,18 +3760,19 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
try v.elem(f, writer);
} else switch (dest_int_info.signedness) {
.unsigned => {
- const mask_val = try inst_scalar_ty.maxIntScalar(mod, scalar_ty);
try writer.writeAll("zig_and_");
try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
try f.writeCValue(writer, operand, .FunctionArgument);
try v.elem(f, writer);
- try writer.print(", {x})", .{try f.fmtIntLiteral(scalar_ty, mask_val)});
+ try writer.print(", {x})", .{
+ try f.fmtIntLiteral(try inst_scalar_ty.maxIntScalar(zcu, scalar_ty)),
+ });
},
.signed => {
const c_bits = toCIntBits(scalar_int_info.bits) orelse
return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
- const shift_val = try mod.intValue(Type.u8, c_bits - dest_bits);
+ const shift_val = try zcu.intValue(Type.u8, c_bits - dest_bits);
try writer.writeAll("zig_shr_");
try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
@@ -3792,9 +3790,9 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, operand, .FunctionArgument);
try v.elem(f, writer);
if (c_bits == 128) try writer.writeByte(')');
- try writer.print(", {})", .{try f.fmtIntLiteral(Type.u8, shift_val)});
+ try writer.print(", {})", .{try f.fmtIntLiteral(shift_val)});
if (c_bits == 128) try writer.writeByte(')');
- try writer.print(", {})", .{try f.fmtIntLiteral(Type.u8, shift_val)});
+ try writer.print(", {})", .{try f.fmtIntLiteral(shift_val)});
},
}
@@ -3821,18 +3819,18 @@ fn airIntFromBool(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
// *a = b;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = f.typeOf(bin_op.lhs);
- const ptr_scalar_ty = ptr_ty.scalarType(mod);
- const ptr_info = ptr_scalar_ty.ptrInfo(mod);
+ const ptr_scalar_ty = ptr_ty.scalarType(zcu);
+ const ptr_info = ptr_scalar_ty.ptrInfo(zcu);
const ptr_val = try f.resolveInst(bin_op.lhs);
const src_ty = f.typeOf(bin_op.rhs);
- const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep(mod) else false;
+ const val_is_undef = if (try f.air.value(bin_op.rhs, zcu)) |v| v.isUndefDeep(zcu) else false;
if (val_is_undef) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
@@ -3848,10 +3846,10 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
const is_aligned = if (ptr_info.flags.alignment != .none)
- ptr_info.flags.alignment.compare(.gte, src_ty.abiAlignment(mod))
+ ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte)
else
true;
- const is_array = lowersToArray(Type.fromInterned(ptr_info.child), mod);
+ const is_array = lowersToArray(Type.fromInterned(ptr_info.child), zcu);
const need_memcpy = !is_aligned or is_array;
const src_val = try f.resolveInst(bin_op.rhs);
@@ -3863,7 +3861,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
if (need_memcpy) {
// For this memcpy to safely work we need the rhs to have the same
// underlying type as the lhs (i.e. they must both be arrays of the same underlying type).
- assert(src_ty.eql(Type.fromInterned(ptr_info.child), f.object.dg.module));
+ assert(src_ty.eql(Type.fromInterned(ptr_info.child), f.object.dg.zcu));
// If the source is a constant, writeCValue will emit a brace initialization
// so work around this by initializing into new local.
@@ -3893,12 +3891,12 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
} else if (ptr_info.packed_offset.host_size > 0 and ptr_info.flags.vector_index == .none) {
const host_bits = ptr_info.packed_offset.host_size * 8;
- const host_ty = try mod.intType(.unsigned, host_bits);
+ const host_ty = try zcu.intType(.unsigned, host_bits);
- const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
- const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
+ const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
+ const bit_offset_val = try zcu.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
- const src_bits = src_ty.bitSize(mod);
+ const src_bits = src_ty.bitSize(zcu);
const ExpectedContents = [BigInt.Managed.default_capacity]BigIntLimb;
var stack align(@alignOf(ExpectedContents)) =
@@ -3911,7 +3909,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try mask.shiftLeft(&mask, ptr_info.packed_offset.bit_offset);
try mask.bitNotWrap(&mask, .unsigned, host_bits);
- const mask_val = try mod.intValue_big(host_ty, mask.toConst());
+ const mask_val = try zcu.intValue_big(host_ty, mask.toConst());
try f.writeCValueDeref(writer, ptr_val);
try v.elem(f, writer);
@@ -3922,12 +3920,12 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.writeByte('(');
try f.writeCValueDeref(writer, ptr_val);
try v.elem(f, writer);
- try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(host_ty, mask_val)});
+ try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(mask_val)});
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
- const cant_cast = host_ty.isInt(mod) and host_ty.bitSize(mod) > 64;
+ const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64;
if (cant_cast) {
- if (src_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+ if (src_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_make_");
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeAll("(0, ");
@@ -3937,7 +3935,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.writeByte(')');
}
- if (src_ty.isPtrAtRuntime(mod)) {
+ if (src_ty.isPtrAtRuntime(zcu)) {
try writer.writeByte('(');
try f.renderType(writer, Type.usize);
try writer.writeByte(')');
@@ -3945,7 +3943,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try f.writeCValue(writer, src_val, .Other);
try v.elem(f, writer);
if (cant_cast) try writer.writeByte(')');
- try writer.print(", {}))", .{try f.fmtIntLiteral(bit_offset_ty, bit_offset_val)});
+ try writer.print(", {}))", .{try f.fmtIntLiteral(bit_offset_val)});
} else {
try f.writeCValueDeref(writer, ptr_val);
try v.elem(f, writer);
@@ -3960,7 +3958,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: BuiltinInfo) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -3970,7 +3968,7 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
const inst_ty = f.typeOfIndex(inst);
const operand_ty = f.typeOf(bin_op.lhs);
- const scalar_ty = operand_ty.scalarType(mod);
+ const scalar_ty = operand_ty.scalarType(zcu);
const w = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -3998,11 +3996,11 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
}
fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_ty = f.typeOf(ty_op.operand);
- const scalar_ty = operand_ty.scalarType(mod);
- if (scalar_ty.ip_index != .bool_type) return try airUnBuiltinCall(f, inst, "not", .bits);
+ const scalar_ty = operand_ty.scalarType(zcu);
+ if (scalar_ty.toIntern() != .bool_type) return try airUnBuiltinCall(f, inst, "not", .bits);
const op = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -4031,11 +4029,11 @@ fn airBinOp(
operation: []const u8,
info: BuiltinInfo,
) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const operand_ty = f.typeOf(bin_op.lhs);
- const scalar_ty = operand_ty.scalarType(mod);
- if ((scalar_ty.isInt(mod) and scalar_ty.bitSize(mod) > 64) or scalar_ty.isRuntimeFloat())
+ const scalar_ty = operand_ty.scalarType(zcu);
+ if ((scalar_ty.isInt(zcu) and scalar_ty.bitSize(zcu) > 64) or scalar_ty.isRuntimeFloat())
return try airBinBuiltinCall(f, inst, operation, info);
const lhs = try f.resolveInst(bin_op.lhs);
@@ -4069,12 +4067,12 @@ fn airCmpOp(
data: anytype,
operator: std.math.CompareOperator,
) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const lhs_ty = f.typeOf(data.lhs);
- const scalar_ty = lhs_ty.scalarType(mod);
+ const scalar_ty = lhs_ty.scalarType(zcu);
- const scalar_bits = scalar_ty.bitSize(mod);
- if (scalar_ty.isInt(mod) and scalar_bits > 64)
+ const scalar_bits = scalar_ty.bitSize(zcu);
+ if (scalar_ty.isInt(zcu) and scalar_bits > 64)
return airCmpBuiltinCall(
f,
inst,
@@ -4092,7 +4090,7 @@ fn airCmpOp(
try reap(f, inst, &.{ data.lhs, data.rhs });
const rhs_ty = f.typeOf(data.rhs);
- const need_cast = lhs_ty.isSinglePointer(mod) or rhs_ty.isSinglePointer(mod);
+ const need_cast = lhs_ty.isSinglePointer(zcu) or rhs_ty.isSinglePointer(zcu);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
const v = try Vectorize.start(f, inst, writer, lhs_ty);
@@ -4117,12 +4115,12 @@ fn airEquality(
inst: Air.Inst.Index,
operator: std.math.CompareOperator,
) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const operand_ty = f.typeOf(bin_op.lhs);
- const operand_bits = operand_ty.bitSize(mod);
- if (operand_ty.isInt(mod) and operand_bits > 64)
+ const operand_bits = operand_ty.bitSize(zcu);
+ if (operand_ty.isInt(zcu) and operand_bits > 64)
return airCmpBuiltinCall(
f,
inst,
@@ -4145,7 +4143,7 @@ fn airEquality(
try f.writeCValue(writer, local, .Other);
try a.assign(f, writer);
- if (operand_ty.zigTypeTag(mod) == .Optional and !operand_ty.optionalReprIsPayload(mod)) {
+ if (operand_ty.zigTypeTag(zcu) == .Optional and !operand_ty.optionalReprIsPayload(zcu)) {
try f.writeCValueMember(writer, lhs, .{ .identifier = "is_null" });
try writer.writeAll(" || ");
try f.writeCValueMember(writer, rhs, .{ .identifier = "is_null" });
@@ -4184,7 +4182,7 @@ fn airCmpLtErrorsLen(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -4193,8 +4191,8 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const inst_ty = f.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType(mod);
- const elem_ty = inst_scalar_ty.elemType2(mod);
+ const inst_scalar_ty = inst_ty.scalarType(zcu);
+ const elem_ty = inst_scalar_ty.elemType2(zcu);
const local = try f.allocLocal(inst, inst_ty);
const writer = f.object.writer();
@@ -4203,7 +4201,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
try v.elem(f, writer);
try writer.writeAll(" = ");
- if (elem_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
// We must convert to and from integer types to prevent UB if the operation
// results in a NULL pointer, or if LHS is NULL. The operation is only UB
// if the result is NULL and then dereferenced.
@@ -4232,13 +4230,13 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
}
fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []const u8) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const inst_ty = f.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType(mod);
+ const inst_scalar_ty = inst_ty.scalarType(zcu);
- if (inst_scalar_ty.isInt(mod) and inst_scalar_ty.bitSize(mod) > 64)
+ if (inst_scalar_ty.isInt(zcu) and inst_scalar_ty.bitSize(zcu) > 64)
return try airBinBuiltinCall(f, inst, operation[1..], .none);
if (inst_scalar_ty.isRuntimeFloat())
return try airBinFloatOp(f, inst, operation);
@@ -4274,7 +4272,7 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons
}
fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -4283,7 +4281,7 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const inst_ty = f.typeOfIndex(inst);
- const ptr_ty = inst_ty.slicePtrFieldType(mod);
+ const ptr_ty = inst_ty.slicePtrFieldType(zcu);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -4291,9 +4289,6 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue {
const a = try Assignment.start(f, writer, ptr_ty);
try f.writeCValueMember(writer, local, .{ .identifier = "ptr" });
try a.assign(f, writer);
- try writer.writeByte('(');
- try f.renderType(writer, ptr_ty);
- try writer.writeByte(')');
try f.writeCValue(writer, ptr, .Other);
try a.end(f, writer);
}
@@ -4301,7 +4296,7 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue {
const a = try Assignment.start(f, writer, Type.usize);
try f.writeCValueMember(writer, local, .{ .identifier = "len" });
try a.assign(f, writer);
- try f.writeCValue(writer, len, .Other);
+ try f.writeCValue(writer, len, .Initializer);
try a.end(f, writer);
}
return local;
@@ -4312,7 +4307,7 @@ fn airCall(
inst: Air.Inst.Index,
modifier: std.builtin.CallModifier,
) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
// Not even allowed to call panic in a naked function.
if (f.object.dg.is_naked_fn) return .none;
@@ -4327,22 +4322,23 @@ fn airCall(
defer gpa.free(resolved_args);
for (resolved_args, args) |*resolved_arg, arg| {
const arg_ty = f.typeOf(arg);
- const arg_cty = try f.typeToIndex(arg_ty, .parameter);
- if (f.indexToCType(arg_cty).tag() == .void) {
+ const arg_ctype = try f.ctypeFromType(arg_ty, .parameter);
+ if (arg_ctype.index == .void) {
resolved_arg.* = .none;
continue;
}
resolved_arg.* = try f.resolveInst(arg);
- if (arg_cty != try f.typeToIndex(arg_ty, .complete)) {
- const lowered_arg_ty = try lowerFnRetTy(arg_ty, mod);
-
- const array_local = try f.allocLocal(inst, lowered_arg_ty);
+ if (!arg_ctype.eql(try f.ctypeFromType(arg_ty, .complete))) {
+ const array_local = try f.allocAlignedLocal(inst, .{
+ .ctype = arg_ctype,
+ .alignas = CType.AlignAs.fromAbiAlignment(arg_ty.abiAlignment(zcu)),
+ });
try writer.writeAll("memcpy(");
try f.writeCValueMember(writer, array_local, .{ .identifier = "array" });
try writer.writeAll(", ");
try f.writeCValue(writer, resolved_arg.*, .FunctionArgument);
try writer.writeAll(", sizeof(");
- try f.renderType(writer, lowered_arg_ty);
+ try f.renderCType(writer, arg_ctype);
try writer.writeAll("));\n");
resolved_arg.* = array_local;
}
@@ -4357,28 +4353,33 @@ fn airCall(
}
const callee_ty = f.typeOf(pl_op.operand);
- const fn_ty = switch (callee_ty.zigTypeTag(mod)) {
+ const fn_info = zcu.typeToFunc(switch (callee_ty.zigTypeTag(zcu)) {
.Fn => callee_ty,
- .Pointer => callee_ty.childType(mod),
+ .Pointer => callee_ty.childType(zcu),
else => unreachable,
- };
-
- const ret_ty = fn_ty.fnReturnType(mod);
- const lowered_ret_ty = try lowerFnRetTy(ret_ty, mod);
+ }).?;
+ const ret_ty = Type.fromInterned(fn_info.return_type);
+ const ret_ctype: CType = if (ret_ty.isNoReturn(zcu))
+ .{ .index = .void }
+ else
+ try f.ctypeFromType(ret_ty, .parameter);
const result_local = result: {
if (modifier == .always_tail) {
try writer.writeAll("zig_always_tail return ");
break :result .none;
- } else if (!lowered_ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ } else if (ret_ctype.index == .void) {
break :result .none;
} else if (f.liveness.isUnused(inst)) {
try writer.writeByte('(');
- try f.renderType(writer, Type.void);
+ try f.renderCType(writer, .{ .index = .void });
try writer.writeByte(')');
break :result .none;
} else {
- const local = try f.allocLocal(inst, lowered_ret_ty);
+ const local = try f.allocAlignedLocal(inst, .{
+ .ctype = ret_ctype,
+ .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(zcu)),
+ });
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
break :result local;
@@ -4388,8 +4389,8 @@ fn airCall(
callee: {
known: {
const fn_decl = fn_decl: {
- const callee_val = (try f.air.value(pl_op.operand, mod)) orelse break :known;
- break :fn_decl switch (mod.intern_pool.indexToKey(callee_val.ip_index)) {
+ const callee_val = (try f.air.value(pl_op.operand, zcu)) orelse break :known;
+ break :fn_decl switch (zcu.intern_pool.indexToKey(callee_val.toIntern())) {
.extern_func => |extern_func| extern_func.decl,
.func => |func| func.owner_decl,
.ptr => |ptr| switch (ptr.addr) {
@@ -4420,18 +4421,21 @@ fn airCall(
}
try writer.writeByte('(');
- var args_written: usize = 0;
+ var need_comma = false;
for (resolved_args) |resolved_arg| {
if (resolved_arg == .none) continue;
- if (args_written != 0) try writer.writeAll(", ");
+ if (need_comma) try writer.writeAll(", ");
+ need_comma = true;
try f.writeCValue(writer, resolved_arg, .FunctionArgument);
- if (resolved_arg == .new_local) try freeLocal(f, inst, resolved_arg.new_local, null);
- args_written += 1;
+ switch (resolved_arg) {
+ .new_local => |local| try freeLocal(f, inst, local, null),
+ else => {},
+ }
}
try writer.writeAll(");\n");
const result = result: {
- if (result_local == .none or !lowersToArray(ret_ty, mod))
+ if (result_local == .none or !lowersToArray(ret_ty, zcu))
break :result result_local;
const array_local = try f.allocLocal(inst, ret_ty);
@@ -4465,22 +4469,22 @@ fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airDbgInlineBlock(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
- const owner_decl = mod.funcOwnerDeclPtr(extra.data.func);
+ const owner_decl = zcu.funcOwnerDeclPtr(extra.data.func);
const writer = f.object.writer();
try writer.writeAll("/* ");
- try owner_decl.renderFullyQualifiedName(mod, writer);
+ try owner_decl.renderFullyQualifiedName(zcu, writer);
try writer.writeAll(" */ ");
return lowerBlock(f, inst, @ptrCast(f.air.extra[extra.end..][0..extra.data.body_len]));
}
fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const name = f.air.nullTerminatedString(pl_op.payload);
- const operand_is_undef = if (try f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep(mod) else false;
+ const operand_is_undef = if (try f.air.value(pl_op.operand, zcu)) |v| v.isUndefDeep(zcu) else false;
if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand);
try reap(f, inst, &.{pl_op.operand});
@@ -4496,7 +4500,7 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const liveness_block = f.liveness.getBlock(inst);
const block_id: usize = f.next_block_index;
@@ -4504,7 +4508,7 @@ fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index)
const writer = f.object.writer();
const inst_ty = f.typeOfIndex(inst);
- const result = if (inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !f.liveness.isUnused(inst))
+ const result = if (inst_ty.hasRuntimeBitsIgnoreComptime(zcu) and !f.liveness.isUnused(inst))
try f.allocLocal(inst, inst_ty)
else
.none;
@@ -4526,7 +4530,7 @@ fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index)
try f.object.indent_writer.insertNewline();
// noreturn blocks have no `br` instructions reaching them, so we don't want a label
- if (!f.typeOfIndex(inst).isNoReturn(mod)) {
+ if (!f.typeOfIndex(inst).isNoReturn(zcu)) {
// label must be followed by an expression, include an empty one.
try writer.print("zig_block_{d}:;\n", .{block_id});
}
@@ -4543,11 +4547,11 @@ fn airTry(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airTryPtr(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.TryPtr, ty_pl.payload);
const body: []const Air.Inst.Index = @ptrCast(f.air.extra[extra.end..][0..extra.data.body_len]);
- const err_union_ty = f.typeOf(extra.data.ptr).childType(mod);
+ const err_union_ty = f.typeOf(extra.data.ptr).childType(zcu);
return lowerTry(f, inst, extra.data.ptr, body, err_union_ty, true);
}
@@ -4559,15 +4563,15 @@ fn lowerTry(
err_union_ty: Type,
is_ptr: bool,
) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const err_union = try f.resolveInst(operand);
const inst_ty = f.typeOfIndex(inst);
const liveness_condbr = f.liveness.getCondBr(inst);
const writer = f.object.writer();
- const payload_ty = err_union_ty.errorUnionPayload(mod);
- const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod);
+ const payload_ty = err_union_ty.errorUnionPayload(zcu);
+ const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
- if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
+ if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
try writer.writeAll("if (");
if (!payload_has_bits) {
if (is_ptr)
@@ -4661,7 +4665,7 @@ const LocalResult = struct {
need_free: bool,
fn move(lr: LocalResult, f: *Function, inst: Air.Inst.Index, dest_ty: Type) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
if (lr.need_free) {
// Move the freshly allocated local to be owned by this instruction,
@@ -4673,7 +4677,7 @@ const LocalResult = struct {
try lr.free(f);
const writer = f.object.writer();
try f.writeCValue(writer, local, .Other);
- if (dest_ty.isAbiInt(mod)) {
+ if (dest_ty.isAbiInt(zcu)) {
try writer.writeAll(" = ");
} else {
try writer.writeAll(" = (");
@@ -4693,13 +4697,14 @@ const LocalResult = struct {
};
fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !LocalResult {
- const mod = f.object.dg.module;
- const target = mod.getTarget();
+ const zcu = f.object.dg.zcu;
+ const target = &f.object.dg.mod.resolved_target.result;
+ const ctype_pool = &f.object.dg.ctype_pool;
const writer = f.object.writer();
- if (operand_ty.isAbiInt(mod) and dest_ty.isAbiInt(mod)) {
- const src_info = dest_ty.intInfo(mod);
- const dest_info = operand_ty.intInfo(mod);
+ if (operand_ty.isAbiInt(zcu) and dest_ty.isAbiInt(zcu)) {
+ const src_info = dest_ty.intInfo(zcu);
+ const dest_info = operand_ty.intInfo(zcu);
if (src_info.signedness == dest_info.signedness and
src_info.bits == dest_info.bits)
{
@@ -4710,7 +4715,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
}
}
- if (dest_ty.isPtrAtRuntime(mod) and operand_ty.isPtrAtRuntime(mod)) {
+ if (dest_ty.isPtrAtRuntime(zcu) and operand_ty.isPtrAtRuntime(zcu)) {
const local = try f.allocLocal(null, dest_ty);
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = (");
@@ -4727,7 +4732,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
const operand_lval = if (operand == .constant) blk: {
const operand_local = try f.allocLocal(null, operand_ty);
try f.writeCValue(writer, operand_local, .Other);
- if (operand_ty.isAbiInt(mod)) {
+ if (operand_ty.isAbiInt(zcu)) {
try writer.writeAll(" = ");
} else {
try writer.writeAll(" = (");
@@ -4747,55 +4752,60 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
try writer.writeAll(", sizeof(");
try f.renderType(
writer,
- if (dest_ty.abiSize(mod) <= operand_ty.abiSize(mod)) dest_ty else operand_ty,
+ if (dest_ty.abiSize(zcu) <= operand_ty.abiSize(zcu)) dest_ty else operand_ty,
);
try writer.writeAll("));\n");
// Ensure padding bits have the expected value.
- if (dest_ty.isAbiInt(mod)) {
- const dest_cty = try f.typeToCType(dest_ty, .complete);
- const dest_info = dest_ty.intInfo(mod);
+ if (dest_ty.isAbiInt(zcu)) {
+ const dest_ctype = try f.ctypeFromType(dest_ty, .complete);
+ const dest_info = dest_ty.intInfo(zcu);
var bits: u16 = dest_info.bits;
- var wrap_cty: ?CType = null;
+ var wrap_ctype: ?CType = null;
var need_bitcasts = false;
try f.writeCValue(writer, local, .Other);
- if (dest_cty.castTag(.array)) |pl| {
- try writer.print("[{d}]", .{switch (target.cpu.arch.endian()) {
- .little => pl.data.len - 1,
- .big => 0,
- }});
- const elem_cty = f.indexToCType(pl.data.elem_type);
- wrap_cty = elem_cty.toSignedness(dest_info.signedness);
- need_bitcasts = wrap_cty.?.tag() == .zig_i128;
- bits -= 1;
- bits %= @as(u16, @intCast(f.byteSize(elem_cty) * 8));
- bits += 1;
+ switch (dest_ctype.info(ctype_pool)) {
+ else => {},
+ .array => |array_info| {
+ try writer.print("[{d}]", .{switch (target.cpu.arch.endian()) {
+ .little => array_info.len - 1,
+ .big => 0,
+ }});
+ wrap_ctype = array_info.elem_ctype.toSignedness(dest_info.signedness);
+ need_bitcasts = wrap_ctype.?.index == .zig_i128;
+ bits -= 1;
+ bits %= @as(u16, @intCast(f.byteSize(array_info.elem_ctype) * 8));
+ bits += 1;
+ },
}
try writer.writeAll(" = ");
if (need_bitcasts) {
try writer.writeAll("zig_bitCast_");
- try f.object.dg.renderCTypeForBuiltinFnName(writer, wrap_cty.?.toUnsigned());
+ try f.object.dg.renderCTypeForBuiltinFnName(writer, wrap_ctype.?.toUnsigned());
try writer.writeByte('(');
}
try writer.writeAll("zig_wrap_");
- const info_ty = try mod.intType(dest_info.signedness, bits);
- if (wrap_cty) |cty|
- try f.object.dg.renderCTypeForBuiltinFnName(writer, cty)
+ const info_ty = try zcu.intType(dest_info.signedness, bits);
+ if (wrap_ctype) |ctype|
+ try f.object.dg.renderCTypeForBuiltinFnName(writer, ctype)
else
try f.object.dg.renderTypeForBuiltinFnName(writer, info_ty);
try writer.writeByte('(');
if (need_bitcasts) {
try writer.writeAll("zig_bitCast_");
- try f.object.dg.renderCTypeForBuiltinFnName(writer, wrap_cty.?);
+ try f.object.dg.renderCTypeForBuiltinFnName(writer, wrap_ctype.?);
try writer.writeByte('(');
}
try f.writeCValue(writer, local, .Other);
- if (dest_cty.castTag(.array)) |pl| {
- try writer.print("[{d}]", .{switch (target.cpu.arch.endian()) {
- .little => pl.data.len - 1,
- .big => 0,
- }});
+ switch (dest_ctype.info(ctype_pool)) {
+ else => {},
+ .array => |array_info| try writer.print("[{d}]", .{
+ switch (target.cpu.arch.endian()) {
+ .little => array_info.len - 1,
+ .big => 0,
+ },
+ }),
}
if (need_bitcasts) try writer.writeByte(')');
try f.object.dg.renderBuiltinInfo(writer, info_ty, .bits);
@@ -4912,7 +4922,7 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const condition = try f.resolveInst(pl_op.operand);
try reap(f, inst, &.{pl_op.operand});
@@ -4921,11 +4931,11 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
const writer = f.object.writer();
try writer.writeAll("switch (");
- if (condition_ty.zigTypeTag(mod) == .Bool) {
+ if (condition_ty.zigTypeTag(zcu) == .Bool) {
try writer.writeByte('(');
try f.renderType(writer, Type.u1);
try writer.writeByte(')');
- } else if (condition_ty.isPtrAtRuntime(mod)) {
+ } else if (condition_ty.isPtrAtRuntime(zcu)) {
try writer.writeByte('(');
try f.renderType(writer, Type.usize);
try writer.writeByte(')');
@@ -4952,12 +4962,12 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
for (items) |item| {
try f.object.indent_writer.insertNewline();
try writer.writeAll("case ");
- if (condition_ty.isPtrAtRuntime(mod)) {
+ if (condition_ty.isPtrAtRuntime(zcu)) {
try writer.writeByte('(');
try f.renderType(writer, Type.usize);
try writer.writeByte(')');
}
- try f.object.dg.renderValue(writer, condition_ty, (try f.air.value(item, mod)).?, .Other);
+ try f.object.dg.renderValue(writer, (try f.air.value(item, zcu)).?, .Other);
try writer.writeByte(':');
}
try writer.writeByte(' ');
@@ -4994,13 +5004,13 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn asmInputNeedsLocal(f: *Function, constraint: []const u8, value: CValue) bool {
- const target = f.object.dg.module.getTarget();
+ const target = &f.object.dg.mod.resolved_target.result;
return switch (constraint[0]) {
'{' => true,
'i', 'r' => false,
'I' => !target.cpu.arch.isArmOrThumb(),
else => switch (value) {
- .constant => |val| switch (f.object.dg.module.intern_pool.indexToKey(val)) {
+ .constant => |val| switch (f.object.dg.zcu.intern_pool.indexToKey(val.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
.decl => false,
else => true,
@@ -5013,7 +5023,7 @@ fn asmInputNeedsLocal(f: *Function, constraint: []const u8, value: CValue) bool
}
fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.Asm, ty_pl.payload);
const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
@@ -5028,15 +5038,18 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const result = result: {
const writer = f.object.writer();
const inst_ty = f.typeOfIndex(inst);
- const local = if (inst_ty.hasRuntimeBitsIgnoreComptime(mod)) local: {
- const local = try f.allocLocal(inst, inst_ty);
+ const inst_local = if (inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) local: {
+ const inst_local = try f.allocLocalValue(.{
+ .ctype = try f.ctypeFromType(inst_ty, .complete),
+ .alignas = CType.AlignAs.fromAbiAlignment(inst_ty.abiAlignment(zcu)),
+ });
if (f.wantSafety()) {
- try f.writeCValue(writer, local, .Other);
+ try f.writeCValue(writer, inst_local, .Other);
try writer.writeAll(" = ");
try f.writeCValue(writer, .{ .undef = inst_ty }, .Other);
try writer.writeAll(";\n");
}
- break :local local;
+ break :local inst_local;
} else .none;
const locals_begin = @as(LocalIndex, @intCast(f.locals.items.len));
@@ -5057,12 +5070,14 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const is_reg = constraint[1] == '{';
if (is_reg) {
- const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(mod);
+ const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(zcu);
try writer.writeAll("register ");
- const alignment: Alignment = .none;
- const local_value = try f.allocLocalValue(output_ty, alignment);
- try f.allocs.put(gpa, local_value.new_local, false);
- try f.object.dg.renderTypeAndName(writer, output_ty, local_value, .{}, alignment, .complete);
+ const output_local = try f.allocLocalValue(.{
+ .ctype = try f.ctypeFromType(output_ty, .complete),
+ .alignas = CType.AlignAs.fromAbiAlignment(output_ty.abiAlignment(zcu)),
+ });
+ try f.allocs.put(gpa, output_local.new_local, false);
+ try f.object.dg.renderTypeAndName(writer, output_ty, output_local, .{}, .none, .complete);
try writer.writeAll(" __asm(\"");
try writer.writeAll(constraint["={".len .. constraint.len - "}".len]);
try writer.writeAll("\")");
@@ -5092,10 +5107,12 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
if (asmInputNeedsLocal(f, constraint, input_val)) {
const input_ty = f.typeOf(input);
if (is_reg) try writer.writeAll("register ");
- const alignment: Alignment = .none;
- const local_value = try f.allocLocalValue(input_ty, alignment);
- try f.allocs.put(gpa, local_value.new_local, false);
- try f.object.dg.renderTypeAndName(writer, input_ty, local_value, Const, alignment, .complete);
+ const input_local = try f.allocLocalValue(.{
+ .ctype = try f.ctypeFromType(input_ty, .complete),
+ .alignas = CType.AlignAs.fromAbiAlignment(input_ty.abiAlignment(zcu)),
+ });
+ try f.allocs.put(gpa, input_local.new_local, false);
+ try f.object.dg.renderTypeAndName(writer, input_ty, input_local, Const, .none, .complete);
if (is_reg) {
try writer.writeAll(" __asm(\"");
try writer.writeAll(constraint["{".len .. constraint.len - "}".len]);
@@ -5188,7 +5205,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, .{ .local = locals_index }, .Other);
locals_index += 1;
} else if (output == .none) {
- try f.writeCValue(writer, local, .FunctionArgument);
+ try f.writeCValue(writer, inst_local, .FunctionArgument);
} else {
try f.writeCValueDeref(writer, try f.resolveInst(output));
}
@@ -5244,7 +5261,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const is_reg = constraint[1] == '{';
if (is_reg) {
try f.writeCValueDeref(writer, if (output == .none)
- .{ .local_ref = local.new_local }
+ .{ .local_ref = inst_local.new_local }
else
try f.resolveInst(output));
try writer.writeAll(" = ");
@@ -5254,7 +5271,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
}
}
- break :result if (f.liveness.isUnused(inst)) .none else local;
+ break :result if (f.liveness.isUnused(inst)) .none else inst_local;
};
var bt = iterateBigTomb(f, inst);
@@ -5275,7 +5292,7 @@ fn airIsNull(
operator: []const u8,
is_ptr: bool,
) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const writer = f.object.writer();
@@ -5292,22 +5309,22 @@ fn airIsNull(
}
const operand_ty = f.typeOf(un_op);
- const optional_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty;
- const payload_ty = optional_ty.optionalChild(mod);
- const err_int_ty = try mod.errorIntType();
+ const optional_ty = if (is_ptr) operand_ty.childType(zcu) else operand_ty;
+ const payload_ty = optional_ty.optionalChild(zcu);
+ const err_int_ty = try zcu.errorIntType();
- const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
+ const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu))
Value.true
- else if (optional_ty.isPtrLikeOptional(mod))
+ else if (optional_ty.isPtrLikeOptional(zcu))
// operand is a regular pointer, test `operand !=/== NULL`
- try mod.getCoerced(Value.null, optional_ty)
- else if (payload_ty.zigTypeTag(mod) == .ErrorSet)
- try mod.intValue(err_int_ty, 0)
- else if (payload_ty.isSlice(mod) and optional_ty.optionalReprIsPayload(mod)) rhs: {
+ try zcu.getCoerced(Value.null, optional_ty)
+ else if (payload_ty.zigTypeTag(zcu) == .ErrorSet)
+ try zcu.intValue(err_int_ty, 0)
+ else if (payload_ty.isSlice(zcu) and optional_ty.optionalReprIsPayload(zcu)) rhs: {
try writer.writeAll(".ptr");
- const slice_ptr_ty = payload_ty.slicePtrFieldType(mod);
- const opt_slice_ptr_ty = try mod.optionalType(slice_ptr_ty.toIntern());
- break :rhs try mod.nullValue(opt_slice_ptr_ty);
+ const slice_ptr_ty = payload_ty.slicePtrFieldType(zcu);
+ const opt_slice_ptr_ty = try zcu.optionalType(slice_ptr_ty.toIntern());
+ break :rhs try zcu.nullValue(opt_slice_ptr_ty);
} else rhs: {
try writer.writeAll(".is_null");
break :rhs Value.true;
@@ -5315,22 +5332,22 @@ fn airIsNull(
try writer.writeByte(' ');
try writer.writeAll(operator);
try writer.writeByte(' ');
- try f.object.dg.renderValue(writer, rhs.typeOf(mod), rhs, .Other);
+ try f.object.dg.renderValue(writer, rhs, .Other);
try writer.writeAll(";\n");
return local;
}
fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const opt_ty = f.typeOf(ty_op.operand);
- const payload_ty = opt_ty.optionalChild(mod);
+ const payload_ty = opt_ty.optionalChild(zcu);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return .none;
}
@@ -5338,7 +5355,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue {
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
- if (opt_ty.optionalReprIsPayload(mod)) {
+ if (opt_ty.optionalReprIsPayload(zcu)) {
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
try f.writeCValue(writer, operand, .Other);
@@ -5355,24 +5372,24 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const writer = f.object.writer();
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const ptr_ty = f.typeOf(ty_op.operand);
- const opt_ty = ptr_ty.childType(mod);
+ const opt_ty = ptr_ty.childType(zcu);
const inst_ty = f.typeOfIndex(inst);
- if (!inst_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!inst_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu)) {
return .{ .undef = inst_ty };
}
const local = try f.allocLocal(inst, inst_ty);
try f.writeCValue(writer, local, .Other);
- if (opt_ty.optionalReprIsPayload(mod)) {
+ if (opt_ty.optionalReprIsPayload(zcu)) {
// the operand is just a regular pointer, no need to do anything special.
// *?*T -> **T and ?*T -> *T are **T -> **T and *T -> *T in C
try writer.writeAll(" = ");
@@ -5386,18 +5403,18 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const writer = f.object.writer();
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const operand_ty = f.typeOf(ty_op.operand);
- const opt_ty = operand_ty.childType(mod);
+ const opt_ty = operand_ty.childType(zcu);
const inst_ty = f.typeOfIndex(inst);
- if (opt_ty.optionalReprIsPayload(mod)) {
+ if (opt_ty.optionalReprIsPayload(zcu)) {
if (f.liveness.isUnused(inst)) {
return .none;
}
@@ -5412,7 +5429,7 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
} else {
try f.writeCValueDeref(writer, operand);
try writer.writeAll(".is_null = ");
- try f.object.dg.renderValue(writer, Type.bool, Value.false, .Initializer);
+ try f.object.dg.renderValue(writer, Value.false, .Initializer);
try writer.writeAll(";\n");
if (f.liveness.isUnused(inst)) {
@@ -5432,67 +5449,82 @@ fn fieldLocation(
container_ptr_ty: Type,
field_ptr_ty: Type,
field_index: u32,
- mod: *Module,
+ zcu: *Zcu,
) union(enum) {
begin: void,
field: CValue,
byte_offset: u32,
end: void,
} {
- const ip = &mod.intern_pool;
- const container_ty = container_ptr_ty.childType(mod);
- return switch (container_ty.zigTypeTag(mod)) {
- .Struct => blk: {
- if (mod.typeToPackedStruct(container_ty)) |struct_type| {
- if (field_ptr_ty.ptrInfo(mod).packed_offset.host_size == 0)
- break :blk .{ .byte_offset = @divExact(mod.structPackedFieldBitOffset(struct_type, field_index) + container_ptr_ty.ptrInfo(mod).packed_offset.bit_offset, 8) }
+ const ip = &zcu.intern_pool;
+ const container_ty = Type.fromInterned(ip.indexToKey(container_ptr_ty.toIntern()).ptr_type.child);
+ switch (ip.indexToKey(container_ty.toIntern())) {
+ .struct_type => {
+ const loaded_struct = ip.loadStructType(container_ty.toIntern());
+ switch (loaded_struct.layout) {
+ .auto, .@"extern" => {
+ var field_it = loaded_struct.iterateRuntimeOrder(ip);
+ var before = true;
+ while (field_it.next()) |next_field_index| {
+ if (next_field_index == field_index) before = false;
+ if (before) continue;
+ const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[next_field_index]);
+ if (!field_type.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ return .{ .field = if (loaded_struct.fieldName(ip, next_field_index).unwrap()) |field_name|
+ .{ .identifier = ip.stringToSlice(field_name) }
+ else
+ .{ .field = next_field_index } };
+ }
+ return if (container_ty.hasRuntimeBitsIgnoreComptime(zcu)) .end else .begin;
+ },
+ .@"packed" => return if (field_ptr_ty.ptrInfo(zcu).packed_offset.host_size == 0)
+ .{ .byte_offset = @divExact(zcu.structPackedFieldBitOffset(loaded_struct, field_index) +
+ container_ptr_ty.ptrInfo(zcu).packed_offset.bit_offset, 8) }
else
- break :blk .begin;
+ .begin,
}
-
- for (field_index..container_ty.structFieldCount(mod)) |next_field_index_usize| {
- const next_field_index: u32 = @intCast(next_field_index_usize);
- if (container_ty.structFieldIsComptime(next_field_index, mod)) continue;
- const field_ty = container_ty.structFieldType(next_field_index, mod);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
-
- break :blk .{ .field = if (container_ty.isSimpleTuple(mod))
- .{ .field = next_field_index }
+ },
+ .anon_struct_type => |anon_struct_info| {
+ for (field_index..anon_struct_info.types.len) |next_field_index| {
+ if (anon_struct_info.values.get(ip)[next_field_index] != .none) continue;
+ const field_type = Type.fromInterned(anon_struct_info.types.get(ip)[next_field_index]);
+ if (!field_type.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ return .{ .field = if (anon_struct_info.fieldName(ip, next_field_index).unwrap()) |field_name|
+ .{ .identifier = ip.stringToSlice(field_name) }
else
- .{ .identifier = ip.stringToSlice(container_ty.legacyStructFieldName(next_field_index, mod)) } };
+ .{ .field = next_field_index } };
}
- break :blk if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin;
+ return if (container_ty.hasRuntimeBitsIgnoreComptime(zcu)) .end else .begin;
},
- .Union => {
- const union_obj = mod.typeToUnion(container_ty).?;
- return switch (union_obj.getLayout(ip)) {
+ .union_type => {
+ const loaded_union = ip.loadUnionType(container_ty.toIntern());
+ switch (loaded_union.getLayout(ip)) {
.auto, .@"extern" => {
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod))
- return if (container_ty.unionTagTypeSafety(mod) != null and
- !container_ty.unionHasAllZeroBitFieldTypes(mod))
+ const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu))
+ return if (loaded_union.hasTag(ip) and !container_ty.unionHasAllZeroBitFieldTypes(zcu))
.{ .field = .{ .identifier = "payload" } }
else
.begin;
- const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
- return .{ .field = if (container_ty.unionTagTypeSafety(mod)) |_|
+ const field_name = loaded_union.loadTagType(ip).names.get(ip)[field_index];
+ return .{ .field = if (loaded_union.hasTag(ip))
.{ .payload_identifier = ip.stringToSlice(field_name) }
else
.{ .identifier = ip.stringToSlice(field_name) } };
},
- .@"packed" => .begin,
- };
+ .@"packed" => return .begin,
+ }
},
- .Pointer => switch (container_ty.ptrSize(mod)) {
+ .ptr_type => |ptr_info| switch (ptr_info.flags.size) {
+ .One, .Many, .C => unreachable,
.Slice => switch (field_index) {
- 0 => .{ .field = .{ .identifier = "ptr" } },
- 1 => .{ .field = .{ .identifier = "len" } },
+ 0 => return .{ .field = .{ .identifier = "ptr" } },
+ 1 => return .{ .field = .{ .identifier = "len" } },
else => unreachable,
},
- .One, .Many, .C => unreachable,
},
else => unreachable,
- };
+ }
}
fn airStructFieldPtr(f: *Function, inst: Air.Inst.Index) !CValue {
@@ -5515,12 +5547,12 @@ fn airStructFieldPtrIndex(f: *Function, inst: Air.Inst.Index, index: u8) !CValue
}
fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const container_ptr_ty = f.typeOfIndex(inst);
- const container_ty = container_ptr_ty.childType(mod);
+ const container_ty = container_ptr_ty.childType(zcu);
const field_ptr_ty = f.typeOf(extra.field_ptr);
const field_ptr_val = try f.resolveInst(extra.field_ptr);
@@ -5533,10 +5565,10 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
try f.renderType(writer, container_ptr_ty);
try writer.writeByte(')');
- switch (fieldLocation(container_ptr_ty, field_ptr_ty, extra.field_index, mod)) {
+ switch (fieldLocation(container_ptr_ty, field_ptr_ty, extra.field_index, zcu)) {
.begin => try f.writeCValue(writer, field_ptr_val, .Initializer),
.field => |field| {
- const u8_ptr_ty = try mod.adjustPtrTypeChild(field_ptr_ty, Type.u8);
+ const u8_ptr_ty = try zcu.adjustPtrTypeChild(field_ptr_ty, Type.u8);
try writer.writeAll("((");
try f.renderType(writer, u8_ptr_ty);
@@ -5549,19 +5581,19 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("))");
},
.byte_offset => |byte_offset| {
- const u8_ptr_ty = try mod.adjustPtrTypeChild(field_ptr_ty, Type.u8);
-
- const byte_offset_val = try mod.intValue(Type.usize, byte_offset);
+ const u8_ptr_ty = try zcu.adjustPtrTypeChild(field_ptr_ty, Type.u8);
try writer.writeAll("((");
try f.renderType(writer, u8_ptr_ty);
try writer.writeByte(')');
try f.writeCValue(writer, field_ptr_val, .Other);
- try writer.print(" - {})", .{try f.fmtIntLiteral(Type.usize, byte_offset_val)});
+ try writer.print(" - {})", .{
+ try f.fmtIntLiteral(try zcu.intValue(Type.usize, byte_offset)),
+ });
},
.end => {
try f.writeCValue(writer, field_ptr_val, .Other);
- try writer.print(" - {}", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))});
+ try writer.print(" - {}", .{try f.fmtIntLiteral(try zcu.intValue(Type.usize, 1))});
},
}
@@ -5576,12 +5608,12 @@ fn fieldPtr(
container_ptr_val: CValue,
field_index: u32,
) !CValue {
- const mod = f.object.dg.module;
- const container_ty = container_ptr_ty.childType(mod);
+ const zcu = f.object.dg.zcu;
+ const container_ty = container_ptr_ty.childType(zcu);
const field_ptr_ty = f.typeOfIndex(inst);
// Ensure complete type definition is visible before accessing fields.
- _ = try f.typeToIndex(container_ty, .complete);
+ _ = try f.ctypeFromType(container_ty, .complete);
const writer = f.object.writer();
const local = try f.allocLocal(inst, field_ptr_ty);
@@ -5590,27 +5622,27 @@ fn fieldPtr(
try f.renderType(writer, field_ptr_ty);
try writer.writeByte(')');
- switch (fieldLocation(container_ptr_ty, field_ptr_ty, field_index, mod)) {
+ switch (fieldLocation(container_ptr_ty, field_ptr_ty, field_index, zcu)) {
.begin => try f.writeCValue(writer, container_ptr_val, .Initializer),
.field => |field| {
try writer.writeByte('&');
try f.writeCValueDerefMember(writer, container_ptr_val, field);
},
.byte_offset => |byte_offset| {
- const u8_ptr_ty = try mod.adjustPtrTypeChild(field_ptr_ty, Type.u8);
-
- const byte_offset_val = try mod.intValue(Type.usize, byte_offset);
+ const u8_ptr_ty = try zcu.adjustPtrTypeChild(field_ptr_ty, Type.u8);
try writer.writeAll("((");
try f.renderType(writer, u8_ptr_ty);
try writer.writeByte(')');
try f.writeCValue(writer, container_ptr_val, .Other);
- try writer.print(" + {})", .{try f.fmtIntLiteral(Type.usize, byte_offset_val)});
+ try writer.print(" + {})", .{
+ try f.fmtIntLiteral(try zcu.intValue(Type.usize, byte_offset)),
+ });
},
.end => {
try writer.writeByte('(');
try f.writeCValue(writer, container_ptr_val, .Other);
- try writer.print(" + {})", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))});
+ try writer.print(" + {})", .{try f.fmtIntLiteral(try zcu.intValue(Type.usize, 1))});
},
}
@@ -5619,13 +5651,13 @@ fn fieldPtr(
}
fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
- const ip = &mod.intern_pool;
+ const zcu = f.object.dg.zcu;
+ const ip = &zcu.intern_pool;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.StructField, ty_pl.payload).data;
const inst_ty = f.typeOfIndex(inst);
- if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try reap(f, inst, &.{extra.struct_operand});
return .none;
}
@@ -5636,110 +5668,109 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
const writer = f.object.writer();
// Ensure complete type definition is visible before accessing fields.
- _ = try f.typeToIndex(struct_ty, .complete);
+ _ = try f.ctypeFromType(struct_ty, .complete);
+
+ const field_name: CValue = switch (ip.indexToKey(struct_ty.toIntern())) {
+ .struct_type => field_name: {
+ const loaded_struct = ip.loadStructType(struct_ty.toIntern());
+ switch (loaded_struct.layout) {
+ .auto, .@"extern" => break :field_name if (loaded_struct.fieldName(ip, extra.field_index).unwrap()) |field_name|
+ .{ .identifier = ip.stringToSlice(field_name) }
+ else
+ .{ .field = extra.field_index },
+ .@"packed" => {
+ const int_info = struct_ty.intInfo(zcu);
- const field_name: CValue = switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
- .struct_type => switch (struct_ty.containerLayout(mod)) {
- .auto, .@"extern" => if (struct_ty.isSimpleTuple(mod))
- .{ .field = extra.field_index }
- else
- .{ .identifier = ip.stringToSlice(struct_ty.legacyStructFieldName(extra.field_index, mod)) },
- .@"packed" => {
- const struct_type = mod.typeToStruct(struct_ty).?;
- const int_info = struct_ty.intInfo(mod);
+ const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
- const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
+ const bit_offset = zcu.structPackedFieldBitOffset(loaded_struct, extra.field_index);
- const bit_offset = mod.structPackedFieldBitOffset(struct_type, extra.field_index);
- const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
+ const field_int_signedness = if (inst_ty.isAbiInt(zcu))
+ inst_ty.intInfo(zcu).signedness
+ else
+ .unsigned;
+ const field_int_ty = try zcu.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(zcu))));
- const field_int_signedness = if (inst_ty.isAbiInt(mod))
- inst_ty.intInfo(mod).signedness
- else
- .unsigned;
- const field_int_ty = try mod.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(mod))));
-
- const temp_local = try f.allocLocal(inst, field_int_ty);
- try f.writeCValue(writer, temp_local, .Other);
- try writer.writeAll(" = zig_wrap_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, field_int_ty);
- try writer.writeAll("((");
- try f.renderType(writer, field_int_ty);
- try writer.writeByte(')');
- const cant_cast = int_info.bits > 64;
- if (cant_cast) {
- if (field_int_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
- try writer.writeAll("zig_lo_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
- try writer.writeByte('(');
- }
- if (bit_offset > 0) {
- try writer.writeAll("zig_shr_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
- try writer.writeByte('(');
- }
- try f.writeCValue(writer, struct_byval, .Other);
- if (bit_offset > 0) {
- try writer.writeAll(", ");
- try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
+ const temp_local = try f.allocLocal(inst, field_int_ty);
+ try f.writeCValue(writer, temp_local, .Other);
+ try writer.writeAll(" = zig_wrap_");
+ try f.object.dg.renderTypeForBuiltinFnName(writer, field_int_ty);
+ try writer.writeAll("((");
+ try f.renderType(writer, field_int_ty);
try writer.writeByte(')');
- }
- if (cant_cast) try writer.writeByte(')');
- try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits);
- try writer.writeAll(");\n");
- if (inst_ty.eql(field_int_ty, f.object.dg.module)) return temp_local;
-
- const local = try f.allocLocal(inst, inst_ty);
- try writer.writeAll("memcpy(");
- try f.writeCValue(writer, .{ .local_ref = local.new_local }, .FunctionArgument);
- try writer.writeAll(", ");
- try f.writeCValue(writer, .{ .local_ref = temp_local.new_local }, .FunctionArgument);
- try writer.writeAll(", sizeof(");
- try f.renderType(writer, inst_ty);
- try writer.writeAll("));\n");
- try freeLocal(f, inst, temp_local.new_local, null);
- return local;
- },
- },
+ const cant_cast = int_info.bits > 64;
+ if (cant_cast) {
+ if (field_int_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+ try writer.writeAll("zig_lo_");
+ try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
+ try writer.writeByte('(');
+ }
+ if (bit_offset > 0) {
+ try writer.writeAll("zig_shr_");
+ try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
+ try writer.writeByte('(');
+ }
+ try f.writeCValue(writer, struct_byval, .Other);
+ if (bit_offset > 0) try writer.print(", {})", .{
+ try f.fmtIntLiteral(try zcu.intValue(bit_offset_ty, bit_offset)),
+ });
+ if (cant_cast) try writer.writeByte(')');
+ try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits);
+ try writer.writeAll(");\n");
+ if (inst_ty.eql(field_int_ty, f.object.dg.zcu)) return temp_local;
- .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 0)
- .{ .field = extra.field_index }
+ const local = try f.allocLocal(inst, inst_ty);
+ try writer.writeAll("memcpy(");
+ try f.writeCValue(writer, .{ .local_ref = local.new_local }, .FunctionArgument);
+ try writer.writeAll(", ");
+ try f.writeCValue(writer, .{ .local_ref = temp_local.new_local }, .FunctionArgument);
+ try writer.writeAll(", sizeof(");
+ try f.renderType(writer, inst_ty);
+ try writer.writeAll("));\n");
+ try freeLocal(f, inst, temp_local.new_local, null);
+ return local;
+ },
+ }
+ },
+ .anon_struct_type => |anon_struct_info| if (anon_struct_info.fieldName(ip, extra.field_index).unwrap()) |field_name|
+ .{ .identifier = ip.stringToSlice(field_name) }
else
- .{ .identifier = ip.stringToSlice(struct_ty.legacyStructFieldName(extra.field_index, mod)) },
-
+ .{ .field = extra.field_index },
.union_type => field_name: {
- const union_obj = ip.loadUnionType(struct_ty.toIntern());
- if (union_obj.flagsPtr(ip).layout == .@"packed") {
- const operand_lval = if (struct_byval == .constant) blk: {
- const operand_local = try f.allocLocal(inst, struct_ty);
- try f.writeCValue(writer, operand_local, .Other);
- try writer.writeAll(" = ");
- try f.writeCValue(writer, struct_byval, .Initializer);
- try writer.writeAll(";\n");
- break :blk operand_local;
- } else struct_byval;
-
- const local = try f.allocLocal(inst, inst_ty);
- try writer.writeAll("memcpy(&");
- try f.writeCValue(writer, local, .Other);
- try writer.writeAll(", &");
- try f.writeCValue(writer, operand_lval, .Other);
- try writer.writeAll(", sizeof(");
- try f.renderType(writer, inst_ty);
- try writer.writeAll("));\n");
-
- if (struct_byval == .constant) {
- try freeLocal(f, inst, operand_lval.new_local, null);
- }
+ const loaded_union = ip.loadUnionType(struct_ty.toIntern());
+ switch (loaded_union.getLayout(ip)) {
+ .auto, .@"extern" => {
+ const name = loaded_union.loadTagType(ip).names.get(ip)[extra.field_index];
+ break :field_name if (loaded_union.hasTag(ip))
+ .{ .payload_identifier = ip.stringToSlice(name) }
+ else
+ .{ .identifier = ip.stringToSlice(name) };
+ },
+ .@"packed" => {
+ const operand_lval = if (struct_byval == .constant) blk: {
+ const operand_local = try f.allocLocal(inst, struct_ty);
+ try f.writeCValue(writer, operand_local, .Other);
+ try writer.writeAll(" = ");
+ try f.writeCValue(writer, struct_byval, .Initializer);
+ try writer.writeAll(";\n");
+ break :blk operand_local;
+ } else struct_byval;
+
+ const local = try f.allocLocal(inst, inst_ty);
+ try writer.writeAll("memcpy(&");
+ try f.writeCValue(writer, local, .Other);
+ try writer.writeAll(", &");
+ try f.writeCValue(writer, operand_lval, .Other);
+ try writer.writeAll(", sizeof(");
+ try f.renderType(writer, inst_ty);
+ try writer.writeAll("));\n");
+
+ if (struct_byval == .constant) {
+ try freeLocal(f, inst, operand_lval.new_local, null);
+ }
- return local;
- } else {
- const name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
- break :field_name if (union_obj.hasTag(ip)) .{
- .payload_identifier = ip.stringToSlice(name),
- } else .{
- .identifier = ip.stringToSlice(name),
- };
+ return local;
+ },
}
},
else => unreachable,
@@ -5757,7 +5788,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
/// *(E!T) -> E
/// Note that the result is never a pointer.
fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const inst_ty = f.typeOfIndex(inst);
@@ -5765,13 +5796,13 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_ty = f.typeOf(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const operand_is_ptr = operand_ty.zigTypeTag(mod) == .Pointer;
- const error_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
- const error_ty = error_union_ty.errorUnionSet(mod);
- const payload_ty = error_union_ty.errorUnionPayload(mod);
+ const operand_is_ptr = operand_ty.zigTypeTag(zcu) == .Pointer;
+ const error_union_ty = if (operand_is_ptr) operand_ty.childType(zcu) else operand_ty;
+ const error_ty = error_union_ty.errorUnionSet(zcu);
+ const payload_ty = error_union_ty.errorUnionPayload(zcu);
const local = try f.allocLocal(inst, inst_ty);
- if (!payload_ty.hasRuntimeBits(mod) and operand == .local and operand.local == local.new_local) {
+ if (!payload_ty.hasRuntimeBits(zcu) and operand == .local and operand.local == local.new_local) {
// The store will be 'x = x'; elide it.
return local;
}
@@ -5780,35 +5811,32 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
- if (!payload_ty.hasRuntimeBits(mod)) {
- try f.writeCValue(writer, operand, .Other);
- } else {
- if (!error_ty.errorSetIsEmpty(mod))
- if (operand_is_ptr)
- try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" })
- else
- try f.writeCValueMember(writer, operand, .{ .identifier = "error" })
- else {
- const err_int_ty = try mod.errorIntType();
- try f.object.dg.renderValue(writer, err_int_ty, try mod.intValue(err_int_ty, 0), .Initializer);
- }
- }
+ if (!payload_ty.hasRuntimeBits(zcu))
+ try f.writeCValue(writer, operand, .Other)
+ else if (error_ty.errorSetIsEmpty(zcu))
+ try writer.print("{}", .{
+ try f.fmtIntLiteral(try zcu.intValue(try zcu.errorIntType(), 0)),
+ })
+ else if (operand_is_ptr)
+ try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" })
+ else
+ try f.writeCValueMember(writer, operand, .{ .identifier = "error" });
try writer.writeAll(";\n");
return local;
}
fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const inst_ty = f.typeOfIndex(inst);
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const operand_ty = f.typeOf(ty_op.operand);
- const error_union_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty;
+ const error_union_ty = if (is_ptr) operand_ty.childType(zcu) else operand_ty;
const writer = f.object.writer();
- if (!error_union_ty.errorUnionPayload(mod).hasRuntimeBits(mod)) {
+ if (!error_union_ty.errorUnionPayload(zcu).hasRuntimeBits(zcu)) {
if (!is_ptr) return .none;
const local = try f.allocLocal(inst, inst_ty);
@@ -5834,11 +5862,11 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu
}
fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const inst_ty = f.typeOfIndex(inst);
- const repr_is_payload = inst_ty.optionalReprIsPayload(mod);
+ const repr_is_payload = inst_ty.optionalReprIsPayload(zcu);
const payload_ty = f.typeOf(ty_op.operand);
const payload = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -5859,20 +5887,20 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue {
const a = try Assignment.start(f, writer, Type.bool);
try f.writeCValueMember(writer, local, .{ .identifier = "is_null" });
try a.assign(f, writer);
- try f.object.dg.renderValue(writer, Type.bool, Value.false, .Other);
+ try f.object.dg.renderValue(writer, Value.false, .Other);
try a.end(f, writer);
}
return local;
}
fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const inst_ty = f.typeOfIndex(inst);
- const payload_ty = inst_ty.errorUnionPayload(mod);
- const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod);
- const err_ty = inst_ty.errorUnionSet(mod);
+ const payload_ty = inst_ty.errorUnionPayload(zcu);
+ const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
+ const err_ty = inst_ty.errorUnionSet(zcu);
const err = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -5888,7 +5916,7 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
const a = try Assignment.start(f, writer, payload_ty);
try f.writeCValueMember(writer, local, .{ .identifier = "payload" });
try a.assign(f, writer);
- try f.object.dg.renderValue(writer, payload_ty, Value.undef, .Other);
+ try f.object.dg.renderUndefValue(writer, payload_ty, .Other);
try a.end(f, writer);
}
{
@@ -5905,29 +5933,25 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const writer = f.object.writer();
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try f.resolveInst(ty_op.operand);
- const error_union_ty = f.typeOf(ty_op.operand).childType(mod);
+ const error_union_ty = f.typeOf(ty_op.operand).childType(zcu);
- const payload_ty = error_union_ty.errorUnionPayload(mod);
- const err_int_ty = try mod.errorIntType();
+ const payload_ty = error_union_ty.errorUnionPayload(zcu);
+ const err_int_ty = try zcu.errorIntType();
+ const no_err = try zcu.intValue(err_int_ty, 0);
// First, set the non-error value.
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try f.writeCValueDeref(writer, operand);
- try writer.writeAll(" = ");
- try f.object.dg.renderValue(writer, err_int_ty, try mod.intValue(err_int_ty, 0), .Other);
- try writer.writeAll(";\n ");
-
+ try writer.print(" = {};\n", .{try f.fmtIntLiteral(no_err)});
return operand;
}
try reap(f, inst, &.{ty_op.operand});
try f.writeCValueDeref(writer, operand);
- try writer.writeAll(".error = ");
- try f.object.dg.renderValue(writer, err_int_ty, try mod.intValue(err_int_ty, 0), .Other);
- try writer.writeAll(";\n");
+ try writer.print(".error = {};\n", .{try f.fmtIntLiteral(no_err)});
// Then return the payload pointer (only if it is used)
if (f.liveness.isUnused(inst)) return .none;
@@ -5956,14 +5980,14 @@ fn airSaveErrReturnTraceIndex(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const inst_ty = f.typeOfIndex(inst);
- const payload_ty = inst_ty.errorUnionPayload(mod);
+ const payload_ty = inst_ty.errorUnionPayload(zcu);
const payload = try f.resolveInst(ty_op.operand);
- const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod);
- const err_ty = inst_ty.errorUnionSet(mod);
+ const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
+ const err_ty = inst_ty.errorUnionSet(zcu);
try reap(f, inst, &.{ty_op.operand});
const writer = f.object.writer();
@@ -5982,15 +6006,14 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue {
else
try f.writeCValueMember(writer, local, .{ .identifier = "error" });
try a.assign(f, writer);
- const err_int_ty = try mod.errorIntType();
- try f.object.dg.renderValue(writer, err_int_ty, try mod.intValue(err_int_ty, 0), .Other);
+ try f.object.dg.renderValue(writer, try zcu.intValue(try zcu.errorIntType(), 0), .Other);
try a.end(f, writer);
}
return local;
}
fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const u8) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const writer = f.object.writer();
@@ -5998,16 +6021,16 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const
try reap(f, inst, &.{un_op});
const operand_ty = f.typeOf(un_op);
const local = try f.allocLocal(inst, Type.bool);
- const err_union_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty;
- const payload_ty = err_union_ty.errorUnionPayload(mod);
- const error_ty = err_union_ty.errorUnionSet(mod);
+ const err_union_ty = if (is_ptr) operand_ty.childType(zcu) else operand_ty;
+ const payload_ty = err_union_ty.errorUnionPayload(zcu);
+ const error_ty = err_union_ty.errorUnionSet(zcu);
+ const a = try Assignment.start(f, writer, Type.bool);
try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = ");
-
- const err_int_ty = try mod.errorIntType();
- if (!error_ty.errorSetIsEmpty(mod))
- if (payload_ty.hasRuntimeBits(mod))
+ try a.assign(f, writer);
+ const err_int_ty = try zcu.errorIntType();
+ if (!error_ty.errorSetIsEmpty(zcu))
+ if (payload_ty.hasRuntimeBits(zcu))
if (is_ptr)
try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" })
else
@@ -6015,63 +6038,85 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const
else
try f.writeCValue(writer, operand, .Other)
else
- try f.object.dg.renderValue(writer, err_int_ty, try mod.intValue(err_int_ty, 0), .Other);
+ try f.object.dg.renderValue(writer, try zcu.intValue(err_int_ty, 0), .Other);
try writer.writeByte(' ');
try writer.writeAll(operator);
try writer.writeByte(' ');
- try f.object.dg.renderValue(writer, err_int_ty, try mod.intValue(err_int_ty, 0), .Other);
- try writer.writeAll(";\n");
+ try f.object.dg.renderValue(writer, try zcu.intValue(err_int_ty, 0), .Other);
+ try a.end(f, writer);
return local;
}
fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
+ const ctype_pool = &f.object.dg.ctype_pool;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const inst_ty = f.typeOfIndex(inst);
+ const ptr_ty = inst_ty.slicePtrFieldType(zcu);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
- const array_ty = f.typeOf(ty_op.operand).childType(mod);
-
- try f.writeCValueMember(writer, local, .{ .identifier = "ptr" });
- try writer.writeAll(" = ");
- // Unfortunately, C does not support any equivalent to
- // &(*(void *)p)[0], although LLVM does via GetElementPtr
- if (operand == .undef) {
- try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(mod) }, .Initializer);
- } else if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- try writer.writeAll("&(");
- try f.writeCValueDeref(writer, operand);
- try writer.print(")[{}]", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 0))});
- } else try f.writeCValue(writer, operand, .Initializer);
- try writer.writeAll("; ");
+ const operand_ty = f.typeOf(ty_op.operand);
+ const array_ty = operand_ty.childType(zcu);
- const len_val = try mod.intValue(Type.usize, array_ty.arrayLen(mod));
- try f.writeCValueMember(writer, local, .{ .identifier = "len" });
- try writer.print(" = {};\n", .{try f.fmtIntLiteral(Type.usize, len_val)});
+ {
+ const a = try Assignment.start(f, writer, ptr_ty);
+ try f.writeCValueMember(writer, local, .{ .identifier = "ptr" });
+ try a.assign(f, writer);
+ if (operand == .undef) {
+ try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(zcu) }, .Initializer);
+ } else {
+ const ptr_ctype = try f.ctypeFromType(ptr_ty, .complete);
+ const ptr_child_ctype = ptr_ctype.info(ctype_pool).pointer.elem_ctype;
+ const elem_ty = array_ty.childType(zcu);
+ const elem_ctype = try f.ctypeFromType(elem_ty, .complete);
+ if (!ptr_child_ctype.eql(elem_ctype)) {
+ try writer.writeByte('(');
+ try f.renderCType(writer, ptr_ctype);
+ try writer.writeByte(')');
+ }
+ const operand_ctype = try f.ctypeFromType(operand_ty, .complete);
+ const operand_child_ctype = operand_ctype.info(ctype_pool).pointer.elem_ctype;
+ if (operand_child_ctype.info(ctype_pool) == .array) {
+ try writer.writeByte('&');
+ try f.writeCValueDeref(writer, operand);
+ try writer.print("[{}]", .{try f.fmtIntLiteral(try zcu.intValue(Type.usize, 0))});
+ } else try f.writeCValue(writer, operand, .Initializer);
+ }
+ try a.end(f, writer);
+ }
+ {
+ const a = try Assignment.start(f, writer, Type.usize);
+ try f.writeCValueMember(writer, local, .{ .identifier = "len" });
+ try a.assign(f, writer);
+ try writer.print("{}", .{
+ try f.fmtIntLiteral(try zcu.intValue(Type.usize, array_ty.arrayLen(zcu))),
+ });
+ try a.end(f, writer);
+ }
return local;
}
fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const inst_ty = f.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType(mod);
+ const inst_scalar_ty = inst_ty.scalarType(zcu);
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const operand_ty = f.typeOf(ty_op.operand);
- const scalar_ty = operand_ty.scalarType(mod);
- const target = f.object.dg.module.getTarget();
+ const scalar_ty = operand_ty.scalarType(zcu);
+ const target = &f.object.dg.mod.resolved_target.result;
const operation = if (inst_scalar_ty.isRuntimeFloat() and scalar_ty.isRuntimeFloat())
- if (inst_scalar_ty.floatBits(target) < scalar_ty.floatBits(target)) "trunc" else "extend"
- else if (inst_scalar_ty.isInt(mod) and scalar_ty.isRuntimeFloat())
- if (inst_scalar_ty.isSignedInt(mod)) "fix" else "fixuns"
- else if (inst_scalar_ty.isRuntimeFloat() and scalar_ty.isInt(mod))
- if (scalar_ty.isSignedInt(mod)) "float" else "floatun"
+ if (inst_scalar_ty.floatBits(target.*) < scalar_ty.floatBits(target.*)) "trunc" else "extend"
+ else if (inst_scalar_ty.isInt(zcu) and scalar_ty.isRuntimeFloat())
+ if (inst_scalar_ty.isSignedInt(zcu)) "fix" else "fixuns"
+ else if (inst_scalar_ty.isRuntimeFloat() and scalar_ty.isInt(zcu))
+ if (scalar_ty.isSignedInt(zcu)) "float" else "floatun"
else
unreachable;
@@ -6082,20 +6127,20 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, local, .Other);
try v.elem(f, writer);
try a.assign(f, writer);
- if (inst_scalar_ty.isInt(mod) and scalar_ty.isRuntimeFloat()) {
+ if (inst_scalar_ty.isInt(zcu) and scalar_ty.isRuntimeFloat()) {
try writer.writeAll("zig_wrap_");
try f.object.dg.renderTypeForBuiltinFnName(writer, inst_scalar_ty);
try writer.writeByte('(');
}
try writer.writeAll("zig_");
try writer.writeAll(operation);
- try writer.writeAll(compilerRtAbbrev(scalar_ty, mod));
- try writer.writeAll(compilerRtAbbrev(inst_scalar_ty, mod));
+ try writer.writeAll(compilerRtAbbrev(scalar_ty, zcu, target.*));
+ try writer.writeAll(compilerRtAbbrev(inst_scalar_ty, zcu, target.*));
try writer.writeByte('(');
try f.writeCValue(writer, operand, .FunctionArgument);
try v.elem(f, writer);
try writer.writeByte(')');
- if (inst_scalar_ty.isInt(mod) and scalar_ty.isRuntimeFloat()) {
+ if (inst_scalar_ty.isInt(zcu) and scalar_ty.isRuntimeFloat()) {
try f.object.dg.renderBuiltinInfo(writer, inst_scalar_ty, .bits);
try writer.writeByte(')');
}
@@ -6106,7 +6151,7 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airIntFromPtr(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try f.resolveInst(un_op);
@@ -6120,7 +6165,7 @@ fn airIntFromPtr(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(" = (");
try f.renderType(writer, inst_ty);
try writer.writeByte(')');
- if (operand_ty.isSlice(mod)) {
+ if (operand_ty.isSlice(zcu)) {
try f.writeCValueMember(writer, operand, .{ .identifier = "ptr" });
} else {
try f.writeCValue(writer, operand, .Other);
@@ -6135,18 +6180,18 @@ fn airUnBuiltinCall(
operation: []const u8,
info: BuiltinInfo,
) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const inst_ty = f.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType(mod);
+ const inst_scalar_ty = inst_ty.scalarType(zcu);
const operand_ty = f.typeOf(ty_op.operand);
- const scalar_ty = operand_ty.scalarType(mod);
+ const scalar_ty = operand_ty.scalarType(zcu);
- const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
- const ref_ret = inst_scalar_cty.tag() == .array;
+ const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete);
+ const ref_ret = inst_scalar_ctype.info(&f.object.dg.ctype_pool) == .array;
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -6179,23 +6224,23 @@ fn airBinBuiltinCall(
operation: []const u8,
info: BuiltinInfo,
) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const operand_ty = f.typeOf(bin_op.lhs);
- const operand_cty = try f.typeToCType(operand_ty, .complete);
- const is_big = operand_cty.tag() == .array;
+ const operand_ctype = try f.ctypeFromType(operand_ty, .complete);
+ const is_big = operand_ctype.info(&f.object.dg.ctype_pool) == .array;
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
if (!is_big) try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const inst_ty = f.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType(mod);
- const scalar_ty = operand_ty.scalarType(mod);
+ const inst_scalar_ty = inst_ty.scalarType(zcu);
+ const scalar_ty = operand_ty.scalarType(zcu);
- const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
- const ref_ret = inst_scalar_cty.tag() == .array;
+ const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete);
+ const ref_ret = inst_scalar_ctype.info(&f.object.dg.ctype_pool) == .array;
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -6234,18 +6279,18 @@ fn airCmpBuiltinCall(
operation: enum { cmp, operator },
info: BuiltinInfo,
) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const lhs = try f.resolveInst(data.lhs);
const rhs = try f.resolveInst(data.rhs);
try reap(f, inst, &.{ data.lhs, data.rhs });
const inst_ty = f.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType(mod);
+ const inst_scalar_ty = inst_ty.scalarType(zcu);
const operand_ty = f.typeOf(data.lhs);
- const scalar_ty = operand_ty.scalarType(mod);
+ const scalar_ty = operand_ty.scalarType(zcu);
- const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
- const ref_ret = inst_scalar_cty.tag() == .array;
+ const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete);
+ const ref_ret = inst_scalar_ctype.info(&f.object.dg.ctype_pool) == .array;
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -6275,7 +6320,7 @@ fn airCmpBuiltinCall(
try writer.writeByte(')');
if (!ref_ret) try writer.print("{s}{}", .{
compareOperatorC(operator),
- try f.fmtIntLiteral(Type.i32, try mod.intValue(Type.i32, 0)),
+ try f.fmtIntLiteral(try zcu.intValue(Type.i32, 0)),
});
try writer.writeAll(";\n");
try v.end(f, inst, writer);
@@ -6284,7 +6329,7 @@ fn airCmpBuiltinCall(
}
fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
const inst_ty = f.typeOfIndex(inst);
@@ -6292,19 +6337,19 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
const expected_value = try f.resolveInst(extra.expected_value);
const new_value = try f.resolveInst(extra.new_value);
const ptr_ty = f.typeOf(extra.ptr);
- const ty = ptr_ty.childType(mod);
+ const ty = ptr_ty.childType(zcu);
const writer = f.object.writer();
const new_value_mat = try Materialize.start(f, inst, writer, ty, new_value);
try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value });
const repr_ty = if (ty.isRuntimeFloat())
- mod.intType(.unsigned, @as(u16, @intCast(ty.abiSize(mod) * 8))) catch unreachable
+ zcu.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable
else
ty;
const local = try f.allocLocal(inst, inst_ty);
- if (inst_ty.isPtrLikeOptional(mod)) {
+ if (inst_ty.isPtrLikeOptional(zcu)) {
{
const a = try Assignment.start(f, writer, ty);
try f.writeCValue(writer, local, .Other);
@@ -6317,7 +6362,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
try writer.print("zig_cmpxchg_{s}((zig_atomic(", .{flavor});
try f.renderType(writer, ty);
try writer.writeByte(')');
- if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile");
+ if (ptr_ty.isVolatilePtr(zcu)) try writer.writeAll(" volatile");
try writer.writeAll(" *)");
try f.writeCValue(writer, ptr, .Other);
try writer.writeAll(", ");
@@ -6331,7 +6376,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
try writer.writeAll(", ");
try f.object.dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeAll(", ");
- try f.object.dg.renderType(writer, repr_ty);
+ try f.renderType(writer, repr_ty);
try writer.writeByte(')');
try writer.writeAll(") {\n");
f.object.indent_writer.pushIndent();
@@ -6359,7 +6404,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
try writer.print("zig_cmpxchg_{s}((zig_atomic(", .{flavor});
try f.renderType(writer, ty);
try writer.writeByte(')');
- if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile");
+ if (ptr_ty.isVolatilePtr(zcu)) try writer.writeAll(" volatile");
try writer.writeAll(" *)");
try f.writeCValue(writer, ptr, .Other);
try writer.writeAll(", ");
@@ -6373,7 +6418,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
try writer.writeAll(", ");
try f.object.dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeAll(", ");
- try f.object.dg.renderType(writer, repr_ty);
+ try f.renderType(writer, repr_ty);
try writer.writeByte(')');
try a.end(f, writer);
}
@@ -6389,12 +6434,12 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
}
fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data;
const inst_ty = f.typeOfIndex(inst);
const ptr_ty = f.typeOf(pl_op.operand);
- const ty = ptr_ty.childType(mod);
+ const ty = ptr_ty.childType(zcu);
const ptr = try f.resolveInst(pl_op.operand);
const operand = try f.resolveInst(extra.operand);
@@ -6402,10 +6447,10 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_mat = try Materialize.start(f, inst, writer, ty, operand);
try reap(f, inst, &.{ pl_op.operand, extra.operand });
- const repr_bits = @as(u16, @intCast(ty.abiSize(mod) * 8));
+ const repr_bits = @as(u16, @intCast(ty.abiSize(zcu) * 8));
const is_float = ty.isRuntimeFloat();
const is_128 = repr_bits == 128;
- const repr_ty = if (is_float) mod.intType(.unsigned, repr_bits) catch unreachable else ty;
+ const repr_ty = if (is_float) zcu.intType(.unsigned, repr_bits) catch unreachable else ty;
const local = try f.allocLocal(inst, inst_ty);
try writer.print("zig_atomicrmw_{s}", .{toAtomicRmwSuffix(extra.op())});
@@ -6421,7 +6466,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
if (use_atomic) try writer.writeAll("zig_atomic(");
try f.renderType(writer, ty);
if (use_atomic) try writer.writeByte(')');
- if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile");
+ if (ptr_ty.isVolatilePtr(zcu)) try writer.writeAll(" volatile");
try writer.writeAll(" *)");
try f.writeCValue(writer, ptr, .Other);
try writer.writeAll(", ");
@@ -6431,7 +6476,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(", ");
try f.object.dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeAll(", ");
- try f.object.dg.renderType(writer, repr_ty);
+ try f.renderType(writer, repr_ty);
try writer.writeAll(");\n");
try operand_mat.end(f, inst);
@@ -6444,15 +6489,15 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const atomic_load = f.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
const ptr = try f.resolveInst(atomic_load.ptr);
try reap(f, inst, &.{atomic_load.ptr});
const ptr_ty = f.typeOf(atomic_load.ptr);
- const ty = ptr_ty.childType(mod);
+ const ty = ptr_ty.childType(zcu);
const repr_ty = if (ty.isRuntimeFloat())
- mod.intType(.unsigned, @as(u16, @intCast(ty.abiSize(mod) * 8))) catch unreachable
+ zcu.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable
else
ty;
@@ -6465,7 +6510,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(", (zig_atomic(");
try f.renderType(writer, ty);
try writer.writeByte(')');
- if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile");
+ if (ptr_ty.isVolatilePtr(zcu)) try writer.writeAll(" volatile");
try writer.writeAll(" *)");
try f.writeCValue(writer, ptr, .Other);
try writer.writeAll(", ");
@@ -6473,17 +6518,17 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(", ");
try f.object.dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeAll(", ");
- try f.object.dg.renderType(writer, repr_ty);
+ try f.renderType(writer, repr_ty);
try writer.writeAll(");\n");
return local;
}
fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = f.typeOf(bin_op.lhs);
- const ty = ptr_ty.childType(mod);
+ const ty = ptr_ty.childType(zcu);
const ptr = try f.resolveInst(bin_op.lhs);
const element = try f.resolveInst(bin_op.rhs);
@@ -6492,14 +6537,14 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const repr_ty = if (ty.isRuntimeFloat())
- mod.intType(.unsigned, @as(u16, @intCast(ty.abiSize(mod) * 8))) catch unreachable
+ zcu.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable
else
ty;
try writer.writeAll("zig_atomic_store((zig_atomic(");
try f.renderType(writer, ty);
try writer.writeByte(')');
- if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile");
+ if (ptr_ty.isVolatilePtr(zcu)) try writer.writeAll(" volatile");
try writer.writeAll(" *)");
try f.writeCValue(writer, ptr, .Other);
try writer.writeAll(", ");
@@ -6507,7 +6552,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
try writer.print(", {s}, ", .{order});
try f.object.dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeAll(", ");
- try f.object.dg.renderType(writer, repr_ty);
+ try f.renderType(writer, repr_ty);
try writer.writeAll(");\n");
try element_mat.end(f, inst);
@@ -6515,8 +6560,8 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
}
fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !void {
- const mod = f.object.dg.module;
- if (ptr_ty.isSlice(mod)) {
+ const zcu = f.object.dg.zcu;
+ if (ptr_ty.isSlice(zcu)) {
try f.writeCValueMember(writer, ptr, .{ .identifier = "ptr" });
} else {
try f.writeCValue(writer, ptr, .FunctionArgument);
@@ -6524,14 +6569,14 @@ fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !vo
}
fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const dest_ty = f.typeOf(bin_op.lhs);
const dest_slice = try f.resolveInst(bin_op.lhs);
const value = try f.resolveInst(bin_op.rhs);
const elem_ty = f.typeOf(bin_op.rhs);
- const elem_abi_size = elem_ty.abiSize(mod);
- const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false;
+ const elem_abi_size = elem_ty.abiSize(zcu);
+ const val_is_undef = if (try f.air.value(bin_op.rhs, zcu)) |val| val.isUndefDeep(zcu) else false;
const writer = f.object.writer();
if (val_is_undef) {
@@ -6541,7 +6586,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
try writer.writeAll("memset(");
- switch (dest_ty.ptrSize(mod)) {
+ switch (dest_ty.ptrSize(zcu)) {
.Slice => {
try f.writeCValueMember(writer, dest_slice, .{ .identifier = "ptr" });
try writer.writeAll(", 0xaa, ");
@@ -6553,8 +6598,8 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
},
.One => {
- const array_ty = dest_ty.childType(mod);
- const len = array_ty.arrayLen(mod) * elem_abi_size;
+ const array_ty = dest_ty.childType(zcu);
+ const len = array_ty.arrayLen(zcu) * elem_abi_size;
try f.writeCValue(writer, dest_slice, .FunctionArgument);
try writer.print(", 0xaa, {d});\n", .{len});
@@ -6565,12 +6610,12 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
return .none;
}
- if (elem_abi_size > 1 or dest_ty.isVolatilePtr(mod)) {
+ if (elem_abi_size > 1 or dest_ty.isVolatilePtr(zcu)) {
// For the assignment in this loop, the array pointer needs to get
// casted to a regular pointer, otherwise an error like this occurs:
// error: array type 'uint32_t[20]' (aka 'unsigned int[20]') is not assignable
- const elem_ptr_ty = try mod.ptrType(.{
- .child = elem_ty.ip_index,
+ const elem_ptr_ty = try zcu.ptrType(.{
+ .child = elem_ty.toIntern(),
.flags = .{
.size = .C,
},
@@ -6581,17 +6626,17 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.writeAll("for (");
try f.writeCValue(writer, index, .Other);
try writer.writeAll(" = ");
- try f.object.dg.renderValue(writer, Type.usize, try mod.intValue(Type.usize, 0), .Initializer);
+ try f.object.dg.renderValue(writer, try zcu.intValue(Type.usize, 0), .Initializer);
try writer.writeAll("; ");
try f.writeCValue(writer, index, .Other);
try writer.writeAll(" != ");
- switch (dest_ty.ptrSize(mod)) {
+ switch (dest_ty.ptrSize(zcu)) {
.Slice => {
try f.writeCValueMember(writer, dest_slice, .{ .identifier = "len" });
},
.One => {
- const array_ty = dest_ty.childType(mod);
- try writer.print("{d}", .{array_ty.arrayLen(mod)});
+ const array_ty = dest_ty.childType(zcu);
+ try writer.print("{d}", .{array_ty.arrayLen(zcu)});
},
.Many, .C => unreachable,
}
@@ -6620,7 +6665,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
const bitcasted = try bitcast(f, Type.u8, value, elem_ty);
try writer.writeAll("memset(");
- switch (dest_ty.ptrSize(mod)) {
+ switch (dest_ty.ptrSize(zcu)) {
.Slice => {
try f.writeCValueMember(writer, dest_slice, .{ .identifier = "ptr" });
try writer.writeAll(", ");
@@ -6630,8 +6675,8 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.writeAll(");\n");
},
.One => {
- const array_ty = dest_ty.childType(mod);
- const len = array_ty.arrayLen(mod) * elem_abi_size;
+ const array_ty = dest_ty.childType(zcu);
+ const len = array_ty.arrayLen(zcu) * elem_abi_size;
try f.writeCValue(writer, dest_slice, .FunctionArgument);
try writer.writeAll(", ");
@@ -6646,7 +6691,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const dest_ptr = try f.resolveInst(bin_op.lhs);
const src_ptr = try f.resolveInst(bin_op.rhs);
@@ -6659,42 +6704,32 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(", ");
try writeSliceOrPtr(f, writer, src_ptr, src_ty);
try writer.writeAll(", ");
- switch (dest_ty.ptrSize(mod)) {
- .Slice => {
- const elem_ty = dest_ty.childType(mod);
- const elem_abi_size = elem_ty.abiSize(mod);
- try f.writeCValueMember(writer, dest_ptr, .{ .identifier = "len" });
- if (elem_abi_size > 1) {
- try writer.print(" * {d});\n", .{elem_abi_size});
- } else {
- try writer.writeAll(");\n");
- }
- },
- .One => {
- const array_ty = dest_ty.childType(mod);
- const elem_ty = array_ty.childType(mod);
- const elem_abi_size = elem_ty.abiSize(mod);
- const len = array_ty.arrayLen(mod) * elem_abi_size;
- try writer.print("{d});\n", .{len});
- },
+ switch (dest_ty.ptrSize(zcu)) {
+ .One => try writer.print("{}", .{
+ try f.fmtIntLiteral(try zcu.intValue(Type.usize, dest_ty.childType(zcu).arrayLen(zcu))),
+ }),
.Many, .C => unreachable,
+ .Slice => try f.writeCValueMember(writer, dest_ptr, .{ .identifier = "len" }),
}
+ try writer.writeAll(" * sizeof(");
+ try f.renderType(writer, dest_ty.elemType2(zcu));
+ try writer.writeAll("));\n");
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const union_ptr = try f.resolveInst(bin_op.lhs);
const new_tag = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- const union_ty = f.typeOf(bin_op.lhs).childType(mod);
- const layout = union_ty.unionGetLayout(mod);
+ const union_ty = f.typeOf(bin_op.lhs).childType(zcu);
+ const layout = union_ty.unionGetLayout(zcu);
if (layout.tag_size == 0) return .none;
- const tag_ty = union_ty.unionTagTypeSafety(mod).?;
+ const tag_ty = union_ty.unionTagTypeSafety(zcu).?;
const writer = f.object.writer();
const a = try Assignment.start(f, writer, tag_ty);
@@ -6706,14 +6741,14 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const union_ty = f.typeOf(ty_op.operand);
- const layout = union_ty.unionGetLayout(mod);
+ const layout = union_ty.unionGetLayout(zcu);
if (layout.tag_size == 0) return .none;
const inst_ty = f.typeOfIndex(inst);
@@ -6728,7 +6763,7 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const inst_ty = f.typeOfIndex(inst);
@@ -6740,7 +6775,7 @@ fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue {
const local = try f.allocLocal(inst, inst_ty);
try f.writeCValue(writer, local, .Other);
try writer.print(" = {s}(", .{
- try f.getLazyFnName(.{ .tag_name = enum_ty.getOwnerDecl(mod) }, .{ .tag_name = enum_ty }),
+ try f.getLazyFnName(.{ .tag_name = enum_ty.getOwnerDecl(zcu) }, .{ .tag_name = enum_ty }),
});
try f.writeCValue(writer, operand, .Other);
try writer.writeAll(");\n");
@@ -6765,14 +6800,14 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const inst_ty = f.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType(mod);
+ const inst_scalar_ty = inst_ty.scalarType(zcu);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -6820,7 +6855,7 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data;
@@ -6836,15 +6871,15 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
for (0..extra.mask_len) |index| {
try f.writeCValue(writer, local, .Other);
try writer.writeByte('[');
- try f.object.dg.renderValue(writer, Type.usize, try mod.intValue(Type.usize, index), .Other);
+ try f.object.dg.renderValue(writer, try zcu.intValue(Type.usize, index), .Other);
try writer.writeAll("] = ");
- const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod);
- const src_val = try mod.intValue(Type.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63)));
+ const mask_elem = (try mask.elemValue(zcu, index)).toSignedInt(zcu);
+ const src_val = try zcu.intValue(Type.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63)));
try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other);
try writer.writeByte('[');
- try f.object.dg.renderValue(writer, Type.usize, src_val, .Other);
+ try f.object.dg.renderValue(writer, src_val, .Other);
try writer.writeAll("];\n");
}
@@ -6852,7 +6887,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const reduce = f.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
const scalar_ty = f.typeOfIndex(inst);
@@ -6861,7 +6896,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_ty = f.typeOf(reduce.operand);
const writer = f.object.writer();
- const use_operator = scalar_ty.bitSize(mod) <= 64;
+ const use_operator = scalar_ty.bitSize(zcu) <= 64;
const op: union(enum) {
const Func = struct { operation: []const u8, info: BuiltinInfo = .none };
float_op: Func,
@@ -6872,28 +6907,28 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
.And => if (use_operator) .{ .infix = " &= " } else .{ .builtin = .{ .operation = "and" } },
.Or => if (use_operator) .{ .infix = " |= " } else .{ .builtin = .{ .operation = "or" } },
.Xor => if (use_operator) .{ .infix = " ^= " } else .{ .builtin = .{ .operation = "xor" } },
- .Min => switch (scalar_ty.zigTypeTag(mod)) {
+ .Min => switch (scalar_ty.zigTypeTag(zcu)) {
.Int => if (use_operator) .{ .ternary = " < " } else .{
.builtin = .{ .operation = "min" },
},
.Float => .{ .float_op = .{ .operation = "fmin" } },
else => unreachable,
},
- .Max => switch (scalar_ty.zigTypeTag(mod)) {
+ .Max => switch (scalar_ty.zigTypeTag(zcu)) {
.Int => if (use_operator) .{ .ternary = " > " } else .{
.builtin = .{ .operation = "max" },
},
.Float => .{ .float_op = .{ .operation = "fmax" } },
else => unreachable,
},
- .Add => switch (scalar_ty.zigTypeTag(mod)) {
+ .Add => switch (scalar_ty.zigTypeTag(zcu)) {
.Int => if (use_operator) .{ .infix = " += " } else .{
.builtin = .{ .operation = "addw", .info = .bits },
},
.Float => .{ .builtin = .{ .operation = "add" } },
else => unreachable,
},
- .Mul => switch (scalar_ty.zigTypeTag(mod)) {
+ .Mul => switch (scalar_ty.zigTypeTag(zcu)) {
.Int => if (use_operator) .{ .infix = " *= " } else .{
.builtin = .{ .operation = "mulw", .info = .bits },
},
@@ -6908,7 +6943,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
// Equivalent to:
// reduce: {
// var accum: T = init;
- // for (vec) : (elem) {
+ // for (vec) |elem| {
// accum = func(accum, elem);
// }
// break :reduce accum;
@@ -6918,40 +6953,40 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, accum, .Other);
try writer.writeAll(" = ");
- try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) {
- .Or, .Xor => switch (scalar_ty.zigTypeTag(mod)) {
+ try f.object.dg.renderValue(writer, switch (reduce.operation) {
+ .Or, .Xor => switch (scalar_ty.zigTypeTag(zcu)) {
.Bool => Value.false,
- .Int => try mod.intValue(scalar_ty, 0),
+ .Int => try zcu.intValue(scalar_ty, 0),
else => unreachable,
},
- .And => switch (scalar_ty.zigTypeTag(mod)) {
+ .And => switch (scalar_ty.zigTypeTag(zcu)) {
.Bool => Value.true,
- .Int => switch (scalar_ty.intInfo(mod).signedness) {
- .unsigned => try scalar_ty.maxIntScalar(mod, scalar_ty),
- .signed => try mod.intValue(scalar_ty, -1),
+ .Int => switch (scalar_ty.intInfo(zcu).signedness) {
+ .unsigned => try scalar_ty.maxIntScalar(zcu, scalar_ty),
+ .signed => try zcu.intValue(scalar_ty, -1),
},
else => unreachable,
},
- .Add => switch (scalar_ty.zigTypeTag(mod)) {
- .Int => try mod.intValue(scalar_ty, 0),
- .Float => try mod.floatValue(scalar_ty, 0.0),
+ .Add => switch (scalar_ty.zigTypeTag(zcu)) {
+ .Int => try zcu.intValue(scalar_ty, 0),
+ .Float => try zcu.floatValue(scalar_ty, 0.0),
else => unreachable,
},
- .Mul => switch (scalar_ty.zigTypeTag(mod)) {
- .Int => try mod.intValue(scalar_ty, 1),
- .Float => try mod.floatValue(scalar_ty, 1.0),
+ .Mul => switch (scalar_ty.zigTypeTag(zcu)) {
+ .Int => try zcu.intValue(scalar_ty, 1),
+ .Float => try zcu.floatValue(scalar_ty, 1.0),
else => unreachable,
},
- .Min => switch (scalar_ty.zigTypeTag(mod)) {
+ .Min => switch (scalar_ty.zigTypeTag(zcu)) {
.Bool => Value.true,
- .Int => try scalar_ty.maxIntScalar(mod, scalar_ty),
- .Float => try mod.floatValue(scalar_ty, std.math.nan(f128)),
+ .Int => try scalar_ty.maxIntScalar(zcu, scalar_ty),
+ .Float => try zcu.floatValue(scalar_ty, std.math.nan(f128)),
else => unreachable,
},
- .Max => switch (scalar_ty.zigTypeTag(mod)) {
+ .Max => switch (scalar_ty.zigTypeTag(zcu)) {
.Bool => Value.false,
- .Int => try scalar_ty.minIntScalar(mod, scalar_ty),
- .Float => try mod.floatValue(scalar_ty, std.math.nan(f128)),
+ .Int => try scalar_ty.minIntScalar(zcu, scalar_ty),
+ .Float => try zcu.floatValue(scalar_ty, std.math.nan(f128)),
else => unreachable,
},
}, .Initializer);
@@ -7007,11 +7042,11 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
- const ip = &mod.intern_pool;
+ const zcu = f.object.dg.zcu;
+ const ip = &zcu.intern_pool;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const inst_ty = f.typeOfIndex(inst);
- const len = @as(usize, @intCast(inst_ty.arrayLen(mod)));
+ const len = @as(usize, @intCast(inst_ty.arrayLen(zcu)));
const elements = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[ty_pl.payload..][0..len]));
const gpa = f.object.dg.gpa;
const resolved_elements = try gpa.alloc(CValue, elements.len);
@@ -7028,10 +7063,9 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
- switch (inst_ty.zigTypeTag(mod)) {
- .Array, .Vector => {
- const elem_ty = inst_ty.childType(mod);
- const a = try Assignment.init(f, elem_ty);
+ switch (ip.indexToKey(inst_ty.toIntern())) {
+ inline .array_type, .vector_type => |info, tag| {
+ const a = try Assignment.init(f, Type.fromInterned(info.child));
for (resolved_elements, 0..) |element, i| {
try a.restart(f, writer);
try f.writeCValue(writer, local, .Other);
@@ -7040,94 +7074,112 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, element, .Other);
try a.end(f, writer);
}
- if (inst_ty.sentinel(mod)) |sentinel| {
+ if (tag == .array_type and info.sentinel != .none) {
try a.restart(f, writer);
try f.writeCValue(writer, local, .Other);
- try writer.print("[{d}]", .{resolved_elements.len});
+ try writer.print("[{d}]", .{info.len});
try a.assign(f, writer);
- try f.object.dg.renderValue(writer, elem_ty, sentinel, .Other);
+ try f.object.dg.renderValue(writer, Value.fromInterned(info.sentinel), .Other);
try a.end(f, writer);
}
},
- .Struct => switch (inst_ty.containerLayout(mod)) {
- .auto, .@"extern" => for (resolved_elements, 0..) |element, field_index| {
- if (inst_ty.structFieldIsComptime(field_index, mod)) continue;
- const field_ty = inst_ty.structFieldType(field_index, mod);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
-
- const a = try Assignment.start(f, writer, field_ty);
- try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple(mod))
- .{ .field = field_index }
- else
- .{ .identifier = ip.stringToSlice(inst_ty.legacyStructFieldName(@intCast(field_index), mod)) });
- try a.assign(f, writer);
- try f.writeCValue(writer, element, .Other);
- try a.end(f, writer);
- },
- .@"packed" => {
- try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = ");
- const int_info = inst_ty.intInfo(mod);
-
- const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
+ .struct_type => {
+ const loaded_struct = ip.loadStructType(inst_ty.toIntern());
+ switch (loaded_struct.layout) {
+ .auto, .@"extern" => {
+ var field_it = loaded_struct.iterateRuntimeOrder(ip);
+ while (field_it.next()) |field_index| {
+ const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+
+ const a = try Assignment.start(f, writer, field_ty);
+ try f.writeCValueMember(writer, local, if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name|
+ .{ .identifier = ip.stringToSlice(field_name) }
+ else
+ .{ .field = field_index });
+ try a.assign(f, writer);
+ try f.writeCValue(writer, resolved_elements[field_index], .Other);
+ try a.end(f, writer);
+ }
+ },
+ .@"packed" => {
+ try f.writeCValue(writer, local, .Other);
+ try writer.writeAll(" = ");
+ const int_info = inst_ty.intInfo(zcu);
- var bit_offset: u64 = 0;
+ const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
- var empty = true;
- for (0..elements.len) |field_index| {
- if (inst_ty.structFieldIsComptime(field_index, mod)) continue;
- const field_ty = inst_ty.structFieldType(field_index, mod);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ var bit_offset: u64 = 0;
- if (!empty) {
- try writer.writeAll("zig_or_");
+ var empty = true;
+ for (0..elements.len) |field_index| {
+ if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
+ const field_ty = inst_ty.structFieldType(field_index, zcu);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+
+ if (!empty) {
+ try writer.writeAll("zig_or_");
+ try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
+ try writer.writeByte('(');
+ }
+ empty = false;
+ }
+ empty = true;
+ for (resolved_elements, 0..) |element, field_index| {
+ if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
+ const field_ty = inst_ty.structFieldType(field_index, zcu);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+
+ if (!empty) try writer.writeAll(", ");
+ // TODO: Skip this entire shift if val is 0?
+ try writer.writeAll("zig_shlw_");
try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
try writer.writeByte('(');
- }
- empty = false;
- }
- empty = true;
- for (resolved_elements, 0..) |element, field_index| {
- if (inst_ty.structFieldIsComptime(field_index, mod)) continue;
- const field_ty = inst_ty.structFieldType(field_index, mod);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
-
- if (!empty) try writer.writeAll(", ");
- // TODO: Skip this entire shift if val is 0?
- try writer.writeAll("zig_shlw_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
- try writer.writeByte('(');
- if (inst_ty.isAbiInt(mod) and (field_ty.isAbiInt(mod) or field_ty.isPtrAtRuntime(mod))) {
- try f.renderIntCast(writer, inst_ty, element, .{}, field_ty, .FunctionArgument);
- } else {
- try writer.writeByte('(');
- try f.renderType(writer, inst_ty);
- try writer.writeByte(')');
- if (field_ty.isPtrAtRuntime(mod)) {
+ if (inst_ty.isAbiInt(zcu) and (field_ty.isAbiInt(zcu) or field_ty.isPtrAtRuntime(zcu))) {
+ try f.renderIntCast(writer, inst_ty, element, .{}, field_ty, .FunctionArgument);
+ } else {
try writer.writeByte('(');
- try f.renderType(writer, switch (int_info.signedness) {
- .unsigned => Type.usize,
- .signed => Type.isize,
- });
+ try f.renderType(writer, inst_ty);
try writer.writeByte(')');
+ if (field_ty.isPtrAtRuntime(zcu)) {
+ try writer.writeByte('(');
+ try f.renderType(writer, switch (int_info.signedness) {
+ .unsigned => Type.usize,
+ .signed => Type.isize,
+ });
+ try writer.writeByte(')');
+ }
+ try f.writeCValue(writer, element, .Other);
}
- try f.writeCValue(writer, element, .Other);
- }
-
- try writer.writeAll(", ");
- const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
- try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
- try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits);
- try writer.writeByte(')');
- if (!empty) try writer.writeByte(')');
- bit_offset += field_ty.bitSize(mod);
- empty = false;
- }
+ try writer.print(", {}", .{
+ try f.fmtIntLiteral(try zcu.intValue(bit_offset_ty, bit_offset)),
+ });
+ try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits);
+ try writer.writeByte(')');
+ if (!empty) try writer.writeByte(')');
- try writer.writeAll(";\n");
- },
+ bit_offset += field_ty.bitSize(zcu);
+ empty = false;
+ }
+ try writer.writeAll(";\n");
+ },
+ }
+ },
+ .anon_struct_type => |anon_struct_info| for (0..anon_struct_info.types.len) |field_index| {
+ if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
+ const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+
+ const a = try Assignment.start(f, writer, field_ty);
+ try f.writeCValueMember(writer, local, if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name|
+ .{ .identifier = ip.stringToSlice(field_name) }
+ else
+ .{ .field = field_index });
+ try a.assign(f, writer);
+ try f.writeCValue(writer, resolved_elements[field_index], .Other);
+ try a.end(f, writer);
},
else => unreachable,
}
@@ -7136,21 +7188,21 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
- const ip = &mod.intern_pool;
+ const zcu = f.object.dg.zcu;
+ const ip = &zcu.intern_pool;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data;
const union_ty = f.typeOfIndex(inst);
- const union_obj = mod.typeToUnion(union_ty).?;
- const field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
+ const loaded_union = ip.loadUnionType(union_ty.toIntern());
+ const field_name = loaded_union.loadTagType(ip).names.get(ip)[extra.field_index];
const payload_ty = f.typeOf(extra.init);
const payload = try f.resolveInst(extra.init);
try reap(f, inst, &.{extra.init});
const writer = f.object.writer();
const local = try f.allocLocal(inst, union_ty);
- if (union_obj.getLayout(ip) == .@"packed") {
+ if (loaded_union.getLayout(ip) == .@"packed") {
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
try f.writeCValue(writer, payload, .Initializer);
@@ -7158,19 +7210,16 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
}
- const field: CValue = if (union_ty.unionTagTypeSafety(mod)) |tag_ty| field: {
- const layout = union_ty.unionGetLayout(mod);
+ const field: CValue = if (union_ty.unionTagTypeSafety(zcu)) |tag_ty| field: {
+ const layout = union_ty.unionGetLayout(zcu);
if (layout.tag_size != 0) {
- const field_index = tag_ty.enumFieldIndex(field_name, mod).?;
-
- const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
-
- const int_val = try tag_val.intFromEnum(tag_ty, mod);
+ const field_index = tag_ty.enumFieldIndex(field_name, zcu).?;
+ const tag_val = try zcu.enumValueFieldIndex(tag_ty, field_index);
const a = try Assignment.start(f, writer, tag_ty);
try f.writeCValueMember(writer, local, .{ .identifier = "tag" });
try a.assign(f, writer);
- try writer.print("{}", .{try f.fmtIntLiteral(tag_ty, int_val)});
+ try writer.print("{}", .{try f.fmtIntLiteral(try tag_val.intFromEnum(tag_ty, zcu))});
try a.end(f, writer);
}
break :field .{ .payload_identifier = ip.stringToSlice(field_name) };
@@ -7185,7 +7234,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airPrefetch(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const prefetch = f.air.instructions.items(.data)[@intFromEnum(inst)].prefetch;
const ptr_ty = f.typeOf(prefetch.ptr);
@@ -7196,7 +7245,7 @@ fn airPrefetch(f: *Function, inst: Air.Inst.Index) !CValue {
switch (prefetch.cache) {
.data => {
try writer.writeAll("zig_prefetch(");
- if (ptr_ty.isSlice(mod))
+ if (ptr_ty.isSlice(zcu))
try f.writeCValueMember(writer, ptr, .{ .identifier = "ptr" })
else
try f.writeCValue(writer, ptr, .FunctionArgument);
@@ -7242,14 +7291,14 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
const operand_ty = f.typeOf(un_op);
- const scalar_ty = operand_ty.scalarType(mod);
+ const scalar_ty = operand_ty.scalarType(zcu);
const writer = f.object.writer();
const local = try f.allocLocal(inst, operand_ty);
@@ -7268,15 +7317,15 @@ fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airAbs(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try f.resolveInst(ty_op.operand);
const ty = f.typeOf(ty_op.operand);
- const scalar_ty = ty.scalarType(mod);
+ const scalar_ty = ty.scalarType(zcu);
- switch (scalar_ty.zigTypeTag(mod)) {
- .Int => if (ty.zigTypeTag(mod) == .Vector) {
- return f.fail("TODO implement airAbs for '{}'", .{ty.fmt(mod)});
+ switch (scalar_ty.zigTypeTag(zcu)) {
+ .Int => if (ty.zigTypeTag(zcu) == .Vector) {
+ return f.fail("TODO implement airAbs for '{}'", .{ty.fmt(zcu)});
} else {
return airUnBuiltinCall(f, inst, "abs", .none);
},
@@ -7286,8 +7335,8 @@ fn airAbs(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn unFloatOp(f: *Function, inst: Air.Inst.Index, operand: CValue, ty: Type, operation: []const u8) !CValue {
- const mod = f.object.dg.module;
- const scalar_ty = ty.scalarType(mod);
+ const zcu = f.object.dg.zcu;
+ const scalar_ty = ty.scalarType(zcu);
const writer = f.object.writer();
const local = try f.allocLocal(inst, ty);
@@ -7316,7 +7365,7 @@ fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVal
}
fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try f.resolveInst(bin_op.lhs);
@@ -7324,7 +7373,7 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const inst_ty = f.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType(mod);
+ const inst_scalar_ty = inst_ty.scalarType(zcu);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -7346,7 +7395,7 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa
}
fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const bin_op = f.air.extraData(Air.Bin, pl_op.payload).data;
@@ -7356,7 +7405,7 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand });
const inst_ty = f.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType(mod);
+ const inst_scalar_ty = inst_ty.scalarType(zcu);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -7381,20 +7430,20 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue {
- const mod = f.object.dg.module;
+ const zcu = f.object.dg.zcu;
const inst_ty = f.typeOfIndex(inst);
const decl_index = f.object.dg.pass.decl;
- const decl = mod.declPtr(decl_index);
- const fn_cty = try f.typeToCType(decl.typeOf(mod), .complete);
- const param_len = fn_cty.castTag(.varargs_function).?.data.param_types.len;
+ const decl = zcu.declPtr(decl_index);
+ const function_ctype = try f.ctypeFromType(decl.typeOf(zcu), .complete);
+ const params_len = function_ctype.info(&f.object.dg.ctype_pool).function.param_ctypes.len;
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
try writer.writeAll("va_start(*(va_list *)&");
try f.writeCValue(writer, local, .Other);
- if (param_len > 0) {
+ if (params_len > 0) {
try writer.writeAll(", ");
- try f.writeCValue(writer, .{ .arg = param_len - 1 }, .FunctionArgument);
+ try f.writeCValue(writer, .{ .arg = params_len - 1 }, .FunctionArgument);
}
try writer.writeAll(");\n");
return local;
@@ -7589,9 +7638,8 @@ fn signAbbrev(signedness: std.builtin.Signedness) u8 {
};
}
-fn compilerRtAbbrev(ty: Type, mod: *Module) []const u8 {
- const target = mod.getTarget();
- return if (ty.isInt(mod)) switch (ty.intInfo(mod).bits) {
+fn compilerRtAbbrev(ty: Type, zcu: *Zcu, target: std.Target) []const u8 {
+ return if (ty.isInt(zcu)) switch (ty.intInfo(zcu).bits) {
1...32 => "si",
33...64 => "di",
65...128 => "ti",
@@ -7744,7 +7792,7 @@ const FormatIntLiteralContext = struct {
dg: *DeclGen,
int_info: InternPool.Key.IntType,
kind: CType.Kind,
- cty: CType,
+ ctype: CType,
val: Value,
};
fn formatIntLiteral(
@@ -7753,8 +7801,9 @@ fn formatIntLiteral(
options: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
- const mod = data.dg.module;
- const target = mod.getTarget();
+ const zcu = data.dg.zcu;
+ const target = &data.dg.mod.resolved_target.result;
+ const ctype_pool = &data.dg.ctype_pool;
const ExpectedContents = struct {
const base = 10;
@@ -7774,7 +7823,7 @@ fn formatIntLiteral(
defer allocator.free(undef_limbs);
var int_buf: Value.BigIntSpace = undefined;
- const int = if (data.val.isUndefDeep(mod)) blk: {
+ const int = if (data.val.isUndefDeep(zcu)) blk: {
undef_limbs = try allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(data.int_info.bits));
@memset(undef_limbs, undefPattern(BigIntLimb));
@@ -7785,10 +7834,10 @@ fn formatIntLiteral(
};
undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits);
break :blk undef_int.toConst();
- } else data.val.toBigInt(&int_buf, mod);
+ } else data.val.toBigInt(&int_buf, zcu);
assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits));
- const c_bits: usize = @intCast(data.cty.byteSize(data.dg.ctypes.set, target) * 8);
+ const c_bits: usize = @intCast(data.ctype.byteSize(ctype_pool, data.dg.mod) * 8);
var one_limbs: [BigInt.calcLimbLen(1)]BigIntLimb = undefined;
const one = BigInt.Mutable.init(&one_limbs, 1).toConst();
@@ -7800,45 +7849,45 @@ fn formatIntLiteral(
defer allocator.free(wrap.limbs);
const c_limb_info: struct {
- cty: CType,
+ ctype: CType,
count: usize,
endian: std.builtin.Endian,
homogeneous: bool,
- } = switch (data.cty.tag()) {
- else => .{
- .cty = CType.initTag(.void),
- .count = 1,
- .endian = .little,
- .homogeneous = true,
- },
- .zig_u128, .zig_i128 => .{
- .cty = CType.initTag(.uint64_t),
- .count = 2,
- .endian = .big,
- .homogeneous = false,
- },
- .array => info: {
- const array_data = data.cty.castTag(.array).?.data;
- break :info .{
- .cty = data.dg.indexToCType(array_data.elem_type),
- .count = @as(usize, @intCast(array_data.len)),
- .endian = target.cpu.arch.endian(),
+ } = switch (data.ctype.info(ctype_pool)) {
+ .basic => |basic_info| switch (basic_info) {
+ else => .{
+ .ctype = .{ .index = .void },
+ .count = 1,
+ .endian = .little,
.homogeneous = true,
- };
+ },
+ .zig_u128, .zig_i128 => .{
+ .ctype = .{ .index = .uint64_t },
+ .count = 2,
+ .endian = .big,
+ .homogeneous = false,
+ },
},
+ .array => |array_info| .{
+ .ctype = array_info.elem_ctype,
+ .count = @intCast(array_info.len),
+ .endian = target.cpu.arch.endian(),
+ .homogeneous = true,
+ },
+ else => unreachable,
};
if (c_limb_info.count == 1) {
if (wrap.addWrap(int, one, data.int_info.signedness, c_bits) or
data.int_info.signedness == .signed and wrap.subWrap(int, one, data.int_info.signedness, c_bits))
return writer.print("{s}_{s}", .{
- data.cty.getStandardDefineAbbrev() orelse return writer.print("zig_{s}Int_{c}{d}", .{
+ data.ctype.getStandardDefineAbbrev() orelse return writer.print("zig_{s}Int_{c}{d}", .{
if (int.positive) "max" else "min", signAbbrev(data.int_info.signedness), c_bits,
}),
if (int.positive) "MAX" else "MIN",
});
if (!int.positive) try writer.writeByte('-');
- try data.cty.renderLiteralPrefix(writer, data.kind);
+ try data.ctype.renderLiteralPrefix(writer, data.kind, ctype_pool);
const style: struct { base: u8, case: std.fmt.Case = undefined } = switch (fmt.len) {
0 => .{ .base = 10 },
@@ -7869,7 +7918,7 @@ fn formatIntLiteral(
defer allocator.free(string);
try writer.writeAll(string);
} else {
- try data.cty.renderLiteralPrefix(writer, data.kind);
+ try data.ctype.renderLiteralPrefix(writer, data.kind, ctype_pool);
wrap.convertToTwosComplement(int, data.int_info.signedness, c_bits);
@memset(wrap.limbs[wrap.len..], 0);
wrap.len = wrap.limbs.len;
@@ -7879,7 +7928,7 @@ fn formatIntLiteral(
.signedness = undefined,
.bits = @as(u16, @intCast(@divExact(c_bits, c_limb_info.count))),
};
- var c_limb_cty: CType = undefined;
+ var c_limb_ctype: CType = undefined;
var limb_offset: usize = 0;
const most_significant_limb_i = wrap.len - limbs_per_c_limb;
@@ -7900,7 +7949,7 @@ fn formatIntLiteral(
{
// most significant limb is actually signed
c_limb_int_info.signedness = .signed;
- c_limb_cty = c_limb_info.cty.toSigned();
+ c_limb_ctype = c_limb_info.ctype.toSigned();
c_limb_mut.positive = wrap.positive;
c_limb_mut.truncate(
@@ -7910,7 +7959,7 @@ fn formatIntLiteral(
);
} else {
c_limb_int_info.signedness = .unsigned;
- c_limb_cty = c_limb_info.cty;
+ c_limb_ctype = c_limb_info.ctype;
}
if (limb_offset > 0) try writer.writeAll(", ");
@@ -7918,12 +7967,12 @@ fn formatIntLiteral(
.dg = data.dg,
.int_info = c_limb_int_info,
.kind = data.kind,
- .cty = c_limb_cty,
- .val = try mod.intValue_big(Type.comptime_int, c_limb_mut.toConst()),
+ .ctype = c_limb_ctype,
+ .val = try zcu.intValue_big(Type.comptime_int, c_limb_mut.toConst()),
}, fmt, options, writer);
}
}
- try data.cty.renderLiteralSuffix(writer);
+ try data.ctype.renderLiteralSuffix(writer, ctype_pool);
}
const Materialize = struct {
@@ -7966,10 +8015,10 @@ const Materialize = struct {
};
const Assignment = struct {
- cty: CType.Index,
+ ctype: CType,
pub fn init(f: *Function, ty: Type) !Assignment {
- return .{ .cty = try f.typeToIndex(ty, .complete) };
+ return .{ .ctype = try f.ctypeFromType(ty, .complete) };
}
pub fn start(f: *Function, writer: anytype, ty: Type) !Assignment {
@@ -7997,7 +8046,7 @@ const Assignment = struct {
.assign => {},
.memcpy => {
try writer.writeAll(", sizeof(");
- try f.renderCType(writer, self.cty);
+ try f.renderCType(writer, self.ctype);
try writer.writeAll("))");
},
}
@@ -8005,7 +8054,7 @@ const Assignment = struct {
}
fn strategy(self: Assignment, f: *Function) enum { assign, memcpy } {
- return switch (f.indexToCType(self.cty).tag()) {
+ return switch (self.ctype.info(&f.object.dg.ctype_pool)) {
else => .assign,
.array, .vector => .memcpy,
};
@@ -8016,21 +8065,17 @@ const Vectorize = struct {
index: CValue = .none,
pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize {
- const mod = f.object.dg.module;
- return if (ty.zigTypeTag(mod) == .Vector) index: {
- const len_val = try mod.intValue(Type.usize, ty.vectorLen(mod));
-
+ const zcu = f.object.dg.zcu;
+ return if (ty.zigTypeTag(zcu) == .Vector) index: {
const local = try f.allocLocal(inst, Type.usize);
try writer.writeAll("for (");
try f.writeCValue(writer, local, .Other);
- try writer.print(" = {d}; ", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 0))});
+ try writer.print(" = {d}; ", .{try f.fmtIntLiteral(try zcu.intValue(Type.usize, 0))});
try f.writeCValue(writer, local, .Other);
- try writer.print(" < {d}; ", .{
- try f.fmtIntLiteral(Type.usize, len_val),
- });
+ try writer.print(" < {d}; ", .{try f.fmtIntLiteral(try zcu.intValue(Type.usize, ty.vectorLen(zcu)))});
try f.writeCValue(writer, local, .Other);
- try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))});
+ try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(try zcu.intValue(Type.usize, 1))});
f.object.indent_writer.pushIndent();
break :index .{ .index = local };
@@ -8054,32 +8099,10 @@ const Vectorize = struct {
}
};
-fn lowerFnRetTy(ret_ty: Type, mod: *Module) !Type {
- if (ret_ty.ip_index == .noreturn_type) return Type.noreturn;
-
- if (lowersToArray(ret_ty, mod)) {
- const gpa = mod.gpa;
- const ip = &mod.intern_pool;
- const names = [1]InternPool.NullTerminatedString{
- try ip.getOrPutString(gpa, "array"),
- };
- const types = [1]InternPool.Index{ret_ty.ip_index};
- const values = [1]InternPool.Index{.none};
- const interned = try ip.getAnonStructType(gpa, .{
- .names = &names,
- .types = &types,
- .values = &values,
- });
- return Type.fromInterned(interned);
- }
-
- return if (ret_ty.hasRuntimeBitsIgnoreComptime(mod)) ret_ty else Type.void;
-}
-
-fn lowersToArray(ty: Type, mod: *Module) bool {
- return switch (ty.zigTypeTag(mod)) {
+fn lowersToArray(ty: Type, zcu: *Zcu) bool {
+ return switch (ty.zigTypeTag(zcu)) {
.Array, .Vector => return true,
- else => return ty.isAbiInt(mod) and toCIntBits(@as(u32, @intCast(ty.bitSize(mod)))) == null,
+ else => return ty.isAbiInt(zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(zcu)))) == null,
};
}
@@ -8098,7 +8121,7 @@ fn die(f: *Function, inst: Air.Inst.Index, ref: Air.Inst.Ref) !void {
const ref_inst = ref.toIndex() orelse return;
const c_value = (f.value_map.fetchRemove(ref) orelse return).value;
const local_index = switch (c_value) {
- .local, .new_local => |l| l,
+ .new_local, .local => |l| l,
else => return,
};
try freeLocal(f, inst, local_index, ref_inst);
diff --git a/src/codegen/c/Type.zig b/src/codegen/c/Type.zig
new file mode 100644
index 000000000000..1c460acc6ba4
--- /dev/null
+++ b/src/codegen/c/Type.zig
@@ -0,0 +1,2491 @@
+index: CType.Index,
+
+pub fn fromPoolIndex(pool_index: usize) CType {
+ return .{ .index = @enumFromInt(CType.Index.first_pool_index + pool_index) };
+}
+
+pub fn toPoolIndex(ctype: CType) ?u32 {
+ const pool_index, const is_basic =
+ @subWithOverflow(@intFromEnum(ctype.index), CType.Index.first_pool_index);
+ return switch (is_basic) {
+ 0 => pool_index,
+ 1 => null,
+ };
+}
+
+pub fn eql(lhs: CType, rhs: CType) bool {
+ return lhs.index == rhs.index;
+}
+
+pub fn isBool(ctype: CType) bool {
+ return switch (ctype.index) {
+ ._Bool, .bool => true,
+ else => false,
+ };
+}
+
+pub fn isInteger(ctype: CType) bool {
+ return switch (ctype.index) {
+ .char,
+ .@"signed char",
+ .short,
+ .int,
+ .long,
+ .@"long long",
+ .@"unsigned char",
+ .@"unsigned short",
+ .@"unsigned int",
+ .@"unsigned long",
+ .@"unsigned long long",
+ .size_t,
+ .ptrdiff_t,
+ .uint8_t,
+ .int8_t,
+ .uint16_t,
+ .int16_t,
+ .uint32_t,
+ .int32_t,
+ .uint64_t,
+ .int64_t,
+ .uintptr_t,
+ .intptr_t,
+ .zig_u128,
+ .zig_i128,
+ => true,
+ else => false,
+ };
+}
+
+pub fn signedness(ctype: CType, mod: *Module) std.builtin.Signedness {
+ return switch (ctype.index) {
+ .char => mod.resolved_target.result.charSignedness(),
+ .@"signed char",
+ .short,
+ .int,
+ .long,
+ .@"long long",
+ .ptrdiff_t,
+ .int8_t,
+ .int16_t,
+ .int32_t,
+ .int64_t,
+ .intptr_t,
+ .zig_i128,
+ => .signed,
+ .@"unsigned char",
+ .@"unsigned short",
+ .@"unsigned int",
+ .@"unsigned long",
+ .@"unsigned long long",
+ .size_t,
+ .uint8_t,
+ .uint16_t,
+ .uint32_t,
+ .uint64_t,
+ .uintptr_t,
+ .zig_u128,
+ => .unsigned,
+ else => unreachable,
+ };
+}
+
+pub fn isFloat(ctype: CType) bool {
+ return switch (ctype.index) {
+ .float,
+ .double,
+ .@"long double",
+ .zig_f16,
+ .zig_f32,
+ .zig_f64,
+ .zig_f80,
+ .zig_f128,
+ .zig_c_longdouble,
+ => true,
+ else => false,
+ };
+}
+
+pub fn toSigned(ctype: CType) CType {
+ return switch (ctype.index) {
+ .char, .@"signed char", .@"unsigned char" => .{ .index = .@"signed char" },
+ .short, .@"unsigned short" => .{ .index = .short },
+ .int, .@"unsigned int" => .{ .index = .int },
+ .long, .@"unsigned long" => .{ .index = .long },
+ .@"long long", .@"unsigned long long" => .{ .index = .@"long long" },
+ .size_t, .ptrdiff_t => .{ .index = .ptrdiff_t },
+ .uint8_t, .int8_t => .{ .index = .int8_t },
+ .uint16_t, .int16_t => .{ .index = .int16_t },
+ .uint32_t, .int32_t => .{ .index = .int32_t },
+ .uint64_t, .int64_t => .{ .index = .int64_t },
+ .uintptr_t, .intptr_t => .{ .index = .intptr_t },
+ .zig_u128, .zig_i128 => .{ .index = .zig_i128 },
+ .float,
+ .double,
+ .@"long double",
+ .zig_f16,
+ .zig_f32,
+ .zig_f80,
+ .zig_f128,
+ .zig_c_longdouble,
+ => ctype,
+ else => unreachable,
+ };
+}
+
+pub fn toUnsigned(ctype: CType) CType {
+ return switch (ctype.index) {
+ .char, .@"signed char", .@"unsigned char" => .{ .index = .@"unsigned char" },
+ .short, .@"unsigned short" => .{ .index = .@"unsigned short" },
+ .int, .@"unsigned int" => .{ .index = .@"unsigned int" },
+ .long, .@"unsigned long" => .{ .index = .@"unsigned long" },
+ .@"long long", .@"unsigned long long" => .{ .index = .@"unsigned long long" },
+ .size_t, .ptrdiff_t => .{ .index = .size_t },
+ .uint8_t, .int8_t => .{ .index = .uint8_t },
+ .uint16_t, .int16_t => .{ .index = .uint16_t },
+ .uint32_t, .int32_t => .{ .index = .uint32_t },
+ .uint64_t, .int64_t => .{ .index = .uint64_t },
+ .uintptr_t, .intptr_t => .{ .index = .uintptr_t },
+ .zig_u128, .zig_i128 => .{ .index = .zig_u128 },
+ else => unreachable,
+ };
+}
+
+pub fn toSignedness(ctype: CType, s: std.builtin.Signedness) CType {
+ return switch (s) {
+ .unsigned => ctype.toUnsigned(),
+ .signed => ctype.toSigned(),
+ };
+}
+
+pub fn getStandardDefineAbbrev(ctype: CType) ?[]const u8 {
+ return switch (ctype.index) {
+ .char => "CHAR",
+ .@"signed char" => "SCHAR",
+ .short => "SHRT",
+ .int => "INT",
+ .long => "LONG",
+ .@"long long" => "LLONG",
+ .@"unsigned char" => "UCHAR",
+ .@"unsigned short" => "USHRT",
+ .@"unsigned int" => "UINT",
+ .@"unsigned long" => "ULONG",
+ .@"unsigned long long" => "ULLONG",
+ .float => "FLT",
+ .double => "DBL",
+ .@"long double" => "LDBL",
+ .size_t => "SIZE",
+ .ptrdiff_t => "PTRDIFF",
+ .uint8_t => "UINT8",
+ .int8_t => "INT8",
+ .uint16_t => "UINT16",
+ .int16_t => "INT16",
+ .uint32_t => "UINT32",
+ .int32_t => "INT32",
+ .uint64_t => "UINT64",
+ .int64_t => "INT64",
+ .uintptr_t => "UINTPTR",
+ .intptr_t => "INTPTR",
+ else => null,
+ };
+}
+
+pub fn renderLiteralPrefix(ctype: CType, writer: anytype, kind: Kind, pool: *const Pool) @TypeOf(writer).Error!void {
+ switch (ctype.info(pool)) {
+ .basic => |basic_info| switch (basic_info) {
+ .void => unreachable,
+ ._Bool,
+ .char,
+ .@"signed char",
+ .short,
+ .@"unsigned short",
+ .bool,
+ .size_t,
+ .ptrdiff_t,
+ .uintptr_t,
+ .intptr_t,
+ => switch (kind) {
+ else => try writer.print("({s})", .{@tagName(basic_info)}),
+ .global => {},
+ },
+ .int,
+ .long,
+ .@"long long",
+ .@"unsigned char",
+ .@"unsigned int",
+ .@"unsigned long",
+ .@"unsigned long long",
+ .float,
+ .double,
+ .@"long double",
+ => {},
+ .uint8_t,
+ .int8_t,
+ .uint16_t,
+ .int16_t,
+ .uint32_t,
+ .int32_t,
+ .uint64_t,
+ .int64_t,
+ => try writer.print("{s}_C(", .{ctype.getStandardDefineAbbrev().?}),
+ .zig_u128,
+ .zig_i128,
+ .zig_f16,
+ .zig_f32,
+ .zig_f64,
+ .zig_f80,
+ .zig_f128,
+ .zig_c_longdouble,
+ => try writer.print("zig_{s}_{s}(", .{
+ switch (kind) {
+ else => "make",
+ .global => "init",
+ },
+ @tagName(basic_info)["zig_".len..],
+ }),
+ .va_list => unreachable,
+ _ => unreachable,
+ },
+ .array, .vector => try writer.writeByte('{'),
+ else => unreachable,
+ }
+}
+
+pub fn renderLiteralSuffix(ctype: CType, writer: anytype, pool: *const Pool) @TypeOf(writer).Error!void {
+ switch (ctype.info(pool)) {
+ .basic => |basic_info| switch (basic_info) {
+ .void => unreachable,
+ ._Bool => {},
+ .char,
+ .@"signed char",
+ .short,
+ .int,
+ => {},
+ .long => try writer.writeByte('l'),
+ .@"long long" => try writer.writeAll("ll"),
+ .@"unsigned char",
+ .@"unsigned short",
+ .@"unsigned int",
+ => try writer.writeByte('u'),
+ .@"unsigned long",
+ .size_t,
+ .uintptr_t,
+ => try writer.writeAll("ul"),
+ .@"unsigned long long" => try writer.writeAll("ull"),
+ .float => try writer.writeByte('f'),
+ .double => {},
+ .@"long double" => try writer.writeByte('l'),
+ .bool,
+ .ptrdiff_t,
+ .intptr_t,
+ => {},
+ .uint8_t,
+ .int8_t,
+ .uint16_t,
+ .int16_t,
+ .uint32_t,
+ .int32_t,
+ .uint64_t,
+ .int64_t,
+ .zig_u128,
+ .zig_i128,
+ .zig_f16,
+ .zig_f32,
+ .zig_f64,
+ .zig_f80,
+ .zig_f128,
+ .zig_c_longdouble,
+ => try writer.writeByte(')'),
+ .va_list => unreachable,
+ _ => unreachable,
+ },
+ .array, .vector => try writer.writeByte('}'),
+ else => unreachable,
+ }
+}
+
+pub fn floatActiveBits(ctype: CType, mod: *Module) u16 {
+ const target = &mod.resolved_target.result;
+ return switch (ctype.index) {
+ .float => target.c_type_bit_size(.float),
+ .double => target.c_type_bit_size(.double),
+ .@"long double", .zig_c_longdouble => target.c_type_bit_size(.longdouble),
+ .zig_f16 => 16,
+ .zig_f32 => 32,
+ .zig_f64 => 64,
+ .zig_f80 => 80,
+ .zig_f128 => 128,
+ else => unreachable,
+ };
+}
+
+pub fn byteSize(ctype: CType, pool: *const Pool, mod: *Module) u64 {
+ const target = &mod.resolved_target.result;
+ return switch (ctype.info(pool)) {
+ .basic => |basic_info| switch (basic_info) {
+ .void => 0,
+ .char, .@"signed char", ._Bool, .@"unsigned char", .bool, .uint8_t, .int8_t => 1,
+ .short => target.c_type_byte_size(.short),
+ .int => target.c_type_byte_size(.int),
+ .long => target.c_type_byte_size(.long),
+ .@"long long" => target.c_type_byte_size(.longlong),
+ .@"unsigned short" => target.c_type_byte_size(.ushort),
+ .@"unsigned int" => target.c_type_byte_size(.uint),
+ .@"unsigned long" => target.c_type_byte_size(.ulong),
+ .@"unsigned long long" => target.c_type_byte_size(.ulonglong),
+ .float => target.c_type_byte_size(.float),
+ .double => target.c_type_byte_size(.double),
+ .@"long double" => target.c_type_byte_size(.longdouble),
+ .size_t,
+ .ptrdiff_t,
+ .uintptr_t,
+ .intptr_t,
+ => @divExact(target.ptrBitWidth(), 8),
+ .uint16_t, .int16_t, .zig_f16 => 2,
+ .uint32_t, .int32_t, .zig_f32 => 4,
+ .uint64_t, .int64_t, .zig_f64 => 8,
+ .zig_u128, .zig_i128, .zig_f128 => 16,
+ .zig_f80 => if (target.c_type_bit_size(.longdouble) == 80)
+ target.c_type_byte_size(.longdouble)
+ else
+ 16,
+ .zig_c_longdouble => target.c_type_byte_size(.longdouble),
+ .va_list => unreachable,
+ _ => unreachable,
+ },
+ .pointer => @divExact(target.ptrBitWidth(), 8),
+ .array, .vector => |sequence_info| sequence_info.elem_ctype.byteSize(pool, mod) * sequence_info.len,
+ else => unreachable,
+ };
+}
+
+pub fn info(ctype: CType, pool: *const Pool) Info {
+ const pool_index = ctype.toPoolIndex() orelse return .{ .basic = ctype.index };
+ const item = pool.items.get(pool_index);
+ switch (item.tag) {
+ .basic => unreachable,
+ .pointer => return .{ .pointer = .{
+ .elem_ctype = .{ .index = @enumFromInt(item.data) },
+ } },
+ .pointer_const => return .{ .pointer = .{
+ .elem_ctype = .{ .index = @enumFromInt(item.data) },
+ .@"const" = true,
+ } },
+ .pointer_volatile => return .{ .pointer = .{
+ .elem_ctype = .{ .index = @enumFromInt(item.data) },
+ .@"volatile" = true,
+ } },
+ .pointer_const_volatile => return .{ .pointer = .{
+ .elem_ctype = .{ .index = @enumFromInt(item.data) },
+ .@"const" = true,
+ .@"volatile" = true,
+ } },
+ .aligned => {
+ const extra = pool.getExtra(Pool.Aligned, item.data);
+ return .{ .aligned = .{
+ .ctype = .{ .index = extra.ctype },
+ .alignas = extra.flags.alignas,
+ } };
+ },
+ .array_small => {
+ const extra = pool.getExtra(Pool.SequenceSmall, item.data);
+ return .{ .array = .{
+ .elem_ctype = .{ .index = extra.elem_ctype },
+ .len = extra.len,
+ } };
+ },
+ .array_large => {
+ const extra = pool.getExtra(Pool.SequenceLarge, item.data);
+ return .{ .array = .{
+ .elem_ctype = .{ .index = extra.elem_ctype },
+ .len = extra.len(),
+ } };
+ },
+ .vector => {
+ const extra = pool.getExtra(Pool.SequenceSmall, item.data);
+ return .{ .vector = .{
+ .elem_ctype = .{ .index = extra.elem_ctype },
+ .len = extra.len,
+ } };
+ },
+ .fwd_decl_struct_anon => {
+ const extra_trail = pool.getExtraTrail(Pool.FwdDeclAnon, item.data);
+ return .{ .fwd_decl = .{
+ .tag = .@"struct",
+ .name = .{ .anon = .{
+ .extra_index = extra_trail.trail.extra_index,
+ .len = extra_trail.extra.fields_len,
+ } },
+ } };
+ },
+ .fwd_decl_union_anon => {
+ const extra_trail = pool.getExtraTrail(Pool.FwdDeclAnon, item.data);
+ return .{ .fwd_decl = .{
+ .tag = .@"union",
+ .name = .{ .anon = .{
+ .extra_index = extra_trail.trail.extra_index,
+ .len = extra_trail.extra.fields_len,
+ } },
+ } };
+ },
+ .fwd_decl_struct => return .{ .fwd_decl = .{
+ .tag = .@"struct",
+ .name = .{ .owner_decl = @enumFromInt(item.data) },
+ } },
+ .fwd_decl_union => return .{ .fwd_decl = .{
+ .tag = .@"union",
+ .name = .{ .owner_decl = @enumFromInt(item.data) },
+ } },
+ .aggregate_struct_anon => {
+ const extra_trail = pool.getExtraTrail(Pool.AggregateAnon, item.data);
+ return .{ .aggregate = .{
+ .tag = .@"struct",
+ .name = .{ .anon = .{
+ .owner_decl = extra_trail.extra.owner_decl,
+ .id = extra_trail.extra.id,
+ } },
+ .fields = .{
+ .extra_index = extra_trail.trail.extra_index,
+ .len = extra_trail.extra.fields_len,
+ },
+ } };
+ },
+ .aggregate_union_anon => {
+ const extra_trail = pool.getExtraTrail(Pool.AggregateAnon, item.data);
+ return .{ .aggregate = .{
+ .tag = .@"union",
+ .name = .{ .anon = .{
+ .owner_decl = extra_trail.extra.owner_decl,
+ .id = extra_trail.extra.id,
+ } },
+ .fields = .{
+ .extra_index = extra_trail.trail.extra_index,
+ .len = extra_trail.extra.fields_len,
+ },
+ } };
+ },
+ .aggregate_struct_packed_anon => {
+ const extra_trail = pool.getExtraTrail(Pool.AggregateAnon, item.data);
+ return .{ .aggregate = .{
+ .tag = .@"struct",
+ .@"packed" = true,
+ .name = .{ .anon = .{
+ .owner_decl = extra_trail.extra.owner_decl,
+ .id = extra_trail.extra.id,
+ } },
+ .fields = .{
+ .extra_index = extra_trail.trail.extra_index,
+ .len = extra_trail.extra.fields_len,
+ },
+ } };
+ },
+ .aggregate_union_packed_anon => {
+ const extra_trail = pool.getExtraTrail(Pool.AggregateAnon, item.data);
+ return .{ .aggregate = .{
+ .tag = .@"union",
+ .@"packed" = true,
+ .name = .{ .anon = .{
+ .owner_decl = extra_trail.extra.owner_decl,
+ .id = extra_trail.extra.id,
+ } },
+ .fields = .{
+ .extra_index = extra_trail.trail.extra_index,
+ .len = extra_trail.extra.fields_len,
+ },
+ } };
+ },
+ .aggregate_struct => {
+ const extra_trail = pool.getExtraTrail(Pool.Aggregate, item.data);
+ return .{ .aggregate = .{
+ .tag = .@"struct",
+ .name = .{ .fwd_decl = .{ .index = extra_trail.extra.fwd_decl } },
+ .fields = .{
+ .extra_index = extra_trail.trail.extra_index,
+ .len = extra_trail.extra.fields_len,
+ },
+ } };
+ },
+ .aggregate_union => {
+ const extra_trail = pool.getExtraTrail(Pool.Aggregate, item.data);
+ return .{ .aggregate = .{
+ .tag = .@"union",
+ .name = .{ .fwd_decl = .{ .index = extra_trail.extra.fwd_decl } },
+ .fields = .{
+ .extra_index = extra_trail.trail.extra_index,
+ .len = extra_trail.extra.fields_len,
+ },
+ } };
+ },
+ .aggregate_struct_packed => {
+ const extra_trail = pool.getExtraTrail(Pool.Aggregate, item.data);
+ return .{ .aggregate = .{
+ .tag = .@"struct",
+ .@"packed" = true,
+ .name = .{ .fwd_decl = .{ .index = extra_trail.extra.fwd_decl } },
+ .fields = .{
+ .extra_index = extra_trail.trail.extra_index,
+ .len = extra_trail.extra.fields_len,
+ },
+ } };
+ },
+ .aggregate_union_packed => {
+ const extra_trail = pool.getExtraTrail(Pool.Aggregate, item.data);
+ return .{ .aggregate = .{
+ .tag = .@"union",
+ .@"packed" = true,
+ .name = .{ .fwd_decl = .{ .index = extra_trail.extra.fwd_decl } },
+ .fields = .{
+ .extra_index = extra_trail.trail.extra_index,
+ .len = extra_trail.extra.fields_len,
+ },
+ } };
+ },
+ .function => {
+ const extra_trail = pool.getExtraTrail(Pool.Function, item.data);
+ return .{ .function = .{
+ .return_ctype = .{ .index = extra_trail.extra.return_ctype },
+ .param_ctypes = .{
+ .extra_index = extra_trail.trail.extra_index,
+ .len = extra_trail.extra.param_ctypes_len,
+ },
+ .varargs = false,
+ } };
+ },
+ .function_varargs => {
+ const extra_trail = pool.getExtraTrail(Pool.Function, item.data);
+ return .{ .function = .{
+ .return_ctype = .{ .index = extra_trail.extra.return_ctype },
+ .param_ctypes = .{
+ .extra_index = extra_trail.trail.extra_index,
+ .len = extra_trail.extra.param_ctypes_len,
+ },
+ .varargs = true,
+ } };
+ },
+ }
+}
+
+pub fn hash(ctype: CType, pool: *const Pool) Pool.Map.Hash {
+ return if (ctype.toPoolIndex()) |pool_index|
+ pool.map.entries.items(.hash)[pool_index]
+ else
+ CType.Index.basic_hashes[@intFromEnum(ctype.index)];
+}
+
+fn toForward(ctype: CType, pool: *Pool, allocator: std.mem.Allocator) !CType {
+ return switch (ctype.info(pool)) {
+ .basic, .pointer, .fwd_decl => ctype,
+ .aligned => |aligned_info| pool.getAligned(allocator, .{
+ .ctype = try aligned_info.ctype.toForward(pool, allocator),
+ .alignas = aligned_info.alignas,
+ }),
+ .array => |array_info| pool.getArray(allocator, .{
+ .elem_ctype = try array_info.elem_ctype.toForward(pool, allocator),
+ .len = array_info.len,
+ }),
+ .vector => |vector_info| pool.getVector(allocator, .{
+ .elem_ctype = try vector_info.elem_ctype.toForward(pool, allocator),
+ .len = vector_info.len,
+ }),
+ .aggregate => |aggregate_info| switch (aggregate_info.name) {
+ .anon => ctype,
+ .fwd_decl => |fwd_decl| fwd_decl,
+ },
+ .function => unreachable,
+ };
+}
+
+const Index = enum(u32) {
+ void,
+
+ // C basic types
+ char,
+
+ @"signed char",
+ short,
+ int,
+ long,
+ @"long long",
+
+ _Bool,
+ @"unsigned char",
+ @"unsigned short",
+ @"unsigned int",
+ @"unsigned long",
+ @"unsigned long long",
+
+ float,
+ double,
+ @"long double",
+
+ // C header types
+ // - stdbool.h
+ bool,
+ // - stddef.h
+ size_t,
+ ptrdiff_t,
+ // - stdint.h
+ uint8_t,
+ int8_t,
+ uint16_t,
+ int16_t,
+ uint32_t,
+ int32_t,
+ uint64_t,
+ int64_t,
+ uintptr_t,
+ intptr_t,
+ // - stdarg.h
+ va_list,
+
+ // zig.h types
+ zig_u128,
+ zig_i128,
+ zig_f16,
+ zig_f32,
+ zig_f64,
+ zig_f80,
+ zig_f128,
+ zig_c_longdouble,
+
+ _,
+
+ const first_pool_index: u32 = @typeInfo(CType.Index).Enum.fields.len;
+ const basic_hashes = init: {
+ @setEvalBranchQuota(1_600);
+ var basic_hashes_init: [first_pool_index]Pool.Map.Hash = undefined;
+ for (&basic_hashes_init, 0..) |*basic_hash, index| {
+ const ctype_index: CType.Index = @enumFromInt(index);
+ var hasher = Pool.Hasher.init;
+ hasher.update(@intFromEnum(ctype_index));
+ basic_hash.* = hasher.final(.basic);
+ }
+ break :init basic_hashes_init;
+ };
+};
+
+const Slice = struct {
+ extra_index: Pool.ExtraIndex,
+ len: u32,
+
+ pub fn at(slice: CType.Slice, index: usize, pool: *const Pool) CType {
+ var extra: Pool.ExtraTrail = .{ .extra_index = slice.extra_index };
+ return .{ .index = extra.next(slice.len, CType.Index, pool)[index] };
+ }
+};
+
+pub const Kind = enum {
+ forward,
+ forward_parameter,
+ complete,
+ global,
+ parameter,
+
+ pub fn isForward(kind: Kind) bool {
+ return switch (kind) {
+ .forward, .forward_parameter => true,
+ .complete, .global, .parameter => false,
+ };
+ }
+
+ pub fn isParameter(kind: Kind) bool {
+ return switch (kind) {
+ .forward_parameter, .parameter => true,
+ .forward, .complete, .global => false,
+ };
+ }
+
+ pub fn asParameter(kind: Kind) Kind {
+ return switch (kind) {
+ .forward, .forward_parameter => .forward_parameter,
+ .complete, .parameter, .global => .parameter,
+ };
+ }
+
+ pub fn noParameter(kind: Kind) Kind {
+ return switch (kind) {
+ .forward, .forward_parameter => .forward,
+ .complete, .parameter => .complete,
+ .global => .global,
+ };
+ }
+};
+
+pub const String = struct {
+ index: String.Index,
+
+ const Index = enum(u32) {
+ _,
+ };
+
+ pub fn slice(string: String, pool: *const Pool) []const u8 {
+ const start = pool.string_indices.items[@intFromEnum(string.index)];
+ const end = pool.string_indices.items[@intFromEnum(string.index) + 1];
+ return pool.string_bytes.items[start..end];
+ }
+};
+
+pub const Info = union(enum) {
+ basic: CType.Index,
+ pointer: Pointer,
+ aligned: Aligned,
+ array: Sequence,
+ vector: Sequence,
+ fwd_decl: FwdDecl,
+ aggregate: Aggregate,
+ function: Function,
+
+ const Tag = @typeInfo(Info).Union.tag_type.?;
+
+ pub const Pointer = struct {
+ elem_ctype: CType,
+ @"const": bool = false,
+ @"volatile": bool = false,
+
+ fn tag(pointer_info: Pointer) Pool.Tag {
+ return @enumFromInt(@intFromEnum(Pool.Tag.pointer) +
+ @as(u2, @bitCast(packed struct(u2) {
+ @"const": bool,
+ @"volatile": bool,
+ }{
+ .@"const" = pointer_info.@"const",
+ .@"volatile" = pointer_info.@"volatile",
+ })));
+ }
+ };
+
+ pub const Aligned = struct {
+ ctype: CType,
+ alignas: AlignAs,
+ };
+
+ pub const Sequence = struct {
+ elem_ctype: CType,
+ len: u64,
+ };
+
+ pub const AggregateTag = enum { @"enum", @"struct", @"union" };
+
+ pub const Field = struct {
+ name: String,
+ ctype: CType,
+ alignas: AlignAs,
+
+ pub const Slice = struct {
+ extra_index: Pool.ExtraIndex,
+ len: u32,
+
+ pub fn at(slice: Field.Slice, index: usize, pool: *const Pool) Field {
+ assert(index < slice.len);
+ const extra = pool.getExtra(Pool.Field, @intCast(slice.extra_index +
+ index * @typeInfo(Pool.Field).Struct.fields.len));
+ return .{
+ .name = .{ .index = extra.name },
+ .ctype = .{ .index = extra.ctype },
+ .alignas = extra.flags.alignas,
+ };
+ }
+
+ fn eqlAdapted(
+ lhs_slice: Field.Slice,
+ lhs_pool: *const Pool,
+ rhs_slice: Field.Slice,
+ rhs_pool: *const Pool,
+ pool_adapter: anytype,
+ ) bool {
+ if (lhs_slice.len != rhs_slice.len) return false;
+ for (0..lhs_slice.len) |index| {
+ if (!lhs_slice.at(index, lhs_pool).eqlAdapted(
+ lhs_pool,
+ rhs_slice.at(index, rhs_pool),
+ rhs_pool,
+ pool_adapter,
+ )) return false;
+ }
+ return true;
+ }
+ };
+
+ fn eqlAdapted(
+ lhs_field: Field,
+ lhs_pool: *const Pool,
+ rhs_field: Field,
+ rhs_pool: *const Pool,
+ pool_adapter: anytype,
+ ) bool {
+ return std.meta.eql(lhs_field.alignas, rhs_field.alignas) and
+ pool_adapter.eql(lhs_field.ctype, rhs_field.ctype) and std.mem.eql(
+ u8,
+ lhs_field.name.slice(lhs_pool),
+ rhs_field.name.slice(rhs_pool),
+ );
+ }
+ };
+
+ pub const FwdDecl = struct {
+ tag: AggregateTag,
+ name: union(enum) {
+ anon: Field.Slice,
+ owner_decl: DeclIndex,
+ },
+ };
+
+ pub const Aggregate = struct {
+ tag: AggregateTag,
+ @"packed": bool = false,
+ name: union(enum) {
+ anon: struct {
+ owner_decl: DeclIndex,
+ id: u32,
+ },
+ fwd_decl: CType,
+ },
+ fields: Field.Slice,
+ };
+
+    pub const Function = struct { // C function type: return type, parameter list, varargs flag
+        return_ctype: CType,
+        param_ctypes: CType.Slice,
+        varargs: bool = false,
+    };
+
+    pub fn eqlAdapted( // does lhs_info (interpreted in lhs_pool) describe the same C type as rhs_ctype (in rhs_pool)?
+        lhs_info: Info,
+        lhs_pool: *const Pool,
+        rhs_ctype: CType,
+        rhs_pool: *const Pool,
+        pool_adapter: anytype, // provides eql() for comparing child ctypes across the two pools
+    ) bool {
+        const rhs_info = rhs_ctype.info(rhs_pool);
+        if (@as(Info.Tag, lhs_info) != @as(Info.Tag, rhs_info)) return false; // different kinds never match
+        return switch (lhs_info) {
+            .basic => |lhs_basic_info| lhs_basic_info == rhs_info.basic, // basic types are pool-independent singletons
+            .pointer => |lhs_pointer_info| lhs_pointer_info.@"const" == rhs_info.pointer.@"const" and
+                lhs_pointer_info.@"volatile" == rhs_info.pointer.@"volatile" and
+                pool_adapter.eql(lhs_pointer_info.elem_ctype, rhs_info.pointer.elem_ctype),
+            .aligned => |lhs_aligned_info| std.meta.eql(lhs_aligned_info.alignas, rhs_info.aligned.alignas) and
+                pool_adapter.eql(lhs_aligned_info.ctype, rhs_info.aligned.ctype),
+            .array => |lhs_array_info| lhs_array_info.len == rhs_info.array.len and
+                pool_adapter.eql(lhs_array_info.elem_ctype, rhs_info.array.elem_ctype),
+            .vector => |lhs_vector_info| lhs_vector_info.len == rhs_info.vector.len and
+                pool_adapter.eql(lhs_vector_info.elem_ctype, rhs_info.vector.elem_ctype),
+            .fwd_decl => |lhs_fwd_decl_info| lhs_fwd_decl_info.tag == rhs_info.fwd_decl.tag and
+                switch (lhs_fwd_decl_info.name) { // names must agree in both kind and content
+                    .anon => |lhs_anon| rhs_info.fwd_decl.name == .anon and lhs_anon.eqlAdapted(
+                        lhs_pool,
+                        rhs_info.fwd_decl.name.anon,
+                        rhs_pool,
+                        pool_adapter,
+                    ),
+                    .owner_decl => |lhs_owner_decl| rhs_info.fwd_decl.name == .owner_decl and
+                        lhs_owner_decl == rhs_info.fwd_decl.name.owner_decl,
+                },
+            .aggregate => |lhs_aggregate_info| lhs_aggregate_info.tag == rhs_info.aggregate.tag and
+                lhs_aggregate_info.@"packed" == rhs_info.aggregate.@"packed" and
+                switch (lhs_aggregate_info.name) {
+                    .anon => |lhs_anon| rhs_info.aggregate.name == .anon and
+                        lhs_anon.owner_decl == rhs_info.aggregate.name.anon.owner_decl and
+                        lhs_anon.id == rhs_info.aggregate.name.anon.id,
+                    .fwd_decl => |lhs_fwd_decl| rhs_info.aggregate.name == .fwd_decl and
+                        pool_adapter.eql(lhs_fwd_decl, rhs_info.aggregate.name.fwd_decl),
+                } and lhs_aggregate_info.fields.eqlAdapted(
+                    lhs_pool,
+                    rhs_info.aggregate.fields,
+                    rhs_pool,
+                    pool_adapter,
+                ),
+            .function => |lhs_function_info| lhs_function_info.param_ctypes.len ==
+                rhs_info.function.param_ctypes.len and
+                pool_adapter.eql(lhs_function_info.return_ctype, rhs_info.function.return_ctype) and
+                for (0..lhs_function_info.param_ctypes.len) |param_index| // pairwise parameter comparison
+                {
+                    if (!pool_adapter.eql(
+                        lhs_function_info.param_ctypes.at(param_index, lhs_pool),
+                        rhs_info.function.param_ctypes.at(param_index, rhs_pool),
+                    )) break false;
+                } else true, // loop completed with no mismatch
+        };
+    }
+};
+
+pub const Pool = struct {
+    map: Map, // interning table for items; the actual keys live in items/extra and are compared via adapters
+    items: std.MultiArrayList(Item), // one (tag, data) pair per interned C type
+    extra: std.ArrayListUnmanaged(u32), // variable-length payload words referenced from items
+
+    string_map: Map, // interning table for names (field names etc.)
+    string_indices: std.ArrayListUnmanaged(u32), // start offsets into string_bytes; init() seeds the leading 0
+    string_bytes: std.ArrayListUnmanaged(u8), // concatenated bytes of all interned strings
+
+    const Map = std.AutoArrayHashMapUnmanaged(void, void); // zero-sized entries; all data is stored out-of-band
+
+    pub const empty: Pool = .{ // a freshly reset pool; call init() before interning strings
+        .map = .{},
+        .items = .{},
+        .extra = .{},
+
+        .string_map = .{},
+        .string_indices = .{},
+        .string_bytes = .{},
+    };
+
+    pub fn init(pool: *Pool, allocator: std.mem.Allocator) !void { // idempotent; seeds the sentinel 0 offset required by string interning
+        if (pool.string_indices.items.len == 0)
+            try pool.string_indices.append(allocator, 0);
+    }
+
+    pub fn deinit(pool: *Pool, allocator: std.mem.Allocator) void { // release every buffer owned by the pool
+        pool.map.deinit(allocator);
+        pool.items.deinit(allocator);
+        pool.extra.deinit(allocator);
+
+        pool.string_map.deinit(allocator);
+        pool.string_indices.deinit(allocator);
+        pool.string_bytes.deinit(allocator);
+
+        pool.* = undefined; // poison to surface use-after-deinit in safe builds
+    }
+
+    pub fn move(pool: *Pool) Pool { // transfer ownership of the buffers to the returned copy
+        defer pool.* = empty; // reset runs after the copy below is made
+        return pool.*;
+    }
+
+    pub fn clearRetainingCapacity(pool: *Pool) void { // forget all contents but keep allocations for reuse
+        pool.map.clearRetainingCapacity();
+        pool.items.shrinkRetainingCapacity(0);
+        pool.extra.clearRetainingCapacity();
+
+        pool.string_map.clearRetainingCapacity();
+        pool.string_indices.shrinkRetainingCapacity(1); // keep the sentinel 0 offset seeded by init()
+        pool.string_bytes.clearRetainingCapacity();
+    }
+
+    pub fn freeUnusedCapacity(pool: *Pool, allocator: std.mem.Allocator) void { // shrink every buffer to its exact used length
+        pool.map.shrinkAndFree(allocator, pool.map.count());
+        pool.items.shrinkAndFree(allocator, pool.items.len);
+        pool.extra.shrinkAndFree(allocator, pool.extra.items.len);
+
+        pool.string_map.shrinkAndFree(allocator, pool.string_map.count());
+        pool.string_indices.shrinkAndFree(allocator, pool.string_indices.items.len);
+        pool.string_bytes.shrinkAndFree(allocator, pool.string_bytes.items.len);
+    }
+
+    pub fn getPointer(pool: *Pool, allocator: std.mem.Allocator, pointer_info: Info.Pointer) !CType { // intern a pointer-to-elem_ctype item
+        var hasher = Hasher.init;
+        hasher.update(pointer_info.elem_ctype.hash(pool));
+        return pool.tagData(
+            allocator,
+            hasher,
+            pointer_info.tag(), // item tag presumably encodes the const/volatile qualifiers -- see Info.Pointer.tag (defined above this chunk)
+            @intFromEnum(pointer_info.elem_ctype.index),
+        );
+    }
+
+    pub fn getAligned(pool: *Pool, allocator: std.mem.Allocator, aligned_info: Info.Aligned) !CType { // intern a ctype carrying an explicit alignment
+        return pool.tagExtra(allocator, .aligned, Aligned, .{
+            .ctype = aligned_info.ctype.index,
+            .flags = .{ .alignas = aligned_info.alignas },
+        });
+    }
+
+    pub fn getArray(pool: *Pool, allocator: std.mem.Allocator, array_info: Info.Sequence) !CType { // intern an array type, picking the compact or wide length encoding
+        return if (std.math.cast(u32, array_info.len)) |small_len|
+            pool.tagExtra(allocator, .array_small, SequenceSmall, .{ // length fits a single u32 extra word
+                .elem_ctype = array_info.elem_ctype.index,
+                .len = small_len,
+            })
+        else
+            pool.tagExtra(allocator, .array_large, SequenceLarge, .{ // u64 length split across two words
+                .elem_ctype = array_info.elem_ctype.index,
+                .len_lo = @truncate(array_info.len >> 0),
+                .len_hi = @truncate(array_info.len >> 32),
+            });
+    }
+
+    pub fn getVector(pool: *Pool, allocator: std.mem.Allocator, vector_info: Info.Sequence) !CType { // intern a vector type
+        return pool.tagExtra(allocator, .vector, SequenceSmall, .{
+            .elem_ctype = vector_info.elem_ctype.index,
+            .len = @intCast(vector_info.len), // asserts the length fits in u32
+        });
+    }
+
+    pub fn getFwdDecl( // intern a forward declaration for a struct or union
+        pool: *Pool,
+        allocator: std.mem.Allocator,
+        fwd_decl_info: struct {
+            tag: Info.AggregateTag,
+            name: union(enum) {
+                anon: []const Info.Field, // anonymous aggregates are identified by their field list
+                owner_decl: DeclIndex, // named aggregates by their owning Zig decl
+            },
+        },
+    ) !CType {
+        var hasher = Hasher.init;
+        switch (fwd_decl_info.name) {
+            .anon => |fields| {
+                const ExpectedContents = [32]CType; // stack space for the common case of few fields
+                var stack align(@max(
+                    @alignOf(std.heap.StackFallbackAllocator(0)),
+                    @alignOf(ExpectedContents),
+                )) = std.heap.stackFallback(@sizeOf(ExpectedContents), allocator);
+                const stack_allocator = stack.get();
+                const field_ctypes = try stack_allocator.alloc(CType, fields.len);
+                defer stack_allocator.free(field_ctypes);
+                for (field_ctypes, fields) |*field_ctype, field|
+                    field_ctype.* = try field.ctype.toForward(pool, allocator); // fwd decls may only reference forward forms of field types
+                const extra: FwdDeclAnon = .{ .fields_len = @intCast(fields.len) };
+                const extra_index = try pool.addExtra(
+                    allocator,
+                    FwdDeclAnon,
+                    extra,
+                    fields.len * @typeInfo(Field).Struct.fields.len, // reserve trailing words for the field records
+                );
+                for (fields, field_ctypes) |field, field_ctype| pool.addHashedExtraAssumeCapacity(
+                    &hasher,
+                    Field,
+                    .{
+                        .name = field.name.index,
+                        .ctype = field_ctype.index,
+                        .flags = .{ .alignas = field.alignas },
+                    },
+                );
+                hasher.updateExtra(FwdDeclAnon, extra, pool);
+                return pool.tagTrailingExtra(allocator, hasher, switch (fwd_decl_info.tag) {
+                    .@"struct" => .fwd_decl_struct_anon,
+                    .@"union" => .fwd_decl_union_anon,
+                    .@"enum" => unreachable, // enums are lowered to their integer tag type and never fwd-declared
+                }, extra_index);
+            },
+            .owner_decl => |owner_decl| {
+                hasher.update(owner_decl);
+                return pool.tagData(allocator, hasher, switch (fwd_decl_info.tag) {
+                    .@"struct" => .fwd_decl_struct,
+                    .@"union" => .fwd_decl_union,
+                    .@"enum" => unreachable,
+                }, @intFromEnum(owner_decl));
+            },
+        }
+    }
+
+    pub fn getAggregate( // intern a full aggregate definition, fields included
+        pool: *Pool,
+        allocator: std.mem.Allocator,
+        aggregate_info: struct {
+            tag: Info.AggregateTag,
+            @"packed": bool = false,
+            name: union(enum) {
+                anon: struct { // anonymous: keyed by owning decl plus a discriminating id
+                    owner_decl: DeclIndex,
+                    id: u32,
+                },
+                fwd_decl: CType, // named via an already-interned forward declaration
+            },
+            fields: []const Info.Field,
+        },
+    ) !CType {
+        var hasher = Hasher.init;
+        switch (aggregate_info.name) {
+            .anon => |anon| {
+                const extra: AggregateAnon = .{
+                    .owner_decl = anon.owner_decl,
+                    .id = anon.id,
+                    .fields_len = @intCast(aggregate_info.fields.len),
+                };
+                const extra_index = try pool.addExtra(
+                    allocator,
+                    AggregateAnon,
+                    extra,
+                    aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len, // reserve room for trailing field records
+                );
+                for (aggregate_info.fields) |field| pool.addHashedExtraAssumeCapacity(&hasher, Field, .{
+                    .name = field.name.index,
+                    .ctype = field.ctype.index,
+                    .flags = .{ .alignas = field.alignas },
+                });
+                hasher.updateExtra(AggregateAnon, extra, pool);
+                return pool.tagTrailingExtra(allocator, hasher, switch (aggregate_info.tag) {
+                    .@"struct" => switch (aggregate_info.@"packed") {
+                        false => .aggregate_struct_anon,
+                        true => .aggregate_struct_packed_anon,
+                    },
+                    .@"union" => switch (aggregate_info.@"packed") {
+                        false => .aggregate_union_anon,
+                        true => .aggregate_union_packed_anon,
+                    },
+                    .@"enum" => unreachable, // enums never reach aggregate lowering
+                }, extra_index);
+            },
+            .fwd_decl => |fwd_decl| {
+                const extra: Aggregate = .{
+                    .fwd_decl = fwd_decl.index,
+                    .fields_len = @intCast(aggregate_info.fields.len),
+                };
+                const extra_index = try pool.addExtra(
+                    allocator,
+                    Aggregate,
+                    extra,
+                    aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len,
+                );
+                for (aggregate_info.fields) |field| pool.addHashedExtraAssumeCapacity(&hasher, Field, .{
+                    .name = field.name.index,
+                    .ctype = field.ctype.index,
+                    .flags = .{ .alignas = field.alignas },
+                });
+                hasher.updateExtra(Aggregate, extra, pool);
+                return pool.tagTrailingExtra(allocator, hasher, switch (aggregate_info.tag) {
+                    .@"struct" => switch (aggregate_info.@"packed") {
+                        false => .aggregate_struct,
+                        true => .aggregate_struct_packed,
+                    },
+                    .@"union" => switch (aggregate_info.@"packed") {
+                        false => .aggregate_union,
+                        true => .aggregate_union_packed,
+                    },
+                    .@"enum" => unreachable,
+                }, extra_index);
+            },
+        }
+    }
+
+    pub fn getFunction( // intern a C function type (return type, parameters, varargs)
+        pool: *Pool,
+        allocator: std.mem.Allocator,
+        function_info: struct {
+            return_ctype: CType,
+            param_ctypes: []const CType,
+            varargs: bool = false,
+        },
+    ) !CType {
+        var hasher = Hasher.init;
+        const extra: Function = .{
+            .return_ctype = function_info.return_ctype.index,
+            .param_ctypes_len = @intCast(function_info.param_ctypes.len),
+        };
+        const extra_index = try pool.addExtra(allocator, Function, extra, function_info.param_ctypes.len); // one trailing word per parameter
+        for (function_info.param_ctypes) |param_ctype| {
+            hasher.update(param_ctype.hash(pool));
+            pool.extra.appendAssumeCapacity(@intFromEnum(param_ctype.index));
+        }
+        hasher.updateExtra(Function, extra, pool);
+        return pool.tagTrailingExtra(allocator, hasher, switch (function_info.varargs) {
+            false => .function,
+            true => .function_varargs,
+        }, extra_index);
+    }
+
+    pub fn fromFields( // sort fields canonically, then intern the fwd decl (and, unless forward-only, the full aggregate)
+        pool: *Pool,
+        allocator: std.mem.Allocator,
+        tag: Info.AggregateTag,
+        fields: []Info.Field, // mutated: sorted in place
+        kind: Kind,
+    ) !CType {
+        sortFields(fields);
+        const fwd_decl = try pool.getFwdDecl(allocator, .{
+            .tag = tag,
+            .name = .{ .anon = fields },
+        });
+        return if (kind.isForward()) fwd_decl else pool.getAggregate(allocator, .{
+            .tag = tag,
+            .name = .{ .fwd_decl = fwd_decl },
+            .fields = fields,
+        });
+    }
+
+    pub fn fromIntInfo( // map a Zig integer type (signedness, bit count) onto its C representation
+        pool: *Pool,
+        allocator: std.mem.Allocator,
+        int_info: std.builtin.Type.Int,
+        mod: *Module,
+        kind: Kind,
+    ) !CType {
+        switch (int_info.bits) {
+            0 => return .{ .index = .void }, // zero-bit integers carry no data
+            1...8 => switch (int_info.signedness) {
+                .unsigned => return .{ .index = .uint8_t },
+                .signed => return .{ .index = .int8_t },
+            },
+            9...16 => switch (int_info.signedness) {
+                .unsigned => return .{ .index = .uint16_t },
+                .signed => return .{ .index = .int16_t },
+            },
+            17...32 => switch (int_info.signedness) {
+                .unsigned => return .{ .index = .uint32_t },
+                .signed => return .{ .index = .int32_t },
+            },
+            33...64 => switch (int_info.signedness) {
+                .unsigned => return .{ .index = .uint64_t },
+                .signed => return .{ .index = .int64_t },
+            },
+            65...128 => switch (int_info.signedness) {
+                .unsigned => return .{ .index = .zig_u128 },
+                .signed => return .{ .index = .zig_i128 },
+            },
+            else => { // wider than 128 bits: represent as an array of ABI-aligned unsigned words
+                const target = &mod.resolved_target.result;
+                const abi_align = Type.intAbiAlignment(int_info.bits, target.*);
+                const abi_align_bytes = abi_align.toByteUnits().?;
+                const array_ctype = try pool.getArray(allocator, .{
+                    .len = @divExact(Type.intAbiSize(int_info.bits, target.*), abi_align_bytes),
+                    .elem_ctype = try pool.fromIntInfo(allocator, .{
+                        .signedness = .unsigned,
+                        .bits = @intCast(abi_align_bytes * 8),
+                    }, mod, kind.noParameter()),
+                });
+                if (!kind.isParameter()) return array_ctype;
+                var fields = [_]Info.Field{ // C arrays decay in parameter position, so wrap the array in a single-member struct
+                    .{
+                        .name = try pool.string(allocator, "array"),
+                        .ctype = array_ctype,
+                        .alignas = AlignAs.fromAbiAlignment(abi_align),
+                    },
+                };
+                return pool.fromFields(allocator, .@"struct", &fields, kind);
+            },
+        }
+    }
+
+ pub fn fromType(
+ pool: *Pool,
+ allocator: std.mem.Allocator,
+ scratch: *std.ArrayListUnmanaged(u32),
+ ty: Type,
+ zcu: *Zcu,
+ mod: *Module,
+ kind: Kind,
+ ) !CType {
+ const ip = &zcu.intern_pool;
+ switch (ty.toIntern()) {
+ .u0_type,
+ .i0_type,
+ .anyopaque_type,
+ .void_type,
+ .empty_struct_type,
+ .type_type,
+ .comptime_int_type,
+ .comptime_float_type,
+ .null_type,
+ .undefined_type,
+ .enum_literal_type,
+ => return .{ .index = .void },
+ .u1_type, .u8_type => return .{ .index = .uint8_t },
+ .i8_type => return .{ .index = .int8_t },
+ .u16_type => return .{ .index = .uint16_t },
+ .i16_type => return .{ .index = .int16_t },
+ .u29_type, .u32_type => return .{ .index = .uint32_t },
+ .i32_type => return .{ .index = .int32_t },
+ .u64_type => return .{ .index = .uint64_t },
+ .i64_type => return .{ .index = .int64_t },
+ .u80_type, .u128_type => return .{ .index = .zig_u128 },
+ .i128_type => return .{ .index = .zig_i128 },
+ .usize_type => return .{ .index = .uintptr_t },
+ .isize_type => return .{ .index = .intptr_t },
+ .c_char_type => return .{ .index = .char },
+ .c_short_type => return .{ .index = .short },
+ .c_ushort_type => return .{ .index = .@"unsigned short" },
+ .c_int_type => return .{ .index = .int },
+ .c_uint_type => return .{ .index = .@"unsigned int" },
+ .c_long_type => return .{ .index = .long },
+ .c_ulong_type => return .{ .index = .@"unsigned long" },
+ .c_longlong_type => return .{ .index = .@"long long" },
+ .c_ulonglong_type => return .{ .index = .@"unsigned long long" },
+ .c_longdouble_type => return .{ .index = .@"long double" },
+ .f16_type => return .{ .index = .zig_f16 },
+ .f32_type => return .{ .index = .zig_f32 },
+ .f64_type => return .{ .index = .zig_f64 },
+ .f80_type => return .{ .index = .zig_f80 },
+ .f128_type => return .{ .index = .zig_f128 },
+ .bool_type, .optional_noreturn_type => return .{ .index = .bool },
+ .noreturn_type,
+ .anyframe_type,
+ .generic_poison_type,
+ => unreachable,
+ .atomic_order_type,
+ .atomic_rmw_op_type,
+ .calling_convention_type,
+ .address_space_type,
+ .float_mode_type,
+ .reduce_op_type,
+ .call_modifier_type,
+ => |ip_index| return pool.fromType(
+ allocator,
+ scratch,
+ Type.fromInterned(ip.loadEnumType(ip_index).tag_ty),
+ zcu,
+ mod,
+ kind,
+ ),
+ .anyerror_type,
+ .anyerror_void_error_union_type,
+ .adhoc_inferred_error_set_type,
+ => return pool.fromIntInfo(allocator, .{
+ .signedness = .unsigned,
+ .bits = zcu.errorSetBits(),
+ }, mod, kind),
+ .manyptr_u8_type,
+ => return pool.getPointer(allocator, .{
+ .elem_ctype = .{ .index = .uint8_t },
+ }),
+ .manyptr_const_u8_type,
+ .manyptr_const_u8_sentinel_0_type,
+ => return pool.getPointer(allocator, .{
+ .elem_ctype = .{ .index = .uint8_t },
+ .@"const" = true,
+ }),
+ .single_const_pointer_to_comptime_int_type,
+ => return pool.getPointer(allocator, .{
+ .elem_ctype = .{ .index = .void },
+ .@"const" = true,
+ }),
+ .slice_const_u8_type,
+ .slice_const_u8_sentinel_0_type,
+ => {
+ const target = &mod.resolved_target.result;
+ var fields = [_]Info.Field{
+ .{
+ .name = try pool.string(allocator, "ptr"),
+ .ctype = try pool.getPointer(allocator, .{
+ .elem_ctype = .{ .index = .uint8_t },
+ .@"const" = true,
+ }),
+ .alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target.*)),
+ },
+ .{
+ .name = try pool.string(allocator, "len"),
+ .ctype = .{ .index = .uintptr_t },
+ .alignas = AlignAs.fromAbiAlignment(
+ Type.intAbiAlignment(target.ptrBitWidth(), target.*),
+ ),
+ },
+ };
+ return pool.fromFields(allocator, .@"struct", &fields, kind);
+ },
+
+ .undef,
+ .zero,
+ .zero_usize,
+ .zero_u8,
+ .one,
+ .one_usize,
+ .one_u8,
+ .four_u8,
+ .negative_one,
+ .calling_convention_c,
+ .calling_convention_inline,
+ .void_value,
+ .unreachable_value,
+ .null_value,
+ .bool_true,
+ .bool_false,
+ .empty_struct,
+ .generic_poison,
+ .var_args_param_type,
+ .none,
+ => unreachable,
+
+ //.prefetch_options_type,
+ //.export_options_type,
+ //.extern_options_type,
+ //.type_info_type,
+ //_,
+ else => |ip_index| switch (ip.indexToKey(ip_index)) {
+ .int_type => |int_info| return pool.fromIntInfo(allocator, int_info, mod, kind),
+ .ptr_type => |ptr_info| switch (ptr_info.flags.size) {
+ .One, .Many, .C => {
+ const elem_ctype = elem_ctype: {
+ if (ptr_info.packed_offset.host_size > 0 and
+ ptr_info.flags.vector_index == .none)
+ break :elem_ctype try pool.fromIntInfo(allocator, .{
+ .signedness = .unsigned,
+ .bits = ptr_info.packed_offset.host_size * 8,
+ }, mod, .forward);
+ const elem: Info.Aligned = .{
+ .ctype = try pool.fromType(
+ allocator,
+ scratch,
+ Type.fromInterned(ptr_info.child),
+ zcu,
+ mod,
+ .forward,
+ ),
+ .alignas = AlignAs.fromAlignment(.{
+ .@"align" = ptr_info.flags.alignment,
+ .abi = Type.fromInterned(ptr_info.child).abiAlignment(zcu),
+ }),
+ };
+ break :elem_ctype if (elem.alignas.abiOrder().compare(.gte))
+ elem.ctype
+ else
+ try pool.getAligned(allocator, elem);
+ };
+ const elem_tag: Info.Tag = switch (elem_ctype.info(pool)) {
+ .aligned => |aligned_info| aligned_info.ctype.info(pool),
+ else => |elem_tag| elem_tag,
+ };
+ return pool.getPointer(allocator, .{
+ .elem_ctype = elem_ctype,
+ .@"const" = switch (elem_tag) {
+ .basic,
+ .pointer,
+ .aligned,
+ .array,
+ .vector,
+ .fwd_decl,
+ .aggregate,
+ => ptr_info.flags.is_const,
+ .function => false,
+ },
+ .@"volatile" = ptr_info.flags.is_volatile,
+ });
+ },
+ .Slice => {
+ const target = &mod.resolved_target.result;
+ var fields = [_]Info.Field{
+ .{
+ .name = try pool.string(allocator, "ptr"),
+ .ctype = try pool.fromType(
+ allocator,
+ scratch,
+ Type.fromInterned(ip.slicePtrType(ip_index)),
+ zcu,
+ mod,
+ kind,
+ ),
+ .alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target.*)),
+ },
+ .{
+ .name = try pool.string(allocator, "len"),
+ .ctype = .{ .index = .uintptr_t },
+ .alignas = AlignAs.fromAbiAlignment(
+ Type.intAbiAlignment(target.ptrBitWidth(), target.*),
+ ),
+ },
+ };
+ return pool.fromFields(allocator, .@"struct", &fields, kind);
+ },
+ },
+ .array_type => |array_info| {
+ const len = array_info.len + @intFromBool(array_info.sentinel != .none);
+ if (len == 0) return .{ .index = .void };
+ const elem_type = Type.fromInterned(array_info.child);
+ const elem_ctype = try pool.fromType(
+ allocator,
+ scratch,
+ elem_type,
+ zcu,
+ mod,
+ kind.noParameter(),
+ );
+ if (elem_ctype.index == .void) return .{ .index = .void };
+ const array_ctype = try pool.getArray(allocator, .{
+ .elem_ctype = elem_ctype,
+ .len = array_info.len + @intFromBool(array_info.sentinel != .none),
+ });
+ if (!kind.isParameter()) return array_ctype;
+ var fields = [_]Info.Field{
+ .{
+ .name = try pool.string(allocator, "array"),
+ .ctype = array_ctype,
+ .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)),
+ },
+ };
+ return pool.fromFields(allocator, .@"struct", &fields, kind);
+ },
+ .vector_type => |vector_info| {
+ if (vector_info.len == 0) return .{ .index = .void };
+ const elem_type = Type.fromInterned(vector_info.child);
+ const elem_ctype = try pool.fromType(
+ allocator,
+ scratch,
+ elem_type,
+ zcu,
+ mod,
+ kind.noParameter(),
+ );
+ if (elem_ctype.index == .void) return .{ .index = .void };
+ const vector_ctype = try pool.getVector(allocator, .{
+ .elem_ctype = elem_ctype,
+ .len = vector_info.len,
+ });
+ if (!kind.isParameter()) return vector_ctype;
+ var fields = [_]Info.Field{
+ .{
+ .name = try pool.string(allocator, "array"),
+ .ctype = vector_ctype,
+ .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)),
+ },
+ };
+ return pool.fromFields(allocator, .@"struct", &fields, kind);
+ },
+ .opt_type => |payload_type| {
+ if (ip.isNoReturn(payload_type)) return .{ .index = .void };
+ const payload_ctype = try pool.fromType(
+ allocator,
+ scratch,
+ Type.fromInterned(payload_type),
+ zcu,
+ mod,
+ kind.noParameter(),
+ );
+ if (payload_ctype.index == .void) return .{ .index = .bool };
+ switch (payload_type) {
+ .anyerror_type => return payload_ctype,
+ else => switch (ip.indexToKey(payload_type)) {
+ .ptr_type => |payload_ptr_info| if (payload_ptr_info.flags.size != .C and
+ !payload_ptr_info.flags.is_allowzero) return payload_ctype,
+ .error_set_type, .inferred_error_set_type => return payload_ctype,
+ else => {},
+ },
+ }
+ var fields = [_]Info.Field{
+ .{
+ .name = try pool.string(allocator, "is_null"),
+ .ctype = .{ .index = .bool },
+ .alignas = AlignAs.fromAbiAlignment(.@"1"),
+ },
+ .{
+ .name = try pool.string(allocator, "payload"),
+ .ctype = payload_ctype,
+ .alignas = AlignAs.fromAbiAlignment(
+ Type.fromInterned(payload_type).abiAlignment(zcu),
+ ),
+ },
+ };
+ return pool.fromFields(allocator, .@"struct", &fields, kind);
+ },
+ .anyframe_type => unreachable,
+ .error_union_type => |error_union_info| {
+ const error_set_bits = zcu.errorSetBits();
+ const error_set_ctype = try pool.fromIntInfo(allocator, .{
+ .signedness = .unsigned,
+ .bits = error_set_bits,
+ }, mod, kind);
+ if (ip.isNoReturn(error_union_info.payload_type)) return error_set_ctype;
+ const payload_type = Type.fromInterned(error_union_info.payload_type);
+ const payload_ctype = try pool.fromType(
+ allocator,
+ scratch,
+ payload_type,
+ zcu,
+ mod,
+ kind.noParameter(),
+ );
+ if (payload_ctype.index == .void) return error_set_ctype;
+ const target = &mod.resolved_target.result;
+ var fields = [_]Info.Field{
+ .{
+ .name = try pool.string(allocator, "error"),
+ .ctype = error_set_ctype,
+ .alignas = AlignAs.fromAbiAlignment(
+ Type.intAbiAlignment(error_set_bits, target.*),
+ ),
+ },
+ .{
+ .name = try pool.string(allocator, "payload"),
+ .ctype = payload_ctype,
+ .alignas = AlignAs.fromAbiAlignment(payload_type.abiAlignment(zcu)),
+ },
+ };
+ return pool.fromFields(allocator, .@"struct", &fields, kind);
+ },
+ .simple_type => unreachable,
+ .struct_type => {
+ const loaded_struct = ip.loadStructType(ip_index);
+ switch (loaded_struct.layout) {
+ .auto, .@"extern" => {
+ const fwd_decl = try pool.getFwdDecl(allocator, .{
+ .tag = .@"struct",
+ .name = .{ .owner_decl = loaded_struct.decl.unwrap().? },
+ });
+ if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu))
+ fwd_decl
+ else
+ .{ .index = .void };
+ const scratch_top = scratch.items.len;
+ defer scratch.shrinkRetainingCapacity(scratch_top);
+ try scratch.ensureUnusedCapacity(
+ allocator,
+ loaded_struct.field_types.len * @typeInfo(Field).Struct.fields.len,
+ );
+ var hasher = Hasher.init;
+ var tag: Pool.Tag = .aggregate_struct;
+ var field_it = loaded_struct.iterateRuntimeOrder(ip);
+ while (field_it.next()) |field_index| {
+ const field_type = Type.fromInterned(
+ loaded_struct.field_types.get(ip)[field_index],
+ );
+ const field_ctype = try pool.fromType(
+ allocator,
+ scratch,
+ field_type,
+ zcu,
+ mod,
+ kind.noParameter(),
+ );
+ if (field_ctype.index == .void) continue;
+ const field_name = if (loaded_struct.fieldName(ip, field_index)
+ .unwrap()) |field_name|
+ try pool.string(allocator, ip.stringToSlice(field_name))
+ else
+ try pool.fmt(allocator, "f{d}", .{field_index});
+ const field_alignas = AlignAs.fromAlignment(.{
+ .@"align" = loaded_struct.fieldAlign(ip, field_index),
+ .abi = field_type.abiAlignment(zcu),
+ });
+ pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{
+ .name = field_name.index,
+ .ctype = field_ctype.index,
+ .flags = .{ .alignas = field_alignas },
+ });
+ if (field_alignas.abiOrder().compare(.lt))
+ tag = .aggregate_struct_packed;
+ }
+ const fields_len: u32 = @intCast(@divExact(
+ scratch.items.len - scratch_top,
+ @typeInfo(Field).Struct.fields.len,
+ ));
+ if (fields_len == 0) return .{ .index = .void };
+ try pool.ensureUnusedCapacity(allocator, 1);
+ const extra_index = try pool.addHashedExtra(allocator, &hasher, Aggregate, .{
+ .fwd_decl = fwd_decl.index,
+ .fields_len = fields_len,
+ }, fields_len * @typeInfo(Field).Struct.fields.len);
+ pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]);
+ return pool.tagTrailingExtraAssumeCapacity(hasher, tag, extra_index);
+ },
+ .@"packed" => return pool.fromType(
+ allocator,
+ scratch,
+ Type.fromInterned(loaded_struct.backingIntType(ip).*),
+ zcu,
+ mod,
+ kind,
+ ),
+ }
+ },
+ .anon_struct_type => |anon_struct_info| {
+ const scratch_top = scratch.items.len;
+ defer scratch.shrinkRetainingCapacity(scratch_top);
+ try scratch.ensureUnusedCapacity(allocator, anon_struct_info.types.len *
+ @typeInfo(Field).Struct.fields.len);
+ var hasher = Hasher.init;
+ for (0..anon_struct_info.types.len) |field_index| {
+ if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
+ const field_type = Type.fromInterned(
+ anon_struct_info.types.get(ip)[field_index],
+ );
+ const field_ctype = try pool.fromType(
+ allocator,
+ scratch,
+ field_type,
+ zcu,
+ mod,
+ kind.noParameter(),
+ );
+ if (field_ctype.index == .void) continue;
+ const field_name = if (anon_struct_info.fieldName(ip, @intCast(field_index))
+ .unwrap()) |field_name|
+ try pool.string(allocator, ip.stringToSlice(field_name))
+ else
+ try pool.fmt(allocator, "f{d}", .{field_index});
+ pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{
+ .name = field_name.index,
+ .ctype = field_ctype.index,
+ .flags = .{ .alignas = AlignAs.fromAbiAlignment(
+ field_type.abiAlignment(zcu),
+ ) },
+ });
+ }
+ const fields_len: u32 = @intCast(@divExact(
+ scratch.items.len - scratch_top,
+ @typeInfo(Field).Struct.fields.len,
+ ));
+ if (fields_len == 0) return .{ .index = .void };
+ if (kind.isForward()) {
+ try pool.ensureUnusedCapacity(allocator, 1);
+ const extra_index = try pool.addHashedExtra(
+ allocator,
+ &hasher,
+ FwdDeclAnon,
+ .{ .fields_len = fields_len },
+ fields_len * @typeInfo(Field).Struct.fields.len,
+ );
+ pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]);
+ return pool.tagTrailingExtra(
+ allocator,
+ hasher,
+ .fwd_decl_struct_anon,
+ extra_index,
+ );
+ }
+ const fwd_decl = try pool.fromType(allocator, scratch, ty, zcu, mod, .forward);
+ try pool.ensureUnusedCapacity(allocator, 1);
+ const extra_index = try pool.addHashedExtra(allocator, &hasher, Aggregate, .{
+ .fwd_decl = fwd_decl.index,
+ .fields_len = fields_len,
+ }, fields_len * @typeInfo(Field).Struct.fields.len);
+ pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]);
+ return pool.tagTrailingExtraAssumeCapacity(hasher, .aggregate_struct, extra_index);
+ },
+ .union_type => {
+ const loaded_union = ip.loadUnionType(ip_index);
+ switch (loaded_union.getLayout(ip)) {
+ .auto, .@"extern" => {
+ const has_tag = loaded_union.hasTag(ip);
+ const fwd_decl = try pool.getFwdDecl(allocator, .{
+ .tag = if (has_tag) .@"struct" else .@"union",
+ .name = .{ .owner_decl = loaded_union.decl },
+ });
+ if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu))
+ fwd_decl
+ else
+ .{ .index = .void };
+ const loaded_tag = loaded_union.loadTagType(ip);
+ const scratch_top = scratch.items.len;
+ defer scratch.shrinkRetainingCapacity(scratch_top);
+ try scratch.ensureUnusedCapacity(
+ allocator,
+ loaded_union.field_types.len * @typeInfo(Field).Struct.fields.len,
+ );
+ var hasher = Hasher.init;
+ var tag: Pool.Tag = .aggregate_union;
+ var payload_align: Alignment = .@"1";
+ for (0..loaded_union.field_types.len) |field_index| {
+ const field_type = Type.fromInterned(
+ loaded_union.field_types.get(ip)[field_index],
+ );
+ if (ip.isNoReturn(field_type.toIntern())) continue;
+ const field_ctype = try pool.fromType(
+ allocator,
+ scratch,
+ field_type,
+ zcu,
+ mod,
+ kind.noParameter(),
+ );
+ if (field_ctype.index == .void) continue;
+ const field_name = try pool.string(
+ allocator,
+ ip.stringToSlice(loaded_tag.names.get(ip)[field_index]),
+ );
+ const field_alignas = AlignAs.fromAlignment(.{
+ .@"align" = loaded_union.fieldAlign(ip, @intCast(field_index)),
+ .abi = field_type.abiAlignment(zcu),
+ });
+ pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{
+ .name = field_name.index,
+ .ctype = field_ctype.index,
+ .flags = .{ .alignas = field_alignas },
+ });
+ if (field_alignas.abiOrder().compare(.lt))
+ tag = .aggregate_union_packed;
+ payload_align = payload_align.maxStrict(field_alignas.@"align");
+ }
+ const fields_len: u32 = @intCast(@divExact(
+ scratch.items.len - scratch_top,
+ @typeInfo(Field).Struct.fields.len,
+ ));
+ if (!has_tag) {
+ if (fields_len == 0) return .{ .index = .void };
+ try pool.ensureUnusedCapacity(allocator, 1);
+ const extra_index = try pool.addHashedExtra(
+ allocator,
+ &hasher,
+ Aggregate,
+ .{ .fwd_decl = fwd_decl.index, .fields_len = fields_len },
+ fields_len * @typeInfo(Field).Struct.fields.len,
+ );
+ pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]);
+ return pool.tagTrailingExtraAssumeCapacity(hasher, tag, extra_index);
+ }
+ try pool.ensureUnusedCapacity(allocator, 2);
+ var struct_fields: [2]Info.Field = undefined;
+ var struct_fields_len: usize = 0;
+ if (loaded_tag.tag_ty != .comptime_int_type) {
+ const tag_type = Type.fromInterned(loaded_tag.tag_ty);
+ const tag_ctype: CType = try pool.fromType(
+ allocator,
+ scratch,
+ tag_type,
+ zcu,
+ mod,
+ kind.noParameter(),
+ );
+ if (tag_ctype.index != .void) {
+ struct_fields[struct_fields_len] = .{
+ .name = try pool.string(allocator, "tag"),
+ .ctype = tag_ctype,
+ .alignas = AlignAs.fromAbiAlignment(tag_type.abiAlignment(zcu)),
+ };
+ struct_fields_len += 1;
+ }
+ }
+ if (fields_len > 0) {
+ const payload_ctype = payload_ctype: {
+ const extra_index = try pool.addHashedExtra(
+ allocator,
+ &hasher,
+ AggregateAnon,
+ .{
+ .owner_decl = loaded_union.decl,
+ .id = 0,
+ .fields_len = fields_len,
+ },
+ fields_len * @typeInfo(Field).Struct.fields.len,
+ );
+ pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]);
+ break :payload_ctype pool.tagTrailingExtraAssumeCapacity(
+ hasher,
+ switch (tag) {
+ .aggregate_union => .aggregate_union_anon,
+ .aggregate_union_packed => .aggregate_union_packed_anon,
+ else => unreachable,
+ },
+ extra_index,
+ );
+ };
+ if (payload_ctype.index != .void) {
+ struct_fields[struct_fields_len] = .{
+ .name = try pool.string(allocator, "payload"),
+ .ctype = payload_ctype,
+ .alignas = AlignAs.fromAbiAlignment(payload_align),
+ };
+ struct_fields_len += 1;
+ }
+ }
+ if (struct_fields_len == 0) return .{ .index = .void };
+ sortFields(struct_fields[0..struct_fields_len]);
+ return pool.getAggregate(allocator, .{
+ .tag = .@"struct",
+ .name = .{ .fwd_decl = fwd_decl },
+ .fields = struct_fields[0..struct_fields_len],
+ });
+ },
+ .@"packed" => return pool.fromIntInfo(allocator, .{
+ .signedness = .unsigned,
+ .bits = @intCast(ty.bitSize(zcu)),
+ }, mod, kind),
+ }
+ },
+ .opaque_type => return .{ .index = .void },
+ .enum_type => return pool.fromType(
+ allocator,
+ scratch,
+ Type.fromInterned(ip.loadEnumType(ip_index).tag_ty),
+ zcu,
+ mod,
+ kind,
+ ),
+ .func_type => |func_info| if (func_info.is_generic) return .{ .index = .void } else {
+ const scratch_top = scratch.items.len;
+ defer scratch.shrinkRetainingCapacity(scratch_top);
+ try scratch.ensureUnusedCapacity(allocator, func_info.param_types.len);
+ var hasher = Hasher.init;
+ const return_type = Type.fromInterned(func_info.return_type);
+ const return_ctype: CType =
+ if (!ip.isNoReturn(func_info.return_type)) try pool.fromType(
+ allocator,
+ scratch,
+ return_type,
+ zcu,
+ mod,
+ kind.asParameter(),
+ ) else .{ .index = .void };
+ for (0..func_info.param_types.len) |param_index| {
+ const param_type = Type.fromInterned(
+ func_info.param_types.get(ip)[param_index],
+ );
+ const param_ctype = try pool.fromType(
+ allocator,
+ scratch,
+ param_type,
+ zcu,
+ mod,
+ kind.asParameter(),
+ );
+ if (param_ctype.index == .void) continue;
+ hasher.update(param_ctype.hash(pool));
+ scratch.appendAssumeCapacity(@intFromEnum(param_ctype.index));
+ }
+ const param_ctypes_len: u32 = @intCast(scratch.items.len - scratch_top);
+ try pool.ensureUnusedCapacity(allocator, 1);
+ const extra_index = try pool.addHashedExtra(allocator, &hasher, Function, .{
+ .return_ctype = return_ctype.index,
+ .param_ctypes_len = param_ctypes_len,
+ }, param_ctypes_len);
+ pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]);
+ return pool.tagTrailingExtraAssumeCapacity(hasher, switch (func_info.is_var_args) {
+ false => .function,
+ true => .function_varargs,
+ }, extra_index);
+ },
+ .error_set_type,
+ .inferred_error_set_type,
+ => return pool.fromIntInfo(allocator, .{
+ .signedness = .unsigned,
+ .bits = zcu.errorSetBits(),
+ }, mod, kind),
+
+ .undef,
+ .simple_value,
+ .variable,
+ .extern_func,
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .empty_enum_value,
+ .float,
+ .ptr,
+ .slice,
+ .opt,
+ .aggregate,
+ .un,
+ .memoized_call,
+ => unreachable,
+ },
+ }
+ }
+
+ pub fn getOrPutAdapted(
+ pool: *Pool,
+ allocator: std.mem.Allocator,
+ source_pool: *const Pool,
+ source_ctype: CType,
+ pool_adapter: anytype,
+ ) !struct { CType, bool } {
+ const tag = source_pool.items.items(.tag)[
+ source_ctype.toPoolIndex() orelse return .{ source_ctype, true }
+ ];
+ try pool.ensureUnusedCapacity(allocator, 1);
+ const CTypeAdapter = struct {
+ pool: *const Pool,
+ source_pool: *const Pool,
+ source_info: Info,
+ pool_adapter: @TypeOf(pool_adapter),
+ pub fn hash(map_adapter: @This(), key_ctype: CType) Map.Hash {
+ return key_ctype.hash(map_adapter.source_pool);
+ }
+ pub fn eql(map_adapter: @This(), _: CType, _: void, pool_index: usize) bool {
+ return map_adapter.source_info.eqlAdapted(
+ map_adapter.source_pool,
+ CType.fromPoolIndex(pool_index),
+ map_adapter.pool,
+ map_adapter.pool_adapter,
+ );
+ }
+ };
+ const source_info = source_ctype.info(source_pool);
+ const gop = pool.map.getOrPutAssumeCapacityAdapted(source_ctype, CTypeAdapter{
+ .pool = pool,
+ .source_pool = source_pool,
+ .source_info = source_info,
+ .pool_adapter = pool_adapter,
+ });
+ errdefer _ = pool.map.pop();
+ const ctype = CType.fromPoolIndex(gop.index);
+ if (!gop.found_existing) switch (source_info) {
+ .basic => unreachable,
+ .pointer => |pointer_info| pool.items.appendAssumeCapacity(.{
+ .tag = tag,
+ .data = @intFromEnum(pool_adapter.copy(pointer_info.elem_ctype).index),
+ }),
+ .aligned => |aligned_info| pool.items.appendAssumeCapacity(.{
+ .tag = tag,
+ .data = try pool.addExtra(allocator, Aligned, .{
+ .ctype = pool_adapter.copy(aligned_info.ctype).index,
+ .flags = .{ .alignas = aligned_info.alignas },
+ }, 0),
+ }),
+ .array, .vector => |sequence_info| pool.items.appendAssumeCapacity(.{
+ .tag = tag,
+ .data = switch (tag) {
+ .array_small, .vector => try pool.addExtra(allocator, SequenceSmall, .{
+ .elem_ctype = pool_adapter.copy(sequence_info.elem_ctype).index,
+ .len = @intCast(sequence_info.len),
+ }, 0),
+ .array_large => try pool.addExtra(allocator, SequenceLarge, .{
+ .elem_ctype = pool_adapter.copy(sequence_info.elem_ctype).index,
+ .len_lo = @truncate(sequence_info.len >> 0),
+ .len_hi = @truncate(sequence_info.len >> 32),
+ }, 0),
+ else => unreachable,
+ },
+ }),
+ .fwd_decl => |fwd_decl_info| switch (fwd_decl_info.name) {
+ .anon => |fields| {
+ pool.items.appendAssumeCapacity(.{
+ .tag = tag,
+ .data = try pool.addExtra(allocator, FwdDeclAnon, .{
+ .fields_len = fields.len,
+ }, fields.len * @typeInfo(Field).Struct.fields.len),
+ });
+ for (0..fields.len) |field_index| {
+ const field = fields.at(field_index, source_pool);
+ const field_name = try pool.string(allocator, field.name.slice(source_pool));
+ pool.addExtraAssumeCapacity(Field, .{
+ .name = field_name.index,
+ .ctype = pool_adapter.copy(field.ctype).index,
+ .flags = .{ .alignas = field.alignas },
+ });
+ }
+ },
+ .owner_decl => |owner_decl| pool.items.appendAssumeCapacity(.{
+ .tag = tag,
+ .data = @intFromEnum(owner_decl),
+ }),
+ },
+ .aggregate => |aggregate_info| {
+ pool.items.appendAssumeCapacity(.{
+ .tag = tag,
+ .data = switch (aggregate_info.name) {
+ .anon => |anon| try pool.addExtra(allocator, AggregateAnon, .{
+ .owner_decl = anon.owner_decl,
+ .id = anon.id,
+ .fields_len = aggregate_info.fields.len,
+ }, aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len),
+ .fwd_decl => |fwd_decl| try pool.addExtra(allocator, Aggregate, .{
+ .fwd_decl = pool_adapter.copy(fwd_decl).index,
+ .fields_len = aggregate_info.fields.len,
+ }, aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len),
+ },
+ });
+ for (0..aggregate_info.fields.len) |field_index| {
+ const field = aggregate_info.fields.at(field_index, source_pool);
+ const field_name = try pool.string(allocator, field.name.slice(source_pool));
+ pool.addExtraAssumeCapacity(Field, .{
+ .name = field_name.index,
+ .ctype = pool_adapter.copy(field.ctype).index,
+ .flags = .{ .alignas = field.alignas },
+ });
+ }
+ },
+ .function => |function_info| {
+ pool.items.appendAssumeCapacity(.{
+ .tag = tag,
+ .data = try pool.addExtra(allocator, Function, .{
+ .return_ctype = pool_adapter.copy(function_info.return_ctype).index,
+ .param_ctypes_len = function_info.param_ctypes.len,
+ }, function_info.param_ctypes.len),
+ });
+ for (0..function_info.param_ctypes.len) |param_index| pool.extra.appendAssumeCapacity(
+ @intFromEnum(pool_adapter.copy(
+ function_info.param_ctypes.at(param_index, source_pool),
+ ).index),
+ );
+ },
+ };
+ assert(source_info.eqlAdapted(source_pool, ctype, pool, pool_adapter));
+ assert(source_ctype.hash(source_pool) == ctype.hash(pool));
+ return .{ ctype, gop.found_existing };
+ }
+
+ pub fn string(pool: *Pool, allocator: std.mem.Allocator, str: []const u8) !String {
+ try pool.string_bytes.appendSlice(allocator, str);
+ return pool.trailingString(allocator);
+ }
+
+ pub fn fmt(
+ pool: *Pool,
+ allocator: std.mem.Allocator,
+ comptime fmt_str: []const u8,
+ fmt_args: anytype,
+ ) !String {
+ try pool.string_bytes.writer(allocator).print(fmt_str, fmt_args);
+ return pool.trailingString(allocator);
+ }
+
+ fn ensureUnusedCapacity(pool: *Pool, allocator: std.mem.Allocator, len: u32) !void {
+ try pool.map.ensureUnusedCapacity(allocator, len);
+ try pool.items.ensureUnusedCapacity(allocator, len);
+ }
+
+ const Hasher = struct {
+ const Impl = std.hash.Wyhash;
+ impl: Impl,
+
+ const init: Hasher = .{ .impl = Impl.init(0) };
+
+ fn updateExtra(hasher: *Hasher, comptime Extra: type, extra: Extra, pool: *const Pool) void {
+ inline for (@typeInfo(Extra).Struct.fields) |field| {
+ const value = @field(extra, field.name);
+ hasher.update(switch (field.type) {
+ Pool.Tag, String, CType => unreachable,
+ CType.Index => (CType{ .index = value }).hash(pool),
+ String.Index => (String{ .index = value }).slice(pool),
+ else => value,
+ });
+ }
+ }
+ fn update(hasher: *Hasher, data: anytype) void {
+ switch (@TypeOf(data)) {
+ Pool.Tag => @compileError("pass tag to final"),
+ CType, CType.Index => @compileError("hash ctype.hash(pool) instead"),
+ String, String.Index => @compileError("hash string.slice(pool) instead"),
+ u32, DeclIndex, Aligned.Flags => hasher.impl.update(std.mem.asBytes(&data)),
+ []const u8 => hasher.impl.update(data),
+ else => @compileError("unhandled type: " ++ @typeName(@TypeOf(data))),
+ }
+ }
+
+ fn final(hasher: Hasher, tag: Pool.Tag) Map.Hash {
+ var impl = hasher.impl;
+ impl.update(std.mem.asBytes(&tag));
+ return @truncate(impl.final());
+ }
+ };
+
+ fn tagData(
+ pool: *Pool,
+ allocator: std.mem.Allocator,
+ hasher: Hasher,
+ tag: Pool.Tag,
+ data: u32,
+ ) !CType {
+ try pool.ensureUnusedCapacity(allocator, 1);
+ const Key = struct { hash: Map.Hash, tag: Pool.Tag, data: u32 };
+ const CTypeAdapter = struct {
+ pool: *const Pool,
+ pub fn hash(_: @This(), key: Key) Map.Hash {
+ return key.hash;
+ }
+ pub fn eql(ctype_adapter: @This(), lhs_key: Key, _: void, rhs_index: usize) bool {
+ const rhs_item = ctype_adapter.pool.items.get(rhs_index);
+ return lhs_key.tag == rhs_item.tag and lhs_key.data == rhs_item.data;
+ }
+ };
+ const gop = pool.map.getOrPutAssumeCapacityAdapted(
+ Key{ .hash = hasher.final(tag), .tag = tag, .data = data },
+ CTypeAdapter{ .pool = pool },
+ );
+ if (!gop.found_existing) pool.items.appendAssumeCapacity(.{ .tag = tag, .data = data });
+ return CType.fromPoolIndex(gop.index);
+ }
+
+ fn tagExtra(
+ pool: *Pool,
+ allocator: std.mem.Allocator,
+ tag: Pool.Tag,
+ comptime Extra: type,
+ extra: Extra,
+ ) !CType {
+ var hasher = Hasher.init;
+ hasher.updateExtra(Extra, extra, pool);
+ return pool.tagTrailingExtra(
+ allocator,
+ hasher,
+ tag,
+ try pool.addExtra(allocator, Extra, extra, 0),
+ );
+ }
+
+ fn tagTrailingExtra(
+ pool: *Pool,
+ allocator: std.mem.Allocator,
+ hasher: Hasher,
+ tag: Pool.Tag,
+ extra_index: ExtraIndex,
+ ) !CType {
+ try pool.ensureUnusedCapacity(allocator, 1);
+ return pool.tagTrailingExtraAssumeCapacity(hasher, tag, extra_index);
+ }
+
+ fn tagTrailingExtraAssumeCapacity(
+ pool: *Pool,
+ hasher: Hasher,
+ tag: Pool.Tag,
+ extra_index: ExtraIndex,
+ ) CType {
+ const Key = struct { hash: Map.Hash, tag: Pool.Tag, extra: []const u32 };
+ const CTypeAdapter = struct {
+ pool: *const Pool,
+ pub fn hash(_: @This(), key: Key) Map.Hash {
+ return key.hash;
+ }
+ pub fn eql(ctype_adapter: @This(), lhs_key: Key, _: void, rhs_index: usize) bool {
+ const rhs_item = ctype_adapter.pool.items.get(rhs_index);
+ if (lhs_key.tag != rhs_item.tag) return false;
+ const rhs_extra = ctype_adapter.pool.extra.items[rhs_item.data..];
+ return std.mem.startsWith(u32, rhs_extra, lhs_key.extra);
+ }
+ };
+ const gop = pool.map.getOrPutAssumeCapacityAdapted(
+ Key{ .hash = hasher.final(tag), .tag = tag, .extra = pool.extra.items[extra_index..] },
+ CTypeAdapter{ .pool = pool },
+ );
+ if (gop.found_existing)
+ pool.extra.shrinkRetainingCapacity(extra_index)
+ else
+ pool.items.appendAssumeCapacity(.{ .tag = tag, .data = extra_index });
+ return CType.fromPoolIndex(gop.index);
+ }
+
+ fn sortFields(fields: []Info.Field) void {
+ std.mem.sort(Info.Field, fields, {}, struct {
+ fn before(_: void, lhs_field: Info.Field, rhs_field: Info.Field) bool {
+ return lhs_field.alignas.order(rhs_field.alignas).compare(.gt);
+ }
+ }.before);
+ }
+
+ fn trailingString(pool: *Pool, allocator: std.mem.Allocator) !String {
+ const StringAdapter = struct {
+ pool: *const Pool,
+ pub fn hash(_: @This(), slice: []const u8) Map.Hash {
+ return @truncate(Hasher.Impl.hash(1, slice));
+ }
+ pub fn eql(string_adapter: @This(), lhs_slice: []const u8, _: void, rhs_index: usize) bool {
+ const rhs_string: String = .{ .index = @enumFromInt(rhs_index) };
+ const rhs_slice = rhs_string.slice(string_adapter.pool);
+ return std.mem.eql(u8, lhs_slice, rhs_slice);
+ }
+ };
+ try pool.string_map.ensureUnusedCapacity(allocator, 1);
+ try pool.string_indices.ensureUnusedCapacity(allocator, 1);
+
+ const start = pool.string_indices.getLast();
+ const gop = pool.string_map.getOrPutAssumeCapacityAdapted(
+ @as([]const u8, pool.string_bytes.items[start..]),
+ StringAdapter{ .pool = pool },
+ );
+ if (gop.found_existing)
+ pool.string_bytes.shrinkRetainingCapacity(start)
+ else
+ pool.string_indices.appendAssumeCapacity(@intCast(pool.string_bytes.items.len));
+ return .{ .index = @enumFromInt(gop.index) };
+ }
+
+ const Item = struct {
+ tag: Pool.Tag,
+ data: u32,
+ };
+
+ const ExtraIndex = u32;
+
+ const Tag = enum(u8) {
+ basic,
+ pointer,
+ pointer_const,
+ pointer_volatile,
+ pointer_const_volatile,
+ aligned,
+ array_small,
+ array_large,
+ vector,
+ fwd_decl_struct_anon,
+ fwd_decl_union_anon,
+ fwd_decl_struct,
+ fwd_decl_union,
+ aggregate_struct_anon,
+ aggregate_struct_packed_anon,
+ aggregate_union_anon,
+ aggregate_union_packed_anon,
+ aggregate_struct,
+ aggregate_struct_packed,
+ aggregate_union,
+ aggregate_union_packed,
+ function,
+ function_varargs,
+ };
+
+ const Aligned = struct {
+ ctype: CType.Index,
+ flags: Flags,
+
+ const Flags = packed struct(u32) {
+ alignas: AlignAs,
+ _: u20 = 0,
+ };
+ };
+
+ const SequenceSmall = struct {
+ elem_ctype: CType.Index,
+ len: u32,
+ };
+
+ const SequenceLarge = struct {
+ elem_ctype: CType.Index,
+ len_lo: u32,
+ len_hi: u32,
+
+ fn len(extra: SequenceLarge) u64 {
+ return @as(u64, extra.len_lo) << 0 |
+ @as(u64, extra.len_hi) << 32;
+ }
+ };
+
+ const Field = struct {
+ name: String.Index,
+ ctype: CType.Index,
+ flags: Flags,
+
+ const Flags = Aligned.Flags;
+ };
+
+ const FwdDeclAnon = struct {
+ fields_len: u32,
+ };
+
+ const AggregateAnon = struct {
+ owner_decl: DeclIndex,
+ id: u32,
+ fields_len: u32,
+ };
+
+ const Aggregate = struct {
+ fwd_decl: CType.Index,
+ fields_len: u32,
+ };
+
+ const Function = struct {
+ return_ctype: CType.Index,
+ param_ctypes_len: u32,
+ };
+
+ fn addExtra(
+ pool: *Pool,
+ allocator: std.mem.Allocator,
+ comptime Extra: type,
+ extra: Extra,
+ trailing_len: usize,
+ ) !ExtraIndex {
+ try pool.extra.ensureUnusedCapacity(
+ allocator,
+ @typeInfo(Extra).Struct.fields.len + trailing_len,
+ );
+ defer pool.addExtraAssumeCapacity(Extra, extra);
+ return @intCast(pool.extra.items.len);
+ }
+ fn addExtraAssumeCapacity(pool: *Pool, comptime Extra: type, extra: Extra) void {
+ addExtraAssumeCapacityTo(&pool.extra, Extra, extra);
+ }
+ fn addExtraAssumeCapacityTo(
+ array: *std.ArrayListUnmanaged(u32),
+ comptime Extra: type,
+ extra: Extra,
+ ) void {
+ inline for (@typeInfo(Extra).Struct.fields) |field| {
+ const value = @field(extra, field.name);
+ array.appendAssumeCapacity(switch (field.type) {
+ u32 => value,
+ CType.Index, String.Index, DeclIndex => @intFromEnum(value),
+ Aligned.Flags => @bitCast(value),
+ else => @compileError("bad field type: " ++ field.name ++ ": " ++
+ @typeName(field.type)),
+ });
+ }
+ }
+
+ fn addHashedExtra(
+ pool: *Pool,
+ allocator: std.mem.Allocator,
+ hasher: *Hasher,
+ comptime Extra: type,
+ extra: Extra,
+ trailing_len: usize,
+ ) !ExtraIndex {
+ hasher.updateExtra(Extra, extra, pool);
+ return pool.addExtra(allocator, Extra, extra, trailing_len);
+ }
+ fn addHashedExtraAssumeCapacity(
+ pool: *Pool,
+ hasher: *Hasher,
+ comptime Extra: type,
+ extra: Extra,
+ ) void {
+ hasher.updateExtra(Extra, extra, pool);
+ pool.addExtraAssumeCapacity(Extra, extra);
+ }
+ fn addHashedExtraAssumeCapacityTo(
+ pool: *Pool,
+ array: *std.ArrayListUnmanaged(u32),
+ hasher: *Hasher,
+ comptime Extra: type,
+ extra: Extra,
+ ) void {
+ hasher.updateExtra(Extra, extra, pool);
+ addExtraAssumeCapacityTo(array, Extra, extra);
+ }
+
+ const ExtraTrail = struct {
+ extra_index: ExtraIndex,
+
+ fn next(
+ extra_trail: *ExtraTrail,
+ len: u32,
+ comptime Extra: type,
+ pool: *const Pool,
+ ) []const Extra {
+ defer extra_trail.extra_index += @intCast(len);
+ return @ptrCast(pool.extra.items[extra_trail.extra_index..][0..len]);
+ }
+ };
+
+ fn getExtraTrail(
+ pool: *const Pool,
+ comptime Extra: type,
+ extra_index: ExtraIndex,
+ ) struct { extra: Extra, trail: ExtraTrail } {
+ var extra: Extra = undefined;
+ const fields = @typeInfo(Extra).Struct.fields;
+ inline for (fields, pool.extra.items[extra_index..][0..fields.len]) |field, value|
+ @field(extra, field.name) = switch (field.type) {
+ u32 => value,
+ CType.Index, String.Index, DeclIndex => @enumFromInt(value),
+ Aligned.Flags => @bitCast(value),
+ else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
+ };
+ return .{
+ .extra = extra,
+ .trail = .{ .extra_index = extra_index + @as(ExtraIndex, @intCast(fields.len)) },
+ };
+ }
+
+ fn getExtra(pool: *const Pool, comptime Extra: type, extra_index: ExtraIndex) Extra {
+ return pool.getExtraTrail(Extra, extra_index).extra;
+ }
+};
+
+pub const AlignAs = packed struct {
+ @"align": Alignment,
+ abi: Alignment,
+
+ pub fn fromAlignment(alignas: AlignAs) AlignAs {
+ assert(alignas.abi != .none);
+ return .{
+ .@"align" = if (alignas.@"align" != .none) alignas.@"align" else alignas.abi,
+ .abi = alignas.abi,
+ };
+ }
+ pub fn fromAbiAlignment(abi: Alignment) AlignAs {
+ assert(abi != .none);
+ return .{ .@"align" = abi, .abi = abi };
+ }
+ pub fn fromByteUnits(@"align": u64, abi: u64) AlignAs {
+ return fromAlignment(.{
+ .@"align" = Alignment.fromByteUnits(@"align"),
+ .abi = Alignment.fromNonzeroByteUnits(abi),
+ });
+ }
+
+ pub fn order(lhs: AlignAs, rhs: AlignAs) std.math.Order {
+ return lhs.@"align".order(rhs.@"align");
+ }
+ pub fn abiOrder(alignas: AlignAs) std.math.Order {
+ return alignas.@"align".order(alignas.abi);
+ }
+ pub fn toByteUnits(alignas: AlignAs) u64 {
+ return alignas.@"align".toByteUnits().?;
+ }
+};
+
+const Alignment = @import("../../InternPool.zig").Alignment;
+const assert = std.debug.assert;
+const CType = @This();
+const DeclIndex = std.zig.DeclIndex;
+const Module = @import("../../Package/Module.zig");
+const std = @import("std");
+const Type = @import("../../type.zig").Type;
+const Zcu = @import("../../Module.zig");
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
deleted file mode 100644
index 66f92c930d99..000000000000
--- a/src/codegen/c/type.zig
+++ /dev/null
@@ -1,2318 +0,0 @@
-const std = @import("std");
-const mem = std.mem;
-const Allocator = mem.Allocator;
-const assert = std.debug.assert;
-const autoHash = std.hash.autoHash;
-const Target = std.Target;
-
-const Alignment = @import("../../InternPool.zig").Alignment;
-const Module = @import("../../Module.zig");
-const InternPool = @import("../../InternPool.zig");
-const Type = @import("../../type.zig").Type;
-
-pub const CType = extern union {
- /// If the tag value is less than Tag.no_payload_count, then no pointer
- /// dereference is needed.
- tag_if_small_enough: Tag,
- ptr_otherwise: *const Payload,
-
- pub fn initTag(small_tag: Tag) CType {
- assert(!small_tag.hasPayload());
- return .{ .tag_if_small_enough = small_tag };
- }
-
- pub fn initPayload(pl: anytype) CType {
- const T = @typeInfo(@TypeOf(pl)).Pointer.child;
- return switch (pl.base.tag) {
- inline else => |t| if (comptime t.hasPayload() and t.Type() == T) .{
- .ptr_otherwise = &pl.base,
- } else unreachable,
- };
- }
-
- pub fn hasPayload(self: CType) bool {
- return self.tag_if_small_enough.hasPayload();
- }
-
- pub fn tag(self: CType) Tag {
- return if (self.hasPayload()) self.ptr_otherwise.tag else self.tag_if_small_enough;
- }
-
- pub fn cast(self: CType, comptime T: type) ?*const T {
- if (!self.hasPayload()) return null;
- const pl = self.ptr_otherwise;
- return switch (pl.tag) {
- inline else => |t| if (comptime t.hasPayload() and t.Type() == T)
- @fieldParentPtr(T, "base", pl)
- else
- null,
- };
- }
-
- pub fn castTag(self: CType, comptime t: Tag) ?*const t.Type() {
- return if (self.tag() == t) @fieldParentPtr(t.Type(), "base", self.ptr_otherwise) else null;
- }
-
- pub const Tag = enum(usize) {
- // The first section of this enum are tags that require no payload.
- void,
-
- // C basic types
- char,
-
- @"signed char",
- short,
- int,
- long,
- @"long long",
-
- _Bool,
- @"unsigned char",
- @"unsigned short",
- @"unsigned int",
- @"unsigned long",
- @"unsigned long long",
-
- float,
- double,
- @"long double",
-
- // C header types
- // - stdbool.h
- bool,
- // - stddef.h
- size_t,
- ptrdiff_t,
- // - stdint.h
- uint8_t,
- int8_t,
- uint16_t,
- int16_t,
- uint32_t,
- int32_t,
- uint64_t,
- int64_t,
- uintptr_t,
- intptr_t,
-
- // zig.h types
- zig_u128,
- zig_i128,
- zig_f16,
- zig_f32,
- zig_f64,
- zig_f80,
- zig_f128,
- zig_c_longdouble, // Keep last_no_payload_tag updated!
-
- // After this, the tag requires a payload.
- pointer,
- pointer_const,
- pointer_volatile,
- pointer_const_volatile,
- array,
- vector,
- fwd_anon_struct,
- fwd_anon_union,
- fwd_struct,
- fwd_union,
- unnamed_struct,
- unnamed_union,
- packed_unnamed_struct,
- packed_unnamed_union,
- anon_struct,
- anon_union,
- @"struct",
- @"union",
- packed_struct,
- packed_union,
- function,
- varargs_function,
-
- pub const last_no_payload_tag = Tag.zig_c_longdouble;
- pub const no_payload_count = @intFromEnum(last_no_payload_tag) + 1;
-
- pub fn hasPayload(self: Tag) bool {
- return @intFromEnum(self) >= no_payload_count;
- }
-
- pub fn toIndex(self: Tag) Index {
- assert(!self.hasPayload());
- return @as(Index, @intCast(@intFromEnum(self)));
- }
-
- pub fn Type(comptime self: Tag) type {
- return switch (self) {
- .void,
- .char,
- .@"signed char",
- .short,
- .int,
- .long,
- .@"long long",
- ._Bool,
- .@"unsigned char",
- .@"unsigned short",
- .@"unsigned int",
- .@"unsigned long",
- .@"unsigned long long",
- .float,
- .double,
- .@"long double",
- .bool,
- .size_t,
- .ptrdiff_t,
- .uint8_t,
- .int8_t,
- .uint16_t,
- .int16_t,
- .uint32_t,
- .int32_t,
- .uint64_t,
- .int64_t,
- .uintptr_t,
- .intptr_t,
- .zig_u128,
- .zig_i128,
- .zig_f16,
- .zig_f32,
- .zig_f64,
- .zig_f80,
- .zig_f128,
- .zig_c_longdouble,
- => @compileError("Type Tag " ++ @tagName(self) ++ " has no payload"),
-
- .pointer,
- .pointer_const,
- .pointer_volatile,
- .pointer_const_volatile,
- => Payload.Child,
-
- .array,
- .vector,
- => Payload.Sequence,
-
- .fwd_anon_struct,
- .fwd_anon_union,
- => Payload.Fields,
-
- .fwd_struct,
- .fwd_union,
- => Payload.FwdDecl,
-
- .unnamed_struct,
- .unnamed_union,
- .packed_unnamed_struct,
- .packed_unnamed_union,
- => Payload.Unnamed,
-
- .anon_struct,
- .anon_union,
- .@"struct",
- .@"union",
- .packed_struct,
- .packed_union,
- => Payload.Aggregate,
-
- .function,
- .varargs_function,
- => Payload.Function,
- };
- }
- };
-
- pub const Payload = struct {
- tag: Tag,
-
- pub const Child = struct {
- base: Payload,
- data: Index,
- };
-
- pub const Sequence = struct {
- base: Payload,
- data: struct {
- len: u64,
- elem_type: Index,
- },
- };
-
- pub const FwdDecl = struct {
- base: Payload,
- data: InternPool.DeclIndex,
- };
-
- pub const Fields = struct {
- base: Payload,
- data: Data,
-
- pub const Data = []const Field;
- pub const Field = struct {
- name: [*:0]const u8,
- type: Index,
- alignas: AlignAs,
- };
- };
-
- pub const Unnamed = struct {
- base: Payload,
- data: struct {
- fields: Fields.Data,
- owner_decl: InternPool.DeclIndex,
- id: u32,
- },
- };
-
- pub const Aggregate = struct {
- base: Payload,
- data: struct {
- fields: Fields.Data,
- fwd_decl: Index,
- },
- };
-
- pub const Function = struct {
- base: Payload,
- data: struct {
- return_type: Index,
- param_types: []const Index,
- },
- };
- };
-
- pub const AlignAs = struct {
- @"align": Alignment,
- abi: Alignment,
-
- pub fn init(@"align": Alignment, abi_align: Alignment) AlignAs {
- assert(abi_align != .none);
- return .{
- .@"align" = if (@"align" != .none) @"align" else abi_align,
- .abi = abi_align,
- };
- }
-
- pub fn initByteUnits(alignment: u64, abi_alignment: u32) AlignAs {
- return init(
- Alignment.fromByteUnits(alignment),
- Alignment.fromNonzeroByteUnits(abi_alignment),
- );
- }
- pub fn abiAlign(ty: Type, mod: *Module) AlignAs {
- const abi_align = ty.abiAlignment(mod);
- return init(abi_align, abi_align);
- }
- pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *Module) AlignAs {
- return init(
- struct_ty.structFieldAlign(field_i, mod),
- struct_ty.structFieldType(field_i, mod).abiAlignment(mod),
- );
- }
- pub fn unionPayloadAlign(union_ty: Type, mod: *Module) AlignAs {
- const union_obj = mod.typeToUnion(union_ty).?;
- const union_payload_align = mod.unionAbiAlignment(union_obj);
- return init(union_payload_align, union_payload_align);
- }
-
- pub fn order(lhs: AlignAs, rhs: AlignAs) std.math.Order {
- return lhs.@"align".order(rhs.@"align");
- }
- pub fn abiOrder(self: AlignAs) std.math.Order {
- return self.@"align".order(self.abi);
- }
- pub fn toByteUnits(self: AlignAs) u64 {
- return self.@"align".toByteUnitsOptional().?;
- }
- };
-
- pub const Index = u32;
- pub const Store = struct {
- arena: std.heap.ArenaAllocator.State = .{},
- set: Set = .{},
-
- pub const Set = struct {
- pub const Map = std.ArrayHashMapUnmanaged(CType, void, HashContext, true);
- const HashContext = struct {
- store: *const Set,
-
- pub fn hash(self: @This(), cty: CType) Map.Hash {
- return @as(Map.Hash, @truncate(cty.hash(self.store.*)));
- }
- pub fn eql(_: @This(), lhs: CType, rhs: CType, _: usize) bool {
- return lhs.eql(rhs);
- }
- };
-
- map: Map = .{},
-
- pub fn indexToCType(self: Set, index: Index) CType {
- if (index < Tag.no_payload_count) return initTag(@as(Tag, @enumFromInt(index)));
- return self.map.keys()[index - Tag.no_payload_count];
- }
-
- pub fn indexToHash(self: Set, index: Index) Map.Hash {
- if (index < Tag.no_payload_count)
- return (HashContext{ .store = &self }).hash(self.indexToCType(index));
- return self.map.entries.items(.hash)[index - Tag.no_payload_count];
- }
-
- pub fn typeToIndex(self: Set, ty: Type, mod: *Module, kind: Kind) ?Index {
- const lookup = Convert.Lookup{ .imm = .{ .set = &self, .mod = mod } };
-
- var convert: Convert = undefined;
- convert.initType(ty, kind, lookup) catch unreachable;
-
- const t = convert.tag();
- if (!t.hasPayload()) return t.toIndex();
-
- return if (self.map.getIndexAdapted(
- ty,
- TypeAdapter32{ .kind = kind, .lookup = lookup, .convert = &convert },
- )) |idx| @as(Index, @intCast(Tag.no_payload_count + idx)) else null;
- }
- };
-
- pub const Promoted = struct {
- arena: std.heap.ArenaAllocator,
- set: Set,
-
- pub fn gpa(self: *Promoted) Allocator {
- return self.arena.child_allocator;
- }
-
- pub fn cTypeToIndex(self: *Promoted, cty: CType) Allocator.Error!Index {
- const t = cty.tag();
- if (@intFromEnum(t) < Tag.no_payload_count) return @as(Index, @intCast(@intFromEnum(t)));
-
- const gop = try self.set.map.getOrPutContext(self.gpa(), cty, .{ .store = &self.set });
- if (!gop.found_existing) gop.key_ptr.* = cty;
- if (std.debug.runtime_safety) {
- const key = &self.set.map.entries.items(.key)[gop.index];
- assert(key == gop.key_ptr);
- assert(cty.eql(key.*));
- assert(cty.hash(self.set) == key.hash(self.set));
- }
- return @as(Index, @intCast(Tag.no_payload_count + gop.index));
- }
-
- pub fn typeToIndex(
- self: *Promoted,
- ty: Type,
- mod: *Module,
- kind: Kind,
- ) Allocator.Error!Index {
- const lookup = Convert.Lookup{ .mut = .{ .promoted = self, .mod = mod } };
-
- var convert: Convert = undefined;
- try convert.initType(ty, kind, lookup);
-
- const t = convert.tag();
- if (!t.hasPayload()) return t.toIndex();
-
- const gop = try self.set.map.getOrPutContextAdapted(
- self.gpa(),
- ty,
- TypeAdapter32{ .kind = kind, .lookup = lookup.freeze(), .convert = &convert },
- .{ .store = &self.set },
- );
- if (!gop.found_existing) {
- errdefer _ = self.set.map.pop();
- gop.key_ptr.* = try createFromConvert(self, ty, lookup.getModule(), kind, convert);
- }
- if (std.debug.runtime_safety) {
- const adapter = TypeAdapter64{
- .kind = kind,
- .lookup = lookup.freeze(),
- .convert = &convert,
- };
- const cty = &self.set.map.entries.items(.key)[gop.index];
- assert(cty == gop.key_ptr);
- assert(adapter.eql(ty, cty.*));
- assert(adapter.hash(ty) == cty.hash(self.set));
- }
- return @as(Index, @intCast(Tag.no_payload_count + gop.index));
- }
- };
-
- pub fn promote(self: Store, gpa: Allocator) Promoted {
- return .{ .arena = self.arena.promote(gpa), .set = self.set };
- }
-
- pub fn demote(self: *Store, promoted: Promoted) void {
- self.arena = promoted.arena.state;
- self.set = promoted.set;
- }
-
- pub fn indexToCType(self: Store, index: Index) CType {
- return self.set.indexToCType(index);
- }
-
- pub fn indexToHash(self: Store, index: Index) Set.Map.Hash {
- return self.set.indexToHash(index);
- }
-
- pub fn cTypeToIndex(self: *Store, gpa: Allocator, cty: CType) !Index {
- var promoted = self.promote(gpa);
- defer self.demote(promoted);
- return promoted.cTypeToIndex(cty);
- }
-
- pub fn typeToCType(self: *Store, gpa: Allocator, ty: Type, mod: *Module, kind: Kind) !CType {
- const idx = try self.typeToIndex(gpa, ty, mod, kind);
- return self.indexToCType(idx);
- }
-
- pub fn typeToIndex(self: *Store, gpa: Allocator, ty: Type, mod: *Module, kind: Kind) !Index {
- var promoted = self.promote(gpa);
- defer self.demote(promoted);
- return promoted.typeToIndex(ty, mod, kind);
- }
-
- pub fn clearRetainingCapacity(self: *Store, gpa: Allocator) void {
- var promoted = self.promote(gpa);
- defer self.demote(promoted);
- promoted.set.map.clearRetainingCapacity();
- _ = promoted.arena.reset(.retain_capacity);
- }
-
- pub fn clearAndFree(self: *Store, gpa: Allocator) void {
- var promoted = self.promote(gpa);
- defer self.demote(promoted);
- promoted.set.map.clearAndFree(gpa);
- _ = promoted.arena.reset(.free_all);
- }
-
- pub fn shrinkRetainingCapacity(self: *Store, gpa: Allocator, new_len: usize) void {
- self.set.map.shrinkRetainingCapacity(gpa, new_len);
- }
-
- pub fn shrinkAndFree(self: *Store, gpa: Allocator, new_len: usize) void {
- self.set.map.shrinkAndFree(gpa, new_len);
- }
-
- pub fn count(self: Store) usize {
- return self.set.map.count();
- }
-
- pub fn move(self: *Store) Store {
- const moved = self.*;
- self.* = .{};
- return moved;
- }
-
- pub fn deinit(self: *Store, gpa: Allocator) void {
- var promoted = self.promote(gpa);
- promoted.set.map.deinit(gpa);
- _ = promoted.arena.deinit();
- self.* = undefined;
- }
- };
-
- pub fn isBool(self: CType) bool {
- return switch (self.tag()) {
- ._Bool,
- .bool,
- => true,
- else => false,
- };
- }
-
- pub fn isInteger(self: CType) bool {
- return switch (self.tag()) {
- .char,
- .@"signed char",
- .short,
- .int,
- .long,
- .@"long long",
- .@"unsigned char",
- .@"unsigned short",
- .@"unsigned int",
- .@"unsigned long",
- .@"unsigned long long",
- .size_t,
- .ptrdiff_t,
- .uint8_t,
- .int8_t,
- .uint16_t,
- .int16_t,
- .uint32_t,
- .int32_t,
- .uint64_t,
- .int64_t,
- .uintptr_t,
- .intptr_t,
- .zig_u128,
- .zig_i128,
- => true,
- else => false,
- };
- }
-
- pub fn signedness(self: CType, target: std.Target) std.builtin.Signedness {
- return switch (self.tag()) {
- .char => target.charSignedness(),
- .@"signed char",
- .short,
- .int,
- .long,
- .@"long long",
- .ptrdiff_t,
- .int8_t,
- .int16_t,
- .int32_t,
- .int64_t,
- .intptr_t,
- .zig_i128,
- => .signed,
- .@"unsigned char",
- .@"unsigned short",
- .@"unsigned int",
- .@"unsigned long",
- .@"unsigned long long",
- .size_t,
- .uint8_t,
- .uint16_t,
- .uint32_t,
- .uint64_t,
- .uintptr_t,
- .zig_u128,
- => .unsigned,
- else => unreachable,
- };
- }
-
- pub fn isFloat(self: CType) bool {
- return switch (self.tag()) {
- .float,
- .double,
- .@"long double",
- .zig_f16,
- .zig_f32,
- .zig_f64,
- .zig_f80,
- .zig_f128,
- .zig_c_longdouble,
- => true,
- else => false,
- };
- }
-
- pub fn isPointer(self: CType) bool {
- return switch (self.tag()) {
- .pointer,
- .pointer_const,
- .pointer_volatile,
- .pointer_const_volatile,
- => true,
- else => false,
- };
- }
-
- pub fn isFunction(self: CType) bool {
- return switch (self.tag()) {
- .function,
- .varargs_function,
- => true,
- else => false,
- };
- }
-
- pub fn toSigned(self: CType) CType {
- return CType.initTag(switch (self.tag()) {
- .char, .@"signed char", .@"unsigned char" => .@"signed char",
- .short, .@"unsigned short" => .short,
- .int, .@"unsigned int" => .int,
- .long, .@"unsigned long" => .long,
- .@"long long", .@"unsigned long long" => .@"long long",
- .size_t, .ptrdiff_t => .ptrdiff_t,
- .uint8_t, .int8_t => .int8_t,
- .uint16_t, .int16_t => .int16_t,
- .uint32_t, .int32_t => .int32_t,
- .uint64_t, .int64_t => .int64_t,
- .uintptr_t, .intptr_t => .intptr_t,
- .zig_u128, .zig_i128 => .zig_i128,
- .float,
- .double,
- .@"long double",
- .zig_f16,
- .zig_f32,
- .zig_f80,
- .zig_f128,
- .zig_c_longdouble,
- => |t| t,
- else => unreachable,
- });
- }
-
- pub fn toUnsigned(self: CType) CType {
- return CType.initTag(switch (self.tag()) {
- .char, .@"signed char", .@"unsigned char" => .@"unsigned char",
- .short, .@"unsigned short" => .@"unsigned short",
- .int, .@"unsigned int" => .@"unsigned int",
- .long, .@"unsigned long" => .@"unsigned long",
- .@"long long", .@"unsigned long long" => .@"unsigned long long",
- .size_t, .ptrdiff_t => .size_t,
- .uint8_t, .int8_t => .uint8_t,
- .uint16_t, .int16_t => .uint16_t,
- .uint32_t, .int32_t => .uint32_t,
- .uint64_t, .int64_t => .uint64_t,
- .uintptr_t, .intptr_t => .uintptr_t,
- .zig_u128, .zig_i128 => .zig_u128,
- else => unreachable,
- });
- }
-
- pub fn toSignedness(self: CType, s: std.builtin.Signedness) CType {
- return switch (s) {
- .unsigned => self.toUnsigned(),
- .signed => self.toSigned(),
- };
- }
-
- pub fn getStandardDefineAbbrev(self: CType) ?[]const u8 {
- return switch (self.tag()) {
- .char => "CHAR",
- .@"signed char" => "SCHAR",
- .short => "SHRT",
- .int => "INT",
- .long => "LONG",
- .@"long long" => "LLONG",
- .@"unsigned char" => "UCHAR",
- .@"unsigned short" => "USHRT",
- .@"unsigned int" => "UINT",
- .@"unsigned long" => "ULONG",
- .@"unsigned long long" => "ULLONG",
- .float => "FLT",
- .double => "DBL",
- .@"long double" => "LDBL",
- .size_t => "SIZE",
- .ptrdiff_t => "PTRDIFF",
- .uint8_t => "UINT8",
- .int8_t => "INT8",
- .uint16_t => "UINT16",
- .int16_t => "INT16",
- .uint32_t => "UINT32",
- .int32_t => "INT32",
- .uint64_t => "UINT64",
- .int64_t => "INT64",
- .uintptr_t => "UINTPTR",
- .intptr_t => "INTPTR",
- else => null,
- };
- }
-
- pub fn renderLiteralPrefix(self: CType, writer: anytype, kind: Kind) @TypeOf(writer).Error!void {
- switch (self.tag()) {
- .void => unreachable,
- ._Bool,
- .char,
- .@"signed char",
- .short,
- .@"unsigned short",
- .bool,
- .size_t,
- .ptrdiff_t,
- .uintptr_t,
- .intptr_t,
- => |t| switch (kind) {
- else => try writer.print("({s})", .{@tagName(t)}),
- .global => {},
- },
- .int,
- .long,
- .@"long long",
- .@"unsigned char",
- .@"unsigned int",
- .@"unsigned long",
- .@"unsigned long long",
- .float,
- .double,
- .@"long double",
- => {},
- .uint8_t,
- .int8_t,
- .uint16_t,
- .int16_t,
- .uint32_t,
- .int32_t,
- .uint64_t,
- .int64_t,
- => try writer.print("{s}_C(", .{self.getStandardDefineAbbrev().?}),
- .zig_u128,
- .zig_i128,
- .zig_f16,
- .zig_f32,
- .zig_f64,
- .zig_f80,
- .zig_f128,
- .zig_c_longdouble,
- => |t| try writer.print("zig_{s}_{s}(", .{
- switch (kind) {
- else => "make",
- .global => "init",
- },
- @tagName(t)["zig_".len..],
- }),
- .pointer,
- .pointer_const,
- .pointer_volatile,
- .pointer_const_volatile,
- => unreachable,
- .array,
- .vector,
- => try writer.writeByte('{'),
- .fwd_anon_struct,
- .fwd_anon_union,
- .fwd_struct,
- .fwd_union,
- .unnamed_struct,
- .unnamed_union,
- .packed_unnamed_struct,
- .packed_unnamed_union,
- .anon_struct,
- .anon_union,
- .@"struct",
- .@"union",
- .packed_struct,
- .packed_union,
- .function,
- .varargs_function,
- => unreachable,
- }
- }
-
- pub fn renderLiteralSuffix(self: CType, writer: anytype) @TypeOf(writer).Error!void {
- switch (self.tag()) {
- .void => unreachable,
- ._Bool => {},
- .char,
- .@"signed char",
- .short,
- .int,
- => {},
- .long => try writer.writeByte('l'),
- .@"long long" => try writer.writeAll("ll"),
- .@"unsigned char",
- .@"unsigned short",
- .@"unsigned int",
- => try writer.writeByte('u'),
- .@"unsigned long",
- .size_t,
- .uintptr_t,
- => try writer.writeAll("ul"),
- .@"unsigned long long" => try writer.writeAll("ull"),
- .float => try writer.writeByte('f'),
- .double => {},
- .@"long double" => try writer.writeByte('l'),
- .bool,
- .ptrdiff_t,
- .intptr_t,
- => {},
- .uint8_t,
- .int8_t,
- .uint16_t,
- .int16_t,
- .uint32_t,
- .int32_t,
- .uint64_t,
- .int64_t,
- .zig_u128,
- .zig_i128,
- .zig_f16,
- .zig_f32,
- .zig_f64,
- .zig_f80,
- .zig_f128,
- .zig_c_longdouble,
- => try writer.writeByte(')'),
- .pointer,
- .pointer_const,
- .pointer_volatile,
- .pointer_const_volatile,
- => unreachable,
- .array,
- .vector,
- => try writer.writeByte('}'),
- .fwd_anon_struct,
- .fwd_anon_union,
- .fwd_struct,
- .fwd_union,
- .unnamed_struct,
- .unnamed_union,
- .packed_unnamed_struct,
- .packed_unnamed_union,
- .anon_struct,
- .anon_union,
- .@"struct",
- .@"union",
- .packed_struct,
- .packed_union,
- .function,
- .varargs_function,
- => unreachable,
- }
- }
-
- pub fn floatActiveBits(self: CType, target: Target) u16 {
- return switch (self.tag()) {
- .float => target.c_type_bit_size(.float),
- .double => target.c_type_bit_size(.double),
- .@"long double", .zig_c_longdouble => target.c_type_bit_size(.longdouble),
- .zig_f16 => 16,
- .zig_f32 => 32,
- .zig_f64 => 64,
- .zig_f80 => 80,
- .zig_f128 => 128,
- else => unreachable,
- };
- }
-
- pub fn byteSize(self: CType, store: Store.Set, target: Target) u64 {
- return switch (self.tag()) {
- .void => 0,
- .char, .@"signed char", ._Bool, .@"unsigned char", .bool, .uint8_t, .int8_t => 1,
- .short => target.c_type_byte_size(.short),
- .int => target.c_type_byte_size(.int),
- .long => target.c_type_byte_size(.long),
- .@"long long" => target.c_type_byte_size(.longlong),
- .@"unsigned short" => target.c_type_byte_size(.ushort),
- .@"unsigned int" => target.c_type_byte_size(.uint),
- .@"unsigned long" => target.c_type_byte_size(.ulong),
- .@"unsigned long long" => target.c_type_byte_size(.ulonglong),
- .float => target.c_type_byte_size(.float),
- .double => target.c_type_byte_size(.double),
- .@"long double" => target.c_type_byte_size(.longdouble),
- .size_t,
- .ptrdiff_t,
- .uintptr_t,
- .intptr_t,
- .pointer,
- .pointer_const,
- .pointer_volatile,
- .pointer_const_volatile,
- => @divExact(target.ptrBitWidth(), 8),
- .uint16_t, .int16_t, .zig_f16 => 2,
- .uint32_t, .int32_t, .zig_f32 => 4,
- .uint64_t, .int64_t, .zig_f64 => 8,
- .zig_u128, .zig_i128, .zig_f128 => 16,
- .zig_f80 => if (target.c_type_bit_size(.longdouble) == 80)
- target.c_type_byte_size(.longdouble)
- else
- 16,
- .zig_c_longdouble => target.c_type_byte_size(.longdouble),
-
- .array,
- .vector,
- => {
- const data = self.cast(Payload.Sequence).?.data;
- return data.len * store.indexToCType(data.elem_type).byteSize(store, target);
- },
-
- .fwd_anon_struct,
- .fwd_anon_union,
- .fwd_struct,
- .fwd_union,
- .unnamed_struct,
- .unnamed_union,
- .packed_unnamed_struct,
- .packed_unnamed_union,
- .anon_struct,
- .anon_union,
- .@"struct",
- .@"union",
- .packed_struct,
- .packed_union,
- .function,
- .varargs_function,
- => unreachable,
- };
- }
-
- pub fn isPacked(self: CType) bool {
- return switch (self.tag()) {
- else => false,
- .packed_unnamed_struct,
- .packed_unnamed_union,
- .packed_struct,
- .packed_union,
- => true,
- };
- }
-
- pub fn fields(self: CType) Payload.Fields.Data {
- return if (self.cast(Payload.Aggregate)) |pl|
- pl.data.fields
- else if (self.cast(Payload.Unnamed)) |pl|
- pl.data.fields
- else if (self.cast(Payload.Fields)) |pl|
- pl.data
- else
- unreachable;
- }
-
- pub fn eql(lhs: CType, rhs: CType) bool {
- return lhs.eqlContext(rhs, struct {
- pub fn eqlIndex(_: @This(), lhs_idx: Index, rhs_idx: Index) bool {
- return lhs_idx == rhs_idx;
- }
- }{});
- }
-
- pub fn eqlContext(lhs: CType, rhs: CType, ctx: anytype) bool {
- // As a shortcut, if the small tags / addresses match, we're done.
- if (lhs.tag_if_small_enough == rhs.tag_if_small_enough) return true;
-
- const lhs_tag = lhs.tag();
- const rhs_tag = rhs.tag();
- if (lhs_tag != rhs_tag) return false;
-
- return switch (lhs_tag) {
- .void,
- .char,
- .@"signed char",
- .short,
- .int,
- .long,
- .@"long long",
- ._Bool,
- .@"unsigned char",
- .@"unsigned short",
- .@"unsigned int",
- .@"unsigned long",
- .@"unsigned long long",
- .float,
- .double,
- .@"long double",
- .bool,
- .size_t,
- .ptrdiff_t,
- .uint8_t,
- .int8_t,
- .uint16_t,
- .int16_t,
- .uint32_t,
- .int32_t,
- .uint64_t,
- .int64_t,
- .uintptr_t,
- .intptr_t,
- .zig_u128,
- .zig_i128,
- .zig_f16,
- .zig_f32,
- .zig_f64,
- .zig_f80,
- .zig_f128,
- .zig_c_longdouble,
- => false,
-
- .pointer,
- .pointer_const,
- .pointer_volatile,
- .pointer_const_volatile,
- => ctx.eqlIndex(lhs.cast(Payload.Child).?.data, rhs.cast(Payload.Child).?.data),
-
- .array,
- .vector,
- => {
- const lhs_data = lhs.cast(Payload.Sequence).?.data;
- const rhs_data = rhs.cast(Payload.Sequence).?.data;
- return lhs_data.len == rhs_data.len and
- ctx.eqlIndex(lhs_data.elem_type, rhs_data.elem_type);
- },
-
- .fwd_anon_struct,
- .fwd_anon_union,
- => {
- const lhs_data = lhs.cast(Payload.Fields).?.data;
- const rhs_data = rhs.cast(Payload.Fields).?.data;
- if (lhs_data.len != rhs_data.len) return false;
- for (lhs_data, rhs_data) |lhs_field, rhs_field| {
- if (!ctx.eqlIndex(lhs_field.type, rhs_field.type)) return false;
- if (lhs_field.alignas.@"align" != rhs_field.alignas.@"align") return false;
- if (std.mem.orderZ(u8, lhs_field.name, rhs_field.name) != .eq) return false;
- }
- return true;
- },
-
- .fwd_struct,
- .fwd_union,
- => lhs.cast(Payload.FwdDecl).?.data == rhs.cast(Payload.FwdDecl).?.data,
-
- .unnamed_struct,
- .unnamed_union,
- .packed_unnamed_struct,
- .packed_unnamed_union,
- => {
- const lhs_data = lhs.cast(Payload.Unnamed).?.data;
- const rhs_data = rhs.cast(Payload.Unnamed).?.data;
- return lhs_data.owner_decl == rhs_data.owner_decl and lhs_data.id == rhs_data.id;
- },
-
- .anon_struct,
- .anon_union,
- .@"struct",
- .@"union",
- .packed_struct,
- .packed_union,
- => ctx.eqlIndex(
- lhs.cast(Payload.Aggregate).?.data.fwd_decl,
- rhs.cast(Payload.Aggregate).?.data.fwd_decl,
- ),
-
- .function,
- .varargs_function,
- => {
- const lhs_data = lhs.cast(Payload.Function).?.data;
- const rhs_data = rhs.cast(Payload.Function).?.data;
- if (lhs_data.param_types.len != rhs_data.param_types.len) return false;
- if (!ctx.eqlIndex(lhs_data.return_type, rhs_data.return_type)) return false;
- for (lhs_data.param_types, rhs_data.param_types) |lhs_param_idx, rhs_param_idx| {
- if (!ctx.eqlIndex(lhs_param_idx, rhs_param_idx)) return false;
- }
- return true;
- },
- };
- }
-
- pub fn hash(self: CType, store: Store.Set) u64 {
- var hasher = std.hash.Wyhash.init(0);
- self.updateHasher(&hasher, store);
- return hasher.final();
- }
-
- pub fn updateHasher(self: CType, hasher: anytype, store: Store.Set) void {
- const t = self.tag();
- autoHash(hasher, t);
- switch (t) {
- .void,
- .char,
- .@"signed char",
- .short,
- .int,
- .long,
- .@"long long",
- ._Bool,
- .@"unsigned char",
- .@"unsigned short",
- .@"unsigned int",
- .@"unsigned long",
- .@"unsigned long long",
- .float,
- .double,
- .@"long double",
- .bool,
- .size_t,
- .ptrdiff_t,
- .uint8_t,
- .int8_t,
- .uint16_t,
- .int16_t,
- .uint32_t,
- .int32_t,
- .uint64_t,
- .int64_t,
- .uintptr_t,
- .intptr_t,
- .zig_u128,
- .zig_i128,
- .zig_f16,
- .zig_f32,
- .zig_f64,
- .zig_f80,
- .zig_f128,
- .zig_c_longdouble,
- => {},
-
- .pointer,
- .pointer_const,
- .pointer_volatile,
- .pointer_const_volatile,
- => store.indexToCType(self.cast(Payload.Child).?.data).updateHasher(hasher, store),
-
- .array,
- .vector,
- => {
- const data = self.cast(Payload.Sequence).?.data;
- autoHash(hasher, data.len);
- store.indexToCType(data.elem_type).updateHasher(hasher, store);
- },
-
- .fwd_anon_struct,
- .fwd_anon_union,
- => for (self.cast(Payload.Fields).?.data) |field| {
- store.indexToCType(field.type).updateHasher(hasher, store);
- hasher.update(mem.span(field.name));
- autoHash(hasher, field.alignas.@"align");
- },
-
- .fwd_struct,
- .fwd_union,
- => autoHash(hasher, self.cast(Payload.FwdDecl).?.data),
-
- .unnamed_struct,
- .unnamed_union,
- .packed_unnamed_struct,
- .packed_unnamed_union,
- => {
- const data = self.cast(Payload.Unnamed).?.data;
- autoHash(hasher, data.owner_decl);
- autoHash(hasher, data.id);
- },
-
- .anon_struct,
- .anon_union,
- .@"struct",
- .@"union",
- .packed_struct,
- .packed_union,
- => store.indexToCType(self.cast(Payload.Aggregate).?.data.fwd_decl)
- .updateHasher(hasher, store),
-
- .function,
- .varargs_function,
- => {
- const data = self.cast(Payload.Function).?.data;
- store.indexToCType(data.return_type).updateHasher(hasher, store);
- for (data.param_types) |param_ty| {
- store.indexToCType(param_ty).updateHasher(hasher, store);
- }
- },
- }
- }
-
- pub const Kind = enum { forward, forward_parameter, complete, global, parameter, payload };
-
- const Convert = struct {
- storage: union {
- none: void,
- child: Payload.Child,
- seq: Payload.Sequence,
- fwd: Payload.FwdDecl,
- anon: struct {
- fields: [2]Payload.Fields.Field,
- pl: union {
- forward: Payload.Fields,
- complete: Payload.Aggregate,
- },
- },
- },
- value: union(enum) {
- tag: Tag,
- cty: CType,
- },
-
- pub fn init(self: *@This(), t: Tag) void {
- self.* = if (t.hasPayload()) .{
- .storage = .{ .none = {} },
- .value = .{ .tag = t },
- } else .{
- .storage = .{ .none = {} },
- .value = .{ .cty = initTag(t) },
- };
- }
-
- pub fn tag(self: @This()) Tag {
- return switch (self.value) {
- .tag => |t| t,
- .cty => |c| c.tag(),
- };
- }
-
- fn tagFromIntInfo(int_info: std.builtin.Type.Int) Tag {
- return switch (int_info.bits) {
- 0 => .void,
- 1...8 => switch (int_info.signedness) {
- .unsigned => .uint8_t,
- .signed => .int8_t,
- },
- 9...16 => switch (int_info.signedness) {
- .unsigned => .uint16_t,
- .signed => .int16_t,
- },
- 17...32 => switch (int_info.signedness) {
- .unsigned => .uint32_t,
- .signed => .int32_t,
- },
- 33...64 => switch (int_info.signedness) {
- .unsigned => .uint64_t,
- .signed => .int64_t,
- },
- 65...128 => switch (int_info.signedness) {
- .unsigned => .zig_u128,
- .signed => .zig_i128,
- },
- else => .array,
- };
- }
-
- pub const Lookup = union(enum) {
- fail: *Module,
- imm: struct {
- set: *const Store.Set,
- mod: *Module,
- },
- mut: struct {
- promoted: *Store.Promoted,
- mod: *Module,
- },
-
- pub fn isMutable(self: @This()) bool {
- return switch (self) {
- .fail, .imm => false,
- .mut => true,
- };
- }
-
- pub fn getTarget(self: @This()) Target {
- return self.getModule().getTarget();
- }
-
- pub fn getModule(self: @This()) *Module {
- return switch (self) {
- .fail => |mod| mod,
- .imm => |imm| imm.mod,
- .mut => |mut| mut.mod,
- };
- }
-
- pub fn getSet(self: @This()) ?*const Store.Set {
- return switch (self) {
- .fail => null,
- .imm => |imm| imm.set,
- .mut => |mut| &mut.promoted.set,
- };
- }
-
- pub fn typeToIndex(self: @This(), ty: Type, kind: Kind) !?Index {
- return switch (self) {
- .fail => null,
- .imm => |imm| imm.set.typeToIndex(ty, imm.mod, kind),
- .mut => |mut| try mut.promoted.typeToIndex(ty, mut.mod, kind),
- };
- }
-
- pub fn indexToCType(self: @This(), index: Index) ?CType {
- return if (self.getSet()) |set| set.indexToCType(index) else null;
- }
-
- pub fn freeze(self: @This()) @This() {
- return switch (self) {
- .fail, .imm => self,
- .mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .mod = mut.mod } },
- };
- }
- };
-
- fn sortFields(self: *@This(), fields_len: usize) []Payload.Fields.Field {
- const Field = Payload.Fields.Field;
- const slice = self.storage.anon.fields[0..fields_len];
- mem.sort(Field, slice, {}, struct {
- fn before(_: void, lhs: Field, rhs: Field) bool {
- return lhs.alignas.order(rhs.alignas).compare(.gt);
- }
- }.before);
- return slice;
- }
-
- fn initAnon(self: *@This(), kind: Kind, fwd_idx: Index, fields_len: usize) void {
- switch (kind) {
- .forward, .forward_parameter => {
- self.storage.anon.pl = .{ .forward = .{
- .base = .{ .tag = .fwd_anon_struct },
- .data = self.sortFields(fields_len),
- } };
- self.value = .{ .cty = initPayload(&self.storage.anon.pl.forward) };
- },
- .complete, .parameter, .global => {
- self.storage.anon.pl = .{ .complete = .{
- .base = .{ .tag = .anon_struct },
- .data = .{
- .fields = self.sortFields(fields_len),
- .fwd_decl = fwd_idx,
- },
- } };
- self.value = .{ .cty = initPayload(&self.storage.anon.pl.complete) };
- },
- .payload => unreachable,
- }
- }
-
- fn initArrayParameter(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void {
- if (switch (kind) {
- .forward_parameter => @as(Index, undefined),
- .parameter => try lookup.typeToIndex(ty, .forward_parameter),
- .forward, .complete, .global, .payload => unreachable,
- }) |fwd_idx| {
- if (try lookup.typeToIndex(ty, switch (kind) {
- .forward_parameter => .forward,
- .parameter => .complete,
- .forward, .complete, .global, .payload => unreachable,
- })) |array_idx| {
- self.storage = .{ .anon = undefined };
- self.storage.anon.fields[0] = .{
- .name = "array",
- .type = array_idx,
- .alignas = AlignAs.abiAlign(ty, lookup.getModule()),
- };
- self.initAnon(kind, fwd_idx, 1);
- } else self.init(switch (kind) {
- .forward_parameter => .fwd_anon_struct,
- .parameter => .anon_struct,
- .forward, .complete, .global, .payload => unreachable,
- });
- } else self.init(.anon_struct);
- }
-
- pub fn initType(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void {
- const mod = lookup.getModule();
- const ip = &mod.intern_pool;
-
- self.* = undefined;
- if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(mod))
- self.init(.void)
- else if (ty.isAbiInt(mod)) switch (ty.ip_index) {
- .usize_type => self.init(.uintptr_t),
- .isize_type => self.init(.intptr_t),
- .c_char_type => self.init(.char),
- .c_short_type => self.init(.short),
- .c_ushort_type => self.init(.@"unsigned short"),
- .c_int_type => self.init(.int),
- .c_uint_type => self.init(.@"unsigned int"),
- .c_long_type => self.init(.long),
- .c_ulong_type => self.init(.@"unsigned long"),
- .c_longlong_type => self.init(.@"long long"),
- .c_ulonglong_type => self.init(.@"unsigned long long"),
- else => switch (tagFromIntInfo(ty.intInfo(mod))) {
- .void => unreachable,
- else => |t| self.init(t),
- .array => switch (kind) {
- .forward, .complete, .global => {
- const abi_size = ty.abiSize(mod);
- const abi_align = ty.abiAlignment(mod).toByteUnits(0);
- self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{
- .len = @divExact(abi_size, abi_align),
- .elem_type = tagFromIntInfo(.{
- .signedness = .unsigned,
- .bits = @intCast(abi_align * 8),
- }).toIndex(),
- } } };
- self.value = .{ .cty = initPayload(&self.storage.seq) };
- },
- .forward_parameter,
- .parameter,
- => try self.initArrayParameter(ty, kind, lookup),
- .payload => unreachable,
- },
- },
- } else switch (ty.zigTypeTag(mod)) {
- .Frame => unreachable,
- .AnyFrame => unreachable,
-
- .Int,
- .Enum,
- .ErrorSet,
- .Type,
- .Void,
- .NoReturn,
- .ComptimeFloat,
- .ComptimeInt,
- .Undefined,
- .Null,
- .EnumLiteral,
- => unreachable,
-
- .Bool => self.init(.bool),
-
- .Float => self.init(switch (ty.ip_index) {
- .f16_type => .zig_f16,
- .f32_type => .zig_f32,
- .f64_type => .zig_f64,
- .f80_type => .zig_f80,
- .f128_type => .zig_f128,
- .c_longdouble_type => .zig_c_longdouble,
- else => unreachable,
- }),
-
- .Pointer => {
- const info = ty.ptrInfo(mod);
- switch (info.flags.size) {
- .Slice => {
- if (switch (kind) {
- .forward, .forward_parameter => @as(Index, undefined),
- .complete, .parameter, .global => try lookup.typeToIndex(ty, .forward),
- .payload => unreachable,
- }) |fwd_idx| {
- const ptr_ty = ty.slicePtrFieldType(mod);
- if (try lookup.typeToIndex(ptr_ty, kind)) |ptr_idx| {
- self.storage = .{ .anon = undefined };
- self.storage.anon.fields[0] = .{
- .name = "ptr",
- .type = ptr_idx,
- .alignas = AlignAs.abiAlign(ptr_ty, mod),
- };
- self.storage.anon.fields[1] = .{
- .name = "len",
- .type = Tag.uintptr_t.toIndex(),
- .alignas = AlignAs.abiAlign(Type.usize, mod),
- };
- self.initAnon(kind, fwd_idx, 2);
- } else self.init(switch (kind) {
- .forward, .forward_parameter => .fwd_anon_struct,
- .complete, .parameter, .global => .anon_struct,
- .payload => unreachable,
- });
- } else self.init(.anon_struct);
- },
-
- .One, .Many, .C => {
- const t: Tag = switch (info.flags.is_volatile) {
- false => switch (info.flags.is_const) {
- false => .pointer,
- true => .pointer_const,
- },
- true => switch (info.flags.is_const) {
- false => .pointer_volatile,
- true => .pointer_const_volatile,
- },
- };
-
- const pointee_ty = if (info.packed_offset.host_size > 0 and
- info.flags.vector_index == .none)
- try mod.intType(.unsigned, info.packed_offset.host_size * 8)
- else
- Type.fromInterned(info.child);
-
- if (try lookup.typeToIndex(pointee_ty, .forward)) |child_idx| {
- self.storage = .{ .child = .{
- .base = .{ .tag = t },
- .data = child_idx,
- } };
- self.value = .{ .cty = initPayload(&self.storage.child) };
- } else self.init(t);
- },
- }
- },
-
- .Struct, .Union => |zig_ty_tag| if (ty.containerLayout(mod) == .@"packed") {
- if (mod.typeToPackedStruct(ty)) |packed_struct| {
- try self.initType(Type.fromInterned(packed_struct.backingIntType(ip).*), kind, lookup);
- } else {
- const bits: u16 = @intCast(ty.bitSize(mod));
- const int_ty = try mod.intType(.unsigned, bits);
- try self.initType(int_ty, kind, lookup);
- }
- } else if (ty.isTupleOrAnonStruct(mod)) {
- if (lookup.isMutable()) {
- for (0..switch (zig_ty_tag) {
- .Struct => ty.structFieldCount(mod),
- .Union => mod.typeToUnion(ty).?.field_types.len,
- else => unreachable,
- }) |field_i| {
- const field_ty = ty.structFieldType(field_i, mod);
- if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
- !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- _ = try lookup.typeToIndex(field_ty, switch (kind) {
- .forward, .forward_parameter => .forward,
- .complete, .parameter => .complete,
- .global => .global,
- .payload => unreachable,
- });
- }
- switch (kind) {
- .forward, .forward_parameter => {},
- .complete, .parameter, .global => _ = try lookup.typeToIndex(ty, .forward),
- .payload => unreachable,
- }
- }
- self.init(switch (kind) {
- .forward, .forward_parameter => switch (zig_ty_tag) {
- .Struct => .fwd_anon_struct,
- .Union => .fwd_anon_union,
- else => unreachable,
- },
- .complete, .parameter, .global => switch (zig_ty_tag) {
- .Struct => .anon_struct,
- .Union => .anon_union,
- else => unreachable,
- },
- .payload => unreachable,
- });
- } else {
- const tag_ty = ty.unionTagTypeSafety(mod);
- const is_tagged_union_wrapper = kind != .payload and tag_ty != null;
- const is_struct = zig_ty_tag == .Struct or is_tagged_union_wrapper;
- switch (kind) {
- .forward, .forward_parameter => {
- self.storage = .{ .fwd = .{
- .base = .{ .tag = if (is_struct) .fwd_struct else .fwd_union },
- .data = ty.getOwnerDecl(mod),
- } };
- self.value = .{ .cty = initPayload(&self.storage.fwd) };
- },
- .complete, .parameter, .global, .payload => if (is_tagged_union_wrapper) {
- const fwd_idx = try lookup.typeToIndex(ty, .forward);
- const payload_idx = try lookup.typeToIndex(ty, .payload);
- const tag_idx = try lookup.typeToIndex(tag_ty.?, kind);
- if (fwd_idx != null and payload_idx != null and tag_idx != null) {
- self.storage = .{ .anon = undefined };
- var field_count: usize = 0;
- if (payload_idx != Tag.void.toIndex()) {
- self.storage.anon.fields[field_count] = .{
- .name = "payload",
- .type = payload_idx.?,
- .alignas = AlignAs.unionPayloadAlign(ty, mod),
- };
- field_count += 1;
- }
- if (tag_idx != Tag.void.toIndex()) {
- self.storage.anon.fields[field_count] = .{
- .name = "tag",
- .type = tag_idx.?,
- .alignas = AlignAs.abiAlign(tag_ty.?, mod),
- };
- field_count += 1;
- }
- self.storage.anon.pl = .{ .complete = .{
- .base = .{ .tag = .@"struct" },
- .data = .{
- .fields = self.sortFields(field_count),
- .fwd_decl = fwd_idx.?,
- },
- } };
- self.value = .{ .cty = initPayload(&self.storage.anon.pl.complete) };
- } else self.init(.@"struct");
- } else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes(mod)) {
- self.init(.void);
- } else {
- var is_packed = false;
- for (0..switch (zig_ty_tag) {
- .Struct => ty.structFieldCount(mod),
- .Union => mod.typeToUnion(ty).?.field_types.len,
- else => unreachable,
- }) |field_i| {
- const field_ty = ty.structFieldType(field_i, mod);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
-
- const field_align = AlignAs.fieldAlign(ty, field_i, mod);
- if (field_align.abiOrder().compare(.lt)) {
- is_packed = true;
- if (!lookup.isMutable()) break;
- }
-
- if (lookup.isMutable()) {
- _ = try lookup.typeToIndex(field_ty, switch (kind) {
- .forward, .forward_parameter => unreachable,
- .complete, .parameter, .payload => .complete,
- .global => .global,
- });
- }
- }
- switch (kind) {
- .forward, .forward_parameter => unreachable,
- .complete, .parameter, .global => {
- _ = try lookup.typeToIndex(ty, .forward);
- self.init(if (is_struct)
- if (is_packed) .packed_struct else .@"struct"
- else if (is_packed) .packed_union else .@"union");
- },
- .payload => self.init(if (is_packed)
- .packed_unnamed_union
- else
- .unnamed_union),
- }
- },
- }
- },
-
- .Array, .Vector => |zig_ty_tag| {
- switch (kind) {
- .forward, .complete, .global => {
- const t: Tag = switch (zig_ty_tag) {
- .Array => .array,
- .Vector => .vector,
- else => unreachable,
- };
- if (try lookup.typeToIndex(ty.childType(mod), kind)) |child_idx| {
- self.storage = .{ .seq = .{ .base = .{ .tag = t }, .data = .{
- .len = ty.arrayLenIncludingSentinel(mod),
- .elem_type = child_idx,
- } } };
- self.value = .{ .cty = initPayload(&self.storage.seq) };
- } else self.init(t);
- },
- .forward_parameter, .parameter => try self.initArrayParameter(ty, kind, lookup),
- .payload => unreachable,
- }
- },
-
- .Optional => {
- const payload_ty = ty.optionalChild(mod);
- if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- if (ty.optionalReprIsPayload(mod)) {
- try self.initType(payload_ty, kind, lookup);
- } else if (switch (kind) {
- .forward, .forward_parameter => @as(Index, undefined),
- .complete, .parameter, .global => try lookup.typeToIndex(ty, .forward),
- .payload => unreachable,
- }) |fwd_idx| {
- if (try lookup.typeToIndex(payload_ty, switch (kind) {
- .forward, .forward_parameter => .forward,
- .complete, .parameter => .complete,
- .global => .global,
- .payload => unreachable,
- })) |payload_idx| {
- self.storage = .{ .anon = undefined };
- self.storage.anon.fields[0] = .{
- .name = "payload",
- .type = payload_idx,
- .alignas = AlignAs.abiAlign(payload_ty, mod),
- };
- self.storage.anon.fields[1] = .{
- .name = "is_null",
- .type = Tag.bool.toIndex(),
- .alignas = AlignAs.abiAlign(Type.bool, mod),
- };
- self.initAnon(kind, fwd_idx, 2);
- } else self.init(switch (kind) {
- .forward, .forward_parameter => .fwd_anon_struct,
- .complete, .parameter, .global => .anon_struct,
- .payload => unreachable,
- });
- } else self.init(.anon_struct);
- } else self.init(.bool);
- },
-
- .ErrorUnion => {
- if (switch (kind) {
- .forward, .forward_parameter => @as(Index, undefined),
- .complete, .parameter, .global => try lookup.typeToIndex(ty, .forward),
- .payload => unreachable,
- }) |fwd_idx| {
- const payload_ty = ty.errorUnionPayload(mod);
- if (try lookup.typeToIndex(payload_ty, switch (kind) {
- .forward, .forward_parameter => .forward,
- .complete, .parameter => .complete,
- .global => .global,
- .payload => unreachable,
- })) |payload_idx| {
- const error_ty = ty.errorUnionSet(mod);
- if (payload_idx == Tag.void.toIndex()) {
- try self.initType(error_ty, kind, lookup);
- } else if (try lookup.typeToIndex(error_ty, kind)) |error_idx| {
- self.storage = .{ .anon = undefined };
- self.storage.anon.fields[0] = .{
- .name = "payload",
- .type = payload_idx,
- .alignas = AlignAs.abiAlign(payload_ty, mod),
- };
- self.storage.anon.fields[1] = .{
- .name = "error",
- .type = error_idx,
- .alignas = AlignAs.abiAlign(error_ty, mod),
- };
- self.initAnon(kind, fwd_idx, 2);
- } else self.init(switch (kind) {
- .forward, .forward_parameter => .fwd_anon_struct,
- .complete, .parameter, .global => .anon_struct,
- .payload => unreachable,
- });
- } else self.init(switch (kind) {
- .forward, .forward_parameter => .fwd_anon_struct,
- .complete, .parameter, .global => .anon_struct,
- .payload => unreachable,
- });
- } else self.init(.anon_struct);
- },
-
- .Opaque => self.init(.void),
-
- .Fn => {
- const info = mod.typeToFunc(ty).?;
- if (!info.is_generic) {
- if (lookup.isMutable()) {
- const param_kind: Kind = switch (kind) {
- .forward, .forward_parameter => .forward_parameter,
- .complete, .parameter, .global => .parameter,
- .payload => unreachable,
- };
- _ = try lookup.typeToIndex(Type.fromInterned(info.return_type), param_kind);
- for (info.param_types.get(ip)) |param_type| {
- if (!Type.fromInterned(param_type).hasRuntimeBitsIgnoreComptime(mod)) continue;
- _ = try lookup.typeToIndex(Type.fromInterned(param_type), param_kind);
- }
- }
- self.init(if (info.is_var_args) .varargs_function else .function);
- } else self.init(.void);
- },
- }
- }
- };
-
- pub fn copy(self: CType, arena: Allocator) !CType {
- return self.copyContext(struct {
- arena: Allocator,
- pub fn copyIndex(_: @This(), idx: Index) Index {
- return idx;
- }
- }{ .arena = arena });
- }
-
- fn copyFields(ctx: anytype, old_fields: Payload.Fields.Data) !Payload.Fields.Data {
- const new_fields = try ctx.arena.alloc(Payload.Fields.Field, old_fields.len);
- for (new_fields, old_fields) |*new_field, old_field| {
- new_field.name = try ctx.arena.dupeZ(u8, mem.span(old_field.name));
- new_field.type = ctx.copyIndex(old_field.type);
- new_field.alignas = old_field.alignas;
- }
- return new_fields;
- }
-
- fn copyParams(ctx: anytype, old_param_types: []const Index) ![]const Index {
- const new_param_types = try ctx.arena.alloc(Index, old_param_types.len);
- for (new_param_types, old_param_types) |*new_param_type, old_param_type|
- new_param_type.* = ctx.copyIndex(old_param_type);
- return new_param_types;
- }
-
- pub fn copyContext(self: CType, ctx: anytype) !CType {
- switch (self.tag()) {
- .void,
- .char,
- .@"signed char",
- .short,
- .int,
- .long,
- .@"long long",
- ._Bool,
- .@"unsigned char",
- .@"unsigned short",
- .@"unsigned int",
- .@"unsigned long",
- .@"unsigned long long",
- .float,
- .double,
- .@"long double",
- .bool,
- .size_t,
- .ptrdiff_t,
- .uint8_t,
- .int8_t,
- .uint16_t,
- .int16_t,
- .uint32_t,
- .int32_t,
- .uint64_t,
- .int64_t,
- .uintptr_t,
- .intptr_t,
- .zig_u128,
- .zig_i128,
- .zig_f16,
- .zig_f32,
- .zig_f64,
- .zig_f80,
- .zig_f128,
- .zig_c_longdouble,
- => return self,
-
- .pointer,
- .pointer_const,
- .pointer_volatile,
- .pointer_const_volatile,
- => {
- const pl = self.cast(Payload.Child).?;
- const new_pl = try ctx.arena.create(Payload.Child);
- new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = ctx.copyIndex(pl.data) };
- return initPayload(new_pl);
- },
-
- .array,
- .vector,
- => {
- const pl = self.cast(Payload.Sequence).?;
- const new_pl = try ctx.arena.create(Payload.Sequence);
- new_pl.* = .{
- .base = .{ .tag = pl.base.tag },
- .data = .{ .len = pl.data.len, .elem_type = ctx.copyIndex(pl.data.elem_type) },
- };
- return initPayload(new_pl);
- },
-
- .fwd_anon_struct,
- .fwd_anon_union,
- => {
- const pl = self.cast(Payload.Fields).?;
- const new_pl = try ctx.arena.create(Payload.Fields);
- new_pl.* = .{
- .base = .{ .tag = pl.base.tag },
- .data = try copyFields(ctx, pl.data),
- };
- return initPayload(new_pl);
- },
-
- .fwd_struct,
- .fwd_union,
- => {
- const pl = self.cast(Payload.FwdDecl).?;
- const new_pl = try ctx.arena.create(Payload.FwdDecl);
- new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = pl.data };
- return initPayload(new_pl);
- },
-
- .unnamed_struct,
- .unnamed_union,
- .packed_unnamed_struct,
- .packed_unnamed_union,
- => {
- const pl = self.cast(Payload.Unnamed).?;
- const new_pl = try ctx.arena.create(Payload.Unnamed);
- new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = .{
- .fields = try copyFields(ctx, pl.data.fields),
- .owner_decl = pl.data.owner_decl,
- .id = pl.data.id,
- } };
- return initPayload(new_pl);
- },
-
- .anon_struct,
- .anon_union,
- .@"struct",
- .@"union",
- .packed_struct,
- .packed_union,
- => {
- const pl = self.cast(Payload.Aggregate).?;
- const new_pl = try ctx.arena.create(Payload.Aggregate);
- new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = .{
- .fields = try copyFields(ctx, pl.data.fields),
- .fwd_decl = ctx.copyIndex(pl.data.fwd_decl),
- } };
- return initPayload(new_pl);
- },
-
- .function,
- .varargs_function,
- => {
- const pl = self.cast(Payload.Function).?;
- const new_pl = try ctx.arena.create(Payload.Function);
- new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = .{
- .return_type = ctx.copyIndex(pl.data.return_type),
- .param_types = try copyParams(ctx, pl.data.param_types),
- } };
- return initPayload(new_pl);
- },
- }
- }
-
- fn createFromType(store: *Store.Promoted, ty: Type, mod: *Module, kind: Kind) !CType {
- var convert: Convert = undefined;
- try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .mod = mod } });
- return createFromConvert(store, ty, mod, kind, &convert);
- }
-
- fn createFromConvert(
- store: *Store.Promoted,
- ty: Type,
- mod: *Module,
- kind: Kind,
- convert: Convert,
- ) !CType {
- const ip = &mod.intern_pool;
- const arena = store.arena.allocator();
- switch (convert.value) {
- .cty => |c| return c.copy(arena),
- .tag => |t| switch (t) {
- .fwd_anon_struct,
- .fwd_anon_union,
- .unnamed_struct,
- .unnamed_union,
- .packed_unnamed_struct,
- .packed_unnamed_union,
- .anon_struct,
- .anon_union,
- .@"struct",
- .@"union",
- .packed_struct,
- .packed_union,
- => {
- const zig_ty_tag = ty.zigTypeTag(mod);
- const fields_len = switch (zig_ty_tag) {
- .Struct => ty.structFieldCount(mod),
- .Union => mod.typeToUnion(ty).?.field_types.len,
- else => unreachable,
- };
-
- var c_fields_len: usize = 0;
- for (0..fields_len) |field_i| {
- const field_ty = ty.structFieldType(field_i, mod);
- if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
- !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- c_fields_len += 1;
- }
-
- const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len);
- var c_field_i: usize = 0;
- for (0..fields_len) |field_i_usize| {
- const field_i: u32 = @intCast(field_i_usize);
- const field_ty = ty.structFieldType(field_i, mod);
- if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
- !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
-
- defer c_field_i += 1;
- fields_pl[c_field_i] = .{
- .name = try if (ty.isSimpleTuple(mod))
- std.fmt.allocPrintZ(arena, "f{}", .{field_i})
- else
- arena.dupeZ(u8, ip.stringToSlice(switch (zig_ty_tag) {
- .Struct => ty.legacyStructFieldName(field_i, mod),
- .Union => ip.loadUnionType(ty.toIntern()).loadTagType(ip).names.get(ip)[field_i],
- else => unreachable,
- })),
- .type = store.set.typeToIndex(field_ty, mod, switch (kind) {
- .forward, .forward_parameter => .forward,
- .complete, .parameter, .payload => .complete,
- .global => .global,
- }).?,
- .alignas = AlignAs.fieldAlign(ty, field_i, mod),
- };
- }
-
- switch (t) {
- .fwd_anon_struct,
- .fwd_anon_union,
- => {
- const anon_pl = try arena.create(Payload.Fields);
- anon_pl.* = .{ .base = .{ .tag = t }, .data = fields_pl };
- return initPayload(anon_pl);
- },
-
- .unnamed_struct,
- .unnamed_union,
- .packed_unnamed_struct,
- .packed_unnamed_union,
- => {
- const unnamed_pl = try arena.create(Payload.Unnamed);
- unnamed_pl.* = .{ .base = .{ .tag = t }, .data = .{
- .fields = fields_pl,
- .owner_decl = ty.getOwnerDecl(mod),
- .id = if (ty.unionTagTypeSafety(mod)) |_| 0 else unreachable,
- } };
- return initPayload(unnamed_pl);
- },
-
- .anon_struct,
- .anon_union,
- .@"struct",
- .@"union",
- .packed_struct,
- .packed_union,
- => {
- const struct_pl = try arena.create(Payload.Aggregate);
- struct_pl.* = .{ .base = .{ .tag = t }, .data = .{
- .fields = fields_pl,
- .fwd_decl = store.set.typeToIndex(ty, mod, .forward).?,
- } };
- return initPayload(struct_pl);
- },
-
- else => unreachable,
- }
- },
-
- .function,
- .varargs_function,
- => {
- const info = mod.typeToFunc(ty).?;
- assert(!info.is_generic);
- const param_kind: Kind = switch (kind) {
- .forward, .forward_parameter => .forward_parameter,
- .complete, .parameter, .global => .parameter,
- .payload => unreachable,
- };
-
- var c_params_len: usize = 0;
- for (info.param_types.get(ip)) |param_type| {
- if (!Type.fromInterned(param_type).hasRuntimeBitsIgnoreComptime(mod)) continue;
- c_params_len += 1;
- }
-
- const params_pl = try arena.alloc(Index, c_params_len);
- var c_param_i: usize = 0;
- for (info.param_types.get(ip)) |param_type| {
- if (!Type.fromInterned(param_type).hasRuntimeBitsIgnoreComptime(mod)) continue;
- params_pl[c_param_i] = store.set.typeToIndex(Type.fromInterned(param_type), mod, param_kind).?;
- c_param_i += 1;
- }
-
- const fn_pl = try arena.create(Payload.Function);
- fn_pl.* = .{ .base = .{ .tag = t }, .data = .{
- .return_type = store.set.typeToIndex(Type.fromInterned(info.return_type), mod, param_kind).?,
- .param_types = params_pl,
- } };
- return initPayload(fn_pl);
- },
-
- else => unreachable,
- },
- }
- }
-
- pub const TypeAdapter64 = struct {
- kind: Kind,
- lookup: Convert.Lookup,
- convert: *const Convert,
-
- fn eqlRecurse(self: @This(), ty: Type, cty: Index, kind: Kind) bool {
- assert(!self.lookup.isMutable());
-
- var convert: Convert = undefined;
- convert.initType(ty, kind, self.lookup) catch unreachable;
-
- const self_recurse = @This(){ .kind = kind, .lookup = self.lookup, .convert = &convert };
- return self_recurse.eql(ty, self.lookup.indexToCType(cty).?);
- }
-
- pub fn eql(self: @This(), ty: Type, cty: CType) bool {
- const mod = self.lookup.getModule();
- const ip = &mod.intern_pool;
- switch (self.convert.value) {
- .cty => |c| return c.eql(cty),
- .tag => |t| {
- if (t != cty.tag()) return false;
-
- switch (t) {
- .fwd_anon_struct,
- .fwd_anon_union,
- => {
- if (!ty.isTupleOrAnonStruct(mod)) return false;
-
- var name_buf: [
- std.fmt.count("f{}", .{std.math.maxInt(usize)})
- ]u8 = undefined;
- const c_fields = cty.cast(Payload.Fields).?.data;
-
- const zig_ty_tag = ty.zigTypeTag(mod);
- var c_field_i: usize = 0;
- for (0..switch (zig_ty_tag) {
- .Struct => ty.structFieldCount(mod),
- .Union => mod.typeToUnion(ty).?.field_types.len,
- else => unreachable,
- }) |field_i_usize| {
- const field_i: u32 = @intCast(field_i_usize);
- const field_ty = ty.structFieldType(field_i, mod);
- if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
- !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
-
- defer c_field_i += 1;
- const c_field = &c_fields[c_field_i];
-
- if (!self.eqlRecurse(field_ty, c_field.type, switch (self.kind) {
- .forward, .forward_parameter => .forward,
- .complete, .parameter => .complete,
- .global => .global,
- .payload => unreachable,
- }) or !mem.eql(
- u8,
- if (ty.isSimpleTuple(mod))
- std.fmt.bufPrintZ(&name_buf, "f{}", .{field_i}) catch unreachable
- else
- ip.stringToSlice(switch (zig_ty_tag) {
- .Struct => ty.legacyStructFieldName(field_i, mod),
- .Union => ip.loadUnionType(ty.toIntern()).loadTagType(ip).names.get(ip)[field_i],
- else => unreachable,
- }),
- mem.span(c_field.name),
- ) or AlignAs.fieldAlign(ty, field_i, mod).@"align" !=
- c_field.alignas.@"align") return false;
- }
- return true;
- },
-
- .unnamed_struct,
- .unnamed_union,
- .packed_unnamed_struct,
- .packed_unnamed_union,
- => switch (self.kind) {
- .forward, .forward_parameter, .complete, .parameter, .global => unreachable,
- .payload => if (ty.unionTagTypeSafety(mod)) |_| {
- const data = cty.cast(Payload.Unnamed).?.data;
- return ty.getOwnerDecl(mod) == data.owner_decl and data.id == 0;
- } else unreachable,
- },
-
- .anon_struct,
- .anon_union,
- .@"struct",
- .@"union",
- .packed_struct,
- .packed_union,
- => return self.eqlRecurse(
- ty,
- cty.cast(Payload.Aggregate).?.data.fwd_decl,
- .forward,
- ),
-
- .function,
- .varargs_function,
- => {
- if (ty.zigTypeTag(mod) != .Fn) return false;
-
- const info = mod.typeToFunc(ty).?;
- assert(!info.is_generic);
- const data = cty.cast(Payload.Function).?.data;
- const param_kind: Kind = switch (self.kind) {
- .forward, .forward_parameter => .forward_parameter,
- .complete, .parameter, .global => .parameter,
- .payload => unreachable,
- };
-
- if (!self.eqlRecurse(Type.fromInterned(info.return_type), data.return_type, param_kind))
- return false;
-
- var c_param_i: usize = 0;
- for (info.param_types.get(ip)) |param_type| {
- if (!Type.fromInterned(param_type).hasRuntimeBitsIgnoreComptime(mod)) continue;
-
- if (c_param_i >= data.param_types.len) return false;
- const param_cty = data.param_types[c_param_i];
- c_param_i += 1;
-
- if (!self.eqlRecurse(Type.fromInterned(param_type), param_cty, param_kind))
- return false;
- }
- return c_param_i == data.param_types.len;
- },
-
- else => unreachable,
- }
- },
- }
- }
-
- pub fn hash(self: @This(), ty: Type) u64 {
- var hasher = std.hash.Wyhash.init(0);
- self.updateHasher(&hasher, ty);
- return hasher.final();
- }
-
- fn updateHasherRecurse(self: @This(), hasher: anytype, ty: Type, kind: Kind) void {
- assert(!self.lookup.isMutable());
-
- var convert: Convert = undefined;
- convert.initType(ty, kind, self.lookup) catch unreachable;
-
- const self_recurse = @This(){ .kind = kind, .lookup = self.lookup, .convert = &convert };
- self_recurse.updateHasher(hasher, ty);
- }
-
- pub fn updateHasher(self: @This(), hasher: anytype, ty: Type) void {
- switch (self.convert.value) {
- .cty => |c| return c.updateHasher(hasher, self.lookup.getSet().?.*),
- .tag => |t| {
- autoHash(hasher, t);
-
- const mod = self.lookup.getModule();
- const ip = &mod.intern_pool;
- switch (t) {
- .fwd_anon_struct,
- .fwd_anon_union,
- => {
- var name_buf: [
- std.fmt.count("f{}", .{std.math.maxInt(usize)})
- ]u8 = undefined;
-
- const zig_ty_tag = ty.zigTypeTag(mod);
- for (0..switch (ty.zigTypeTag(mod)) {
- .Struct => ty.structFieldCount(mod),
- .Union => mod.typeToUnion(ty).?.field_types.len,
- else => unreachable,
- }) |field_i_usize| {
- const field_i: u32 = @intCast(field_i_usize);
- const field_ty = ty.structFieldType(field_i, mod);
- if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
- !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
-
- self.updateHasherRecurse(hasher, field_ty, switch (self.kind) {
- .forward, .forward_parameter => .forward,
- .complete, .parameter => .complete,
- .global => .global,
- .payload => unreachable,
- });
- hasher.update(if (ty.isSimpleTuple(mod))
- std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable
- else
- mod.intern_pool.stringToSlice(switch (zig_ty_tag) {
- .Struct => ty.legacyStructFieldName(field_i, mod),
- .Union => ip.loadUnionType(ty.toIntern()).loadTagType(ip).names.get(ip)[field_i],
- else => unreachable,
- }));
- autoHash(hasher, AlignAs.fieldAlign(ty, field_i, mod).@"align");
- }
- },
-
- .unnamed_struct,
- .unnamed_union,
- .packed_unnamed_struct,
- .packed_unnamed_union,
- => switch (self.kind) {
- .forward, .forward_parameter, .complete, .parameter, .global => unreachable,
- .payload => if (ty.unionTagTypeSafety(mod)) |_| {
- autoHash(hasher, ty.getOwnerDecl(mod));
- autoHash(hasher, @as(u32, 0));
- } else unreachable,
- },
-
- .anon_struct,
- .anon_union,
- .@"struct",
- .@"union",
- .packed_struct,
- .packed_union,
- => self.updateHasherRecurse(hasher, ty, .forward),
-
- .function,
- .varargs_function,
- => {
- const info = mod.typeToFunc(ty).?;
- assert(!info.is_generic);
- const param_kind: Kind = switch (self.kind) {
- .forward, .forward_parameter => .forward_parameter,
- .complete, .parameter, .global => .parameter,
- .payload => unreachable,
- };
-
- self.updateHasherRecurse(hasher, Type.fromInterned(info.return_type), param_kind);
- for (info.param_types.get(ip)) |param_type| {
- if (!Type.fromInterned(param_type).hasRuntimeBitsIgnoreComptime(mod)) continue;
- self.updateHasherRecurse(hasher, Type.fromInterned(param_type), param_kind);
- }
- },
-
- else => unreachable,
- }
- },
- }
- }
- };
-
- pub const TypeAdapter32 = struct {
- kind: Kind,
- lookup: Convert.Lookup,
- convert: *const Convert,
-
- fn to64(self: @This()) TypeAdapter64 {
- return .{ .kind = self.kind, .lookup = self.lookup, .convert = self.convert };
- }
-
- pub fn eql(self: @This(), ty: Type, cty: CType, cty_index: usize) bool {
- _ = cty_index;
- return self.to64().eql(ty, cty);
- }
-
- pub fn hash(self: @This(), ty: Type) u32 {
- return @as(u32, @truncate(self.to64().hash(ty)));
- }
- };
-};
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 8ddacbe11ca1..7419e778a18a 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -2033,7 +2033,7 @@ pub const Object = struct {
owner_decl.src_node + 1, // Line
try o.lowerDebugType(int_ty),
ty.abiSize(mod) * 8,
- ty.abiAlignment(mod).toByteUnits(0) * 8,
+ (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(enumerators),
);
@@ -2120,7 +2120,7 @@ pub const Object = struct {
0, // Line
try o.lowerDebugType(ptr_ty),
ptr_size * 8,
- ptr_align.toByteUnits(0) * 8,
+ (ptr_align.toByteUnits() orelse 0) * 8,
0, // Offset
);
@@ -2131,7 +2131,7 @@ pub const Object = struct {
0, // Line
try o.lowerDebugType(len_ty),
len_size * 8,
- len_align.toByteUnits(0) * 8,
+ (len_align.toByteUnits() orelse 0) * 8,
len_offset * 8,
);
@@ -2142,7 +2142,7 @@ pub const Object = struct {
line,
.none, // Underlying type
ty.abiSize(mod) * 8,
- ty.abiAlignment(mod).toByteUnits(0) * 8,
+ (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&.{
debug_ptr_type,
debug_len_type,
@@ -2170,7 +2170,7 @@ pub const Object = struct {
0, // Line
debug_elem_ty,
target.ptrBitWidth(),
- ty.ptrAlignment(mod).toByteUnits(0) * 8,
+ (ty.ptrAlignment(mod).toByteUnits() orelse 0) * 8,
0, // Offset
);
@@ -2217,7 +2217,7 @@ pub const Object = struct {
0, // Line
try o.lowerDebugType(ty.childType(mod)),
ty.abiSize(mod) * 8,
- ty.abiAlignment(mod).toByteUnits(0) * 8,
+ (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&.{
try o.builder.debugSubrange(
try o.builder.debugConstant(try o.builder.intConst(.i64, 0)),
@@ -2260,7 +2260,7 @@ pub const Object = struct {
0, // Line
debug_elem_type,
ty.abiSize(mod) * 8,
- ty.abiAlignment(mod).toByteUnits(0) * 8,
+ (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&.{
try o.builder.debugSubrange(
try o.builder.debugConstant(try o.builder.intConst(.i64, 0)),
@@ -2316,7 +2316,7 @@ pub const Object = struct {
0, // Line
try o.lowerDebugType(child_ty),
payload_size * 8,
- payload_align.toByteUnits(0) * 8,
+ (payload_align.toByteUnits() orelse 0) * 8,
0, // Offset
);
@@ -2327,7 +2327,7 @@ pub const Object = struct {
0,
try o.lowerDebugType(non_null_ty),
non_null_size * 8,
- non_null_align.toByteUnits(0) * 8,
+ (non_null_align.toByteUnits() orelse 0) * 8,
non_null_offset * 8,
);
@@ -2338,7 +2338,7 @@ pub const Object = struct {
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
- ty.abiAlignment(mod).toByteUnits(0) * 8,
+ (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&.{
debug_data_type,
debug_some_type,
@@ -2396,7 +2396,7 @@ pub const Object = struct {
0, // Line
try o.lowerDebugType(Type.anyerror),
error_size * 8,
- error_align.toByteUnits(0) * 8,
+ (error_align.toByteUnits() orelse 0) * 8,
error_offset * 8,
);
fields[payload_index] = try o.builder.debugMemberType(
@@ -2406,7 +2406,7 @@ pub const Object = struct {
0, // Line
try o.lowerDebugType(payload_ty),
payload_size * 8,
- payload_align.toByteUnits(0) * 8,
+ (payload_align.toByteUnits() orelse 0) * 8,
payload_offset * 8,
);
@@ -2417,7 +2417,7 @@ pub const Object = struct {
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
- ty.abiAlignment(mod).toByteUnits(0) * 8,
+ (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&fields),
);
@@ -2485,7 +2485,7 @@ pub const Object = struct {
0,
try o.lowerDebugType(Type.fromInterned(field_ty)),
field_size * 8,
- field_align.toByteUnits(0) * 8,
+ (field_align.toByteUnits() orelse 0) * 8,
field_offset * 8,
));
}
@@ -2497,7 +2497,7 @@ pub const Object = struct {
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
- ty.abiAlignment(mod).toByteUnits(0) * 8,
+ (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(fields.items),
);
@@ -2566,7 +2566,7 @@ pub const Object = struct {
0, // Line
try o.lowerDebugType(field_ty),
field_size * 8,
- field_align.toByteUnits(0) * 8,
+ (field_align.toByteUnits() orelse 0) * 8,
field_offset * 8,
));
}
@@ -2578,7 +2578,7 @@ pub const Object = struct {
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
- ty.abiAlignment(mod).toByteUnits(0) * 8,
+ (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(fields.items),
);
@@ -2621,7 +2621,7 @@ pub const Object = struct {
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
- ty.abiAlignment(mod).toByteUnits(0) * 8,
+ (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(
&.{try o.lowerDebugType(Type.fromInterned(union_type.enum_tag_ty))},
),
@@ -2661,7 +2661,7 @@ pub const Object = struct {
0, // Line
try o.lowerDebugType(Type.fromInterned(field_ty)),
field_size * 8,
- field_align.toByteUnits(0) * 8,
+ (field_align.toByteUnits() orelse 0) * 8,
0, // Offset
));
}
@@ -2680,7 +2680,7 @@ pub const Object = struct {
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
- ty.abiAlignment(mod).toByteUnits(0) * 8,
+ (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(fields.items),
);
@@ -2711,7 +2711,7 @@ pub const Object = struct {
0, // Line
try o.lowerDebugType(Type.fromInterned(union_type.enum_tag_ty)),
layout.tag_size * 8,
- layout.tag_align.toByteUnits(0) * 8,
+ (layout.tag_align.toByteUnits() orelse 0) * 8,
tag_offset * 8,
);
@@ -2722,7 +2722,7 @@ pub const Object = struct {
0, // Line
debug_union_type,
layout.payload_size * 8,
- layout.payload_align.toByteUnits(0) * 8,
+ (layout.payload_align.toByteUnits() orelse 0) * 8,
payload_offset * 8,
);
@@ -2739,7 +2739,7 @@ pub const Object = struct {
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
- ty.abiAlignment(mod).toByteUnits(0) * 8,
+ (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&full_fields),
);
@@ -4473,7 +4473,7 @@ pub const Object = struct {
// The value cannot be undefined, because we use the `nonnull` annotation
// for non-optional pointers. We also need to respect the alignment, even though
// the address will never be dereferenced.
- const int: u64 = ptr_ty.ptrInfo(mod).flags.alignment.toByteUnitsOptional() orelse
+ const int: u64 = ptr_ty.ptrInfo(mod).flags.alignment.toByteUnits() orelse
// Note that these 0xaa values are appropriate even in release-optimized builds
// because we need a well-defined value that is not null, and LLVM does not
// have an "undef_but_not_null" attribute. As an example, if this `alloc` AIR
diff --git a/src/crash_report.zig b/src/crash_report.zig
index f33bef78e75c..311647f23ff7 100644
--- a/src/crash_report.zig
+++ b/src/crash_report.zig
@@ -172,7 +172,7 @@ pub fn attachSegfaultHandler() void {
};
}
-fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*const anyopaque) callconv(.C) noreturn {
+fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*anyopaque) callconv(.C) noreturn {
// TODO: use alarm() here to prevent infinite loops
PanicSwitch.preDispatch();
diff --git a/src/link.zig b/src/link.zig
index 9e1daa09af28..8b28a17b9dfa 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -188,15 +188,10 @@ pub const File = struct {
emit: Compilation.Emit,
options: OpenOptions,
) !*File {
- const tag = Tag.fromObjectFormat(comp.root_mod.resolved_target.result.ofmt);
- switch (tag) {
- .c => {
- const ptr = try C.open(arena, comp, emit, options);
- return &ptr.base;
- },
- inline else => |t| {
- if (build_options.only_c) unreachable;
- const ptr = try t.Type().open(arena, comp, emit, options);
+ switch (Tag.fromObjectFormat(comp.root_mod.resolved_target.result.ofmt)) {
+ inline else => |tag| {
+ if (tag != .c and build_options.only_c) unreachable;
+ const ptr = try tag.Type().open(arena, comp, emit, options);
return &ptr.base;
},
}
@@ -208,25 +203,17 @@ pub const File = struct {
emit: Compilation.Emit,
options: OpenOptions,
) !*File {
- const tag = Tag.fromObjectFormat(comp.root_mod.resolved_target.result.ofmt);
- switch (tag) {
- .c => {
- const ptr = try C.createEmpty(arena, comp, emit, options);
- return &ptr.base;
- },
- inline else => |t| {
- if (build_options.only_c) unreachable;
- const ptr = try t.Type().createEmpty(arena, comp, emit, options);
+ switch (Tag.fromObjectFormat(comp.root_mod.resolved_target.result.ofmt)) {
+ inline else => |tag| {
+ if (tag != .c and build_options.only_c) unreachable;
+ const ptr = try tag.Type().createEmpty(arena, comp, emit, options);
return &ptr.base;
},
}
}
pub fn cast(base: *File, comptime T: type) ?*T {
- if (base.tag != T.base_tag)
- return null;
-
- return @fieldParentPtr(T, "base", base);
+ return if (base.tag == T.base_tag) @fieldParentPtr("base", base) else null;
}
pub fn makeWritable(base: *File) !void {
@@ -383,7 +370,7 @@ pub const File = struct {
.c => unreachable,
.nvptx => unreachable,
inline else => |t| {
- return @fieldParentPtr(t.Type(), "base", base).lowerUnnamedConst(val, decl_index);
+ return @as(*t.Type(), @fieldParentPtr("base", base)).lowerUnnamedConst(val, decl_index);
},
}
}
@@ -402,7 +389,7 @@ pub const File = struct {
.c => unreachable,
.nvptx => unreachable,
inline else => |t| {
- return @fieldParentPtr(t.Type(), "base", base).getGlobalSymbol(name, lib_name);
+ return @as(*t.Type(), @fieldParentPtr("base", base)).getGlobalSymbol(name, lib_name);
},
}
}
@@ -412,12 +399,9 @@ pub const File = struct {
const decl = module.declPtr(decl_index);
assert(decl.has_tv);
switch (base.tag) {
- .c => {
- return @fieldParentPtr(C, "base", base).updateDecl(module, decl_index);
- },
inline else => |tag| {
- if (build_options.only_c) unreachable;
- return @fieldParentPtr(tag.Type(), "base", base).updateDecl(module, decl_index);
+ if (tag != .c and build_options.only_c) unreachable;
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).updateDecl(module, decl_index);
},
}
}
@@ -431,12 +415,9 @@ pub const File = struct {
liveness: Liveness,
) UpdateDeclError!void {
switch (base.tag) {
- .c => {
- return @fieldParentPtr(C, "base", base).updateFunc(module, func_index, air, liveness);
- },
inline else => |tag| {
- if (build_options.only_c) unreachable;
- return @fieldParentPtr(tag.Type(), "base", base).updateFunc(module, func_index, air, liveness);
+ if (tag != .c and build_options.only_c) unreachable;
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).updateFunc(module, func_index, air, liveness);
},
}
}
@@ -446,12 +427,9 @@ pub const File = struct {
assert(decl.has_tv);
switch (base.tag) {
.spirv, .nvptx => {},
- .c => {
- return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl_index);
- },
inline else => |tag| {
- if (build_options.only_c) unreachable;
- return @fieldParentPtr(tag.Type(), "base", base).updateDeclLineNumber(module, decl_index);
+ if (tag != .c and build_options.only_c) unreachable;
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).updateDeclLineNumber(module, decl_index);
},
}
}
@@ -473,11 +451,9 @@ pub const File = struct {
base.releaseLock();
if (base.file) |f| f.close();
switch (base.tag) {
- .c => @fieldParentPtr(C, "base", base).deinit(),
-
inline else => |tag| {
- if (build_options.only_c) unreachable;
- @fieldParentPtr(tag.Type(), "base", base).deinit();
+ if (tag != .c and build_options.only_c) unreachable;
+ @as(*tag.Type(), @fieldParentPtr("base", base)).deinit();
},
}
}
@@ -560,7 +536,7 @@ pub const File = struct {
pub fn flush(base: *File, arena: Allocator, prog_node: *std.Progress.Node) FlushError!void {
if (build_options.only_c) {
assert(base.tag == .c);
- return @fieldParentPtr(C, "base", base).flush(arena, prog_node);
+ return @as(*C, @fieldParentPtr("base", base)).flush(arena, prog_node);
}
const comp = base.comp;
if (comp.clang_preprocessor_mode == .yes or comp.clang_preprocessor_mode == .pch) {
@@ -587,7 +563,7 @@ pub const File = struct {
}
switch (base.tag) {
inline else => |tag| {
- return @fieldParentPtr(tag.Type(), "base", base).flush(arena, prog_node);
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).flush(arena, prog_node);
},
}
}
@@ -596,12 +572,9 @@ pub const File = struct {
/// rather than final output mode.
pub fn flushModule(base: *File, arena: Allocator, prog_node: *std.Progress.Node) FlushError!void {
switch (base.tag) {
- .c => {
- return @fieldParentPtr(C, "base", base).flushModule(arena, prog_node);
- },
inline else => |tag| {
- if (build_options.only_c) unreachable;
- return @fieldParentPtr(tag.Type(), "base", base).flushModule(arena, prog_node);
+ if (tag != .c and build_options.only_c) unreachable;
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).flushModule(arena, prog_node);
},
}
}
@@ -609,12 +582,9 @@ pub const File = struct {
/// Called when a Decl is deleted from the Module.
pub fn freeDecl(base: *File, decl_index: InternPool.DeclIndex) void {
switch (base.tag) {
- .c => {
- @fieldParentPtr(C, "base", base).freeDecl(decl_index);
- },
inline else => |tag| {
- if (build_options.only_c) unreachable;
- @fieldParentPtr(tag.Type(), "base", base).freeDecl(decl_index);
+ if (tag != .c and build_options.only_c) unreachable;
+ @as(*tag.Type(), @fieldParentPtr("base", base)).freeDecl(decl_index);
},
}
}
@@ -635,12 +605,9 @@ pub const File = struct {
exports: []const *Module.Export,
) UpdateExportsError!void {
switch (base.tag) {
- .c => {
- return @fieldParentPtr(C, "base", base).updateExports(module, exported, exports);
- },
inline else => |tag| {
- if (build_options.only_c) unreachable;
- return @fieldParentPtr(tag.Type(), "base", base).updateExports(module, exported, exports);
+ if (tag != .c and build_options.only_c) unreachable;
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).updateExports(module, exported, exports);
},
}
}
@@ -664,7 +631,7 @@ pub const File = struct {
.spirv => unreachable,
.nvptx => unreachable,
inline else => |tag| {
- return @fieldParentPtr(tag.Type(), "base", base).getDeclVAddr(decl_index, reloc_info);
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).getDeclVAddr(decl_index, reloc_info);
},
}
}
@@ -683,7 +650,7 @@ pub const File = struct {
.spirv => unreachable,
.nvptx => unreachable,
inline else => |tag| {
- return @fieldParentPtr(tag.Type(), "base", base).lowerAnonDecl(decl_val, decl_align, src_loc);
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).lowerAnonDecl(decl_val, decl_align, src_loc);
},
}
}
@@ -695,7 +662,7 @@ pub const File = struct {
.spirv => unreachable,
.nvptx => unreachable,
inline else => |tag| {
- return @fieldParentPtr(tag.Type(), "base", base).getAnonDeclVAddr(decl_val, reloc_info);
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).getAnonDeclVAddr(decl_val, reloc_info);
},
}
}
@@ -714,7 +681,7 @@ pub const File = struct {
=> {},
inline else => |tag| {
- return @fieldParentPtr(tag.Type(), "base", base).deleteDeclExport(decl_index, name);
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).deleteDeclExport(decl_index, name);
},
}
}
diff --git a/src/link/C.zig b/src/link/C.zig
index 73049b40cd13..8bff6d9fce5d 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -6,7 +6,8 @@ const fs = std.fs;
const C = @This();
const build_options = @import("build_options");
-const Module = @import("../Module.zig");
+const Zcu = @import("../Module.zig");
+const Module = @import("../Package/Module.zig");
const InternPool = @import("../InternPool.zig");
const Alignment = InternPool.Alignment;
const Compilation = @import("../Compilation.zig");
@@ -68,13 +69,13 @@ pub const DeclBlock = struct {
fwd_decl: String = String.empty,
/// Each `Decl` stores a set of used `CType`s. In `flush()`, we iterate
/// over each `Decl` and generate the definition for each used `CType` once.
- ctypes: codegen.CType.Store = .{},
- /// Key and Value storage use the ctype arena.
+ ctype_pool: codegen.CType.Pool = codegen.CType.Pool.empty,
+ /// May contain string references to ctype_pool
lazy_fns: codegen.LazyFnMap = .{},
fn deinit(db: *DeclBlock, gpa: Allocator) void {
db.lazy_fns.deinit(gpa);
- db.ctypes.deinit(gpa);
+ db.ctype_pool.deinit(gpa);
db.* = undefined;
}
};
@@ -177,23 +178,24 @@ pub fn freeDecl(self: *C, decl_index: InternPool.DeclIndex) void {
pub fn updateFunc(
self: *C,
- module: *Module,
+ zcu: *Zcu,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) !void {
const gpa = self.base.comp.gpa;
- const func = module.funcInfo(func_index);
+ const func = zcu.funcInfo(func_index);
const decl_index = func.owner_decl;
- const decl = module.declPtr(decl_index);
+ const decl = zcu.declPtr(decl_index);
const gop = try self.decl_table.getOrPut(gpa, decl_index);
if (!gop.found_existing) gop.value_ptr.* = .{};
- const ctypes = &gop.value_ptr.ctypes;
+ const ctype_pool = &gop.value_ptr.ctype_pool;
const lazy_fns = &gop.value_ptr.lazy_fns;
const fwd_decl = &self.fwd_decl_buf;
const code = &self.code_buf;
- ctypes.clearRetainingCapacity(gpa);
+ try ctype_pool.init(gpa);
+ ctype_pool.clearRetainingCapacity();
lazy_fns.clearRetainingCapacity();
fwd_decl.clearRetainingCapacity();
code.clearRetainingCapacity();
@@ -206,12 +208,14 @@ pub fn updateFunc(
.object = .{
.dg = .{
.gpa = gpa,
- .module = module,
+ .zcu = zcu,
+ .mod = zcu.namespacePtr(decl.src_namespace).file_scope.mod,
.error_msg = null,
.pass = .{ .decl = decl_index },
- .is_naked_fn = decl.typeOf(module).fnCallingConvention(module) == .Naked,
+ .is_naked_fn = decl.typeOf(zcu).fnCallingConvention(zcu) == .Naked,
.fwd_decl = fwd_decl.toManaged(gpa),
- .ctypes = ctypes.*,
+ .ctype_pool = ctype_pool.*,
+ .scratch = .{},
.anon_decl_deps = self.anon_decls,
.aligned_anon_decls = self.aligned_anon_decls,
},
@@ -220,36 +224,32 @@ pub fn updateFunc(
},
.lazy_fns = lazy_fns.*,
};
-
function.object.indent_writer = .{ .underlying_writer = function.object.code.writer() };
defer {
self.anon_decls = function.object.dg.anon_decl_deps;
self.aligned_anon_decls = function.object.dg.aligned_anon_decls;
fwd_decl.* = function.object.dg.fwd_decl.moveToUnmanaged();
+ ctype_pool.* = function.object.dg.ctype_pool.move();
+ ctype_pool.freeUnusedCapacity(gpa);
+ function.object.dg.scratch.deinit(gpa);
+ lazy_fns.* = function.lazy_fns.move();
+ lazy_fns.shrinkAndFree(gpa, lazy_fns.count());
code.* = function.object.code.moveToUnmanaged();
function.deinit();
}
codegen.genFunc(&function) catch |err| switch (err) {
error.AnalysisFail => {
- try module.failed_decls.put(gpa, decl_index, function.object.dg.error_msg.?);
+ try zcu.failed_decls.put(gpa, decl_index, function.object.dg.error_msg.?);
return;
},
else => |e| return e,
};
-
- ctypes.* = function.object.dg.ctypes.move();
- lazy_fns.* = function.lazy_fns.move();
-
- // Free excess allocated memory for this Decl.
- ctypes.shrinkAndFree(gpa, ctypes.count());
- lazy_fns.shrinkAndFree(gpa, lazy_fns.count());
-
- gop.value_ptr.code = try self.addString(function.object.code.items);
gop.value_ptr.fwd_decl = try self.addString(function.object.dg.fwd_decl.items);
+ gop.value_ptr.code = try self.addString(function.object.code.items);
}
-fn updateAnonDecl(self: *C, module: *Module, i: usize) !void {
+fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void {
const gpa = self.base.comp.gpa;
const anon_decl = self.anon_decls.keys()[i];
@@ -261,12 +261,14 @@ fn updateAnonDecl(self: *C, module: *Module, i: usize) !void {
var object: codegen.Object = .{
.dg = .{
.gpa = gpa,
- .module = module,
+ .zcu = zcu,
+ .mod = zcu.root_mod,
.error_msg = null,
.pass = .{ .anon = anon_decl },
.is_naked_fn = false,
.fwd_decl = fwd_decl.toManaged(gpa),
- .ctypes = .{},
+ .ctype_pool = codegen.CType.Pool.empty,
+ .scratch = .{},
.anon_decl_deps = self.anon_decls,
.aligned_anon_decls = self.aligned_anon_decls,
},
@@ -274,62 +276,64 @@ fn updateAnonDecl(self: *C, module: *Module, i: usize) !void {
.indent_writer = undefined, // set later so we can get a pointer to object.code
};
object.indent_writer = .{ .underlying_writer = object.code.writer() };
-
defer {
self.anon_decls = object.dg.anon_decl_deps;
self.aligned_anon_decls = object.dg.aligned_anon_decls;
- object.dg.ctypes.deinit(object.dg.gpa);
fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
+ object.dg.ctype_pool.deinit(object.dg.gpa);
+ object.dg.scratch.deinit(gpa);
code.* = object.code.moveToUnmanaged();
}
+ try object.dg.ctype_pool.init(gpa);
- const c_value: codegen.CValue = .{ .constant = anon_decl };
+ const c_value: codegen.CValue = .{ .constant = Value.fromInterned(anon_decl) };
const alignment: Alignment = self.aligned_anon_decls.get(anon_decl) orelse .none;
- codegen.genDeclValue(&object, Value.fromInterned(anon_decl), false, c_value, alignment, .none) catch |err| switch (err) {
+ codegen.genDeclValue(&object, c_value.constant, false, c_value, alignment, .none) catch |err| switch (err) {
error.AnalysisFail => {
@panic("TODO: C backend AnalysisFail on anonymous decl");
- //try module.failed_decls.put(gpa, decl_index, object.dg.error_msg.?);
+ //try zcu.failed_decls.put(gpa, decl_index, object.dg.error_msg.?);
//return;
},
else => |e| return e,
};
- // Free excess allocated memory for this Decl.
- object.dg.ctypes.shrinkAndFree(gpa, object.dg.ctypes.count());
-
+ object.dg.ctype_pool.freeUnusedCapacity(gpa);
object.dg.anon_decl_deps.values()[i] = .{
.code = try self.addString(object.code.items),
.fwd_decl = try self.addString(object.dg.fwd_decl.items),
- .ctypes = object.dg.ctypes.move(),
+ .ctype_pool = object.dg.ctype_pool.move(),
};
}
-pub fn updateDecl(self: *C, module: *Module, decl_index: InternPool.DeclIndex) !void {
+pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void {
const tracy = trace(@src());
defer tracy.end();
const gpa = self.base.comp.gpa;
+ const decl = zcu.declPtr(decl_index);
const gop = try self.decl_table.getOrPut(gpa, decl_index);
- if (!gop.found_existing) {
- gop.value_ptr.* = .{};
- }
- const ctypes = &gop.value_ptr.ctypes;
+ errdefer _ = self.decl_table.pop();
+ if (!gop.found_existing) gop.value_ptr.* = .{};
+ const ctype_pool = &gop.value_ptr.ctype_pool;
const fwd_decl = &self.fwd_decl_buf;
const code = &self.code_buf;
- ctypes.clearRetainingCapacity(gpa);
+ try ctype_pool.init(gpa);
+ ctype_pool.clearRetainingCapacity();
fwd_decl.clearRetainingCapacity();
code.clearRetainingCapacity();
var object: codegen.Object = .{
.dg = .{
.gpa = gpa,
- .module = module,
+ .zcu = zcu,
+ .mod = zcu.namespacePtr(decl.src_namespace).file_scope.mod,
.error_msg = null,
.pass = .{ .decl = decl_index },
.is_naked_fn = false,
.fwd_decl = fwd_decl.toManaged(gpa),
- .ctypes = ctypes.*,
+ .ctype_pool = ctype_pool.*,
+ .scratch = .{},
.anon_decl_deps = self.anon_decls,
.aligned_anon_decls = self.aligned_anon_decls,
},
@@ -340,33 +344,29 @@ pub fn updateDecl(self: *C, module: *Module, decl_index: InternPool.DeclIndex) !
defer {
self.anon_decls = object.dg.anon_decl_deps;
self.aligned_anon_decls = object.dg.aligned_anon_decls;
- object.dg.ctypes.deinit(object.dg.gpa);
fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
+ ctype_pool.* = object.dg.ctype_pool.move();
+ ctype_pool.freeUnusedCapacity(gpa);
+ object.dg.scratch.deinit(gpa);
code.* = object.code.moveToUnmanaged();
}
codegen.genDecl(&object) catch |err| switch (err) {
error.AnalysisFail => {
- try module.failed_decls.put(gpa, decl_index, object.dg.error_msg.?);
+ try zcu.failed_decls.put(gpa, decl_index, object.dg.error_msg.?);
return;
},
else => |e| return e,
};
-
- ctypes.* = object.dg.ctypes.move();
-
- // Free excess allocated memory for this Decl.
- ctypes.shrinkAndFree(gpa, ctypes.count());
-
gop.value_ptr.code = try self.addString(object.code.items);
gop.value_ptr.fwd_decl = try self.addString(object.dg.fwd_decl.items);
}
-pub fn updateDeclLineNumber(self: *C, module: *Module, decl_index: InternPool.DeclIndex) !void {
+pub fn updateDeclLineNumber(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void {
// The C backend does not have the ability to fix line numbers without re-generating
// the entire Decl.
_ = self;
- _ = module;
+ _ = zcu;
_ = decl_index;
}
@@ -399,22 +399,25 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: *std.Progress.Node) !v
const comp = self.base.comp;
const gpa = comp.gpa;
- const module = self.base.comp.module.?;
+ const zcu = self.base.comp.module.?;
{
var i: usize = 0;
while (i < self.anon_decls.count()) : (i += 1) {
- try updateAnonDecl(self, module, i);
+ try updateAnonDecl(self, zcu, i);
}
}
// This code path happens exclusively with -ofmt=c. The flush logic for
// emit-h is in `flushEmitH` below.
- var f: Flush = .{};
+ var f: Flush = .{
+ .ctype_pool = codegen.CType.Pool.empty,
+ .lazy_ctype_pool = codegen.CType.Pool.empty,
+ };
defer f.deinit(gpa);
- const abi_defines = try self.abiDefines(module.getTarget());
+ const abi_defines = try self.abiDefines(zcu.getTarget());
defer abi_defines.deinit();
// Covers defines, zig.h, ctypes, asm, lazy fwd.
@@ -429,7 +432,7 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: *std.Progress.Node) !v
{
var asm_buf = f.asm_buf.toManaged(gpa);
defer f.asm_buf = asm_buf.moveToUnmanaged();
- try codegen.genGlobalAsm(module, asm_buf.writer());
+ try codegen.genGlobalAsm(zcu, asm_buf.writer());
f.appendBufAssumeCapacity(asm_buf.items);
}
@@ -438,7 +441,8 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: *std.Progress.Node) !v
self.lazy_fwd_decl_buf.clearRetainingCapacity();
self.lazy_code_buf.clearRetainingCapacity();
- try self.flushErrDecls(&f.lazy_ctypes);
+ try f.lazy_ctype_pool.init(gpa);
+ try self.flushErrDecls(zcu, &f.lazy_ctype_pool);
// Unlike other backends, the .c code we are emitting has order-dependent decls.
// `CType`s, forward decls, and non-functions first.
@@ -446,34 +450,35 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: *std.Progress.Node) !v
{
var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
defer export_names.deinit(gpa);
- try export_names.ensureTotalCapacity(gpa, @intCast(module.decl_exports.entries.len));
- for (module.decl_exports.values()) |exports| for (exports.items) |@"export"|
+ try export_names.ensureTotalCapacity(gpa, @intCast(zcu.decl_exports.entries.len));
+ for (zcu.decl_exports.values()) |exports| for (exports.items) |@"export"|
try export_names.put(gpa, @"export".opts.name, {});
for (self.anon_decls.values()) |*decl_block| {
- try self.flushDeclBlock(&f, decl_block, export_names, .none);
+ try self.flushDeclBlock(zcu, zcu.root_mod, &f, decl_block, export_names, .none);
}
for (self.decl_table.keys(), self.decl_table.values()) |decl_index, *decl_block| {
- assert(module.declPtr(decl_index).has_tv);
- const decl = module.declPtr(decl_index);
- const extern_symbol_name = if (decl.isExtern(module)) decl.name.toOptional() else .none;
- try self.flushDeclBlock(&f, decl_block, export_names, extern_symbol_name);
+ const decl = zcu.declPtr(decl_index);
+ assert(decl.has_tv);
+ const extern_symbol_name = if (decl.isExtern(zcu)) decl.name.toOptional() else .none;
+ const mod = zcu.namespacePtr(decl.src_namespace).file_scope.mod;
+ try self.flushDeclBlock(zcu, mod, &f, decl_block, export_names, extern_symbol_name);
}
}
{
// We need to flush lazy ctypes after flushing all decls but before flushing any decl ctypes.
// This ensures that every lazy CType.Index exactly matches the global CType.Index.
- assert(f.ctypes.count() == 0);
- try self.flushCTypes(&f, .flush, f.lazy_ctypes);
+ try f.ctype_pool.init(gpa);
+ try self.flushCTypes(zcu, &f, .flush, &f.lazy_ctype_pool);
for (self.anon_decls.keys(), self.anon_decls.values()) |anon_decl, decl_block| {
- try self.flushCTypes(&f, .{ .anon = anon_decl }, decl_block.ctypes);
+ try self.flushCTypes(zcu, &f, .{ .anon = anon_decl }, &decl_block.ctype_pool);
}
for (self.decl_table.keys(), self.decl_table.values()) |decl_index, decl_block| {
- try self.flushCTypes(&f, .{ .decl = decl_index }, decl_block.ctypes);
+ try self.flushCTypes(zcu, &f, .{ .decl = decl_index }, &decl_block.ctype_pool);
}
}
@@ -504,11 +509,11 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: *std.Progress.Node) !v
}
const Flush = struct {
- ctypes: codegen.CType.Store = .{},
- ctypes_map: std.ArrayListUnmanaged(codegen.CType.Index) = .{},
+ ctype_pool: codegen.CType.Pool,
+ ctype_global_from_decl_map: std.ArrayListUnmanaged(codegen.CType) = .{},
ctypes_buf: std.ArrayListUnmanaged(u8) = .{},
- lazy_ctypes: codegen.CType.Store = .{},
+ lazy_ctype_pool: codegen.CType.Pool,
lazy_fns: LazyFns = .{},
asm_buf: std.ArrayListUnmanaged(u8) = .{},
@@ -530,10 +535,11 @@ const Flush = struct {
f.all_buffers.deinit(gpa);
f.asm_buf.deinit(gpa);
f.lazy_fns.deinit(gpa);
- f.lazy_ctypes.deinit(gpa);
+ f.lazy_ctype_pool.deinit(gpa);
f.ctypes_buf.deinit(gpa);
- f.ctypes_map.deinit(gpa);
- f.ctypes.deinit(gpa);
+ assert(f.ctype_global_from_decl_map.items.len == 0);
+ f.ctype_global_from_decl_map.deinit(gpa);
+ f.ctype_pool.deinit(gpa);
}
};
@@ -543,91 +549,62 @@ const FlushDeclError = error{
fn flushCTypes(
self: *C,
+ zcu: *Zcu,
f: *Flush,
pass: codegen.DeclGen.Pass,
- decl_ctypes: codegen.CType.Store,
+ decl_ctype_pool: *const codegen.CType.Pool,
) FlushDeclError!void {
const gpa = self.base.comp.gpa;
- const mod = self.base.comp.module.?;
+ const global_ctype_pool = &f.ctype_pool;
- const decl_ctypes_len = decl_ctypes.count();
- f.ctypes_map.clearRetainingCapacity();
- try f.ctypes_map.ensureTotalCapacity(gpa, decl_ctypes_len);
-
- var global_ctypes = f.ctypes.promote(gpa);
- defer f.ctypes.demote(global_ctypes);
+ const global_from_decl_map = &f.ctype_global_from_decl_map;
+ assert(global_from_decl_map.items.len == 0);
+ try global_from_decl_map.ensureTotalCapacity(gpa, decl_ctype_pool.items.len);
+ defer global_from_decl_map.clearRetainingCapacity();
var ctypes_buf = f.ctypes_buf.toManaged(gpa);
defer f.ctypes_buf = ctypes_buf.moveToUnmanaged();
const writer = ctypes_buf.writer();
- const slice = decl_ctypes.set.map.entries.slice();
- for (slice.items(.key), 0..) |decl_cty, decl_i| {
- const Context = struct {
- arena: Allocator,
- ctypes_map: []codegen.CType.Index,
- cached_hash: codegen.CType.Store.Set.Map.Hash,
- idx: codegen.CType.Index,
-
- pub fn hash(ctx: @This(), _: codegen.CType) codegen.CType.Store.Set.Map.Hash {
- return ctx.cached_hash;
- }
- pub fn eql(ctx: @This(), lhs: codegen.CType, rhs: codegen.CType, _: usize) bool {
- return lhs.eqlContext(rhs, ctx);
- }
- pub fn eqlIndex(
- ctx: @This(),
- lhs_idx: codegen.CType.Index,
- rhs_idx: codegen.CType.Index,
- ) bool {
- if (lhs_idx < codegen.CType.Tag.no_payload_count or
- rhs_idx < codegen.CType.Tag.no_payload_count) return lhs_idx == rhs_idx;
- const lhs_i = lhs_idx - codegen.CType.Tag.no_payload_count;
- if (lhs_i >= ctx.ctypes_map.len) return false;
- return ctx.ctypes_map[lhs_i] == rhs_idx;
+ for (0..decl_ctype_pool.items.len) |decl_ctype_pool_index| {
+ const PoolAdapter = struct {
+ global_from_decl_map: []const codegen.CType,
+ pub fn eql(pool_adapter: @This(), decl_ctype: codegen.CType, global_ctype: codegen.CType) bool {
+ return if (decl_ctype.toPoolIndex()) |decl_pool_index|
+ decl_pool_index < pool_adapter.global_from_decl_map.len and
+ pool_adapter.global_from_decl_map[decl_pool_index].eql(global_ctype)
+ else
+ decl_ctype.index == global_ctype.index;
}
- pub fn copyIndex(ctx: @This(), idx: codegen.CType.Index) codegen.CType.Index {
- if (idx < codegen.CType.Tag.no_payload_count) return idx;
- return ctx.ctypes_map[idx - codegen.CType.Tag.no_payload_count];
+ pub fn copy(pool_adapter: @This(), decl_ctype: codegen.CType) codegen.CType {
+ return if (decl_ctype.toPoolIndex()) |decl_pool_index|
+ pool_adapter.global_from_decl_map[decl_pool_index]
+ else
+ decl_ctype;
}
};
- const decl_idx = @as(codegen.CType.Index, @intCast(codegen.CType.Tag.no_payload_count + decl_i));
- const ctx = Context{
- .arena = global_ctypes.arena.allocator(),
- .ctypes_map = f.ctypes_map.items,
- .cached_hash = decl_ctypes.indexToHash(decl_idx),
- .idx = decl_idx,
- };
- const gop = try global_ctypes.set.map.getOrPutContextAdapted(gpa, decl_cty, ctx, .{
- .store = &global_ctypes.set,
- });
- const global_idx =
- @as(codegen.CType.Index, @intCast(codegen.CType.Tag.no_payload_count + gop.index));
- f.ctypes_map.appendAssumeCapacity(global_idx);
- if (!gop.found_existing) {
- errdefer _ = global_ctypes.set.map.pop();
- gop.key_ptr.* = try decl_cty.copyContext(ctx);
- }
- if (std.debug.runtime_safety) {
- const global_cty = &global_ctypes.set.map.entries.items(.key)[gop.index];
- assert(global_cty == gop.key_ptr);
- assert(decl_cty.eqlContext(global_cty.*, ctx));
- assert(decl_cty.hash(decl_ctypes.set) == global_cty.hash(global_ctypes.set));
- }
+ const decl_ctype = codegen.CType.fromPoolIndex(decl_ctype_pool_index);
+ const global_ctype, const found_existing = try global_ctype_pool.getOrPutAdapted(
+ gpa,
+ decl_ctype_pool,
+ decl_ctype,
+ PoolAdapter{ .global_from_decl_map = global_from_decl_map.items },
+ );
+ global_from_decl_map.appendAssumeCapacity(global_ctype);
try codegen.genTypeDecl(
- mod,
+ zcu,
writer,
- global_ctypes.set,
- global_idx,
+ global_ctype_pool,
+ global_ctype,
pass,
- decl_ctypes.set,
- decl_idx,
- gop.found_existing,
+ decl_ctype_pool,
+ decl_ctype,
+ found_existing,
);
}
}
-fn flushErrDecls(self: *C, ctypes: *codegen.CType.Store) FlushDeclError!void {
+fn flushErrDecls(self: *C, zcu: *Zcu, ctype_pool: *codegen.CType.Pool) FlushDeclError!void {
const gpa = self.base.comp.gpa;
const fwd_decl = &self.lazy_fwd_decl_buf;
@@ -636,12 +613,14 @@ fn flushErrDecls(self: *C, ctypes: *codegen.CType.Store) FlushDeclError!void {
var object = codegen.Object{
.dg = .{
.gpa = gpa,
- .module = self.base.comp.module.?,
+ .zcu = zcu,
+ .mod = zcu.root_mod,
.error_msg = null,
.pass = .flush,
.is_naked_fn = false,
.fwd_decl = fwd_decl.toManaged(gpa),
- .ctypes = ctypes.*,
+ .ctype_pool = ctype_pool.*,
+ .scratch = .{},
.anon_decl_deps = self.anon_decls,
.aligned_anon_decls = self.aligned_anon_decls,
},
@@ -652,8 +631,10 @@ fn flushErrDecls(self: *C, ctypes: *codegen.CType.Store) FlushDeclError!void {
defer {
self.anon_decls = object.dg.anon_decl_deps;
self.aligned_anon_decls = object.dg.aligned_anon_decls;
- object.dg.ctypes.deinit(gpa);
fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
+ ctype_pool.* = object.dg.ctype_pool.move();
+ ctype_pool.freeUnusedCapacity(gpa);
+ object.dg.scratch.deinit(gpa);
code.* = object.code.moveToUnmanaged();
}
@@ -661,13 +642,14 @@ fn flushErrDecls(self: *C, ctypes: *codegen.CType.Store) FlushDeclError!void {
error.AnalysisFail => unreachable,
else => |e| return e,
};
-
- ctypes.* = object.dg.ctypes.move();
}
fn flushLazyFn(
self: *C,
- ctypes: *codegen.CType.Store,
+ zcu: *Zcu,
+ mod: *Module,
+ ctype_pool: *codegen.CType.Pool,
+ lazy_ctype_pool: *const codegen.CType.Pool,
lazy_fn: codegen.LazyFnMap.Entry,
) FlushDeclError!void {
const gpa = self.base.comp.gpa;
@@ -678,12 +660,14 @@ fn flushLazyFn(
var object = codegen.Object{
.dg = .{
.gpa = gpa,
- .module = self.base.comp.module.?,
+ .zcu = zcu,
+ .mod = mod,
.error_msg = null,
.pass = .flush,
.is_naked_fn = false,
.fwd_decl = fwd_decl.toManaged(gpa),
- .ctypes = ctypes.*,
+ .ctype_pool = ctype_pool.*,
+ .scratch = .{},
.anon_decl_deps = .{},
.aligned_anon_decls = .{},
},
@@ -696,20 +680,27 @@ fn flushLazyFn(
// `updateFunc()` does.
assert(object.dg.anon_decl_deps.count() == 0);
assert(object.dg.aligned_anon_decls.count() == 0);
- object.dg.ctypes.deinit(gpa);
fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
+ ctype_pool.* = object.dg.ctype_pool.move();
+ ctype_pool.freeUnusedCapacity(gpa);
+ object.dg.scratch.deinit(gpa);
code.* = object.code.moveToUnmanaged();
}
- codegen.genLazyFn(&object, lazy_fn) catch |err| switch (err) {
+ codegen.genLazyFn(&object, lazy_ctype_pool, lazy_fn) catch |err| switch (err) {
error.AnalysisFail => unreachable,
else => |e| return e,
};
-
- ctypes.* = object.dg.ctypes.move();
}
-fn flushLazyFns(self: *C, f: *Flush, lazy_fns: codegen.LazyFnMap) FlushDeclError!void {
+fn flushLazyFns(
+ self: *C,
+ zcu: *Zcu,
+ mod: *Module,
+ f: *Flush,
+ lazy_ctype_pool: *const codegen.CType.Pool,
+ lazy_fns: codegen.LazyFnMap,
+) FlushDeclError!void {
const gpa = self.base.comp.gpa;
try f.lazy_fns.ensureUnusedCapacity(gpa, @intCast(lazy_fns.count()));
@@ -718,19 +709,21 @@ fn flushLazyFns(self: *C, f: *Flush, lazy_fns: codegen.LazyFnMap) FlushDeclError
const gop = f.lazy_fns.getOrPutAssumeCapacity(entry.key_ptr.*);
if (gop.found_existing) continue;
gop.value_ptr.* = {};
- try self.flushLazyFn(&f.lazy_ctypes, entry);
+ try self.flushLazyFn(zcu, mod, &f.lazy_ctype_pool, lazy_ctype_pool, entry);
}
}
fn flushDeclBlock(
self: *C,
+ zcu: *Zcu,
+ mod: *Module,
f: *Flush,
decl_block: *DeclBlock,
export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void),
extern_symbol_name: InternPool.OptionalNullTerminatedString,
) FlushDeclError!void {
const gpa = self.base.comp.gpa;
- try self.flushLazyFns(f, decl_block.lazy_fns);
+ try self.flushLazyFns(zcu, mod, f, &decl_block.ctype_pool, decl_block.lazy_fns);
try f.all_buffers.ensureUnusedCapacity(gpa, 1);
fwd_decl: {
if (extern_symbol_name.unwrap()) |name| {
@@ -740,15 +733,15 @@ fn flushDeclBlock(
}
}
-pub fn flushEmitH(module: *Module) !void {
+pub fn flushEmitH(zcu: *Zcu) !void {
const tracy = trace(@src());
defer tracy.end();
- const emit_h = module.emit_h orelse return;
+ const emit_h = zcu.emit_h orelse return;
// We collect a list of buffers to write, and write them all at once with pwritev 😎
const num_buffers = emit_h.decl_table.count() + 1;
- var all_buffers = try std.ArrayList(std.posix.iovec_const).initCapacity(module.gpa, num_buffers);
+ var all_buffers = try std.ArrayList(std.posix.iovec_const).initCapacity(zcu.gpa, num_buffers);
defer all_buffers.deinit();
var file_size: u64 = zig_h.len;
@@ -771,7 +764,7 @@ pub fn flushEmitH(module: *Module) !void {
}
}
- const directory = emit_h.loc.directory orelse module.comp.local_cache_directory;
+ const directory = emit_h.loc.directory orelse zcu.comp.local_cache_directory;
const file = try directory.handle.createFile(emit_h.loc.basename, .{
// We set the end position explicitly below; by not truncating the file, we possibly
// make it easier on the file system by doing 1 reallocation instead of two.
@@ -785,12 +778,12 @@ pub fn flushEmitH(module: *Module) !void {
pub fn updateExports(
self: *C,
- module: *Module,
- exported: Module.Exported,
- exports: []const *Module.Export,
+ zcu: *Zcu,
+ exported: Zcu.Exported,
+ exports: []const *Zcu.Export,
) !void {
_ = exports;
_ = exported;
- _ = module;
+ _ = zcu;
_ = self;
}
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 7becb3f366f8..aaf840e02ccf 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1223,7 +1223,7 @@ fn lowerConst(self: *Coff, name: []const u8, val: Value, required_alignment: Int
atom.getSymbolPtr(self).value = try self.allocateAtom(
atom_index,
atom.size,
- @intCast(required_alignment.toByteUnitsOptional().?),
+ @intCast(required_alignment.toByteUnits().?),
);
errdefer self.freeAtom(atom_index);
@@ -1344,7 +1344,7 @@ fn updateLazySymbolAtom(
symbol.section_number = @as(coff.SectionNumber, @enumFromInt(section_index + 1));
symbol.type = .{ .complex_type = .NULL, .base_type = .NULL };
- const vaddr = try self.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits(0)));
+ const vaddr = try self.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0));
errdefer self.freeAtom(atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ name, vaddr });
@@ -1428,7 +1428,7 @@ fn updateDeclCode(self: *Coff, decl_index: InternPool.DeclIndex, code: []u8, com
const decl_name = mod.intern_pool.stringToSlice(try decl.fullyQualifiedName(mod));
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
- const required_alignment: u32 = @intCast(decl.getAlignment(mod).toByteUnits(0));
+ const required_alignment: u32 = @intCast(decl.getAlignment(mod).toByteUnits() orelse 0);
const decl_metadata = self.decls.get(decl_index).?;
const atom_index = decl_metadata.atom;
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 95ddc81e3c08..c20a4b6afa48 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -4051,7 +4051,7 @@ fn updateSectionSizes(self: *Elf) !void {
const padding = offset - shdr.sh_size;
atom_ptr.value = offset;
shdr.sh_size += padding + atom_ptr.size;
- shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits(1));
+ shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1);
}
}
diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig
index f391326670b4..3db11826963b 100644
--- a/src/link/Elf/Atom.zig
+++ b/src/link/Elf/Atom.zig
@@ -208,7 +208,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
zig_object.debug_aranges_section_dirty = true;
}
}
- shdr.sh_addralign = @max(shdr.sh_addralign, self.alignment.toByteUnitsOptional().?);
+ shdr.sh_addralign = @max(shdr.sh_addralign, self.alignment.toByteUnits().?);
// This function can also reallocate an atom.
// In this case we need to "unplug" it from its previous location before
diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig
index 6ed55dac10eb..6aede441c885 100644
--- a/src/link/Elf/ZigObject.zig
+++ b/src/link/Elf/ZigObject.zig
@@ -313,7 +313,7 @@ pub fn inputShdr(self: ZigObject, atom_index: Atom.Index, elf_file: *Elf) elf.El
shdr.sh_addr = 0;
shdr.sh_offset = 0;
shdr.sh_size = atom.size;
- shdr.sh_addralign = atom.alignment.toByteUnits(1);
+ shdr.sh_addralign = atom.alignment.toByteUnits() orelse 1;
return shdr;
}
diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig
index 017329dde773..98abed420eb0 100644
--- a/src/link/Elf/relocatable.zig
+++ b/src/link/Elf/relocatable.zig
@@ -330,7 +330,7 @@ fn updateSectionSizes(elf_file: *Elf) !void {
const padding = offset - shdr.sh_size;
atom_ptr.value = offset;
shdr.sh_size += padding + atom_ptr.size;
- shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits(1));
+ shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1);
}
}
diff --git a/src/link/Elf/thunks.zig b/src/link/Elf/thunks.zig
index 119529b5123d..7d06b3b19099 100644
--- a/src/link/Elf/thunks.zig
+++ b/src/link/Elf/thunks.zig
@@ -63,7 +63,7 @@ fn advance(shdr: *elf.Elf64_Shdr, size: u64, alignment: Atom.Alignment) !u64 {
const offset = alignment.forward(shdr.sh_size);
const padding = offset - shdr.sh_size;
shdr.sh_size += padding + size;
- shdr.sh_addralign = @max(shdr.sh_addralign, alignment.toByteUnits(1));
+ shdr.sh_addralign = @max(shdr.sh_addralign, alignment.toByteUnits() orelse 1);
return offset;
}
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 71666beb074d..2c8a3da59f7a 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -2060,7 +2060,7 @@ fn calcSectionSizes(self: *MachO) !void {
for (atoms.items) |atom_index| {
const atom = self.getAtom(atom_index).?;
- const atom_alignment = atom.alignment.toByteUnits(1);
+ const atom_alignment = atom.alignment.toByteUnits() orelse 1;
const offset = mem.alignForward(u64, header.size, atom_alignment);
const padding = offset - header.size;
atom.value = offset;
diff --git a/src/link/MachO/relocatable.zig b/src/link/MachO/relocatable.zig
index b0eced27ebbf..711aa01fb488 100644
--- a/src/link/MachO/relocatable.zig
+++ b/src/link/MachO/relocatable.zig
@@ -380,7 +380,7 @@ fn calcSectionSizes(macho_file: *MachO) !void {
if (atoms.items.len == 0) continue;
for (atoms.items) |atom_index| {
const atom = macho_file.getAtom(atom_index).?;
- const atom_alignment = atom.alignment.toByteUnits(1);
+ const atom_alignment = atom.alignment.toByteUnits() orelse 1;
const offset = mem.alignForward(u64, header.size, atom_alignment);
const padding = offset - header.size;
atom.value = offset;
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index ce91beedae72..10f00d8992dc 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -2263,7 +2263,7 @@ fn setupMemory(wasm: *Wasm) !void {
}
if (wasm.findGlobalSymbol("__tls_align")) |loc| {
const sym = loc.getSymbol(wasm);
- wasm.wasm_globals.items[sym.index - wasm.imported_globals_count].init.i32_const = @intCast(segment.alignment.toByteUnitsOptional().?);
+ wasm.wasm_globals.items[sym.index - wasm.imported_globals_count].init.i32_const = @intCast(segment.alignment.toByteUnits().?);
}
if (wasm.findGlobalSymbol("__tls_base")) |loc| {
const sym = loc.getSymbol(wasm);
diff --git a/src/link/tapi/parse.zig b/src/link/tapi/parse.zig
index 89e0b238244e..deba9aaef01f 100644
--- a/src/link/tapi/parse.zig
+++ b/src/link/tapi/parse.zig
@@ -29,34 +29,28 @@ pub const Node = struct {
map,
list,
value,
+
+ pub fn Type(comptime tag: Tag) type {
+ return switch (tag) {
+ .doc => Doc,
+ .map => Map,
+ .list => List,
+ .value => Value,
+ };
+ }
};
pub fn cast(self: *const Node, comptime T: type) ?*const T {
if (self.tag != T.base_tag) {
return null;
}
- return @fieldParentPtr(T, "base", self);
+ return @fieldParentPtr("base", self);
}
pub fn deinit(self: *Node, allocator: Allocator) void {
switch (self.tag) {
- .doc => {
- const parent = @fieldParentPtr(Node.Doc, "base", self);
- parent.deinit(allocator);
- allocator.destroy(parent);
- },
- .map => {
- const parent = @fieldParentPtr(Node.Map, "base", self);
- parent.deinit(allocator);
- allocator.destroy(parent);
- },
- .list => {
- const parent = @fieldParentPtr(Node.List, "base", self);
- parent.deinit(allocator);
- allocator.destroy(parent);
- },
- .value => {
- const parent = @fieldParentPtr(Node.Value, "base", self);
+ inline else => |tag| {
+ const parent: *tag.Type() = @fieldParentPtr("base", self);
parent.deinit(allocator);
allocator.destroy(parent);
},
@@ -69,12 +63,9 @@ pub const Node = struct {
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
- return switch (self.tag) {
- .doc => @fieldParentPtr(Node.Doc, "base", self).format(fmt, options, writer),
- .map => @fieldParentPtr(Node.Map, "base", self).format(fmt, options, writer),
- .list => @fieldParentPtr(Node.List, "base", self).format(fmt, options, writer),
- .value => @fieldParentPtr(Node.Value, "base", self).format(fmt, options, writer),
- };
+ switch (self.tag) {
+ inline else => |tag| return @as(*tag.Type(), @fieldParentPtr("base", self)).format(fmt, options, writer),
+ }
}
pub const Doc = struct {
diff --git a/src/main.zig b/src/main.zig
index 2edc3864c6ad..9e699c07e6f5 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -3544,11 +3544,7 @@ fn createModule(
// If the target is not overridden, use the parent's target. Of course,
// if this is the root module then we need to proceed to resolve the
// target.
- if (cli_mod.target_arch_os_abi == null and
- cli_mod.target_mcpu == null and
- create_module.dynamic_linker == null and
- create_module.object_format == null)
- {
+ if (cli_mod.target_arch_os_abi == null and cli_mod.target_mcpu == null) {
if (parent) |p| break :t p.resolved_target;
}
diff --git a/src/print_value.zig b/src/print_value.zig
index 25c20bbbbd10..21a322cd6371 100644
--- a/src/print_value.zig
+++ b/src/print_value.zig
@@ -80,7 +80,7 @@ pub fn print(
inline .u64, .i64, .big_int => |x| try writer.print("{}", .{x}),
.lazy_align => |ty| if (opt_sema) |sema| {
const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar;
- try writer.print("{}", .{a.toByteUnits(0)});
+ try writer.print("{}", .{a.toByteUnits() orelse 0});
} else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(mod)}),
.lazy_size => |ty| if (opt_sema) |sema| {
const s = (try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar;
diff --git a/src/print_zir.zig b/src/print_zir.zig
index e20eff63281e..a2929c32d9df 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -355,7 +355,6 @@ const Writer = struct {
.atomic_rmw => try self.writeAtomicRmw(stream, inst),
.shuffle => try self.writeShuffle(stream, inst),
.mul_add => try self.writeMulAdd(stream, inst),
- .field_parent_ptr => try self.writeFieldParentPtr(stream, inst),
.builtin_call => try self.writeBuiltinCall(stream, inst),
.field_type_ref => try self.writeFieldTypeRef(stream, inst),
@@ -609,6 +608,7 @@ const Writer = struct {
.restore_err_ret_index => try self.writeRestoreErrRetIndex(stream, extended),
.closure_get => try self.writeClosureGet(stream, extended),
+ .field_parent_ptr => try self.writeFieldParentPtr(stream, extended),
}
}
@@ -901,16 +901,21 @@ const Writer = struct {
try self.writeSrc(stream, inst_data.src());
}
- fn writeFieldParentPtr(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
- const extra = self.code.extraData(Zir.Inst.FieldParentPtr, inst_data.payload_index).data;
- try self.writeInstRef(stream, extra.parent_type);
+ fn writeFieldParentPtr(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
+ const extra = self.code.extraData(Zir.Inst.FieldParentPtr, extended.operand).data;
+ const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?;
+ const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(FlagsInt, @truncate(extended.small)));
+ if (flags.align_cast) try stream.writeAll("align_cast, ");
+ if (flags.addrspace_cast) try stream.writeAll("addrspace_cast, ");
+ if (flags.const_cast) try stream.writeAll("const_cast, ");
+ if (flags.volatile_cast) try stream.writeAll("volatile_cast, ");
+ try self.writeInstRef(stream, extra.parent_ptr_type);
try stream.writeAll(", ");
try self.writeInstRef(stream, extra.field_name);
try stream.writeAll(", ");
try self.writeInstRef(stream, extra.field_ptr);
try stream.writeAll(") ");
- try self.writeSrc(stream, inst_data.src());
+ try self.writeSrc(stream, extra.src());
}
fn writeBuiltinAsyncCall(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
@@ -1069,7 +1074,8 @@ const Writer = struct {
}
fn writePtrCastFull(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
- const flags = @as(Zir.Inst.FullPtrCastFlags, @bitCast(@as(u5, @truncate(extended.small))));
+ const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?;
+ const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(FlagsInt, @truncate(extended.small)));
const extra = self.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
if (flags.ptr_cast) try stream.writeAll("ptr_cast, ");
@@ -1085,7 +1091,8 @@ const Writer = struct {
}
fn writePtrCastNoDest(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
- const flags = @as(Zir.Inst.FullPtrCastFlags, @bitCast(@as(u5, @truncate(extended.small))));
+ const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?;
+ const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(FlagsInt, @truncate(extended.small)));
const extra = self.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
if (flags.const_cast) try stream.writeAll("const_cast, ");
diff --git a/src/register_manager.zig b/src/register_manager.zig
index 62aeb78fc73b..d1d773ed10e7 100644
--- a/src/register_manager.zig
+++ b/src/register_manager.zig
@@ -59,7 +59,7 @@ pub fn RegisterManager(
pub const RegisterBitSet = StaticBitSet(tracked_registers.len);
fn getFunction(self: *Self) *Function {
- return @fieldParentPtr(Function, "register_manager", self);
+ return @alignCast(@fieldParentPtr("register_manager", self));
}
fn excludeRegister(reg: Register, register_class: RegisterBitSet) bool {
diff --git a/src/target.zig b/src/target.zig
index 8f1942111da4..fa782075c77e 100644
--- a/src/target.zig
+++ b/src/target.zig
@@ -525,7 +525,7 @@ pub fn backendSupportsFeature(
.error_return_trace => use_llvm,
.is_named_enum_value => use_llvm,
.error_set_has_value => use_llvm or cpu_arch.isWasm(),
- .field_reordering => use_llvm,
+ .field_reordering => ofmt == .c or use_llvm,
.safety_checked_instructions => use_llvm,
};
}
diff --git a/src/type.zig b/src/type.zig
index 203ab4f63e68..8352552463e9 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -203,7 +203,7 @@ pub const Type = struct {
info.flags.alignment
else
Type.fromInterned(info.child).abiAlignment(mod);
- try writer.print("align({d}", .{alignment.toByteUnits(0)});
+ try writer.print("align({d}", .{alignment.toByteUnits() orelse 0});
if (info.packed_offset.bit_offset != 0 or info.packed_offset.host_size != 0) {
try writer.print(":{d}:{d}", .{
@@ -863,7 +863,7 @@ pub const Type = struct {
pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value {
switch (try ty.abiAlignmentAdvanced(mod, .lazy)) {
.val => |val| return val,
- .scalar => |x| return mod.intValue(Type.comptime_int, x.toByteUnits(0)),
+ .scalar => |x| return mod.intValue(Type.comptime_int, x.toByteUnits() orelse 0),
}
}
@@ -905,7 +905,7 @@ pub const Type = struct {
return .{ .scalar = intAbiAlignment(int_type.bits, target) };
},
.ptr_type, .anyframe_type => {
- return .{ .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)) };
+ return .{ .scalar = ptrAbiAlignment(target) };
},
.array_type => |array_type| {
return Type.fromInterned(array_type.child).abiAlignmentAdvanced(mod, strat);
@@ -920,6 +920,9 @@ pub const Type = struct {
const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
return .{ .scalar = Alignment.fromByteUnits(alignment) };
},
+ .stage2_c => {
+ return Type.fromInterned(vector_type.child).abiAlignmentAdvanced(mod, strat);
+ },
.stage2_x86_64 => {
if (vector_type.child == .bool_type) {
if (vector_type.len > 256 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" };
@@ -966,12 +969,12 @@ pub const Type = struct {
.usize,
.isize,
+ => return .{ .scalar = intAbiAlignment(target.ptrBitWidth(), target) },
+
.export_options,
.extern_options,
.type_info,
- => return .{
- .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)),
- },
+ => return .{ .scalar = ptrAbiAlignment(target) },
.c_char => return .{ .scalar = cTypeAlign(target, .char) },
.c_short => return .{ .scalar = cTypeAlign(target, .short) },
@@ -1160,9 +1163,7 @@ pub const Type = struct {
const child_type = ty.optionalChild(mod);
switch (child_type.zigTypeTag(mod)) {
- .Pointer => return .{
- .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)),
- },
+ .Pointer => return .{ .scalar = ptrAbiAlignment(target) },
.ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat),
.NoReturn => return .{ .scalar = .@"1" },
else => {},
@@ -1274,6 +1275,10 @@ pub const Type = struct {
const total_bits = elem_bits * vector_type.len;
break :total_bytes (total_bits + 7) / 8;
},
+ .stage2_c => total_bytes: {
+ const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar);
+ break :total_bytes elem_bytes * vector_type.len;
+ },
.stage2_x86_64 => total_bytes: {
if (vector_type.child == .bool_type) break :total_bytes std.math.divCeil(u32, vector_type.len, 8) catch unreachable;
const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar);
@@ -1527,15 +1532,19 @@ pub const Type = struct {
// guaranteed to be >= that of bool's (1 byte) the added size is exactly equal
// to the child type's ABI alignment.
return AbiSizeAdvanced{
- .scalar = child_ty.abiAlignment(mod).toByteUnits(0) + payload_size,
+ .scalar = (child_ty.abiAlignment(mod).toByteUnits() orelse 0) + payload_size,
};
}
- fn intAbiSize(bits: u16, target: Target) u64 {
+ pub fn ptrAbiAlignment(target: Target) Alignment {
+ return Alignment.fromNonzeroByteUnits(@divExact(target.ptrBitWidth(), 8));
+ }
+
+ pub fn intAbiSize(bits: u16, target: Target) u64 {
return intAbiAlignment(bits, target).forward(@as(u16, @intCast((@as(u17, bits) + 7) / 8)));
}
- fn intAbiAlignment(bits: u16, target: Target) Alignment {
+ pub fn intAbiAlignment(bits: u16, target: Target) Alignment {
return Alignment.fromByteUnits(@min(
std.math.ceilPowerOfTwoPromote(u16, @as(u16, @intCast((@as(u17, bits) + 7) / 8))),
target.maxIntAlignment(),
@@ -1572,7 +1581,7 @@ pub const Type = struct {
if (len == 0) return 0;
const elem_ty = Type.fromInterned(array_type.child);
const elem_size = @max(
- (try elem_ty.abiAlignmentAdvanced(mod, strat)).scalar.toByteUnits(0),
+ (try elem_ty.abiAlignmentAdvanced(mod, strat)).scalar.toByteUnits() orelse 0,
(try elem_ty.abiSizeAdvanced(mod, strat)).scalar,
);
if (elem_size == 0) return 0;
@@ -3016,26 +3025,15 @@ pub const Type = struct {
}
/// Returns none in the case of a tuple which uses the integer index as the field name.
- pub fn structFieldName(ty: Type, field_index: u32, mod: *Module) InternPool.OptionalNullTerminatedString {
+ pub fn structFieldName(ty: Type, index: usize, mod: *Module) InternPool.OptionalNullTerminatedString {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, field_index),
- .anon_struct_type => |anon_struct| anon_struct.fieldName(ip, field_index),
+ .struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, index),
+ .anon_struct_type => |anon_struct| anon_struct.fieldName(ip, index),
else => unreachable,
};
}
- /// When struct types have no field names, the names are implicitly understood to be
- /// strings corresponding to the field indexes in declaration order. It used to be the
- /// case that a NullTerminatedString would be stored for each field in this case, however,
- /// now, callers must handle the possibility that there are no names stored at all.
- /// Here we fake the previous behavior. Probably something better could be done by examining
- /// all the callsites of this function.
- pub fn legacyStructFieldName(ty: Type, i: u32, mod: *Module) InternPool.NullTerminatedString {
- return ty.structFieldName(i, mod).unwrap() orelse
- mod.intern_pool.getOrPutStringFmt(mod.gpa, "{d}", .{i}) catch @panic("OOM");
- }
-
pub fn structFieldCount(ty: Type, mod: *Module) u32 {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
diff --git a/stage1/zig.h b/stage1/zig.h
index 7a1c69575a24..ec7508670d36 100644
--- a/stage1/zig.h
+++ b/stage1/zig.h
@@ -130,22 +130,18 @@ typedef char bool;
#define zig_restrict
#endif
-#if __STDC_VERSION__ >= 201112L
-#define zig_align(alignment) _Alignas(alignment)
-#elif zig_has_attribute(aligned)
-#define zig_align(alignment) __attribute__((aligned(alignment)))
+#if zig_has_attribute(aligned)
+#define zig_under_align(alignment) __attribute__((aligned(alignment)))
#elif _MSC_VER
-#define zig_align(alignment) __declspec(align(alignment))
+#define zig_under_align(alignment) __declspec(align(alignment))
#else
-#define zig_align zig_align_unavailable
+#define zig_under_align zig_align_unavailable
#endif
-#if zig_has_attribute(aligned)
-#define zig_under_align(alignment) __attribute__((aligned(alignment)))
-#elif _MSC_VER
-#define zig_under_align(alignment) zig_align(alignment)
+#if __STDC_VERSION__ >= 201112L
+#define zig_align(alignment) _Alignas(alignment)
#else
-#define zig_align zig_align_unavailable
+#define zig_align(alignment) zig_under_align(alignment)
#endif
#if zig_has_attribute(aligned)
@@ -165,11 +161,14 @@ typedef char bool;
#endif
#if zig_has_attribute(section)
-#define zig_linksection(name, def, ...) def __attribute__((section(name)))
+#define zig_linksection(name) __attribute__((section(name)))
+#define zig_linksection_fn zig_linksection
#elif _MSC_VER
-#define zig_linksection(name, def, ...) __pragma(section(name, __VA_ARGS__)) __declspec(allocate(name)) def
+#define zig_linksection(name) __pragma(section(name, read, write)) __declspec(allocate(name))
+#define zig_linksection_fn(name) __pragma(section(name, read, execute)) __declspec(code_seg(name))
#else
-#define zig_linksection(name, def, ...) zig_linksection_unavailable
+#define zig_linksection(name) zig_linksection_unavailable
+#define zig_linksection_fn zig_linksection
#endif
#if zig_has_builtin(unreachable) || defined(zig_gnuc)
diff --git a/stage1/zig1.wasm b/stage1/zig1.wasm
index c249daa0674c..5005c8c79abd 100644
Binary files a/stage1/zig1.wasm and b/stage1/zig1.wasm differ
diff --git a/test/behavior/align.zig b/test/behavior/align.zig
index b19ab8ae0ce5..36366fd52caf 100644
--- a/test/behavior/align.zig
+++ b/test/behavior/align.zig
@@ -624,7 +624,6 @@ test "sub-aligned pointer field access" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
// Originally reported at https://github.com/ziglang/zig/issues/14904
@@ -694,5 +693,5 @@ test "zero-bit fields in extern struct pad fields appropriately" {
try expect(@intFromPtr(&s) % 2 == 0);
try expect(@intFromPtr(&s.y) - @intFromPtr(&s.x) == 2);
try expect(@intFromPtr(&s.y) == @intFromPtr(&s.a));
- try expect(@fieldParentPtr(S, "a", &s.a) == &s);
+ try expect(@as(*S, @fieldParentPtr("a", &s.a)) == &s);
}
diff --git a/test/behavior/field_parent_ptr.zig b/test/behavior/field_parent_ptr.zig
index adb0e66ed661..bd2063a3a34c 100644
--- a/test/behavior/field_parent_ptr.zig
+++ b/test/behavior/field_parent_ptr.zig
@@ -1,126 +1,1924 @@
const expect = @import("std").testing.expect;
const builtin = @import("builtin");
-test "@fieldParentPtr non-first field" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+test "@fieldParentPtr struct" {
+ const C = struct {
+ a: bool = true,
+ b: f32 = 3.14,
+ c: struct { u8 } = .{42},
+ d: i32 = 12345,
+ };
- try testParentFieldPtr(&foo.c);
- try comptime testParentFieldPtr(&foo.c);
+ {
+ const c: C = .{ .a = false };
+ const pcf = &c.a;
+ const pc: *const C = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = false };
+ const pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = false };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .a = false };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .b = 666.667 };
+ const pcf = &c.b;
+ const pc: *const C = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 666.667 };
+ const pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 666.667 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .b = 666.667 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .c = .{255} };
+ const pcf = &c.c;
+ const pc: *const C = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = .{255} };
+ const pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = .{255} };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .c = .{255} };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .d = -1111111111 };
+ const pcf = &c.d;
+ const pc: *const C = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .d = -1111111111 };
+ const pcf = &c.d;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .d = -1111111111 };
+ var pcf: @TypeOf(&c.d) = undefined;
+ pcf = &c.d;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .d = -1111111111 };
+ var pcf: @TypeOf(&c.d) = undefined;
+ pcf = &c.d;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
}
-test "@fieldParentPtr first field" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+test "@fieldParentPtr extern struct" {
+ const C = extern struct {
+ a: bool = true,
+ b: f32 = 3.14,
+ c: extern struct { x: u8 } = .{ .x = 42 },
+ d: i32 = 12345,
+ };
+
+ {
+ const c: C = .{ .a = false };
+ const pcf = &c.a;
+ const pc: *const C = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = false };
+ const pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = false };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .a = false };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .b = 666.667 };
+ const pcf = &c.b;
+ const pc: *const C = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 666.667 };
+ const pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 666.667 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .b = 666.667 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
- try testParentFieldPtrFirst(&foo.a);
- try comptime testParentFieldPtrFirst(&foo.a);
+ {
+ const c: C = .{ .c = .{ .x = 255 } };
+ const pcf = &c.c;
+ const pc: *const C = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = .{ .x = 255 } };
+ const pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = .{ .x = 255 } };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .c = .{ .x = 255 } };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .d = -1111111111 };
+ const pcf = &c.d;
+ const pc: *const C = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .d = -1111111111 };
+ const pcf = &c.d;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .d = -1111111111 };
+ var pcf: @TypeOf(&c.d) = undefined;
+ pcf = &c.d;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .d = -1111111111 };
+ var pcf: @TypeOf(&c.d) = undefined;
+ pcf = &c.d;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
}
-const Foo = struct {
- a: bool,
- b: f32,
- c: i32,
- d: i32,
-};
+test "@fieldParentPtr extern struct first zero-bit field" {
+ const C = extern struct {
+ a: u0 = 0,
+ b: f32 = 3.14,
+ c: i32 = 12345,
+ };
-const foo = Foo{
- .a = true,
- .b = 0.123,
- .c = 1234,
- .d = -10,
-};
+ {
+ const c: C = .{ .a = 0 };
+ const pcf = &c.a;
+ const pc: *const C = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = 0 };
+ const pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = 0 };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .a = 0 };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
-fn testParentFieldPtr(c: *const i32) !void {
- try expect(c == &foo.c);
+ {
+ const c: C = .{ .b = 666.667 };
+ const pcf = &c.b;
+ const pc: *const C = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 666.667 };
+ const pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 666.667 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .b = 666.667 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
- const base = @fieldParentPtr(Foo, "c", c);
- try expect(base == &foo);
- try expect(&base.c == c);
+ {
+ const c: C = .{ .c = -1111111111 };
+ const pcf = &c.c;
+ const pc: *const C = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = -1111111111 };
+ const pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = -1111111111 };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .c = -1111111111 };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
}
-fn testParentFieldPtrFirst(a: *const bool) !void {
- try expect(a == &foo.a);
+test "@fieldParentPtr extern struct middle zero-bit field" {
+ const C = extern struct {
+ a: f32 = 3.14,
+ b: u0 = 0,
+ c: i32 = 12345,
+ };
+
+ {
+ const c: C = .{ .a = 666.667 };
+ const pcf = &c.a;
+ const pc: *const C = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = 666.667 };
+ const pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = 666.667 };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .a = 666.667 };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
- const base = @fieldParentPtr(Foo, "a", a);
- try expect(base == &foo);
- try expect(&base.a == a);
+ {
+ const c: C = .{ .b = 0 };
+ const pcf = &c.b;
+ const pc: *const C = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 0 };
+ const pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 0 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .b = 0 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .c = -1111111111 };
+ const pcf = &c.c;
+ const pc: *const C = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = -1111111111 };
+ const pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = -1111111111 };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .c = -1111111111 };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
}
-test "@fieldParentPtr untagged union" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+test "@fieldParentPtr extern struct last zero-bit field" {
+ const C = extern struct {
+ a: f32 = 3.14,
+ b: i32 = 12345,
+ c: u0 = 0,
+ };
+
+ {
+ const c: C = .{ .a = 666.667 };
+ const pcf = &c.a;
+ const pc: *const C = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = 666.667 };
+ const pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = 666.667 };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .a = 666.667 };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .b = -1111111111 };
+ const pcf = &c.b;
+ const pc: *const C = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = -1111111111 };
+ const pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = -1111111111 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .b = -1111111111 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .c = 0 };
+ const pcf = &c.c;
+ const pc: *const C = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = 0 };
+ const pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = 0 };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .c = 0 };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+}
+
+test "@fieldParentPtr unaligned packed struct" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+
+ const C = packed struct {
+ a: bool = true,
+ b: f32 = 3.14,
+ c: packed struct { x: u8 } = .{ .x = 42 },
+ d: i32 = 12345,
+ };
+
+ {
+ const c: C = .{ .a = false };
+ const pcf = &c.a;
+ const pc: *const C = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = false };
+ const pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = false };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .a = false };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .b = 666.667 };
+ const pcf = &c.b;
+ const pc: *const C = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 666.667 };
+ const pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 666.667 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .b = 666.667 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
- try testFieldParentPtrUnion(&bar.c);
- try comptime testFieldParentPtrUnion(&bar.c);
+ {
+ const c: C = .{ .c = .{ .x = 255 } };
+ const pcf = &c.c;
+ const pc: *const C = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = .{ .x = 255 } };
+ const pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = .{ .x = 255 } };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .c = .{ .x = 255 } };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .d = -1111111111 };
+ const pcf = &c.d;
+ const pc: *const C = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .d = -1111111111 };
+ const pcf = &c.d;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .d = -1111111111 };
+ var pcf: @TypeOf(&c.d) = undefined;
+ pcf = &c.d;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .d = -1111111111 };
+ var pcf: @TypeOf(&c.d) = undefined;
+ pcf = &c.d;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
}
-const Bar = union(enum) {
- a: bool,
- b: f32,
- c: i32,
- d: i32,
-};
+test "@fieldParentPtr aligned packed struct" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+
+ const C = packed struct {
+ a: f32 = 3.14,
+ b: i32 = 12345,
+ c: packed struct { x: u8 } = .{ .x = 42 },
+ d: bool = true,
+ };
+
+ {
+ const c: C = .{ .a = 666.667 };
+ const pcf = &c.a;
+ const pc: *const C = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = 666.667 };
+ const pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = 666.667 };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .a = 666.667 };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .b = -1111111111 };
+ const pcf = &c.b;
+ const pc: *const C = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = -1111111111 };
+ const pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = -1111111111 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .b = -1111111111 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .c = .{ .x = 255 } };
+ const pcf = &c.c;
+ const pc: *const C = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = .{ .x = 255 } };
+ const pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = .{ .x = 255 } };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .c = .{ .x = 255 } };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .d = false };
+ const pcf = &c.d;
+ const pc: *const C = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .d = false };
+ const pcf = &c.d;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .d = false };
+ var pcf: @TypeOf(&c.d) = undefined;
+ pcf = &c.d;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .d = false };
+ var pcf: @TypeOf(&c.d) = undefined;
+ pcf = &c.d;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+}
+
+test "@fieldParentPtr nested packed struct" {
+ if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+
+ {
+ const C = packed struct {
+ a: u8,
+ b: packed struct {
+ a: u8,
+ b: packed struct {
+ a: u8,
+ },
+ },
+ };
+
+ {
+ const c: C = .{ .a = 0, .b = .{ .a = 0, .b = .{ .a = 0 } } };
+ const pcbba = &c.b.b.a;
+ const pcbb: @TypeOf(&c.b.b) = @alignCast(@fieldParentPtr("a", pcbba));
+ try expect(pcbb == &c.b.b);
+ const pcb: @TypeOf(&c.b) = @alignCast(@fieldParentPtr("b", pcbb));
+ try expect(pcb == &c.b);
+ const pc: *const C = @alignCast(@fieldParentPtr("b", pcb));
+ try expect(pc == &c);
+ }
+
+ {
+ var c: C = undefined;
+ c = .{ .a = 0, .b = .{ .a = 0, .b = .{ .a = 0 } } };
+ var pcbba: @TypeOf(&c.b.b.a) = undefined;
+ pcbba = &c.b.b.a;
+ var pcbb: @TypeOf(&c.b.b) = undefined;
+ pcbb = @alignCast(@fieldParentPtr("a", pcbba));
+ try expect(pcbb == &c.b.b);
+ var pcb: @TypeOf(&c.b) = undefined;
+ pcb = @alignCast(@fieldParentPtr("b", pcbb));
+ try expect(pcb == &c.b);
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcb));
+ try expect(pc == &c);
+ }
+ }
+
+ {
+ const C = packed struct {
+ a: u8,
+ b: packed struct {
+ a: u9,
+ b: packed struct {
+ a: u8,
+ },
+ },
+ };
+
+ {
+ const c: C = .{ .a = 0, .b = .{ .a = 0, .b = .{ .a = 0 } } };
+ const pcbba = &c.b.b.a;
+ const pcbb: @TypeOf(&c.b.b) = @alignCast(@fieldParentPtr("a", pcbba));
+ try expect(pcbb == &c.b.b);
+ const pcb: @TypeOf(&c.b) = @alignCast(@fieldParentPtr("b", pcbb));
+ try expect(pcb == &c.b);
+ const pc: *const C = @alignCast(@fieldParentPtr("b", pcb));
+ try expect(pc == &c);
+ }
+
+ {
+ var c: C = undefined;
+ c = .{ .a = 0, .b = .{ .a = 0, .b = .{ .a = 0 } } };
+ var pcbba: @TypeOf(&c.b.b.a) = undefined;
+ pcbba = &c.b.b.a;
+ var pcbb: @TypeOf(&c.b.b) = undefined;
+ pcbb = @alignCast(@fieldParentPtr("a", pcbba));
+ try expect(pcbb == &c.b.b);
+ var pcb: @TypeOf(&c.b) = undefined;
+ pcb = @alignCast(@fieldParentPtr("b", pcbb));
+ try expect(pcb == &c.b);
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcb));
+ try expect(pc == &c);
+ }
+ }
+
+ {
+ const C = packed struct {
+ a: u9,
+ b: packed struct {
+ a: u7,
+ b: packed struct {
+ a: u8,
+ },
+ },
+ };
+
+ {
+ const c: C = .{ .a = 0, .b = .{ .a = 0, .b = .{ .a = 0 } } };
+ const pcbba = &c.b.b.a;
+ const pcbb: @TypeOf(&c.b.b) = @alignCast(@fieldParentPtr("a", pcbba));
+ try expect(pcbb == &c.b.b);
+ const pcb: @TypeOf(&c.b) = @alignCast(@fieldParentPtr("b", pcbb));
+ try expect(pcb == &c.b);
+ const pc: *const C = @alignCast(@fieldParentPtr("b", pcb));
+ try expect(pc == &c);
+ }
+
+ {
+ var c: C = undefined;
+ c = .{ .a = 0, .b = .{ .a = 0, .b = .{ .a = 0 } } };
+ var pcbba: @TypeOf(&c.b.b.a) = undefined;
+ pcbba = &c.b.b.a;
+ var pcbb: @TypeOf(&c.b.b) = undefined;
+ pcbb = @alignCast(@fieldParentPtr("a", pcbba));
+ try expect(pcbb == &c.b.b);
+ var pcb: @TypeOf(&c.b) = undefined;
+ pcb = @alignCast(@fieldParentPtr("b", pcbb));
+ try expect(pcb == &c.b);
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcb));
+ try expect(pc == &c);
+ }
+ }
-const bar = Bar{ .c = 42 };
+ {
+ const C = packed struct {
+ a: u9,
+ b: packed struct {
+ a: u8,
+ b: packed struct {
+ a: u8,
+ },
+ },
+ };
-fn testFieldParentPtrUnion(c: *const i32) !void {
- try expect(c == &bar.c);
+ {
+ const c: C = .{ .a = 0, .b = .{ .a = 0, .b = .{ .a = 0 } } };
+ const pcbba = &c.b.b.a;
+ const pcbb: @TypeOf(&c.b.b) = @alignCast(@fieldParentPtr("a", pcbba));
+ try expect(pcbb == &c.b.b);
+ const pcb: @TypeOf(&c.b) = @alignCast(@fieldParentPtr("b", pcbb));
+ try expect(pcb == &c.b);
+ const pc: *const C = @alignCast(@fieldParentPtr("b", pcb));
+ try expect(pc == &c);
+ }
- const base = @fieldParentPtr(Bar, "c", c);
- try expect(base == &bar);
- try expect(&base.c == c);
+ {
+ var c: C = undefined;
+ c = .{ .a = 0, .b = .{ .a = 0, .b = .{ .a = 0 } } };
+ var pcbba: @TypeOf(&c.b.b.a) = undefined;
+ pcbba = &c.b.b.a;
+ var pcbb: @TypeOf(&c.b.b) = undefined;
+ pcbb = @alignCast(@fieldParentPtr("a", pcbba));
+ try expect(pcbb == &c.b.b);
+ var pcb: @TypeOf(&c.b) = undefined;
+ pcb = @alignCast(@fieldParentPtr("b", pcbb));
+ try expect(pcb == &c.b);
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcb));
+ try expect(pc == &c);
+ }
+ }
+}
+
+test "@fieldParentPtr packed struct first zero-bit field" {
+ if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+
+ const C = packed struct {
+ a: u0 = 0,
+ b: f32 = 3.14,
+ c: i32 = 12345,
+ };
+
+ {
+ const c: C = .{ .a = 0 };
+ const pcf = &c.a;
+ const pc: *const C = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = 0 };
+ const pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = 0 };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .a = 0 };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .b = 666.667 };
+ const pcf = &c.b;
+ const pc: *const C = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 666.667 };
+ const pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 666.667 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .b = 666.667 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .c = -1111111111 };
+ const pcf = &c.c;
+ const pc: *const C = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = -1111111111 };
+ const pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = -1111111111 };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .c = -1111111111 };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+}
+
+test "@fieldParentPtr packed struct middle zero-bit field" {
+ if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+
+ const C = packed struct {
+ a: f32 = 3.14,
+ b: u0 = 0,
+ c: i32 = 12345,
+ };
+
+ {
+ const c: C = .{ .a = 666.667 };
+ const pcf = &c.a;
+ const pc: *const C = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = 666.667 };
+ const pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = 666.667 };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .a = 666.667 };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .b = 0 };
+ const pcf = &c.b;
+ const pc: *const C = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 0 };
+ const pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 0 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .b = 0 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .c = -1111111111 };
+ const pcf = &c.c;
+ const pc: *const C = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = -1111111111 };
+ const pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = -1111111111 };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .c = -1111111111 };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+}
+
+test "@fieldParentPtr packed struct last zero-bit field" {
+ if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+
+ const C = packed struct {
+ a: f32 = 3.14,
+ b: i32 = 12345,
+ c: u0 = 0,
+ };
+
+ {
+ const c: C = .{ .a = 666.667 };
+ const pcf = &c.a;
+ const pc: *const C = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = 666.667 };
+ const pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = 666.667 };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .a = 666.667 };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .b = -1111111111 };
+ const pcf = &c.b;
+ const pc: *const C = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = -1111111111 };
+ const pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = -1111111111 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .b = -1111111111 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .c = 0 };
+ const pcf = &c.c;
+ const pc: *const C = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = 0 };
+ const pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = 0 };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .c = 0 };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
}
test "@fieldParentPtr tagged union" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ const C = union(enum) {
+ a: bool,
+ b: f32,
+ c: struct { u8 },
+ d: i32,
+ };
+
+ {
+ const c: C = .{ .a = false };
+ const pcf = &c.a;
+ const pc: *const C = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = false };
+ const pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = false };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .a = false };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .b = 0 };
+ const pcf = &c.b;
+ const pc: *const C = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 0 };
+ const pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 0 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .b = 0 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .c = .{255} };
+ const pcf = &c.c;
+ const pc: *const C = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = .{255} };
+ const pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = .{255} };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .c = .{255} };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
- try testFieldParentPtrTaggedUnion(&bar_tagged.c);
- try comptime testFieldParentPtrTaggedUnion(&bar_tagged.c);
+ {
+ const c: C = .{ .d = -1111111111 };
+ const pcf = &c.d;
+ const pc: *const C = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .d = -1111111111 };
+ const pcf = &c.d;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .d = -1111111111 };
+ var pcf: @TypeOf(&c.d) = undefined;
+ pcf = &c.d;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .d = -1111111111 };
+ var pcf: @TypeOf(&c.d) = undefined;
+ pcf = &c.d;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
}
-const BarTagged = union(enum) {
- a: bool,
- b: f32,
- c: i32,
- d: i32,
-};
+test "@fieldParentPtr untagged union" {
+ const C = union {
+ a: bool,
+ b: f32,
+ c: struct { u8 },
+ d: i32,
+ };
+
+ {
+ const c: C = .{ .a = false };
+ const pcf = &c.a;
+ const pc: *const C = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = false };
+ const pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = false };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .a = false };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
-const bar_tagged = BarTagged{ .c = 42 };
+ {
+ const c: C = .{ .b = 0 };
+ const pcf = &c.b;
+ const pc: *const C = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 0 };
+ const pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 0 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .b = 0 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
-fn testFieldParentPtrTaggedUnion(c: *const i32) !void {
- try expect(c == &bar_tagged.c);
+ {
+ const c: C = .{ .c = .{255} };
+ const pcf = &c.c;
+ const pc: *const C = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = .{255} };
+ const pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = .{255} };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .c = .{255} };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
- const base = @fieldParentPtr(BarTagged, "c", c);
- try expect(base == &bar_tagged);
- try expect(&base.c == c);
+ {
+ const c: C = .{ .d = -1111111111 };
+ const pcf = &c.d;
+ const pc: *const C = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .d = -1111111111 };
+ const pcf = &c.d;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .d = -1111111111 };
+ var pcf: @TypeOf(&c.d) = undefined;
+ pcf = &c.d;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .d = -1111111111 };
+ var pcf: @TypeOf(&c.d) = undefined;
+ pcf = &c.d;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
}
test "@fieldParentPtr extern union" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ const C = extern union {
+ a: bool,
+ b: f32,
+ c: extern struct { x: u8 },
+ d: i32,
+ };
+
+ {
+ const c: C = .{ .a = false };
+ const pcf = &c.a;
+ const pc: *const C = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = false };
+ const pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = false };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .a = false };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .b = 0 };
+ const pcf = &c.b;
+ const pc: *const C = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 0 };
+ const pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 0 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .b = 0 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .c = .{ .x = 255 } };
+ const pcf = &c.c;
+ const pc: *const C = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = .{ .x = 255 } };
+ const pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = .{ .x = 255 } };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .c = .{ .x = 255 } };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .d = -1111111111 };
+ const pcf = &c.d;
+ const pc: *const C = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .d = -1111111111 };
+ const pcf = &c.d;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .d = -1111111111 };
+ var pcf: @TypeOf(&c.d) = undefined;
+ pcf = &c.d;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .d = -1111111111 };
+ var pcf: @TypeOf(&c.d) = undefined;
+ pcf = &c.d;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+}
+
+test "@fieldParentPtr packed union" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+
+ const C = packed union {
+ a: bool,
+ b: f32,
+ c: packed struct { x: u8 },
+ d: i32,
+ };
+
+ {
+ const c: C = .{ .a = false };
+ const pcf = &c.a;
+ const pc: *const C = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = false };
+ const pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = false };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .a = false };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .b = 0 };
+ const pcf = &c.b;
+ const pc: *const C = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 0 };
+ const pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 0 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .b = 0 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+
+ {
+ const c: C = .{ .c = .{ .x = 255 } };
+ const pcf = &c.c;
+ const pc: *const C = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = .{ .x = 255 } };
+ const pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .c = .{ .x = 255 } };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .c = .{ .x = 255 } };
+ var pcf: @TypeOf(&c.c) = undefined;
+ pcf = &c.c;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("c", pcf));
+ try expect(pc == &c);
+ }
- try testFieldParentPtrExternUnion(&bar_extern.c);
- try comptime testFieldParentPtrExternUnion(&bar_extern.c);
+ {
+ const c: C = .{ .d = -1111111111 };
+ const pcf = &c.d;
+ const pc: *const C = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .d = -1111111111 };
+ const pcf = &c.d;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .d = -1111111111 };
+ var pcf: @TypeOf(&c.d) = undefined;
+ pcf = &c.d;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .d = -1111111111 };
+ var pcf: @TypeOf(&c.d) = undefined;
+ pcf = &c.d;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("d", pcf));
+ try expect(pc == &c);
+ }
}
-const BarExtern = extern union {
- a: bool,
- b: f32,
- c: i32,
- d: i32,
-};
+test "@fieldParentPtr tagged union all zero-bit fields" {
+ if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
-const bar_extern = BarExtern{ .c = 42 };
+ const C = union(enum) {
+ a: u0,
+ b: i0,
+ };
-fn testFieldParentPtrExternUnion(c: *const i32) !void {
- try expect(c == &bar_extern.c);
+ {
+ const c: C = .{ .a = 0 };
+ const pcf = &c.a;
+ const pc: *const C = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = 0 };
+ const pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .a = 0 };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .a = 0 };
+ var pcf: @TypeOf(&c.a) = undefined;
+ pcf = &c.a;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("a", pcf));
+ try expect(pc == &c);
+ }
- const base = @fieldParentPtr(BarExtern, "c", c);
- try expect(base == &bar_extern);
- try expect(&base.c == c);
+ {
+ const c: C = .{ .b = 0 };
+ const pcf = &c.b;
+ const pc: *const C = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 0 };
+ const pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ const c: C = .{ .b = 0 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *const C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
+ {
+ var c: C = undefined;
+ c = .{ .b = 0 };
+ var pcf: @TypeOf(&c.b) = undefined;
+ pcf = &c.b;
+ var pc: *C = undefined;
+ pc = @alignCast(@fieldParentPtr("b", pcf));
+ try expect(pc == &c);
+ }
}
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index f85e783342a1..5ab3b0d38d54 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -1392,13 +1392,13 @@ test "fieldParentPtr of a zero-bit field" {
{
const a = A{ .u = 0 };
const b_ptr = &a.b;
- const a_ptr = @fieldParentPtr(A, "b", b_ptr);
+ const a_ptr: *const A = @fieldParentPtr("b", b_ptr);
try std.testing.expectEqual(&a, a_ptr);
}
{
var a = A{ .u = 0 };
const b_ptr = &a.b;
- const a_ptr = @fieldParentPtr(A, "b", b_ptr);
+ const a_ptr: *A = @fieldParentPtr("b", b_ptr);
try std.testing.expectEqual(&a, a_ptr);
}
}
@@ -1406,17 +1406,17 @@ test "fieldParentPtr of a zero-bit field" {
{
const a = A{ .u = 0 };
const c_ptr = &a.b.c;
- const b_ptr = @fieldParentPtr(@TypeOf(a.b), "c", c_ptr);
+ const b_ptr: @TypeOf(&a.b) = @fieldParentPtr("c", c_ptr);
try std.testing.expectEqual(&a.b, b_ptr);
- const a_ptr = @fieldParentPtr(A, "b", b_ptr);
+ const a_ptr: *const A = @fieldParentPtr("b", b_ptr);
try std.testing.expectEqual(&a, a_ptr);
}
{
var a = A{ .u = 0 };
const c_ptr = &a.b.c;
- const b_ptr = @fieldParentPtr(@TypeOf(a.b), "c", c_ptr);
+ const b_ptr: @TypeOf(&a.b) = @fieldParentPtr("c", c_ptr);
try std.testing.expectEqual(&a.b, b_ptr);
- const a_ptr = @fieldParentPtr(A, "b", b_ptr);
+ const a_ptr: *const A = @fieldParentPtr("b", b_ptr);
try std.testing.expectEqual(&a, a_ptr);
}
}
diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig
index bfd913774f3d..2b52df45a1c1 100644
--- a/test/behavior/tuple.zig
+++ b/test/behavior/tuple.zig
@@ -222,7 +222,7 @@ test "fieldParentPtr of tuple" {
var x: u32 = 0;
_ = &x;
const tuple = .{ x, x };
- try testing.expect(&tuple == @fieldParentPtr(@TypeOf(tuple), "1", &tuple[1]));
+ try testing.expect(&tuple == @as(@TypeOf(&tuple), @fieldParentPtr("1", &tuple[1])));
}
test "fieldParentPtr of anon struct" {
@@ -233,7 +233,7 @@ test "fieldParentPtr of anon struct" {
var x: u32 = 0;
_ = &x;
const anon_st = .{ .foo = x, .bar = x };
- try testing.expect(&anon_st == @fieldParentPtr(@TypeOf(anon_st), "bar", &anon_st.bar));
+ try testing.expect(&anon_st == @as(@TypeOf(&anon_st), @fieldParentPtr("bar", &anon_st.bar)));
}
test "offsetOf tuple" {
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 4b2ab52c599e..042ee5a986fe 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -1176,18 +1176,22 @@ test "@shlWithOverflow" {
test "alignment of vectors" {
try expect(@alignOf(@Vector(2, u8)) == switch (builtin.zig_backend) {
else => 2,
+ .stage2_c => @alignOf(u8),
.stage2_x86_64 => 16,
});
try expect(@alignOf(@Vector(2, u1)) == switch (builtin.zig_backend) {
else => 1,
+ .stage2_c => @alignOf(u1),
.stage2_x86_64 => 16,
});
try expect(@alignOf(@Vector(1, u1)) == switch (builtin.zig_backend) {
else => 1,
+ .stage2_c => @alignOf(u1),
.stage2_x86_64 => 16,
});
try expect(@alignOf(@Vector(2, u16)) == switch (builtin.zig_backend) {
else => 4,
+ .stage2_c => @alignOf(u16),
.stage2_x86_64 => 16,
});
}
diff --git a/test/cases/compile_errors/fieldParentPtr-bad_field_name.zig b/test/cases/compile_errors/fieldParentPtr-bad_field_name.zig
index d3e487d3ce68..c897db4abb9e 100644
--- a/test/cases/compile_errors/fieldParentPtr-bad_field_name.zig
+++ b/test/cases/compile_errors/fieldParentPtr-bad_field_name.zig
@@ -2,12 +2,12 @@ const Foo = extern struct {
derp: i32,
};
export fn foo(a: *i32) *Foo {
- return @fieldParentPtr(Foo, "a", a);
+ return @fieldParentPtr("a", a);
}
// error
// backend=stage2
// target=native
//
-// :5:33: error: no field named 'a' in struct 'tmp.Foo'
+// :5:28: error: no field named 'a' in struct 'tmp.Foo'
// :1:20: note: struct declared here
diff --git a/test/cases/compile_errors/fieldParentPtr-comptime_field_ptr_not_based_on_struct.zig b/test/cases/compile_errors/fieldParentPtr-comptime_field_ptr_not_based_on_struct.zig
index 2147fb8aed6b..7eeb62b14631 100644
--- a/test/cases/compile_errors/fieldParentPtr-comptime_field_ptr_not_based_on_struct.zig
+++ b/test/cases/compile_errors/fieldParentPtr-comptime_field_ptr_not_based_on_struct.zig
@@ -9,7 +9,7 @@ const foo = Foo{
comptime {
const field_ptr: *i32 = @ptrFromInt(0x1234);
- const another_foo_ptr = @fieldParentPtr(Foo, "b", field_ptr);
+ const another_foo_ptr: *const Foo = @fieldParentPtr("b", field_ptr);
_ = another_foo_ptr;
}
@@ -17,4 +17,4 @@ comptime {
// backend=stage2
// target=native
//
-// :12:55: error: pointer value not based on parent struct
+// :12:62: error: pointer value not based on parent struct
diff --git a/test/cases/compile_errors/fieldParentPtr-comptime_wrong_field_index.zig b/test/cases/compile_errors/fieldParentPtr-comptime_wrong_field_index.zig
index 7a37eb2adcdf..4ee347494100 100644
--- a/test/cases/compile_errors/fieldParentPtr-comptime_wrong_field_index.zig
+++ b/test/cases/compile_errors/fieldParentPtr-comptime_wrong_field_index.zig
@@ -8,7 +8,7 @@ const foo = Foo{
};
comptime {
- const another_foo_ptr = @fieldParentPtr(Foo, "b", &foo.a);
+ const another_foo_ptr: *const Foo = @fieldParentPtr("b", &foo.a);
_ = another_foo_ptr;
}
@@ -16,5 +16,5 @@ comptime {
// backend=stage2
// target=native
//
-// :11:29: error: field 'b' has index '1' but pointer value is index '0' of struct 'tmp.Foo'
+// :11:41: error: field 'b' has index '1' but pointer value is index '0' of struct 'tmp.Foo'
// :1:13: note: struct declared here
diff --git a/test/cases/compile_errors/fieldParentPtr-field_pointer_is_not_pointer.zig b/test/cases/compile_errors/fieldParentPtr-field_pointer_is_not_pointer.zig
index 8a57d08c3ba1..832269268a51 100644
--- a/test/cases/compile_errors/fieldParentPtr-field_pointer_is_not_pointer.zig
+++ b/test/cases/compile_errors/fieldParentPtr-field_pointer_is_not_pointer.zig
@@ -1,12 +1,12 @@
const Foo = extern struct {
a: i32,
};
-export fn foo(a: i32) *Foo {
- return @fieldParentPtr(Foo, "a", a);
+export fn foo(a: i32) *const Foo {
+ return @fieldParentPtr("a", a);
}
// error
// backend=stage2
// target=native
//
-// :5:38: error: expected pointer type, found 'i32'
+// :5:33: error: expected pointer type, found 'i32'
diff --git a/test/cases/compile_errors/fieldParentPtr-non_pointer.zig b/test/cases/compile_errors/fieldParentPtr-non_pointer.zig
new file mode 100644
index 000000000000..31f15783d8ea
--- /dev/null
+++ b/test/cases/compile_errors/fieldParentPtr-non_pointer.zig
@@ -0,0 +1,10 @@
+const Foo = i32;
+export fn foo(a: *i32) Foo {
+ return @fieldParentPtr("a", a);
+}
+
+// error
+// backend=llvm
+// target=native
+//
+// :3:12: error: expected pointer type, found 'i32'
diff --git a/test/cases/compile_errors/fieldParentPtr-non_struct.zig b/test/cases/compile_errors/fieldParentPtr-non_struct.zig
deleted file mode 100644
index 7950c885371d..000000000000
--- a/test/cases/compile_errors/fieldParentPtr-non_struct.zig
+++ /dev/null
@@ -1,10 +0,0 @@
-const Foo = i32;
-export fn foo(a: *i32) *Foo {
- return @fieldParentPtr(Foo, "a", a);
-}
-
-// error
-// backend=llvm
-// target=native
-//
-// :3:28: error: expected struct or union type, found 'i32'
diff --git a/test/cases/compile_errors/fieldParentPtr_on_comptime_field.zig b/test/cases/compile_errors/fieldParentPtr_on_comptime_field.zig
index fb95ea691c4a..ff18791c4972 100644
--- a/test/cases/compile_errors/fieldParentPtr_on_comptime_field.zig
+++ b/test/cases/compile_errors/fieldParentPtr_on_comptime_field.zig
@@ -5,7 +5,7 @@ pub export fn entry1() void {
@offsetOf(T, "a");
}
pub export fn entry2() void {
- @fieldParentPtr(T, "a", undefined);
+ @as(*T, @fieldParentPtr("a", undefined));
}
// error
@@ -13,4 +13,4 @@ pub export fn entry2() void {
// target=native
//
// :5:5: error: no offset available for comptime field
-// :8:5: error: cannot get @fieldParentPtr of a comptime field
+// :8:29: error: cannot get @fieldParentPtr of a comptime field
diff --git a/test/cases/compile_errors/increase_pointer_alignment_in_ptrCast.zig b/test/cases/compile_errors/increase_pointer_alignment_in_ptrCast.zig
index 22bd90b0684c..bb52c544214b 100644
--- a/test/cases/compile_errors/increase_pointer_alignment_in_ptrCast.zig
+++ b/test/cases/compile_errors/increase_pointer_alignment_in_ptrCast.zig
@@ -8,7 +8,7 @@ export fn entry() u32 {
// backend=stage2
// target=native
//
-// :3:23: error: cast increases pointer alignment
+// :3:23: error: @ptrCast increases pointer alignment
// :3:32: note: '*u8' has alignment '1'
// :3:23: note: '*u32' has alignment '4'
// :3:23: note: use @alignCast to assert pointer alignment
diff --git a/test/cases/compile_errors/invalid_bit_pointer.zig b/test/cases/compile_errors/invalid_bit_pointer.zig
new file mode 100644
index 000000000000..e05aaad07a1d
--- /dev/null
+++ b/test/cases/compile_errors/invalid_bit_pointer.zig
@@ -0,0 +1,13 @@
+comptime {
+ _ = *align(1:32:4) u8;
+}
+comptime {
+ _ = *align(1:25:4) u8;
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :2:18: error: packed type 'u8' at bit offset 32 starts 0 bits after the end of a 4 byte host integer
+// :5:18: error: packed type 'u8' at bit offset 25 ends 1 bits after the end of a 4 byte host integer
diff --git a/test/cases/compile_errors/nested_ptr_cast_bad_operand.zig b/test/cases/compile_errors/nested_ptr_cast_bad_operand.zig
index ec7ee3075ce4..ca594aa37a00 100644
--- a/test/cases/compile_errors/nested_ptr_cast_bad_operand.zig
+++ b/test/cases/compile_errors/nested_ptr_cast_bad_operand.zig
@@ -16,7 +16,7 @@ export fn c() void {
//
// :3:45: error: null pointer casted to type '*const u32'
// :6:34: error: expected pointer type, found 'comptime_int'
-// :9:22: error: cast increases pointer alignment
+// :9:22: error: @ptrCast increases pointer alignment
// :9:71: note: '?*const u8' has alignment '1'
// :9:22: note: '?*f32' has alignment '4'
// :9:22: note: use @alignCast to assert pointer alignment
diff --git a/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig b/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig
index a704ea456b41..0c4bb0a1e24a 100644
--- a/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig
+++ b/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig
@@ -8,5 +8,5 @@ export fn entry() void {
// backend=stage2
// target=native
//
-// :3:21: error: cast discards const qualifier
+// :3:21: error: @ptrCast discards const qualifier
// :3:21: note: use @constCast to discard const qualifier
diff --git a/test/standalone/cmakedefine/build.zig b/test/standalone/cmakedefine/build.zig
index 52fda369a647..967aa7ecbd96 100644
--- a/test/standalone/cmakedefine/build.zig
+++ b/test/standalone/cmakedefine/build.zig
@@ -86,7 +86,7 @@ fn compare_headers(step: *std.Build.Step, prog_node: *std.Progress.Node) !void {
const expected_fmt = "expected_{s}";
for (step.dependencies.items) |config_header_step| {
- const config_header = @fieldParentPtr(ConfigHeader, "step", config_header_step);
+ const config_header: *ConfigHeader = @fieldParentPtr("step", config_header_step);
const zig_header_path = config_header.output_file.path orelse @panic("Could not locate header file");
diff --git a/test/tests.zig b/test/tests.zig
index 525c6792b524..0c847e6e7fac 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -1164,19 +1164,26 @@ pub fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *Step {
compile_c.addCSourceFile(.{
.file = these_tests.getEmittedBin(),
.flags = &.{
- // TODO output -std=c89 compatible C code
+ // Tracking issue for making the C backend generate C89 compatible code:
+ // https://github.com/ziglang/zig/issues/19468
"-std=c99",
"-pedantic",
"-Werror",
- // TODO stop violating these pedantic errors. spotted everywhere
+
+ // Tracking issue for making the C backend generate code
+ // that does not trigger warnings:
+ // https://github.com/ziglang/zig/issues/19467
+
+ // spotted everywhere
"-Wno-builtin-requires-header",
- // TODO stop violating these pedantic errors. spotted on linux
- "-Wno-address-of-packed-member",
+
+ // spotted on linux
"-Wno-gnu-folding-constant",
"-Wno-incompatible-function-pointer-types",
"-Wno-incompatible-pointer-types",
"-Wno-overlength-strings",
- // TODO stop violating these pedantic errors. spotted on darwin
+
+ // spotted on darwin
"-Wno-dollar-in-identifier-extension",
"-Wno-absolute-value",
},
diff --git a/tools/lldb_pretty_printers.py b/tools/lldb_pretty_printers.py
index ebd5b0b3d0c8..e8263fbc2103 100644
--- a/tools/lldb_pretty_printers.py
+++ b/tools/lldb_pretty_printers.py
@@ -354,7 +354,7 @@ def InstRef_SummaryProvider(value, _=None):
def InstIndex_SummaryProvider(value, _=None):
return 'instructions[%d]' % value.unsigned
-class Module_Decl__Module_Decl_Index_SynthProvider:
+class zig_DeclIndex_SynthProvider:
def __init__(self, value, _=None): self.value = value
def update(self):
try:
@@ -425,7 +425,7 @@ def InternPool_Find(thread):
for frame in thread:
ip = frame.FindVariable('ip') or frame.FindVariable('intern_pool')
if ip: return ip
- mod = frame.FindVariable('mod') or frame.FindVariable('module')
+ mod = frame.FindVariable('zcu') or frame.FindVariable('mod') or frame.FindVariable('module')
if mod:
ip = mod.GetChildMemberWithName('intern_pool')
if ip: return ip
@@ -617,7 +617,7 @@ def type_Type_SummaryProvider(value, _=None):
def value_Value_str_lit(payload):
for frame in payload.thread:
- mod = frame.FindVariable('mod') or frame.FindVariable('module')
+ mod = frame.FindVariable('zcu') or frame.FindVariable('mod') or frame.FindVariable('module')
if mod: break
else: return
return '"%s"' % zig_String_decode(mod.GetChildMemberWithName('string_literal_bytes').GetChildMemberWithName('items'), payload.GetChildMemberWithName('index').unsigned, payload.GetChildMemberWithName('len').unsigned)
@@ -714,7 +714,7 @@ def __lldb_init_module(debugger, _=None):
add(debugger, category='zig.stage2', type='Air.Inst::Air.Inst.Index', identifier='InstIndex', summary=True)
add(debugger, category='zig.stage2', regex=True, type=MultiArrayList_Entry('Air\\.Inst'), identifier='TagAndPayload', synth=True, inline_children=True, summary=True)
add(debugger, category='zig.stage2', regex=True, type='^Air\\.Inst\\.Data\\.Data__struct_[1-9][0-9]*$', inline_children=True, summary=True)
- add(debugger, category='zig.stage2', type='Module.Decl::Module.Decl.Index', synth=True)
+ add(debugger, category='zig.stage2', type='zig.DeclIndex', synth=True)
add(debugger, category='zig.stage2', type='Module.Namespace::Module.Namespace.Index', synth=True)
add(debugger, category='zig.stage2', type='Module.LazySrcLoc', identifier='zig_TaggedUnion', synth=True)
add(debugger, category='zig.stage2', type='InternPool.Index', synth=True)