@@ -5281,7 +5281,7 @@ pub const FuncGen = struct {
             try attributes.addParamAttr(0, .{ .sret = llvm_ret_ty }, &o.builder);
 
             const alignment = return_type.abiAlignment(zcu).toLlvm();
-            const ret_ptr = try self.buildAllocaWorkaround(return_type, alignment);
+            const ret_ptr = try self.buildAlloca(llvm_ret_ty, alignment);
             try llvm_args.append(ret_ptr);
             break :blk ret_ptr;
         };
@@ -5329,7 +5329,7 @@ pub const FuncGen = struct {
 
                 const alignment = param_ty.abiAlignment(zcu).toLlvm();
                 const param_llvm_ty = try o.lowerType(param_ty);
-                const arg_ptr = try self.buildAllocaWorkaround(param_ty, alignment);
+                const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment);
                 if (isByRef(param_ty, zcu)) {
                     const loaded = try self.wip.load(.normal, param_llvm_ty, llvm_arg, alignment, "");
                     _ = try self.wip.store(.normal, loaded, arg_ptr, alignment);
@@ -5352,7 +5352,7 @@ pub const FuncGen = struct {
                 // LLVM does not allow bitcasting structs so we must allocate
                 // a local, store as one type, and then load as another type.
                 const alignment = param_ty.abiAlignment(zcu).toLlvm();
-                const int_ptr = try self.buildAllocaWorkaround(param_ty, alignment);
+                const int_ptr = try self.buildAlloca(int_llvm_ty, alignment);
                 _ = try self.wip.store(.normal, llvm_arg, int_ptr, alignment);
                 const loaded = try self.wip.load(.normal, int_llvm_ty, int_ptr, alignment, "");
                 try llvm_args.append(loaded);
@@ -5727,7 +5727,7 @@ pub const FuncGen = struct {
         const llvm_va_list_ty = try o.lowerType(va_list_ty);
 
         const result_alignment = va_list_ty.abiAlignment(pt.zcu).toLlvm();
-        const dest_list = try self.buildAllocaWorkaround(va_list_ty, result_alignment);
+        const dest_list = try self.buildAlloca(llvm_va_list_ty, result_alignment);
 
         _ = try self.wip.callIntrinsic(.normal, .none, .va_copy, &.{dest_list.typeOfWip(&self.wip)}, &.{ dest_list, src_list }, "");
         return if (isByRef(va_list_ty, zcu))
@@ -5752,7 +5752,7 @@ pub const FuncGen = struct {
         const llvm_va_list_ty = try o.lowerType(va_list_ty);
 
         const result_alignment = va_list_ty.abiAlignment(pt.zcu).toLlvm();
-        const dest_list = try self.buildAllocaWorkaround(va_list_ty, result_alignment);
+        const dest_list = try self.buildAlloca(llvm_va_list_ty, result_alignment);
 
         _ = try self.wip.callIntrinsic(.normal, .none, .va_start, &.{dest_list.typeOfWip(&self.wip)}, &.{dest_list}, "");
         return if (isByRef(va_list_ty, zcu))
@@ -8030,7 +8030,7 @@ pub const FuncGen = struct {
             self.ret_ptr
         else brk: {
             const alignment = optional_ty.abiAlignment(zcu).toLlvm();
-            const optional_ptr = try self.buildAllocaWorkaround(optional_ty, alignment);
+            const optional_ptr = try self.buildAlloca(llvm_optional_ty, alignment);
             break :brk optional_ptr;
         };
 
@@ -8067,7 +8067,7 @@ pub const FuncGen = struct {
             self.ret_ptr
         else brk: {
             const alignment = err_un_ty.abiAlignment(pt.zcu).toLlvm();
-            const result_ptr = try self.buildAllocaWorkaround(err_un_ty, alignment);
+            const result_ptr = try self.buildAlloca(err_un_llvm_ty, alignment);
             break :brk result_ptr;
         };
 
@@ -8106,7 +8106,7 @@ pub const FuncGen = struct {
             self.ret_ptr
         else brk: {
             const alignment = err_un_ty.abiAlignment(zcu).toLlvm();
-            const result_ptr = try self.buildAllocaWorkaround(err_un_ty, alignment);
+            const result_ptr = try self.buildAlloca(err_un_llvm_ty, alignment);
             break :brk result_ptr;
         };
 
@@ -8640,7 +8640,7 @@ pub const FuncGen = struct {
 
         if (isByRef(inst_ty, zcu)) {
             const result_alignment = inst_ty.abiAlignment(zcu).toLlvm();
-            const alloca_inst = try self.buildAllocaWorkaround(inst_ty, result_alignment);
+            const alloca_inst = try self.buildAlloca(llvm_inst_ty, result_alignment);
             {
                 const field_ptr = try self.wip.gepStruct(llvm_inst_ty, alloca_inst, result_index, "");
                 _ = try self.wip.store(.normal, result_val, field_ptr, result_alignment);
@@ -9000,7 +9000,7 @@ pub const FuncGen = struct {
 
         if (isByRef(dest_ty, zcu)) {
             const result_alignment = dest_ty.abiAlignment(zcu).toLlvm();
-            const alloca_inst = try self.buildAllocaWorkaround(dest_ty, result_alignment);
+            const alloca_inst = try self.buildAlloca(llvm_dest_ty, result_alignment);
             {
                 const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, result_index, "");
                 _ = try self.wip.store(.normal, result, field_ptr, result_alignment);
@@ -9425,7 +9425,7 @@ pub const FuncGen = struct {
                 return self.ng.todo("implement bitcast vector to non-ref array", .{});
             }
             const alignment = inst_ty.abiAlignment(zcu).toLlvm();
-            const array_ptr = try self.buildAllocaWorkaround(inst_ty, alignment);
+            const array_ptr = try self.buildAlloca(llvm_dest_ty, alignment);
             const bitcast_ok = elem_ty.bitSize(zcu) == elem_ty.abiSize(zcu) * 8;
             if (bitcast_ok) {
                 _ = try self.wip.store(.normal, operand, array_ptr, alignment);
@@ -9486,7 +9486,7 @@ pub const FuncGen = struct {
 
         if (result_is_ref) {
             const alignment = operand_ty.abiAlignment(zcu).max(inst_ty.abiAlignment(zcu)).toLlvm();
-            const result_ptr = try self.buildAllocaWorkaround(inst_ty, alignment);
+            const result_ptr = try self.buildAlloca(llvm_dest_ty, alignment);
             _ = try self.wip.store(.normal, operand, result_ptr, alignment);
             return result_ptr;
         }
@@ -9499,7 +9499,7 @@ pub const FuncGen = struct {
             // but LLVM won't let us bitcast struct values or vectors with padding bits.
             // Therefore, we store operand to alloca, then load for result.
             const alignment = operand_ty.abiAlignment(zcu).max(inst_ty.abiAlignment(zcu)).toLlvm();
-            const result_ptr = try self.buildAllocaWorkaround(inst_ty, alignment);
+            const result_ptr = try self.buildAlloca(llvm_dest_ty, alignment);
             _ = try self.wip.store(.normal, operand, result_ptr, alignment);
             return self.wip.load(.normal, llvm_dest_ty, result_ptr, alignment, "");
         }
@@ -9608,9 +9608,9 @@ pub const FuncGen = struct {
         if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(zcu))
             return (try o.lowerPtrToVoid(ptr_ty)).toValue();
 
-        // const pointee_llvm_ty = try o.lowerType(pointee_type);
+        const pointee_llvm_ty = try o.lowerType(pointee_type);
         const alignment = ptr_ty.ptrAlignment(zcu).toLlvm();
-        return self.buildAllocaWorkaround(pointee_type, alignment);
+        return self.buildAlloca(pointee_llvm_ty, alignment);
     }
 
     fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
@@ -9622,9 +9622,9 @@ pub const FuncGen = struct {
         if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu))
             return (try o.lowerPtrToVoid(ptr_ty)).toValue();
         if (self.ret_ptr != .none) return self.ret_ptr;
-        // const ret_llvm_ty = try o.lowerType(ret_ty);
+        const ret_llvm_ty = try o.lowerType(ret_ty);
         const alignment = ptr_ty.ptrAlignment(zcu).toLlvm();
-        return self.buildAllocaWorkaround(ret_ty, alignment);
+        return self.buildAlloca(ret_llvm_ty, alignment);
     }
 
     /// Use this instead of builder.buildAlloca, because this function makes sure to
@@ -9638,16 +9638,6 @@ pub const FuncGen = struct {
         return buildAllocaInner(&self.wip, llvm_ty, alignment, target);
     }
 
-    // Workaround for https://github.com/ziglang/zig/issues/16392
-    fn buildAllocaWorkaround(
-        self: *FuncGen,
-        ty: Type,
-        alignment: Builder.Alignment,
-    ) Allocator.Error!Builder.Value {
-        const o = self.ng.object;
-        return self.buildAlloca(try o.builder.arrayType(ty.abiSize(o.pt.zcu), .i8), alignment);
-    }
-
     fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
@@ -10686,7 +10676,7 @@ pub const FuncGen = struct {
         const llvm_result_ty = accum_init.typeOfWip(&self.wip);
 
         // Allocate and initialize our mutable variables
-        const i_ptr = try self.buildAllocaWorkaround(Type.usize, .default);
+        const i_ptr = try self.buildAlloca(usize_ty, .default);
         _ = try self.wip.store(.normal, try o.builder.intValue(usize_ty, 0), i_ptr, .default);
         const accum_ptr = try self.buildAlloca(llvm_result_ty, .default);
         _ = try self.wip.store(.normal, accum_init, accum_ptr, .default);
@@ -10899,7 +10889,7 @@ pub const FuncGen = struct {
             // TODO in debug builds init to undef so that the padding will be 0xaa
             // even if we fully populate the fields.
             const alignment = result_ty.abiAlignment(zcu).toLlvm();
-            const alloca_inst = try self.buildAllocaWorkaround(result_ty, alignment);
+            const alloca_inst = try self.buildAlloca(llvm_result_ty, alignment);
 
             for (elements, 0..) |elem, i| {
                 if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue;
@@ -10936,7 +10926,7 @@ pub const FuncGen = struct {
             const llvm_usize = try o.lowerType(Type.usize);
             const usize_zero = try o.builder.intValue(llvm_usize, 0);
             const alignment = result_ty.abiAlignment(zcu).toLlvm();
-            const alloca_inst = try self.buildAllocaWorkaround(result_ty, alignment);
+            const alloca_inst = try self.buildAlloca(llvm_result_ty, alignment);
 
             const array_info = result_ty.arrayInfo(zcu);
             const elem_ptr_ty = try pt.ptrType(.{
@@ -11011,7 +11001,7 @@ pub const FuncGen = struct {
         // We must construct the correct unnamed struct type here, in order to then set
         // the fields appropriately.
         const alignment = layout.abi_align.toLlvm();
-        const result_ptr = try self.buildAllocaWorkaround(union_ty, alignment);
+        const result_ptr = try self.buildAlloca(union_llvm_ty, alignment);
         const llvm_payload = try self.resolveInst(extra.init);
         const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
         const field_llvm_ty = try o.lowerType(field_ty);
@@ -11308,7 +11298,7 @@ pub const FuncGen = struct {
 
         if (isByRef(optional_ty, zcu)) {
             const payload_alignment = optional_ty.abiAlignment(pt.zcu).toLlvm();
-            const alloca_inst = try self.buildAllocaWorkaround(optional_ty, payload_alignment);
+            const alloca_inst = try self.buildAlloca(optional_llvm_ty, payload_alignment);
 
             {
                 const field_ptr = try self.wip.gepStruct(optional_llvm_ty, alloca_inst, 0, "");
@@ -11451,10 +11441,10 @@ pub const FuncGen = struct {
     ) !Builder.Value {
         const o = fg.ng.object;
         const pt = o.pt;
-        // const pointee_llvm_ty = try o.lowerType(pointee_type);
+        const pointee_llvm_ty = try o.lowerType(pointee_type);
         const result_align = InternPool.Alignment.fromLlvm(ptr_alignment)
             .max(pointee_type.abiAlignment(pt.zcu)).toLlvm();
-        const result_ptr = try fg.buildAllocaWorkaround(pointee_type, result_align);
+        const result_ptr = try fg.buildAlloca(pointee_llvm_ty, result_align);
         const size_bytes = pointee_type.abiSize(pt.zcu);
         _ = try fg.wip.callMemCpy(
             result_ptr,
@@ -11515,7 +11505,7 @@ pub const FuncGen = struct {
 
         if (isByRef(elem_ty, zcu)) {
             const result_align = elem_ty.abiAlignment(zcu).toLlvm();
-            const result_ptr = try self.buildAllocaWorkaround(elem_ty, result_align);
+            const result_ptr = try self.buildAlloca(elem_llvm_ty, result_align);
 
             const same_size_int = try o.builder.intType(@intCast(elem_bits));
             const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, "");
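
The sketch below is not part of the diff above; it only restates the call-site pattern being swapped, reusing identifiers that appear in the hunks (FuncGen, Object, Type, Builder). The wrapper function and its parameter list are purely illustrative.

// Illustrative sketch, not from the commit. Before this change, by-ref
// temporaries were allocated as an [abi_size x i8] byte array, the workaround
// for https://github.com/ziglang/zig/issues/16392; now the lowered LLVM type
// is passed to buildAlloca directly.
fn allocaPatternSketch(fg: *FuncGen, o: *Object, ty: Type, alignment: Builder.Alignment) !Builder.Value {
    // Old pattern (the removed buildAllocaWorkaround): an untyped byte-array alloca.
    _ = try fg.buildAlloca(try o.builder.arrayType(ty.abiSize(o.pt.zcu), .i8), alignment);

    // New pattern: lower the Zig type and allocate with the real LLVM type.
    const llvm_ty = try o.lowerType(ty);
    return fg.buildAlloca(llvm_ty, alignment);
}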