diff --git a/lib/compiler_rt/common.zig b/lib/compiler_rt/common.zig index a75b7d1d0005..1f95d31c037c 100644 --- a/lib/compiler_rt/common.zig +++ b/lib/compiler_rt/common.zig @@ -17,6 +17,10 @@ pub const want_aeabi = switch (builtin.abi) { }; pub const want_ppc_abi = builtin.cpu.arch.isPPC() or builtin.cpu.arch.isPPC64(); +// Libcalls that involve u128 on Windows x86-64 are expected by LLVM to use the +// calling convention of @Vector(2, u64), rather than what's standard. +pub const want_windows_v2u64_abi = builtin.os.tag == .windows and builtin.cpu.arch == .x86_64; + /// This governs whether to use these symbol names for f16/f32 conversions /// rather than the standard names: /// * __gnu_f2h_ieee diff --git a/lib/compiler_rt/fixdfti.zig b/lib/compiler_rt/fixdfti.zig index b2476ce2f3f1..532b27107208 100644 --- a/lib/compiler_rt/fixdfti.zig +++ b/lib/compiler_rt/fixdfti.zig @@ -1,12 +1,23 @@ +const builtin = @import("builtin"); const common = @import("./common.zig"); const floatToInt = @import("./float_to_int.zig").floatToInt; pub const panic = common.panic; comptime { - @export(__fixdfti, .{ .name = "__fixdfti", .linkage = common.linkage }); + if (common.want_windows_v2u64_abi) { + @export(__fixdfti_windows_x86_64, .{ .name = "__fixdfti", .linkage = common.linkage }); + } else { + @export(__fixdfti, .{ .name = "__fixdfti", .linkage = common.linkage }); + } } pub fn __fixdfti(a: f64) callconv(.C) i128 { return floatToInt(i128, a); } + +const v2u64 = @Vector(2, u64); + +fn __fixdfti_windows_x86_64(a: f64) callconv(.C) v2u64 { + return @bitCast(v2u64, floatToInt(i128, a)); +} diff --git a/lib/compiler_rt/fixhfti.zig b/lib/compiler_rt/fixhfti.zig index 36fc1bf60758..b6774968ddc3 100644 --- a/lib/compiler_rt/fixhfti.zig +++ b/lib/compiler_rt/fixhfti.zig @@ -1,12 +1,23 @@ +const builtin = @import("builtin"); const common = @import("./common.zig"); const floatToInt = @import("./float_to_int.zig").floatToInt; pub const panic = common.panic; comptime { - @export(__fixhfti, 
.{ .name = "__fixhfti", .linkage = common.linkage }); + if (common.want_windows_v2u64_abi) { + @export(__fixhfti_windows_x86_64, .{ .name = "__fixhfti", .linkage = common.linkage }); + } else { + @export(__fixhfti, .{ .name = "__fixhfti", .linkage = common.linkage }); + } } -fn __fixhfti(a: f16) callconv(.C) i128 { +pub fn __fixhfti(a: f16) callconv(.C) i128 { return floatToInt(i128, a); } + +const v2u64 = @Vector(2, u64); + +fn __fixhfti_windows_x86_64(a: f16) callconv(.C) v2u64 { + return @bitCast(v2u64, floatToInt(i128, a)); +} diff --git a/lib/compiler_rt/fixsfti.zig b/lib/compiler_rt/fixsfti.zig index 4bf68ec8b0c9..5aa4068b6265 100644 --- a/lib/compiler_rt/fixsfti.zig +++ b/lib/compiler_rt/fixsfti.zig @@ -1,12 +1,23 @@ +const builtin = @import("builtin"); const common = @import("./common.zig"); const floatToInt = @import("./float_to_int.zig").floatToInt; pub const panic = common.panic; comptime { - @export(__fixsfti, .{ .name = "__fixsfti", .linkage = common.linkage }); + if (common.want_windows_v2u64_abi) { + @export(__fixsfti_windows_x86_64, .{ .name = "__fixsfti", .linkage = common.linkage }); + } else { + @export(__fixsfti, .{ .name = "__fixsfti", .linkage = common.linkage }); + } } pub fn __fixsfti(a: f32) callconv(.C) i128 { return floatToInt(i128, a); } + +const v2u64 = @Vector(2, u64); + +fn __fixsfti_windows_x86_64(a: f32) callconv(.C) v2u64 { + return @bitCast(v2u64, floatToInt(i128, a)); +} diff --git a/lib/compiler_rt/fixtfti.zig b/lib/compiler_rt/fixtfti.zig index 9ba761729e58..ba46eb85980a 100644 --- a/lib/compiler_rt/fixtfti.zig +++ b/lib/compiler_rt/fixtfti.zig @@ -1,12 +1,23 @@ +const builtin = @import("builtin"); const common = @import("./common.zig"); const floatToInt = @import("./float_to_int.zig").floatToInt; pub const panic = common.panic; comptime { - @export(__fixtfti, .{ .name = "__fixtfti", .linkage = common.linkage }); + if (common.want_windows_v2u64_abi) { + @export(__fixtfti_windows_x86_64, .{ .name = "__fixtfti", .linkage = 
common.linkage }); + } else { + @export(__fixtfti, .{ .name = "__fixtfti", .linkage = common.linkage }); + } } pub fn __fixtfti(a: f128) callconv(.C) i128 { return floatToInt(i128, a); } + +const v2u64 = @Vector(2, u64); + +fn __fixtfti_windows_x86_64(a: f128) callconv(.C) v2u64 { + return @bitCast(v2u64, floatToInt(i128, a)); +} diff --git a/lib/compiler_rt/fixunsdfti.zig b/lib/compiler_rt/fixunsdfti.zig index ce3c4aabddd2..00a89ba2d6f0 100644 --- a/lib/compiler_rt/fixunsdfti.zig +++ b/lib/compiler_rt/fixunsdfti.zig @@ -1,12 +1,23 @@ +const builtin = @import("builtin"); const common = @import("./common.zig"); const floatToInt = @import("./float_to_int.zig").floatToInt; pub const panic = common.panic; comptime { - @export(__fixunsdfti, .{ .name = "__fixunsdfti", .linkage = common.linkage }); + if (common.want_windows_v2u64_abi) { + @export(__fixunsdfti_windows_x86_64, .{ .name = "__fixunsdfti", .linkage = common.linkage }); + } else { + @export(__fixunsdfti, .{ .name = "__fixunsdfti", .linkage = common.linkage }); + } } pub fn __fixunsdfti(a: f64) callconv(.C) u128 { return floatToInt(u128, a); } + +const v2u64 = @Vector(2, u64); + +fn __fixunsdfti_windows_x86_64(a: f64) callconv(.C) v2u64 { + return @bitCast(v2u64, floatToInt(u128, a)); +} diff --git a/lib/compiler_rt/fixunshfti.zig b/lib/compiler_rt/fixunshfti.zig index b804c52f9613..4f5179bfb5bf 100644 --- a/lib/compiler_rt/fixunshfti.zig +++ b/lib/compiler_rt/fixunshfti.zig @@ -1,12 +1,23 @@ +const builtin = @import("builtin"); const common = @import("./common.zig"); const floatToInt = @import("./float_to_int.zig").floatToInt; pub const panic = common.panic; comptime { - @export(__fixunshfti, .{ .name = "__fixunshfti", .linkage = common.linkage }); + if (common.want_windows_v2u64_abi) { + @export(__fixunshfti_windows_x86_64, .{ .name = "__fixunshfti", .linkage = common.linkage }); + } else { + @export(__fixunshfti, .{ .name = "__fixunshfti", .linkage = common.linkage }); + } } pub fn __fixunshfti(a: f16) 
callconv(.C) u128 { return floatToInt(u128, a); } + +const v2u64 = @Vector(2, u64); + +fn __fixunshfti_windows_x86_64(a: f16) callconv(.C) v2u64 { + return @bitCast(v2u64, floatToInt(u128, a)); +} diff --git a/lib/compiler_rt/fixunssfti.zig b/lib/compiler_rt/fixunssfti.zig index 7b1965b5abf6..0d3fa5d3b965 100644 --- a/lib/compiler_rt/fixunssfti.zig +++ b/lib/compiler_rt/fixunssfti.zig @@ -1,12 +1,23 @@ +const builtin = @import("builtin"); const common = @import("./common.zig"); const floatToInt = @import("./float_to_int.zig").floatToInt; pub const panic = common.panic; comptime { - @export(__fixunssfti, .{ .name = "__fixunssfti", .linkage = common.linkage }); + if (common.want_windows_v2u64_abi) { + @export(__fixunssfti_windows_x86_64, .{ .name = "__fixunssfti", .linkage = common.linkage }); + } else { + @export(__fixunssfti, .{ .name = "__fixunssfti", .linkage = common.linkage }); + } } pub fn __fixunssfti(a: f32) callconv(.C) u128 { return floatToInt(u128, a); } + +const v2u64 = @Vector(2, u64); + +fn __fixunssfti_windows_x86_64(a: f32) callconv(.C) v2u64 { + return @bitCast(v2u64, floatToInt(u128, a)); +} diff --git a/lib/compiler_rt/fixunstfti.zig b/lib/compiler_rt/fixunstfti.zig index 5e39db106527..02cabd4d46bb 100644 --- a/lib/compiler_rt/fixunstfti.zig +++ b/lib/compiler_rt/fixunstfti.zig @@ -1,12 +1,23 @@ +const builtin = @import("builtin"); const common = @import("./common.zig"); const floatToInt = @import("./float_to_int.zig").floatToInt; pub const panic = common.panic; comptime { - @export(__fixunstfti, .{ .name = "__fixunstfti", .linkage = common.linkage }); + if (common.want_windows_v2u64_abi) { + @export(__fixunstfti_windows_x86_64, .{ .name = "__fixunstfti", .linkage = common.linkage }); + } else { + @export(__fixunstfti, .{ .name = "__fixunstfti", .linkage = common.linkage }); + } } pub fn __fixunstfti(a: f128) callconv(.C) u128 { return floatToInt(u128, a); } + +const v2u64 = @Vector(2, u64); + +fn __fixunstfti_windows_x86_64(a: 
f128) callconv(.C) v2u64 { + return @bitCast(v2u64, floatToInt(u128, a)); +} diff --git a/lib/compiler_rt/fixunsxfti.zig b/lib/compiler_rt/fixunsxfti.zig index acd41469be13..1cf5891f92ce 100644 --- a/lib/compiler_rt/fixunsxfti.zig +++ b/lib/compiler_rt/fixunsxfti.zig @@ -1,12 +1,23 @@ +const builtin = @import("builtin"); const common = @import("./common.zig"); const floatToInt = @import("./float_to_int.zig").floatToInt; pub const panic = common.panic; comptime { - @export(__fixunsxfti, .{ .name = "__fixunsxfti", .linkage = common.linkage }); + if (common.want_windows_v2u64_abi) { + @export(__fixunsxfti_windows_x86_64, .{ .name = "__fixunsxfti", .linkage = common.linkage }); + } else { + @export(__fixunsxfti, .{ .name = "__fixunsxfti", .linkage = common.linkage }); + } } pub fn __fixunsxfti(a: f80) callconv(.C) u128 { return floatToInt(u128, a); } + +const v2u64 = @Vector(2, u64); + +fn __fixunsxfti_windows_x86_64(a: f80) callconv(.C) v2u64 { + return @bitCast(v2u64, floatToInt(u128, a)); +} diff --git a/lib/compiler_rt/fixxfti.zig b/lib/compiler_rt/fixxfti.zig index fb547f411566..9a40ec3d6a13 100644 --- a/lib/compiler_rt/fixxfti.zig +++ b/lib/compiler_rt/fixxfti.zig @@ -1,12 +1,23 @@ +const builtin = @import("builtin"); const common = @import("./common.zig"); const floatToInt = @import("./float_to_int.zig").floatToInt; pub const panic = common.panic; comptime { - @export(__fixxfti, .{ .name = "__fixxfti", .linkage = common.linkage }); + if (common.want_windows_v2u64_abi) { + @export(__fixxfti_windows_x86_64, .{ .name = "__fixxfti", .linkage = common.linkage }); + } else { + @export(__fixxfti, .{ .name = "__fixxfti", .linkage = common.linkage }); + } } -fn __fixxfti(a: f80) callconv(.C) i128 { +pub fn __fixxfti(a: f80) callconv(.C) i128 { return floatToInt(i128, a); } + +const v2u64 = @Vector(2, u64); + +fn __fixxfti_windows_x86_64(a: f80) callconv(.C) v2u64 { + return @bitCast(v2u64, floatToInt(i128, a)); +} diff --git a/lib/compiler_rt/floattidf.zig 
b/lib/compiler_rt/floattidf.zig index 1f1ac2f2effe..31456948e9d8 100644 --- a/lib/compiler_rt/floattidf.zig +++ b/lib/compiler_rt/floattidf.zig @@ -1,12 +1,21 @@ +const builtin = @import("builtin"); const common = @import("./common.zig"); const intToFloat = @import("./int_to_float.zig").intToFloat; pub const panic = common.panic; comptime { - @export(__floattidf, .{ .name = "__floattidf", .linkage = common.linkage }); + if (common.want_windows_v2u64_abi) { + @export(__floattidf_windows_x86_64, .{ .name = "__floattidf", .linkage = common.linkage }); + } else { + @export(__floattidf, .{ .name = "__floattidf", .linkage = common.linkage }); + } } pub fn __floattidf(a: i128) callconv(.C) f64 { return intToFloat(f64, a); } + +fn __floattidf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f64 { + return intToFloat(f64, @bitCast(i128, a)); +} diff --git a/lib/compiler_rt/floattihf.zig b/lib/compiler_rt/floattihf.zig index c7e45c7d5371..3e33a0bd8a45 100644 --- a/lib/compiler_rt/floattihf.zig +++ b/lib/compiler_rt/floattihf.zig @@ -1,12 +1,21 @@ +const builtin = @import("builtin"); const common = @import("./common.zig"); const intToFloat = @import("./int_to_float.zig").intToFloat; pub const panic = common.panic; comptime { - @export(__floattihf, .{ .name = "__floattihf", .linkage = common.linkage }); + if (common.want_windows_v2u64_abi) { + @export(__floattihf_windows_x86_64, .{ .name = "__floattihf", .linkage = common.linkage }); + } else { + @export(__floattihf, .{ .name = "__floattihf", .linkage = common.linkage }); + } } -fn __floattihf(a: i128) callconv(.C) f16 { +pub fn __floattihf(a: i128) callconv(.C) f16 { return intToFloat(f16, a); } + +fn __floattihf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f16 { + return intToFloat(f16, @bitCast(i128, a)); +} diff --git a/lib/compiler_rt/floattisf.zig b/lib/compiler_rt/floattisf.zig index 5eb493d09b02..23ff0d16b402 100644 --- a/lib/compiler_rt/floattisf.zig +++ b/lib/compiler_rt/floattisf.zig @@ -1,12 +1,21 @@ +const 
builtin = @import("builtin"); const common = @import("./common.zig"); const intToFloat = @import("./int_to_float.zig").intToFloat; pub const panic = common.panic; comptime { - @export(__floattisf, .{ .name = "__floattisf", .linkage = common.linkage }); + if (common.want_windows_v2u64_abi) { + @export(__floattisf_windows_x86_64, .{ .name = "__floattisf", .linkage = common.linkage }); + } else { + @export(__floattisf, .{ .name = "__floattisf", .linkage = common.linkage }); + } } pub fn __floattisf(a: i128) callconv(.C) f32 { return intToFloat(f32, a); } + +fn __floattisf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f32 { + return intToFloat(f32, @bitCast(i128, a)); +} diff --git a/lib/compiler_rt/floattitf.zig b/lib/compiler_rt/floattitf.zig index 0764c2d2c2b2..c44473cc3d50 100644 --- a/lib/compiler_rt/floattitf.zig +++ b/lib/compiler_rt/floattitf.zig @@ -1,12 +1,21 @@ +const builtin = @import("builtin"); const common = @import("./common.zig"); const intToFloat = @import("./int_to_float.zig").intToFloat; pub const panic = common.panic; comptime { - @export(__floattitf, .{ .name = "__floattitf", .linkage = common.linkage }); + if (common.want_windows_v2u64_abi) { + @export(__floattitf_windows_x86_64, .{ .name = "__floattitf", .linkage = common.linkage }); + } else { + @export(__floattitf, .{ .name = "__floattitf", .linkage = common.linkage }); + } } pub fn __floattitf(a: i128) callconv(.C) f128 { return intToFloat(f128, a); } + +fn __floattitf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f128 { + return intToFloat(f128, @bitCast(i128, a)); +} diff --git a/lib/compiler_rt/floattixf.zig b/lib/compiler_rt/floattixf.zig index def9bef4d5e0..814880b9ab66 100644 --- a/lib/compiler_rt/floattixf.zig +++ b/lib/compiler_rt/floattixf.zig @@ -1,12 +1,21 @@ +const builtin = @import("builtin"); const common = @import("./common.zig"); const intToFloat = @import("./int_to_float.zig").intToFloat; pub const panic = common.panic; comptime { - @export(__floattixf, .{ .name = 
"__floattixf", .linkage = common.linkage }); + if (common.want_windows_v2u64_abi) { + @export(__floattixf_windows_x86_64, .{ .name = "__floattixf", .linkage = common.linkage }); + } else { + @export(__floattixf, .{ .name = "__floattixf", .linkage = common.linkage }); + } } -fn __floattixf(a: i128) callconv(.C) f80 { +pub fn __floattixf(a: i128) callconv(.C) f80 { return intToFloat(f80, a); } + +fn __floattixf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f80 { + return intToFloat(f80, @bitCast(i128, a)); +} diff --git a/lib/compiler_rt/floatuntidf.zig b/lib/compiler_rt/floatuntidf.zig index a77a952fe914..a00175d9a9f2 100644 --- a/lib/compiler_rt/floatuntidf.zig +++ b/lib/compiler_rt/floatuntidf.zig @@ -1,12 +1,21 @@ +const builtin = @import("builtin"); const common = @import("./common.zig"); const intToFloat = @import("./int_to_float.zig").intToFloat; pub const panic = common.panic; comptime { - @export(__floatuntidf, .{ .name = "__floatuntidf", .linkage = common.linkage }); + if (common.want_windows_v2u64_abi) { + @export(__floatuntidf_windows_x86_64, .{ .name = "__floatuntidf", .linkage = common.linkage }); + } else { + @export(__floatuntidf, .{ .name = "__floatuntidf", .linkage = common.linkage }); + } } pub fn __floatuntidf(a: u128) callconv(.C) f64 { return intToFloat(f64, a); } + +fn __floatuntidf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f64 { + return intToFloat(f64, @bitCast(u128, a)); +} diff --git a/lib/compiler_rt/floatuntihf.zig b/lib/compiler_rt/floatuntihf.zig index 0263b1da98bc..3cf7a32d270c 100644 --- a/lib/compiler_rt/floatuntihf.zig +++ b/lib/compiler_rt/floatuntihf.zig @@ -1,12 +1,21 @@ +const builtin = @import("builtin"); const common = @import("./common.zig"); const intToFloat = @import("./int_to_float.zig").intToFloat; pub const panic = common.panic; comptime { - @export(__floatuntihf, .{ .name = "__floatuntihf", .linkage = common.linkage }); + if (common.want_windows_v2u64_abi) { + @export(__floatuntihf_windows_x86_64, .{ .name = 
"__floatuntihf", .linkage = common.linkage }); + } else { + @export(__floatuntihf, .{ .name = "__floatuntihf", .linkage = common.linkage }); + } } -fn __floatuntihf(a: u128) callconv(.C) f16 { +pub fn __floatuntihf(a: u128) callconv(.C) f16 { return intToFloat(f16, a); } + +fn __floatuntihf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f16 { + return intToFloat(f16, @bitCast(u128, a)); +} diff --git a/lib/compiler_rt/floatuntisf.zig b/lib/compiler_rt/floatuntisf.zig index 3edf63698748..997d57293e7d 100644 --- a/lib/compiler_rt/floatuntisf.zig +++ b/lib/compiler_rt/floatuntisf.zig @@ -1,12 +1,21 @@ +const builtin = @import("builtin"); const common = @import("./common.zig"); const intToFloat = @import("./int_to_float.zig").intToFloat; pub const panic = common.panic; comptime { - @export(__floatuntisf, .{ .name = "__floatuntisf", .linkage = common.linkage }); + if (common.want_windows_v2u64_abi) { + @export(__floatuntisf_windows_x86_64, .{ .name = "__floatuntisf", .linkage = common.linkage }); + } else { + @export(__floatuntisf, .{ .name = "__floatuntisf", .linkage = common.linkage }); + } } pub fn __floatuntisf(a: u128) callconv(.C) f32 { return intToFloat(f32, a); } + +fn __floatuntisf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f32 { + return intToFloat(f32, @bitCast(u128, a)); +} diff --git a/lib/compiler_rt/floatuntitf.zig b/lib/compiler_rt/floatuntitf.zig index 1a755cccdb3b..eb5d7037e9be 100644 --- a/lib/compiler_rt/floatuntitf.zig +++ b/lib/compiler_rt/floatuntitf.zig @@ -1,13 +1,16 @@ +const builtin = @import("builtin"); const common = @import("./common.zig"); const intToFloat = @import("./int_to_float.zig").intToFloat; pub const panic = common.panic; comptime { - if (common.want_ppc_abi) { - @export(__floatuntikf, .{ .name = "__floatuntikf", .linkage = common.linkage }); + const symbol_name = if (common.want_ppc_abi) "__floatuntikf" else "__floatuntitf"; + + if (common.want_windows_v2u64_abi) { + @export(__floatuntitf_windows_x86_64, .{ .name = 
symbol_name, .linkage = common.linkage }); } else { - @export(__floatuntitf, .{ .name = "__floatuntitf", .linkage = common.linkage }); + @export(__floatuntitf, .{ .name = symbol_name, .linkage = common.linkage }); } } @@ -15,6 +18,6 @@ pub fn __floatuntitf(a: u128) callconv(.C) f128 { return intToFloat(f128, a); } -fn __floatuntikf(a: u128) callconv(.C) f128 { - return intToFloat(f128, a); +fn __floatuntitf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f128 { + return intToFloat(f128, @bitCast(u128, a)); } diff --git a/lib/compiler_rt/floatuntixf.zig b/lib/compiler_rt/floatuntixf.zig index 07017d1f5738..724af9507514 100644 --- a/lib/compiler_rt/floatuntixf.zig +++ b/lib/compiler_rt/floatuntixf.zig @@ -1,12 +1,21 @@ +const builtin = @import("builtin"); const common = @import("./common.zig"); const intToFloat = @import("./int_to_float.zig").intToFloat; pub const panic = common.panic; comptime { - @export(__floatuntixf, .{ .name = "__floatuntixf", .linkage = common.linkage }); + if (common.want_windows_v2u64_abi) { + @export(__floatuntixf_windows_x86_64, .{ .name = "__floatuntixf", .linkage = common.linkage }); + } else { + @export(__floatuntixf, .{ .name = "__floatuntixf", .linkage = common.linkage }); + } } pub fn __floatuntixf(a: u128) callconv(.C) f80 { return intToFloat(f80, a); } + +fn __floatuntixf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f80 { + return intToFloat(f80, @bitCast(u128, a)); +} diff --git a/lib/compiler_rt/modti3.zig b/lib/compiler_rt/modti3.zig index 5fa34938ff9b..9992f716eefd 100644 --- a/lib/compiler_rt/modti3.zig +++ b/lib/compiler_rt/modti3.zig @@ -5,27 +5,13 @@ const std = @import("std"); const builtin = @import("builtin"); const udivmod = @import("udivmod.zig").udivmod; -const arch = builtin.cpu.arch; const common = @import("common.zig"); pub const panic = common.panic; comptime { - if (builtin.os.tag == .windows) { - switch (arch) { - .i386 => { - @export(__modti3, .{ .name = "__modti3", .linkage = common.linkage }); - }, - 
.x86_64 => { - // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI - // that LLVM expects compiler-rt to have. - @export(__modti3_windows_x86_64, .{ .name = "__modti3", .linkage = common.linkage }); - }, - else => {}, - } - if (arch.isAARCH64()) { - @export(__modti3, .{ .name = "__modti3", .linkage = common.linkage }); - } + if (common.want_windows_v2u64_abi) { + @export(__modti3_windows_x86_64, .{ .name = "__modti3", .linkage = common.linkage }); } else { @export(__modti3, .{ .name = "__modti3", .linkage = common.linkage }); } @@ -35,10 +21,10 @@ pub fn __modti3(a: i128, b: i128) callconv(.C) i128 { return mod(a, b); } -const v128 = @import("std").meta.Vector(2, u64); +const v2u64 = @Vector(2, u64); -fn __modti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 { - return @bitCast(v128, mod(@bitCast(i128, a), @bitCast(i128, b))); +fn __modti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 { + return @bitCast(v2u64, mod(@bitCast(i128, a), @bitCast(i128, b))); } inline fn mod(a: i128, b: i128) i128 { diff --git a/lib/compiler_rt/multi3.zig b/lib/compiler_rt/multi3.zig index ba41cb79176b..42994a81bda2 100644 --- a/lib/compiler_rt/multi3.zig +++ b/lib/compiler_rt/multi3.zig @@ -4,25 +4,14 @@ const std = @import("std"); const builtin = @import("builtin"); -const arch = builtin.cpu.arch; const native_endian = builtin.cpu.arch.endian(); const common = @import("common.zig"); pub const panic = common.panic; comptime { - if (builtin.os.tag == .windows) { - switch (arch) { - .i386 => { - @export(__multi3, .{ .name = "__multi3", .linkage = common.linkage }); - }, - .x86_64 => { - // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI - // that LLVM expects compiler-rt to have. 
- @export(__multi3_windows_x86_64, .{ .name = "__multi3", .linkage = common.linkage }); - }, - else => {}, - } + if (common.want_windows_v2u64_abi) { + @export(__multi3_windows_x86_64, .{ .name = "__multi3", .linkage = common.linkage }); } else { @export(__multi3, .{ .name = "__multi3", .linkage = common.linkage }); } @@ -32,10 +21,10 @@ pub fn __multi3(a: i128, b: i128) callconv(.C) i128 { return mul(a, b); } -const v128 = @Vector(2, u64); +const v2u64 = @Vector(2, u64); -fn __multi3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 { - return @bitCast(v128, mul(@bitCast(i128, a), @bitCast(i128, b))); +fn __multi3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 { + return @bitCast(v2u64, mul(@bitCast(i128, a), @bitCast(i128, b))); } inline fn mul(a: i128, b: i128) i128 { diff --git a/lib/compiler_rt/udivmodti4.zig b/lib/compiler_rt/udivmodti4.zig index 911bf72eedd6..5ccaa787078f 100644 --- a/lib/compiler_rt/udivmodti4.zig +++ b/lib/compiler_rt/udivmodti4.zig @@ -1,24 +1,13 @@ const std = @import("std"); const builtin = @import("builtin"); const udivmod = @import("udivmod.zig").udivmod; -const arch = builtin.cpu.arch; const common = @import("common.zig"); pub const panic = common.panic; comptime { - if (builtin.os.tag == .windows) { - switch (arch) { - .i386 => { - @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = common.linkage }); - }, - .x86_64 => { - // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI - // that LLVM expects compiler-rt to have. 
- @export(__udivmodti4_windows_x86_64, .{ .name = "__udivmodti4", .linkage = common.linkage }); - }, - else => {}, - } + if (common.want_windows_v2u64_abi) { + @export(__udivmodti4_windows_x86_64, .{ .name = "__udivmodti4", .linkage = common.linkage }); } else { @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = common.linkage }); } @@ -28,10 +17,10 @@ pub fn __udivmodti4(a: u128, b: u128, maybe_rem: ?*u128) callconv(.C) u128 { return udivmod(u128, a, b, maybe_rem); } -const v128 = std.meta.Vector(2, u64); +const v2u64 = @Vector(2, u64); -fn __udivmodti4_windows_x86_64(a: v128, b: v128, maybe_rem: ?*u128) callconv(.C) v128 { - return @bitCast(v128, udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), maybe_rem)); +fn __udivmodti4_windows_x86_64(a: v2u64, b: v2u64, maybe_rem: ?*u128) callconv(.C) v2u64 { + return @bitCast(v2u64, udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), maybe_rem)); } test { diff --git a/lib/compiler_rt/udivti3.zig b/lib/compiler_rt/udivti3.zig index 3e908176bc44..094627ad92d1 100644 --- a/lib/compiler_rt/udivti3.zig +++ b/lib/compiler_rt/udivti3.zig @@ -1,27 +1,13 @@ const std = @import("std"); const builtin = @import("builtin"); const udivmod = @import("udivmod.zig").udivmod; -const arch = builtin.cpu.arch; const common = @import("common.zig"); pub const panic = common.panic; comptime { - if (builtin.os.tag == .windows) { - switch (arch) { - .i386 => { - @export(__udivti3, .{ .name = "__udivti3", .linkage = common.linkage }); - }, - .x86_64 => { - // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI - // that LLVM expects compiler-rt to have. 
- @export(__udivti3_windows_x86_64, .{ .name = "__udivti3", .linkage = common.linkage }); - }, - else => {}, - } - if (arch.isAARCH64()) { - @export(__udivti3, .{ .name = "__udivti3", .linkage = common.linkage }); - } + if (common.want_windows_v2u64_abi) { + @export(__udivti3_windows_x86_64, .{ .name = "__udivti3", .linkage = common.linkage }); } else { @export(__udivti3, .{ .name = "__udivti3", .linkage = common.linkage }); } @@ -31,8 +17,8 @@ pub fn __udivti3(a: u128, b: u128) callconv(.C) u128 { return udivmod(u128, a, b, null); } -const v128 = std.meta.Vector(2, u64); +const v2u64 = @Vector(2, u64); -fn __udivti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 { - return @bitCast(v128, udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), null)); +fn __udivti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 { + return @bitCast(v2u64, udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), null)); } diff --git a/lib/compiler_rt/umodti3.zig b/lib/compiler_rt/umodti3.zig index 65058a599e9d..a9aba96b7e4f 100644 --- a/lib/compiler_rt/umodti3.zig +++ b/lib/compiler_rt/umodti3.zig @@ -1,27 +1,13 @@ const std = @import("std"); const builtin = @import("builtin"); const udivmod = @import("udivmod.zig").udivmod; -const arch = builtin.cpu.arch; const common = @import("common.zig"); pub const panic = common.panic; comptime { - if (builtin.os.tag == .windows) { - switch (arch) { - .i386 => { - @export(__umodti3, .{ .name = "__umodti3", .linkage = common.linkage }); - }, - .x86_64 => { - // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI - // that LLVM expects compiler-rt to have. 
- @export(__umodti3_windows_x86_64, .{ .name = "__umodti3", .linkage = common.linkage }); - }, - else => {}, - } - if (arch.isAARCH64()) { - @export(__umodti3, .{ .name = "__umodti3", .linkage = common.linkage }); - } + if (common.want_windows_v2u64_abi) { + @export(__umodti3_windows_x86_64, .{ .name = "__umodti3", .linkage = common.linkage }); } else { @export(__umodti3, .{ .name = "__umodti3", .linkage = common.linkage }); } @@ -33,10 +19,10 @@ pub fn __umodti3(a: u128, b: u128) callconv(.C) u128 { return r; } -const v128 = std.meta.Vector(2, u64); +const v2u64 = @Vector(2, u64); -fn __umodti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 { +fn __umodti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 { var r: u128 = undefined; _ = udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), &r); - return @bitCast(v128, r); + return @bitCast(v2u64, r); } diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index 90e70a7c4163..f6b98be44dbc 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -2284,10 +2284,6 @@ test "float.hexadecimal.precision" { } test "float.decimal" { - if (builtin.zig_backend == .stage1 and builtin.os.tag == .windows) { - // https://github.com/ziglang/zig/issues/12063 - return error.SkipZigTest; - } try expectFmt("f64: 152314000000000000000000000000", "f64: {d}", .{@as(f64, 1.52314e+29)}); try expectFmt("f32: 0", "f32: {d}", .{@as(f32, 0.0)}); try expectFmt("f32: 0", "f32: {d:.0}", .{@as(f32, 0.0)}); @@ -2311,10 +2307,6 @@ test "float.decimal" { } test "float.libc.sanity" { - if (builtin.zig_backend == .stage1 and builtin.os.tag == .windows) { - // https://github.com/ziglang/zig/issues/12063 - return error.SkipZigTest; - } try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 916964781)))}); try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 925353389)))}); try expectFmt("f64: 0.10000", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1036831278)))}); diff --git a/src/codegen/llvm.zig 
b/src/codegen/llvm.zig index acd571a58bd1..b4e90503a46e 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -4802,7 +4802,7 @@ pub const FuncGen = struct { const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(target)); const rt_int_bits = compilerRtIntBits(operand_bits); const rt_int_ty = self.context.intType(rt_int_bits); - const extended = e: { + var extended = e: { if (operand_scalar_ty.isSignedInt()) { break :e self.builder.buildSExtOrBitCast(operand, rt_int_ty, ""); } else { @@ -4819,7 +4819,16 @@ pub const FuncGen = struct { compiler_rt_operand_abbrev, compiler_rt_dest_abbrev, }) catch unreachable; - const param_types = [1]*const llvm.Type{rt_int_ty}; + + var param_types = [1]*const llvm.Type{rt_int_ty}; + if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) { + // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard + // i128 calling convention to adhere to the ABI that LLVM expects compiler-rt to have. + const v2i64 = self.context.intType(64).vectorType(2); + extended = self.builder.buildBitCast(extended, v2i64, ""); + param_types = [1]*const llvm.Type{v2i64}; + } + const libc_fn = self.getLibcFunction(fn_name, ¶m_types, dest_llvm_ty); const params = [1]*const llvm.Value{extended}; @@ -4851,7 +4860,12 @@ pub const FuncGen = struct { } const rt_int_bits = compilerRtIntBits(@intCast(u16, dest_scalar_ty.bitSize(target))); - const libc_ret_ty = self.context.intType(rt_int_bits); + const ret_ty = self.context.intType(rt_int_bits); + const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: { + // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard + // i128 calling convention to adhere to the ABI that LLVM expects compiler-rt to have. 
+ break :b self.context.intType(64).vectorType(2); + } else ret_ty; const operand_bits = operand_scalar_ty.floatBits(target); const compiler_rt_operand_abbrev = compilerRtFloatAbbrev(operand_bits); @@ -4871,13 +4885,11 @@ pub const FuncGen = struct { const libc_fn = self.getLibcFunction(fn_name, ¶m_types, libc_ret_ty); const params = [1]*const llvm.Value{operand}; - const result = self.builder.buildCall(libc_fn, ¶ms, params.len, .C, .Auto, ""); - - if (libc_ret_ty == dest_llvm_ty) { - return result; - } + var result = self.builder.buildCall(libc_fn, ¶ms, params.len, .C, .Auto, ""); - return self.builder.buildTrunc(result, dest_llvm_ty, ""); + if (libc_ret_ty != ret_ty) result = self.builder.buildBitCast(result, ret_ty, ""); + if (ret_ty != dest_llvm_ty) result = self.builder.buildTrunc(result, dest_llvm_ty, ""); + return result; } fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*const llvm.Value { @@ -6490,8 +6502,15 @@ pub const FuncGen = struct { const one = int_llvm_ty.constInt(1, .False); const shift_amt = int_llvm_ty.constInt(float_bits - 1, .False); const sign_mask = one.constShl(shift_amt); - const bitcasted_operand = self.builder.buildBitCast(params[0], int_llvm_ty, ""); - const result = self.builder.buildXor(bitcasted_operand, sign_mask, ""); + const result = if (ty.zigTypeTag() == .Vector) blk: { + const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(), sign_mask, ""); + const cast_ty = int_llvm_ty.vectorType(ty.vectorLen()); + const bitcasted_operand = self.builder.buildBitCast(params[0], cast_ty, ""); + break :blk self.builder.buildXor(bitcasted_operand, splat_sign_mask, ""); + } else blk: { + const bitcasted_operand = self.builder.buildBitCast(params[0], int_llvm_ty, ""); + break :blk self.builder.buildXor(bitcasted_operand, sign_mask, ""); + }; return self.builder.buildBitCast(result, llvm_ty, ""); }, .add, .sub, .div, .mul => FloatOpStrat{ diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp index 
9c8269549b8c..81e5208e9190 100644 --- a/src/stage1/codegen.cpp +++ b/src/stage1/codegen.cpp @@ -3371,14 +3371,12 @@ static LLVMValueRef add_icmp(CodeGen *g, LLVMValueRef val, Icmp kind) { } static LLVMValueRef gen_soft_int_to_float_op(CodeGen *g, LLVMValueRef value_ref, ZigType *operand_type, ZigType *result_type) { - uint32_t vector_len = operand_type->id == ZigTypeIdVector ? operand_type->data.vector.len : 0; - // Handle integers of non-pot bitsize by widening them. const size_t bitsize = operand_type->data.integral.bit_count; const bool is_signed = operand_type->data.integral.is_signed; if (bitsize < 32 || !is_power_of_2(bitsize)) { const size_t wider_bitsize = bitsize < 32 ? 32 : round_to_next_power_of_2(bitsize); - ZigType *const wider_type = get_int_type(g, is_signed, wider_bitsize); + ZigType *wider_type = get_int_type(g, is_signed, wider_bitsize); value_ref = gen_widen_or_shorten(g, false, operand_type, wider_type, value_ref); operand_type = wider_type; } @@ -3395,35 +3393,22 @@ static LLVMValueRef gen_soft_int_to_float_op(CodeGen *g, LLVMValueRef value_ref, } int param_count = 1; - LLVMValueRef func_ref = get_soft_float_fn(g, fn_name, param_count, operand_type->llvm_type, result_type->llvm_type); - - LLVMValueRef result; - if (vector_len == 0) { - LLVMValueRef params[1] = {value_ref}; - result = LLVMBuildCall(g->builder, func_ref, params, param_count, ""); + LLVMValueRef func_ref; + if ((operand_type->data.integral.bit_count == 128) && (g->zig_target->os == OsWindows) && (g->zig_target->arch == ZigLLVM_x86_64)) { + // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard i128 calling + // convention to adhere to the ABI that LLVM expects compiler-rt to have. 
+ LLVMTypeRef v2i64 = LLVMVectorType(LLVMInt64Type(), 2); + value_ref = LLVMBuildBitCast(g->builder, value_ref, v2i64, ""); + func_ref = get_soft_float_fn(g, fn_name, param_count, v2i64, result_type->llvm_type); } else { - ZigType *alloca_ty = operand_type; - result = build_alloca(g, alloca_ty, "", 0); - - LLVMTypeRef usize_ref = g->builtin_types.entry_usize->llvm_type; - for (uint32_t i = 0; i < vector_len; i++) { - LLVMValueRef index_value = LLVMConstInt(usize_ref, i, false); - LLVMValueRef params[1] = { - LLVMBuildExtractElement(g->builder, value_ref, index_value, ""), - }; - LLVMValueRef call_result = LLVMBuildCall(g->builder, func_ref, params, param_count, ""); - LLVMBuildInsertElement(g->builder, LLVMBuildLoad(g->builder, result, ""), - call_result, index_value, ""); - } - - result = LLVMBuildLoad(g->builder, result, ""); + func_ref = get_soft_float_fn(g, fn_name, param_count, operand_type->llvm_type, result_type->llvm_type); } - return result; + + LLVMValueRef params[1] = {value_ref}; + return LLVMBuildCall(g->builder, func_ref, params, param_count, ""); } static LLVMValueRef gen_soft_float_to_int_op(CodeGen *g, LLVMValueRef value_ref, ZigType *operand_type, ZigType *result_type) { - uint32_t vector_len = operand_type->id == ZigTypeIdVector ? 
operand_type->data.vector.len : 0; - // Handle integers of non-pot bitsize by truncating a sufficiently wide pot integer const size_t bitsize = result_type->data.integral.bit_count; const bool is_signed = result_type->data.integral.is_signed; @@ -3445,46 +3430,41 @@ static LLVMValueRef gen_soft_float_to_int_op(CodeGen *g, LLVMValueRef value_ref, } int param_count = 1; - LLVMValueRef func_ref = get_soft_float_fn(g, fn_name, param_count, operand_type->llvm_type, wider_type->llvm_type); - - LLVMValueRef result; - if (vector_len == 0) { - LLVMValueRef params[1] = {value_ref}; - result = LLVMBuildCall(g->builder, func_ref, params, param_count, ""); + LLVMValueRef func_ref; + if ((wider_type->data.integral.bit_count == 128) && (g->zig_target->os == OsWindows) && (g->zig_target->arch == ZigLLVM_x86_64)) { + // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard i128 calling + // convention to adhere to the ABI that LLVM expects compiler-rt to have. + LLVMTypeRef v2i64 = LLVMVectorType(LLVMInt64Type(), 2); + func_ref = get_soft_float_fn(g, fn_name, param_count, operand_type->llvm_type, v2i64); } else { - ZigType *alloca_ty = operand_type; - result = build_alloca(g, alloca_ty, "", 0); + func_ref = get_soft_float_fn(g, fn_name, param_count, operand_type->llvm_type, wider_type->llvm_type); + } - LLVMTypeRef usize_ref = g->builtin_types.entry_usize->llvm_type; - for (uint32_t i = 0; i < vector_len; i++) { - LLVMValueRef index_value = LLVMConstInt(usize_ref, i, false); - LLVMValueRef params[1] = { - LLVMBuildExtractElement(g->builder, value_ref, index_value, ""), - }; - LLVMValueRef call_result = LLVMBuildCall(g->builder, func_ref, params, param_count, ""); - LLVMBuildInsertElement(g->builder, LLVMBuildLoad(g->builder, result, ""), - call_result, index_value, ""); - } + LLVMValueRef params[1] = {value_ref}; + LLVMValueRef result = LLVMBuildCall(g->builder, func_ref, params, param_count, ""); - result = LLVMBuildLoad(g->builder, result, ""); + if 
((wider_type->data.integral.bit_count == 128) && (g->zig_target->os == OsWindows) && (g->zig_target->arch == ZigLLVM_x86_64)) { + result = LLVMBuildBitCast(g->builder, result, wider_type->llvm_type, ""); } // Handle integers of non-pot bitsize by shortening them on the output if (result_type != wider_type) { - return gen_widen_or_shorten(g, false, wider_type, result_type, result); + result = gen_widen_or_shorten(g, false, wider_type, result_type, result); } + return result; } static LLVMValueRef gen_soft_float_bin_op(CodeGen *g, LLVMValueRef op1_value, LLVMValueRef op2_value, ZigType *operand_type, IrBinOp op_id) { uint32_t vector_len = operand_type->id == ZigTypeIdVector ? operand_type->data.vector.len : 0; - LLVMTypeRef return_type = operand_type->llvm_type; int param_count = 2; - const char *compiler_rt_type_abbrev = get_compiler_rt_type_abbrev(operand_type); - const char *math_float_prefix = libc_float_prefix(g, operand_type); - const char *math_float_suffix = libc_float_suffix(g, operand_type); + ZigType *operand_scalar_type = (operand_type->id == ZigTypeIdVector) ? 
operand_type->data.vector.elem_type : operand_type; + LLVMTypeRef return_scalar_type = operand_scalar_type->llvm_type; + const char *compiler_rt_type_abbrev = get_compiler_rt_type_abbrev(operand_scalar_type); + const char *math_float_prefix = libc_float_prefix(g, operand_scalar_type); + const char *math_float_suffix = libc_float_suffix(g, operand_scalar_type); char fn_name[64]; Icmp res_icmp = NONE; @@ -3511,32 +3491,32 @@ static LLVMValueRef gen_soft_float_bin_op(CodeGen *g, LLVMValueRef op1_value, LL case IrBinOpShlSat: zig_unreachable(); case IrBinOpCmpEq: - return_type = g->builtin_types.entry_i32->llvm_type; + return_scalar_type = g->builtin_types.entry_i32->llvm_type; snprintf(fn_name, sizeof(fn_name), "__eq%sf2", compiler_rt_type_abbrev); res_icmp = EQ_ZERO; break; case IrBinOpCmpNotEq: - return_type = g->builtin_types.entry_i32->llvm_type; + return_scalar_type = g->builtin_types.entry_i32->llvm_type; snprintf(fn_name, sizeof(fn_name), "__ne%sf2", compiler_rt_type_abbrev); res_icmp = NE_ZERO; break; case IrBinOpCmpLessOrEq: - return_type = g->builtin_types.entry_i32->llvm_type; + return_scalar_type = g->builtin_types.entry_i32->llvm_type; snprintf(fn_name, sizeof(fn_name), "__le%sf2", compiler_rt_type_abbrev); res_icmp = LE_ZERO; break; case IrBinOpCmpLessThan: - return_type = g->builtin_types.entry_i32->llvm_type; + return_scalar_type = g->builtin_types.entry_i32->llvm_type; snprintf(fn_name, sizeof(fn_name), "__le%sf2", compiler_rt_type_abbrev); res_icmp = EQ_NEG; break; case IrBinOpCmpGreaterOrEq: - return_type = g->builtin_types.entry_i32->llvm_type; + return_scalar_type = g->builtin_types.entry_i32->llvm_type; snprintf(fn_name, sizeof(fn_name), "__ge%sf2", compiler_rt_type_abbrev); res_icmp = GE_ZERO; break; case IrBinOpCmpGreaterThan: - return_type = g->builtin_types.entry_i32->llvm_type; + return_scalar_type = g->builtin_types.entry_i32->llvm_type; snprintf(fn_name, sizeof(fn_name), "__ge%sf2", compiler_rt_type_abbrev); res_icmp = EQ_ONE; break; @@ 
-3569,7 +3549,7 @@ static LLVMValueRef gen_soft_float_bin_op(CodeGen *g, LLVMValueRef op1_value, LL zig_unreachable(); } - LLVMValueRef func_ref = get_soft_float_fn(g, fn_name, param_count, operand_type->llvm_type, return_type); + LLVMValueRef func_ref = get_soft_float_fn(g, fn_name, param_count, operand_scalar_type->llvm_type, return_scalar_type); LLVMValueRef result; if (vector_len == 0) { diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index eb4676310d74..c13db2e26271 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -101,18 +101,20 @@ test "vector float operators" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - const S = struct { - fn doTheTest() !void { - var v: @Vector(4, f32) = [4]f32{ 10, 20, 30, 40 }; - var x: @Vector(4, f32) = [4]f32{ 1, 2, 3, 4 }; - try expect(mem.eql(f32, &@as([4]f32, v + x), &[4]f32{ 11, 22, 33, 44 })); - try expect(mem.eql(f32, &@as([4]f32, v - x), &[4]f32{ 9, 18, 27, 36 })); - try expect(mem.eql(f32, &@as([4]f32, v * x), &[4]f32{ 10, 40, 90, 160 })); - try expect(mem.eql(f32, &@as([4]f32, -x), &[4]f32{ -1, -2, -3, -4 })); - } - }; - try S.doTheTest(); - comptime try S.doTheTest(); + inline for ([_]type{ f16, f32, f64, f80, f128 }) |T| { + const S = struct { + fn doTheTest() !void { + var v: @Vector(4, T) = [4]T{ 10, 20, 30, 40 }; + var x: @Vector(4, T) = [4]T{ 1, 2, 3, 4 }; + try expect(mem.eql(T, &@as([4]T, v + x), &[4]T{ 11, 22, 33, 44 })); + try expect(mem.eql(T, &@as([4]T, v - x), &[4]T{ 9, 18, 27, 36 })); + try expect(mem.eql(T, &@as([4]T, v * x), &[4]T{ 10, 40, 90, 160 })); + try expect(mem.eql(T, &@as([4]T, -x), &[4]T{ -1, -2, -3, -4 })); + } + }; + try S.doTheTest(); + comptime try S.doTheTest(); + } } test "vector bit operators" {