Skip to content

Commit df81a79

Browse files
committed
rustc_codegen_llvm: adapt for LLVM 22 change to pass masked intrinsic alignment as an attribute
This was a bit more invasive than I had hoped. An alternate approach would be to add an extra call_intrinsic_with_attrs() carrying the new signature introduced in this change, leaving call_intrinsic() as-is; but this approach felt about equivalent and made it a little easier to audit the relevant call sites of call_intrinsic().
1 parent 6501e64 commit df81a79

File tree

5 files changed

+109
-35
lines changed

5 files changed

+109
-35
lines changed

compiler/rustc_codegen_llvm/src/intrinsic.rs

Lines changed: 73 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -1325,6 +1325,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
13251325
};
13261326
}
13271327

1328+
let llvm_version = crate::llvm_util::get_version();
1329+
13281330
/// Converts a vector mask, where each element has a bit width equal to the data elements it is used with,
13291331
/// down to an i1 based mask that can be used by llvm intrinsics.
13301332
///
@@ -1808,7 +1810,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
18081810
);
18091811

18101812
// Alignment of T, must be a constant integer value:
1811-
let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
1813+
let alignment = bx.align_of(in_elem).bytes();
18121814

18131815
// Truncate the mask vector to a vector of i1s:
18141816
let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
@@ -1819,11 +1821,23 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
18191821
// Type of the vector of elements:
18201822
let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
18211823

1822-
return Ok(bx.call_intrinsic(
1823-
"llvm.masked.gather",
1824-
&[llvm_elem_vec_ty, llvm_pointer_vec_ty],
1825-
&[args[1].immediate(), alignment, mask, args[0].immediate()],
1826-
));
1824+
let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
1825+
let alignment = bx.const_i32(alignment as i32);
1826+
&[args[1].immediate(), alignment, mask, args[0].immediate()]
1827+
} else {
1828+
&[args[1].immediate(), mask, args[0].immediate()]
1829+
};
1830+
1831+
let call =
1832+
bx.call_intrinsic("llvm.masked.gather", &[llvm_elem_vec_ty, llvm_pointer_vec_ty], args);
1833+
if llvm_version >= (22, 0, 0) {
1834+
crate::attributes::apply_to_callsite(
1835+
call,
1836+
crate::llvm::AttributePlace::Argument(0),
1837+
&[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
1838+
)
1839+
}
1840+
return Ok(call);
18271841
}
18281842

18291843
if name == sym::simd_masked_load {
@@ -1891,18 +1905,30 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
18911905
let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
18921906

18931907
// Alignment of T, must be a constant integer value:
1894-
let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32);
1908+
let alignment = bx.align_of(values_elem).bytes();
18951909

18961910
let llvm_pointer = bx.type_ptr();
18971911

18981912
// Type of the vector of elements:
18991913
let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
19001914

1901-
return Ok(bx.call_intrinsic(
1902-
"llvm.masked.load",
1903-
&[llvm_elem_vec_ty, llvm_pointer],
1904-
&[args[1].immediate(), alignment, mask, args[2].immediate()],
1905-
));
1915+
let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
1916+
let alignment = bx.const_i32(alignment as i32);
1917+
1918+
&[args[1].immediate(), alignment, mask, args[2].immediate()]
1919+
} else {
1920+
&[args[1].immediate(), mask, args[2].immediate()]
1921+
};
1922+
1923+
let call = bx.call_intrinsic("llvm.masked.load", &[llvm_elem_vec_ty, llvm_pointer], args);
1924+
if llvm_version >= (22, 0, 0) {
1925+
crate::attributes::apply_to_callsite(
1926+
call,
1927+
crate::llvm::AttributePlace::Argument(0),
1928+
&[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
1929+
)
1930+
}
1931+
return Ok(call);
19061932
}
19071933

19081934
if name == sym::simd_masked_store {
@@ -1964,18 +1990,29 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
19641990
let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
19651991

19661992
// Alignment of T, must be a constant integer value:
1967-
let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32);
1993+
let alignment = bx.align_of(values_elem).bytes();
19681994

19691995
let llvm_pointer = bx.type_ptr();
19701996

19711997
// Type of the vector of elements:
19721998
let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
19731999

1974-
return Ok(bx.call_intrinsic(
1975-
"llvm.masked.store",
1976-
&[llvm_elem_vec_ty, llvm_pointer],
1977-
&[args[2].immediate(), args[1].immediate(), alignment, mask],
1978-
));
2000+
let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
2001+
let alignment = bx.const_i32(alignment as i32);
2002+
&[args[2].immediate(), args[1].immediate(), alignment, mask]
2003+
} else {
2004+
&[args[2].immediate(), args[1].immediate(), mask]
2005+
};
2006+
2007+
let call = bx.call_intrinsic("llvm.masked.store", &[llvm_elem_vec_ty, llvm_pointer], args);
2008+
if llvm_version >= (22, 0, 0) {
2009+
crate::attributes::apply_to_callsite(
2010+
call,
2011+
crate::llvm::AttributePlace::Argument(1),
2012+
&[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
2013+
)
2014+
}
2015+
return Ok(call);
19792016
}
19802017

19812018
if name == sym::simd_scatter {
@@ -2040,7 +2077,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
20402077
);
20412078

20422079
// Alignment of T, must be a constant integer value:
2043-
let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
2080+
let alignment = bx.align_of(in_elem).bytes();
20442081

20452082
// Truncate the mask vector to a vector of i1s:
20462083
let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
@@ -2050,12 +2087,25 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
20502087

20512088
// Type of the vector of elements:
20522089
let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
2053-
2054-
return Ok(bx.call_intrinsic(
2090+
let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
2091+
let alignment = bx.const_i32(alignment as i32);
2092+
&[args[0].immediate(), args[1].immediate(), alignment, mask]
2093+
} else {
2094+
&[args[0].immediate(), args[1].immediate(), mask]
2095+
};
2096+
let call = bx.call_intrinsic(
20552097
"llvm.masked.scatter",
20562098
&[llvm_elem_vec_ty, llvm_pointer_vec_ty],
2057-
&[args[0].immediate(), args[1].immediate(), alignment, mask],
2058-
));
2099+
args,
2100+
);
2101+
if llvm_version >= (22, 0, 0) {
2102+
crate::attributes::apply_to_callsite(
2103+
call,
2104+
crate::llvm::AttributePlace::Argument(1),
2105+
&[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
2106+
)
2107+
}
2108+
return Ok(call);
20592109
}
20602110

20612111
macro_rules! arith_red {

tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-gather.rs

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,9 @@
11
//
22

33
//@ compile-flags: -C no-prepopulate-passes
4+
//@ revisions: LLVM21 LLVM22
5+
//@ [LLVM22] min-llvm-version: 22
6+
//@ [LLVM21] max-llvm-major-version: 21
47

58
#![crate_type = "lib"]
69
#![feature(repr_simd, core_intrinsics)]
@@ -24,7 +27,8 @@ pub unsafe fn gather_f32x2(
2427
) -> Vec2<f32> {
2528
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
2629
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
27-
// CHECK: call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]], <2 x float> {{.*}})
30+
// LLVM21: call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]], <2 x float> {{.*}})
31+
// LLVM22: call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> align {{.*}} {{.*}}, <2 x i1> [[B]], <2 x float> {{.*}})
2832
simd_gather(values, pointers, mask)
2933
}
3034

@@ -37,7 +41,8 @@ pub unsafe fn gather_f32x2_unsigned(
3741
) -> Vec2<f32> {
3842
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
3943
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
40-
// CHECK: call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]], <2 x float> {{.*}})
44+
// LLVM21: call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]], <2 x float> {{.*}})
45+
// LLVM22: call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> align {{.*}} {{.*}}, <2 x i1> [[B]], <2 x float> {{.*}})
4146
simd_gather(values, pointers, mask)
4247
}
4348

@@ -50,6 +55,7 @@ pub unsafe fn gather_pf32x2(
5055
) -> Vec2<*const f32> {
5156
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
5257
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
53-
// CHECK: call <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]], <2 x ptr> {{.*}})
58+
// LLVM21: call <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]], <2 x ptr> {{.*}})
59+
// LLVM22: call <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr> align {{.*}} {{.*}}, <2 x i1> [[B]], <2 x ptr> {{.*}})
5460
simd_gather(values, pointers, mask)
5561
}

tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-masked-load.rs

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,7 @@
11
//@ compile-flags: -C no-prepopulate-passes
2+
//@ revisions: LLVM21 LLVM22
3+
//@ [LLVM22] min-llvm-version: 22
4+
//@ [LLVM21] max-llvm-major-version: 21
25

36
#![crate_type = "lib"]
47
#![feature(repr_simd, core_intrinsics)]
@@ -18,7 +21,8 @@ pub type Vec4<T> = Simd<T, 4>;
1821
pub unsafe fn load_f32x2(mask: Vec2<i32>, pointer: *const f32, values: Vec2<f32>) -> Vec2<f32> {
1922
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
2023
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
21-
// CHECK: call <2 x float> @llvm.masked.load.v2f32.p0(ptr {{.*}}, i32 4, <2 x i1> [[B]], <2 x float> {{.*}})
24+
// LLVM21: call <2 x float> @llvm.masked.load.v2f32.p0(ptr {{.*}}, i32 4, <2 x i1> [[B]], <2 x float> {{.*}})
25+
// LLVM22: call <2 x float> @llvm.masked.load.v2f32.p0(ptr align 4 {{.*}}, <2 x i1> [[B]], <2 x float> {{.*}})
2226
simd_masked_load(mask, pointer, values)
2327
}
2428

@@ -31,7 +35,8 @@ pub unsafe fn load_f32x2_unsigned(
3135
) -> Vec2<f32> {
3236
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
3337
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
34-
// CHECK: call <2 x float> @llvm.masked.load.v2f32.p0(ptr {{.*}}, i32 4, <2 x i1> [[B]], <2 x float> {{.*}})
38+
// LLVM21: call <2 x float> @llvm.masked.load.v2f32.p0(ptr {{.*}}, i32 4, <2 x i1> [[B]], <2 x float> {{.*}})
39+
// LLVM22: call <2 x float> @llvm.masked.load.v2f32.p0(ptr align 4 {{.*}}, <2 x i1> [[B]], <2 x float> {{.*}})
3540
simd_masked_load(mask, pointer, values)
3641
}
3742

@@ -44,6 +49,7 @@ pub unsafe fn load_pf32x4(
4449
) -> Vec4<*const f32> {
4550
// CHECK: [[A:%[0-9]+]] = lshr <4 x i32> {{.*}}, {{<i32 31, i32 31, i32 31, i32 31>|splat \(i32 31\)}}
4651
// CHECK: [[B:%[0-9]+]] = trunc <4 x i32> [[A]] to <4 x i1>
47-
// CHECK: call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr {{.*}}, i32 {{.*}}, <4 x i1> [[B]], <4 x ptr> {{.*}})
52+
// LLVM21: call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr {{.*}}, i32 {{.*}}, <4 x i1> [[B]], <4 x ptr> {{.*}})
53+
// LLVM22: call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align {{.*}} {{.*}}, <4 x i1> [[B]], <4 x ptr> {{.*}})
4854
simd_masked_load(mask, pointer, values)
4955
}

tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-masked-store.rs

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,7 @@
11
//@ compile-flags: -C no-prepopulate-passes
2+
//@ revisions: LLVM21 LLVM22
3+
//@ [LLVM22] min-llvm-version: 22
4+
//@ [LLVM21] max-llvm-major-version: 21
25

36
#![crate_type = "lib"]
47
#![feature(repr_simd, core_intrinsics)]
@@ -18,7 +21,8 @@ pub type Vec4<T> = Simd<T, 4>;
1821
pub unsafe fn store_f32x2(mask: Vec2<i32>, pointer: *mut f32, values: Vec2<f32>) {
1922
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
2023
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
21-
// CHECK: call void @llvm.masked.store.v2f32.p0(<2 x float> {{.*}}, ptr {{.*}}, i32 4, <2 x i1> [[B]])
24+
// LLVM21: call void @llvm.masked.store.v2f32.p0(<2 x float> {{.*}}, ptr {{.*}}, i32 4, <2 x i1> [[B]])
25+
// LLVM22: call void @llvm.masked.store.v2f32.p0(<2 x float> {{.*}}, ptr align 4 {{.*}}, <2 x i1> [[B]])
2226
simd_masked_store(mask, pointer, values)
2327
}
2428

@@ -27,7 +31,8 @@ pub unsafe fn store_f32x2(mask: Vec2<i32>, pointer: *mut f32, values: Vec2<f32>)
2731
pub unsafe fn store_f32x2_unsigned(mask: Vec2<u32>, pointer: *mut f32, values: Vec2<f32>) {
2832
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
2933
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
30-
// CHECK: call void @llvm.masked.store.v2f32.p0(<2 x float> {{.*}}, ptr {{.*}}, i32 4, <2 x i1> [[B]])
34+
// LLVM21: call void @llvm.masked.store.v2f32.p0(<2 x float> {{.*}}, ptr {{.*}}, i32 4, <2 x i1> [[B]])
35+
// LLVM22: call void @llvm.masked.store.v2f32.p0(<2 x float> {{.*}}, ptr align 4 {{.*}}, <2 x i1> [[B]])
3136
simd_masked_store(mask, pointer, values)
3237
}
3338

@@ -36,6 +41,7 @@ pub unsafe fn store_f32x2_unsigned(mask: Vec2<u32>, pointer: *mut f32, values: V
3641
pub unsafe fn store_pf32x4(mask: Vec4<i32>, pointer: *mut *const f32, values: Vec4<*const f32>) {
3742
// CHECK: [[A:%[0-9]+]] = lshr <4 x i32> {{.*}}, {{<i32 31, i32 31, i32 31, i32 31>|splat \(i32 31\)}}
3843
// CHECK: [[B:%[0-9]+]] = trunc <4 x i32> [[A]] to <4 x i1>
39-
// CHECK: call void @llvm.masked.store.v4p0.p0(<4 x ptr> {{.*}}, ptr {{.*}}, i32 {{.*}}, <4 x i1> [[B]])
44+
// LLVM21: call void @llvm.masked.store.v4p0.p0(<4 x ptr> {{.*}}, ptr {{.*}}, i32 {{.*}}, <4 x i1> [[B]])
45+
// LLVM22: call void @llvm.masked.store.v4p0.p0(<4 x ptr> {{.*}}, ptr align {{.*}} {{.*}}, <4 x i1> [[B]])
4046
simd_masked_store(mask, pointer, values)
4147
}

tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-scatter.rs

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,9 @@
11
//
22

33
//@ compile-flags: -C no-prepopulate-passes
4+
//@ revisions: LLVM21 LLVM22
5+
//@ [LLVM22] min-llvm-version: 22
6+
//@ [LLVM21] max-llvm-major-version: 21
47

58
#![crate_type = "lib"]
69
#![feature(repr_simd, core_intrinsics)]
@@ -20,7 +23,8 @@ pub type Vec4<T> = Simd<T, 4>;
2023
pub unsafe fn scatter_f32x2(pointers: Vec2<*mut f32>, mask: Vec2<i32>, values: Vec2<f32>) {
2124
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
2225
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
23-
// CHECK: call void @llvm.masked.scatter.v2f32.v2p0(<2 x float> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]]
26+
// LLVM21: call void @llvm.masked.scatter.v2f32.v2p0(<2 x float> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]]
27+
// LLVM22: call void @llvm.masked.scatter.v2f32.v2p0(<2 x float> {{.*}}, <2 x ptr> align {{.*}} {{.*}}, <2 x i1> [[B]]
2428
simd_scatter(values, pointers, mask)
2529
}
2630

@@ -29,7 +33,8 @@ pub unsafe fn scatter_f32x2(pointers: Vec2<*mut f32>, mask: Vec2<i32>, values: V
2933
pub unsafe fn scatter_f32x2_unsigned(pointers: Vec2<*mut f32>, mask: Vec2<u32>, values: Vec2<f32>) {
3034
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
3135
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
32-
// CHECK: call void @llvm.masked.scatter.v2f32.v2p0(<2 x float> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]]
36+
// LLVM21: call void @llvm.masked.scatter.v2f32.v2p0(<2 x float> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]]
37+
// LLVM22: call void @llvm.masked.scatter.v2f32.v2p0(<2 x float> {{.*}}, <2 x ptr> align {{.*}} {{.*}}, <2 x i1> [[B]]
3338
simd_scatter(values, pointers, mask)
3439
}
3540

@@ -42,6 +47,7 @@ pub unsafe fn scatter_pf32x2(
4247
) {
4348
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
4449
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
45-
// CHECK: call void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]]
50+
// LLVM21: call void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]]
51+
// LLVM22: call void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr> {{.*}}, <2 x ptr> align {{.*}} {{.*}}, <2 x i1> [[B]]
4652
simd_scatter(values, pointers, mask)
4753
}

0 commit comments

Comments (0)