; RUN: opt %s -p verify -S -disable-output

; This test ensures that the verifier correctly handles very wide and very
; narrow strides.
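;
; The stride operand of these intrinsics is an overloaded integer type, which
; is why the mangled names below carry an .i8 or .i128 suffix alongside the
; vector type; strides both narrower and wider than the common i64 are
; expected to verify cleanly.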

define <4 x float> @column.major_load_stride_i8(ptr %m, i32 %arg) {
  %result.1 = call <4 x float> @llvm.matrix.column.major.load.v4f32.i8(ptr %m, i8 16, i1 false, i32 2, i32 2)
  ret <4 x float> %result.1
}

define <4 x float> @column.major_load_stride_i128(ptr %m, i32 %arg) {
  %result.1 = call <4 x float> @llvm.matrix.column.major.load.v4f32.i128(ptr %m, i128 u0x10000000000000000, i1 false, i32 2, i32 2)
  ret <4 x float> %result.1
}

define void @column.major_store_stride_i8(ptr %m, i64 %arg) {
  call void @llvm.matrix.column.major.store.v4f32.i8(<4 x float> zeroinitializer, ptr %m, i8 16, i1 false, i32 2, i32 2)
  ret void
}

define void @column.major_store_stride_i128(ptr %m, i64 %arg) {
  call void @llvm.matrix.column.major.store.v4f32.i128(<4 x float> zeroinitializer, ptr %m, i128 u0x10000000000000000, i1 false, i32 2, i32 2)
  ret void
}

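; Declarations matching the calls above; the stride suffix in each mangled
; name must agree with the type of the stride operand.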
declare <4 x float> @llvm.matrix.column.major.load.v4f32.i8(ptr, i8, i1, i32, i32)
declare <4 x float> @llvm.matrix.column.major.load.v4f32.i128(ptr, i128, i1, i32, i32)
declare void @llvm.matrix.column.major.store.v4f32.i8(<4 x float>, ptr, i8, i1, i32, i32)
declare void @llvm.matrix.column.major.store.v4f32.i128(<4 x float>, ptr, i128, i1, i32, i32)