[AMDGPU] Provide control to force VGPR MFMA form #148079
Conversation
✅ With the latest revision this PR passed the C/C++ code formatter.
@llvm/pr-subscribers-backend-amdgpu

Author: Jeffrey Byrnes (jrbyrnes)

Changes

This gives the user an override to force selection of the VGPR form of MFMA. Eventually we will drop this in favor of the compiler making better decisions, but it provides a mechanism for users to address cases where MayNeedAGPRs favors the AGPR form and performance is degraded due to poor RA.

Patch is 190.14 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/148079.diff

3 Files Affected:
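The flag is exercised directly through llc in the new tests below (llc -mtriple=amdgcn -mcpu=gfx950 --amdgpu-mfma-vgpr-form=1). As a minimal sketch of the decision the override short-circuits (the helper and parameter names here are hypothetical, not code from this patch), forcing MayNeedAGPRs to false means any later AGPR-vs-VGPR choice for an MFMA resolves to the VGPR variant:

// Illustrative sketch only; hypothetical helper, not part of the patch.
// With -amdgpu-mfma-vgpr-form=1 the SIMachineFunctionInfo constructor forces
// MayNeedAGPRs to false, so a query of this shape always returns the VGPR
// opcode.
static unsigned selectMFMAOpcode(bool MayNeedAGPRs, unsigned AGPROpc,
                                 unsigned VGPROpc) {
  return MayNeedAGPRs ? AGPROpc : VGPROpc;
}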
diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index 67ad28661da43..3e45698772a66 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -29,6 +29,12 @@ enum { MAX_LANES = 64 };
using namespace llvm;
+cl::opt<bool> MFMAVGPRForm(
+ "amdgpu-mfma-vgpr-form", cl::Hidden,
+ cl::desc("Whether to force use VGPR for Opc and Dest of MFMA. If "
+ "unspecified, default to compiler heuristics"),
+ cl::init(false));
+
const GCNTargetMachine &getTM(const GCNSubtarget *STI) {
const SITargetLowering *TLI = STI->getTargetLowering();
return static_cast<const GCNTargetMachine &>(TLI->getTargetMachine());
@@ -69,8 +75,8 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const Function &F,
PSInputAddr = AMDGPU::getInitialPSInputAddr(F);
}
- MayNeedAGPRs = ST.hasMAIInsts();
- if (ST.hasGFX90AInsts() &&
+ MayNeedAGPRs = ST.hasMAIInsts() && !MFMAVGPRForm;
+ if (!MFMAVGPRForm && ST.hasGFX90AInsts() &&
ST.getMaxNumVGPRs(F) <= AMDGPU::VGPR_32RegClass.getNumRegs() &&
!mayUseAGPRs(F))
MayNeedAGPRs = false; // We will select all MAI with VGPR operands.
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.form.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.form.ll
new file mode 100644
index 0000000000000..87a7c2ef6c95c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.form.ll
@@ -0,0 +1,76 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=amdgcn -mcpu=gfx950 --amdgpu-mfma-vgpr-form=0 < %s | FileCheck -enable-var-scope --check-prefixes=HEURRC %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx950 --amdgpu-mfma-vgpr-form=1 < %s | FileCheck -enable-var-scope --check-prefixes=VGPRRC %s
+
+declare <4 x float> @llvm.amdgcn.mfma.f32.16x16x32.f16(<8 x half>, <8 x half>, <4 x float>, i32 immarg, i32 immarg, i32 immarg)
+
+define <4 x float> @default(<8 x half> %arg0, <8 x half> %arg1, <4 x float> %arg2) {
+; HEURRC-LABEL: default:
+; HEURRC: ; %bb.0:
+; HEURRC-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; HEURRC-NEXT: v_accvgpr_write_b32 a0, v8
+; HEURRC-NEXT: v_accvgpr_write_b32 a1, v9
+; HEURRC-NEXT: v_accvgpr_write_b32 a2, v10
+; HEURRC-NEXT: v_accvgpr_write_b32 a3, v11
+; HEURRC-NEXT: s_nop 1
+; HEURRC-NEXT: v_mfma_f32_16x16x32_f16 a[0:3], v[0:3], v[4:7], a[0:3]
+; HEURRC-NEXT: s_nop 7
+; HEURRC-NEXT: v_accvgpr_read_b32 v0, a0
+; HEURRC-NEXT: v_accvgpr_read_b32 v1, a1
+; HEURRC-NEXT: v_accvgpr_read_b32 v2, a2
+; HEURRC-NEXT: v_accvgpr_read_b32 v3, a3
+; HEURRC-NEXT: s_setpc_b64 s[30:31]
+;
+; VGPRRC-LABEL: default:
+; VGPRRC: ; %bb.0:
+; VGPRRC-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VGPRRC-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[0:3], v[4:7], v[8:11]
+; VGPRRC-NEXT: s_setpc_b64 s[30:31]
+ %result = call <4 x float> @llvm.amdgcn.mfma.f32.16x16x32.f16(<8 x half> %arg0, <8 x half> %arg1, <4 x float> %arg2, i32 0, i32 0, i32 0)
+ ret <4 x float> %result
+}
+
+define <4 x float> @request_agpr(<8 x half> %arg0, <8 x half> %arg1, <4 x float> %arg2) #0 {
+; HEURRC-LABEL: request_agpr:
+; HEURRC: ; %bb.0:
+; HEURRC-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; HEURRC-NEXT: v_accvgpr_write_b32 a0, v8
+; HEURRC-NEXT: v_accvgpr_write_b32 a1, v9
+; HEURRC-NEXT: v_accvgpr_write_b32 a2, v10
+; HEURRC-NEXT: v_accvgpr_write_b32 a3, v11
+; HEURRC-NEXT: s_nop 1
+; HEURRC-NEXT: v_mfma_f32_16x16x32_f16 a[0:3], v[0:3], v[4:7], a[0:3]
+; HEURRC-NEXT: s_nop 7
+; HEURRC-NEXT: v_accvgpr_read_b32 v0, a0
+; HEURRC-NEXT: v_accvgpr_read_b32 v1, a1
+; HEURRC-NEXT: v_accvgpr_read_b32 v2, a2
+; HEURRC-NEXT: v_accvgpr_read_b32 v3, a3
+; HEURRC-NEXT: s_setpc_b64 s[30:31]
+;
+; VGPRRC-LABEL: request_agpr:
+; VGPRRC: ; %bb.0:
+; VGPRRC-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VGPRRC-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[0:3], v[4:7], v[8:11]
+; VGPRRC-NEXT: s_setpc_b64 s[30:31]
+ %result = call <4 x float> @llvm.amdgcn.mfma.f32.16x16x32.f16(<8 x half> %arg0, <8 x half> %arg1, <4 x float> %arg2, i32 0, i32 0, i32 0)
+ ret <4 x float> %result
+}
+
+define <4 x float> @request_no_agpr(<8 x half> %arg0, <8 x half> %arg1, <4 x float> %arg2) #1 {
+; HEURRC-LABEL: request_no_agpr:
+; HEURRC: ; %bb.0:
+; HEURRC-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; HEURRC-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[0:3], v[4:7], v[8:11]
+; HEURRC-NEXT: s_setpc_b64 s[30:31]
+;
+; VGPRRC-LABEL: request_no_agpr:
+; VGPRRC: ; %bb.0:
+; VGPRRC-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VGPRRC-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[0:3], v[4:7], v[8:11]
+; VGPRRC-NEXT: s_setpc_b64 s[30:31]
+ %result = call <4 x float> @llvm.amdgcn.mfma.f32.16x16x32.f16(<8 x half> %arg0, <8 x half> %arg1, <4 x float> %arg2, i32 0, i32 0, i32 0)
+ ret <4 x float> %result
+}
+
+attributes #0 = { "amdgpu-agpr-alloc"="32,256" }
+attributes #1 = { "amdgpu-agpr-alloc"="0,0" }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.ll
index 4628a9c15391b..866dba7746565 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.ll
@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=amdgcn -mcpu=gfx950 -global-isel=0 < %s | FileCheck -enable-var-scope --check-prefixes=GCN,SDAG %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx950 -global-isel=1 -global-isel-abort=2 -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GISEL %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx950 -global-isel=0 --amdgpu-mfma-vgpr-form=0 < %s | FileCheck -enable-var-scope --check-prefixes=HEURRC %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx950 -global-isel=0 --amdgpu-mfma-vgpr-form=1 < %s | FileCheck -enable-var-scope --check-prefixes=VGPRRC %s
declare <4 x float> @llvm.amdgcn.mfma.f32.16x16x32.f16(<8 x half>, <8 x half>, <4 x float>, i32 immarg, i32 immarg, i32 immarg)
declare <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.f16(<8 x half>, <8 x half>, <16 x float>, i32 immarg, i32 immarg, i32 immarg)
@@ -25,6 +27,48 @@ define <4 x float> @test_mfma_f32_16x16x32_f16(<8 x half> %arg0, <8 x half> %arg
; GCN-NEXT: v_accvgpr_read_b32 v2, a2
; GCN-NEXT: v_accvgpr_read_b32 v3, a3
; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; HEURRC-LABEL: test_mfma_f32_16x16x32_f16:
+; HEURRC: ; %bb.0:
+; HEURRC-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; HEURRC-NEXT: v_accvgpr_write_b32 a0, v8
+; HEURRC-NEXT: v_accvgpr_write_b32 a1, v9
+; HEURRC-NEXT: v_accvgpr_write_b32 a2, v10
+; HEURRC-NEXT: v_accvgpr_write_b32 a3, v11
+; HEURRC-NEXT: s_nop 1
+; HEURRC-NEXT: v_mfma_f32_16x16x32_f16 a[0:3], v[0:3], v[4:7], a[0:3]
+; HEURRC-NEXT: s_nop 7
+; HEURRC-NEXT: v_accvgpr_read_b32 v0, a0
+; HEURRC-NEXT: v_accvgpr_read_b32 v1, a1
+; HEURRC-NEXT: v_accvgpr_read_b32 v2, a2
+; HEURRC-NEXT: v_accvgpr_read_b32 v3, a3
+; HEURRC-NEXT: s_setpc_b64 s[30:31]
+;
+; VGPRRC-LABEL: test_mfma_f32_16x16x32_f16:
+; VGPRRC: ; %bb.0:
+; VGPRRC-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VGPRRC-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[0:3], v[4:7], v[8:11]
+; VGPRRC-NEXT: s_setpc_b64 s[30:31]
+; AGPR-LABEL: test_mfma_f32_16x16x32_f16:
+; AGPR: ; %bb.0:
+; AGPR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; AGPR-NEXT: v_accvgpr_write_b32 a0, v8
+; AGPR-NEXT: v_accvgpr_write_b32 a1, v9
+; AGPR-NEXT: v_accvgpr_write_b32 a2, v10
+; AGPR-NEXT: v_accvgpr_write_b32 a3, v11
+; AGPR-NEXT: s_nop 1
+; AGPR-NEXT: v_mfma_f32_16x16x32_f16 a[0:3], v[0:3], v[4:7], a[0:3]
+; AGPR-NEXT: s_nop 7
+; AGPR-NEXT: v_accvgpr_read_b32 v0, a0
+; AGPR-NEXT: v_accvgpr_read_b32 v1, a1
+; AGPR-NEXT: v_accvgpr_read_b32 v2, a2
+; AGPR-NEXT: v_accvgpr_read_b32 v3, a3
+; AGPR-NEXT: s_setpc_b64 s[30:31]
+; VGPR-LABEL: test_mfma_f32_16x16x32_f16:
+; VGPR: ; %bb.0:
+; VGPR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VGPR-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[0:3], v[4:7], v[8:11]
+; VGPR-NEXT: s_setpc_b64 s[30:31]
%result = call <4 x float> @llvm.amdgcn.mfma.f32.16x16x32.f16(<8 x half> %arg0, <8 x half> %arg1, <4 x float> %arg2, i32 0, i32 0, i32 0)
ret <4 x float> %result
}
@@ -45,6 +89,48 @@ define <4 x float> @test_mfma_f32_16x16x32_f16__flags(<8 x half> %arg0, <8 x hal
; GCN-NEXT: v_accvgpr_read_b32 v2, a2
; GCN-NEXT: v_accvgpr_read_b32 v3, a3
; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; HEURRC-LABEL: test_mfma_f32_16x16x32_f16__flags:
+; HEURRC: ; %bb.0:
+; HEURRC-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; HEURRC-NEXT: v_accvgpr_write_b32 a0, v8
+; HEURRC-NEXT: v_accvgpr_write_b32 a1, v9
+; HEURRC-NEXT: v_accvgpr_write_b32 a2, v10
+; HEURRC-NEXT: v_accvgpr_write_b32 a3, v11
+; HEURRC-NEXT: s_nop 1
+; HEURRC-NEXT: v_mfma_f32_16x16x32_f16 a[0:3], v[0:3], v[4:7], a[0:3] cbsz:1 abid:1 blgp:1
+; HEURRC-NEXT: s_nop 7
+; HEURRC-NEXT: v_accvgpr_read_b32 v0, a0
+; HEURRC-NEXT: v_accvgpr_read_b32 v1, a1
+; HEURRC-NEXT: v_accvgpr_read_b32 v2, a2
+; HEURRC-NEXT: v_accvgpr_read_b32 v3, a3
+; HEURRC-NEXT: s_setpc_b64 s[30:31]
+;
+; VGPRRC-LABEL: test_mfma_f32_16x16x32_f16__flags:
+; VGPRRC: ; %bb.0:
+; VGPRRC-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VGPRRC-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[0:3], v[4:7], v[8:11] cbsz:1 abid:1 blgp:1
+; VGPRRC-NEXT: s_setpc_b64 s[30:31]
+; AGPR-LABEL: test_mfma_f32_16x16x32_f16__flags:
+; AGPR: ; %bb.0:
+; AGPR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; AGPR-NEXT: v_accvgpr_write_b32 a0, v8
+; AGPR-NEXT: v_accvgpr_write_b32 a1, v9
+; AGPR-NEXT: v_accvgpr_write_b32 a2, v10
+; AGPR-NEXT: v_accvgpr_write_b32 a3, v11
+; AGPR-NEXT: s_nop 1
+; AGPR-NEXT: v_mfma_f32_16x16x32_f16 a[0:3], v[0:3], v[4:7], a[0:3] cbsz:1 abid:1 blgp:1
+; AGPR-NEXT: s_nop 7
+; AGPR-NEXT: v_accvgpr_read_b32 v0, a0
+; AGPR-NEXT: v_accvgpr_read_b32 v1, a1
+; AGPR-NEXT: v_accvgpr_read_b32 v2, a2
+; AGPR-NEXT: v_accvgpr_read_b32 v3, a3
+; AGPR-NEXT: s_setpc_b64 s[30:31]
+; VGPR-LABEL: test_mfma_f32_16x16x32_f16__flags:
+; VGPR: ; %bb.0:
+; VGPR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VGPR-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[0:3], v[4:7], v[8:11] cbsz:1 abid:1 blgp:1
+; VGPR-NEXT: s_setpc_b64 s[30:31]
%result = call <4 x float> @llvm.amdgcn.mfma.f32.16x16x32.f16(<8 x half> %arg0, <8 x half> %arg1, <4 x float> %arg2, i32 1, i32 1, i32 1)
ret <4 x float> %result
}
@@ -91,6 +177,84 @@ define amdgpu_kernel void @test_mfma_f32_16x16x32_f16_no_agpr__vgprcd(ptr addrsp
; GISEL-NEXT: s_nop 6
; GISEL-NEXT: global_store_dwordx4 v0, a[0:3], s[6:7]
; GISEL-NEXT: s_endpgm
+;
+; HEURRC-LABEL: test_mfma_f32_16x16x32_f16_no_agpr__vgprcd:
+; HEURRC: ; %bb.0:
+; HEURRC-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
+; HEURRC-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
+; HEURRC-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
+; HEURRC-NEXT: v_mov_b32_e32 v8, 0
+; HEURRC-NEXT: s_waitcnt lgkmcnt(0)
+; HEURRC-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; HEURRC-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
+; HEURRC-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
+; HEURRC-NEXT: v_accvgpr_write_b32 a0, s0
+; HEURRC-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
+; HEURRC-NEXT: v_accvgpr_write_b32 a1, s1
+; HEURRC-NEXT: v_accvgpr_write_b32 a2, s2
+; HEURRC-NEXT: v_accvgpr_write_b32 a3, s3
+; HEURRC-NEXT: s_nop 1
+; HEURRC-NEXT: v_mfma_f32_16x16x32_f16 a[0:3], v[0:3], v[4:7], a[0:3]
+; HEURRC-NEXT: s_nop 7
+; HEURRC-NEXT: global_store_dwordx4 v8, a[0:3], s[6:7]
+; HEURRC-NEXT: s_endpgm
+;
+; VGPRRC-LABEL: test_mfma_f32_16x16x32_f16_no_agpr__vgprcd:
+; VGPRRC: ; %bb.0:
+; VGPRRC-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
+; VGPRRC-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
+; VGPRRC-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
+; VGPRRC-NEXT: v_mov_b32_e32 v12, 0
+; VGPRRC-NEXT: s_waitcnt lgkmcnt(0)
+; VGPRRC-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; VGPRRC-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
+; VGPRRC-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
+; VGPRRC-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
+; VGPRRC-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
+; VGPRRC-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; VGPRRC-NEXT: s_nop 1
+; VGPRRC-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[0:3], v[4:7], v[8:11]
+; VGPRRC-NEXT: s_nop 7
+; VGPRRC-NEXT: global_store_dwordx4 v12, v[0:3], s[6:7]
+; VGPRRC-NEXT: s_endpgm
+; AGPR-LABEL: test_mfma_f32_16x16x32_f16_no_agpr__vgprcd:
+; AGPR: ; %bb.0:
+; AGPR-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
+; AGPR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
+; AGPR-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
+; AGPR-NEXT: v_mov_b32_e32 v8, 0
+; AGPR-NEXT: s_waitcnt lgkmcnt(0)
+; AGPR-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; AGPR-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
+; AGPR-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
+; AGPR-NEXT: v_accvgpr_write_b32 a0, s0
+; AGPR-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
+; AGPR-NEXT: v_accvgpr_write_b32 a1, s1
+; AGPR-NEXT: v_accvgpr_write_b32 a2, s2
+; AGPR-NEXT: v_accvgpr_write_b32 a3, s3
+; AGPR-NEXT: s_nop 1
+; AGPR-NEXT: v_mfma_f32_16x16x32_f16 a[0:3], v[0:3], v[4:7], a[0:3]
+; AGPR-NEXT: s_nop 7
+; AGPR-NEXT: global_store_dwordx4 v8, a[0:3], s[6:7]
+; AGPR-NEXT: s_endpgm
+; VGPR-LABEL: test_mfma_f32_16x16x32_f16_no_agpr__vgprcd:
+; VGPR: ; %bb.0:
+; VGPR-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
+; VGPR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
+; VGPR-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
+; VGPR-NEXT: v_mov_b32_e32 v12, 0
+; VGPR-NEXT: s_waitcnt lgkmcnt(0)
+; VGPR-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; VGPR-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
+; VGPR-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
+; VGPR-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
+; VGPR-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
+; VGPR-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; VGPR-NEXT: s_nop 1
+; VGPR-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[0:3], v[4:7], v[8:11]
+; VGPR-NEXT: s_nop 7
+; VGPR-NEXT: global_store_dwordx4 v12, v[0:3], s[6:7]
+; VGPR-NEXT: s_endpgm
%result = call <4 x float> @llvm.amdgcn.mfma.f32.16x16x32.f16(<8 x half> %arg0, <8 x half> %arg1, <4 x float> %arg2, i32 0, i32 0, i32 0)
store <4 x float> %result, ptr addrspace(1) %out
ret void
@@ -138,6 +302,84 @@ define amdgpu_kernel void @test_mfma_f32_16x16x32_f16_no_agpr__vgprcd__flags(ptr
; GISEL-NEXT: s_nop 6
; GISEL-NEXT: global_store_dwordx4 v0, a[0:3], s[6:7]
; GISEL-NEXT: s_endpgm
+;
+; HEURRC-LABEL: test_mfma_f32_16x16x32_f16_no_agpr__vgprcd__flags:
+; HEURRC: ; %bb.0:
+; HEURRC-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
+; HEURRC-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
+; HEURRC-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
+; HEURRC-NEXT: v_mov_b32_e32 v8, 0
+; HEURRC-NEXT: s_waitcnt lgkmcnt(0)
+; HEURRC-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; HEURRC-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
+; HEURRC-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
+; HEURRC-NEXT: v_accvgpr_write_b32 a0, s0
+; HEURRC-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
+; HEURRC-NEXT: v_accvgpr_write_b32 a1, s1
+; HEURRC-NEXT: v_accvgpr_write_b32 a2, s2
+; HEURRC-NEXT: v_accvgpr_write_b32 a3, s3
+; HEURRC-NEXT: s_nop 1
+; HEURRC-NEXT: v_mfma_f32_16x16x32_f16 a[0:3], v[0:3], v[4:7], a[0:3] cbsz:3 abid:2 blgp:1
+; HEURRC-NEXT: s_nop 7
+; HEURRC-NEXT: global_store_dwordx4 v8, a[0:3], s[6:7]
+; HEURRC-NEXT: s_endpgm
+;
+; VGPRRC-LABEL: test_mfma_f32_16x16x32_f16_no_agpr__vgprcd__flags:
+; VGPRRC: ; %bb.0:
+; VGPRRC-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
+; VGPRRC-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
+; VGPRRC-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
+; VGPRRC-NEXT: v_mov_b32_e32 v12, 0
+; VGPRRC-NEXT: s_waitcnt lgkmcnt(0)
+; VGPRRC-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; VGPRRC-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
+; VGPRRC-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
+; VGPRRC-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
+; VGPRRC-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
+; VGPRRC-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; VGPRRC-NEXT: s_nop 1
+; VGPRRC-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[0:3], v[4:7], v[8:11] cbsz:3 abid:2 blgp:1
+; VGPRRC-NEXT: s_nop 7
+; VGPRRC-NEXT: global_store_dwordx4 v12, v[0:3], s[6:7]
+; VGPRRC-NEXT: s_endpgm
+; AGPR-LABEL: test_mfma_f32_16x16x32_f16_no_agpr__vgprcd__flags:
+; AGPR: ; %bb.0:
+; AGPR-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
+; AGPR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
+; AGPR-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
+; AGPR-NEXT: v_mov_b32_e32 v8, 0
+; AGPR-NEXT: s_waitcnt lgkmcnt(0)
+; AGPR-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; AGPR-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
+; AGPR-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
+; AGPR-NEXT: v_accvgpr_write_b32 a0, s0
+; AGPR-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
+; AGPR-NEXT: v_accvgpr_write_b32 a1, s1
+; AGPR-NEXT: v_accvgpr_write_b32 a2, s2
+; AGPR-NEXT: v_accvgpr_write_b32 a3, s3
+; AGPR-NEXT: s_nop 1
+; AGPR-NEXT: v_mfma_f32_16x16x32_f16 a[0:3], v[0:3], v[4:7], a[0:3] cbsz:3 abid:2 blgp:1
+; AGPR-NEXT: s_nop 7
+; AGPR-NEXT: global_store_dwordx4 v8, a[0:3], s[6:7]
+; AGPR-NEXT: s_endpgm
+; VGPR-LABEL: test_mfma_f32_16x16x32_f16_no_agpr__vgprcd__flags:
+; VGPR: ; %bb.0:
+; VGPR-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
+; VGPR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
+; VGPR-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
+; VGPR-NEXT: v_mov_b32_e32 v12, 0
+; VGPR-NEXT: s_waitcnt lgkmcnt(0)
+; VGPR-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; VGPR-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
+; VGPR-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
+; VGPR-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
+; VGPR-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
+; VGPR-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; VGPR-NEXT: s_nop 1
+; VGPR-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[0:3], v[4:7], v[8:11] cbsz:3 abid:2 blgp:1
+; VGPR-NEXT: s_nop 7
+; VGPR-NEXT: global_store_dwordx4 v12, v[0:3], s[6:7]
+; VGPR-NEXT: s_endpgm
%result = call <4 x float> @llvm.amdgcn.mfma.f32.16x16x32.f16(<8 x half> %arg0, <8 x half> %arg1, <4 x float> %arg2, i32 3, i32 2, i32 1)
store <4 x float> %result, ptr addrspace(1) %out
ret void
@@ -271,6 +513,258 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x hal
; GISEL-NEXT: global_store_dwordx4 v[26:27], v[0:3], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
; GISEL-NEXT: s_endpgm
+;
+; HEURRC-LABEL: test_mfma_f32_32x32x16_f16:
+; HEURRC: ; %bb.0:
+; HEURRC-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
+; HEURRC-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
+; HEURRC-NEXT: v_mov_b64_e32 v[12:13], 48
+; HEURRC-NEXT: v_mov_b64_e32 v[14:15], 32
+; HEURRC-NEXT: v_mov_b64_e32 v[16:17], 16
+; HEURRC-NEXT: s_waitcnt lgkmcnt(0)
+; HEURRC-NEXT: v_mov_b64_e32 v[0:1], s[24:25]
+; HEURRC-NEXT: v_mov_b64_e32 v[2:3], s[26:27]
+; HEURRC-NEXT: v_mov_b64_e32 v[4:5], s[28:29]
+; HEURRC-NEXT: v_accvgpr_write_b32 a0, s8
+; HEURRC-NEXT: v_mov_b64_e32 v[6:7], s[30:31]
+; HEURRC-NEXT: v_accvgpr_write_b32 a1, s9
+; HEURRC-NEXT: v_accvgpr_write_b32 a2, s10
+; HEURRC-NEXT: v_accvgpr_write_b32 a3, s11
+; HEURRC-NEXT: v_accvgpr_write_b32 a4, s12
+; HEURRC-NEXT: v_accvgpr_write_b32 a5, s13
+; HEURRC-NEXT: v_accvgpr_write_b32 a6, s14
+; HEURRC-NEXT: v_accvgpr_write_b32 a7, s15
+; HEURRC-NEXT: v_accvgpr_write_b32 a8, s16
+; HEURRC-NEXT: v_accvgpr_write_b32 a9, s17
+; HEURRC-NEXT: v_accvgpr_write_b32 a10, s18
+; HEURRC-NEXT: v_accvgpr_write_b32 a11, s19
+; HEURRC-NEXT: v_accvgpr_write_b32 a12, s20
+; HEURRC-NEXT: v_accvgpr_write_b32 a13, s21
+; HEURRC-NEXT: v_accvgpr_write_b32 a14, s22
+; HEURRC-NEXT: v_accvgpr_write_b32 a15, s23
+; HEURRC-NEXT: v_mov_b64_e32 v[18:19], 0
+; HEURRC-NEXT: v_mov_b32_e32 v8, s16
+; HEURRC-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[0:3], v[4:7], a[0:15]
+; HEURRC-NEXT: v_mov_b32_e32 v0, s20
+; HEURRC-NEXT...
[truncated]
LGTM
// optimal RC for Opc and Dest of MFMA. In particular, there are high RP cases
// where it is better to produce the VGPR form (e.g. if there are VGPR users
// of the MFMA result).
cl::opt<bool> MFMAVGPRForm(
Missing static
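A minimal sketch of the suggested fix, reusing the declaration quoted above and only adding internal linkage:

// Same declaration as in the patch, now with internal linkage so the option
// symbol stays local to SIMachineFunctionInfo.cpp.
static cl::opt<bool> MFMAVGPRForm(
    "amdgpu-mfma-vgpr-form", cl::Hidden,
    cl::desc("Whether to force use VGPR for Opc and Dest of MFMA. If "
             "unspecified, default to compiler heuristics"),
    cl::init(false));

File-scope cl::opt definitions in a .cpp file are conventionally marked static (or placed in an anonymous namespace) so the option object does not export a symbol.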
@@ -69,8 +79,8 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const Function &F,
     PSInputAddr = AMDGPU::getInitialPSInputAddr(F);
   }

-  MayNeedAGPRs = ST.hasMAIInsts();
-  if (ST.hasGFX90AInsts() &&
+  MayNeedAGPRs = ST.hasMAIInsts() && !MFMAVGPRForm;
This breaks if used with gfx908, so it's impossible to flip the default
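One way to address this, sketched here only as an idea and not taken from the patch, would be to honor the override only on subtargets that actually have the VGPR-destination MFMA encodings, reusing the hasGFX90AInsts() check already present in the surrounding code:

// Sketch only (not from this patch): gate the override on gfx90a and newer,
// where MFMA can write VGPRs; gfx908 keeps the AGPR form regardless of the
// flag, so flipping the default would not break it.
bool ForceVGPRForm = MFMAVGPRForm && ST.hasGFX90AInsts();
MayNeedAGPRs = ST.hasMAIInsts() && !ForceVGPRForm;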