[AMDGPU] Save/Restore SCC bit across waterfall loop. #68363

Merged
20 changes: 19 additions & 1 deletion llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -6079,6 +6079,17 @@ loadMBUFScalarOperandsFromVGPR(const SIInstrInfo &TII, MachineInstr &MI,
unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);

// Save SCC. Waterfall Loop may overwrite SCC.
Register SaveSCCReg;
bool SCCNotDead = (MBB.computeRegisterLiveness(TRI, AMDGPU::SCC, MI, 30) !=
MachineBasicBlock::LQR_Dead);
if (SCCNotDead) {
Comment on lines +6083 to +6086 (Contributor):
Nits: call this SCCLive? Even better, rewrite so you don't need it:

Suggested change
-  Register SaveSCCReg;
-  bool SCCNotDead = (MBB.computeRegisterLiveness(TRI, AMDGPU::SCC, MI, 30) !=
-                     MachineBasicBlock::LQR_Dead);
-  if (SCCNotDead) {
+  Register SaveSCCReg;
+  if (MBB.computeRegisterLiveness(TRI, AMDGPU::SCC, MI, 30) !=
+      MachineBasicBlock::LQR_Dead) {

Then below you can just test:

  // Restore SCC.
  if (SaveSCCReg) ...
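
For reference, a minimal sketch (not part of the PR's diff) of how the restore site further down would read with this suggestion applied, relying on Register's boolean conversion to tell whether SCC was saved:

  // Restore SCC only if it was live at the insertion point and therefore saved.
  if (SaveSCCReg)
    BuildMI(*RemainderBB, First, DL, TII.get(AMDGPU::S_CMP_LG_U32))
        .addReg(SaveSCCReg, RegState::Kill)
        .addImm(0);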

SaveSCCReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
BuildMI(MBB, Begin, DL, TII.get(AMDGPU::S_CSELECT_B32), SaveSCCReg)
.addImm(1)
.addImm(0);
}

Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);

// Save the EXEC mask
@@ -6134,8 +6145,15 @@ loadMBUFScalarOperandsFromVGPR(const SIInstrInfo &TII, MachineInstr &MI,

emitLoadScalarOpsFromVGPRLoop(TII, MRI, MBB, *LoopBB, *BodyBB, DL, ScalarOps);

// Restore the EXEC mask
MachineBasicBlock::iterator First = RemainderBB->begin();
// Restore SCC
if (SCCNotDead) {
BuildMI(*RemainderBB, First, DL, TII.get(AMDGPU::S_CMP_LG_U32))
.addReg(SaveSCCReg, RegState::Kill)
.addImm(0);
}

// Restore the EXEC mask
BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec);
return BodyBB;
}
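
Taken together, the two hunks bracket the waterfall loop with a save/restore pair for SCC. A condensed sketch of just the added code (surrounding code elided; names as in loadMBUFScalarOperandsFromVGPR):

  // Before emitting the loop: if SCC is live here, materialize it as a 0/1 value.
  Register SaveSCCReg;
  bool SCCNotDead = (MBB.computeRegisterLiveness(TRI, AMDGPU::SCC, MI, 30) !=
                     MachineBasicBlock::LQR_Dead);
  if (SCCNotDead) {
    SaveSCCReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(MBB, Begin, DL, TII.get(AMDGPU::S_CSELECT_B32), SaveSCCReg)
        .addImm(1)
        .addImm(0);
  }

  // ... save EXEC and emit the waterfall loop ...

  // After the loop: recompute SCC from the saved value, then restore EXEC.
  MachineBasicBlock::iterator First = RemainderBB->begin();
  if (SCCNotDead) {
    BuildMI(*RemainderBB, First, DL, TII.get(AMDGPU::S_CMP_LG_U32))
        .addReg(SaveSCCReg, RegState::Kill)
        .addImm(0);
  }
  BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec);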
83 changes: 83 additions & 0 deletions llvm/test/CodeGen/AMDGPU/waterfall_kills_scc.ll
@@ -0,0 +1,83 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc -march=amdgcn -mcpu=gfx906 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX906 %s
declare float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32>, i32, i32, i32 immarg) #0
declare void @llvm.amdgcn.raw.buffer.store.f32(float, <4 x i32>, i32, i32, i32 immarg) #1

; Check that the compiler doesn't crash with an "undefined physical register" error:
; bb.0 sets the SCC bit in s_cmp_eq_u32 s0, 1
; bb.1 overwrites it
; bb.2 uses the value from bb.0
; Preserve SCC across bb.1 with s_cselect_b32 s5, 1, 0 -> s_cmp_lg_u32 s5, 0
; Otherwise, we will see the following error:
;*** Bad machine code: Using an undefined physical register ***
;- function: foo
;- basic block: %bb.3 (0x53198c0)
;- instruction: %33.sub1:sgpr_128 = S_CSELECT_B32 1072693248, 0, implicit $scc
;- operand 3: implicit $scc

define amdgpu_kernel void @foo(i1 %cmp1) {
; GFX906-LABEL: foo:
; GFX906: ; %bb.0: ; %entry
; GFX906-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX906-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
; GFX906-NEXT: s_mov_b32 s10, -1
; GFX906-NEXT: s_mov_b32 s11, 0xe00000
; GFX906-NEXT: s_add_u32 s8, s8, s3
; GFX906-NEXT: s_addc_u32 s9, s9, 0
; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0
; GFX906-NEXT: buffer_load_dword v4, off, s[8:11], 0 offset:4
; GFX906-NEXT: buffer_load_dword v5, off, s[8:11], 0 offset:8
; GFX906-NEXT: buffer_load_dword v6, off, s[8:11], 0 offset:12
; GFX906-NEXT: s_load_dword s4, s[0:1], 0x24
; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x1c
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
; GFX906-NEXT: s_bitcmp1_b32 s4, 0
; GFX906-NEXT: s_mul_i32 s0, s2, s3
; GFX906-NEXT: v_mul_u32_u24_e32 v1, s3, v1
; GFX906-NEXT: v_mad_u32_u24 v0, s0, v0, v1
; GFX906-NEXT: v_add_lshl_u32 v2, v0, v2, 4
; GFX906-NEXT: v_mov_b32_e32 v0, 0
; GFX906-NEXT: s_mov_b32 s4, 0
; GFX906-NEXT: v_mov_b32_e32 v1, v0
; GFX906-NEXT: s_cselect_b32 s5, 1, 0
; GFX906-NEXT: s_mov_b64 s[2:3], exec
; GFX906-NEXT: ds_write_b64 v2, v[0:1]
; GFX906-NEXT: .LBB0_1: ; =>This Inner Loop Header: Depth=1
; GFX906-NEXT: s_waitcnt vmcnt(3)
; GFX906-NEXT: v_readfirstlane_b32 s0, v3
; GFX906-NEXT: s_waitcnt vmcnt(2)
; GFX906-NEXT: v_readfirstlane_b32 s1, v4
; GFX906-NEXT: v_cmp_eq_u64_e32 vcc, s[0:1], v[3:4]
; GFX906-NEXT: s_waitcnt vmcnt(1)
; GFX906-NEXT: v_readfirstlane_b32 s0, v5
; GFX906-NEXT: s_waitcnt vmcnt(0)
; GFX906-NEXT: v_readfirstlane_b32 s1, v6
; GFX906-NEXT: v_cmp_eq_u64_e64 s[0:1], s[0:1], v[5:6]
; GFX906-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
; GFX906-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6
; GFX906-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX906-NEXT: s_cbranch_execnz .LBB0_1
; GFX906-NEXT: ; %bb.2:
; GFX906-NEXT: s_cmp_lg_u32 s5, 0
; GFX906-NEXT: s_mov_b64 exec, s[2:3]
; GFX906-NEXT: s_cselect_b32 s5, 0x3ff00000, 0
; GFX906-NEXT: v_cvt_f32_f64_e32 v0, s[4:5]
; GFX906-NEXT: s_mov_b32 s5, s4
; GFX906-NEXT: s_mov_b32 s6, s4
; GFX906-NEXT: s_mov_b32 s7, s4
; GFX906-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX906-NEXT: s_endpgm
entry:
%wbr = alloca <4 x i32>, align 16, addrspace(5)
store ptr null, ptr addrspace(5) %wbr, align 16
%wbr_1 = load <4 x i32>, ptr addrspace(5) null, align 16
%call1 = tail call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %wbr_1, i32 0, i32 0, i32 0)
%0 = fpext float %call1 to double
%sel1 = select i1 %cmp1, double 1.000000e+00, double 0.000000e+00
%sel2 = select i1 %cmp1, double %0, double 0.000000e+00
%mul = fmul double %sel2, 0.000000e+00
%fptruncate = fptrunc double %sel1 to float
tail call void @llvm.amdgcn.raw.buffer.store.f32(float %fptruncate, <4 x i32> zeroinitializer, i32 0, i32 0, i32 0)
ret void
}