diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 53dc540cbd635..30535ae88f7ba 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -61,6 +61,14 @@ static cl::opt<bool> UseDivergentRegisterIndexing(
     cl::desc("Use indirect register addressing for divergent indexes"),
     cl::init(false));
 
+// TODO: This option should be removed once we switch to always using PTRADD in
+// the SelectionDAG.
+static cl::opt<bool> UseSelectionDAGPTRADD(
+    "amdgpu-use-sdag-ptradd", cl::Hidden,
+    cl::desc("Generate ISD::PTRADD nodes for 64-bit pointer arithmetic in the "
+             "SelectionDAG ISel"),
+    cl::init(false));
+
 static bool denormalModeIsFlushAllF32(const MachineFunction &MF) {
   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
   return Info->getMode().FP32Denormals == DenormalMode::getPreserveSign();
@@ -10457,6 +10465,11 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
   }
 }
 
+bool SITargetLowering::shouldPreservePtrArith(const Function &F,
+                                              EVT PtrVT) const {
+  return UseSelectionDAGPTRADD && PtrVT == MVT::i64;
+}
+
 // The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args:
 // offset (the offset that is included in bounds checking and swizzling, to be
 // split between the instruction's voffset and immoffset fields) and soffset
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index e14611d999641..d71a22722129e 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -260,6 +260,8 @@ class SITargetLowering final : public AMDGPUTargetLowering {
 
   bool shouldExpandVectorDynExt(SDNode *N) const;
 
+  bool shouldPreservePtrArith(const Function &F, EVT PtrVT) const override;
+
 private:
   // Analyze a combined offset from an amdgcn_s_buffer_load intrinsic and store
   // the three offsets (voffset, soffset and instoffset) into the SDValue[3]
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 360fd05cb3d96..1419f63202a7c 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -1376,6 +1376,20 @@ def : GCNPat <
   (i32 (V_MOV_B32_e32 (i32 0))), sub1)
 >;
 
+//===----------------------------------------------------------------------===//
+// PTRADD Patterns
+//===----------------------------------------------------------------------===//
+
+// GlobalISel shouldn't generate 64-bit addition pseudos.
+let GISelShouldIgnore = 1 in {
+def : GCNPat<
+  (DivergentBinFrag<ptradd> i64:$src0, i64:$src1),
+  (V_ADD_U64_PSEUDO $src0, $src1)>;
+def : GCNPat<
+  (UniformBinFrag<ptradd> i64:$src0, i64:$src1),
+  (S_ADD_U64_PSEUDO $src0, $src1)>;
+}
+
 /********** ============================================ **********/
 /********** Extraction, Insertion, Building and Casting **********/
 /********** ============================================ **********/
diff --git a/llvm/test/CodeGen/AMDGPU/ptradd-sdag.ll b/llvm/test/CodeGen/AMDGPU/ptradd-sdag.ll
new file mode 100644
index 0000000000000..653d4b85a9a5b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/ptradd-sdag.ll
@@ -0,0 +1,537 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX8,GFX8_PTRADD
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX8,GFX8_LEGACY
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX942,GFX942_PTRADD
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX942,GFX942_LEGACY
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX10,GFX10_PTRADD
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX10,GFX10_LEGACY
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX11,GFX11_PTRADD
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX11,GFX11_LEGACY
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX12,GFX12_PTRADD
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX12,GFX12_LEGACY
+
+; Tests for the ISD::PTRADD SelectionDAG opcode. These tests only cover 64-bit
+; address spaces, since PTRADD is currently only used for 64-bit pointers.
+
+; Check that basic pointer arithmetic can be lowered.
+define ptr @gep_as0(ptr %p, i64 %offset) { +; GFX8-LABEL: gep_as0: +; GFX8: ; %bb.0: ; %entry +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_lshlrev_b64 v[2:3], 2, v[2:3] +; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2 +; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc +; GFX8-NEXT: v_add_u32_e32 v0, vcc, 5, v0 +; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX942_PTRADD-LABEL: gep_as0: +; GFX942_PTRADD: ; %bb.0: ; %entry +; GFX942_PTRADD-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942_PTRADD-NEXT: v_lshlrev_b64 v[2:3], 2, v[2:3] +; GFX942_PTRADD-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3] +; GFX942_PTRADD-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, 5 +; GFX942_PTRADD-NEXT: s_setpc_b64 s[30:31] +; +; GFX942_LEGACY-LABEL: gep_as0: +; GFX942_LEGACY: ; %bb.0: ; %entry +; GFX942_LEGACY-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942_LEGACY-NEXT: v_lshl_add_u64 v[0:1], v[2:3], 2, v[0:1] +; GFX942_LEGACY-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, 5 +; GFX942_LEGACY-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: gep_as0: +; GFX10: ; %bb.0: ; %entry +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_lshlrev_b64 v[2:3], 2, v[2:3] +; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v3, vcc_lo +; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, 5 +; GFX10-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: gep_as0: +; GFX11: ; %bb.0: ; %entry +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_lshlrev_b64 v[2:3], 2, v[2:3] +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2 +; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v3, vcc_lo +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 5 +; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: gep_as0: +; GFX12: ; %bb.0: ; %entry +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_lshlrev_b64_e32 v[2:3], 2, v[2:3] +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2 +; GFX12-NEXT: s_wait_alu 0xfffd +; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v3, vcc_lo +; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, 5 +; GFX12-NEXT: s_wait_alu 0xfffd +; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo +; GFX12-NEXT: s_setpc_b64 s[30:31] +entry: + %gep1 = getelementptr inbounds i32, ptr %p, i64 %offset + %gep2 = getelementptr inbounds i8, ptr %gep1, i64 5 + ret ptr %gep2 +} + +define amdgpu_kernel void @gep_as0_uniform(ptr %p, i64 %offset, ptr %ret) { +; GFX8-LABEL: gep_as0_uniform: +; GFX8: ; %bb.0: ; %entry +; GFX8-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0 +; GFX8-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x10 +; GFX8-NEXT: s_add_i32 s12, s12, s17 +; GFX8-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 +; GFX8-NEXT: s_mov_b32 flat_scratch_lo, s13 +; GFX8-NEXT: s_waitcnt lgkmcnt(0) +; GFX8-NEXT: s_lshl_b64 s[2:3], s[2:3], 2 +; GFX8-NEXT: s_add_u32 s0, s0, s2 +; GFX8-NEXT: s_addc_u32 s1, s1, s3 +; GFX8-NEXT: v_mov_b32_e32 v4, s0 +; GFX8-NEXT: s_add_u32 s0, s4, 4 +; GFX8-NEXT: v_mov_b32_e32 v5, s1 +; GFX8-NEXT: s_addc_u32 s1, s5, 0 +; GFX8-NEXT: v_mov_b32_e32 v3, s1 +; GFX8-NEXT: 
v_mov_b32_e32 v0, s4 +; GFX8-NEXT: v_mov_b32_e32 v2, s0 +; GFX8-NEXT: v_mov_b32_e32 v1, s5 +; GFX8-NEXT: flat_store_dword v[2:3], v5 +; GFX8-NEXT: flat_store_dword v[0:1], v4 +; GFX8-NEXT: s_endpgm +; +; GFX942-LABEL: gep_as0_uniform: +; GFX942: ; %bb.0: ; %entry +; GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 +; GFX942-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x10 +; GFX942-NEXT: s_waitcnt lgkmcnt(0) +; GFX942-NEXT: s_lshl_b64 s[2:3], s[2:3], 2 +; GFX942-NEXT: s_add_u32 s0, s0, s2 +; GFX942-NEXT: s_addc_u32 s1, s1, s3 +; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[6:7] +; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[2:3] +; GFX942-NEXT: s_endpgm +; +; GFX10-LABEL: gep_as0_uniform: +; GFX10: ; %bb.0: ; %entry +; GFX10-NEXT: s_add_u32 s12, s12, s17 +; GFX10-NEXT: s_addc_u32 s13, s13, 0 +; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s12 +; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13 +; GFX10-NEXT: s_clause 0x1 +; GFX10-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0 +; GFX10-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x10 +; GFX10-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-NEXT: s_lshl_b64 s[2:3], s[2:3], 2 +; GFX10-NEXT: v_mov_b32_e32 v0, s4 +; GFX10-NEXT: s_add_u32 s0, s0, s2 +; GFX10-NEXT: s_addc_u32 s1, s1, s3 +; GFX10-NEXT: v_mov_b32_e32 v1, s5 +; GFX10-NEXT: v_mov_b32_e32 v3, s1 +; GFX10-NEXT: v_mov_b32_e32 v2, s0 +; GFX10-NEXT: flat_store_dwordx2 v[0:1], v[2:3] +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: gep_as0_uniform: +; GFX11: ; %bb.0: ; %entry +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 +; GFX11-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: s_lshl_b64 s[2:3], s[2:3], 2 +; GFX11-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GFX11-NEXT: s_add_u32 s0, s0, s2 +; GFX11-NEXT: s_addc_u32 s1, s1, s3 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0 +; GFX11-NEXT: flat_store_b64 v[0:1], v[2:3] +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: gep_as0_uniform: +; GFX12: ; %bb.0: ; %entry +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 +; GFX12-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: s_lshl_b64 s[2:3], s[2:3], 2 +; GFX12-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0 +; GFX12-NEXT: flat_store_b64 v[0:1], v[2:3] +; GFX12-NEXT: s_endpgm +entry: + %gep = getelementptr inbounds i32, ptr %p, i64 %offset + store ptr %gep, ptr %ret + ret void +} + +; Check that pointer arithmetic with multiple indexing steps can be lowered. 
+define ptr @multi_gep_as0(ptr %p, i64 %offset) { +; GFX8-LABEL: multi_gep_as0: +; GFX8: ; %bb.0: ; %entry +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_lshlrev_b64 v[2:3], 2, v[2:3] +; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2 +; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc +; GFX8-NEXT: v_add_u32_e32 v0, vcc, 5, v0 +; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX942_PTRADD-LABEL: multi_gep_as0: +; GFX942_PTRADD: ; %bb.0: ; %entry +; GFX942_PTRADD-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942_PTRADD-NEXT: v_lshlrev_b64 v[2:3], 2, v[2:3] +; GFX942_PTRADD-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3] +; GFX942_PTRADD-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, 5 +; GFX942_PTRADD-NEXT: s_setpc_b64 s[30:31] +; +; GFX942_LEGACY-LABEL: multi_gep_as0: +; GFX942_LEGACY: ; %bb.0: ; %entry +; GFX942_LEGACY-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942_LEGACY-NEXT: v_lshl_add_u64 v[0:1], v[2:3], 2, v[0:1] +; GFX942_LEGACY-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, 5 +; GFX942_LEGACY-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: multi_gep_as0: +; GFX10: ; %bb.0: ; %entry +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_lshlrev_b64 v[2:3], 2, v[2:3] +; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v3, vcc_lo +; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, 5 +; GFX10-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: multi_gep_as0: +; GFX11: ; %bb.0: ; %entry +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_lshlrev_b64 v[2:3], 2, v[2:3] +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2 +; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v3, vcc_lo +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 5 +; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: multi_gep_as0: +; GFX12: ; %bb.0: ; %entry +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_lshlrev_b64_e32 v[2:3], 2, v[2:3] +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2 +; GFX12-NEXT: s_wait_alu 0xfffd +; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v3, vcc_lo +; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, 5 +; GFX12-NEXT: s_wait_alu 0xfffd +; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo +; GFX12-NEXT: s_setpc_b64 s[30:31] +entry: + %gep1 = getelementptr inbounds i32, ptr %p, i64 %offset + %gep2 = getelementptr inbounds i8, ptr %gep1, i64 5 + ret ptr %gep2 +} + +define amdgpu_kernel void @multi_gep_as0_uniform(ptr %p, i64 %offset, ptr %ret) { +; GFX8-LABEL: multi_gep_as0_uniform: +; GFX8: ; %bb.0: ; %entry +; GFX8-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0 +; GFX8-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x10 +; GFX8-NEXT: s_add_i32 s12, s12, s17 +; GFX8-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 +; GFX8-NEXT: s_mov_b32 flat_scratch_lo, s13 +; GFX8-NEXT: s_waitcnt lgkmcnt(0) +; GFX8-NEXT: s_lshl_b64 s[2:3], s[2:3], 2 +; GFX8-NEXT: s_add_u32 s0, s0, s2 +; GFX8-NEXT: s_addc_u32 s1, s1, s3 +; GFX8-NEXT: s_add_u32 s0, s0, 5 +; GFX8-NEXT: s_addc_u32 s1, s1, 0 +; GFX8-NEXT: v_mov_b32_e32 v4, s0 +; GFX8-NEXT: s_add_u32 s0, s4, 4 +; 
GFX8-NEXT: v_mov_b32_e32 v5, s1 +; GFX8-NEXT: s_addc_u32 s1, s5, 0 +; GFX8-NEXT: v_mov_b32_e32 v3, s1 +; GFX8-NEXT: v_mov_b32_e32 v0, s4 +; GFX8-NEXT: v_mov_b32_e32 v2, s0 +; GFX8-NEXT: v_mov_b32_e32 v1, s5 +; GFX8-NEXT: flat_store_dword v[2:3], v5 +; GFX8-NEXT: flat_store_dword v[0:1], v4 +; GFX8-NEXT: s_endpgm +; +; GFX942-LABEL: multi_gep_as0_uniform: +; GFX942: ; %bb.0: ; %entry +; GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 +; GFX942-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x10 +; GFX942-NEXT: s_waitcnt lgkmcnt(0) +; GFX942-NEXT: s_lshl_b64 s[2:3], s[2:3], 2 +; GFX942-NEXT: s_add_u32 s0, s0, s2 +; GFX942-NEXT: s_addc_u32 s1, s1, s3 +; GFX942-NEXT: s_add_u32 s0, s0, 5 +; GFX942-NEXT: s_addc_u32 s1, s1, 0 +; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[6:7] +; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[2:3] +; GFX942-NEXT: s_endpgm +; +; GFX10-LABEL: multi_gep_as0_uniform: +; GFX10: ; %bb.0: ; %entry +; GFX10-NEXT: s_add_u32 s12, s12, s17 +; GFX10-NEXT: s_addc_u32 s13, s13, 0 +; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s12 +; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13 +; GFX10-NEXT: s_clause 0x1 +; GFX10-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0 +; GFX10-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x10 +; GFX10-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-NEXT: s_lshl_b64 s[2:3], s[2:3], 2 +; GFX10-NEXT: v_mov_b32_e32 v0, s4 +; GFX10-NEXT: s_add_u32 s0, s0, s2 +; GFX10-NEXT: s_addc_u32 s1, s1, s3 +; GFX10-NEXT: s_add_u32 s0, s0, 5 +; GFX10-NEXT: s_addc_u32 s1, s1, 0 +; GFX10-NEXT: v_mov_b32_e32 v1, s5 +; GFX10-NEXT: v_mov_b32_e32 v3, s1 +; GFX10-NEXT: v_mov_b32_e32 v2, s0 +; GFX10-NEXT: flat_store_dwordx2 v[0:1], v[2:3] +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: multi_gep_as0_uniform: +; GFX11: ; %bb.0: ; %entry +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 +; GFX11-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: s_lshl_b64 s[2:3], s[2:3], 2 +; GFX11-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GFX11-NEXT: s_add_u32 s0, s0, s2 +; GFX11-NEXT: s_addc_u32 s1, s1, s3 +; GFX11-NEXT: s_add_u32 s0, s0, 5 +; GFX11-NEXT: s_addc_u32 s1, s1, 0 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0 +; GFX11-NEXT: flat_store_b64 v[0:1], v[2:3] +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: multi_gep_as0_uniform: +; GFX12: ; %bb.0: ; %entry +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 +; GFX12-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: s_lshl_b64 s[2:3], s[2:3], 2 +; GFX12-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], 5 +; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0 +; GFX12-NEXT: flat_store_b64 v[0:1], v[2:3] +; GFX12-NEXT: s_endpgm +entry: + %gep1 = getelementptr inbounds i32, ptr %p, i64 %offset + %gep2 = getelementptr inbounds i8, ptr %gep1, i64 5 + store ptr %gep2, ptr %ret + ret void +} + +; Check that constant offsets are folded into memory instructions. 
+ +define void @fold_as0(ptr %from, ptr %to) { +; GFX8-LABEL: fold_as0: +; GFX8: ; %bb.0: ; %entry +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_add_u32_e32 v0, vcc, 8, v0 +; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; GFX8-NEXT: flat_load_dword v4, v[0:1] +; GFX8-NEXT: v_add_u32_e32 v0, vcc, 8, v2 +; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc +; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX8-NEXT: flat_store_dword v[0:1], v4 +; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX942-LABEL: fold_as0: +; GFX942: ; %bb.0: ; %entry +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: flat_load_dword v0, v[0:1] offset:8 +; GFX942-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX942-NEXT: flat_store_dword v[2:3], v0 offset:8 +; GFX942-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: fold_as0: +; GFX10: ; %bb.0: ; %entry +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: flat_load_dword v0, v[0:1] offset:8 +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: flat_store_dword v[2:3], v0 offset:8 +; GFX10-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fold_as0: +; GFX11: ; %bb.0: ; %entry +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: flat_load_b32 v0, v[0:1] offset:8 +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: flat_store_b32 v[2:3], v0 offset:8 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: fold_as0: +; GFX12: ; %bb.0: ; %entry +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: flat_load_b32 v0, v[0:1] offset:8 +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: flat_store_b32 v[2:3], v0 offset:8 +; GFX12-NEXT: s_wait_dscnt 0x0 +; GFX12-NEXT: s_setpc_b64 s[30:31] +entry: + %gep.from = getelementptr inbounds i8, ptr %from, i64 8 + %val = load i32, ptr %gep.from, align 4 + %gep.to = getelementptr inbounds i8, ptr %to, i64 8 + store i32 %val, ptr %gep.to, align 4 + ret void +} + +define void @fold_as1(ptr addrspace(1) %from, ptr addrspace(1) %to) { +; GFX8-LABEL: fold_as1: +; GFX8: ; %bb.0: ; %entry +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_add_u32_e32 v0, vcc, 8, v0 +; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; GFX8-NEXT: flat_load_dword v4, v[0:1] +; GFX8-NEXT: v_add_u32_e32 v0, vcc, 8, v2 +; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: flat_store_dword v[0:1], v4 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX942-LABEL: fold_as1: +; GFX942: ; %bb.0: ; %entry +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: global_load_dword v0, v[0:1], off offset:8 +; GFX942-NEXT: s_waitcnt vmcnt(0) +; GFX942-NEXT: global_store_dword v[2:3], v0, off offset:8 +; GFX942-NEXT: s_waitcnt vmcnt(0) +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: fold_as1: +; GFX10: ; %bb.0: ; %entry +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: global_load_dword v0, v[0:1], off offset:8 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: global_store_dword v[2:3], v0, off offset:8 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fold_as1: +; GFX11: ; %bb.0: ; %entry +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: global_load_b32 v0, v[0:1], off 
offset:8
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b32 v[2:3], v0, off offset:8
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: fold_as1:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_b32 v0, v[0:1], off offset:8
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b32 v[2:3], v0, off offset:8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+  %gep.from = getelementptr inbounds i8, ptr addrspace(1) %from, i64 8
+  %val = load i32, ptr addrspace(1) %gep.from, align 4
+  %gep.to = getelementptr inbounds i8, ptr addrspace(1) %to, i64 8
+  store i32 %val, ptr addrspace(1) %gep.to, align 4
+  ret void
+}
+
+define void @fold_as4(ptr addrspace(4) %from, ptr addrspace(1) %to) {
+; GFX8-LABEL: fold_as4:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 8, v0
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT: flat_load_dword v4, v[0:1]
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 8, v2
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_dword v[0:1], v4
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: fold_as4:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: global_load_dword v0, v[0:1], off offset:8
+; GFX942-NEXT: s_waitcnt vmcnt(0)
+; GFX942-NEXT: global_store_dword v[2:3], v0, off offset:8
+; GFX942-NEXT: s_waitcnt vmcnt(0)
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: fold_as4:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: global_load_dword v0, v[0:1], off offset:8
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: global_store_dword v[2:3], v0, off offset:8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fold_as4:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: global_load_b32 v0, v[0:1], off offset:8
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b32 v[2:3], v0, off offset:8
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: fold_as4:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_b32 v0, v[0:1], off offset:8
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b32 v[2:3], v0, off offset:8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+  %gep.from = getelementptr inbounds i8, ptr addrspace(4) %from, i64 8
+  %val = load i32, ptr addrspace(4) %gep.from, align 4
+  %gep.to = getelementptr inbounds i8, ptr addrspace(1) %to, i64 8
+  store i32 %val, ptr addrspace(1) %gep.to, align 4
+  ret void
+}
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX10_LEGACY: {{.*}}
+; GFX10_PTRADD: {{.*}}
+; GFX11_LEGACY: {{.*}}
+; GFX11_PTRADD: {{.*}}
+; GFX12_LEGACY: {{.*}}
+; GFX12_PTRADD: {{.*}}
+; GFX8_LEGACY: {{.*}}
+; GFX8_PTRADD: {{.*}}
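
A note on trying the new path outside of lit, for reviewer convenience: any GEP on a 64-bit pointer exercises it. With -amdgpu-use-sdag-ptradd=1, the shouldPreservePtrArith override returns true for 64-bit pointers, so the pointer arithmetic reaches instruction selection as an ISD::PTRADD node instead of being decomposed into a plain ISD::ADD, and the GCNPats added in SIInstructions.td select it to V_ADD_U64_PSEUDO (divergent address) or S_ADD_U64_PSEUDO (uniform address). A minimal sketch follows; the file and function names are illustrative and not part of this patch:

  ; ptradd-smoke.ll -- hypothetical smoke test, not included in the patch.
  define ptr addrspace(1) @ptradd_smoke(ptr addrspace(1) %p, i64 %i) {
    ; With the flag set, this GEP should survive as PTRADD and select through
    ; the new patterns; with the flag unset, it takes the legacy ADD-based
    ; path, as the *_LEGACY check prefixes above demonstrate.
    %q = getelementptr inbounds i32, ptr addrspace(1) %p, i64 %i
    ret ptr addrspace(1) %q
  }

Compile with, for example:

  llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -amdgpu-use-sdag-ptradd=1 < ptradd-smoke.ll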