diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 3626ac45a4860..8a8abf5654cd4 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2766,6 +2766,33 @@ SDValue DAGCombiner::visitPTRADD(SDNode *N) {
     }
   }
 
+  // Transform (ptradd a, b) -> (or disjoint a, b) if it is equivalent and if
+  // that transformation can't block an offset folding at any use of the
+  // ptradd. This should be done late, after legalization, so that it doesn't
+  // block other ptradd combines that could enable more offset folding.
+  if (LegalOperations && DAG.haveNoCommonBitsSet(N0, N1)) {
+    bool TransformCanBreakAddrMode = false;
+    if (auto *C = dyn_cast<ConstantSDNode>(N1)) {
+      TargetLoweringBase::AddrMode AM;
+      AM.HasBaseReg = true;
+      AM.BaseOffs = C->getSExtValue();
+      TransformCanBreakAddrMode = any_of(N->users(), [&](SDNode *User) {
+        if (auto *LoadStore = dyn_cast<MemSDNode>(User);
+            LoadStore && LoadStore->getBasePtr().getNode() == N) {
+          unsigned AS = LoadStore->getAddressSpace();
+          EVT AccessVT = LoadStore->getMemoryVT();
+          Type *AccessTy = AccessVT.getTypeForEVT(*DAG.getContext());
+          return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy,
+                                           AS);
+        }
+        return false;
+      });
+    }
+
+    if (!TransformCanBreakAddrMode)
+      return DAG.getNode(ISD::OR, DL, PtrVT, N0, N1, SDNodeFlags::Disjoint);
+  }
+
   return SDValue();
 }
 
diff --git a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
index 893deb35fe822..a03bd97309c6b 100644
--- a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
+++ b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
@@ -100,7 +100,7 @@ define void @baseptr_null(i64 %offset, i8 %v) {
 
 ; Taken from implicit-kernarg-backend-usage.ll, tests the PTRADD handling in the
 ; assertalign DAG combine.
-define amdgpu_kernel void @llvm_amdgcn_queue_ptr(ptr addrspace(1) %ptr) #0 {
+define amdgpu_kernel void @llvm_amdgcn_queue_ptr(ptr addrspace(1) %ptr) {
 ; GFX942-LABEL: llvm_amdgcn_queue_ptr:
 ; GFX942:       ; %bb.0:
 ; GFX942-NEXT:    v_mov_b32_e32 v2, 0
@@ -416,6 +416,58 @@ entry:
   ret void
 }
 
+; Check that ptradds can be lowered to disjoint ORs.
+define ptr @gep_disjoint_or(ptr %base) {
+; GFX942-LABEL: gep_disjoint_or:
+; GFX942:       ; %bb.0:
+; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT:    v_and_or_b32 v0, v0, -16, 4
+; GFX942-NEXT:    s_setpc_b64 s[30:31]
+  %p = call ptr @llvm.ptrmask(ptr %base, i64 s0xf0)
+  %gep = getelementptr nuw inbounds i8, ptr %p, i64 4
+  ret ptr %gep
+}
+
+; Check that AssertAlign nodes between ptradd nodes don't block offset folding,
+; taken from preload-implicit-kernargs.ll
+define amdgpu_kernel void @random_incorrect_offset(ptr addrspace(1) inreg %out) {
+; GFX942_PTRADD-LABEL: random_incorrect_offset:
+; GFX942_PTRADD:       ; %bb.1:
+; GFX942_PTRADD-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x0
+; GFX942_PTRADD-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942_PTRADD-NEXT:    s_branch .LBB21_0
+; GFX942_PTRADD-NEXT:    .p2align 8
+; GFX942_PTRADD-NEXT:  ; %bb.2:
+; GFX942_PTRADD-NEXT:  .LBB21_0:
+; GFX942_PTRADD-NEXT:    s_load_dword s0, s[4:5], 0xa
+; GFX942_PTRADD-NEXT:    v_mov_b32_e32 v0, 0
+; GFX942_PTRADD-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942_PTRADD-NEXT:    v_mov_b32_e32 v1, s0
+; GFX942_PTRADD-NEXT:    global_store_dword v0, v1, s[8:9]
+; GFX942_PTRADD-NEXT:    s_endpgm
+;
+; GFX942_LEGACY-LABEL: random_incorrect_offset:
+; GFX942_LEGACY:       ; %bb.1:
+; GFX942_LEGACY-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x0
+; GFX942_LEGACY-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942_LEGACY-NEXT:    s_branch .LBB21_0
+; GFX942_LEGACY-NEXT:    .p2align 8
+; GFX942_LEGACY-NEXT:  ; %bb.2:
+; GFX942_LEGACY-NEXT:  .LBB21_0:
+; GFX942_LEGACY-NEXT:    s_mov_b32 s0, 8
+; GFX942_LEGACY-NEXT:    s_load_dword s0, s[4:5], s0 offset:0x2
+; GFX942_LEGACY-NEXT:    v_mov_b32_e32 v0, 0
+; GFX942_LEGACY-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942_LEGACY-NEXT:    v_mov_b32_e32 v1, s0
+; GFX942_LEGACY-NEXT:    global_store_dword v0, v1, s[8:9]
+; GFX942_LEGACY-NEXT:    s_endpgm
+  %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 2
+  %load = load i32, ptr addrspace(4) %gep
+  store i32 %load, ptr addrspace(1) %out
+  ret void
+}
+
 declare void @llvm.memcpy.p0.p4.i64(ptr noalias nocapture writeonly, ptr addrspace(4) noalias nocapture readonly, i64, i1 immarg)
 
 !0 = !{}
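Note on the combine's precondition (illustration only, not part of the patch): DAG.haveNoCommonBitsSet(N0, N1) guarantees that the base and the offset have no set bits in common, so the addition cannot produce carries and is bitwise identical to a disjoint OR. Below is a minimal standalone C++ sketch of that equivalence; the values are made up to mirror the gep_disjoint_or test, where ptrmask clears the low four bits of the base and the constant offset 4 lives entirely in those cleared bits.

// Standalone illustration, not LLVM code: why (ptradd a, b) can become
// (or disjoint a, b) when a and b share no set bits. The values are
// hypothetical, chosen to mirror the gep_disjoint_or test above.
#include <cassert>
#include <cstdint>

int main() {
  uint64_t Base = 0xdeadbee0; // low 4 bits cleared, as after ptrmask with -16
  uint64_t Off = 0x4;         // fits entirely in the cleared low bits
  assert((Base & Off) == 0);  // the "no common bits set" precondition
  // With no common bits, the addition produces no carries, so it is
  // bit-for-bit the same as a bitwise OR.
  assert(Base + Off == (Base | Off));
  return 0;
}

This equivalence is also why the combine is gated on LegalOperations: as the patch's comment notes, performing the rewrite only after legalization keeps earlier ptradd combines, and the offset foldings they enable, intact.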