diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index cabecbec175b3..a19f93903e15a 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -2290,6 +2290,12 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
     case Intrinsic::llrint:
       ISD = ISD::LLRINT;
       break;
+    case Intrinsic::lround:
+      ISD = ISD::LROUND;
+      break;
+    case Intrinsic::llround:
+      ISD = ISD::LLROUND;
+      break;
     case Intrinsic::round:
       ISD = ISD::FROUND;
       break;
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 67a51c12b508e..9fb5896f3df40 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -1191,9 +1191,6 @@ static const CostTblEntry VectorIntrinsicCostTable[]{
     {Intrinsic::roundeven, MVT::f64, 9},
     {Intrinsic::rint, MVT::f32, 7},
     {Intrinsic::rint, MVT::f64, 7},
-    {Intrinsic::lrint, MVT::i32, 1},
-    {Intrinsic::lrint, MVT::i64, 1},
-    {Intrinsic::llrint, MVT::i64, 1},
     {Intrinsic::nearbyint, MVT::f32, 9},
     {Intrinsic::nearbyint, MVT::f64, 9},
     {Intrinsic::bswap, MVT::i16, 3},
@@ -1262,11 +1259,29 @@ RISCVTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
   switch (ICA.getID()) {
   case Intrinsic::lrint:
   case Intrinsic::llrint:
-    // We can't currently lower half or bfloat vector lrint/llrint.
-    if (auto *VecTy = dyn_cast<VectorType>(ICA.getArgTypes()[0]);
-        VecTy && VecTy->getElementType()->is16bitFPTy())
-      return InstructionCost::getInvalid();
-    [[fallthrough]];
+  case Intrinsic::lround:
+  case Intrinsic::llround: {
+    auto LT = getTypeLegalizationCost(RetTy);
+    switch (ICA.getID()) {
+    case Intrinsic::lrint:
+    case Intrinsic::llrint:
+      // We can't currently lower half or bfloat vector lrint/llrint.
+      if (auto *VecTy = dyn_cast<VectorType>(ICA.getArgTypes()[0]);
+          VecTy && VecTy->getElementType()->is16bitFPTy())
+        return InstructionCost::getInvalid();
+      break;
+    case Intrinsic::lround:
+    case Intrinsic::llround:
+      // We can't currently lower scalable-vector lround/llround.
+      if (LT.second.isScalableVector())
+        return InstructionCost::getInvalid();
+      break;
+    }
+    if (ST->hasVInstructions() && LT.second.isVector())
+      return LT.first *
+             getRISCVInstructionCost(RISCV::VFCVT_X_F_V, LT.second, CostKind);
+    break;
+  }
   case Intrinsic::ceil:
   case Intrinsic::floor:
   case Intrinsic::trunc:
diff --git a/llvm/test/Analysis/CostModel/RISCV/fround.ll b/llvm/test/Analysis/CostModel/RISCV/fround.ll
index a0818d487d151..2edceb6425ead 100644
--- a/llvm/test/Analysis/CostModel/RISCV/fround.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/fround.ll
@@ -436,23 +436,23 @@ define void @lrint() {
 ; CHECK-NEXT: Cost Model: Invalid cost for instruction: %10 = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16bf16(<vscale x 16 x bfloat> undef)
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call i64 @llvm.lrint.i64.f32(float undef)
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i64> @llvm.lrint.v2i64.v2f32(<2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %13 = call <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %14 = call <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %15 = call <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float> undef)
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f32(<vscale x 1 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f32(<vscale x 2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f32(<vscale x 4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f32(<vscale x 8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %20 = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f32(<vscale x 16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %17 = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f32(<vscale x 2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %18 = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f32(<vscale x 4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %19 = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f32(<vscale x 8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %20 = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f32(<vscale x 16 x float> undef)
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call i64 @llvm.lrint.i64.f64(double undef)
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %24 = call <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %25 = call <16 x i64> @llvm.lrint.v16i64.v16f64(<16 x double>
undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %23 = call <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %24 = call <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %25 = call <16 x i64> @llvm.lrint.v16i64.v16f64(<16 x double> undef) ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call @llvm.lrint.nxv1i64.nxv1f64( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call @llvm.lrint.nxv2i64.nxv2f64( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %28 = call @llvm.lrint.nxv4i64.nxv4f64( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %29 = call @llvm.lrint.nxv8i64.nxv8f64( undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %27 = call @llvm.lrint.nxv2i64.nxv2f64( undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %28 = call @llvm.lrint.nxv4i64.nxv4f64( undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %29 = call @llvm.lrint.nxv8i64.nxv8f64( undef) ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; call i64 @llvm.lrint.i64.bf16(bfloat undef) @@ -528,23 +528,23 @@ define void @llrint() { ; CHECK-NEXT: Cost Model: Invalid cost for instruction: %10 = call @llvm.llrint.nxv16i64.nxv16bf16( undef) ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call i64 @llvm.llrint.i64.f32(float undef) ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %13 = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %14 = call <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %15 = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> undef) ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call @llvm.llrint.nxv1i64.nxv1f32( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call @llvm.llrint.nxv2i64.nxv2f32( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call @llvm.llrint.nxv4i64.nxv4f32( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call @llvm.llrint.nxv8i64.nxv8f32( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %20 = call @llvm.llrint.nxv16i64.nxv16f32( undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %17 = call @llvm.llrint.nxv2i64.nxv2f32( undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %18 = call @llvm.llrint.nxv4i64.nxv4f32( undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for 
instruction: %19 = call @llvm.llrint.nxv8i64.nxv8f32( undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %20 = call @llvm.llrint.nxv16i64.nxv16f32( undef) ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call i64 @llvm.llrint.i64.f64(double undef) ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %24 = call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %25 = call <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %23 = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %24 = call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %25 = call <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double> undef) ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call @llvm.llrint.nxv1i64.nxv1f64( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call @llvm.llrint.nxv2i64.nxv2f64( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %28 = call @llvm.llrint.nxv4i64.nxv4f64( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %29 = call @llvm.llrint.nxv8i64.nxv8f64( undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %27 = call @llvm.llrint.nxv2i64.nxv2f64( undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %28 = call @llvm.llrint.nxv4i64.nxv4f64( undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %29 = call @llvm.llrint.nxv8i64.nxv8f64( undef) ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; call i64 @llvm.llrint.i64.bf16(bfloat undef) @@ -606,6 +606,200 @@ define void @llrint_fp16() { ret void } +define void @lround() { +; CHECK-LABEL: 'lround' +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i32 @llvm.lround.i32.bf16(bfloat poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i32> @llvm.lround.v2i32.v2bf16(<2 x bfloat> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i32> @llvm.lround.v4i32.v4bf16(<4 x bfloat> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x i32> @llvm.lround.v8i32.v8bf16(<8 x bfloat> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %5 = call <16 x i32> @llvm.lround.v16i32.v16bf16(<16 x bfloat> poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %6 = call @llvm.lround.nxv1i32.nxv1bf16( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %7 = call @llvm.lround.nxv2i32.nxv2bf16( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %8 = call @llvm.lround.nxv4i32.nxv4bf16( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %9 = call @llvm.lround.nxv8i32.nxv8bf16( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %10 = call 
@llvm.lround.nxv16i32.nxv16bf16( poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call i32 @llvm.lround.i32.f32(float poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i32> @llvm.lround.v2i32.v2f32(<2 x float> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i32> @llvm.lround.v4i32.v4f32(<4 x float> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %14 = call <8 x i32> @llvm.lround.v8i32.v8f32(<8 x float> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %15 = call <16 x i32> @llvm.lround.v16i32.v16f32(<16 x float> poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %16 = call @llvm.lround.nxv1i32.nxv1f32( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %17 = call @llvm.lround.nxv2i32.nxv2f32( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %18 = call @llvm.lround.nxv4i32.nxv4f32( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %19 = call @llvm.lround.nxv8i32.nxv8f32( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %20 = call @llvm.lround.nxv16i32.nxv16f32( poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call i32 @llvm.lround.i32.f64(double poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i32> @llvm.lround.v2i32.v2f64(<2 x double> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i32> @llvm.lround.v4i32.v4f64(<4 x double> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %24 = call <8 x i32> @llvm.lround.v8i32.v8f64(<8 x double> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %25 = call <16 x i32> @llvm.lround.v16i32.v16f64(<16 x double> poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %26 = call @llvm.lround.nxv1i32.nxv1f64( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %27 = call @llvm.lround.nxv2i32.nxv2f64( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %28 = call @llvm.lround.nxv4i32.nxv4f64( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %29 = call @llvm.lround.nxv8i32.nxv8f64( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %30 = call @llvm.lround.nxv16i32.nxv16f64( poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %31 = call i64 @llvm.lround.i64.bf16(bfloat poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %32 = call <2 x i64> @llvm.lround.v2i64.v2bf16(<2 x bfloat> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %33 = call <4 x i64> @llvm.lround.v4i64.v4bf16(<4 x bfloat> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %34 = call <8 x i64> @llvm.lround.v8i64.v8bf16(<8 x bfloat> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %35 = call <16 x i64> @llvm.lround.v16i64.v16bf16(<16 x bfloat> poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %36 = call @llvm.lround.nxv1i64.nxv1bf16( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %37 = call @llvm.lround.nxv2i64.nxv2bf16( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %38 = call @llvm.lround.nxv4i64.nxv4bf16( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %39 = 
call @llvm.lround.nxv8i64.nxv8bf16( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %40 = call @llvm.lround.nxv16i64.nxv16bf16( poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %41 = call i64 @llvm.lround.i64.f32(float poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %42 = call <2 x i64> @llvm.lround.v2i64.v2f32(<2 x float> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %43 = call <4 x i64> @llvm.lround.v4i64.v4f32(<4 x float> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %44 = call <8 x i64> @llvm.lround.v8i64.v8f32(<8 x float> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %45 = call <16 x i64> @llvm.lround.v16i64.v16f32(<16 x float> poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %46 = call @llvm.lround.nxv1i64.nxv1f32( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %47 = call @llvm.lround.nxv2i64.nxv2f32( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %48 = call @llvm.lround.nxv4i64.nxv4f32( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %49 = call @llvm.lround.nxv8i64.nxv8f32( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %50 = call @llvm.lround.nxv16i64.nxv16f32( poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %51 = call i64 @llvm.lround.i64.f64(double poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %52 = call <2 x i64> @llvm.lround.v2i64.v2f64(<2 x double> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %53 = call <4 x i64> @llvm.lround.v4i64.v4f64(<4 x double> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %54 = call <8 x i64> @llvm.lround.v8i64.v8f64(<8 x double> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %55 = call <16 x i64> @llvm.lround.v16i64.v16f64(<16 x double> poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %56 = call @llvm.lround.nxv1i64.nxv1f64( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %57 = call @llvm.lround.nxv2i64.nxv2f64( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %58 = call @llvm.lround.nxv4i64.nxv4f64( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %59 = call @llvm.lround.nxv8i64.nxv8f64( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %60 = call @llvm.lround.nxv16i64.nxv16f64( poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call i32 @llvm.lround.i32.bf16(bfloat poison) + call <2 x i32> @llvm.lround.v2i32.v2bf16(<2 x bfloat> poison) + call <4 x i32> @llvm.lround.v4i32.v4bf16(<4 x bfloat> poison) + call <8 x i32> @llvm.lround.v8i32.v8bf16(<8 x bfloat> poison) + call <16 x i32> @llvm.lround.v16i32.v16bf16(<16 x bfloat> poison) + call @llvm.lround.nxv1i32.nxv1bf16( poison) + call @llvm.lround.nxv2i32.nxv2bf16( poison) + call @llvm.lround.nxv4i32.nxv4bf16( poison) + call @llvm.lround.nxv8i32.nxv8bf16( poison) + call @llvm.lround.nxv16i32.nxv16bf16( poison) + call i32 @llvm.lround.i32.f32(float poison) + call <2 x i32> @llvm.lround.v2i32.v2f32(<2 x float> poison) + call <4 x i32> @llvm.lround.v4i32.v4f32(<4 x float> poison) + call <8 x i32> @llvm.lround.v8i32.v8f32(<8 x float> poison) + call <16 x i32> @llvm.lround.v16i32.v16f32(<16 x float> poison) + call @llvm.lround.nxv1i32.nxv1f32( 
poison) + call @llvm.lround.nxv2i32.nxv2f32( poison) + call @llvm.lround.nxv4i32.nxv4f32( poison) + call @llvm.lround.nxv8i32.nxv8f32( poison) + call @llvm.lround.nxv16i32.nxv16f32( poison) + call i32 @llvm.lround.i32.f64(double poison) + call <2 x i32> @llvm.lround.v2i32.v2f64(<2 x double> poison) + call <4 x i32> @llvm.lround.v4i32.v4f64(<4 x double> poison) + call <8 x i32> @llvm.lround.v8i32.v8f64(<8 x double> poison) + call <16 x i32> @llvm.lround.v16i32.v16f64(<16 x double> poison) + call @llvm.lround.nxv1i32.nxv1f64( poison) + call @llvm.lround.nxv2i32.nxv2f64( poison) + call @llvm.lround.nxv4i32.nxv4f64( poison) + call @llvm.lround.nxv8i32.nxv8f64( poison) + call @llvm.lround.nxv16i32.nxv16f64( poison) + call i64 @llvm.lround.i64.bf16(bfloat poison) + call <2 x i64> @llvm.lround.v2i64.v2bf16(<2 x bfloat> poison) + call <4 x i64> @llvm.lround.v4i64.v4bf16(<4 x bfloat> poison) + call <8 x i64> @llvm.lround.v8i64.v8bf16(<8 x bfloat> poison) + call <16 x i64> @llvm.lround.v16i64.v16bf16(<16 x bfloat> poison) + call @llvm.lround.nxv1i64.nxv1bf16( poison) + call @llvm.lround.nxv2i64.nxv2bf16( poison) + call @llvm.lround.nxv4i64.nxv4bf16( poison) + call @llvm.lround.nxv8i64.nxv8bf16( poison) + call @llvm.lround.nxv16i64.nxv16bf16( poison) + call i64 @llvm.lround.i64.f32(float poison) + call <2 x i64> @llvm.lround.v2i64.v2f32(<2 x float> poison) + call <4 x i64> @llvm.lround.v4i64.v4f32(<4 x float> poison) + call <8 x i64> @llvm.lround.v8i64.v8f32(<8 x float> poison) + call <16 x i64> @llvm.lround.v16i64.v16f32(<16 x float> poison) + call @llvm.lround.nxv1i64.nxv1f32( poison) + call @llvm.lround.nxv2i64.nxv2f32( poison) + call @llvm.lround.nxv4i64.nxv4f32( poison) + call @llvm.lround.nxv8i64.nxv8f32( poison) + call @llvm.lround.nxv16i64.nxv16f32( poison) + call i64 @llvm.lround.i64.f64(double poison) + call <2 x i64> @llvm.lround.v2i64.v2f64(<2 x double> poison) + call <4 x i64> @llvm.lround.v4i64.v4f64(<4 x double> poison) + call <8 x i64> @llvm.lround.v8i64.v8f64(<8 x double> poison) + call <16 x i64> @llvm.lround.v16i64.v16f64(<16 x double> poison) + call @llvm.lround.nxv1i64.nxv1f64( poison) + call @llvm.lround.nxv2i64.nxv2f64( poison) + call @llvm.lround.nxv4i64.nxv4f64( poison) + call @llvm.lround.nxv8i64.nxv8f64( poison) + call @llvm.lround.nxv16i64.nxv16f64( poison) + ret void +} + +define void @llround() { +; CHECK-LABEL: 'llround' +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i64 @llvm.llround.i64.bf16(bfloat poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i64> @llvm.llround.v2i64.v2bf16(<2 x bfloat> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x i64> @llvm.llround.v4i64.v4bf16(<4 x bfloat> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %4 = call <8 x i64> @llvm.llround.v8i64.v8bf16(<8 x bfloat> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %5 = call <16 x i64> @llvm.llround.v16i64.v16bf16(<16 x bfloat> poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %6 = call @llvm.llround.nxv1i64.nxv1bf16( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %7 = call @llvm.llround.nxv2i64.nxv2bf16( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %8 = call @llvm.llround.nxv4i64.nxv4bf16( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %9 = call @llvm.llround.nxv8i64.nxv8bf16( poison) +; CHECK-NEXT: Cost Model: Invalid cost for 
instruction: %10 = call @llvm.llround.nxv16i64.nxv16bf16( poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call i64 @llvm.llround.i64.f32(float poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i64> @llvm.llround.v2i64.v2f32(<2 x float> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %13 = call <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %14 = call <8 x i64> @llvm.llround.v8i64.v8f32(<8 x float> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %15 = call <16 x i64> @llvm.llround.v16i64.v16f32(<16 x float> poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %16 = call @llvm.llround.nxv1i64.nxv1f32( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %17 = call @llvm.llround.nxv2i64.nxv2f32( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %18 = call @llvm.llround.nxv4i64.nxv4f32( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %19 = call @llvm.llround.nxv8i64.nxv8f32( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %20 = call @llvm.llround.nxv16i64.nxv16f32( poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call i64 @llvm.llround.i64.f64(double poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i64> @llvm.llround.v2i64.v2f64(<2 x double> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %23 = call <4 x i64> @llvm.llround.v4i64.v4f64(<4 x double> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %24 = call <8 x i64> @llvm.llround.v8i64.v8f64(<8 x double> poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %25 = call <16 x i64> @llvm.llround.v16i64.v16f64(<16 x double> poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %26 = call @llvm.llround.nxv1i64.nxv1f64( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %27 = call @llvm.llround.nxv2i64.nxv2f64( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %28 = call @llvm.llround.nxv4i64.nxv4f64( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %29 = call @llvm.llround.nxv8i64.nxv8f64( poison) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %30 = call @llvm.llround.nxv16i64.nxv16f64( poison) +; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call i64 @llvm.llround.i64.bf16(bfloat poison) + call <2 x i64> @llvm.llround.v2i64.v2bf16(<2 x bfloat> poison) + call <4 x i64> @llvm.llround.v4i64.v4bf16(<4 x bfloat> poison) + call <8 x i64> @llvm.llround.v8i64.v8bf16(<8 x bfloat> poison) + call <16 x i64> @llvm.llround.v16i64.v16bf16(<16 x bfloat> poison) + call @llvm.llround.nxv1i64.nxv1bf16( poison) + call @llvm.llround.nxv2i64.nxv2bf16( poison) + call @llvm.llround.nxv4i64.nxv4bf16( poison) + call @llvm.llround.nxv8i64.nxv8bf16( poison) + call @llvm.llround.nxv16i64.nxv16bf16( poison) + call i64 @llvm.llround.i64.f32(float poison) + call <2 x i64> @llvm.llround.v2i64.v2f32(<2 x float> poison) + call <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float> poison) + call <8 x i64> @llvm.llround.v8i64.v8f32(<8 x float> poison) + call <16 x i64> @llvm.llround.v16i64.v16f32(<16 x float> poison) + call @llvm.llround.nxv1i64.nxv1f32( poison) + call @llvm.llround.nxv2i64.nxv2f32( poison) + 
call @llvm.llround.nxv4i64.nxv4f32( poison) + call @llvm.llround.nxv8i64.nxv8f32( poison) + call @llvm.llround.nxv16i64.nxv16f32( poison) + call i64 @llvm.llround.i64.f64(double poison) + call <2 x i64> @llvm.llround.v2i64.v2f64(<2 x double> poison) + call <4 x i64> @llvm.llround.v4i64.v4f64(<4 x double> poison) + call <8 x i64> @llvm.llround.v8i64.v8f64(<8 x double> poison) + call <16 x i64> @llvm.llround.v16i64.v16f64(<16 x double> poison) + call @llvm.llround.nxv1i64.nxv1f64( poison) + call @llvm.llround.nxv2i64.nxv2f64( poison) + call @llvm.llround.nxv4i64.nxv4f64( poison) + call @llvm.llround.nxv8i64.nxv8f64( poison) + call @llvm.llround.nxv16i64.nxv16f64( poison) + ret void +} + define void @nearbyint() { ; CHECK-LABEL: 'nearbyint' ; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %1 = call bfloat @llvm.nearbyint.bf16(bfloat undef) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll new file mode 100644 index 0000000000000..d3e818c22c3ea --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll @@ -0,0 +1,1030 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+v,+f,+d -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32 +; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+d -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64 + +define <1 x i64> @llround_v1i64_v1f32(<1 x float> %x) { +; RV32-LABEL: llround_v1i64_v1f32: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32-NEXT: .cfi_offset ra, -4 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 0(sp) +; RV32-NEXT: sw a1, 4(sp) +; RV32-NEXT: mv a0, sp +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v8, (a0), zero +; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 +; RV32-NEXT: ret +; +; RV64-LABEL: llround_v1i64_v1f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vfmv.f.s fa5, v8 +; RV64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-NEXT: vmv.s.x v8, a0 +; RV64-NEXT: ret + %a = call <1 x i64> @llvm.llround.v1i64.v1f32(<1 x float> %x) + ret <1 x i64> %a +} +declare <1 x i64> @llvm.llround.v1i64.v1f32(<1 x float>) + +define <2 x i64> @llround_v2i64_v2f32(<2 x float> %x) { +; RV32-LABEL: llround_v2i64_v2f32: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -32 +; RV32-NEXT: .cfi_def_cfa_offset 32 +; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32-NEXT: .cfi_offset ra, -4 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: sub sp, sp, a0 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 2 * vlenb +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded 
Spill +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 1 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: add a2, sp, a2 +; RV32-NEXT: addi a2, a2, 16 +; RV32-NEXT: vl1r.v v8, (a2) # vscale x 8-byte Folded Reload +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 32 +; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra +; RV32-NEXT: addi sp, sp, 32 +; RV32-NEXT: .cfi_def_cfa_offset 0 +; RV32-NEXT: ret +; +; RV64-LABEL: llround_v2i64_v2f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma +; RV64-NEXT: vslidedown.vi v9, v8, 1 +; RV64-NEXT: vfmv.f.s fa5, v8 +; RV64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-NEXT: vfmv.f.s fa5, v9 +; RV64-NEXT: fcvt.l.s a1, fa5, rmm +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64-NEXT: vmv.v.x v8, a0 +; RV64-NEXT: vslide1down.vx v8, v8, a1 +; RV64-NEXT: ret + %a = call <2 x i64> @llvm.llround.v2i64.v2f32(<2 x float> %x) + ret <2 x i64> %a +} +declare <2 x i64> @llvm.llround.v2i64.v2f32(<2 x float>) + +define <3 x i64> @llround_v3i64_v3f32(<3 x float> %x) { +; RV32-LABEL: llround_v3i64_v3f32: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -32 +; RV32-NEXT: .cfi_def_cfa_offset 32 +; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32-NEXT: .cfi_offset ra, -4 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a1, a0, 1 +; RV32-NEXT: add a0, a1, a0 +; RV32-NEXT: sub sp, sp, a0 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 3 * vlenb +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 1 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: addi a2, sp, 16 +; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 2 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: addi a2, sp, 16 +; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, 
a1 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 3 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: addi a2, sp, 16 +; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a1, a0, 1 +; RV32-NEXT: add a0, a1, a0 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 32 +; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra +; RV32-NEXT: addi sp, sp, 32 +; RV32-NEXT: .cfi_def_cfa_offset 0 +; RV32-NEXT: ret +; +; RV64-LABEL: llround_v3i64_v3f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v9, v8, 1 +; RV64-NEXT: vfmv.f.s fa5, v8 +; RV64-NEXT: vslidedown.vi v10, v8, 2 +; RV64-NEXT: vslidedown.vi v11, v8, 3 +; RV64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-NEXT: vfmv.f.s fa5, v9 +; RV64-NEXT: fcvt.l.s a1, fa5, rmm +; RV64-NEXT: vfmv.f.s fa5, v10 +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV64-NEXT: vmv.v.x v8, a0 +; RV64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64-NEXT: vfmv.f.s fa5, v11 +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-NEXT: vslide1down.vx v8, v8, a1 +; RV64-NEXT: vslide1down.vx v8, v8, a0 +; RV64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-NEXT: vslide1down.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call <3 x i64> @llvm.llround.v3i64.v3f32(<3 x float> %x) + ret <3 x i64> %a +} +declare <3 x i64> @llvm.llround.v3i64.v3f32(<3 x float>) + +define <4 x i64> @llround_v4i64_v4f32(<4 x float> %x) { +; RV32-LABEL: llround_v4i64_v4f32: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -32 +; RV32-NEXT: .cfi_def_cfa_offset 32 +; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32-NEXT: .cfi_offset ra, -4 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a1, a0, 1 +; RV32-NEXT: add a0, a1, a0 +; RV32-NEXT: sub sp, sp, a0 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 3 * vlenb +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 1 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: addi a2, sp, 16 +; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill 
+; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 2 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: addi a2, sp, 16 +; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 3 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: addi a2, sp, 16 +; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a1, a0, 1 +; RV32-NEXT: add a0, a1, a0 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 32 +; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra +; RV32-NEXT: addi sp, sp, 32 +; RV32-NEXT: .cfi_def_cfa_offset 0 +; RV32-NEXT: ret +; +; RV64-LABEL: llround_v4i64_v4f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v9, v8, 1 +; RV64-NEXT: vfmv.f.s fa5, v8 +; RV64-NEXT: vslidedown.vi v10, v8, 2 +; RV64-NEXT: vslidedown.vi v11, v8, 3 +; RV64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-NEXT: vfmv.f.s fa5, v9 +; RV64-NEXT: fcvt.l.s a1, fa5, rmm +; RV64-NEXT: vfmv.f.s fa5, v10 +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV64-NEXT: vmv.v.x v8, a0 +; RV64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64-NEXT: vfmv.f.s fa5, v11 +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-NEXT: vslide1down.vx v8, v8, a1 +; RV64-NEXT: vslide1down.vx v8, v8, a0 +; RV64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-NEXT: vslide1down.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float> %x) + ret <4 x i64> %a +} +declare <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float>) + +define <8 x i64> @llround_v8i64_v8f32(<8 x float> %x) { +; RV32-LABEL: llround_v8i64_v8f32: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -208 +; RV32-NEXT: .cfi_def_cfa_offset 208 +; RV32-NEXT: sw ra, 204(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 200(sp) # 4-byte Folded Spill +; RV32-NEXT: .cfi_offset ra, -4 +; RV32-NEXT: .cfi_offset s0, -8 +; RV32-NEXT: addi s0, sp, 208 +; RV32-NEXT: .cfi_def_cfa s0, 0 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: sub sp, sp, a0 +; RV32-NEXT: andi sp, sp, -64 +; RV32-NEXT: addi a0, sp, 192 +; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 64(sp) +; RV32-NEXT: sw a1, 68(sp) +; RV32-NEXT: addi a0, sp, 192 +; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 7 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 120(sp) +; RV32-NEXT: sw a1, 124(sp) +; RV32-NEXT: addi 
a0, sp, 192 +; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 6 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 112(sp) +; RV32-NEXT: sw a1, 116(sp) +; RV32-NEXT: addi a0, sp, 192 +; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 5 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 104(sp) +; RV32-NEXT: sw a1, 108(sp) +; RV32-NEXT: addi a0, sp, 192 +; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 4 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 96(sp) +; RV32-NEXT: sw a1, 100(sp) +; RV32-NEXT: addi a0, sp, 192 +; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 3 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 88(sp) +; RV32-NEXT: sw a1, 92(sp) +; RV32-NEXT: addi a0, sp, 192 +; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 2 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 80(sp) +; RV32-NEXT: sw a1, 84(sp) +; RV32-NEXT: addi a0, sp, 192 +; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 1 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 72(sp) +; RV32-NEXT: sw a1, 76(sp) +; RV32-NEXT: addi a0, sp, 64 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vle32.v v8, (a0) +; RV32-NEXT: addi sp, s0, -208 +; RV32-NEXT: .cfi_def_cfa sp, 208 +; RV32-NEXT: lw ra, 204(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 200(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra +; RV32-NEXT: .cfi_restore s0 +; RV32-NEXT: addi sp, sp, 208 +; RV32-NEXT: .cfi_def_cfa_offset 0 +; RV32-NEXT: ret +; +; RV64-LABEL: llround_v8i64_v8f32: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -128 +; RV64-NEXT: .cfi_def_cfa_offset 128 +; RV64-NEXT: sd ra, 120(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s0, 112(sp) # 8-byte Folded Spill +; RV64-NEXT: .cfi_offset ra, -8 +; RV64-NEXT: .cfi_offset s0, -16 +; RV64-NEXT: addi s0, sp, 128 +; RV64-NEXT: .cfi_def_cfa s0, 0 +; RV64-NEXT: andi sp, sp, -64 +; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma +; RV64-NEXT: vfmv.f.s fa5, v8 +; RV64-NEXT: vslidedown.vi v10, v8, 7 +; RV64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-NEXT: vfmv.f.s fa5, v10 +; RV64-NEXT: vslidedown.vi v10, v8, 6 +; RV64-NEXT: fcvt.l.s a1, fa5, rmm +; RV64-NEXT: vfmv.f.s fa5, v10 +; RV64-NEXT: vslidedown.vi v10, v8, 5 +; RV64-NEXT: fcvt.l.s a2, fa5, rmm +; RV64-NEXT: vfmv.f.s fa5, v10 +; RV64-NEXT: vslidedown.vi v10, v8, 4 +; RV64-NEXT: fcvt.l.s a3, fa5, rmm +; RV64-NEXT: vfmv.f.s fa5, v10 +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v9, v8, 3 +; RV64-NEXT: vslidedown.vi v10, v8, 2 +; RV64-NEXT: vslidedown.vi v8, v8, 1 +; RV64-NEXT: fcvt.l.s a4, fa5, rmm +; RV64-NEXT: vfmv.f.s fa5, v9 +; RV64-NEXT: fcvt.l.s a5, fa5, rmm +; RV64-NEXT: vfmv.f.s fa5, v10 +; RV64-NEXT: fcvt.l.s a6, fa5, rmm +; RV64-NEXT: vfmv.f.s fa5, v8 +; RV64-NEXT: sd a4, 32(sp) +; RV64-NEXT: sd a3, 40(sp) +; RV64-NEXT: sd a2, 48(sp) +; RV64-NEXT: sd a1, 
56(sp) +; RV64-NEXT: fcvt.l.s a1, fa5, rmm +; RV64-NEXT: sd a0, 0(sp) +; RV64-NEXT: sd a1, 8(sp) +; RV64-NEXT: sd a6, 16(sp) +; RV64-NEXT: sd a5, 24(sp) +; RV64-NEXT: mv a0, sp +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV64-NEXT: vle64.v v8, (a0) +; RV64-NEXT: addi sp, s0, -128 +; RV64-NEXT: .cfi_def_cfa sp, 128 +; RV64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s0, 112(sp) # 8-byte Folded Reload +; RV64-NEXT: .cfi_restore ra +; RV64-NEXT: .cfi_restore s0 +; RV64-NEXT: addi sp, sp, 128 +; RV64-NEXT: .cfi_def_cfa_offset 0 +; RV64-NEXT: ret + %a = call <8 x i64> @llvm.llround.v8i64.v8f32(<8 x float> %x) + ret <8 x i64> %a +} +declare <8 x i64> @llvm.llround.v8i64.v8f32(<8 x float>) + +define <16 x i64> @llround_v16i64_v16f32(<16 x float> %x) { +; RV32-LABEL: llround_v16i64_v16f32: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -400 +; RV32-NEXT: .cfi_def_cfa_offset 400 +; RV32-NEXT: sw ra, 396(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 392(sp) # 4-byte Folded Spill +; RV32-NEXT: .cfi_offset ra, -4 +; RV32-NEXT: .cfi_offset s0, -8 +; RV32-NEXT: addi s0, sp, 400 +; RV32-NEXT: .cfi_def_cfa s0, 0 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 2 +; RV32-NEXT: sub sp, sp, a0 +; RV32-NEXT: andi sp, sp, -128 +; RV32-NEXT: addi a0, sp, 384 +; RV32-NEXT: vs4r.v v8, (a0) # vscale x 32-byte Folded Spill +; RV32-NEXT: addi a0, sp, 64 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vse32.v v8, (a0) +; RV32-NEXT: flw fa0, 124(sp) +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 248(sp) +; RV32-NEXT: sw a1, 252(sp) +; RV32-NEXT: flw fa0, 120(sp) +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 240(sp) +; RV32-NEXT: sw a1, 244(sp) +; RV32-NEXT: flw fa0, 116(sp) +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 232(sp) +; RV32-NEXT: sw a1, 236(sp) +; RV32-NEXT: flw fa0, 112(sp) +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 224(sp) +; RV32-NEXT: sw a1, 228(sp) +; RV32-NEXT: flw fa0, 108(sp) +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 216(sp) +; RV32-NEXT: sw a1, 220(sp) +; RV32-NEXT: flw fa0, 104(sp) +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 208(sp) +; RV32-NEXT: sw a1, 212(sp) +; RV32-NEXT: flw fa0, 100(sp) +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 200(sp) +; RV32-NEXT: sw a1, 204(sp) +; RV32-NEXT: flw fa0, 96(sp) +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 192(sp) +; RV32-NEXT: sw a1, 196(sp) +; RV32-NEXT: addi a0, sp, 384 +; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 128(sp) +; RV32-NEXT: sw a1, 132(sp) +; RV32-NEXT: addi a0, sp, 384 +; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 3 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 152(sp) +; RV32-NEXT: sw a1, 156(sp) +; RV32-NEXT: addi a0, sp, 384 +; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 2 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 144(sp) +; RV32-NEXT: sw a1, 148(sp) +; RV32-NEXT: addi a0, sp, 384 +; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 1 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 136(sp) +; RV32-NEXT: sw a1, 140(sp) +; 
RV32-NEXT: addi a0, sp, 384 +; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 7 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 184(sp) +; RV32-NEXT: sw a1, 188(sp) +; RV32-NEXT: addi a0, sp, 384 +; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 6 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 176(sp) +; RV32-NEXT: sw a1, 180(sp) +; RV32-NEXT: addi a0, sp, 384 +; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 5 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 168(sp) +; RV32-NEXT: sw a1, 172(sp) +; RV32-NEXT: addi a0, sp, 384 +; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 4 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llroundf +; RV32-NEXT: sw a0, 160(sp) +; RV32-NEXT: sw a1, 164(sp) +; RV32-NEXT: li a0, 32 +; RV32-NEXT: addi a1, sp, 128 +; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV32-NEXT: vle32.v v8, (a1) +; RV32-NEXT: addi sp, s0, -400 +; RV32-NEXT: .cfi_def_cfa sp, 400 +; RV32-NEXT: lw ra, 396(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 392(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra +; RV32-NEXT: .cfi_restore s0 +; RV32-NEXT: addi sp, sp, 400 +; RV32-NEXT: .cfi_def_cfa_offset 0 +; RV32-NEXT: ret +; +; RV64-LABEL: llround_v16i64_v16f32: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -384 +; RV64-NEXT: .cfi_def_cfa_offset 384 +; RV64-NEXT: sd ra, 376(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; RV64-NEXT: .cfi_offset ra, -8 +; RV64-NEXT: .cfi_offset s0, -16 +; RV64-NEXT: addi s0, sp, 384 +; RV64-NEXT: .cfi_def_cfa s0, 0 +; RV64-NEXT: andi sp, sp, -128 +; RV64-NEXT: addi a0, sp, 64 +; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: flw fa5, 124(sp) +; RV64-NEXT: vfmv.f.s fa4, v8 +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v10, v8, 3 +; RV64-NEXT: vslidedown.vi v12, v8, 2 +; RV64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-NEXT: sd a0, 248(sp) +; RV64-NEXT: flw fa5, 120(sp) +; RV64-NEXT: vslidedown.vi v13, v8, 1 +; RV64-NEXT: fcvt.l.s a0, fa4, rmm +; RV64-NEXT: vfmv.f.s fa4, v10 +; RV64-NEXT: fcvt.l.s a1, fa5, rmm +; RV64-NEXT: sd a1, 240(sp) +; RV64-NEXT: flw fa5, 116(sp) +; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma +; RV64-NEXT: vslidedown.vi v10, v8, 7 +; RV64-NEXT: fcvt.l.s a1, fa4, rmm +; RV64-NEXT: vfmv.f.s fa4, v12 +; RV64-NEXT: fcvt.l.s a2, fa5, rmm +; RV64-NEXT: sd a2, 232(sp) +; RV64-NEXT: flw fa5, 112(sp) +; RV64-NEXT: fcvt.l.s a2, fa4, rmm +; RV64-NEXT: vfmv.f.s fa4, v13 +; RV64-NEXT: vslidedown.vi v12, v8, 6 +; RV64-NEXT: fcvt.l.s a3, fa5, rmm +; RV64-NEXT: sd a3, 224(sp) +; RV64-NEXT: flw fa5, 108(sp) +; RV64-NEXT: fcvt.l.s a3, fa4, rmm +; RV64-NEXT: vfmv.f.s fa4, v10 +; RV64-NEXT: vslidedown.vi v10, v8, 5 +; RV64-NEXT: fcvt.l.s a4, fa5, rmm +; RV64-NEXT: sd a4, 216(sp) +; RV64-NEXT: flw fa5, 104(sp) +; RV64-NEXT: fcvt.l.s a4, fa4, rmm +; RV64-NEXT: vfmv.f.s fa4, v12 +; RV64-NEXT: fcvt.l.s a5, fa4, rmm +; RV64-NEXT: fcvt.l.s a6, fa5, rmm +; RV64-NEXT: sd a6, 208(sp) +; RV64-NEXT: flw fa5, 100(sp) +; RV64-NEXT: vfmv.f.s fa4, v10 +; RV64-NEXT: fcvt.l.s a6, fa4, rmm +; 
RV64-NEXT: vslidedown.vi v8, v8, 4 +; RV64-NEXT: fcvt.l.s a7, fa5, rmm +; RV64-NEXT: vfmv.f.s fa5, v8 +; RV64-NEXT: sd a7, 200(sp) +; RV64-NEXT: fcvt.l.s a7, fa5, rmm +; RV64-NEXT: flw fa5, 96(sp) +; RV64-NEXT: sd a0, 128(sp) +; RV64-NEXT: sd a3, 136(sp) +; RV64-NEXT: sd a2, 144(sp) +; RV64-NEXT: sd a1, 152(sp) +; RV64-NEXT: sd a7, 160(sp) +; RV64-NEXT: sd a6, 168(sp) +; RV64-NEXT: sd a5, 176(sp) +; RV64-NEXT: sd a4, 184(sp) +; RV64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-NEXT: sd a0, 192(sp) +; RV64-NEXT: addi a0, sp, 128 +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV64-NEXT: vle64.v v8, (a0) +; RV64-NEXT: addi sp, s0, -384 +; RV64-NEXT: .cfi_def_cfa sp, 384 +; RV64-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; RV64-NEXT: .cfi_restore ra +; RV64-NEXT: .cfi_restore s0 +; RV64-NEXT: addi sp, sp, 384 +; RV64-NEXT: .cfi_def_cfa_offset 0 +; RV64-NEXT: ret + %a = call <16 x i64> @llvm.llround.v16i64.v16f32(<16 x float> %x) + ret <16 x i64> %a +} +declare <16 x i64> @llvm.llround.v16i64.v16f32(<16 x float>) + +define <1 x i64> @llround_v1i64_v1f64(<1 x double> %x) { +; RV32-LABEL: llround_v1i64_v1f64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32-NEXT: .cfi_offset ra, -4 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llround +; RV32-NEXT: sw a0, 0(sp) +; RV32-NEXT: sw a1, 4(sp) +; RV32-NEXT: mv a0, sp +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v8, (a0), zero +; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 +; RV32-NEXT: ret +; +; RV64-LABEL: llround_v1i64_v1f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vfmv.f.s fa5, v8 +; RV64-NEXT: fcvt.l.d a0, fa5, rmm +; RV64-NEXT: vmv.s.x v8, a0 +; RV64-NEXT: ret + %a = call <1 x i64> @llvm.llround.v1i64.v1f64(<1 x double> %x) + ret <1 x i64> %a +} +declare <1 x i64> @llvm.llround.v1i64.v1f64(<1 x double>) + +define <2 x i64> @llround_v2i64_v2f64(<2 x double> %x) { +; RV32-LABEL: llround_v2i64_v2f64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -32 +; RV32-NEXT: .cfi_def_cfa_offset 32 +; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32-NEXT: .cfi_offset ra, -4 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: sub sp, sp, a0 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 2 * vlenb +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llround +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 1 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llround +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: add a2, sp, a2 +; RV32-NEXT: addi a2, a2, 16 +; RV32-NEXT: vl1r.v v8, (a2) # vscale x 8-byte Folded Reload +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vslide1down.vx 
v8, v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 32 +; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra +; RV32-NEXT: addi sp, sp, 32 +; RV32-NEXT: .cfi_def_cfa_offset 0 +; RV32-NEXT: ret +; +; RV64-LABEL: llround_v2i64_v2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64-NEXT: vslidedown.vi v9, v8, 1 +; RV64-NEXT: vfmv.f.s fa5, v8 +; RV64-NEXT: fcvt.l.d a0, fa5, rmm +; RV64-NEXT: vfmv.f.s fa5, v9 +; RV64-NEXT: fcvt.l.d a1, fa5, rmm +; RV64-NEXT: vmv.v.x v8, a0 +; RV64-NEXT: vslide1down.vx v8, v8, a1 +; RV64-NEXT: ret + %a = call <2 x i64> @llvm.llround.v2i64.v2f64(<2 x double> %x) + ret <2 x i64> %a +} +declare <2 x i64> @llvm.llround.v2i64.v2f64(<2 x double>) + +define <4 x i64> @llround_v4i64_v4f64(<4 x double> %x) { +; RV32-LABEL: llround_v4i64_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -32 +; RV32-NEXT: .cfi_def_cfa_offset 32 +; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32-NEXT: .cfi_offset ra, -4 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 2 +; RV32-NEXT: sub sp, sp, a0 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 4 * vlenb +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llround +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 1 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llround +; RV32-NEXT: addi a2, sp, 16 +; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 2 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llround +; RV32-NEXT: addi a2, sp, 16 +; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 3 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llround +; RV32-NEXT: addi a2, sp, 16 +; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload +; RV32-NEXT: vsetivli zero, 8, 
e32, m2, ta, ma +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 2 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 32 +; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra +; RV32-NEXT: addi sp, sp, 32 +; RV32-NEXT: .cfi_def_cfa_offset 0 +; RV32-NEXT: ret +; +; RV64-LABEL: llround_v4i64_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vslidedown.vi v12, v8, 1 +; RV64-NEXT: vfmv.f.s fa5, v8 +; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; RV64-NEXT: vslidedown.vi v10, v8, 2 +; RV64-NEXT: vslidedown.vi v8, v8, 3 +; RV64-NEXT: fcvt.l.d a0, fa5, rmm +; RV64-NEXT: vfmv.f.s fa5, v12 +; RV64-NEXT: fcvt.l.d a1, fa5, rmm +; RV64-NEXT: vfmv.f.s fa5, v10 +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: fcvt.l.d a0, fa5, rmm +; RV64-NEXT: vfmv.f.s fa5, v8 +; RV64-NEXT: vslide1down.vx v8, v10, a1 +; RV64-NEXT: vslide1down.vx v8, v8, a0 +; RV64-NEXT: fcvt.l.d a0, fa5, rmm +; RV64-NEXT: vslide1down.vx v8, v8, a0 +; RV64-NEXT: ret + %a = call <4 x i64> @llvm.llround.v4i64.v4f64(<4 x double> %x) + ret <4 x i64> %a +} +declare <4 x i64> @llvm.llround.v4i64.v4f64(<4 x double>) + +define <8 x i64> @llround_v8i64_v8f64(<8 x double> %x) { +; RV32-LABEL: llround_v8i64_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -272 +; RV32-NEXT: .cfi_def_cfa_offset 272 +; RV32-NEXT: sw ra, 268(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 264(sp) # 4-byte Folded Spill +; RV32-NEXT: .cfi_offset ra, -4 +; RV32-NEXT: .cfi_offset s0, -8 +; RV32-NEXT: addi s0, sp, 272 +; RV32-NEXT: .cfi_def_cfa s0, 0 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 2 +; RV32-NEXT: sub sp, sp, a0 +; RV32-NEXT: andi sp, sp, -64 +; RV32-NEXT: addi a0, sp, 256 +; RV32-NEXT: vs4r.v v8, (a0) # vscale x 32-byte Folded Spill +; RV32-NEXT: addi a0, sp, 64 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV32-NEXT: vse64.v v8, (a0) +; RV32-NEXT: fld fa0, 120(sp) +; RV32-NEXT: call llround +; RV32-NEXT: sw a0, 184(sp) +; RV32-NEXT: sw a1, 188(sp) +; RV32-NEXT: fld fa0, 112(sp) +; RV32-NEXT: call llround +; RV32-NEXT: sw a0, 176(sp) +; RV32-NEXT: sw a1, 180(sp) +; RV32-NEXT: fld fa0, 104(sp) +; RV32-NEXT: call llround +; RV32-NEXT: sw a0, 168(sp) +; RV32-NEXT: sw a1, 172(sp) +; RV32-NEXT: fld fa0, 96(sp) +; RV32-NEXT: call llround +; RV32-NEXT: sw a0, 160(sp) +; RV32-NEXT: sw a1, 164(sp) +; RV32-NEXT: addi a0, sp, 256 +; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llround +; RV32-NEXT: sw a0, 128(sp) +; RV32-NEXT: sw a1, 132(sp) +; RV32-NEXT: addi a0, sp, 256 +; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 1 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llround +; RV32-NEXT: sw a0, 136(sp) +; RV32-NEXT: sw a1, 140(sp) +; RV32-NEXT: addi a0, sp, 256 +; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 3 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: call llround +; RV32-NEXT: sw a0, 152(sp) +; RV32-NEXT: sw a1, 156(sp) +; RV32-NEXT: addi a0, sp, 256 +; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload +; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 2 +; RV32-NEXT: vfmv.f.s fa0, v8 +; 
RV32-NEXT: call llround +; RV32-NEXT: sw a0, 144(sp) +; RV32-NEXT: sw a1, 148(sp) +; RV32-NEXT: addi a0, sp, 128 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vle32.v v8, (a0) +; RV32-NEXT: addi sp, s0, -272 +; RV32-NEXT: .cfi_def_cfa sp, 272 +; RV32-NEXT: lw ra, 268(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 264(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra +; RV32-NEXT: .cfi_restore s0 +; RV32-NEXT: addi sp, sp, 272 +; RV32-NEXT: .cfi_def_cfa_offset 0 +; RV32-NEXT: ret +; +; RV64-LABEL: llround_v8i64_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -192 +; RV64-NEXT: .cfi_def_cfa_offset 192 +; RV64-NEXT: sd ra, 184(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s0, 176(sp) # 8-byte Folded Spill +; RV64-NEXT: .cfi_offset ra, -8 +; RV64-NEXT: .cfi_offset s0, -16 +; RV64-NEXT: addi s0, sp, 192 +; RV64-NEXT: .cfi_def_cfa s0, 0 +; RV64-NEXT: andi sp, sp, -64 +; RV64-NEXT: mv a0, sp +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: fld fa5, 56(sp) +; RV64-NEXT: vfmv.f.s fa4, v8 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vslidedown.vi v10, v8, 1 +; RV64-NEXT: fcvt.l.d a0, fa4, rmm +; RV64-NEXT: fcvt.l.d a1, fa5, rmm +; RV64-NEXT: sd a1, 120(sp) +; RV64-NEXT: fld fa5, 48(sp) +; RV64-NEXT: vfmv.f.s fa4, v10 +; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; RV64-NEXT: vslidedown.vi v10, v8, 3 +; RV64-NEXT: fcvt.l.d a1, fa4, rmm +; RV64-NEXT: fcvt.l.d a2, fa5, rmm +; RV64-NEXT: sd a2, 112(sp) +; RV64-NEXT: fld fa5, 40(sp) +; RV64-NEXT: vfmv.f.s fa4, v10 +; RV64-NEXT: fcvt.l.d a2, fa4, rmm +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: fcvt.l.d a3, fa5, rmm +; RV64-NEXT: vfmv.f.s fa5, v8 +; RV64-NEXT: sd a3, 104(sp) +; RV64-NEXT: fcvt.l.d a3, fa5, rmm +; RV64-NEXT: fld fa5, 32(sp) +; RV64-NEXT: sd a0, 64(sp) +; RV64-NEXT: sd a1, 72(sp) +; RV64-NEXT: sd a3, 80(sp) +; RV64-NEXT: sd a2, 88(sp) +; RV64-NEXT: fcvt.l.d a0, fa5, rmm +; RV64-NEXT: sd a0, 96(sp) +; RV64-NEXT: addi a0, sp, 64 +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV64-NEXT: vle64.v v8, (a0) +; RV64-NEXT: addi sp, s0, -192 +; RV64-NEXT: .cfi_def_cfa sp, 192 +; RV64-NEXT: ld ra, 184(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s0, 176(sp) # 8-byte Folded Reload +; RV64-NEXT: .cfi_restore ra +; RV64-NEXT: .cfi_restore s0 +; RV64-NEXT: addi sp, sp, 192 +; RV64-NEXT: .cfi_def_cfa_offset 0 +; RV64-NEXT: ret + %a = call <8 x i64> @llvm.llround.v8i64.v8f64(<8 x double> %x) + ret <8 x i64> %a +} +declare <8 x i64> @llvm.llround.v8i64.v8f64(<8 x double>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll new file mode 100644 index 0000000000000..ca2b221828990 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll @@ -0,0 +1,926 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+f,+d \ +; RUN: -target-abi=ilp32d -verify-machineinstrs | FileCheck %s --check-prefix=RV32 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d \ +; RUN: -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d \ +; RUN: -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i64 + +define <1 x iXLen> @lround_v1f32(<1 x float> %x) { +; RV32-LABEL: lround_v1f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vfmv.f.s fa5, v8 +; 
RV32-NEXT: fcvt.w.s a0, fa5, rmm +; RV32-NEXT: vmv.s.x v8, a0 +; RV32-NEXT: ret +; +; RV64-i32-LABEL: lround_v1f32: +; RV64-i32: # %bb.0: +; RV64-i32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-i32-NEXT: vfmv.f.s fa5, v8 +; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm +; RV64-i32-NEXT: vmv.s.x v8, a0 +; RV64-i32-NEXT: ret +; +; RV64-i64-LABEL: lround_v1f32: +; RV64-i64: # %bb.0: +; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-i64-NEXT: vfmv.f.s fa5, v8 +; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-i64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-i64-NEXT: vmv.s.x v8, a0 +; RV64-i64-NEXT: ret + %a = call <1 x iXLen> @llvm.lround.v1iXLen.v1f32(<1 x float> %x) + ret <1 x iXLen> %a +} +declare <1 x iXLen> @llvm.lround.v1iXLen.v1f32(<1 x float>) + +define <2 x iXLen> @lround_v2f32(<2 x float> %x) { +; RV32-LABEL: lround_v2f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 1 +; RV32-NEXT: vfmv.f.s fa5, v8 +; RV32-NEXT: fcvt.w.s a0, fa5, rmm +; RV32-NEXT: vfmv.f.s fa5, v9 +; RV32-NEXT: fcvt.w.s a1, fa5, rmm +; RV32-NEXT: vmv.v.x v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: ret +; +; RV64-i32-LABEL: lround_v2f32: +; RV64-i32: # %bb.0: +; RV64-i32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; RV64-i32-NEXT: vslidedown.vi v9, v8, 1 +; RV64-i32-NEXT: vfmv.f.s fa5, v8 +; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm +; RV64-i32-NEXT: vfmv.f.s fa5, v9 +; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm +; RV64-i32-NEXT: vmv.v.x v8, a0 +; RV64-i32-NEXT: vslide1down.vx v8, v8, a1 +; RV64-i32-NEXT: ret +; +; RV64-i64-LABEL: lround_v2f32: +; RV64-i64: # %bb.0: +; RV64-i64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma +; RV64-i64-NEXT: vslidedown.vi v9, v8, 1 +; RV64-i64-NEXT: vfmv.f.s fa5, v8 +; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-i64-NEXT: vfmv.f.s fa5, v9 +; RV64-i64-NEXT: fcvt.l.s a1, fa5, rmm +; RV64-i64-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64-i64-NEXT: vmv.v.x v8, a0 +; RV64-i64-NEXT: vslide1down.vx v8, v8, a1 +; RV64-i64-NEXT: ret + %a = call <2 x iXLen> @llvm.lround.v2iXLen.v2f32(<2 x float> %x) + ret <2 x iXLen> %a +} +declare <2 x iXLen> @llvm.lround.v2iXLen.v2f32(<2 x float>) + +define <3 x iXLen> @lround_v3f32(<3 x float> %x) { +; RV32-LABEL: lround_v3f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 1 +; RV32-NEXT: vfmv.f.s fa5, v8 +; RV32-NEXT: vslidedown.vi v10, v8, 2 +; RV32-NEXT: vslidedown.vi v8, v8, 3 +; RV32-NEXT: fcvt.w.s a0, fa5, rmm +; RV32-NEXT: vfmv.f.s fa5, v9 +; RV32-NEXT: fcvt.w.s a1, fa5, rmm +; RV32-NEXT: vfmv.f.s fa5, v10 +; RV32-NEXT: vmv.v.x v9, a0 +; RV32-NEXT: fcvt.w.s a0, fa5, rmm +; RV32-NEXT: vfmv.f.s fa5, v8 +; RV32-NEXT: vslide1down.vx v8, v9, a1 +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: fcvt.w.s a0, fa5, rmm +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-i32-LABEL: lround_v3f32: +; RV64-i32: # %bb.0: +; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV64-i32-NEXT: vslidedown.vi v9, v8, 1 +; RV64-i32-NEXT: vfmv.f.s fa5, v8 +; RV64-i32-NEXT: vslidedown.vi v10, v8, 2 +; RV64-i32-NEXT: vslidedown.vi v8, v8, 3 +; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm +; RV64-i32-NEXT: vfmv.f.s fa5, v9 +; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm +; RV64-i32-NEXT: vfmv.f.s fa5, v10 +; RV64-i32-NEXT: vmv.v.x v9, a0 +; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm +; RV64-i32-NEXT: vfmv.f.s fa5, v8 +; RV64-i32-NEXT: vslide1down.vx v8, v9, a1 +; RV64-i32-NEXT: vslide1down.vx v8, v8, a0 +; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm +; 
RV64-i32-NEXT: vslide1down.vx v8, v8, a0 +; RV64-i32-NEXT: ret +; +; RV64-i64-LABEL: lround_v3f32: +; RV64-i64: # %bb.0: +; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-i64-NEXT: vslidedown.vi v9, v8, 1 +; RV64-i64-NEXT: vfmv.f.s fa5, v8 +; RV64-i64-NEXT: vslidedown.vi v10, v8, 2 +; RV64-i64-NEXT: vslidedown.vi v11, v8, 3 +; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-i64-NEXT: vfmv.f.s fa5, v9 +; RV64-i64-NEXT: fcvt.l.s a1, fa5, rmm +; RV64-i64-NEXT: vfmv.f.s fa5, v10 +; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV64-i64-NEXT: vmv.v.x v8, a0 +; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64-i64-NEXT: vfmv.f.s fa5, v11 +; RV64-i64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-i64-NEXT: vslide1down.vx v8, v8, a1 +; RV64-i64-NEXT: vslide1down.vx v8, v8, a0 +; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-i64-NEXT: vslide1down.vx v8, v8, a0 +; RV64-i64-NEXT: ret + %a = call <3 x iXLen> @llvm.lround.v3iXLen.v3f32(<3 x float> %x) + ret <3 x iXLen> %a +} +declare <3 x iXLen> @llvm.lround.v3iXLen.v3f32(<3 x float>) + +define <4 x iXLen> @lround_v4f32(<4 x float> %x) { +; RV32-LABEL: lround_v4f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 1 +; RV32-NEXT: vfmv.f.s fa5, v8 +; RV32-NEXT: vslidedown.vi v10, v8, 2 +; RV32-NEXT: vslidedown.vi v8, v8, 3 +; RV32-NEXT: fcvt.w.s a0, fa5, rmm +; RV32-NEXT: vfmv.f.s fa5, v9 +; RV32-NEXT: fcvt.w.s a1, fa5, rmm +; RV32-NEXT: vfmv.f.s fa5, v10 +; RV32-NEXT: vmv.v.x v9, a0 +; RV32-NEXT: fcvt.w.s a0, fa5, rmm +; RV32-NEXT: vfmv.f.s fa5, v8 +; RV32-NEXT: vslide1down.vx v8, v9, a1 +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: fcvt.w.s a0, fa5, rmm +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-i32-LABEL: lround_v4f32: +; RV64-i32: # %bb.0: +; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV64-i32-NEXT: vslidedown.vi v9, v8, 1 +; RV64-i32-NEXT: vfmv.f.s fa5, v8 +; RV64-i32-NEXT: vslidedown.vi v10, v8, 2 +; RV64-i32-NEXT: vslidedown.vi v8, v8, 3 +; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm +; RV64-i32-NEXT: vfmv.f.s fa5, v9 +; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm +; RV64-i32-NEXT: vfmv.f.s fa5, v10 +; RV64-i32-NEXT: vmv.v.x v9, a0 +; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm +; RV64-i32-NEXT: vfmv.f.s fa5, v8 +; RV64-i32-NEXT: vslide1down.vx v8, v9, a1 +; RV64-i32-NEXT: vslide1down.vx v8, v8, a0 +; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm +; RV64-i32-NEXT: vslide1down.vx v8, v8, a0 +; RV64-i32-NEXT: ret +; +; RV64-i64-LABEL: lround_v4f32: +; RV64-i64: # %bb.0: +; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-i64-NEXT: vslidedown.vi v9, v8, 1 +; RV64-i64-NEXT: vfmv.f.s fa5, v8 +; RV64-i64-NEXT: vslidedown.vi v10, v8, 2 +; RV64-i64-NEXT: vslidedown.vi v11, v8, 3 +; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-i64-NEXT: vfmv.f.s fa5, v9 +; RV64-i64-NEXT: fcvt.l.s a1, fa5, rmm +; RV64-i64-NEXT: vfmv.f.s fa5, v10 +; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV64-i64-NEXT: vmv.v.x v8, a0 +; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64-i64-NEXT: vfmv.f.s fa5, v11 +; RV64-i64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-i64-NEXT: vslide1down.vx v8, v8, a1 +; RV64-i64-NEXT: vslide1down.vx v8, v8, a0 +; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-i64-NEXT: vslide1down.vx v8, v8, a0 +; RV64-i64-NEXT: ret + %a = call <4 x iXLen> @llvm.lround.v4iXLen.v4f32(<4 x float> %x) + ret <4 x iXLen> %a +} +declare <4 x iXLen> 
@llvm.lround.v4iXLen.v4f32(<4 x float>) + +define <8 x iXLen> @lround_v8f32(<8 x float> %x) { +; RV32-LABEL: lround_v8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v10, v8, 1 +; RV32-NEXT: vfmv.f.s fa5, v8 +; RV32-NEXT: vslidedown.vi v11, v8, 2 +; RV32-NEXT: vslidedown.vi v12, v8, 3 +; RV32-NEXT: fcvt.w.s a0, fa5, rmm +; RV32-NEXT: vfmv.f.s fa5, v10 +; RV32-NEXT: fcvt.w.s a1, fa5, rmm +; RV32-NEXT: vfmv.f.s fa5, v11 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a0 +; RV32-NEXT: fcvt.w.s a0, fa5, rmm +; RV32-NEXT: vfmv.f.s fa5, v12 +; RV32-NEXT: vslidedown.vi v12, v8, 4 +; RV32-NEXT: vslide1down.vx v10, v10, a1 +; RV32-NEXT: fcvt.w.s a1, fa5, rmm +; RV32-NEXT: vfmv.f.s fa5, v12 +; RV32-NEXT: vslidedown.vi v12, v8, 5 +; RV32-NEXT: vslide1down.vx v10, v10, a0 +; RV32-NEXT: fcvt.w.s a0, fa5, rmm +; RV32-NEXT: vfmv.f.s fa5, v12 +; RV32-NEXT: vslidedown.vi v12, v8, 6 +; RV32-NEXT: vslidedown.vi v8, v8, 7 +; RV32-NEXT: vslide1down.vx v10, v10, a1 +; RV32-NEXT: fcvt.w.s a1, fa5, rmm +; RV32-NEXT: vfmv.f.s fa5, v12 +; RV32-NEXT: vslide1down.vx v10, v10, a0 +; RV32-NEXT: fcvt.w.s a0, fa5, rmm +; RV32-NEXT: vfmv.f.s fa5, v8 +; RV32-NEXT: vslide1down.vx v8, v10, a1 +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: fcvt.w.s a0, fa5, rmm +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-i32-LABEL: lround_v8f32: +; RV64-i32: # %bb.0: +; RV64-i32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-i32-NEXT: vslidedown.vi v10, v8, 1 +; RV64-i32-NEXT: vfmv.f.s fa5, v8 +; RV64-i32-NEXT: vslidedown.vi v11, v8, 2 +; RV64-i32-NEXT: vslidedown.vi v12, v8, 3 +; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm +; RV64-i32-NEXT: vfmv.f.s fa5, v10 +; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm +; RV64-i32-NEXT: vfmv.f.s fa5, v11 +; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64-i32-NEXT: vmv.v.x v10, a0 +; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm +; RV64-i32-NEXT: vfmv.f.s fa5, v12 +; RV64-i32-NEXT: vslidedown.vi v12, v8, 4 +; RV64-i32-NEXT: vslide1down.vx v10, v10, a1 +; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm +; RV64-i32-NEXT: vfmv.f.s fa5, v12 +; RV64-i32-NEXT: vslidedown.vi v12, v8, 5 +; RV64-i32-NEXT: vslide1down.vx v10, v10, a0 +; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm +; RV64-i32-NEXT: vfmv.f.s fa5, v12 +; RV64-i32-NEXT: vslidedown.vi v12, v8, 6 +; RV64-i32-NEXT: vslidedown.vi v8, v8, 7 +; RV64-i32-NEXT: vslide1down.vx v10, v10, a1 +; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm +; RV64-i32-NEXT: vfmv.f.s fa5, v12 +; RV64-i32-NEXT: vslide1down.vx v10, v10, a0 +; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm +; RV64-i32-NEXT: vfmv.f.s fa5, v8 +; RV64-i32-NEXT: vslide1down.vx v8, v10, a1 +; RV64-i32-NEXT: vslide1down.vx v8, v8, a0 +; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm +; RV64-i32-NEXT: vslide1down.vx v8, v8, a0 +; RV64-i32-NEXT: ret +; +; RV64-i64-LABEL: lround_v8f32: +; RV64-i64: # %bb.0: +; RV64-i64-NEXT: addi sp, sp, -128 +; RV64-i64-NEXT: .cfi_def_cfa_offset 128 +; RV64-i64-NEXT: sd ra, 120(sp) # 8-byte Folded Spill +; RV64-i64-NEXT: sd s0, 112(sp) # 8-byte Folded Spill +; RV64-i64-NEXT: .cfi_offset ra, -8 +; RV64-i64-NEXT: .cfi_offset s0, -16 +; RV64-i64-NEXT: addi s0, sp, 128 +; RV64-i64-NEXT: .cfi_def_cfa s0, 0 +; RV64-i64-NEXT: andi sp, sp, -64 +; RV64-i64-NEXT: vsetivli zero, 1, e32, m2, ta, ma +; RV64-i64-NEXT: vfmv.f.s fa5, v8 +; RV64-i64-NEXT: vslidedown.vi v10, v8, 7 +; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-i64-NEXT: vfmv.f.s fa5, v10 +; RV64-i64-NEXT: vslidedown.vi v10, v8, 6 +; RV64-i64-NEXT: fcvt.l.s a1, fa5, rmm +; 
RV64-i64-NEXT: vfmv.f.s fa5, v10 +; RV64-i64-NEXT: vslidedown.vi v10, v8, 5 +; RV64-i64-NEXT: fcvt.l.s a2, fa5, rmm +; RV64-i64-NEXT: vfmv.f.s fa5, v10 +; RV64-i64-NEXT: vslidedown.vi v10, v8, 4 +; RV64-i64-NEXT: fcvt.l.s a3, fa5, rmm +; RV64-i64-NEXT: vfmv.f.s fa5, v10 +; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-i64-NEXT: vslidedown.vi v9, v8, 3 +; RV64-i64-NEXT: vslidedown.vi v10, v8, 2 +; RV64-i64-NEXT: vslidedown.vi v8, v8, 1 +; RV64-i64-NEXT: fcvt.l.s a4, fa5, rmm +; RV64-i64-NEXT: vfmv.f.s fa5, v9 +; RV64-i64-NEXT: fcvt.l.s a5, fa5, rmm +; RV64-i64-NEXT: vfmv.f.s fa5, v10 +; RV64-i64-NEXT: fcvt.l.s a6, fa5, rmm +; RV64-i64-NEXT: vfmv.f.s fa5, v8 +; RV64-i64-NEXT: sd a4, 32(sp) +; RV64-i64-NEXT: sd a3, 40(sp) +; RV64-i64-NEXT: sd a2, 48(sp) +; RV64-i64-NEXT: sd a1, 56(sp) +; RV64-i64-NEXT: fcvt.l.s a1, fa5, rmm +; RV64-i64-NEXT: sd a0, 0(sp) +; RV64-i64-NEXT: sd a1, 8(sp) +; RV64-i64-NEXT: sd a6, 16(sp) +; RV64-i64-NEXT: sd a5, 24(sp) +; RV64-i64-NEXT: mv a0, sp +; RV64-i64-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV64-i64-NEXT: vle64.v v8, (a0) +; RV64-i64-NEXT: addi sp, s0, -128 +; RV64-i64-NEXT: .cfi_def_cfa sp, 128 +; RV64-i64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload +; RV64-i64-NEXT: ld s0, 112(sp) # 8-byte Folded Reload +; RV64-i64-NEXT: .cfi_restore ra +; RV64-i64-NEXT: .cfi_restore s0 +; RV64-i64-NEXT: addi sp, sp, 128 +; RV64-i64-NEXT: .cfi_def_cfa_offset 0 +; RV64-i64-NEXT: ret + %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f32(<8 x float> %x) + ret <8 x iXLen> %a +} +declare <8 x iXLen> @llvm.lround.v8iXLen.v8f32(<8 x float>) + +define <16 x iXLen> @lround_v16f32(<16 x float> %x) { +; RV32-LABEL: lround_v16f32: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -192 +; RV32-NEXT: .cfi_def_cfa_offset 192 +; RV32-NEXT: sw ra, 188(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 184(sp) # 4-byte Folded Spill +; RV32-NEXT: .cfi_offset ra, -4 +; RV32-NEXT: .cfi_offset s0, -8 +; RV32-NEXT: addi s0, sp, 192 +; RV32-NEXT: .cfi_def_cfa s0, 0 +; RV32-NEXT: andi sp, sp, -64 +; RV32-NEXT: mv a0, sp +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vse32.v v8, (a0) +; RV32-NEXT: flw fa5, 60(sp) +; RV32-NEXT: vfmv.f.s fa4, v8 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v10, v8, 3 +; RV32-NEXT: vslidedown.vi v11, v8, 2 +; RV32-NEXT: fcvt.w.s a0, fa5, rmm +; RV32-NEXT: sw a0, 124(sp) +; RV32-NEXT: flw fa5, 56(sp) +; RV32-NEXT: fcvt.w.s a0, fa4, rmm +; RV32-NEXT: vfmv.f.s fa4, v10 +; RV32-NEXT: vslidedown.vi v10, v8, 1 +; RV32-NEXT: fcvt.w.s a1, fa5, rmm +; RV32-NEXT: sw a1, 120(sp) +; RV32-NEXT: flw fa5, 52(sp) +; RV32-NEXT: fcvt.w.s a1, fa4, rmm +; RV32-NEXT: vfmv.f.s fa4, v11 +; RV32-NEXT: fcvt.w.s a2, fa4, rmm +; RV32-NEXT: fcvt.w.s a3, fa5, rmm +; RV32-NEXT: sw a3, 116(sp) +; RV32-NEXT: flw fa5, 48(sp) +; RV32-NEXT: vfmv.f.s fa4, v10 +; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma +; RV32-NEXT: vslidedown.vi v10, v8, 7 +; RV32-NEXT: fcvt.w.s a3, fa4, rmm +; RV32-NEXT: fcvt.w.s a4, fa5, rmm +; RV32-NEXT: sw a4, 112(sp) +; RV32-NEXT: flw fa5, 44(sp) +; RV32-NEXT: vfmv.f.s fa4, v10 +; RV32-NEXT: vslidedown.vi v10, v8, 6 +; RV32-NEXT: fcvt.w.s a4, fa4, rmm +; RV32-NEXT: fcvt.w.s a5, fa5, rmm +; RV32-NEXT: sw a5, 108(sp) +; RV32-NEXT: flw fa5, 40(sp) +; RV32-NEXT: vfmv.f.s fa4, v10 +; RV32-NEXT: vslidedown.vi v10, v8, 5 +; RV32-NEXT: fcvt.w.s a5, fa4, rmm +; RV32-NEXT: fcvt.w.s a6, fa5, rmm +; RV32-NEXT: sw a6, 104(sp) +; RV32-NEXT: flw fa5, 36(sp) +; RV32-NEXT: vfmv.f.s fa4, v10 +; RV32-NEXT: fcvt.w.s a6, fa4, rmm +; RV32-NEXT: 
vslidedown.vi v8, v8, 4 +; RV32-NEXT: fcvt.w.s a7, fa5, rmm +; RV32-NEXT: vfmv.f.s fa5, v8 +; RV32-NEXT: sw a7, 100(sp) +; RV32-NEXT: fcvt.w.s a7, fa5, rmm +; RV32-NEXT: flw fa5, 32(sp) +; RV32-NEXT: sw a0, 64(sp) +; RV32-NEXT: sw a3, 68(sp) +; RV32-NEXT: sw a2, 72(sp) +; RV32-NEXT: sw a1, 76(sp) +; RV32-NEXT: sw a7, 80(sp) +; RV32-NEXT: sw a6, 84(sp) +; RV32-NEXT: sw a5, 88(sp) +; RV32-NEXT: sw a4, 92(sp) +; RV32-NEXT: fcvt.w.s a0, fa5, rmm +; RV32-NEXT: sw a0, 96(sp) +; RV32-NEXT: addi a0, sp, 64 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vle32.v v8, (a0) +; RV32-NEXT: addi sp, s0, -192 +; RV32-NEXT: .cfi_def_cfa sp, 192 +; RV32-NEXT: lw ra, 188(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 184(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra +; RV32-NEXT: .cfi_restore s0 +; RV32-NEXT: addi sp, sp, 192 +; RV32-NEXT: .cfi_def_cfa_offset 0 +; RV32-NEXT: ret +; +; RV64-i32-LABEL: lround_v16f32: +; RV64-i32: # %bb.0: +; RV64-i32-NEXT: addi sp, sp, -192 +; RV64-i32-NEXT: .cfi_def_cfa_offset 192 +; RV64-i32-NEXT: sd ra, 184(sp) # 8-byte Folded Spill +; RV64-i32-NEXT: sd s0, 176(sp) # 8-byte Folded Spill +; RV64-i32-NEXT: .cfi_offset ra, -8 +; RV64-i32-NEXT: .cfi_offset s0, -16 +; RV64-i32-NEXT: addi s0, sp, 192 +; RV64-i32-NEXT: .cfi_def_cfa s0, 0 +; RV64-i32-NEXT: andi sp, sp, -64 +; RV64-i32-NEXT: mv a0, sp +; RV64-i32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV64-i32-NEXT: vse32.v v8, (a0) +; RV64-i32-NEXT: flw fa5, 60(sp) +; RV64-i32-NEXT: vfmv.f.s fa4, v8 +; RV64-i32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-i32-NEXT: vslidedown.vi v10, v8, 3 +; RV64-i32-NEXT: vslidedown.vi v11, v8, 2 +; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm +; RV64-i32-NEXT: sw a0, 124(sp) +; RV64-i32-NEXT: flw fa5, 56(sp) +; RV64-i32-NEXT: fcvt.w.s a0, fa4, rmm +; RV64-i32-NEXT: vfmv.f.s fa4, v10 +; RV64-i32-NEXT: vslidedown.vi v10, v8, 1 +; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm +; RV64-i32-NEXT: sw a1, 120(sp) +; RV64-i32-NEXT: flw fa5, 52(sp) +; RV64-i32-NEXT: fcvt.w.s a1, fa4, rmm +; RV64-i32-NEXT: vfmv.f.s fa4, v11 +; RV64-i32-NEXT: fcvt.w.s a2, fa4, rmm +; RV64-i32-NEXT: fcvt.w.s a3, fa5, rmm +; RV64-i32-NEXT: sw a3, 116(sp) +; RV64-i32-NEXT: flw fa5, 48(sp) +; RV64-i32-NEXT: vfmv.f.s fa4, v10 +; RV64-i32-NEXT: vsetivli zero, 1, e32, m2, ta, ma +; RV64-i32-NEXT: vslidedown.vi v10, v8, 7 +; RV64-i32-NEXT: fcvt.w.s a3, fa4, rmm +; RV64-i32-NEXT: fcvt.w.s a4, fa5, rmm +; RV64-i32-NEXT: sw a4, 112(sp) +; RV64-i32-NEXT: flw fa5, 44(sp) +; RV64-i32-NEXT: vfmv.f.s fa4, v10 +; RV64-i32-NEXT: vslidedown.vi v10, v8, 6 +; RV64-i32-NEXT: fcvt.w.s a4, fa4, rmm +; RV64-i32-NEXT: fcvt.w.s a5, fa5, rmm +; RV64-i32-NEXT: sw a5, 108(sp) +; RV64-i32-NEXT: flw fa5, 40(sp) +; RV64-i32-NEXT: vfmv.f.s fa4, v10 +; RV64-i32-NEXT: vslidedown.vi v10, v8, 5 +; RV64-i32-NEXT: fcvt.w.s a5, fa4, rmm +; RV64-i32-NEXT: fcvt.w.s a6, fa5, rmm +; RV64-i32-NEXT: sw a6, 104(sp) +; RV64-i32-NEXT: flw fa5, 36(sp) +; RV64-i32-NEXT: vfmv.f.s fa4, v10 +; RV64-i32-NEXT: fcvt.w.s a6, fa4, rmm +; RV64-i32-NEXT: vslidedown.vi v8, v8, 4 +; RV64-i32-NEXT: fcvt.w.s a7, fa5, rmm +; RV64-i32-NEXT: vfmv.f.s fa5, v8 +; RV64-i32-NEXT: sw a7, 100(sp) +; RV64-i32-NEXT: fcvt.w.s a7, fa5, rmm +; RV64-i32-NEXT: flw fa5, 32(sp) +; RV64-i32-NEXT: sw a0, 64(sp) +; RV64-i32-NEXT: sw a3, 68(sp) +; RV64-i32-NEXT: sw a2, 72(sp) +; RV64-i32-NEXT: sw a1, 76(sp) +; RV64-i32-NEXT: sw a7, 80(sp) +; RV64-i32-NEXT: sw a6, 84(sp) +; RV64-i32-NEXT: sw a5, 88(sp) +; RV64-i32-NEXT: sw a4, 92(sp) +; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm +; RV64-i32-NEXT: sw 
a0, 96(sp) +; RV64-i32-NEXT: addi a0, sp, 64 +; RV64-i32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV64-i32-NEXT: vle32.v v8, (a0) +; RV64-i32-NEXT: addi sp, s0, -192 +; RV64-i32-NEXT: .cfi_def_cfa sp, 192 +; RV64-i32-NEXT: ld ra, 184(sp) # 8-byte Folded Reload +; RV64-i32-NEXT: ld s0, 176(sp) # 8-byte Folded Reload +; RV64-i32-NEXT: .cfi_restore ra +; RV64-i32-NEXT: .cfi_restore s0 +; RV64-i32-NEXT: addi sp, sp, 192 +; RV64-i32-NEXT: .cfi_def_cfa_offset 0 +; RV64-i32-NEXT: ret +; +; RV64-i64-LABEL: lround_v16f32: +; RV64-i64: # %bb.0: +; RV64-i64-NEXT: addi sp, sp, -384 +; RV64-i64-NEXT: .cfi_def_cfa_offset 384 +; RV64-i64-NEXT: sd ra, 376(sp) # 8-byte Folded Spill +; RV64-i64-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; RV64-i64-NEXT: .cfi_offset ra, -8 +; RV64-i64-NEXT: .cfi_offset s0, -16 +; RV64-i64-NEXT: addi s0, sp, 384 +; RV64-i64-NEXT: .cfi_def_cfa s0, 0 +; RV64-i64-NEXT: andi sp, sp, -128 +; RV64-i64-NEXT: addi a0, sp, 64 +; RV64-i64-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV64-i64-NEXT: vse32.v v8, (a0) +; RV64-i64-NEXT: flw fa5, 124(sp) +; RV64-i64-NEXT: vfmv.f.s fa4, v8 +; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-i64-NEXT: vslidedown.vi v10, v8, 3 +; RV64-i64-NEXT: vslidedown.vi v12, v8, 2 +; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-i64-NEXT: sd a0, 248(sp) +; RV64-i64-NEXT: flw fa5, 120(sp) +; RV64-i64-NEXT: vslidedown.vi v13, v8, 1 +; RV64-i64-NEXT: fcvt.l.s a0, fa4, rmm +; RV64-i64-NEXT: vfmv.f.s fa4, v10 +; RV64-i64-NEXT: fcvt.l.s a1, fa5, rmm +; RV64-i64-NEXT: sd a1, 240(sp) +; RV64-i64-NEXT: flw fa5, 116(sp) +; RV64-i64-NEXT: vsetivli zero, 1, e32, m2, ta, ma +; RV64-i64-NEXT: vslidedown.vi v10, v8, 7 +; RV64-i64-NEXT: fcvt.l.s a1, fa4, rmm +; RV64-i64-NEXT: vfmv.f.s fa4, v12 +; RV64-i64-NEXT: fcvt.l.s a2, fa5, rmm +; RV64-i64-NEXT: sd a2, 232(sp) +; RV64-i64-NEXT: flw fa5, 112(sp) +; RV64-i64-NEXT: fcvt.l.s a2, fa4, rmm +; RV64-i64-NEXT: vfmv.f.s fa4, v13 +; RV64-i64-NEXT: vslidedown.vi v12, v8, 6 +; RV64-i64-NEXT: fcvt.l.s a3, fa5, rmm +; RV64-i64-NEXT: sd a3, 224(sp) +; RV64-i64-NEXT: flw fa5, 108(sp) +; RV64-i64-NEXT: fcvt.l.s a3, fa4, rmm +; RV64-i64-NEXT: vfmv.f.s fa4, v10 +; RV64-i64-NEXT: vslidedown.vi v10, v8, 5 +; RV64-i64-NEXT: fcvt.l.s a4, fa5, rmm +; RV64-i64-NEXT: sd a4, 216(sp) +; RV64-i64-NEXT: flw fa5, 104(sp) +; RV64-i64-NEXT: fcvt.l.s a4, fa4, rmm +; RV64-i64-NEXT: vfmv.f.s fa4, v12 +; RV64-i64-NEXT: fcvt.l.s a5, fa4, rmm +; RV64-i64-NEXT: fcvt.l.s a6, fa5, rmm +; RV64-i64-NEXT: sd a6, 208(sp) +; RV64-i64-NEXT: flw fa5, 100(sp) +; RV64-i64-NEXT: vfmv.f.s fa4, v10 +; RV64-i64-NEXT: fcvt.l.s a6, fa4, rmm +; RV64-i64-NEXT: vslidedown.vi v8, v8, 4 +; RV64-i64-NEXT: fcvt.l.s a7, fa5, rmm +; RV64-i64-NEXT: vfmv.f.s fa5, v8 +; RV64-i64-NEXT: sd a7, 200(sp) +; RV64-i64-NEXT: fcvt.l.s a7, fa5, rmm +; RV64-i64-NEXT: flw fa5, 96(sp) +; RV64-i64-NEXT: sd a0, 128(sp) +; RV64-i64-NEXT: sd a3, 136(sp) +; RV64-i64-NEXT: sd a2, 144(sp) +; RV64-i64-NEXT: sd a1, 152(sp) +; RV64-i64-NEXT: sd a7, 160(sp) +; RV64-i64-NEXT: sd a6, 168(sp) +; RV64-i64-NEXT: sd a5, 176(sp) +; RV64-i64-NEXT: sd a4, 184(sp) +; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm +; RV64-i64-NEXT: sd a0, 192(sp) +; RV64-i64-NEXT: addi a0, sp, 128 +; RV64-i64-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV64-i64-NEXT: vle64.v v8, (a0) +; RV64-i64-NEXT: addi sp, s0, -384 +; RV64-i64-NEXT: .cfi_def_cfa sp, 384 +; RV64-i64-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; RV64-i64-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; RV64-i64-NEXT: .cfi_restore ra +; RV64-i64-NEXT: .cfi_restore s0 +; 
RV64-i64-NEXT: addi sp, sp, 384 +; RV64-i64-NEXT: .cfi_def_cfa_offset 0 +; RV64-i64-NEXT: ret + %a = call <16 x iXLen> @llvm.lround.v16iXLen.v16f32(<16 x float> %x) + ret <16 x iXLen> %a +} +declare <16 x iXLen> @llvm.lround.v16iXLen.v16f32(<16 x float>) + +define <1 x iXLen> @lround_v1f64(<1 x double> %x) { +; RV32-LABEL: lround_v1f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV32-NEXT: vfmv.f.s fa5, v8 +; RV32-NEXT: fcvt.w.d a0, fa5, rmm +; RV32-NEXT: vmv.s.x v8, a0 +; RV32-NEXT: ret +; +; RV64-i32-LABEL: lround_v1f64: +; RV64-i32: # %bb.0: +; RV64-i32-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-i32-NEXT: vfmv.f.s fa5, v8 +; RV64-i32-NEXT: fcvt.w.d a0, fa5, rmm +; RV64-i32-NEXT: vmv.s.x v8, a0 +; RV64-i32-NEXT: ret +; +; RV64-i64-LABEL: lround_v1f64: +; RV64-i64: # %bb.0: +; RV64-i64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-i64-NEXT: vfmv.f.s fa5, v8 +; RV64-i64-NEXT: fcvt.l.d a0, fa5, rmm +; RV64-i64-NEXT: vmv.s.x v8, a0 +; RV64-i64-NEXT: ret + %a = call <1 x iXLen> @llvm.lround.v1iXLen.v1f64(<1 x double> %x) + ret <1 x iXLen> %a +} +declare <1 x iXLen> @llvm.lround.v1iXLen.v1f64(<1 x double>) + +define <2 x iXLen> @lround_v2f64(<2 x double> %x) { +; RV32-LABEL: lround_v2f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 1 +; RV32-NEXT: vfmv.f.s fa5, v8 +; RV32-NEXT: fcvt.w.d a0, fa5, rmm +; RV32-NEXT: vfmv.f.s fa5, v9 +; RV32-NEXT: fcvt.w.d a1, fa5, rmm +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; RV32-NEXT: vmv.v.x v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: ret +; +; RV64-i32-LABEL: lround_v2f64: +; RV64-i32: # %bb.0: +; RV64-i32-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-i32-NEXT: vslidedown.vi v9, v8, 1 +; RV64-i32-NEXT: vfmv.f.s fa5, v8 +; RV64-i32-NEXT: fcvt.w.d a0, fa5, rmm +; RV64-i32-NEXT: vfmv.f.s fa5, v9 +; RV64-i32-NEXT: fcvt.w.d a1, fa5, rmm +; RV64-i32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; RV64-i32-NEXT: vmv.v.x v8, a0 +; RV64-i32-NEXT: vslide1down.vx v8, v8, a1 +; RV64-i32-NEXT: ret +; +; RV64-i64-LABEL: lround_v2f64: +; RV64-i64: # %bb.0: +; RV64-i64-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64-i64-NEXT: vslidedown.vi v9, v8, 1 +; RV64-i64-NEXT: vfmv.f.s fa5, v8 +; RV64-i64-NEXT: fcvt.l.d a0, fa5, rmm +; RV64-i64-NEXT: vfmv.f.s fa5, v9 +; RV64-i64-NEXT: fcvt.l.d a1, fa5, rmm +; RV64-i64-NEXT: vmv.v.x v8, a0 +; RV64-i64-NEXT: vslide1down.vx v8, v8, a1 +; RV64-i64-NEXT: ret + %a = call <2 x iXLen> @llvm.lround.v2iXLen.v2f64(<2 x double> %x) + ret <2 x iXLen> %a +} +declare <2 x iXLen> @llvm.lround.v2iXLen.v2f64(<2 x double>) + +define <4 x iXLen> @lround_v4f64(<4 x double> %x) { +; RV32-LABEL: lround_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV32-NEXT: vslidedown.vi v12, v8, 1 +; RV32-NEXT: vfmv.f.s fa5, v8 +; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; RV32-NEXT: vslidedown.vi v10, v8, 2 +; RV32-NEXT: vslidedown.vi v8, v8, 3 +; RV32-NEXT: fcvt.w.d a0, fa5, rmm +; RV32-NEXT: vfmv.f.s fa5, v12 +; RV32-NEXT: fcvt.w.d a1, fa5, rmm +; RV32-NEXT: vfmv.f.s fa5, v10 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a0 +; RV32-NEXT: fcvt.w.d a0, fa5, rmm +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32-NEXT: vfmv.f.s fa5, v8 +; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV32-NEXT: vslide1down.vx v8, v9, a1 +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: fcvt.w.d a0, fa5, rmm +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-i32-LABEL: lround_v4f64: 
+; RV64-i32: # %bb.0: +; RV64-i32-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-i32-NEXT: vslidedown.vi v12, v8, 1 +; RV64-i32-NEXT: vfmv.f.s fa5, v8 +; RV64-i32-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; RV64-i32-NEXT: vslidedown.vi v10, v8, 2 +; RV64-i32-NEXT: vslidedown.vi v8, v8, 3 +; RV64-i32-NEXT: fcvt.w.d a0, fa5, rmm +; RV64-i32-NEXT: vfmv.f.s fa5, v12 +; RV64-i32-NEXT: fcvt.w.d a1, fa5, rmm +; RV64-i32-NEXT: vfmv.f.s fa5, v10 +; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV64-i32-NEXT: vmv.v.x v9, a0 +; RV64-i32-NEXT: fcvt.w.d a0, fa5, rmm +; RV64-i32-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-i32-NEXT: vfmv.f.s fa5, v8 +; RV64-i32-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64-i32-NEXT: vslide1down.vx v8, v9, a1 +; RV64-i32-NEXT: vslide1down.vx v8, v8, a0 +; RV64-i32-NEXT: fcvt.w.d a0, fa5, rmm +; RV64-i32-NEXT: vslide1down.vx v8, v8, a0 +; RV64-i32-NEXT: ret +; +; RV64-i64-LABEL: lround_v4f64: +; RV64-i64: # %bb.0: +; RV64-i64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-i64-NEXT: vslidedown.vi v12, v8, 1 +; RV64-i64-NEXT: vfmv.f.s fa5, v8 +; RV64-i64-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; RV64-i64-NEXT: vslidedown.vi v10, v8, 2 +; RV64-i64-NEXT: vslidedown.vi v8, v8, 3 +; RV64-i64-NEXT: fcvt.l.d a0, fa5, rmm +; RV64-i64-NEXT: vfmv.f.s fa5, v12 +; RV64-i64-NEXT: fcvt.l.d a1, fa5, rmm +; RV64-i64-NEXT: vfmv.f.s fa5, v10 +; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV64-i64-NEXT: vmv.v.x v10, a0 +; RV64-i64-NEXT: fcvt.l.d a0, fa5, rmm +; RV64-i64-NEXT: vfmv.f.s fa5, v8 +; RV64-i64-NEXT: vslide1down.vx v8, v10, a1 +; RV64-i64-NEXT: vslide1down.vx v8, v8, a0 +; RV64-i64-NEXT: fcvt.l.d a0, fa5, rmm +; RV64-i64-NEXT: vslide1down.vx v8, v8, a0 +; RV64-i64-NEXT: ret + %a = call <4 x iXLen> @llvm.lround.v4iXLen.v4f64(<4 x double> %x) + ret <4 x iXLen> %a +} +declare <4 x iXLen> @llvm.lround.v4iXLen.v4f64(<4 x double>) + +define <8 x iXLen> @lround_v8f64(<8 x double> %x) { +; RV32-LABEL: lround_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -128 +; RV32-NEXT: .cfi_def_cfa_offset 128 +; RV32-NEXT: sw ra, 124(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 120(sp) # 4-byte Folded Spill +; RV32-NEXT: .cfi_offset ra, -4 +; RV32-NEXT: .cfi_offset s0, -8 +; RV32-NEXT: addi s0, sp, 128 +; RV32-NEXT: .cfi_def_cfa s0, 0 +; RV32-NEXT: andi sp, sp, -64 +; RV32-NEXT: mv a0, sp +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV32-NEXT: vslidedown.vi v14, v8, 1 +; RV32-NEXT: vfmv.f.s fa5, v8 +; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; RV32-NEXT: vslidedown.vi v12, v8, 2 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV32-NEXT: vse64.v v8, (a0) +; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; RV32-NEXT: vslidedown.vi v8, v8, 3 +; RV32-NEXT: vfmv.f.s fa4, v14 +; RV32-NEXT: fcvt.w.d a0, fa5, rmm +; RV32-NEXT: vfmv.f.s fa5, v12 +; RV32-NEXT: vfmv.f.s fa3, v8 +; RV32-NEXT: fcvt.w.d a1, fa4, rmm +; RV32-NEXT: fcvt.w.d a2, fa5, rmm +; RV32-NEXT: fcvt.w.d a3, fa3, rmm +; RV32-NEXT: fld fa5, 32(sp) +; RV32-NEXT: fld fa4, 40(sp) +; RV32-NEXT: fld fa3, 48(sp) +; RV32-NEXT: fld fa2, 56(sp) +; RV32-NEXT: fcvt.w.d a4, fa5, rmm +; RV32-NEXT: fcvt.w.d a5, fa4, rmm +; RV32-NEXT: fcvt.w.d a6, fa3, rmm +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: vslide1down.vx v8, v8, a2 +; RV32-NEXT: vslide1down.vx v8, v8, a3 +; RV32-NEXT: vslide1down.vx v8, v8, a4 +; RV32-NEXT: vslide1down.vx v8, v8, a5 +; RV32-NEXT: vslide1down.vx v8, v8, a6 +; RV32-NEXT: fcvt.w.d a0, fa2, rmm +; RV32-NEXT: 
vslide1down.vx v8, v8, a0 +; RV32-NEXT: addi sp, s0, -128 +; RV32-NEXT: .cfi_def_cfa sp, 128 +; RV32-NEXT: lw ra, 124(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 120(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra +; RV32-NEXT: .cfi_restore s0 +; RV32-NEXT: addi sp, sp, 128 +; RV32-NEXT: .cfi_def_cfa_offset 0 +; RV32-NEXT: ret +; +; RV64-i32-LABEL: lround_v8f64: +; RV64-i32: # %bb.0: +; RV64-i32-NEXT: addi sp, sp, -128 +; RV64-i32-NEXT: .cfi_def_cfa_offset 128 +; RV64-i32-NEXT: sd ra, 120(sp) # 8-byte Folded Spill +; RV64-i32-NEXT: sd s0, 112(sp) # 8-byte Folded Spill +; RV64-i32-NEXT: .cfi_offset ra, -8 +; RV64-i32-NEXT: .cfi_offset s0, -16 +; RV64-i32-NEXT: addi s0, sp, 128 +; RV64-i32-NEXT: .cfi_def_cfa s0, 0 +; RV64-i32-NEXT: andi sp, sp, -64 +; RV64-i32-NEXT: mv a0, sp +; RV64-i32-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-i32-NEXT: vslidedown.vi v14, v8, 1 +; RV64-i32-NEXT: vfmv.f.s fa5, v8 +; RV64-i32-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; RV64-i32-NEXT: vslidedown.vi v12, v8, 2 +; RV64-i32-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV64-i32-NEXT: vse64.v v8, (a0) +; RV64-i32-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; RV64-i32-NEXT: vslidedown.vi v8, v8, 3 +; RV64-i32-NEXT: vfmv.f.s fa4, v14 +; RV64-i32-NEXT: fcvt.w.d a0, fa5, rmm +; RV64-i32-NEXT: vfmv.f.s fa5, v12 +; RV64-i32-NEXT: vfmv.f.s fa3, v8 +; RV64-i32-NEXT: fcvt.w.d a1, fa4, rmm +; RV64-i32-NEXT: fcvt.w.d a2, fa5, rmm +; RV64-i32-NEXT: fcvt.w.d a3, fa3, rmm +; RV64-i32-NEXT: fld fa5, 32(sp) +; RV64-i32-NEXT: fld fa4, 40(sp) +; RV64-i32-NEXT: fld fa3, 48(sp) +; RV64-i32-NEXT: fld fa2, 56(sp) +; RV64-i32-NEXT: fcvt.w.d a4, fa5, rmm +; RV64-i32-NEXT: fcvt.w.d a5, fa4, rmm +; RV64-i32-NEXT: fcvt.w.d a6, fa3, rmm +; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64-i32-NEXT: vmv.v.x v8, a0 +; RV64-i32-NEXT: vslide1down.vx v8, v8, a1 +; RV64-i32-NEXT: vslide1down.vx v8, v8, a2 +; RV64-i32-NEXT: vslide1down.vx v8, v8, a3 +; RV64-i32-NEXT: vslide1down.vx v8, v8, a4 +; RV64-i32-NEXT: vslide1down.vx v8, v8, a5 +; RV64-i32-NEXT: vslide1down.vx v8, v8, a6 +; RV64-i32-NEXT: fcvt.w.d a0, fa2, rmm +; RV64-i32-NEXT: vslide1down.vx v8, v8, a0 +; RV64-i32-NEXT: addi sp, s0, -128 +; RV64-i32-NEXT: .cfi_def_cfa sp, 128 +; RV64-i32-NEXT: ld ra, 120(sp) # 8-byte Folded Reload +; RV64-i32-NEXT: ld s0, 112(sp) # 8-byte Folded Reload +; RV64-i32-NEXT: .cfi_restore ra +; RV64-i32-NEXT: .cfi_restore s0 +; RV64-i32-NEXT: addi sp, sp, 128 +; RV64-i32-NEXT: .cfi_def_cfa_offset 0 +; RV64-i32-NEXT: ret +; +; RV64-i64-LABEL: lround_v8f64: +; RV64-i64: # %bb.0: +; RV64-i64-NEXT: addi sp, sp, -192 +; RV64-i64-NEXT: .cfi_def_cfa_offset 192 +; RV64-i64-NEXT: sd ra, 184(sp) # 8-byte Folded Spill +; RV64-i64-NEXT: sd s0, 176(sp) # 8-byte Folded Spill +; RV64-i64-NEXT: .cfi_offset ra, -8 +; RV64-i64-NEXT: .cfi_offset s0, -16 +; RV64-i64-NEXT: addi s0, sp, 192 +; RV64-i64-NEXT: .cfi_def_cfa s0, 0 +; RV64-i64-NEXT: andi sp, sp, -64 +; RV64-i64-NEXT: mv a0, sp +; RV64-i64-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV64-i64-NEXT: vse64.v v8, (a0) +; RV64-i64-NEXT: fld fa5, 56(sp) +; RV64-i64-NEXT: vfmv.f.s fa4, v8 +; RV64-i64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-i64-NEXT: vslidedown.vi v10, v8, 1 +; RV64-i64-NEXT: fcvt.l.d a0, fa4, rmm +; RV64-i64-NEXT: fcvt.l.d a1, fa5, rmm +; RV64-i64-NEXT: sd a1, 120(sp) +; RV64-i64-NEXT: fld fa5, 48(sp) +; RV64-i64-NEXT: vfmv.f.s fa4, v10 +; RV64-i64-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; RV64-i64-NEXT: vslidedown.vi v10, v8, 3 +; RV64-i64-NEXT: fcvt.l.d a1, fa4, rmm +; RV64-i64-NEXT: 
fcvt.l.d a2, fa5, rmm +; RV64-i64-NEXT: sd a2, 112(sp) +; RV64-i64-NEXT: fld fa5, 40(sp) +; RV64-i64-NEXT: vfmv.f.s fa4, v10 +; RV64-i64-NEXT: fcvt.l.d a2, fa4, rmm +; RV64-i64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-i64-NEXT: fcvt.l.d a3, fa5, rmm +; RV64-i64-NEXT: vfmv.f.s fa5, v8 +; RV64-i64-NEXT: sd a3, 104(sp) +; RV64-i64-NEXT: fcvt.l.d a3, fa5, rmm +; RV64-i64-NEXT: fld fa5, 32(sp) +; RV64-i64-NEXT: sd a0, 64(sp) +; RV64-i64-NEXT: sd a1, 72(sp) +; RV64-i64-NEXT: sd a3, 80(sp) +; RV64-i64-NEXT: sd a2, 88(sp) +; RV64-i64-NEXT: fcvt.l.d a0, fa5, rmm +; RV64-i64-NEXT: sd a0, 96(sp) +; RV64-i64-NEXT: addi a0, sp, 64 +; RV64-i64-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV64-i64-NEXT: vle64.v v8, (a0) +; RV64-i64-NEXT: addi sp, s0, -192 +; RV64-i64-NEXT: .cfi_def_cfa sp, 192 +; RV64-i64-NEXT: ld ra, 184(sp) # 8-byte Folded Reload +; RV64-i64-NEXT: ld s0, 176(sp) # 8-byte Folded Reload +; RV64-i64-NEXT: .cfi_restore ra +; RV64-i64-NEXT: .cfi_restore s0 +; RV64-i64-NEXT: addi sp, sp, 192 +; RV64-i64-NEXT: .cfi_def_cfa_offset 0 +; RV64-i64-NEXT: ret + %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f64(<8 x double> %x) + ret <8 x iXLen> %a +} +declare <8 x iXLen> @llvm.lround.v8iXLen.v8f64(<8 x double>)