diff --git a/bolt/include/bolt/Core/Linker.h b/bolt/include/bolt/Core/Linker.h index 1e0876a0e13d9..66b3ad18e3c7b 100644 --- a/bolt/include/bolt/Core/Linker.h +++ b/bolt/include/bolt/Core/Linker.h @@ -46,13 +46,6 @@ class BOLTLinker { /// Return the address and size of a symbol or std::nullopt if it cannot be /// found. virtual std::optional lookupSymbolInfo(StringRef Name) const = 0; - - /// Return the address of a symbol or std::nullopt if it cannot be found. - std::optional lookupSymbol(StringRef Name) const { - if (const auto Info = lookupSymbolInfo(Name)) - return Info->Address; - return std::nullopt; - } }; } // namespace bolt diff --git a/bolt/lib/Core/BinaryFunction.cpp b/bolt/lib/Core/BinaryFunction.cpp index 1e427b2df11cf..8385551576098 100644 --- a/bolt/lib/Core/BinaryFunction.cpp +++ b/bolt/lib/Core/BinaryFunction.cpp @@ -4259,10 +4259,10 @@ void BinaryFunction::updateOutputValues(const BOLTLinker &Linker) { if (BC.HasRelocations || isInjected()) { if (hasConstantIsland()) { - const auto DataAddress = - Linker.lookupSymbol(getFunctionConstantIslandLabel()->getName()); - assert(DataAddress && "Cannot find function CI symbol"); - setOutputDataAddress(*DataAddress); + const auto IslandLabelSymInfo = + Linker.lookupSymbolInfo(getFunctionConstantIslandLabel()->getName()); + assert(IslandLabelSymInfo && "Cannot find function CI symbol"); + setOutputDataAddress(IslandLabelSymInfo->Address); for (auto It : Islands->Offsets) { const uint64_t OldOffset = It.first; BinaryData *BD = BC.getBinaryDataAtAddress(getAddress() + OldOffset); @@ -4270,10 +4270,10 @@ void BinaryFunction::updateOutputValues(const BOLTLinker &Linker) { continue; MCSymbol *Symbol = It.second; - const auto NewAddress = Linker.lookupSymbol(Symbol->getName()); - assert(NewAddress && "Cannot find CI symbol"); + const auto SymInfo = Linker.lookupSymbolInfo(Symbol->getName()); + assert(SymInfo && "Cannot find CI symbol"); auto &Section = *getCodeSection(); - const auto NewOffset = *NewAddress - 
Section.getOutputAddress(); + const auto NewOffset = SymInfo->Address - Section.getOutputAddress(); BD->setOutputLocation(Section, NewOffset); } } @@ -4298,10 +4298,10 @@ void BinaryFunction::updateOutputValues(const BOLTLinker &Linker) { FF.setAddress(ColdStartSymbolInfo->Address); FF.setImageSize(ColdStartSymbolInfo->Size); if (hasConstantIsland()) { - const auto DataAddress = Linker.lookupSymbol( + const auto SymInfo = Linker.lookupSymbolInfo( getFunctionColdConstantIslandLabel()->getName()); - assert(DataAddress && "Cannot find cold CI symbol"); - setOutputColdDataAddress(*DataAddress); + assert(SymInfo && "Cannot find cold CI symbol"); + setOutputColdDataAddress(SymInfo->Address); } } } diff --git a/bolt/lib/Rewrite/JITLinkLinker.cpp b/bolt/lib/Rewrite/JITLinkLinker.cpp index ba483ae4711df..c287dc002623d 100644 --- a/bolt/lib/Rewrite/JITLinkLinker.cpp +++ b/bolt/lib/Rewrite/JITLinkLinker.cpp @@ -125,11 +125,11 @@ struct JITLinkLinker::Context : jitlink::JITLinkContext { std::string SymName = (*Symbol.first).str(); LLVM_DEBUG(dbgs() << "BOLT: looking for " << SymName << "\n"); - if (auto Address = Linker.lookupSymbol(SymName)) { + if (auto SymInfo = Linker.lookupSymbolInfo(SymName)) { LLVM_DEBUG(dbgs() << "Resolved to address 0x" - << Twine::utohexstr(*Address) << "\n"); + << Twine::utohexstr(SymInfo->Address) << "\n"); AllResults[Symbol.first] = orc::ExecutorSymbolDef( - orc::ExecutorAddr(*Address), JITSymbolFlags()); + orc::ExecutorAddr(SymInfo->Address), JITSymbolFlags()); continue; } diff --git a/bolt/lib/Rewrite/RewriteInstance.cpp b/bolt/lib/Rewrite/RewriteInstance.cpp index 4329235d47049..40f214f840772 100644 --- a/bolt/lib/Rewrite/RewriteInstance.cpp +++ b/bolt/lib/Rewrite/RewriteInstance.cpp @@ -5907,9 +5907,9 @@ void RewriteInstance::writeEHFrameHeader() { } uint64_t RewriteInstance::getNewValueForSymbol(const StringRef Name) { - auto Value = Linker->lookupSymbol(Name); + auto Value = Linker->lookupSymbolInfo(Name); if (Value) - return *Value; + 
return Value->Address; // Return the original value if we haven't emitted the symbol. BinaryData *BD = BC->getBinaryDataByName(Name); diff --git a/bolt/lib/RuntimeLibs/HugifyRuntimeLibrary.cpp b/bolt/lib/RuntimeLibs/HugifyRuntimeLibrary.cpp index 026f8d35c55c6..059b1239d806b 100644 --- a/bolt/lib/RuntimeLibs/HugifyRuntimeLibrary.cpp +++ b/bolt/lib/RuntimeLibs/HugifyRuntimeLibrary.cpp @@ -68,10 +68,11 @@ void HugifyRuntimeLibrary::link(BinaryContext &BC, StringRef ToolPath, assert(!RuntimeStartAddress && "We don't currently support linking multiple runtime libraries"); - RuntimeStartAddress = Linker.lookupSymbol("__bolt_hugify_self").value_or(0); - if (!RuntimeStartAddress) { + auto StartSymInfo = Linker.lookupSymbolInfo("__bolt_hugify_self"); + if (!StartSymInfo) { errs() << "BOLT-ERROR: hugify library does not define __bolt_hugify_self: " << LibPath << "\n"; exit(1); } + RuntimeStartAddress = StartSymInfo->Address; } diff --git a/bolt/lib/RuntimeLibs/InstrumentationRuntimeLibrary.cpp b/bolt/lib/RuntimeLibs/InstrumentationRuntimeLibrary.cpp index 217b4f23e8572..d6d6ebecd3ec5 100644 --- a/bolt/lib/RuntimeLibs/InstrumentationRuntimeLibrary.cpp +++ b/bolt/lib/RuntimeLibs/InstrumentationRuntimeLibrary.cpp @@ -203,27 +203,35 @@ void InstrumentationRuntimeLibrary::link( if (BC.isMachO()) return; - RuntimeFiniAddress = Linker.lookupSymbol("__bolt_instr_fini").value_or(0); - if (!RuntimeFiniAddress) { + std::optional FiniSymInfo = + Linker.lookupSymbolInfo("__bolt_instr_fini"); + if (!FiniSymInfo) { errs() << "BOLT-ERROR: instrumentation library does not define " "__bolt_instr_fini: " << LibPath << "\n"; exit(1); } - RuntimeStartAddress = Linker.lookupSymbol("__bolt_instr_start").value_or(0); - if (!RuntimeStartAddress) { + RuntimeFiniAddress = FiniSymInfo->Address; + + std::optional StartSymInfo = + Linker.lookupSymbolInfo("__bolt_instr_start"); + if (!StartSymInfo) { errs() << "BOLT-ERROR: instrumentation library does not define " "__bolt_instr_start: " << LibPath << 
"\n"; exit(1); } + RuntimeStartAddress = StartSymInfo->Address; + outs() << "BOLT-INFO: output linked against instrumentation runtime " "library, lib entry point is 0x" << Twine::utohexstr(RuntimeStartAddress) << "\n"; + + std::optional ClearSymInfo = + Linker.lookupSymbolInfo("__bolt_instr_clear_counters"); + const uint64_t ClearSymAddress = ClearSymInfo ? ClearSymInfo->Address : 0; outs() << "BOLT-INFO: clear procedure is 0x" - << Twine::utohexstr( - Linker.lookupSymbol("__bolt_instr_clear_counters").value_or(0)) - << "\n"; + << Twine::utohexstr(ClearSymAddress) << "\n"; emitTablesAsELFNote(BC); } diff --git a/clang/include/clang/Basic/Builtins.h b/clang/include/clang/Basic/Builtins.h index 6d29b4315e5a7..d1c1ab44417fa 100644 --- a/clang/include/clang/Basic/Builtins.h +++ b/clang/include/clang/Basic/Builtins.h @@ -408,7 +408,8 @@ class Context { unsigned getRequiredVectorWidth(unsigned ID) const; - /// Return true if builtin ID belongs to AuxTarget. + /// Return true if the builtin ID belongs exclusively to the AuxTarget, + /// and false if it belongs to both primary and aux target, or neither. 
bool isAuxBuiltinID(unsigned ID) const { return ID >= (Builtin::FirstTSBuiltin + NumTargetBuiltins); } diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp index 3e51b4aab1082..d49128c2b40f8 100644 --- a/clang/lib/Format/ContinuationIndenter.cpp +++ b/clang/lib/Format/ContinuationIndenter.cpp @@ -473,9 +473,8 @@ bool ContinuationIndenter::mustBreak(const LineState &State) { (State.Column + State.Line->Last->TotalLength - Previous.TotalLength > getColumnLimit(State) || CurrentState.BreakBeforeParameter) && - (!Current.isTrailingComment() || Current.NewlinesBefore > 0) && - (Style.BreakConstructorInitializers != FormatStyle::BCIS_BeforeColon || - Style.ColumnLimit > 0 || Current.NewlinesBefore > 0)) { + ((!Current.isTrailingComment() && Style.ColumnLimit > 0) || + Current.NewlinesBefore > 0)) { return true; } diff --git a/clang/lib/Index/IndexTypeSourceInfo.cpp b/clang/lib/Index/IndexTypeSourceInfo.cpp index b986ccde57452..d5d0a3c422871 100644 --- a/clang/lib/Index/IndexTypeSourceInfo.cpp +++ b/clang/lib/Index/IndexTypeSourceInfo.cpp @@ -11,6 +11,7 @@ #include "clang/AST/PrettyPrinter.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/TypeLoc.h" +#include "clang/Sema/HeuristicResolver.h" #include "llvm/ADT/ScopeExit.h" using namespace clang; @@ -207,27 +208,8 @@ class TypeIndexer : public RecursiveASTVisitor { } bool VisitDependentNameTypeLoc(DependentNameTypeLoc TL) { - const DependentNameType *DNT = TL.getTypePtr(); - const NestedNameSpecifier *NNS = DNT->getQualifier(); - const Type *T = NNS->getAsType(); - if (!T) - return true; - const TemplateSpecializationType *TST = - T->getAs(); - if (!TST) - return true; - TemplateName TN = TST->getTemplateName(); - const ClassTemplateDecl *TD = - dyn_cast_or_null(TN.getAsTemplateDecl()); - if (!TD) - return true; - CXXRecordDecl *RD = TD->getTemplatedDecl(); - if (!RD->hasDefinition()) - return true; - RD = RD->getDefinition(); - DeclarationName 
Name(DNT->getIdentifier()); - std::vector Symbols = RD->lookupDependentName( - Name, [](const NamedDecl *ND) { return isa(ND); }); + std::vector Symbols = + IndexCtx.getResolver()->resolveDependentNameType(TL.getTypePtr()); if (Symbols.size() != 1) return true; return IndexCtx.handleReference(Symbols[0], TL.getNameLoc(), Parent, diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp index d95763b22a819..32cc9d33730d5 100644 --- a/clang/lib/Sema/SemaDecl.cpp +++ b/clang/lib/Sema/SemaDecl.cpp @@ -4803,7 +4803,8 @@ bool Sema::checkVarDeclRedefinition(VarDecl *Old, VarDecl *New) { (New->getFormalLinkage() == Linkage::Internal || New->isInline() || isa(New) || New->getDescribedVarTemplate() || New->getNumTemplateParameterLists() || - New->getDeclContext()->isDependentContext())) { + New->getDeclContext()->isDependentContext() || + New->hasAttr())) { // The previous definition is hidden, and multiple definitions are // permitted (in separate TUs). Demote this to a declaration. 
New->demoteThisDefinitionToDeclaration(); diff --git a/clang/test/Modules/pr127943.cppm b/clang/test/Modules/pr127943.cppm new file mode 100644 index 0000000000000..7cc3be6903e6a --- /dev/null +++ b/clang/test/Modules/pr127943.cppm @@ -0,0 +1,31 @@ +// RUN: rm -rf %t +// RUN: mkdir -p %t +// RUN: split-file %s %t +// +// RUN: %clang_cc1 -std=c++20 %t/repro.cppm -fdeclspec -emit-module-interface -o %t/repro.pcm +// RUN: %clang_cc1 -std=c++20 %t/source.cpp -fdeclspec -fsyntax-only -verify -fprebuilt-module-path=%t + +//--- repro_decl.hpp +#pragma once + +extern "C" +{ + __declspec(selectany) int foo = 0; +} + +//--- repro.cppm +module; +#include "repro_decl.hpp" + +export module repro; + +export inline int func() +{ + return foo; +} + +//--- source.cpp +// expected-no-diagnostics +import repro; + +#include "repro_decl.hpp" diff --git a/clang/unittests/Format/FormatTest.cpp b/clang/unittests/Format/FormatTest.cpp index d6d028436d39c..132264486100d 100644 --- a/clang/unittests/Format/FormatTest.cpp +++ b/clang/unittests/Format/FormatTest.cpp @@ -8292,31 +8292,40 @@ TEST_F(FormatTest, BreakConstructorInitializersAfterColon) { Style); Style.ColumnLimit = 0; - verifyFormat("SomeClass::Constructor() :\n" - " a(a) {}", - Style); - verifyFormat("SomeClass::Constructor() noexcept :\n" - " a(a) {}", - Style); - verifyFormat("SomeClass::Constructor() :\n" - " a(a), b(b), c(c) {}", - Style); - verifyFormat("SomeClass::Constructor() :\n" - " a(a) {\n" - " foo();\n" - " bar();\n" - "}", + verifyNoChange("SomeClass::Constructor() :\n" + " a(a) {}", + Style); + verifyNoChange("SomeClass::Constructor() noexcept :\n" + " a(a) {}", + Style); + verifyNoChange("SomeClass::Constructor() :\n" + " a(a), b(b), c(c) {}", + Style); + verifyNoChange("SomeClass::Constructor() :\n" + " a(a) {\n" + " foo();\n" + " bar();\n" + "}", + Style); + verifyFormat("struct Foo {\n" + " int x;\n" + " Foo() : x(0) {}\n" + "};", + "struct Foo {\n" + " int x;\n" + " Foo():x(0) {}\n" + "};", Style); 
Style.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_None; - verifyFormat("SomeClass::Constructor() :\n" - " a(a), b(b), c(c) {\n" - "}", - Style); - verifyFormat("SomeClass::Constructor() :\n" - " a(a) {\n" - "}", - Style); + verifyNoChange("SomeClass::Constructor() :\n" + " a(a), b(b), c(c) {\n" + "}", + Style); + verifyNoChange("SomeClass::Constructor() :\n" + " a(a) {\n" + "}", + Style); Style.ColumnLimit = 80; Style.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_All; diff --git a/compiler-rt/test/orc/TestCases/Generic/lazy-link.ll b/compiler-rt/test/orc/TestCases/Generic/lazy-link.ll index 1c375bcf1e62f..da32703a7ab1c 100644 --- a/compiler-rt/test/orc/TestCases/Generic/lazy-link.ll +++ b/compiler-rt/test/orc/TestCases/Generic/lazy-link.ll @@ -9,8 +9,9 @@ ; RUN: %clang -c -o %t/bar.o %S/Inputs/bar-ret-void-weak.ll ; RUN: %clang -c -o %t/baz.o %S/Inputs/baz-ret-void-hidden.ll ; RUN: %clang -c -o %t/main.o %s -; RUN: %llvm_jitlink -noexec -show-linked-files %t/main.o -lazy %t/foo.o \ -; RUN: -lazy %t/x.o -lazy %t/bar.o -lazy %t/baz.o | FileCheck %s +; RUN: %llvm_jitlink -num-threads=0 -noexec -show-linked-files %t/main.o \ +; RUN: -lazy %t/foo.o -lazy %t/x.o -lazy %t/bar.o -lazy %t/baz.o \ +; RUN: | FileCheck %s ; ; UNSUPPORTED: system-windows ; REQUIRES: target={{(arm|aarch|x86_)64.*}} diff --git a/flang/include/flang/Optimizer/Builder/IntrinsicCall.h b/flang/include/flang/Optimizer/Builder/IntrinsicCall.h index caec6a913293f..b679ef74870b1 100644 --- a/flang/include/flang/Optimizer/Builder/IntrinsicCall.h +++ b/flang/include/flang/Optimizer/Builder/IntrinsicCall.h @@ -336,6 +336,7 @@ struct IntrinsicLibrary { template mlir::Value genMask(mlir::Type, llvm::ArrayRef); mlir::Value genMatchAllSync(mlir::Type, llvm::ArrayRef); + mlir::Value genMatchAnySync(mlir::Type, llvm::ArrayRef); fir::ExtendedValue genMatmul(mlir::Type, llvm::ArrayRef); fir::ExtendedValue genMatmulTranspose(mlir::Type, llvm::ArrayRef); diff --git a/flang/lib/Lower/Bridge.cpp 
b/flang/lib/Lower/Bridge.cpp index 1b24ed12e04f1..f824e4c621c8e 100644 --- a/flang/lib/Lower/Bridge.cpp +++ b/flang/lib/Lower/Bridge.cpp @@ -3114,50 +3114,127 @@ class FirConverter : public Fortran::lower::AbstractConverter { llvm::SmallVector ivValues; Fortran::lower::pft::Evaluation *loopEval = &getEval().getFirstNestedEvaluation(); - for (unsigned i = 0; i < nestedLoops; ++i) { - const Fortran::parser::LoopControl *loopControl; - mlir::Location crtLoc = loc; - if (i == 0) { - loopControl = &*outerDoConstruct->GetLoopControl(); - crtLoc = - genLocation(Fortran::parser::FindSourceLocation(outerDoConstruct)); - } else { - auto *doCons = loopEval->getIf(); - assert(doCons && "expect do construct"); - loopControl = &*doCons->GetLoopControl(); - crtLoc = genLocation(Fortran::parser::FindSourceLocation(*doCons)); + if (outerDoConstruct->IsDoConcurrent()) { + // Handle DO CONCURRENT + locs.push_back( + genLocation(Fortran::parser::FindSourceLocation(outerDoConstruct))); + const Fortran::parser::LoopControl *loopControl = + &*outerDoConstruct->GetLoopControl(); + const auto &concurrent = + std::get(loopControl->u); + + if (!std::get>(concurrent.t) + .empty()) + TODO(loc, "DO CONCURRENT with locality spec"); + + const auto &concurrentHeader = + std::get(concurrent.t); + const auto &controls = + std::get>( + concurrentHeader.t); + + for (const auto &control : controls) { + mlir::Value lb = fir::getBase(genExprValue( + *Fortran::semantics::GetExpr(std::get<1>(control.t)), stmtCtx)); + mlir::Value ub = fir::getBase(genExprValue( + *Fortran::semantics::GetExpr(std::get<2>(control.t)), stmtCtx)); + mlir::Value step; + + if (const auto &expr = + std::get>( + control.t)) + step = fir::getBase( + genExprValue(*Fortran::semantics::GetExpr(*expr), stmtCtx)); + else + step = builder->create( + loc, 1); // Use index type directly + + // Ensure lb, ub, and step are of index type using fir.convert + mlir::Type indexType = builder->getIndexType(); + lb = builder->create(loc, indexType, 
lb); + ub = builder->create(loc, indexType, ub); + step = builder->create(loc, indexType, step); + + lbs.push_back(lb); + ubs.push_back(ub); + steps.push_back(step); + + const auto &name = std::get(control.t); + + // Handle induction variable + mlir::Value ivValue = getSymbolAddress(*name.symbol); + std::size_t ivTypeSize = name.symbol->size(); + if (ivTypeSize == 0) + llvm::report_fatal_error("unexpected induction variable size"); + mlir::Type ivTy = builder->getIntegerType(ivTypeSize * 8); + + if (!ivValue) { + // DO CONCURRENT induction variables are not mapped yet since they are + // local to the DO CONCURRENT scope. + mlir::OpBuilder::InsertPoint insPt = builder->saveInsertionPoint(); + builder->setInsertionPointToStart(builder->getAllocaBlock()); + ivValue = builder->createTemporaryAlloc( + loc, ivTy, toStringRef(name.symbol->name())); + builder->restoreInsertionPoint(insPt); + } + + // Create the hlfir.declare operation using the symbol's name + auto declareOp = builder->create( + loc, ivValue, toStringRef(name.symbol->name())); + ivValue = declareOp.getResult(0); + + // Bind the symbol to the declared variable + bindSymbol(*name.symbol, ivValue); + ivValues.push_back(ivValue); + ivTypes.push_back(ivTy); + ivLocs.push_back(loc); } + } else { + for (unsigned i = 0; i < nestedLoops; ++i) { + const Fortran::parser::LoopControl *loopControl; + mlir::Location crtLoc = loc; + if (i == 0) { + loopControl = &*outerDoConstruct->GetLoopControl(); + crtLoc = genLocation( + Fortran::parser::FindSourceLocation(outerDoConstruct)); + } else { + auto *doCons = loopEval->getIf(); + assert(doCons && "expect do construct"); + loopControl = &*doCons->GetLoopControl(); + crtLoc = genLocation(Fortran::parser::FindSourceLocation(*doCons)); + } - locs.push_back(crtLoc); - - const Fortran::parser::LoopControl::Bounds *bounds = - std::get_if(&loopControl->u); - assert(bounds && "Expected bounds on the loop construct"); - - Fortran::semantics::Symbol &ivSym = - 
bounds->name.thing.symbol->GetUltimate(); - ivValues.push_back(getSymbolAddress(ivSym)); - - lbs.push_back(builder->createConvert( - crtLoc, idxTy, - fir::getBase(genExprValue(*Fortran::semantics::GetExpr(bounds->lower), - stmtCtx)))); - ubs.push_back(builder->createConvert( - crtLoc, idxTy, - fir::getBase(genExprValue(*Fortran::semantics::GetExpr(bounds->upper), - stmtCtx)))); - if (bounds->step) - steps.push_back(builder->createConvert( + locs.push_back(crtLoc); + + const Fortran::parser::LoopControl::Bounds *bounds = + std::get_if(&loopControl->u); + assert(bounds && "Expected bounds on the loop construct"); + + Fortran::semantics::Symbol &ivSym = + bounds->name.thing.symbol->GetUltimate(); + ivValues.push_back(getSymbolAddress(ivSym)); + + lbs.push_back(builder->createConvert( crtLoc, idxTy, fir::getBase(genExprValue( - *Fortran::semantics::GetExpr(bounds->step), stmtCtx)))); - else // If `step` is not present, assume it is `1`. - steps.push_back(builder->createIntegerConstant(loc, idxTy, 1)); - - ivTypes.push_back(idxTy); - ivLocs.push_back(crtLoc); - if (i < nestedLoops - 1) - loopEval = &*std::next(loopEval->getNestedEvaluations().begin()); + *Fortran::semantics::GetExpr(bounds->lower), stmtCtx)))); + ubs.push_back(builder->createConvert( + crtLoc, idxTy, + fir::getBase(genExprValue( + *Fortran::semantics::GetExpr(bounds->upper), stmtCtx)))); + if (bounds->step) + steps.push_back(builder->createConvert( + crtLoc, idxTy, + fir::getBase(genExprValue( + *Fortran::semantics::GetExpr(bounds->step), stmtCtx)))); + else // If `step` is not present, assume it is `1`. 
+ steps.push_back(builder->createIntegerConstant(loc, idxTy, 1)); + + ivTypes.push_back(idxTy); + ivLocs.push_back(crtLoc); + if (i < nestedLoops - 1) + loopEval = &*std::next(loopEval->getNestedEvaluations().begin()); + } } auto op = builder->create( diff --git a/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp b/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp index 436f7a1154c7c..70fa18ad65b9b 100644 --- a/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp +++ b/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp @@ -7,7 +7,6 @@ //===----------------------------------------------------------------------===// #include "flang/Optimizer/Analysis/AliasAnalysis.h" -#include "flang/Optimizer/CodeGen/CGOps.h" #include "flang/Optimizer/Dialect/FIROps.h" #include "flang/Optimizer/Dialect/FIROpsSupport.h" #include "flang/Optimizer/Dialect/FIRType.h" @@ -62,17 +61,13 @@ getOriginalDef(mlir::Value v, mlir::Type ty = defOp->getResultTypes()[0]; llvm::TypeSwitch(defOp) .Case([&](fir::ConvertOp op) { v = op.getValue(); }) - .Case( - [&](auto op) { - v = op.getMemref(); - auto varIf = - llvm::dyn_cast(defOp); - if (varIf) { - attributes |= getAttrsFromVariable(varIf); - isCapturedInInternalProcedure |= - varIf.isCapturedInInternalProcedure(); - } - }) + .Case([&](auto op) { + v = op.getMemref(); + auto varIf = llvm::cast(defOp); + attributes |= getAttrsFromVariable(varIf); + isCapturedInInternalProcedure |= + varIf.isCapturedInInternalProcedure(); + }) .Case([&](auto op) { if (fir::AliasAnalysis::isPointerReference(ty)) attributes.set(fir::AliasAnalysis::Attribute::Pointer); @@ -596,21 +591,19 @@ AliasAnalysis::Source AliasAnalysis::getSource(mlir::Value v, followBoxData = true; approximateSource = true; }) - .Case( - [&](auto op) { - if (followBoxData) { - v = op->getOperand(0); - defOp = v.getDefiningOp(); - } else - breakFromLoop = true; - }) + .Case([&](auto op) { + if (followBoxData) { + v = op->getOperand(0); + defOp = v.getDefiningOp(); + } else + breakFromLoop = true; + }) 
.Case([&](auto op) { // If load is inside target and it points to mapped item, // continue tracking. Operation *loadMemrefOp = op.getMemref().getDefiningOp(); bool isDeclareOp = llvm::isa_and_present(loadMemrefOp) || - llvm::isa_and_present(loadMemrefOp) || llvm::isa_and_present(loadMemrefOp); if (isDeclareOp && llvm::isa(loadMemrefOp->getParentOp())) { @@ -673,8 +666,7 @@ AliasAnalysis::Source AliasAnalysis::getSource(mlir::Value v, global = llvm::cast(op).getSymbol(); breakFromLoop = true; }) - .Case([&](auto op) { + .Case([&](auto op) { bool isPrivateItem = false; if (omp::BlockArgOpenMPOpInterface argIface = dyn_cast(op->getParentOp())) { @@ -708,33 +700,30 @@ AliasAnalysis::Source AliasAnalysis::getSource(mlir::Value v, return; } } - auto varIf = llvm::dyn_cast(defOp); - if (varIf) { - // While going through a declare operation collect - // the variable attributes from it. Right now, some - // of the attributes are duplicated, e.g. a TARGET dummy - // argument has the target attribute both on its declare - // operation and on the entry block argument. - // In case of host associated use, the declare operation - // is the only carrier of the variable attributes, - // so we have to collect them here. - attributes |= getAttrsFromVariable(varIf); - isCapturedInInternalProcedure |= - varIf.isCapturedInInternalProcedure(); - if (varIf.isHostAssoc()) { - // Do not track past such DeclareOp, because it does not - // currently provide any useful information. The host associated - // access will end up dereferencing the host association tuple, - // so we may as well stop right now. - v = defOp->getResult(0); - // TODO: if the host associated variable is a dummy argument - // of the host, I think, we can treat it as SourceKind::Argument - // for the purpose of alias analysis inside the internal - // procedure. 
- type = SourceKind::HostAssoc; - breakFromLoop = true; - return; - } + auto varIf = llvm::cast(defOp); + // While going through a declare operation collect + // the variable attributes from it. Right now, some + // of the attributes are duplicated, e.g. a TARGET dummy + // argument has the target attribute both on its declare + // operation and on the entry block argument. + // In case of host associated use, the declare operation + // is the only carrier of the variable attributes, + // so we have to collect them here. + attributes |= getAttrsFromVariable(varIf); + isCapturedInInternalProcedure |= + varIf.isCapturedInInternalProcedure(); + if (varIf.isHostAssoc()) { + // Do not track past such DeclareOp, because it does not + // currently provide any useful information. The host associated + // access will end up dereferencing the host association tuple, + // so we may as well stop right now. + v = defOp->getResult(0); + // TODO: if the host associated variable is a dummy argument + // of the host, I think, we can treat it as SourceKind::Argument + // for the purpose of alias analysis inside the internal procedure. + type = SourceKind::HostAssoc; + breakFromLoop = true; + return; } if (getLastInstantiationPoint) { // Fetch only the innermost instantiation point. 
diff --git a/flang/lib/Optimizer/Analysis/CMakeLists.txt b/flang/lib/Optimizer/Analysis/CMakeLists.txt index 3249f8a76ae3e..4d4ad882c27d3 100644 --- a/flang/lib/Optimizer/Analysis/CMakeLists.txt +++ b/flang/lib/Optimizer/Analysis/CMakeLists.txt @@ -6,14 +6,12 @@ add_flang_library(FIRAnalysis FIRDialect FIRSupport HLFIRDialect - FIRCodeGen LINK_LIBS FIRBuilder FIRDialect FIRSupport HLFIRDialect - FIRCodeGen MLIR_DEPS MLIRIR diff --git a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp index 754496921ca3a..d98ee58ace2bc 100644 --- a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp +++ b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp @@ -485,6 +485,22 @@ static constexpr IntrinsicHandler handlers[]{ &I::genMatchAllSync, {{{"mask", asValue}, {"value", asValue}, {"pred", asAddr}}}, /*isElemental=*/false}, + {"match_any_syncjd", + &I::genMatchAnySync, + {{{"mask", asValue}, {"value", asValue}}}, + /*isElemental=*/false}, + {"match_any_syncjf", + &I::genMatchAnySync, + {{{"mask", asValue}, {"value", asValue}}}, + /*isElemental=*/false}, + {"match_any_syncjj", + &I::genMatchAnySync, + {{{"mask", asValue}, {"value", asValue}}}, + /*isElemental=*/false}, + {"match_any_syncjx", + &I::genMatchAnySync, + {{{"mask", asValue}, {"value", asValue}}}, + /*isElemental=*/false}, {"matmul", &I::genMatmul, {{{"matrix_a", asAddr}, {"matrix_b", asAddr}}}, @@ -6060,6 +6076,7 @@ mlir::Value IntrinsicLibrary::genMask(mlir::Type resultType, return result; } +// MATCH_ALL_SYNC mlir::Value IntrinsicLibrary::genMatchAllSync(mlir::Type resultType, llvm::ArrayRef args) { @@ -6096,6 +6113,32 @@ IntrinsicLibrary::genMatchAllSync(mlir::Type resultType, return value; } +// MATCH_ANY_SYNC +mlir::Value +IntrinsicLibrary::genMatchAnySync(mlir::Type resultType, + llvm::ArrayRef args) { + assert(args.size() == 2); + bool is32 = args[1].getType().isInteger(32) || args[1].getType().isF32(); + + llvm::StringRef funcName = + is32 ? 
"llvm.nvvm.match.any.sync.i32p" : "llvm.nvvm.match.any.sync.i64p"; + mlir::MLIRContext *context = builder.getContext(); + mlir::Type i32Ty = builder.getI32Type(); + mlir::Type i64Ty = builder.getI64Type(); + mlir::Type valTy = is32 ? i32Ty : i64Ty; + + mlir::FunctionType ftype = + mlir::FunctionType::get(context, {i32Ty, valTy}, {i32Ty}); + auto funcOp = builder.createFunction(loc, funcName, ftype); + llvm::SmallVector filteredArgs; + filteredArgs.push_back(args[0]); + if (args[1].getType().isF32() || args[1].getType().isF64()) + filteredArgs.push_back(builder.create(loc, valTy, args[1])); + else + filteredArgs.push_back(args[1]); + return builder.create(loc, funcOp, filteredArgs).getResult(0); +} + // MATMUL fir::ExtendedValue IntrinsicLibrary::genMatmul(mlir::Type resultType, diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp index 439cc7a856236..bd87215eeb179 100644 --- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp +++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp @@ -3575,6 +3575,14 @@ struct UndefOpConversion : public fir::FIROpConversion { llvm::LogicalResult matchAndRewrite(fir::UndefOp undef, OpAdaptor, mlir::ConversionPatternRewriter &rewriter) const override { + if (mlir::isa(undef.getType())) { + // Dummy scoping is used for Fortran analyses like AA. Once it gets to + // pre-codegen rewrite it is erased and a fir.undef is created to + // feed to the fir declare operation. Thus, during codegen, we can + // simply erase is as it is no longer used. 
+ rewriter.eraseOp(undef); + return mlir::success(); + } rewriter.replaceOpWithNewOp( undef, convertType(undef.getType())); return mlir::success(); diff --git a/flang/lib/Semantics/check-cuda.cpp b/flang/lib/Semantics/check-cuda.cpp index c85a84ea5527f..cb7a383284e63 100644 --- a/flang/lib/Semantics/check-cuda.cpp +++ b/flang/lib/Semantics/check-cuda.cpp @@ -525,6 +525,21 @@ static int DoConstructTightNesting( return 0; } innerBlock = &std::get(doConstruct->t); + if (doConstruct->IsDoConcurrent()) { + const auto &loopControl = doConstruct->GetLoopControl(); + if (loopControl) { + if (const auto *concurrentControl{ + std::get_if(&loopControl->u)}) { + const auto &concurrentHeader = + std::get(concurrentControl->t); + const auto &controls = + std::get>( + concurrentHeader.t); + return controls.size(); + } + } + return 0; + } if (innerBlock->size() == 1) { if (const auto *execConstruct{ std::get_if(&innerBlock->front().u)}) { @@ -598,9 +613,14 @@ void CUDAChecker::Enter(const parser::CUFKernelDoConstruct &x) { std::get>(x.t))}; const parser::Block *innerBlock{nullptr}; if (DoConstructTightNesting(doConstruct, innerBlock) < depth) { - context_.Say(source, - "!$CUF KERNEL DO (%jd) must be followed by a DO construct with tightly nested outer levels of counted DO loops"_err_en_US, - std::intmax_t{depth}); + if (doConstruct && doConstruct->IsDoConcurrent()) + context_.Say(source, + "!$CUF KERNEL DO (%jd) must be followed by a DO CONCURRENT construct with at least %jd indices"_err_en_US, + std::intmax_t{depth}, std::intmax_t{depth}); + else + context_.Say(source, + "!$CUF KERNEL DO (%jd) must be followed by a DO construct with tightly nested outer levels of counted DO loops"_err_en_US, + std::intmax_t{depth}); } if (innerBlock) { DeviceContextChecker{context_}.Check(*innerBlock); diff --git a/flang/module/cudadevice.f90 b/flang/module/cudadevice.f90 index c75c5c191ab51..8b31c0c0856fd 100644 --- a/flang/module/cudadevice.f90 +++ b/flang/module/cudadevice.f90 @@ -589,4 
+589,27 @@ attributes(device) integer function match_all_syncjd(mask, val, pred) end function end interface +interface match_any_sync + attributes(device) integer function match_any_syncjj(mask, val) +!dir$ ignore_tkr(d) mask, (d) val + integer(4), value :: mask + integer(4), value :: val + end function + attributes(device) integer function match_any_syncjx(mask, val) +!dir$ ignore_tkr(d) mask, (d) val + integer(4), value :: mask + integer(8), value :: val + end function + attributes(device) integer function match_any_syncjf(mask, val) +!dir$ ignore_tkr(d) mask, (d) val + integer(4), value :: mask + real(4), value :: val + end function + attributes(device) integer function match_any_syncjd(mask, val) +!dir$ ignore_tkr(d) mask, (d) val + integer(4), value :: mask + real(8), value :: val + end function +end interface + end module diff --git a/flang/test/Analysis/AliasAnalysis/fircg-as-sources.fir b/flang/test/Analysis/AliasAnalysis/fircg-as-sources.fir deleted file mode 100644 index edb3b1dadb8cd..0000000000000 --- a/flang/test/Analysis/AliasAnalysis/fircg-as-sources.fir +++ /dev/null @@ -1,108 +0,0 @@ -// Check aliasing with the address *in* (not *of*) a local (fir.alloca) pointer -// variable. -// -// Throughout this test, the ".fir" suffix on symbols indicates a version of the -// MLIR after convert-hlfir-to-fir. We would like alias analysis results to be -// the same in both versions. 
- -// RUN: fir-opt %s -split-input-file -o /dev/null --mlir-disable-threading \ -// RUN: -pass-pipeline='builtin.module(func.func(test-fir-alias-analysis))' \ -// RUN: 2>&1 | FileCheck -match-full-lines %s - -// subroutine test(p1, arr, t_arr, alloc, t_alloc, t, v) -// real, pointer :: p1 -// real :: arr(:) -// real, target :: t_arr(:) -// real, allocatable :: alloc -// real, allocatable, target :: t_alloc -// real, target :: t -// real :: v -// real, pointer :: p0 -// end subroutine test - -// check when fircg.ext_rebox and fircg.ext_declare are in the path of tracing the source -// CHECK-LABEL: Testing : "_QPtest.fir" -// CHECK-DAG: p0.tgt.fir#0 <-> arr(1).fir#0: NoAlias -// CHECK-DAG: p0.tgt.fir#0 <-> t_arr(1).fir#0: MayAlias -// CHECK-DAG: p0.tgt.fir#0 <-> alloc.tgt.fir#0: NoAlias -// CHECK-DAG: p0.tgt.fir#0 <-> t_alloc.tgt.fir#0: MayAlias -// CHECK-DAG: alloc.fir#0 <-> alloc.tgt.fir#0: NoAlias - -func.func @_QPtest.fir(%arg0: !fir.ref>> {fir.bindc_name = "p1"}, %arg1: !fir.box> {fir.bindc_name = "arr"}, %arg2: !fir.box> {fir.bindc_name = "t_arr", fir.target}, %arg3: !fir.ref>> {fir.bindc_name = "alloc"}, %arg4: !fir.ref>> {fir.bindc_name = "t_alloc", fir.target}, %arg5: !fir.ref {fir.bindc_name = "t", fir.target}, %arg6: !fir.ref {fir.bindc_name = "v"}) { - %0 = fir.dummy_scope : !fir.dscope - %1 = fircg.ext_declare %arg3 dummy_scope %0 {test.ptr = "alloc.fir", fortran_attrs = #fir.var_attrs, uniq_name = "_QFtestEalloc"} : (!fir.ref>>, !fir.dscope) -> !fir.ref>> - %2 = fir.declare %arg1 dummy_scope %0 {uniq_name = "_QFtestEarr"} : (!fir.box>, !fir.dscope) -> !fir.box> - %3 = fircg.ext_rebox %2 : (!fir.box>) -> !fir.box> - %4 = fir.alloca !fir.box> {bindc_name = "p0", uniq_name = "_QFtestEp0"} - %5 = fircg.ext_declare %4 {test.ptr = "p0.fir", fortran_attrs = #fir.var_attrs, uniq_name = "_QFtestEp0"} : (!fir.ref>>) -> !fir.ref>> - %6 = fir.declare %arg0 dummy_scope %0 {test.ptr = "p1.fir", fortran_attrs = #fir.var_attrs, uniq_name = "_QFtestEp1"} : (!fir.ref>>, 
!fir.dscope) -> !fir.ref>> - %7 = fir.declare %arg5 dummy_scope %0 {test.ptr = "t.fir", fortran_attrs = #fir.var_attrs, uniq_name = "_QFtestEt"} : (!fir.ref, !fir.dscope) -> !fir.ref - %8 = fir.declare %arg4 dummy_scope %0 {fortran_attrs = #fir.var_attrs, uniq_name = "_QFtestEt_alloc"} : (!fir.ref>>, !fir.dscope) -> !fir.ref>> - %9 = fir.declare %arg2 dummy_scope %0 {fortran_attrs = #fir.var_attrs, uniq_name = "_QFtestEt_arr"} : (!fir.box>, !fir.dscope) -> !fir.box> - %10 = fircg.ext_rebox %9 : (!fir.box>) -> !fir.box> - %11 = fir.declare %arg6 dummy_scope %0 {test.ptr = "v.fir", uniq_name = "_QFtestEv"} : (!fir.ref, !fir.dscope) -> !fir.ref - %12 = fir.load %5 : !fir.ref>> - %13 = fir.box_addr %12 {test.ptr = "p0.tgt.fir"} : (!fir.box>) -> !fir.ptr - %14 = fir.load %6 : !fir.ref>> - %15 = fir.box_addr %14 {test.ptr = "p1.tgt.fir"} : (!fir.box>) -> !fir.ptr - %c1 = arith.constant 1 : index - %16 = fir.array_coor %3 %c1 {test.ptr="arr(1).fir"} : (!fir.box>, index) -> !fir.ref - %c1_0 = arith.constant 1 : index - %17 = fir.array_coor %10 %c1_0 {test.ptr="t_arr(1).fir"} : (!fir.box>, index) -> !fir.ref - %18 = fir.load %1 : !fir.ref>> - %19 = fir.box_addr %18 {test.ptr = "alloc.tgt.fir"} : (!fir.box>) -> !fir.heap - %20 = fir.load %8 : !fir.ref>> - %21 = fir.box_addr %20 {test.ptr = "t_alloc.tgt.fir"} : (!fir.box>) -> !fir.heap - return -} - -// ----- -// CHECK-LABEL: Testing : "_QFPtest3" - -// module pointers -// real, pointer :: p -// end module -// -// program main -// use pointers -// real, target :: var1 = 1, var2 =2 -// p => var1 -// -// call test3(p) -// -// contains -// subroutine test3(p1) -// real, pointer :: p1 -// p1 => var2 -// print *, p -// end subroutine -// end - -// check when there are fircg.ext_embox in the paths -// CHECK-DAG: p#0 <-> box.addr#0: NoAlias -// CHECK-DAG: box.addr#0 <-> func.region0#0: NoAlias -// CHECK-DAG: var2#0 <-> p#0: NoAlias -// CHECK-DAG: var2#0 <-> box.addr#0: MustAlias -// CHECK-DAG: var2#0 <-> func.region0#1: NoAlias -// 
CHECK-DAG: box.addr#0 <-> func.region0#1: NoAlias - -fir.global @_QMpointersEp : !fir.box> { - %0 = fir.zero_bits !fir.ptr - %1 = fircg.ext_embox %0 : (!fir.ptr) -> !fir.box> - fir.has_value %1 : !fir.box> -} - -fir.global internal @_QFEvar2 target : f32 { - %cst = arith.constant 2.000000e+00 : f32 - fir.has_value %cst : f32 -} - -func.func @_QFPtest3(%arg0: !fir.ref>> {fir.bindc_name = "p1"}, %arg1: !fir.ref) attributes {test.ptr = "func"} { - %3 = fir.load %arg0 {test.ptr = "arg0.load"}: !fir.ref>> - %4 = fir.address_of(@_QFEvar2) {test.ptr = "var2"} : !fir.ref - %5 = fir.address_of(@_QMpointersEp) {test.ptr = "p"} : !fir.ref>> - %6 = fircg.ext_embox %4 : (!fir.ref) -> !fir.box> - %13 = fir.box_addr %6 {test.ptr = "box.addr"} : (!fir.box>) -> !fir.ptr - return -} - diff --git a/flang/test/Lower/CUDA/cuda-device-proc.cuf b/flang/test/Lower/CUDA/cuda-device-proc.cuf index 1210dae8608c8..e7d1dba385bb8 100644 --- a/flang/test/Lower/CUDA/cuda-device-proc.cuf +++ b/flang/test/Lower/CUDA/cuda-device-proc.cuf @@ -131,6 +131,25 @@ end subroutine ! CHECK: fir.convert %{{.*}} : (f64) -> i64 ! CHECK: fir.call @llvm.nvvm.match.all.sync.i64p +attributes(device) subroutine testMatchAny() + integer :: a, mask, v32 + integer(8) :: v64 + real(4) :: r4 + real(8) :: r8 + a = match_any_sync(mask, v32) + a = match_any_sync(mask, v64) + a = match_any_sync(mask, r4) + a = match_any_sync(mask, r8) +end subroutine + +! CHECK-LABEL: func.func @_QPtestmatchany() +! CHECK: fir.call @llvm.nvvm.match.any.sync.i32p +! CHECK: fir.call @llvm.nvvm.match.any.sync.i64p +! CHECK: fir.convert %{{.*}} : (f32) -> i32 +! CHECK: fir.call @llvm.nvvm.match.any.sync.i32p +! CHECK: fir.convert %{{.*}} : (f64) -> i64 +! CHECK: fir.call @llvm.nvvm.match.any.sync.i64p + ! CHECK: func.func private @llvm.nvvm.barrier0() ! CHECK: func.func private @llvm.nvvm.bar.warp.sync(i32) ! CHECK: func.func private @llvm.nvvm.membar.gl() @@ -141,3 +160,5 @@ end subroutine ! 
CHECK: func.func private @llvm.nvvm.barrier0.or(i32) -> i32 ! CHECK: func.func private @llvm.nvvm.match.all.sync.i32p(i32, i32) -> tuple ! CHECK: func.func private @llvm.nvvm.match.all.sync.i64p(i32, i64) -> tuple +! CHECK: func.func private @llvm.nvvm.match.any.sync.i32p(i32, i32) -> i32 +! CHECK: func.func private @llvm.nvvm.match.any.sync.i64p(i32, i64) -> i32 diff --git a/flang/test/Lower/CUDA/cuda-doconc.cuf b/flang/test/Lower/CUDA/cuda-doconc.cuf new file mode 100644 index 0000000000000..32cd1676b22f4 --- /dev/null +++ b/flang/test/Lower/CUDA/cuda-doconc.cuf @@ -0,0 +1,39 @@ +! RUN: bbc -emit-hlfir -fcuda %s -o - | FileCheck %s + +! Check if do concurrent works inside cuf kernel directive + +subroutine doconc1 + integer :: i, n + integer, managed :: a(3) + a(:) = -1 + n = 3 + n = n - 1 + !$cuf kernel do + do concurrent(i=1:n) + a(i) = 1 + end do +end + +! CHECK: func.func @_QPdoconc1() { +! CHECK: %[[DECL:.*]]:2 = hlfir.declare %{{.*}}#0 {uniq_name = "_QFdoconc1Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) +! CHECK: cuf.kernel<<<*, *>>> +! CHECK: %{{.*}} = fir.load %[[DECL]]#0 : !fir.ref + +subroutine doconc2 + integer :: i, j, m, n + integer, managed :: a(2, 4) + m = 2 + n = 4 + a(:,:) = -1 + !$cuf kernel do + do concurrent(i=1:m,j=1:n) + a(i,j) = i+j + end do +end + +! CHECK: func.func @_QPdoconc2() { +! CHECK: %[[DECLI:.*]]:2 = hlfir.declare %{{.*}}#0 {uniq_name = "_QFdoconc2Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) +! CHECK: %[[DECLJ:.*]]:2 = hlfir.declare %{{.*}}#0 {uniq_name = "_QFdoconc2Ej"} : (!fir.ref) -> (!fir.ref, !fir.ref) +! CHECK: cuf.kernel<<<*, *>>> (%arg0 : i32, %arg1 : i32) = (%{{.*}}, %{{.*}} : index, index) to (%{{.*}}, %{{.*}} : index, index) step (%{{.*}}, %{{.*}} : index, index) { +! CHECK: %{{.*}} = fir.load %[[DECLI]]#0 : !fir.ref +! 
CHECK: %{{.*}} = fir.load %[[DECLJ]]#0 : !fir.ref diff --git a/flang/test/Semantics/cuf09.cuf b/flang/test/Semantics/cuf09.cuf index 7d32e0d70ba36..a8c62db65c6d5 100644 --- a/flang/test/Semantics/cuf09.cuf +++ b/flang/test/Semantics/cuf09.cuf @@ -133,6 +133,10 @@ program main !$cuf kernel do <<< 1, 2 >>> do concurrent (j=1:10) end do + !ERROR: !$CUF KERNEL DO (2) must be followed by a DO CONCURRENT construct with at least 2 indices + !$cuf kernel do(2) <<< 1, 2 >>> + do concurrent (j=1:10) + end do !$cuf kernel do <<< 1, 2 >>> do 1 j=1,10 1 continue ! ok diff --git a/libc/cmake/modules/LLVMLibCTestRules.cmake b/libc/cmake/modules/LLVMLibCTestRules.cmake index f33db5826537b..03b4b251649e7 100644 --- a/libc/cmake/modules/LLVMLibCTestRules.cmake +++ b/libc/cmake/modules/LLVMLibCTestRules.cmake @@ -751,7 +751,6 @@ function(add_libc_hermetic test_name) target_link_options(${fq_build_target_name} PRIVATE ${LIBC_COMPILE_OPTIONS_DEFAULT} -Wno-multi-gpu "-Wl,--suppress-stack-size-warning" - "-Wl,-mllvm,-nvptx-lower-global-ctor-dtor=1" "-Wl,-mllvm,-nvptx-emit-init-fini-kernel" -march=${LIBC_GPU_TARGET_ARCHITECTURE} -nostdlib -static "--cuda-path=${LIBC_CUDA_ROOT}") diff --git a/libc/src/stdio/scanf_core/CMakeLists.txt b/libc/src/stdio/scanf_core/CMakeLists.txt index 35b8b3d318a9f..ce639fe65a106 100644 --- a/libc/src/stdio/scanf_core/CMakeLists.txt +++ b/libc/src/stdio/scanf_core/CMakeLists.txt @@ -8,6 +8,20 @@ if(scanf_config_copts) list(PREPEND scanf_config_copts "COMPILE_OPTIONS") endif() + +list(APPEND file_deps libc.hdr.types.FILE) +if(LIBC_TARGET_OS_IS_GPU) + list(APPEND file_deps + libc.src.stdio.getc + libc.src.stdio.ungetc + libc.src.stdio.ferror + ) +elseif(LLVM_LIBC_FULL_BUILD) + list(APPEND file_deps + libc.src.__support.File.file + ) +endif() + add_header_library( scanf_config HDRS @@ -52,28 +66,19 @@ add_object_library( .converter .core_structs libc.src.__support.arg_list + ${file_deps} + ${use_system_file} ) -if(LIBC_TARGET_OS_IS_GPU) -add_header_library( - 
reader - HDRS - reader.h - DEPENDS - libc.src.__support.macros.attributes -) -elseif((TARGET libc.src.__support.File.file) OR (NOT LLVM_LIBC_FULL_BUILD)) add_header_library( reader HDRS reader.h DEPENDS libc.src.__support.macros.attributes - libc.hdr.types.FILE - libc.src.__support.File.file + ${file_deps} ${use_system_file} ) -endif() add_object_library( converter @@ -101,33 +106,19 @@ add_object_library( libc.src.__support.CPP.limits libc.src.__support.char_vector libc.src.__support.str_to_float + ${file_deps} + ${use_system_file} ) -if(LIBC_TARGET_OS_IS_GPU) - add_header_library( - vfscanf_internal - HDRS - vfscanf_internal.h - DEPENDS - .reader - .scanf_main - libc.include.stdio - libc.src.__support.arg_list - libc.src.stdio.getc - libc.src.stdio.ungetc - libc.src.stdio.ferror - ) -elseif(TARGET libc.src.__support.File.file OR (NOT LLVM_LIBC_FULL_BUILD)) - add_header_library( +#TODO: condense the file-related code as possible. +add_header_library( vfscanf_internal HDRS vfscanf_internal.h DEPENDS .reader .scanf_main - libc.include.stdio - libc.src.__support.File.file libc.src.__support.arg_list + ${file_deps} ${use_system_file} - ) -endif() +) diff --git a/libc/test/src/stdio/scanf_core/CMakeLists.txt b/libc/test/src/stdio/scanf_core/CMakeLists.txt index 06735ddb23be7..9cdc6547821ee 100644 --- a/libc/test/src/stdio/scanf_core/CMakeLists.txt +++ b/libc/test/src/stdio/scanf_core/CMakeLists.txt @@ -1,3 +1,8 @@ +if(NOT(LLVM_LIBC_FULL_BUILD)) + # in overlay mode, use the system's file to test the reader. + set(use_system_file "-DLIBC_COPT_STDIO_USE_SYSTEM_FILE") +endif() + add_libc_unittest( parser_test SUITE @@ -22,14 +27,10 @@ add_libc_unittest( DEPENDS libc.src.stdio.scanf_core.reader libc.src.__support.CPP.string_view + COMPILE_OPTIONS + ${use_system_file} ) -if(NOT (TARGET libc.src.__support.File.file)) - # Not all platforms have a file implementation. If file is unvailable, - # then we must skip all the parts that need file. 
- return() -endif() - add_libc_unittest( converter_test SUITE @@ -40,4 +41,6 @@ add_libc_unittest( libc.src.stdio.scanf_core.reader libc.src.stdio.scanf_core.converter libc.src.__support.CPP.string_view + COMPILE_OPTIONS + ${use_system_file} ) diff --git a/lld/test/wasm/data-segments.ll b/lld/test/wasm/data-segments.ll index 79f1d384919d9..6c401c4873910 100644 --- a/lld/test/wasm/data-segments.ll +++ b/lld/test/wasm/data-segments.ll @@ -17,6 +17,11 @@ ; RUN: wasm-ld -mwasm64 -no-gc-sections --no-entry %t.bulk-mem64.o -o %t.bulk-mem64.wasm ; RUN: obj2yaml %t.bulk-mem64.wasm | FileCheck %s --check-prefixes ACTIVE,ACTIVE64 +;; In -pie mode segments are combined into one active segment. +; RUN: wasm-ld --experimental-pic --import-memory -pie -no-gc-sections --no-entry %t.atomics.bulk-mem.pic.o -o %t.pic.wasm +; RUN: obj2yaml %t.pic.wasm | FileCheck %s --check-prefixes ACTIVE-PIC +; RUN: llvm-objdump --disassemble-symbols=__wasm_call_ctors,__wasm_init_memory --no-show-raw-insn --no-leading-addr %t.pic.wasm | FileCheck %s --check-prefixes PIC-NON-SHARED-DIS + ;; atomics, bulk memory, shared memory => passive segments ; RUN: wasm-ld -no-gc-sections --no-entry --shared-memory --max-memory=131072 %t.atomics.bulk-mem.o -o %t.atomics.bulk-mem.wasm ; RUN: obj2yaml %t.atomics.bulk-mem.wasm | FileCheck %s --check-prefix PASSIVE @@ -28,9 +33,9 @@ ; RUN: llvm-objdump --disassemble-symbols=__wasm_call_ctors,__wasm_init_memory --no-show-raw-insn --no-leading-addr %t.atomics.bulk-mem64.wasm | FileCheck %s --check-prefixes DIS,NOPIC-DIS -DPTR=i64 ;; Also test in combination with PIC/pie -; RUN: wasm-ld --experimental-pic -pie -no-gc-sections --no-entry --shared-memory --max-memory=131072 %t.atomics.bulk-mem.pic.o -o %t.pic.wasm -; RUN: obj2yaml %t.pic.wasm | FileCheck %s --check-prefixes PASSIVE-PIC,PASSIVE32-PIC -; RUN: llvm-objdump --disassemble-symbols=__wasm_call_ctors,__wasm_init_memory --no-show-raw-insn --no-leading-addr %t.pic.wasm | FileCheck %s --check-prefixes DIS,PIC-DIS 
-DPTR=i32 +; RUN: wasm-ld --experimental-pic -pie -no-gc-sections --no-entry --shared-memory --max-memory=131072 %t.atomics.bulk-mem.pic.o -o %t.shared.pic.wasm +; RUN: obj2yaml %t.shared.pic.wasm | FileCheck %s --check-prefixes PASSIVE-PIC,PASSIVE32-PIC +; RUN: llvm-objdump --disassemble-symbols=__wasm_call_ctors,__wasm_init_memory --no-show-raw-insn --no-leading-addr %t.shared.pic.wasm | FileCheck %s --check-prefixes DIS,PIC-DIS -DPTR=i32 ;; Also test in combination with PIC/pie + wasm64 ; RUN: wasm-ld -mwasm64 --experimental-pic -pie -no-gc-sections --no-entry --shared-memory --max-memory=131072 %t.atomics.bulk-mem.pic-mem64.o -o %t.pic-mem64.wasm @@ -76,6 +81,20 @@ ; ACTIVE-NEXT: - Index: 0 ; ACTIVE-NEXT: Name: __wasm_call_ctors +;; In ACTIVE-PIC mode the memory is imported which means all data segments +;; (except BSS) are combined in the single one. +;; BSS is not included here, and instead initialized using `memory.init` in +;; `__wasm_init_memory` + +; ACTIVE-PIC: - Type: DATA +; ACTIVE-PIC-NEXT: Segments: +; ACTIVE-PIC-NEXT: - SectionOffset: 6 +; ACTIVE-PIC-NEXT: InitFlags: 0 +; ACTIVE-PIC-NEXT: Offset: +; ACTIVE-PIC-NEXT: Opcode: GLOBAL_GET +; ACTIVE-PIC-NEXT: Index: 1 +; ACTIVE-PIC-NEXT: Content: 63000000636F6E7374616E74000000002B00000068656C6C6F00676F6F646279650000002A000000 + ; PASSIVE-LABEL: - Type: START ; PASSIVE-NEXT: StartFunction: 2 ; PASSIVE-LABEL: - Type: DATACOUNT @@ -151,6 +170,18 @@ ; PASSIVE-PIC-NEXT: - Index: 2 ; PASSIVE-PIC-NEXT: Name: __wasm_init_memory +;; For the non-shared PIC case the __wasm_init_memory only deals with BSS since +;; all other segments are active +; PIC-NON-SHARED-DIS: <__wasm_init_memory>: +; PIC-NON-SHARED-DIS-EMPTY: +; PIC-NON-SHARED-DIS-NEXT: i32.const 40 +; PIC-NON-SHARED-DIS-NEXT: global.get 1 +; PIC-NON-SHARED-DIS-NEXT: i32.add +; PIC-NON-SHARED-DIS-NEXT: i32.const 0 +; PIC-NON-SHARED-DIS-NEXT: i32.const 10000 +; PIC-NON-SHARED-DIS-NEXT: memory.fill 0 +; PIC-NON-SHARED-DIS-NEXT: end + ;; no data relocations. 
; DIS-LABEL: <__wasm_call_ctors>: ; DIS-EMPTY: diff --git a/lld/wasm/OutputSections.cpp b/lld/wasm/OutputSections.cpp index 95f7ecc29de6b..d679d1e676479 100644 --- a/lld/wasm/OutputSections.cpp +++ b/lld/wasm/OutputSections.cpp @@ -101,7 +101,8 @@ void DataSection::finalizeContents() { }); #ifndef NDEBUG unsigned activeCount = llvm::count_if(segments, [](OutputSegment *segment) { - return (segment->initFlags & WASM_DATA_SEGMENT_IS_PASSIVE) == 0; + return segment->requiredInBinary() && + (segment->initFlags & WASM_DATA_SEGMENT_IS_PASSIVE) == 0; }); #endif diff --git a/lld/wasm/Writer.cpp b/lld/wasm/Writer.cpp index 76e38f548157c..7770bdcfc1f16 100644 --- a/lld/wasm/Writer.cpp +++ b/lld/wasm/Writer.cpp @@ -1081,7 +1081,12 @@ void Writer::combineOutputSegments() { return; OutputSegment *combined = make(".data"); combined->startVA = segments[0]->startVA; + std::vector newSegments = {combined}; for (OutputSegment *s : segments) { + if (!s->requiredInBinary()) { + newSegments.push_back(s); + continue; + } bool first = true; for (InputChunk *inSeg : s->inputSegments) { if (first) @@ -1100,7 +1105,7 @@ void Writer::combineOutputSegments() { } } - segments = {combined}; + segments = newSegments; } static void createFunction(DefinedFunction *func, StringRef bodyContent) { diff --git a/lldb/source/Plugins/ABI/LoongArch/ABISysV_loongarch.cpp b/lldb/source/Plugins/ABI/LoongArch/ABISysV_loongarch.cpp index dc7e9bba00067..b8d40501ec13a 100644 --- a/lldb/source/Plugins/ABI/LoongArch/ABISysV_loongarch.cpp +++ b/lldb/source/Plugins/ABI/LoongArch/ABISysV_loongarch.cpp @@ -12,6 +12,7 @@ #include #include +#include "llvm/ADT/StringRef.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/Support/MathExtras.h" @@ -644,34 +645,25 @@ void ABISysV_loongarch::AugmentRegisterInfo( std::vector ®s) { lldb_private::RegInfoBasedABI::AugmentRegisterInfo(regs); + static const llvm::StringMap isa_to_abi_alias_map = { + {"r0", "zero"}, {"r1", "ra"}, {"r2", "tp"}, {"r3", "sp"}, + {"r4", "a0"}, {"r5", 
"a1"}, {"r6", "a2"}, {"r7", "a3"}, + {"r8", "a4"}, {"r9", "a5"}, {"r10", "a6"}, {"r11", "a7"}, + {"r12", "t0"}, {"r13", "t1"}, {"r14", "t2"}, {"r15", "t3"}, + {"r16", "t4"}, {"r17", "t5"}, {"r18", "t6"}, {"r19", "t7"}, + {"r20", "t8"}, {"r22", "fp"}, {"r23", "s0"}, {"r24", "s1"}, + {"r25", "s2"}, {"r26", "s3"}, {"r27", "s4"}, {"r28", "s5"}, + {"r29", "s6"}, {"r30", "s7"}, {"r31", "s8"}}; + for (auto it : llvm::enumerate(regs)) { + llvm::StringRef reg_name = it.value().name.GetStringRef(); + // Set alt name for certain registers for convenience - if (it.value().name == "r0") - it.value().alt_name.SetCString("zero"); - else if (it.value().name == "r1") - it.value().alt_name.SetCString("ra"); - else if (it.value().name == "r3") - it.value().alt_name.SetCString("sp"); - else if (it.value().name == "r22") - it.value().alt_name.SetCString("fp"); - else if (it.value().name == "r4") - it.value().alt_name.SetCString("a0"); - else if (it.value().name == "r5") - it.value().alt_name.SetCString("a1"); - else if (it.value().name == "r6") - it.value().alt_name.SetCString("a2"); - else if (it.value().name == "r7") - it.value().alt_name.SetCString("a3"); - else if (it.value().name == "r8") - it.value().alt_name.SetCString("a4"); - else if (it.value().name == "r9") - it.value().alt_name.SetCString("a5"); - else if (it.value().name == "r10") - it.value().alt_name.SetCString("a6"); - else if (it.value().name == "r11") - it.value().alt_name.SetCString("a7"); + llvm::StringRef alias_name = isa_to_abi_alias_map.lookup(reg_name); + if (!alias_name.empty()) + it.value().alt_name.SetString(alias_name); // Set generic regnum so lldb knows what the PC, etc is - it.value().regnum_generic = GetGenericNum(it.value().name.GetStringRef()); + it.value().regnum_generic = GetGenericNum(reg_name); } } diff --git a/lldb/test/API/functionalities/postmortem/elf-core/TestLinuxCore.py b/lldb/test/API/functionalities/postmortem/elf-core/TestLinuxCore.py index 60caedf4737da..a287fd19ba352 100644 --- 
a/lldb/test/API/functionalities/postmortem/elf-core/TestLinuxCore.py +++ b/lldb/test/API/functionalities/postmortem/elf-core/TestLinuxCore.py @@ -866,41 +866,42 @@ def test_loongarch64_regs(self): self.assertTrue(target, VALID_TARGET) process = target.LoadCore("linux-loongarch64.core") - values = {} - values["r0"] = "0x0000000000000000" - values["r1"] = "0x000000012000016c" - values["r2"] = "0x0000000000000000" - values["r3"] = "0x00007ffffb8249e0" - values["r4"] = "0x0000000000000000" - values["r5"] = "0x000000012000010c" - values["r6"] = "0x0000000000000000" - values["r7"] = "0x0000000000000000" - values["r8"] = "0x0000000000000000" - values["r9"] = "0x0000000000000000" - values["r10"] = "0x0000000000000000" - values["r11"] = "0x00000000000000dd" - values["r12"] = "0x0000000000000000" - values["r13"] = "0x000000000000002f" - values["r14"] = "0x0000000000000000" - values["r15"] = "0x0000000000000000" - values["r16"] = "0x0000000000000000" - values["r17"] = "0x0000000000000000" - values["r18"] = "0x0000000000000000" - values["r19"] = "0x0000000000000000" - values["r20"] = "0x0000000000000000" - values["r21"] = "0x0000000000000000" - values["r22"] = "0x00007ffffb824a10" - values["r23"] = "0x0000000000000000" - values["r24"] = "0x0000000000000000" - values["r25"] = "0x0000000000000000" - values["r26"] = "0x0000000000000000" - values["r27"] = "0x0000000000000000" - values["r28"] = "0x0000000000000000" - values["r29"] = "0x0000000000000000" - values["r30"] = "0x0000000000000000" - values["r31"] = "0x0000000000000000" - values["orig_a0"] = "0x0000555556b62d50" - values["pc"] = "0x000000012000012c" + values = { + "r0": ("0x0000000000000000", "zero"), + "r1": ("0x000000012000016c", "ra"), + "r2": ("0x0000000000000000", "tp"), + "r3": ("0x00007ffffb8249e0", "sp"), + "r4": ("0x0000000000000000", "a0"), + "r5": ("0x000000012000010c", "a1"), + "r6": ("0x0000000000000000", "a2"), + "r7": ("0x0000000000000000", "a3"), + "r8": ("0x0000000000000000", "a4"), + "r9": 
("0x0000000000000000", "a5"), + "r10": ("0x0000000000000000", "a6"), + "r11": ("0x00000000000000dd", "a7"), + "r12": ("0x0000000000000000", "t0"), + "r13": ("0x000000000000002f", "t1"), + "r14": ("0x0000000000000000", "t2"), + "r15": ("0x0000000000000000", "t3"), + "r16": ("0x0000000000000000", "t4"), + "r17": ("0x0000000000000000", "t5"), + "r18": ("0x0000000000000000", "t6"), + "r19": ("0x0000000000000000", "t7"), + "r20": ("0x0000000000000000", "t8"), + "r21": ("0x0000000000000000", None), + "r22": ("0x00007ffffb824a10", "fp"), + "r23": ("0x0000000000000000", "s0"), + "r24": ("0x0000000000000000", "s1"), + "r25": ("0x0000000000000000", "s2"), + "r26": ("0x0000000000000000", "s3"), + "r27": ("0x0000000000000000", "s4"), + "r28": ("0x0000000000000000", "s5"), + "r29": ("0x0000000000000000", "s6"), + "r30": ("0x0000000000000000", "s7"), + "r31": ("0x0000000000000000", "s8"), + "orig_a0": ("0x0000555556b62d50", None), + "pc": ("0x000000012000012c", None), + } fpr_values = {} fpr_values["f0"] = "0x00000000ffffff05" @@ -945,11 +946,17 @@ def test_loongarch64_regs(self): fpr_values["fcc7"] = "0x01" fpr_values["fcsr"] = "0x00000000" - for regname, value in values.items(): + for regname in values: + value, alias = values[regname] self.expect( "register read {}".format(regname), substrs=["{} = {}".format(regname, value)], ) + if alias: + self.expect( + "register read {}".format(alias), + substrs=["{} = {}".format(regname, value)], + ) for regname, value in fpr_values.items(): self.expect( diff --git a/lldb/test/Shell/Register/Inputs/loongarch64-gp-read.cpp b/lldb/test/Shell/Register/Inputs/loongarch64-gp-read.cpp new file mode 100644 index 0000000000000..91e37a6ca6667 --- /dev/null +++ b/lldb/test/Shell/Register/Inputs/loongarch64-gp-read.cpp @@ -0,0 +1,37 @@ +int main() { + asm volatile( + // r0 aka zero is always tied to zero + "li.w $r1, 1\n\t" + "li.w $r2, 2\n\t" + "li.w $r3, 3\n\t" + "li.w $r4, 4\n\t" + "li.w $r5, 5\n\t" + "li.w $r6, 6\n\t" + "li.w $r7, 7\n\t" + 
"li.w $r8, 8\n\t" + "li.w $r9, 9\n\t" + "li.w $r10, 10\n\t" + "li.w $r11, 11\n\t" + "li.w $r12, 12\n\t" + "li.w $r13, 13\n\t" + "li.w $r14, 14\n\t" + "li.w $r15, 15\n\t" + "li.w $r16, 16\n\t" + "li.w $r17, 17\n\t" + "li.w $r18, 18\n\t" + "li.w $r19, 19\n\t" + "li.w $r20, 20\n\t" + "li.w $r21, 21\n\t" + "li.w $r22, 22\n\t" + "li.w $r23, 23\n\t" + "li.w $r24, 24\n\t" + "li.w $r25, 25\n\t" + "li.w $r26, 26\n\t" + "li.w $r27, 27\n\t" + "li.w $r28, 28\n\t" + "li.w $r29, 29\n\t" + "li.w $r30, 30\n\t" + "li.w $r31, 31\n\t" + "break 5\n\t"); + return 0; +} diff --git a/lldb/test/Shell/Register/loongarch64-gp-read.test b/lldb/test/Shell/Register/loongarch64-gp-read.test new file mode 100644 index 0000000000000..18f39cd6af625 --- /dev/null +++ b/lldb/test/Shell/Register/loongarch64-gp-read.test @@ -0,0 +1,39 @@ +# REQUIRES: native && target-loongarch64 +# RUN: %clangxx_host %p/Inputs/loongarch64-gp-read.cpp -o %t +# RUN: %lldb -b -s %s %t | FileCheck %s +process launch + +## Read register using the register's alias. 
+register read zero ra tp sp a0 a1 a2 a3 a4 a5 a6 a7 t0 t1 t2 t3 t4 t5 t6 t7 t8 r21 fp s0 s1 s2 s3 s4 s5 s6 s7 s8 +# CHECK-DAG: r0 = 0x0000000000000000 +# CHECK-DAG: r1 = 0x0000000000000001 +# CHECK-DAG: r2 = 0x0000000000000002 +# CHECK-DAG: r3 = 0x0000000000000003 +# CHECK-DAG: r4 = 0x0000000000000004 +# CHECK-DAG: r5 = 0x0000000000000005 +# CHECK-DAG: r6 = 0x0000000000000006 +# CHECK-DAG: r7 = 0x0000000000000007 +# CHECK-DAG: r8 = 0x0000000000000008 +# CHECK-DAG: r9 = 0x0000000000000009 +# CHECK-DAG: r10 = 0x000000000000000a +# CHECK-DAG: r11 = 0x000000000000000b +# CHECK-DAG: r12 = 0x000000000000000c +# CHECK-DAG: r13 = 0x000000000000000d +# CHECK-DAG: r14 = 0x000000000000000e +# CHECK-DAG: r15 = 0x000000000000000f +# CHECK-DAG: r16 = 0x0000000000000010 +# CHECK-DAG: r17 = 0x0000000000000011 +# CHECK-DAG: r18 = 0x0000000000000012 +# CHECK-DAG: r19 = 0x0000000000000013 +# CHECK-DAG: r20 = 0x0000000000000014 +# CHECK-DAG: r21 = 0x0000000000000015 +# CHECK-DAG: r22 = 0x0000000000000016 +# CHECK-DAG: r23 = 0x0000000000000017 +# CHECK-DAG: r24 = 0x0000000000000018 +# CHECK-DAG: r25 = 0x0000000000000019 +# CHECK-DAG: r26 = 0x000000000000001a +# CHECK-DAG: r27 = 0x000000000000001b +# CHECK-DAG: r28 = 0x000000000000001c +# CHECK-DAG: r29 = 0x000000000000001d +# CHECK-DAG: r30 = 0x000000000000001e +# CHECK-DAG: r31 = 0x000000000000001f diff --git a/llvm/include/llvm/CodeGen/RDFRegisters.h b/llvm/include/llvm/CodeGen/RDFRegisters.h index cc30b977ae421..dcce190f0f308 100644 --- a/llvm/include/llvm/CodeGen/RDFRegisters.h +++ b/llvm/include/llvm/CodeGen/RDFRegisters.h @@ -111,10 +111,10 @@ struct RegisterRef { } static constexpr bool isRegId(unsigned Id) { - return Register(Id).isPhysical(); + return Register::isPhysicalRegister(Id); } static constexpr bool isUnitId(unsigned Id) { - return Register(Id).isVirtual(); + return Register::isVirtualRegister(Id); } static constexpr bool isMaskId(unsigned Id) { return Register(Id).isStack(); } diff --git 
a/llvm/include/llvm/CodeGen/RegAllocFast.h b/llvm/include/llvm/CodeGen/RegAllocFast.h index b2ca9e10bf464..9fdaca09e4317 100644 --- a/llvm/include/llvm/CodeGen/RegAllocFast.h +++ b/llvm/include/llvm/CodeGen/RegAllocFast.h @@ -14,18 +14,18 @@ namespace llvm { -struct RegAllocFastPassOptions { - RegAllocFilterFunc Filter = nullptr; - StringRef FilterName = "all"; - bool ClearVRegs = true; -}; - class RegAllocFastPass : public PassInfoMixin { - RegAllocFastPassOptions Opts; - public: - RegAllocFastPass(RegAllocFastPassOptions Opts = RegAllocFastPassOptions()) - : Opts(Opts) {} + struct Options { + RegAllocFilterFunc Filter; + StringRef FilterName; + bool ClearVRegs; + Options(RegAllocFilterFunc F = nullptr, StringRef FN = "all", + bool CV = true) + : Filter(F), FilterName(FN), ClearVRegs(CV) {} + }; + + RegAllocFastPass(Options Opts = Options()) : Opts(Opts) {} MachineFunctionProperties getRequiredProperties() const { return MachineFunctionProperties().set( @@ -52,6 +52,9 @@ class RegAllocFastPass : public PassInfoMixin { function_ref MapClassName2PassName); static bool isRequired() { return true; } + +private: + Options Opts; }; } // namespace llvm diff --git a/llvm/include/llvm/CodeGen/Register.h b/llvm/include/llvm/CodeGen/Register.h index 6c02ffef89363..2fdc2148ef020 100644 --- a/llvm/include/llvm/CodeGen/Register.h +++ b/llvm/include/llvm/CodeGen/Register.h @@ -48,6 +48,18 @@ class Register { return Register(FI + MCRegister::FirstStackSlot); } + /// Return true if the specified register number is in + /// the physical register namespace. + static constexpr bool isPhysicalRegister(unsigned Reg) { + return MCRegister::isPhysicalRegister(Reg); + } + + /// Return true if the specified register number is in + /// the virtual register namespace. + static constexpr bool isVirtualRegister(unsigned Reg) { + return Reg & MCRegister::VirtualRegFlag; + } + /// Convert a 0-based index to a virtual register number. 
/// This is the inverse operation of VirtReg2IndexFunctor below. static Register index2VirtReg(unsigned Index) { @@ -57,13 +69,11 @@ class Register { /// Return true if the specified register number is in the virtual register /// namespace. - constexpr bool isVirtual() const { return Reg & MCRegister::VirtualRegFlag; } + constexpr bool isVirtual() const { return isVirtualRegister(Reg); } /// Return true if the specified register number is in the physical register /// namespace. - constexpr bool isPhysical() const { - return MCRegister::isPhysicalRegister(Reg); - } + constexpr bool isPhysical() const { return isPhysicalRegister(Reg); } /// Convert a virtual register number to a 0-based index. The first virtual /// register in a function will get the index 0. @@ -146,14 +156,14 @@ class VirtRegOrUnit { public: constexpr explicit VirtRegOrUnit(MCRegUnit Unit) : VRegOrUnit(Unit) { - assert(!Register(VRegOrUnit).isVirtual()); + assert(!Register::isVirtualRegister(VRegOrUnit)); } constexpr explicit VirtRegOrUnit(Register Reg) : VRegOrUnit(Reg.id()) { assert(Reg.isVirtual()); } constexpr bool isVirtualReg() const { - return Register(VRegOrUnit).isVirtual(); + return Register::isVirtualRegister(VRegOrUnit); } constexpr MCRegUnit asMCRegUnit() const { diff --git a/llvm/include/llvm/Passes/MachinePassRegistry.def b/llvm/include/llvm/Passes/MachinePassRegistry.def index 373bd047e2395..8de02e951ba52 100644 --- a/llvm/include/llvm/Passes/MachinePassRegistry.def +++ b/llvm/include/llvm/Passes/MachinePassRegistry.def @@ -189,7 +189,7 @@ MACHINE_FUNCTION_PASS("verify", MachineTraceMetricsVerifi #endif MACHINE_FUNCTION_PASS_WITH_PARAMS( "regallocfast", "RegAllocFastPass", - [](RegAllocFastPassOptions Opts) { return RegAllocFastPass(Opts); }, + [](RegAllocFastPass::Options Opts) { return RegAllocFastPass(Opts); }, [PB = this](StringRef Params) { return parseRegAllocFastPassOptions(*PB, Params); }, diff --git 
a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.h b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.h index ede51c28fc94d..aff2068b28331 100644 --- a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.h +++ b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.h @@ -101,7 +101,7 @@ class DGNode { /// The scheduler bundle that this node belongs to. SchedBundle *SB = nullptr; - void setSchedBundle(SchedBundle &SB) { this->SB = &SB; } + void setSchedBundle(SchedBundle &SB); void clearSchedBundle() { this->SB = nullptr; } friend class SchedBundle; // For setSchedBundle(), clearSchedBundle(). diff --git a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Scheduler.h b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Scheduler.h index 6b56f348f328c..c2bdb40ff96dd 100644 --- a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Scheduler.h +++ b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Scheduler.h @@ -112,8 +112,11 @@ class SchedBundle { ContainerTy Nodes; /// Called by the DGNode destructor to avoid accessing freed memory. - void eraseFromBundle(DGNode *N) { Nodes.erase(find(Nodes, N)); } - friend DGNode::~DGNode(); // For eraseFromBundle(). + void eraseFromBundle(DGNode *N) { + Nodes.erase(std::remove(Nodes.begin(), Nodes.end(), N), Nodes.end()); + } + friend void DGNode::setSchedBundle(SchedBundle &); // For eraseFromBunde(). + friend DGNode::~DGNode(); // For eraseFromBundle(). public: SchedBundle() = default; @@ -130,6 +133,10 @@ class SchedBundle { N->clearSchedBundle(); } bool empty() const { return Nodes.empty(); } + /// Singleton bundles are created when scheduling instructions temporarily to + /// fill in the schedule until we schedule the vector bundle. These are + /// non-vector bundles containing just a single instruction. 
+ bool isSingleton() const { return Nodes.size() == 1u; } DGNode *back() const { return Nodes.back(); } using iterator = ContainerTy::iterator; using const_iterator = ContainerTy::const_iterator; @@ -187,10 +194,12 @@ class Scheduler { /// The scheduling state of the instructions in the bundle. enum class BndlSchedState { NoneScheduled, ///> No instruction in the bundle was previously scheduled. - PartiallyOrDifferentlyScheduled, ///> Only some of the instrs in the bundle - /// were previously scheduled, or all of - /// them were but not in the same - /// SchedBundle. + AlreadyScheduled, ///> At least one instruction in the bundle belongs to a + /// different non-singleton scheduling bundle. + TemporarilyScheduled, ///> Instructions were temporarily scheduled as + /// singleton bundles or some of them were not + /// scheduled at all. None of them were in a vector + ///(non-singleton) bundle. FullyScheduled, ///> All instrs in the bundle were previously scheduled and /// were in the same SchedBundle. }; @@ -243,6 +252,11 @@ class Scheduler { class SchedulerInternalsAttorney { public: static DependencyGraph &getDAG(Scheduler &Sched) { return Sched.DAG; } + using BndlSchedState = Scheduler::BndlSchedState; + static BndlSchedState getBndlSchedState(const Scheduler &Sched, + ArrayRef Instrs) { + return Sched.getBndlSchedState(Instrs); + } }; } // namespace llvm::sandboxir diff --git a/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp b/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp index 0f11423a84930..d87649c4e6567 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp @@ -525,7 +525,7 @@ void llvm::calculateDbgEntityHistory(const MachineFunction *MF, // Don't consider SP to be clobbered by register masks. 
for (auto It : RegVars) { unsigned int Reg = It.first; - if (Reg != SP && Register(Reg).isPhysical() && + if (Reg != SP && Register::isPhysicalRegister(Reg) && MO.clobbersPhysReg(Reg)) RegsToClobber.push_back(Reg); } diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp index cf3673058c8e7..ddf0275ddfe6a 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp @@ -564,7 +564,7 @@ DIE &DwarfCompileUnit::updateSubprogramScopeDIE(const DISubprogram *SP, TFI->getDwarfFrameBase(*Asm->MF); switch (FrameBase.Kind) { case TargetFrameLowering::DwarfFrameBase::Register: { - if (Register(FrameBase.Location.Reg).isPhysical()) { + if (Register::isPhysicalRegister(FrameBase.Location.Reg)) { MachineLocation Location(FrameBase.Location.Reg); addAddress(*SPDie, dwarf::DW_AT_frame_base, Location); } diff --git a/llvm/lib/CodeGen/EarlyIfConversion.cpp b/llvm/lib/CodeGen/EarlyIfConversion.cpp index 48d8319892637..caec0524e7ab6 100644 --- a/llvm/lib/CodeGen/EarlyIfConversion.cpp +++ b/llvm/lib/CodeGen/EarlyIfConversion.cpp @@ -522,8 +522,8 @@ bool SSAIfConv::canConvertIf(MachineBasicBlock *MBB, bool Predicate) { if (PI.PHI->getOperand(i+1).getMBB() == FPred) PI.FReg = PI.PHI->getOperand(i).getReg(); } - assert(Register(PI.TReg).isVirtual() && "Bad PHI"); - assert(Register(PI.FReg).isVirtual() && "Bad PHI"); + assert(Register::isVirtualRegister(PI.TReg) && "Bad PHI"); + assert(Register::isVirtualRegister(PI.FReg) && "Bad PHI"); // Get target information. 
if (!TII->canInsertSelect(*Head, Cond, PI.PHI->getOperand(0).getReg(), diff --git a/llvm/lib/CodeGen/LiveInterval.cpp b/llvm/lib/CodeGen/LiveInterval.cpp index 404ffad01c229..0683353d9cdba 100644 --- a/llvm/lib/CodeGen/LiveInterval.cpp +++ b/llvm/lib/CodeGen/LiveInterval.cpp @@ -876,7 +876,7 @@ static void stripValuesNotDefiningMask(unsigned Reg, LiveInterval::SubRange &SR, unsigned ComposeSubRegIdx) { // Phys reg should not be tracked at subreg level. // Same for noreg (Reg == 0). - if (!Register(Reg).isVirtual() || !Reg) + if (!Register::isVirtualRegister(Reg) || !Reg) return; // Remove the values that don't define those lanes. SmallVector ToBeRemoved; diff --git a/llvm/lib/CodeGen/LiveRangeCalc.cpp b/llvm/lib/CodeGen/LiveRangeCalc.cpp index a7c8c3fc8a25a..1a9bc694ed0fd 100644 --- a/llvm/lib/CodeGen/LiveRangeCalc.cpp +++ b/llvm/lib/CodeGen/LiveRangeCalc.cpp @@ -216,7 +216,7 @@ bool LiveRangeCalc::findReachingDefs(LiveRange &LR, MachineBasicBlock &UseMBB, report_fatal_error("Use not jointly dominated by defs."); } - if (Register(PhysReg).isPhysical()) { + if (Register::isPhysicalRegister(PhysReg)) { const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo(); bool IsLiveIn = MBB->isLiveIn(PhysReg); for (MCRegAliasIterator Alias(PhysReg, TRI, false); !IsLiveIn && Alias.isValid(); ++Alias) diff --git a/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp b/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp index 7600a2f08dc4f..49c8a0e466337 100644 --- a/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp +++ b/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp @@ -137,7 +137,7 @@ std::string VRegRenamer::getInstructionOpcodeHash(MachineInstr &MI) { } unsigned VRegRenamer::createVirtualRegister(unsigned VReg) { - assert(Register(VReg).isVirtual() && "Expected Virtual Registers"); + assert(Register::isVirtualRegister(VReg) && "Expected Virtual Registers"); std::string Name = getInstructionOpcodeHash(*MRI.getVRegDef(VReg)); return createVirtualRegisterWithLowerName(VReg, Name); } diff --git 
a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp index 1cc1b2cbd81b9..0da7535031a7d 100644 --- a/llvm/lib/CodeGen/MachineScheduler.cpp +++ b/llvm/lib/CodeGen/MachineScheduler.cpp @@ -3966,7 +3966,8 @@ void GenericScheduler::reschedulePhysReg(SUnit *SU, bool isTop) { // Find already scheduled copies with a single physreg dependence and move // them just above the scheduled instruction. for (SDep &Dep : Deps) { - if (Dep.getKind() != SDep::Data || !Register(Dep.getReg()).isPhysical()) + if (Dep.getKind() != SDep::Data || + !Register::isPhysicalRegister(Dep.getReg())) continue; SUnit *DepSU = Dep.getSUnit(); if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1) diff --git a/llvm/lib/CodeGen/MachineTraceMetrics.cpp b/llvm/lib/CodeGen/MachineTraceMetrics.cpp index a86ad0f6c46dc..021c1a058c020 100644 --- a/llvm/lib/CodeGen/MachineTraceMetrics.cpp +++ b/llvm/lib/CodeGen/MachineTraceMetrics.cpp @@ -682,7 +682,7 @@ struct DataDep { /// Create a DataDep from an SSA form virtual register. DataDep(const MachineRegisterInfo *MRI, unsigned VirtReg, unsigned UseOp) : UseOp(UseOp) { - assert(Register(VirtReg).isVirtual()); + assert(Register::isVirtualRegister(VirtReg)); MachineOperand *DefMO = MRI->getOneDef(VirtReg); assert(DefMO && "Register does not have unique def"); DefMI = DefMO->getParent(); diff --git a/llvm/lib/CodeGen/RegAllocFast.cpp b/llvm/lib/CodeGen/RegAllocFast.cpp index 2809056bfeba2..14128dafbe4ee 100644 --- a/llvm/lib/CodeGen/RegAllocFast.cpp +++ b/llvm/lib/CodeGen/RegAllocFast.cpp @@ -708,7 +708,7 @@ void RegAllocFastImpl::reloadAtBegin(MachineBasicBlock &MBB) { /// not used by a virtreg. Kill the physreg, marking it free. This may add /// implicit kills to MO->getParent() and invalidate MO. 
bool RegAllocFastImpl::usePhysReg(MachineInstr &MI, MCPhysReg Reg) { - assert(Register(Reg).isPhysical() && "expected physreg"); + assert(Register::isPhysicalRegister(Reg) && "expected physreg"); bool displacedAny = displacePhysReg(MI, Reg); setPhysRegState(Reg, regPreAssigned); markRegUsedInInstr(Reg); @@ -1289,7 +1289,7 @@ void RegAllocFastImpl::dumpState() const { assert(VirtReg.isVirtual() && "Bad map key"); MCPhysReg PhysReg = LR.PhysReg; if (PhysReg != 0) { - assert(Register(PhysReg).isPhysical() && "mapped to physreg"); + assert(Register::isPhysicalRegister(PhysReg) && "mapped to physreg"); for (MCRegUnit Unit : TRI->regunits(PhysReg)) { assert(RegUnitStates[Unit] == VirtReg && "inverse map valid"); } diff --git a/llvm/lib/CodeGen/RegisterPressure.cpp b/llvm/lib/CodeGen/RegisterPressure.cpp index 5a4c3a0efef2a..ca51b670b46cc 100644 --- a/llvm/lib/CodeGen/RegisterPressure.cpp +++ b/llvm/lib/CodeGen/RegisterPressure.cpp @@ -231,7 +231,7 @@ void LiveRegSet::clear() { } static const LiveRange *getLiveRange(const LiveIntervals &LIS, unsigned Reg) { - if (Register(Reg).isVirtual()) + if (Register::isVirtualRegister(Reg)) return &LIS.getInterval(Reg); return LIS.getCachedRegUnit(Reg); } diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp index 91571ed204317..5a314570c776a 100644 --- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp @@ -2229,7 +2229,8 @@ Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode, Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, uint32_t Idx) { Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT)); - assert(Register(Op0).isVirtual() && "Cannot yet extract from physregs"); + assert(Register::isVirtualRegister(Op0) && + "Cannot yet extract from physregs"); const TargetRegisterClass *RC = MRI.getRegClass(Op0); MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx)); BuildMI(*FuncInfo.MBB, 
FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp index 288b9d9553b1d..fd4641ec6f124 100644 --- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp @@ -501,8 +501,8 @@ bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU, F.isClobberKind()) { // Check for def of register or earlyclobber register. for (; NumVals; --NumVals, ++i) { - Register Reg = cast(Node->getOperand(i))->getReg(); - if (Reg.isPhysical()) + unsigned Reg = cast(Node->getOperand(i))->getReg(); + if (Register::isPhysicalRegister(Reg)) CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI); } } else diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp index 4125d223dc325..d04bd6e98097e 100644 --- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp @@ -116,11 +116,11 @@ static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op, if (Op != 2 || User->getOpcode() != ISD::CopyToReg) return; - Register Reg = cast(User->getOperand(1))->getReg(); + unsigned Reg = cast(User->getOperand(1))->getReg(); if (TLI.checkForPhysRegDependency(Def, User, Op, TRI, TII, PhysReg, Cost)) return; - if (Reg.isVirtual()) + if (Register::isVirtualRegister(Reg)) return; unsigned ResNo = User->getOperand(2).getResNo(); @@ -664,8 +664,8 @@ void ScheduleDAGSDNodes::computeOperandLatency(SDNode *Def, SDNode *Use, TII->getOperandLatency(InstrItins, Def, DefIdx, Use, OpIdx); if (Latency > 1U && Use->getOpcode() == ISD::CopyToReg && !BB->succ_empty()) { - Register Reg = cast(Use->getOperand(1))->getReg(); - if (Reg.isVirtual()) + unsigned Reg = cast(Use->getOperand(1))->getReg(); + if (Register::isVirtualRegister(Reg)) // This copy is a liveout value. 
It is likely coalesced, so reduce the // latency so not to penalize the def. // FIXME: need target specific adjustment here? diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index a76498fcab8f2..1c58a7f05446c 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -908,7 +908,8 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, // If the source register was virtual and if we know something about it, // add an assert node. - if (!Regs[Part + i].isVirtual() || !RegisterVT.isInteger()) + if (!Register::isVirtualRegister(Regs[Part + i]) || + !RegisterVT.isInteger()) continue; const FunctionLoweringInfo::LiveOutInfo *LOI = @@ -1022,7 +1023,7 @@ void RegsForValue::AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, InlineAsm::Flag Flag(Code, Regs.size()); if (HasMatching) Flag.setMatchingOp(MatchingIdx); - else if (!Regs.empty() && Regs.front().isVirtual()) { + else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) { // Put the register class of the virtual registers in the flag word. That // way, later passes can recompute register class constraints for inline // assembly as well as normal instructions. 
@@ -10125,8 +10126,9 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call, auto DetectWriteToReservedRegister = [&]() { const MachineFunction &MF = DAG.getMachineFunction(); const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); - for (Register Reg : OpInfo.AssignedRegs.Regs) { - if (Reg.isPhysical() && TRI.isInlineAsmReadOnlyReg(MF, Reg)) { + for (unsigned Reg : OpInfo.AssignedRegs.Regs) { + if (Register::isPhysicalRegister(Reg) && + TRI.isInlineAsmReadOnlyReg(MF, Reg)) { const char *RegName = TRI.getName(Reg); emitInlineAsmError(Call, "write to reserved register '" + Twine(RegName) + "'"); @@ -11388,7 +11390,7 @@ void SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, assert((Op.getOpcode() != ISD::CopyFromReg || cast(Op.getOperand(1))->getReg() != Reg) && "Copy from a reg to the same reg!"); - assert(!Register(Reg).isPhysical() && "Is a physreg"); + assert(!Register::isPhysicalRegister(Reg) && "Is a physreg"); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); // If this is an InlineAsm we have to match the registers required, not the diff --git a/llvm/lib/CodeGen/TargetRegisterInfo.cpp b/llvm/lib/CodeGen/TargetRegisterInfo.cpp index cb55a00b9e03b..3fe8d5dbc4b67 100644 --- a/llvm/lib/CodeGen/TargetRegisterInfo.cpp +++ b/llvm/lib/CodeGen/TargetRegisterInfo.cpp @@ -160,7 +160,7 @@ Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI) { Printable printVRegOrUnit(unsigned Unit, const TargetRegisterInfo *TRI) { return Printable([Unit, TRI](raw_ostream &OS) { - if (Register(Unit).isVirtual()) { + if (Register::isVirtualRegister(Unit)) { OS << '%' << Register(Unit).virtRegIndex(); } else { OS << printRegUnit(Unit, TRI); diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp index 5bb2e7d0abdd9..3a078985c33e4 100644 --- a/llvm/lib/Passes/PassBuilder.cpp +++ b/llvm/lib/Passes/PassBuilder.cpp @@ -1332,9 +1332,9 @@ Expected> parseInternalizeGVs(StringRef Params) { return 
Expected>(std::move(PreservedGVs)); } -Expected +Expected parseRegAllocFastPassOptions(PassBuilder &PB, StringRef Params) { - RegAllocFastPassOptions Opts; + RegAllocFastPass::Options Opts; while (!Params.empty()) { StringRef ParamName; std::tie(ParamName, Params) = Params.split(';'); diff --git a/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp b/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp index 8de4489de8f28..9e31243cd696c 100644 --- a/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp +++ b/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp @@ -105,14 +105,14 @@ static bool isGPR64(unsigned Reg, unsigned SubReg, const MachineRegisterInfo *MRI) { if (SubReg) return false; - if (Register(Reg).isVirtual()) + if (Register::isVirtualRegister(Reg)) return MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::GPR64RegClass); return AArch64::GPR64RegClass.contains(Reg); } static bool isFPR64(unsigned Reg, unsigned SubReg, const MachineRegisterInfo *MRI) { - if (Register(Reg).isVirtual()) + if (Register::isVirtualRegister(Reg)) return (MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR64RegClass) && SubReg == 0) || (MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR128RegClass) && diff --git a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp index 9d7c4448e4cf8..0301032e84977 100644 --- a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp +++ b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp @@ -258,7 +258,7 @@ bool SSACCmpConv::isDeadDef(unsigned DstReg) { // Writes to the zero register are dead. if (DstReg == AArch64::WZR || DstReg == AArch64::XZR) return true; - if (!Register(DstReg).isVirtual()) + if (!Register::isVirtualRegister(DstReg)) return false; // A virtual register def without any uses will be marked dead later, and // eventually replaced by the zero register. 
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp index 41e15ab1e5942..efa03d4a1035b 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -677,7 +677,7 @@ unsigned AArch64InstrInfo::insertBranch( // Find the original register that VReg is copied from. static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) { - while (Register(VReg).isVirtual()) { + while (Register::isVirtualRegister(VReg)) { const MachineInstr *DefMI = MRI.getVRegDef(VReg); if (!DefMI->isFullCopy()) return VReg; @@ -692,7 +692,7 @@ static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) { static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg, unsigned *NewVReg = nullptr) { VReg = removeCopies(MRI, VReg); - if (!Register(VReg).isVirtual()) + if (!Register::isVirtualRegister(VReg)) return 0; bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg)); @@ -6121,9 +6121,9 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl( Register SrcReg = SrcMO.getReg(); // This is slightly expensive to compute for physical regs since // getMinimalPhysRegClass is slow. - auto getRegClass = [&](Register Reg) { - return Reg.isVirtual() ? MRI.getRegClass(Reg) - : TRI.getMinimalPhysRegClass(Reg); + auto getRegClass = [&](unsigned Reg) { + return Register::isVirtualRegister(Reg) ? 
MRI.getRegClass(Reg) + : TRI.getMinimalPhysRegClass(Reg); }; if (DstMO.getSubReg() == 0 && SrcMO.getSubReg() == 0) { @@ -7456,7 +7456,7 @@ static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI, MRI.constrainRegClass(SrcReg0, RC); if (SrcReg1.isVirtual()) MRI.constrainRegClass(SrcReg1, RC); - if (Register(VR).isVirtual()) + if (Register::isVirtualRegister(VR)) MRI.constrainRegClass(VR, RC); MachineInstrBuilder MIB = diff --git a/llvm/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp b/llvm/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp index c636719d86ca0..174438c1863dd 100644 --- a/llvm/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp +++ b/llvm/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp @@ -155,11 +155,11 @@ bool A57ChainingConstraint::addIntraChainConstraint(PBQPRAGraph &G, unsigned Rd, LiveIntervals &LIs = G.getMetadata().LIS; - if (Register(Rd).isPhysical() || Register(Ra).isPhysical()) { - LLVM_DEBUG(dbgs() << "Rd is a physical reg:" << Register(Rd).isPhysical() - << '\n'); - LLVM_DEBUG(dbgs() << "Ra is a physical reg:" << Register(Ra).isPhysical() - << '\n'); + if (Register::isPhysicalRegister(Rd) || Register::isPhysicalRegister(Ra)) { + LLVM_DEBUG(dbgs() << "Rd is a physical reg:" + << Register::isPhysicalRegister(Rd) << '\n'); + LLVM_DEBUG(dbgs() << "Ra is a physical reg:" + << Register::isPhysicalRegister(Ra) << '\n'); return false; } diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index 909ad07782fc6..6ed09253c51e1 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -869,8 +869,13 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM, if (Subtarget->hasMinimum3Maximum3F32()) setOperationAction({ISD::FMAXIMUM, ISD::FMINIMUM}, MVT::f32, Legal); - if (Subtarget->hasMinimum3Maximum3PKF16()) + if (Subtarget->hasMinimum3Maximum3PKF16()) { setOperationAction({ISD::FMAXIMUM, ISD::FMINIMUM}, MVT::v2f16, Legal); + + // If only the vector form is 
available, we need to widen to a vector. + if (!Subtarget->hasMinimum3Maximum3F16()) + setOperationAction({ISD::FMAXIMUM, ISD::FMINIMUM}, MVT::f16, Custom); + } } setOperationAction(ISD::INTRINSIC_WO_CHAIN, @@ -5964,6 +5969,9 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::FMINNUM: case ISD::FMAXNUM: return lowerFMINNUM_FMAXNUM(Op, DAG); + case ISD::FMINIMUM: + case ISD::FMAXIMUM: + return lowerFMINIMUM_FMAXIMUM(Op, DAG); case ISD::FLDEXP: case ISD::STRICT_FLDEXP: return lowerFLDEXP(Op, DAG); @@ -5985,8 +5993,6 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::FMUL: case ISD::FMINNUM_IEEE: case ISD::FMAXNUM_IEEE: - case ISD::FMINIMUM: - case ISD::FMAXIMUM: case ISD::FMINIMUMNUM: case ISD::FMAXIMUMNUM: case ISD::UADDSAT: @@ -6841,6 +6847,34 @@ SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op, return Op; } +SDValue SITargetLowering::lowerFMINIMUM_FMAXIMUM(SDValue Op, + SelectionDAG &DAG) const { + EVT VT = Op.getValueType(); + if (VT.isVector()) + return splitBinaryVectorOp(Op, DAG); + + assert(!Subtarget->hasIEEEMinMax() && !Subtarget->hasMinimum3Maximum3F16() && + Subtarget->hasMinimum3Maximum3PKF16() && VT == MVT::f16 && + "should not need to widen f16 minimum/maximum to v2f16"); + + // Widen f16 operation to v2f16 + + // fminimum f16:x, f16:y -> + // extract_vector_elt (fminimum (v2f16 (scalar_to_vector x)) + // (v2f16 (scalar_to_vector y))), 0 + SDLoc SL(Op); + SDValue WideSrc0 = + DAG.getNode(ISD::SCALAR_TO_VECTOR, SL, MVT::v2f16, Op.getOperand(0)); + SDValue WideSrc1 = + DAG.getNode(ISD::SCALAR_TO_VECTOR, SL, MVT::v2f16, Op.getOperand(1)); + + SDValue Widened = + DAG.getNode(Op.getOpcode(), SL, MVT::v2f16, WideSrc0, WideSrc1); + + return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::f16, Widened, + DAG.getConstant(0, SL, MVT::i32)); +} + SDValue SITargetLowering::lowerFLDEXP(SDValue Op, SelectionDAG &DAG) const { bool IsStrict = Op.getOpcode() == 
ISD::STRICT_FLDEXP; EVT VT = Op.getValueType(); @@ -13481,7 +13515,8 @@ static bool supportsMin3Max3(const GCNSubtarget &Subtarget, unsigned Opc, case ISD::FMINIMUM: case ISD::FMAXIMUM: return (VT == MVT::f32 && Subtarget.hasMinimum3Maximum3F32()) || - (VT == MVT::f16 && Subtarget.hasMinimum3Maximum3F16()); + (VT == MVT::f16 && Subtarget.hasMinimum3Maximum3F16()) || + (VT == MVT::v2f16 && Subtarget.hasMinimum3Maximum3PKF16()); case ISD::SMAX: case ISD::SMIN: case ISD::UMAX: diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h index 1cd7f1b29e077..9b2c14862407a 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.h +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h @@ -146,6 +146,7 @@ class SITargetLowering final : public AMDGPUTargetLowering { /// Custom lowering for ISD::FP_ROUND for MVT::f16. SDValue lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const; SDValue lowerFMINNUM_FMAXNUM(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerFMINIMUM_FMAXIMUM(SDValue Op, SelectionDAG &DAG) const; SDValue lowerFLDEXP(SDValue Op, SelectionDAG &DAG) const; SDValue promoteUniformOpToI32(SDValue Op, DAGCombinerInfo &DCI) const; SDValue lowerMUL(SDValue Op, SelectionDAG &DAG) const; diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td index e30e257da6873..403c657c64053 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td @@ -1803,19 +1803,16 @@ class getVOP3SrcForVT { 1 : VSrc_b32); } -// Returns the vreg register class to use for sources of VOP3 instructions for the -// given VT. 
-class getVOP3VRegSrcForVT { - RegisterOperand ret = - !cond(!eq(VT.Size, 128) : RegisterOperand, - !eq(VT.Size, 96) : RegisterOperand, - !eq(VT.Size, 64) : RegisterOperand, - !eq(VT.Size, 48) : RegisterOperand, - !eq(VT.Size, 16) : !if(IsTrue16, - !if(IsFake16, RegisterOperand, - RegisterOperand), - RegisterOperand), - 1 : RegisterOperand); +// VGPR only VOP3 src with 9 bit encoding +class getVOP3VRegSrcForVT { + RegisterOperand ret = !cond(!eq(VT.Size, 1024) : VRegSrc_1024, + !eq(VT.Size, 512) : VRegSrc_512, + !eq(VT.Size, 256) : VRegSrc_256, + !eq(VT.Size, 192) : VRegSrc_192, + !eq(VT.Size, 128) : VRegSrc_128, + !eq(VT.Size, 96) : VRegSrc_96, + !eq(VT.Size, 64) : VRegSrc_64, + 1 : VRegSrc_32); } // Src2 of VOP3 DPP instructions cannot be a literal @@ -2859,6 +2856,7 @@ def VOP_V2I16_F32_F32_F32 : VOPProfile<[v2i16, f32, f32, f32]>; def VOP_V2I16_V2F16_F32 : VOPProfile<[v2i16, v2f16, f32, untyped]>; def VOP_V2I16_V2BF16_F32 : VOPProfile<[v2i16, v2bf16, f32, untyped]>; def VOP_I32_F32_F32_F32 : VOPProfile<[i32, f32, f32, f32]>; +def VOP_I32_V2F32_I32_F32 : VOPProfile<[i32, v2f32, i32, f32]>; def VOP_I32_V2F16_F32_F32 : VOPProfile<[i32, v2f16, f32, f32]>; def VOP_I32_V2BF16_F32_F32: VOPProfile<[i32, v2bf16, f32, f32]>; def VOP_BF16_F32_I32 : VOPProfile<[bf16, f32, i32, untyped]>; diff --git a/llvm/lib/Target/AMDGPU/VOP2Instructions.td b/llvm/lib/Target/AMDGPU/VOP2Instructions.td index 900c91731aa1b..15f2dd47c7f99 100644 --- a/llvm/lib/Target/AMDGPU/VOP2Instructions.td +++ b/llvm/lib/Target/AMDGPU/VOP2Instructions.td @@ -418,12 +418,27 @@ def VOP_MADMK_F16_fake16 : VOP_MADMK { } def VOP_MADMK_F32 : VOP_MADMK ; +// Returns the vreg register class to use for sources of VOP3 instructions for the +// given VT. 
+class getVOP3VRegForVT { + RegisterOperand ret = + !cond(!eq(VT.Size, 128) : RegisterOperand, + !eq(VT.Size, 96) : RegisterOperand, + !eq(VT.Size, 64) : RegisterOperand, + !eq(VT.Size, 48) : RegisterOperand, + !eq(VT.Size, 16) : !if(IsTrue16, + !if(IsFake16, RegisterOperand, + RegisterOperand), + RegisterOperand), + 1 : RegisterOperand); +} + // FIXME: Remove src2_modifiers. It isn't used, so is wasting memory // and processing time but it makes it easier to convert to mad. class VOP_MAC : VOPProfile <[vt0, vt1, vt1, vt0]> { let Ins32 = (ins Src0RC32:$src0, Src1RC32:$src1, getVregSrcForVT.ret:$src2); // Src2 must accept the same operand types as vdst, namely VGPRs only - let Src2RC64 = getVOP3VRegSrcForVT.ret; + let Src2RC64 = getVOP3VRegForVT.ret; let Ins64 = getIns64.ret; diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td index afafc2ecccfaf..1447804871809 100644 --- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td +++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td @@ -1052,7 +1052,11 @@ class VOP3_CVT_SCALE_SR_PK_F4_F16BF16_TiedInput_Profile : let HasFP4DstByteSel = 1; } -def VOP3_CVT_SCALE_SR_PK_F4_F32_TiedInput_Profile : VOP3_Profile, VOP3_OPSEL> { +class VOP3_CVT_SCALE_SR_PK_F4_F32_TiedInput_Profile + : VOP3_Profile { + + let Src0RC64 = !if(!gt(P.Src0VT.Size, 32), getVOP3VRegSrcForVT.ret, + getVOP3SrcForVT.ret); let InsVOP3OpSel = (ins PackedF32InputMods: $src0_modifiers, Src0RC64:$src0, Int32InputMods: $src1_modifiers, Src1RC64:$src1, FP32InputMods: $src2_modifiers, Src2RC64:$src2, @@ -1100,6 +1104,11 @@ class VOP3_CVT_SCALEF32_PK_F864_Profile : VOP3_Profile

{ let HasExt32BitDPP = 0; let HasExtVOP3DPP = 0; let HasExt64BitDPP = 0; + + // All convert opcodes operating on FP6/BF6/FP4 data must use VGPR sources for + // any operand slots > 32 bit. + let Src0RC64 = !if(!gt(P.Src0VT.Size, 32), getVOP3VRegSrcForVT.ret, + getVOP3SrcForVT.ret); } let SubtargetPredicate = HasFP8ConversionScaleInsts, mayRaiseFPException = 0 in { @@ -1141,7 +1150,10 @@ let SubtargetPredicate = HasFP4ConversionScaleInsts, mayRaiseFPException = 0 in let Constraints = "@earlyclobber $vdst" in { defm V_CVT_SCALEF32_SR_PK_FP4_F16: VOP3Inst<"v_cvt_scalef32_sr_pk_fp4_f16", VOP3_CVT_SCALE_SR_PK_F4_F16BF16_TiedInput_Profile>; defm V_CVT_SCALEF32_SR_PK_FP4_BF16: VOP3Inst<"v_cvt_scalef32_sr_pk_fp4_bf16", VOP3_CVT_SCALE_SR_PK_F4_F16BF16_TiedInput_Profile>; - defm V_CVT_SCALEF32_SR_PK_FP4_F32: VOP3Inst<"v_cvt_scalef32_sr_pk_fp4_f32", VOP3_CVT_SCALE_SR_PK_F4_F32_TiedInput_Profile>; + defm V_CVT_SCALEF32_SR_PK_FP4_F32 + : VOP3Inst<"v_cvt_scalef32_sr_pk_fp4_f32", + VOP3_CVT_SCALE_SR_PK_F4_F32_TiedInput_Profile< + VOP_I32_V2F32_I32_F32>>; } } defm V_CVT_SCALEF32_PK_F16_FP4 : VOP3Inst<"v_cvt_scalef32_pk_f16_fp4", VOP3_CVT_SCALE_PK_F16BF16F32_FP4FP8BF8_Profile>; diff --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td index d5c6e8af109f4..85c047167f1e1 100644 --- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td +++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td @@ -145,8 +145,8 @@ def : VOP3PSatPat; } // End SubtargetPredicate = HasVOP3PInsts let SubtargetPredicate = HasMinimum3Maximum3PKF16, FPDPRounding = 1 in { -defm V_PK_MINIMUM3_F16 : VOP3PInst<"v_pk_minimum3_f16", VOP3P_Profile>; -defm V_PK_MAXIMUM3_F16 : VOP3PInst<"v_pk_maximum3_f16", VOP3P_Profile>; +defm V_PK_MINIMUM3_F16 : VOP3PInst<"v_pk_minimum3_f16", VOP3P_Profile, AMDGPUfminimum3>; +defm V_PK_MAXIMUM3_F16 : VOP3PInst<"v_pk_maximum3_f16", VOP3P_Profile, AMDGPUfmaximum3>; } // TODO: Make sure we're doing the right thing with denormals. 
Note diff --git a/llvm/lib/Target/ARC/ARCOptAddrMode.cpp b/llvm/lib/Target/ARC/ARCOptAddrMode.cpp index f2b5ce6de4a60..a2af5062c65a0 100644 --- a/llvm/lib/Target/ARC/ARCOptAddrMode.cpp +++ b/llvm/lib/Target/ARC/ARCOptAddrMode.cpp @@ -151,7 +151,7 @@ static bool dominatesAllUsesOf(const MachineInstr *MI, unsigned VReg, MachineDominatorTree *MDT, MachineRegisterInfo *MRI) { - assert(Register(VReg).isVirtual() && "Expected virtual register!"); + assert(Register::isVirtualRegister(VReg) && "Expected virtual register!"); for (const MachineOperand &Use : MRI->use_nodbg_operands(VReg)) { const MachineInstr *User = Use.getParent(); @@ -216,7 +216,7 @@ MachineInstr *ARCOptAddrMode::tryToCombine(MachineInstr &Ldst) { } Register B = Base.getReg(); - if (!B.isVirtual()) + if (!Register::isVirtualRegister(B)) { LLVM_DEBUG(dbgs() << "[ABAW] Base is not VReg\n"); return nullptr; } diff --git a/llvm/lib/Target/ARM/A15SDOptimizer.cpp b/llvm/lib/Target/ARM/A15SDOptimizer.cpp index 452159406085d..bb9a0a2bdf98b 100644 --- a/llvm/lib/Target/ARM/A15SDOptimizer.cpp +++ b/llvm/lib/Target/ARM/A15SDOptimizer.cpp @@ -152,7 +152,7 @@ unsigned A15SDOptimizer::getDPRLaneFromSPR(unsigned SReg) { // Get the subreg type that is most likely to be coalesced // for an SPR register that will be used in VDUP32d pseudo. unsigned A15SDOptimizer::getPrefSPRLane(unsigned SReg) { - if (!Register(SReg).isVirtual()) + if (!Register::isVirtualRegister(SReg)) return getDPRLaneFromSPR(SReg); MachineInstr *MI = MRI->getVRegDef(SReg); @@ -166,7 +166,7 @@ unsigned A15SDOptimizer::getPrefSPRLane(unsigned SReg) { SReg = MI->getOperand(1).getReg(); } - if (Register(SReg).isVirtual()) { + if (Register::isVirtualRegister(SReg)) { if (MO->getSubReg() == ARM::ssub_1) return ARM::ssub_1; return ARM::ssub_0; } @@ -598,7 +598,7 @@ bool A15SDOptimizer::runOnInstruction(MachineInstr *MI) { // we can end up with multiple defs of this DPR. 
SmallVector DefSrcs; - if (!Register(I).isVirtual()) + if (!Register::isVirtualRegister(I)) continue; MachineInstr *Def = MRI->getVRegDef(I); if (!Def) diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp index 9a021925a6bd1..839b7e81f8998 100644 --- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -1108,7 +1108,7 @@ ARMBaseInstrInfo::AddDReg(MachineInstrBuilder &MIB, unsigned Reg, if (!SubIdx) return MIB.addReg(Reg, State); - if (Register(Reg).isPhysical()) + if (Register::isPhysicalRegister(Reg)) return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State); return MIB.addReg(Reg, State, SubIdx); } diff --git a/llvm/lib/Target/ARM/ARMLatencyMutations.cpp b/llvm/lib/Target/ARM/ARMLatencyMutations.cpp index 601b3fa19978d..85bad4f1925a4 100644 --- a/llvm/lib/Target/ARM/ARMLatencyMutations.cpp +++ b/llvm/lib/Target/ARM/ARMLatencyMutations.cpp @@ -756,7 +756,7 @@ signed M85Overrides::modifyMixedWidthFP(const MachineInstr *SrcMI, !II->producesQP(SrcMI->getOpcode())) return 0; - if (Register(RegID).isVirtual()) { + if (Register::isVirtualRegister(RegID)) { if (II->producesSP(SrcMI->getOpcode()) && II->consumesDP(DstMI->getOpcode())) { for (auto &OP : SrcMI->operands()) @@ -802,7 +802,7 @@ signed M85Overrides::modifyMixedWidthFP(const MachineInstr *SrcMI, OP.getSubReg() == ARM::ssub_1) return 1; } - } else if (Register(RegID).isPhysical()) { + } else if (Register::isPhysicalRegister(RegID)) { // Note that when the producer is narrower, not all of the producers // may be present in the scheduling graph; somewhere earlier in the // compiler, an implicit def/use of the aliased full register gets diff --git a/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp b/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp index 1f6b6163dd3b6..a8927d834630e 100644 --- a/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp +++ b/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp @@ -253,15 +253,15 @@ bool AVRDAGToDAGISel::SelectInlineAsmMemoryOperand( 
SDValue ImmOp = Op->getOperand(1); ConstantSDNode *ImmNode = dyn_cast(ImmOp); - Register Reg; + unsigned Reg; bool CanHandleRegImmOpt = ImmNode && ImmNode->getAPIntValue().ult(64); if (CopyFromRegOp->getOpcode() == ISD::CopyFromReg) { RegisterSDNode *RegNode = cast(CopyFromRegOp->getOperand(1)); Reg = RegNode->getReg(); - CanHandleRegImmOpt &= - (Reg.isVirtual() || AVR::PTRDISPREGSRegClass.contains(Reg)); + CanHandleRegImmOpt &= (Register::isVirtualRegister(Reg) || + AVR::PTRDISPREGSRegClass.contains(Reg)); } else { CanHandleRegImmOpt = false; } diff --git a/llvm/lib/Target/Hexagon/HexagonCopyHoisting.cpp b/llvm/lib/Target/Hexagon/HexagonCopyHoisting.cpp index 1a0cdd811762f..59c882bf37afa 100644 --- a/llvm/lib/Target/Hexagon/HexagonCopyHoisting.cpp +++ b/llvm/lib/Target/Hexagon/HexagonCopyHoisting.cpp @@ -139,7 +139,8 @@ void HexagonCopyHoisting::addMItoCopyList(MachineInstr *MI) { Register DstReg = MI->getOperand(0).getReg(); Register SrcReg = MI->getOperand(1).getReg(); - if (!DstReg.isVirtual() || !SrcReg.isVirtual() || + if (!Register::isVirtualRegister(DstReg) || + !Register::isVirtualRegister(SrcReg) || MRI->getRegClass(DstReg) != &Hexagon::IntRegsRegClass || MRI->getRegClass(SrcReg) != &Hexagon::IntRegsRegClass) return; diff --git a/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp index df182613d1661..3b157006d9224 100644 --- a/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp +++ b/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp @@ -223,8 +223,8 @@ static bool areCombinableOperations(const TargetRegisterInfo *TRI, return true; } -static bool isEvenReg(Register Reg) { - assert(Reg.isPhysical()); +static bool isEvenReg(unsigned Reg) { + assert(Register::isPhysicalRegister(Reg)); if (Hexagon::IntRegsRegClass.contains(Reg)) return (Reg - Hexagon::R0) % 2 == 0; if (Hexagon::HvxVRRegClass.contains(Reg)) @@ -546,7 +546,7 @@ MachineInstr *HexagonCopyToCombine::findPairable(MachineInstr &I1, // is even. 
bool IsI1LowReg = (I2DestReg - I1DestReg) == 1; bool IsI2LowReg = (I1DestReg - I2DestReg) == 1; - Register FirstRegIndex = IsI1LowReg ? I1DestReg : I2DestReg; + unsigned FirstRegIndex = IsI1LowReg ? I1DestReg : I2DestReg; if ((!IsI1LowReg && !IsI2LowReg) || !isEvenReg(FirstRegIndex)) continue; diff --git a/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp b/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp index 3bb7175bbf8b9..ee01ebc4daa26 100644 --- a/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp +++ b/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp @@ -275,7 +275,7 @@ static bool canCompareBeNewValueJump(const HexagonInstrInfo *QII, return false; } - Register cmpReg1, cmpOp2; + unsigned cmpReg1, cmpOp2 = 0; // cmpOp2 assignment silences compiler warning. cmpReg1 = MI.getOperand(1).getReg(); if (secondReg) { @@ -290,7 +290,7 @@ static bool canCompareBeNewValueJump(const HexagonInstrInfo *QII, // at machine code level, we don't need this, but if we decide // to move new value jump prior to RA, we would be needing this. MachineRegisterInfo &MRI = MF.getRegInfo(); - if (!cmpOp2.isPhysical()) { + if (!Register::isPhysicalRegister(cmpOp2)) { MachineInstr *def = MRI.getVRegDef(cmpOp2); if (def->getOpcode() == TargetOpcode::COPY) return false; @@ -480,7 +480,7 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) { bool foundJump = false; bool foundCompare = false; bool invertPredicate = false; - Register predReg; // predicate reg of the jump. + unsigned predReg = 0; // predicate reg of the jump. unsigned cmpReg1 = 0; int cmpOp2 = 0; MachineBasicBlock::iterator jmpPos; @@ -516,7 +516,7 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) { jmpPos = MII; jmpInstr = &MI; predReg = MI.getOperand(0).getReg(); - afterRA = predReg.isPhysical(); + afterRA = Register::isPhysicalRegister(predReg); // If ifconverter had not messed up with the kill flags of the // operands, the following check on the kill flag would suffice. 
diff --git a/llvm/lib/Target/Hexagon/RDFCopy.cpp b/llvm/lib/Target/Hexagon/RDFCopy.cpp index 76177901f658a..fafdad08909dd 100644 --- a/llvm/lib/Target/Hexagon/RDFCopy.cpp +++ b/llvm/lib/Target/Hexagon/RDFCopy.cpp @@ -44,8 +44,8 @@ bool CopyPropagation::interpretAsCopy(const MachineInstr *MI, EqualityMap &EM) { const MachineOperand &Src = MI->getOperand(1); RegisterRef DstR = DFG.makeRegRef(Dst.getReg(), Dst.getSubReg()); RegisterRef SrcR = DFG.makeRegRef(Src.getReg(), Src.getSubReg()); - assert(Register(DstR.Reg).isPhysical()); - assert(Register(SrcR.Reg).isPhysical()); + assert(Register::isPhysicalRegister(DstR.Reg)); + assert(Register::isPhysicalRegister(SrcR.Reg)); const TargetRegisterInfo &TRI = DFG.getTRI(); if (TRI.getMinimalPhysRegClass(DstR.Reg) != TRI.getMinimalPhysRegClass(SrcR.Reg)) diff --git a/llvm/lib/Target/M68k/M68kISelLowering.cpp b/llvm/lib/Target/M68k/M68kISelLowering.cpp index 5a678bfcf410c..39b307b28889c 100644 --- a/llvm/lib/Target/M68k/M68kISelLowering.cpp +++ b/llvm/lib/Target/M68k/M68kISelLowering.cpp @@ -322,7 +322,7 @@ static bool MatchingStackOffset(SDValue Arg, unsigned Offset, int FI = INT_MAX; if (Arg.getOpcode() == ISD::CopyFromReg) { Register VR = cast(Arg.getOperand(1))->getReg(); - if (!VR.isVirtual()) + if (!Register::isVirtualRegister(VR)) return false; MachineInstr *Def = MRI->getVRegDef(VR); if (!Def) diff --git a/llvm/lib/Target/M68k/M68kRegisterInfo.cpp b/llvm/lib/Target/M68k/M68kRegisterInfo.cpp index 5375d4484a7ab..62fb72ba4fd5e 100644 --- a/llvm/lib/Target/M68k/M68kRegisterInfo.cpp +++ b/llvm/lib/Target/M68k/M68kRegisterInfo.cpp @@ -83,7 +83,8 @@ M68kRegisterInfo::getMatchingMegaReg(unsigned Reg, const TargetRegisterClass * M68kRegisterInfo::getMaximalPhysRegClass(unsigned reg, MVT VT) const { - assert(Register(reg).isPhysical() && "reg must be a physical register"); + assert(Register::isPhysicalRegister(reg) && + "reg must be a physical register"); // Pick the most sub register class of the right type that contains // 
this physreg. diff --git a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp index 51cd2b999ff9e..6e5dd6b15900c 100644 --- a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp @@ -205,7 +205,7 @@ MCOperand NVPTXAsmPrinter::lowerOperand(const MachineOperand &MO) { } unsigned NVPTXAsmPrinter::encodeVirtualRegister(unsigned Reg) { - if (Register(Reg).isVirtual()) { + if (Register::isVirtualRegister(Reg)) { const TargetRegisterClass *RC = MRI->getRegClass(Reg); DenseMap &RegMap = VRegMapping[RC]; diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp index e96c1758676a1..6621aa06ac268 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp @@ -13,8 +13,10 @@ #include "NVPTXISelDAGToDAG.h" #include "NVPTX.h" #include "NVPTXUtilities.h" +#include "llvm/ADT/APInt.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/CodeGen/ISDOpcodes.h" +#include "llvm/CodeGen/SelectionDAG.h" #include "llvm/CodeGen/SelectionDAGNodes.h" #include "llvm/IR/GlobalValue.h" #include "llvm/IR/Instructions.h" @@ -964,7 +966,6 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) { // Create the machine instruction DAG SDValue N1 = N->getOperand(1); - SDValue Addr; SDValue Offset, Base; std::optional Opcode; MVT::SimpleValueType TargetVT = LD->getSimpleValueType(0).SimpleTy; @@ -974,49 +975,27 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) { getI32Imm(VecType, DL), getI32Imm(FromType, DL), getI32Imm(FromTypeWidth, DL)}); - if (SelectDirectAddr(N1, Addr)) { - Opcode = pickOpcodeForVT(TargetVT, NVPTX::LD_i8_avar, NVPTX::LD_i16_avar, - NVPTX::LD_i32_avar, NVPTX::LD_i64_avar, - NVPTX::LD_f32_avar, NVPTX::LD_f64_avar); - if (!Opcode) - return false; - Ops.append({Addr, Chain}); - } else if (PointerSize == 64 ? 
SelectADDRsi64(N1.getNode(), N1, Base, Offset) - : SelectADDRsi(N1.getNode(), N1, Base, Offset)) { + if (SelectADDRsi(N1.getNode(), N1, Base, Offset)) { Opcode = pickOpcodeForVT(TargetVT, NVPTX::LD_i8_asi, NVPTX::LD_i16_asi, NVPTX::LD_i32_asi, NVPTX::LD_i64_asi, NVPTX::LD_f32_asi, NVPTX::LD_f64_asi); - if (!Opcode) - return false; - Ops.append({Base, Offset, Chain}); - } else if (PointerSize == 64 ? SelectADDRri64(N1.getNode(), N1, Base, Offset) - : SelectADDRri(N1.getNode(), N1, Base, Offset)) { - if (PointerSize == 64) + } else { + if (PointerSize == 64) { + SelectADDRri64(N1.getNode(), N1, Base, Offset); Opcode = pickOpcodeForVT(TargetVT, NVPTX::LD_i8_ari_64, NVPTX::LD_i16_ari_64, NVPTX::LD_i32_ari_64, NVPTX::LD_i64_ari_64, NVPTX::LD_f32_ari_64, NVPTX::LD_f64_ari_64); - else + } else { + SelectADDRri(N1.getNode(), N1, Base, Offset); Opcode = pickOpcodeForVT(TargetVT, NVPTX::LD_i8_ari, NVPTX::LD_i16_ari, NVPTX::LD_i32_ari, NVPTX::LD_i64_ari, NVPTX::LD_f32_ari, NVPTX::LD_f64_ari); - if (!Opcode) - return false; - Ops.append({Base, Offset, Chain}); - } else { - if (PointerSize == 64) - Opcode = - pickOpcodeForVT(TargetVT, NVPTX::LD_i8_areg_64, NVPTX::LD_i16_areg_64, - NVPTX::LD_i32_areg_64, NVPTX::LD_i64_areg_64, - NVPTX::LD_f32_areg_64, NVPTX::LD_f64_areg_64); - else - Opcode = pickOpcodeForVT(TargetVT, NVPTX::LD_i8_areg, NVPTX::LD_i16_areg, - NVPTX::LD_i32_areg, NVPTX::LD_i64_areg, - NVPTX::LD_f32_areg, NVPTX::LD_f64_areg); - if (!Opcode) - return false; - Ops.append({N1, Chain}); + } } + if (!Opcode) + return false; + Ops.append({Base, Offset, Chain}); SDNode *NVPTXLD = CurDAG->getMachineNode(*Opcode, DL, TargetVT, MVT::Other, Ops); @@ -1102,7 +1081,7 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) { } SDValue Op1 = N->getOperand(1); - SDValue Addr, Offset, Base; + SDValue Offset, Base; std::optional Opcode; SDNode *LD; @@ -1111,29 +1090,7 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) { getI32Imm(VecType, DL), getI32Imm(FromType, DL), 
getI32Imm(FromTypeWidth, DL)}); - if (SelectDirectAddr(Op1, Addr)) { - switch (N->getOpcode()) { - default: - return false; - case NVPTXISD::LoadV2: - Opcode = pickOpcodeForVT(EltVT.getSimpleVT().SimpleTy, - NVPTX::LDV_i8_v2_avar, NVPTX::LDV_i16_v2_avar, - NVPTX::LDV_i32_v2_avar, NVPTX::LDV_i64_v2_avar, - NVPTX::LDV_f32_v2_avar, NVPTX::LDV_f64_v2_avar); - break; - case NVPTXISD::LoadV4: - Opcode = - pickOpcodeForVT(EltVT.getSimpleVT().SimpleTy, NVPTX::LDV_i8_v4_avar, - NVPTX::LDV_i16_v4_avar, NVPTX::LDV_i32_v4_avar, - std::nullopt, NVPTX::LDV_f32_v4_avar, std::nullopt); - break; - } - if (!Opcode) - return false; - Ops.append({Addr, Chain}); - } else if (PointerSize == 64 - ? SelectADDRsi64(Op1.getNode(), Op1, Base, Offset) - : SelectADDRsi(Op1.getNode(), Op1, Base, Offset)) { + if (SelectADDRsi(Op1.getNode(), Op1, Base, Offset)) { switch (N->getOpcode()) { default: return false; @@ -1153,10 +1110,9 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) { if (!Opcode) return false; Ops.append({Base, Offset, Chain}); - } else if (PointerSize == 64 - ? 
SelectADDRri64(Op1.getNode(), Op1, Base, Offset) - : SelectADDRri(Op1.getNode(), Op1, Base, Offset)) { + } else { if (PointerSize == 64) { + SelectADDRri64(Op1.getNode(), Op1, Base, Offset); switch (N->getOpcode()) { default: return false; @@ -1175,6 +1131,7 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) { break; } } else { + SelectADDRri(Op1.getNode(), Op1, Base, Offset); switch (N->getOpcode()) { default: return false; @@ -1195,47 +1152,6 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) { if (!Opcode) return false; Ops.append({Base, Offset, Chain}); - } else { - if (PointerSize == 64) { - switch (N->getOpcode()) { - default: - return false; - case NVPTXISD::LoadV2: - Opcode = pickOpcodeForVT( - EltVT.getSimpleVT().SimpleTy, NVPTX::LDV_i8_v2_areg_64, - NVPTX::LDV_i16_v2_areg_64, NVPTX::LDV_i32_v2_areg_64, - NVPTX::LDV_i64_v2_areg_64, NVPTX::LDV_f32_v2_areg_64, - NVPTX::LDV_f64_v2_areg_64); - break; - case NVPTXISD::LoadV4: - Opcode = pickOpcodeForVT( - EltVT.getSimpleVT().SimpleTy, NVPTX::LDV_i8_v4_areg_64, - NVPTX::LDV_i16_v4_areg_64, NVPTX::LDV_i32_v4_areg_64, std::nullopt, - NVPTX::LDV_f32_v4_areg_64, std::nullopt); - break; - } - } else { - switch (N->getOpcode()) { - default: - return false; - case NVPTXISD::LoadV2: - Opcode = - pickOpcodeForVT(EltVT.getSimpleVT().SimpleTy, NVPTX::LDV_i8_v2_areg, - NVPTX::LDV_i16_v2_areg, NVPTX::LDV_i32_v2_areg, - NVPTX::LDV_i64_v2_areg, NVPTX::LDV_f32_v2_areg, - NVPTX::LDV_f64_v2_areg); - break; - case NVPTXISD::LoadV4: - Opcode = - pickOpcodeForVT(EltVT.getSimpleVT().SimpleTy, NVPTX::LDV_i8_v4_areg, - NVPTX::LDV_i16_v4_areg, NVPTX::LDV_i32_v4_areg, - std::nullopt, NVPTX::LDV_f32_v4_areg, std::nullopt); - break; - } - } - if (!Opcode) - return false; - Ops.append({Op1, Chain}); } LD = CurDAG->getMachineNode(*Opcode, DL, N->getVTList(), Ops); @@ -1344,9 +1260,9 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) { return false; SDValue Ops[] = { Addr, Chain }; LD = CurDAG->getMachineNode(*Opcode, DL, InstVTList, Ops); - } 
else if (TM.is64Bit() ? SelectADDRri64(Op1.getNode(), Op1, Base, Offset) - : SelectADDRri(Op1.getNode(), Op1, Base, Offset)) { + } else { if (TM.is64Bit()) { + SelectADDRri64(Op1.getNode(), Op1, Base, Offset); switch (N->getOpcode()) { default: return false; @@ -1402,6 +1318,7 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) { break; } } else { + SelectADDRri(Op1.getNode(), Op1, Base, Offset); switch (N->getOpcode()) { default: return false; @@ -1457,122 +1374,6 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) { return false; SDValue Ops[] = {Base, Offset, Chain}; LD = CurDAG->getMachineNode(*Opcode, DL, InstVTList, Ops); - } else { - if (TM.is64Bit()) { - switch (N->getOpcode()) { - default: - return false; - case ISD::LOAD: - Opcode = pickOpcodeForVT(EltVT.getSimpleVT().SimpleTy, - NVPTX::INT_PTX_LDG_GLOBAL_i8areg64, - NVPTX::INT_PTX_LDG_GLOBAL_i16areg64, - NVPTX::INT_PTX_LDG_GLOBAL_i32areg64, - NVPTX::INT_PTX_LDG_GLOBAL_i64areg64, - NVPTX::INT_PTX_LDG_GLOBAL_f32areg64, - NVPTX::INT_PTX_LDG_GLOBAL_f64areg64); - break; - case ISD::INTRINSIC_W_CHAIN: - Opcode = pickOpcodeForVT(EltVT.getSimpleVT().SimpleTy, - NVPTX::INT_PTX_LDU_GLOBAL_i8areg64, - NVPTX::INT_PTX_LDU_GLOBAL_i16areg64, - NVPTX::INT_PTX_LDU_GLOBAL_i32areg64, - NVPTX::INT_PTX_LDU_GLOBAL_i64areg64, - NVPTX::INT_PTX_LDU_GLOBAL_f32areg64, - NVPTX::INT_PTX_LDU_GLOBAL_f64areg64); - break; - case NVPTXISD::LoadV2: - Opcode = pickOpcodeForVT(EltVT.getSimpleVT().SimpleTy, - NVPTX::INT_PTX_LDG_G_v2i8_ELE_areg64, - NVPTX::INT_PTX_LDG_G_v2i16_ELE_areg64, - NVPTX::INT_PTX_LDG_G_v2i32_ELE_areg64, - NVPTX::INT_PTX_LDG_G_v2i64_ELE_areg64, - NVPTX::INT_PTX_LDG_G_v2f32_ELE_areg64, - NVPTX::INT_PTX_LDG_G_v2f64_ELE_areg64); - break; - case NVPTXISD::LDUV2: - Opcode = pickOpcodeForVT(EltVT.getSimpleVT().SimpleTy, - NVPTX::INT_PTX_LDU_G_v2i8_ELE_areg64, - NVPTX::INT_PTX_LDU_G_v2i16_ELE_areg64, - NVPTX::INT_PTX_LDU_G_v2i32_ELE_areg64, - NVPTX::INT_PTX_LDU_G_v2i64_ELE_areg64, - NVPTX::INT_PTX_LDU_G_v2f32_ELE_areg64, - 
NVPTX::INT_PTX_LDU_G_v2f64_ELE_areg64); - break; - case NVPTXISD::LoadV4: - Opcode = pickOpcodeForVT( - EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDG_G_v4i8_ELE_areg64, - NVPTX::INT_PTX_LDG_G_v4i16_ELE_areg64, - NVPTX::INT_PTX_LDG_G_v4i32_ELE_areg64, std::nullopt, - NVPTX::INT_PTX_LDG_G_v4f32_ELE_areg64, std::nullopt); - break; - case NVPTXISD::LDUV4: - Opcode = pickOpcodeForVT( - EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDU_G_v4i8_ELE_areg64, - NVPTX::INT_PTX_LDU_G_v4i16_ELE_areg64, - NVPTX::INT_PTX_LDU_G_v4i32_ELE_areg64, std::nullopt, - NVPTX::INT_PTX_LDU_G_v4f32_ELE_areg64, std::nullopt); - break; - } - } else { - switch (N->getOpcode()) { - default: - return false; - case ISD::LOAD: - Opcode = pickOpcodeForVT(EltVT.getSimpleVT().SimpleTy, - NVPTX::INT_PTX_LDG_GLOBAL_i8areg, - NVPTX::INT_PTX_LDG_GLOBAL_i16areg, - NVPTX::INT_PTX_LDG_GLOBAL_i32areg, - NVPTX::INT_PTX_LDG_GLOBAL_i64areg, - NVPTX::INT_PTX_LDG_GLOBAL_f32areg, - NVPTX::INT_PTX_LDG_GLOBAL_f64areg); - break; - case ISD::INTRINSIC_W_CHAIN: - Opcode = pickOpcodeForVT(EltVT.getSimpleVT().SimpleTy, - NVPTX::INT_PTX_LDU_GLOBAL_i8areg, - NVPTX::INT_PTX_LDU_GLOBAL_i16areg, - NVPTX::INT_PTX_LDU_GLOBAL_i32areg, - NVPTX::INT_PTX_LDU_GLOBAL_i64areg, - NVPTX::INT_PTX_LDU_GLOBAL_f32areg, - NVPTX::INT_PTX_LDU_GLOBAL_f64areg); - break; - case NVPTXISD::LoadV2: - Opcode = pickOpcodeForVT(EltVT.getSimpleVT().SimpleTy, - NVPTX::INT_PTX_LDG_G_v2i8_ELE_areg32, - NVPTX::INT_PTX_LDG_G_v2i16_ELE_areg32, - NVPTX::INT_PTX_LDG_G_v2i32_ELE_areg32, - NVPTX::INT_PTX_LDG_G_v2i64_ELE_areg32, - NVPTX::INT_PTX_LDG_G_v2f32_ELE_areg32, - NVPTX::INT_PTX_LDG_G_v2f64_ELE_areg32); - break; - case NVPTXISD::LDUV2: - Opcode = pickOpcodeForVT(EltVT.getSimpleVT().SimpleTy, - NVPTX::INT_PTX_LDU_G_v2i8_ELE_areg32, - NVPTX::INT_PTX_LDU_G_v2i16_ELE_areg32, - NVPTX::INT_PTX_LDU_G_v2i32_ELE_areg32, - NVPTX::INT_PTX_LDU_G_v2i64_ELE_areg32, - NVPTX::INT_PTX_LDU_G_v2f32_ELE_areg32, - NVPTX::INT_PTX_LDU_G_v2f64_ELE_areg32); - break; - case 
NVPTXISD::LoadV4: - Opcode = pickOpcodeForVT( - EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDG_G_v4i8_ELE_areg32, - NVPTX::INT_PTX_LDG_G_v4i16_ELE_areg32, - NVPTX::INT_PTX_LDG_G_v4i32_ELE_areg32, std::nullopt, - NVPTX::INT_PTX_LDG_G_v4f32_ELE_areg32, std::nullopt); - break; - case NVPTXISD::LDUV4: - Opcode = pickOpcodeForVT( - EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDU_G_v4i8_ELE_areg32, - NVPTX::INT_PTX_LDU_G_v4i16_ELE_areg32, - NVPTX::INT_PTX_LDU_G_v4i32_ELE_areg32, std::nullopt, - NVPTX::INT_PTX_LDU_G_v4f32_ELE_areg32, std::nullopt); - break; - } - } - if (!Opcode) - return false; - SDValue Ops[] = { Op1, Chain }; - LD = CurDAG->getMachineNode(*Opcode, DL, InstVTList, Ops); } // For automatic generation of LDG (through SelectLoad[Vector], not the @@ -1658,7 +1459,6 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) { // Create the machine instruction DAG SDValue Value = PlainStore ? PlainStore->getValue() : AtomicStore->getVal(); SDValue BasePtr = ST->getBasePtr(); - SDValue Addr; SDValue Offset, Base; std::optional Opcode; MVT::SimpleValueType SourceVT = @@ -1669,51 +1469,27 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) { getI32Imm(CodeAddrSpace, DL), getI32Imm(VecType, DL), getI32Imm(ToType, DL), getI32Imm(ToTypeWidth, DL)}); - if (SelectDirectAddr(BasePtr, Addr)) { - Opcode = pickOpcodeForVT(SourceVT, NVPTX::ST_i8_avar, NVPTX::ST_i16_avar, - NVPTX::ST_i32_avar, NVPTX::ST_i64_avar, - NVPTX::ST_f32_avar, NVPTX::ST_f64_avar); - if (!Opcode) - return false; - Ops.append({Addr, Chain}); - } else if (PointerSize == 64 - ? SelectADDRsi64(BasePtr.getNode(), BasePtr, Base, Offset) - : SelectADDRsi(BasePtr.getNode(), BasePtr, Base, Offset)) { + if (SelectADDRsi(BasePtr.getNode(), BasePtr, Base, Offset)) { Opcode = pickOpcodeForVT(SourceVT, NVPTX::ST_i8_asi, NVPTX::ST_i16_asi, NVPTX::ST_i32_asi, NVPTX::ST_i64_asi, NVPTX::ST_f32_asi, NVPTX::ST_f64_asi); - if (!Opcode) - return false; - Ops.append({Base, Offset, Chain}); - } else if (PointerSize == 64 - ? 
SelectADDRri64(BasePtr.getNode(), BasePtr, Base, Offset) - : SelectADDRri(BasePtr.getNode(), BasePtr, Base, Offset)) { - if (PointerSize == 64) + } else { + if (PointerSize == 64) { + SelectADDRri64(BasePtr.getNode(), BasePtr, Base, Offset); Opcode = pickOpcodeForVT(SourceVT, NVPTX::ST_i8_ari_64, NVPTX::ST_i16_ari_64, NVPTX::ST_i32_ari_64, NVPTX::ST_i64_ari_64, NVPTX::ST_f32_ari_64, NVPTX::ST_f64_ari_64); - else + } else { + SelectADDRri(BasePtr.getNode(), BasePtr, Base, Offset); Opcode = pickOpcodeForVT(SourceVT, NVPTX::ST_i8_ari, NVPTX::ST_i16_ari, NVPTX::ST_i32_ari, NVPTX::ST_i64_ari, NVPTX::ST_f32_ari, NVPTX::ST_f64_ari); - if (!Opcode) - return false; - Ops.append({Base, Offset, Chain}); - } else { - if (PointerSize == 64) - Opcode = - pickOpcodeForVT(SourceVT, NVPTX::ST_i8_areg_64, NVPTX::ST_i16_areg_64, - NVPTX::ST_i32_areg_64, NVPTX::ST_i64_areg_64, - NVPTX::ST_f32_areg_64, NVPTX::ST_f64_areg_64); - else - Opcode = pickOpcodeForVT(SourceVT, NVPTX::ST_i8_areg, NVPTX::ST_i16_areg, - NVPTX::ST_i32_areg, NVPTX::ST_i64_areg, - NVPTX::ST_f32_areg, NVPTX::ST_f64_areg); - if (!Opcode) - return false; - Ops.append({BasePtr, Chain}); + } } + if (!Opcode) + return false; + Ops.append({Base, Offset, Chain}); SDNode *NVPTXST = CurDAG->getMachineNode(*Opcode, DL, MVT::Other, Ops); @@ -1728,7 +1504,7 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) { bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) { SDValue Op1 = N->getOperand(1); - SDValue Addr, Offset, Base; + SDValue Offset, Base; std::optional Opcode; SDNode *ST; EVT EltVT = Op1.getValueType(); @@ -1785,26 +1561,7 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) { getI32Imm(CodeAddrSpace, DL), getI32Imm(VecType, DL), getI32Imm(ToType, DL), getI32Imm(ToTypeWidth, DL)}); - if (SelectDirectAddr(N2, Addr)) { - switch (N->getOpcode()) { - default: - return false; - case NVPTXISD::StoreV2: - Opcode = pickOpcodeForVT(EltVT.getSimpleVT().SimpleTy, - NVPTX::STV_i8_v2_avar, NVPTX::STV_i16_v2_avar, - NVPTX::STV_i32_v2_avar, 
NVPTX::STV_i64_v2_avar, - NVPTX::STV_f32_v2_avar, NVPTX::STV_f64_v2_avar); - break; - case NVPTXISD::StoreV4: - Opcode = pickOpcodeForVT(EltVT.getSimpleVT().SimpleTy, - NVPTX::STV_i8_v4_avar, NVPTX::STV_i16_v4_avar, - NVPTX::STV_i32_v4_avar, std::nullopt, - NVPTX::STV_f32_v4_avar, std::nullopt); - break; - } - Ops.push_back(Addr); - } else if (PointerSize == 64 ? SelectADDRsi64(N2.getNode(), N2, Base, Offset) - : SelectADDRsi(N2.getNode(), N2, Base, Offset)) { + if (SelectADDRsi(N2.getNode(), N2, Base, Offset)) { switch (N->getOpcode()) { default: return false; @@ -1822,9 +1579,9 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) { break; } Ops.append({Base, Offset}); - } else if (PointerSize == 64 ? SelectADDRri64(N2.getNode(), N2, Base, Offset) - : SelectADDRri(N2.getNode(), N2, Base, Offset)) { + } else { if (PointerSize == 64) { + SelectADDRri64(N2.getNode(), N2, Base, Offset); switch (N->getOpcode()) { default: return false; @@ -1843,6 +1600,7 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) { break; } } else { + SelectADDRri(N2.getNode(), N2, Base, Offset); switch (N->getOpcode()) { default: return false; @@ -1861,47 +1619,7 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) { } } Ops.append({Base, Offset}); - } else { - if (PointerSize == 64) { - switch (N->getOpcode()) { - default: - return false; - case NVPTXISD::StoreV2: - Opcode = pickOpcodeForVT( - EltVT.getSimpleVT().SimpleTy, NVPTX::STV_i8_v2_areg_64, - NVPTX::STV_i16_v2_areg_64, NVPTX::STV_i32_v2_areg_64, - NVPTX::STV_i64_v2_areg_64, NVPTX::STV_f32_v2_areg_64, - NVPTX::STV_f64_v2_areg_64); - break; - case NVPTXISD::StoreV4: - Opcode = pickOpcodeForVT( - EltVT.getSimpleVT().SimpleTy, NVPTX::STV_i8_v4_areg_64, - NVPTX::STV_i16_v4_areg_64, NVPTX::STV_i32_v4_areg_64, std::nullopt, - NVPTX::STV_f32_v4_areg_64, std::nullopt); - break; - } - } else { - switch (N->getOpcode()) { - default: - return false; - case NVPTXISD::StoreV2: - Opcode = - pickOpcodeForVT(EltVT.getSimpleVT().SimpleTy, 
NVPTX::STV_i8_v2_areg, - NVPTX::STV_i16_v2_areg, NVPTX::STV_i32_v2_areg, - NVPTX::STV_i64_v2_areg, NVPTX::STV_f32_v2_areg, - NVPTX::STV_f64_v2_areg); - break; - case NVPTXISD::StoreV4: - Opcode = - pickOpcodeForVT(EltVT.getSimpleVT().SimpleTy, NVPTX::STV_i8_v4_areg, - NVPTX::STV_i16_v4_areg, NVPTX::STV_i32_v4_areg, - std::nullopt, NVPTX::STV_f32_v4_areg, std::nullopt); - break; - } - } - Ops.push_back(N2); } - if (!Opcode) return false; @@ -2581,93 +2299,56 @@ bool NVPTXDAGToDAGISel::SelectDirectAddr(SDValue N, SDValue &Address) { return false; } -// symbol+offset -bool NVPTXDAGToDAGISel::SelectADDRsi_imp(SDNode *OpNode, SDValue Addr, - SDValue &Base, SDValue &Offset, - MVT VT) { - std::function(SDValue, uint64_t)> - FindRootAddressAndTotalOffset = - [&](SDValue Addr, - uint64_t AccumulatedOffset) -> std::optional { - if (isAddLike(Addr)) { - if (ConstantSDNode *CN = dyn_cast(Addr.getOperand(1))) { - SDValue PossibleBaseAddr = Addr.getOperand(0); - AccumulatedOffset += CN->getZExtValue(); - if (SelectDirectAddr(PossibleBaseAddr, Base)) - return AccumulatedOffset; - return FindRootAddressAndTotalOffset(PossibleBaseAddr, - AccumulatedOffset); - } - } - return std::nullopt; - }; - if (auto AccumulatedOffset = FindRootAddressAndTotalOffset(Addr, 0)) { - Offset = CurDAG->getTargetConstant(*AccumulatedOffset, SDLoc(OpNode), VT); - return true; +static SDValue accumulateOffset(SDValue &Addr, SDLoc DL, SelectionDAG *DAG) { + APInt AccumulatedOffset(64u, 0); + while (isAddLike(Addr)) { + const auto *CN = dyn_cast(Addr.getOperand(1)); + if (!CN) + break; + + const APInt CI = CN->getAPIntValue().sext(64); + if (!(CI + AccumulatedOffset).isSignedIntN(32)) + break; + + AccumulatedOffset += CI; + Addr = Addr->getOperand(0); } - return false; + return DAG->getSignedTargetConstant(AccumulatedOffset.getSExtValue(), DL, + MVT::i32); } // symbol+offset bool NVPTXDAGToDAGISel::SelectADDRsi(SDNode *OpNode, SDValue Addr, SDValue &Base, SDValue &Offset) { - return 
SelectADDRsi_imp(OpNode, Addr, Base, Offset, MVT::i32); -} - -// symbol+offset -bool NVPTXDAGToDAGISel::SelectADDRsi64(SDNode *OpNode, SDValue Addr, - SDValue &Base, SDValue &Offset) { - return SelectADDRsi_imp(OpNode, Addr, Base, Offset, MVT::i64); + Offset = accumulateOffset(Addr, SDLoc(OpNode), CurDAG); + return SelectDirectAddr(Addr, Base); } // register+offset -bool NVPTXDAGToDAGISel::SelectADDRri_imp(SDNode *OpNode, SDValue Addr, +void NVPTXDAGToDAGISel::SelectADDRri_imp(SDNode *OpNode, SDValue Addr, SDValue &Base, SDValue &Offset, MVT VT) { - if (FrameIndexSDNode *FIN = dyn_cast(Addr)) { - Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), VT); - Offset = CurDAG->getTargetConstant(0, SDLoc(OpNode), VT); - return true; - } - if (Addr.getOpcode() == ISD::TargetExternalSymbol || - Addr.getOpcode() == ISD::TargetGlobalAddress) - return false; // direct calls. - if (isAddLike(Addr)) { - if (SelectDirectAddr(Addr.getOperand(0), Addr)) { - return false; - } - if (ConstantSDNode *CN = dyn_cast(Addr.getOperand(1))) { - if (FrameIndexSDNode *FIN = - dyn_cast(Addr.getOperand(0))) - // Constant offset from frame ref. 
- Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), VT); - else - Base = Addr.getOperand(0); - - // Offset must fit in a 32-bit signed int in PTX [register+offset] address - // mode - if (!CN->getAPIntValue().isSignedIntN(32)) - return false; - - Offset = CurDAG->getSignedTargetConstant(CN->getSExtValue(), - SDLoc(OpNode), MVT::i32); - return true; - } + Offset = accumulateOffset(Addr, SDLoc(OpNode), CurDAG); + if (auto *FIN = dyn_cast(Addr)) { + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), VT); + return; } - return false; + Base = Addr; } // register+offset bool NVPTXDAGToDAGISel::SelectADDRri(SDNode *OpNode, SDValue Addr, SDValue &Base, SDValue &Offset) { - return SelectADDRri_imp(OpNode, Addr, Base, Offset, MVT::i32); + SelectADDRri_imp(OpNode, Addr, Base, Offset, MVT::i32); + return true; } // register+offset bool NVPTXDAGToDAGISel::SelectADDRri64(SDNode *OpNode, SDValue Addr, SDValue &Base, SDValue &Offset) { - return SelectADDRri_imp(OpNode, Addr, Base, Offset, MVT::i64); + SelectADDRri_imp(OpNode, Addr, Base, Offset, MVT::i64); + return true; } bool NVPTXDAGToDAGISel::ChkMemSDNodeAddressSpace(SDNode *N, diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h index 8dc6bc86c6828..1d02ae333c86b 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h +++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h @@ -107,18 +107,14 @@ class LLVM_LIBRARY_VISIBILITY NVPTXDAGToDAGISel : public SelectionDAGISel { // Match direct address complex pattern. 
bool SelectDirectAddr(SDValue N, SDValue &Address); - bool SelectADDRri_imp(SDNode *OpNode, SDValue Addr, SDValue &Base, + void SelectADDRri_imp(SDNode *OpNode, SDValue Addr, SDValue &Base, SDValue &Offset, MVT VT); bool SelectADDRri(SDNode *OpNode, SDValue Addr, SDValue &Base, SDValue &Offset); bool SelectADDRri64(SDNode *OpNode, SDValue Addr, SDValue &Base, SDValue &Offset); - bool SelectADDRsi_imp(SDNode *OpNode, SDValue Addr, SDValue &Base, - SDValue &Offset, MVT VT); bool SelectADDRsi(SDNode *OpNode, SDValue Addr, SDValue &Base, SDValue &Offset); - bool SelectADDRsi64(SDNode *OpNode, SDValue Addr, SDValue &Base, - SDValue &Offset); bool ChkMemSDNodeAddressSpace(SDNode *N, unsigned int spN) const; diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td index 7d9697e40e6ab..f75a70409340f 100644 --- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td +++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td @@ -2754,24 +2754,6 @@ foreach vt = [v2f16, v2bf16, v2i16, v4i8] in { // Load / Store Handling // multiclass LD { - def _avar : NVPTXInst< - (outs regclass:$dst), - (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign, - i32imm:$fromWidth, imem:$addr), - "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t$dst, [$addr];", []>; - def _areg : NVPTXInst< - (outs regclass:$dst), - (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign, - i32imm:$fromWidth, Int32Regs:$addr), - "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t$dst, [$addr];", []>; - def _areg_64 : NVPTXInst< - (outs regclass:$dst), - (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign, - i32imm:$fromWidth, Int64Regs:$addr), - "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t$dst, [$addr];", []>; def _ari : NVPTXInst< (outs regclass:$dst), (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, 
LdStCode:$Vec, LdStCode:$Sign, @@ -2802,24 +2784,6 @@ let mayLoad=1, hasSideEffects=0 in { } multiclass ST { - def _avar : NVPTXInst< - (outs), - (ins regclass:$src, LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, - LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, imem:$addr), - "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth" - " \t[$addr], $src;", []>; - def _areg : NVPTXInst< - (outs), - (ins regclass:$src, LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, - LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr), - "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth" - " \t[$addr], $src;", []>; - def _areg_64 : NVPTXInst< - (outs), - (ins regclass:$src, LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, - LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr), - "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth" - " \t[$addr], $src;", []>; def _ari : NVPTXInst< (outs), (ins regclass:$src, LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, @@ -2856,24 +2820,6 @@ let mayStore=1, hasSideEffects=0 in { // elementization happens at the machine instruction level, so the following // instructions never appear in the DAG. 
multiclass LD_VEC { - def _v2_avar : NVPTXInst< - (outs regclass:$dst1, regclass:$dst2), - (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, imem:$addr), - "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t{{$dst1, $dst2}}, [$addr];", []>; - def _v2_areg : NVPTXInst< - (outs regclass:$dst1, regclass:$dst2), - (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr), - "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t{{$dst1, $dst2}}, [$addr];", []>; - def _v2_areg_64 : NVPTXInst< - (outs regclass:$dst1, regclass:$dst2), - (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr), - "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t{{$dst1, $dst2}}, [$addr];", []>; def _v2_ari : NVPTXInst< (outs regclass:$dst1, regclass:$dst2), (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, @@ -2892,24 +2838,6 @@ multiclass LD_VEC { LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, Offseti32imm:$offset), "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " "\t{{$dst1, $dst2}}, [$addr$offset];", []>; - def _v4_avar : NVPTXInst< - (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4), - (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, imem:$addr), - "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>; - def _v4_areg : NVPTXInst< - (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4), - (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr), - "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t{{$dst1, 
$dst2, $dst3, $dst4}}, [$addr];", []>; - def _v4_areg_64 : NVPTXInst< - (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4), - (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr), - "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>; def _v4_ari : NVPTXInst< (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4), (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, @@ -2939,27 +2867,6 @@ let mayLoad=1, hasSideEffects=0 in { } multiclass ST_VEC { - def _v2_avar : NVPTXInst< - (outs), - (ins regclass:$src1, regclass:$src2, LdStCode:$sem, LdStCode:$scope, - LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, - imem:$addr), - "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t[$addr], {{$src1, $src2}};", []>; - def _v2_areg : NVPTXInst< - (outs), - (ins regclass:$src1, regclass:$src2, LdStCode:$sem, LdStCode:$scope, - LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, - Int32Regs:$addr), - "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t[$addr], {{$src1, $src2}};", []>; - def _v2_areg_64 : NVPTXInst< - (outs), - (ins regclass:$src1, regclass:$src2, LdStCode:$sem, LdStCode:$scope, - LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, - Int64Regs:$addr), - "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t[$addr], {{$src1, $src2}};", []>; def _v2_ari : NVPTXInst< (outs), (ins regclass:$src1, regclass:$src2, LdStCode:$sem, LdStCode:$scope, @@ -2981,27 +2888,6 @@ multiclass ST_VEC { imem:$addr, Offseti32imm:$offset), "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " "\t[$addr$offset], {{$src1, $src2}};", []>; - def _v4_avar : NVPTXInst< - (outs), - (ins regclass:$src1, regclass:$src2, regclass:$src3, 
regclass:$src4, - LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, imem:$addr), - "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>; - def _v4_areg : NVPTXInst< - (outs), - (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4, - LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr), - "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>; - def _v4_areg_64 : NVPTXInst< - (outs), - (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4, - LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr), - "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>; def _v4_ari : NVPTXInst< (outs), (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4, diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td index ed7963f35a7c7..0640d25031c6a 100644 --- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td +++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td @@ -2693,12 +2693,6 @@ defm INT_PTX_SATOM_XOR : ATOM2_bitwise_impl<"xor">; // Scalar multiclass LDU_G { - def areg: NVPTXInst<(outs regclass:$result), (ins Int32Regs:$src), - !strconcat("ldu.global.", TyStr), - []>, Requires<[hasLDU]>; - def areg64: NVPTXInst<(outs regclass:$result), (ins Int64Regs:$src), - !strconcat("ldu.global.", TyStr), - []>, Requires<[hasLDU]>; def avar: NVPTXInst<(outs regclass:$result), (ins imemAny:$src), !strconcat("ldu.global.", TyStr), []>, Requires<[hasLDU]>; @@ -2721,12 +2715,6 @@ defm INT_PTX_LDU_GLOBAL_f64 : LDU_G<"f64 \t$result, [$src];", Float64Regs>; // Elementized vector ldu multiclass VLDU_G_ELE_V2 { - def _areg32: 
NVPTXInst<(outs regclass:$dst1, regclass:$dst2), - (ins Int32Regs:$src), - !strconcat("ldu.global.", TyStr), []>; - def _areg64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2), - (ins Int64Regs:$src), - !strconcat("ldu.global.", TyStr), []>; def _ari32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2), (ins MEMri:$src), !strconcat("ldu.global.", TyStr), []>; @@ -2739,12 +2727,6 @@ multiclass VLDU_G_ELE_V2 { } multiclass VLDU_G_ELE_V4 { - def _areg32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, - regclass:$dst4), (ins Int32Regs:$src), - !strconcat("ldu.global.", TyStr), []>; - def _areg64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, - regclass:$dst4), (ins Int64Regs:$src), - !strconcat("ldu.global.", TyStr), []>; def _ari32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4), (ins MEMri:$src), !strconcat("ldu.global.", TyStr), []>; @@ -2796,12 +2778,6 @@ defm INT_PTX_LDU_G_v4f32_ELE // during the lifetime of the kernel. 
multiclass LDG_G { - def areg: NVPTXInst<(outs regclass:$result), (ins Int32Regs:$src), - !strconcat("ld.global.nc.", TyStr), - []>, Requires<[hasLDG]>; - def areg64: NVPTXInst<(outs regclass:$result), (ins Int64Regs:$src), - !strconcat("ld.global.nc.", TyStr), - []>, Requires<[hasLDG]>; def avar: NVPTXInst<(outs regclass:$result), (ins imemAny:$src), !strconcat("ld.global.nc.", TyStr), []>, Requires<[hasLDG]>; @@ -2830,12 +2806,6 @@ defm INT_PTX_LDG_GLOBAL_f64 // Elementized vector ldg multiclass VLDG_G_ELE_V2 { - def _areg32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2), - (ins Int32Regs:$src), - !strconcat("ld.global.nc.", TyStr), []>; - def _areg64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2), - (ins Int64Regs:$src), - !strconcat("ld.global.nc.", TyStr), []>; def _ari32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2), (ins MEMri:$src), !strconcat("ld.global.nc.", TyStr), []>; diff --git a/llvm/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp b/llvm/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp index 4d0694faa0c9a..4971d31691c54 100644 --- a/llvm/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp @@ -1800,7 +1800,7 @@ bool NVPTXReplaceImageHandles::replaceImageHandle(MachineOperand &Op, MachineInstr &TexHandleDef = *MRI.getVRegDef(Op.getReg()); switch (TexHandleDef.getOpcode()) { - case NVPTX::LD_i64_avar: { + case NVPTX::LD_i64_asi: { // The handle is a parameter value being loaded, replace with the // parameter symbol const auto &TM = static_cast(MF.getTarget()); diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp index 6c16afec33484..3aef6f2c893fa 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp +++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp @@ -5131,7 +5131,7 @@ static bool isOpZeroOfSubwordPreincLoad(int Opcode) { // This function checks for sign extension from 32 bits to 64 bits. 
static bool definedBySignExtendingOp(const unsigned Reg, const MachineRegisterInfo *MRI) { - if (!Register(Reg).isVirtual()) + if (!Register::isVirtualRegister(Reg)) return false; MachineInstr *MI = MRI->getVRegDef(Reg); @@ -5178,7 +5178,7 @@ static bool definedBySignExtendingOp(const unsigned Reg, // in the higher 32 bits then this function will return true. static bool definedByZeroExtendingOp(const unsigned Reg, const MachineRegisterInfo *MRI) { - if (!Register(Reg).isVirtual()) + if (!Register::isVirtualRegister(Reg)) return false; MachineInstr *MI = MRI->getVRegDef(Reg); @@ -5463,7 +5463,7 @@ std::pair PPCInstrInfo::isSignOrZeroExtended(const unsigned Reg, const unsigned BinOpDepth, const MachineRegisterInfo *MRI) const { - if (!Register(Reg).isVirtual()) + if (!Register::isVirtualRegister(Reg)) return std::pair(false, false); MachineInstr *MI = MRI->getVRegDef(Reg); diff --git a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp index 4893e17953ab5..1b6da5781ac6b 100644 --- a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp +++ b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp @@ -1482,7 +1482,7 @@ static unsigned getSrcVReg(unsigned Reg, MachineBasicBlock *BB1, } else if (Inst->isFullCopy()) NextReg = Inst->getOperand(1).getReg(); - if (NextReg == SrcReg || !Register(NextReg).isVirtual()) + if (NextReg == SrcReg || !Register::isVirtualRegister(NextReg)) break; SrcReg = NextReg; } diff --git a/llvm/lib/Target/PowerPC/PPCReduceCRLogicals.cpp b/llvm/lib/Target/PowerPC/PPCReduceCRLogicals.cpp index 58ab7ad27cd6e..0bfcba9a52486 100644 --- a/llvm/lib/Target/PowerPC/PPCReduceCRLogicals.cpp +++ b/llvm/lib/Target/PowerPC/PPCReduceCRLogicals.cpp @@ -537,7 +537,7 @@ MachineInstr *PPCReduceCRLogicals::lookThroughCRCopy(unsigned Reg, unsigned &Subreg, MachineInstr *&CpDef) { Subreg = -1; - if (!Register(Reg).isVirtual()) + if (!Register::isVirtualRegister(Reg)) return nullptr; MachineInstr *Copy = MRI->getVRegDef(Reg); CpDef = Copy; diff --git 
a/llvm/lib/Target/PowerPC/PPCVSXCopy.cpp b/llvm/lib/Target/PowerPC/PPCVSXCopy.cpp index bb5ca6872b2e3..0349a5929c106 100644 --- a/llvm/lib/Target/PowerPC/PPCVSXCopy.cpp +++ b/llvm/lib/Target/PowerPC/PPCVSXCopy.cpp @@ -40,9 +40,9 @@ namespace { const TargetInstrInfo *TII; - bool IsRegInClass(Register Reg, const TargetRegisterClass *RC, + bool IsRegInClass(unsigned Reg, const TargetRegisterClass *RC, MachineRegisterInfo &MRI) { - if (Reg.isVirtual()) { + if (Register::isVirtualRegister(Reg)) { return RC->hasSubClassEq(MRI.getRegClass(Reg)); } else if (RC->contains(Reg)) { return true; diff --git a/llvm/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp b/llvm/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp index fc80e61a14517..573b30ccbcf2e 100644 --- a/llvm/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp +++ b/llvm/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp @@ -157,7 +157,7 @@ struct PPCVSXSwapRemoval : public MachineFunctionPass { // Return true iff the given register is in the given class. bool isRegInClass(unsigned Reg, const TargetRegisterClass *RC) { - if (Register(Reg).isVirtual()) + if (Register::isVirtualRegister(Reg)) return RC->hasSubClassEq(MRI->getRegClass(Reg)); return RC->contains(Reg); } @@ -560,7 +560,7 @@ unsigned PPCVSXSwapRemoval::lookThruCopyLike(unsigned SrcReg, if (!MI->isCopyLike()) return SrcReg; - Register CopySrcReg; + unsigned CopySrcReg; if (MI->isCopy()) CopySrcReg = MI->getOperand(1).getReg(); else { @@ -568,7 +568,7 @@ unsigned PPCVSXSwapRemoval::lookThruCopyLike(unsigned SrcReg, CopySrcReg = MI->getOperand(2).getReg(); } - if (!CopySrcReg.isVirtual()) { + if (!Register::isVirtualRegister(CopySrcReg)) { if (!isScalarVecReg(CopySrcReg)) SwapVector[VecIdx].MentionsPhysVR = 1; return CopySrcReg; diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp index 10b4f595546cc..75011ab3c8721 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp +++ 
b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp @@ -64,7 +64,7 @@ void WebAssemblyInstrInfo::copyPhysReg(MachineBasicBlock &MBB, // exist. However we need to handle both here. auto &MRI = MBB.getParent()->getRegInfo(); const TargetRegisterClass *RC = - Register(DestReg).isVirtual() + Register::isVirtualRegister(DestReg) ? MRI.getRegClass(DestReg) : MRI.getTargetRegisterInfo()->getMinimalPhysRegClass(DestReg); diff --git a/llvm/lib/ToolDrivers/llvm-dlltool/CMakeLists.txt b/llvm/lib/ToolDrivers/llvm-dlltool/CMakeLists.txt index 855ae5f048ff7..5db08e7852d03 100644 --- a/llvm/lib/ToolDrivers/llvm-dlltool/CMakeLists.txt +++ b/llvm/lib/ToolDrivers/llvm-dlltool/CMakeLists.txt @@ -6,6 +6,7 @@ add_llvm_component_library(LLVMDlltoolDriver DlltoolDriver.cpp LINK_COMPONENTS + BinaryFormat Object Option Support diff --git a/llvm/lib/ToolDrivers/llvm-dlltool/DlltoolDriver.cpp b/llvm/lib/ToolDrivers/llvm-dlltool/DlltoolDriver.cpp index 1782e24287860..380fbd8b6fc6c 100644 --- a/llvm/lib/ToolDrivers/llvm-dlltool/DlltoolDriver.cpp +++ b/llvm/lib/ToolDrivers/llvm-dlltool/DlltoolDriver.cpp @@ -12,6 +12,7 @@ #include "llvm/ToolDrivers/llvm-dlltool/DlltoolDriver.h" #include "llvm/ADT/StringSwitch.h" +#include "llvm/Object/Archive.h" #include "llvm/Object/COFF.h" #include "llvm/Object/COFFImportFile.h" #include "llvm/Object/COFFModuleDefinition.h" @@ -158,6 +159,143 @@ bool parseModuleDefinition(StringRef DefFileName, MachineTypes Machine, return true; } +int printError(llvm::Error E, Twine File) { + if (!E) + return 0; + handleAllErrors(std::move(E), [&](const llvm::ErrorInfoBase &EIB) { + llvm::errs() << "error opening " << File << ": " << EIB.message() << "\n"; + }); + return 1; +} + +template +int forEachCoff(object::Archive &Archive, StringRef Name, Callable Callback) { + Error Err = Error::success(); + for (auto &C : Archive.children(Err)) { + Expected NameOrErr = C.getName(); + if (!NameOrErr) + return printError(NameOrErr.takeError(), Name); + StringRef Name = *NameOrErr; 
+ + Expected ChildMB = C.getMemoryBufferRef(); + if (!ChildMB) + return printError(ChildMB.takeError(), Name); + + if (identify_magic(ChildMB->getBuffer()) == file_magic::coff_object) { + auto Obj = object::COFFObjectFile::create(*ChildMB); + if (!Obj) + return printError(Obj.takeError(), Name); + if (!Callback(*Obj->get(), Name)) + return 1; + } + } + if (Err) + return printError(std::move(Err), Name); + return 0; +} + +// To find the named of the imported DLL from an import library, we can either +// inspect the object files that form the import table entries, or we could +// just look at the archive member names, for MSVC style import libraries. +// Looking at the archive member names doesn't work for GNU style import +// libraries though, while inspecting the import table entries works for +// both. (MSVC style import libraries contain a couple regular object files +// for the header/trailers.) +// +// This implementation does the same as GNU dlltool does; look at the +// content of ".idata$7" sections, or for MSVC style libraries, look +// at ".idata$6" sections. +// +// For GNU style import libraries, there are also other data chunks in sections +// named ".idata$7" (entries to the IAT or ILT); these are distinguished +// by seeing that they contain relocations. (They also look like an empty +// string when looking for null termination.) +// +// Alternatively, we could do things differently - look for any .idata$2 +// section; this would be import directory entries. At offset 0xc in them +// there is the RVA of the import DLL name; look for a relocation at this +// spot and locate the symbol that it points at. That symbol may either +// be within the same object file (in the case of MSVC style import libraries) +// or another object file (in the case of GNU import libraries). +bool identifyImportName(const COFFObjectFile &Obj, StringRef ObjName, + std::vector &Names, bool IsMsStyleImplib) { + StringRef TargetName = IsMsStyleImplib ? 
".idata$6" : ".idata$7"; + for (const auto &S : Obj.sections()) { + Expected NameOrErr = S.getName(); + if (!NameOrErr) { + printError(NameOrErr.takeError(), ObjName); + return false; + } + StringRef Name = *NameOrErr; + if (Name != TargetName) + continue; + + // GNU import libraries contain .idata$7 section in the per function + // objects too, but they contain relocations. + if (!IsMsStyleImplib && !S.relocations().empty()) + continue; + + Expected ContentsOrErr = S.getContents(); + if (!ContentsOrErr) { + printError(ContentsOrErr.takeError(), ObjName); + return false; + } + StringRef Contents = *ContentsOrErr; + Contents = Contents.substr(0, Contents.find('\0')); + if (Contents.empty()) + continue; + Names.push_back(Contents); + return true; + } + return true; +} + +int doIdentify(StringRef File, bool IdentifyStrict) { + ErrorOr> MaybeBuf = MemoryBuffer::getFile( + File, /*IsText=*/false, /*RequiredNullTerminator=*/false); + if (!MaybeBuf) + return printError(errorCodeToError(MaybeBuf.getError()), File); + if (identify_magic(MaybeBuf.get()->getBuffer()) != file_magic::archive) { + llvm::errs() << File << " is not a library\n"; + return 1; + } + + std::unique_ptr B = std::move(MaybeBuf.get()); + Error Err = Error::success(); + object::Archive Archive(B->getMemBufferRef(), Err); + if (Err) + return printError(std::move(Err), B->getBufferIdentifier()); + + bool IsMsStyleImplib = false; + for (const auto &S : Archive.symbols()) { + if (S.getName() == "__NULL_IMPORT_DESCRIPTOR") { + IsMsStyleImplib = true; + break; + } + } + std::vector Names; + if (forEachCoff(Archive, B->getBufferIdentifier(), + [&](const COFFObjectFile &Obj, StringRef ObjName) -> bool { + return identifyImportName(Obj, ObjName, Names, + IsMsStyleImplib); + })) + return 1; + + if (Names.empty()) { + llvm::errs() << "No DLL import name found in " << File << "\n"; + return 1; + } + if (Names.size() > 1 && IdentifyStrict) { + llvm::errs() << File << "contains imports for two or more DLLs\n"; + return 
1; + } + + for (StringRef S : Names) + llvm::outs() << S << "\n"; + + return 0; +} + } // namespace int llvm::dlltoolDriverMain(llvm::ArrayRef ArgsArr) { @@ -173,7 +311,8 @@ int llvm::dlltoolDriverMain(llvm::ArrayRef ArgsArr) { // Handle when no input or output is specified if (Args.hasArgNoClaim(OPT_INPUT) || - (!Args.hasArgNoClaim(OPT_d) && !Args.hasArgNoClaim(OPT_l))) { + (!Args.hasArgNoClaim(OPT_d) && !Args.hasArgNoClaim(OPT_l) && + !Args.hasArgNoClaim(OPT_I))) { Table.printHelp(outs(), "llvm-dlltool [options] file...", "llvm-dlltool", false); llvm::outs() @@ -185,6 +324,11 @@ int llvm::dlltoolDriverMain(llvm::ArrayRef ArgsArr) { llvm::errs() << "ignoring unknown argument: " << Arg->getAsString(Args) << "\n"; + if (Args.hasArg(OPT_I)) { + return doIdentify(Args.getLastArg(OPT_I)->getValue(), + Args.hasArg(OPT_identify_strict)); + } + if (!Args.hasArg(OPT_d)) { llvm::errs() << "no definition file specified\n"; return 1; diff --git a/llvm/lib/ToolDrivers/llvm-dlltool/Options.td b/llvm/lib/ToolDrivers/llvm-dlltool/Options.td index 7810694c98e36..4fd80189aff29 100644 --- a/llvm/lib/ToolDrivers/llvm-dlltool/Options.td +++ b/llvm/lib/ToolDrivers/llvm-dlltool/Options.td @@ -21,6 +21,11 @@ def k_alias: Flag<["--"], "kill-at">, Alias; def no_leading_underscore: Flag<["--"], "no-leading-underscore">, HelpText<"Don't add leading underscores on symbols">; +def I: JoinedOrSeparate<["-"], "I">, HelpText<"Identify DLL name from import library">; +def I_long : JoinedOrSeparate<["--"], "identify">, Alias; + +def identify_strict : Flag<["--"], "identify-strict">, HelpText<"Error out if the --identify option detects more than one DLL">; + //============================================================================== // The flags below do nothing. They are defined only for dlltool compatibility. 
//============================================================================== diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.cpp b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.cpp index 3da52b5b4a6f1..9c869dd1bbdca 100644 --- a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.cpp +++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.cpp @@ -71,6 +71,12 @@ bool PredIterator::operator==(const PredIterator &Other) const { return OpIt == Other.OpIt && MemIt == Other.MemIt; } +void DGNode::setSchedBundle(SchedBundle &SB) { + if (this->SB != nullptr) + this->SB->eraseFromBundle(this); + this->SB = &SB; +} + DGNode::~DGNode() { if (SB == nullptr) return; diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Scheduler.cpp b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Scheduler.cpp index 3e37e07aabc5c..ad46683d95063 100644 --- a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Scheduler.cpp +++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Scheduler.cpp @@ -161,30 +161,36 @@ bool Scheduler::tryScheduleUntil(ArrayRef Instrs) { Scheduler::BndlSchedState Scheduler::getBndlSchedState(ArrayRef Instrs) const { assert(!Instrs.empty() && "Expected non-empty bundle"); - bool PartiallyScheduled = false; - bool FullyScheduled = true; - for (auto *I : Instrs) { + auto *N0 = DAG.getNode(Instrs[0]); + auto *SB0 = N0 != nullptr ? N0->getSchedBundle() : nullptr; + bool AllUnscheduled = SB0 == nullptr; + bool FullyScheduled = SB0 != nullptr && !SB0->isSingleton(); + for (auto *I : drop_begin(Instrs)) { auto *N = DAG.getNode(I); - if (N != nullptr && N->scheduled()) - PartiallyScheduled = true; - else - FullyScheduled = false; - } - if (FullyScheduled) { - // If not all instrs in the bundle are in the same SchedBundle then this - // should be considered as partially-scheduled, because we will need to - // re-schedule. 
- SchedBundle *SB = DAG.getNode(Instrs[0])->getSchedBundle(); - assert(SB != nullptr && "FullyScheduled assumes that there is an SB!"); - if (any_of(drop_begin(Instrs), [this, SB](sandboxir::Value *SBV) { - return DAG.getNode(cast(SBV)) - ->getSchedBundle() != SB; - })) + auto *SB = N != nullptr ? N->getSchedBundle() : nullptr; + if (SB != nullptr) { + // We found a scheduled instr, so there is now way all are unscheduled. + AllUnscheduled = false; + if (SB->isSingleton()) { + // We found an instruction in a temporarily scheduled singleton. There + // is no way that all instructions are scheduled in the same bundle. + FullyScheduled = false; + } + } + + if (SB != SB0) { + // Either one of SB, SB0 is null, or they are in different bundles, so + // Instrs are definitely not in the same vector bundle. FullyScheduled = false; + // One of SB, SB0 are in a vector bundle and they differ. + if ((SB != nullptr && !SB->isSingleton()) || + (SB0 != nullptr && !SB0->isSingleton())) + return BndlSchedState::AlreadyScheduled; + } } - return FullyScheduled ? BndlSchedState::FullyScheduled - : PartiallyScheduled ? BndlSchedState::PartiallyOrDifferentlyScheduled - : BndlSchedState::NoneScheduled; + return AllUnscheduled ? BndlSchedState::NoneScheduled + : FullyScheduled ? BndlSchedState::FullyScheduled + : BndlSchedState::TemporarilyScheduled; } void Scheduler::trimSchedule(ArrayRef Instrs) { @@ -203,13 +209,14 @@ void Scheduler::trimSchedule(ArrayRef Instrs) { // Instruction *TopI = &*ScheduleTopItOpt.value(); Instruction *LowestI = VecUtils::getLowest(Instrs); - // Destroy the schedule bundles from LowestI all the way to the top. + // Destroy the singleton schedule bundles from LowestI all the way to the top. 
for (auto *I = LowestI, *E = TopI->getPrevNode(); I != E; I = I->getPrevNode()) { auto *N = DAG.getNode(I); if (N == nullptr) continue; - if (auto *SB = N->getSchedBundle()) + auto *SB = N->getSchedBundle(); + if (SB->isSingleton()) eraseBundle(SB); } // The DAG Nodes contain state like the number of UnscheduledSuccs and the @@ -259,7 +266,12 @@ bool Scheduler::trySchedule(ArrayRef Instrs) { case BndlSchedState::FullyScheduled: // Nothing to do. return true; - case BndlSchedState::PartiallyOrDifferentlyScheduled: + case BndlSchedState::AlreadyScheduled: + // Instructions are part of a different vector schedule, so we can't + // schedule \p Instrs in the same bundle (without destroying the existing + // schedule). + return false; + case BndlSchedState::TemporarilyScheduled: // If one or more instrs are already scheduled we need to destroy the // top-most part of the schedule that includes the instrs in the bundle and // re-schedule. diff --git a/llvm/test/CodeGen/AMDGPU/fmaximum3.ll b/llvm/test/CodeGen/AMDGPU/fmaximum3.ll index 66de7d535db4b..2a372dffce650 100644 --- a/llvm/test/CodeGen/AMDGPU/fmaximum3.ll +++ b/llvm/test/CodeGen/AMDGPU/fmaximum3.ll @@ -1252,19 +1252,25 @@ define half @v_fmaximum3_f16(half %a, half %b, half %c) { ; GFX12-NEXT: v_maximum3_f16 v0, v0, v1, v2 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fmaximum3_f16: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_max_f16_e32 v3, v0, v1 -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_max_f16_e32 v1, v0, v2 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fmaximum3_f16: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_max_f16_e32 v3, v0, v1 +; GFX942-NEXT: v_mov_b32_e32 v4, 
0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_max_f16_e32 v1, v0, v2 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fmaximum3_f16: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v2 +; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call half @llvm.maximum.f16(half %a, half %b) %max1 = call half @llvm.maximum.f16(half %max0, half %c) ret half %max1 @@ -1281,19 +1287,25 @@ define half @v_fmaximum3_f16_commute(half %a, half %b, half %c) { ; GFX12-NEXT: v_maximum3_f16 v0, v2, v0, v1 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fmaximum3_f16_commute: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_max_f16_e32 v3, v0, v1 -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_max_f16_e32 v1, v2, v0 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v2, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fmaximum3_f16_commute: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_max_f16_e32 v3, v0, v1 +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_max_f16_e32 v1, v2, v0 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v2, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fmaximum3_f16_commute: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_pk_maximum3_f16 v0, v2, v0, v1 +; 
GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call half @llvm.maximum.f16(half %a, half %b) %max1 = call half @llvm.maximum.f16(half %c, half %max0) ret half %max1 @@ -1311,22 +1323,33 @@ define amdgpu_ps i32 @s_fmaximum3_f16(half inreg %a, half inreg %b, half inreg % ; GFX12-NEXT: s_wait_alu 0xf1ff ; GFX12-NEXT: ; return to shader part epilog ; -; GFX9-LABEL: s_fmaximum3_f16: -; GFX9: ; %bb.0: -; GFX9-NEXT: v_mov_b32_e32 v0, s1 -; GFX9-NEXT: v_max_f16_e32 v1, s0, v0 -; GFX9-NEXT: v_mov_b32_e32 v2, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, s0, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc -; GFX9-NEXT: v_max_f16_e32 v1, s2, v0 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, s2, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: v_readfirstlane_b32 s0, v0 -; GFX9-NEXT: ; return to shader part epilog +; GFX942-LABEL: s_fmaximum3_f16: +; GFX942: ; %bb.0: +; GFX942-NEXT: v_mov_b32_e32 v0, s1 +; GFX942-NEXT: v_max_f16_e32 v1, s0, v0 +; GFX942-NEXT: v_mov_b32_e32 v2, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, s0, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc +; GFX942-NEXT: v_max_f16_e32 v1, s2, v0 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, s2, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc +; GFX942-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX942-NEXT: s_nop 0 +; GFX942-NEXT: v_readfirstlane_b32 s0, v0 +; GFX942-NEXT: ; return to shader part epilog +; +; GFX950-LABEL: s_fmaximum3_f16: +; GFX950: ; %bb.0: +; GFX950-NEXT: v_mov_b32_e32 v0, s1 +; GFX950-NEXT: v_mov_b32_e32 v1, s2 +; GFX950-NEXT: v_pk_maximum3_f16 v0, s0, v0, v1 +; GFX950-NEXT: s_nop 0 +; GFX950-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX950-NEXT: s_nop 0 +; GFX950-NEXT: v_readfirstlane_b32 s0, v0 +; GFX950-NEXT: ; return to shader part epilog %max0 = call half @llvm.maximum.f16(half %a, half %b) %max1 = call half @llvm.maximum.f16(half %max0, half 
%c) %cast = bitcast half %max1 to i16 @@ -1346,19 +1369,26 @@ define half @v_fmaximum3_f16_fabs0(half %a, half %b, half %c) { ; GFX12-NEXT: v_maximum3_f16 v0, |v0|, v1, v2 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fmaximum3_f16_fabs0: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_max_f16_e64 v3, |v0|, v1 -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, |v0|, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_max_f16_e32 v1, v0, v2 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fmaximum3_f16_fabs0: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_max_f16_e64 v3, |v0|, v1 +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, |v0|, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_max_f16_e32 v1, v0, v2 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fmaximum3_f16_fabs0: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v0, 0x7fff, v0 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v2 +; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fabs = call half @llvm.fabs.f16(half %a) %max0 = call half @llvm.maximum.f16(half %a.fabs, half %b) %max1 = call half @llvm.maximum.f16(half %max0, half %c) @@ -1376,19 +1406,26 @@ define half @v_fmaximum3_f16_fabs1(half %a, half %b, half %c) { ; GFX12-NEXT: v_maximum3_f16 v0, v0, |v1|, v2 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fmaximum3_f16_fabs1: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_max_f16_e64 v3, v0, |v1| -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: 
v_cmp_o_f16_e64 vcc, v0, |v1| -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_max_f16_e32 v1, v0, v2 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fmaximum3_f16_fabs1: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_max_f16_e64 v3, v0, |v1| +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, v0, |v1| +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_max_f16_e32 v1, v0, v2 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fmaximum3_f16_fabs1: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v1, 0x7fff, v1 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v2 +; GFX950-NEXT: s_setpc_b64 s[30:31] %b.fabs = call half @llvm.fabs.f16(half %b) %max0 = call half @llvm.maximum.f16(half %a, half %b.fabs) %max1 = call half @llvm.maximum.f16(half %max0, half %c) @@ -1406,19 +1443,26 @@ define half @v_fmaximum3_f16_fabs2(half %a, half %b, half %c) { ; GFX12-NEXT: v_maximum3_f16 v0, v0, v1, |v2| ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fmaximum3_f16_fabs2: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_max_f16_e32 v3, v0, v1 -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_max_f16_e64 v1, v0, |v2| -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, |v2| -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fmaximum3_f16_fabs2: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; 
GFX942-NEXT: v_max_f16_e32 v3, v0, v1 +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_max_f16_e64 v1, v0, |v2| +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, v0, |v2| +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fmaximum3_f16_fabs2: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v2, 0x7fff, v2 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v2 +; GFX950-NEXT: s_setpc_b64 s[30:31] %c.fabs = call half @llvm.fabs.f16(half %c) %max0 = call half @llvm.maximum.f16(half %a, half %b) %max1 = call half @llvm.maximum.f16(half %max0, half %c.fabs) @@ -1436,19 +1480,28 @@ define half @v_fmaximum3_f16_fabs_all(half %a, half %b, half %c) { ; GFX12-NEXT: v_maximum3_f16 v0, |v0|, |v1|, |v2| ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fmaximum3_f16_fabs_all: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_max_f16_e64 v3, |v0|, |v1| -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, |v0|, |v1| -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_max_f16_e64 v1, v0, |v2| -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, |v2| -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fmaximum3_f16_fabs_all: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_max_f16_e64 v3, |v0|, |v1| +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, |v0|, |v1| +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_max_f16_e64 v1, v0, |v2| +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, v0, |v2| +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: 
s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fmaximum3_f16_fabs_all: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v0, 0x7fff, v0 +; GFX950-NEXT: v_and_b32_e32 v1, 0x7fff, v1 +; GFX950-NEXT: v_and_b32_e32 v2, 0x7fff, v2 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v2 +; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fabs = call half @llvm.fabs.f16(half %a) %b.fabs = call half @llvm.fabs.f16(half %b) %c.fabs = call half @llvm.fabs.f16(half %c) @@ -1468,19 +1521,28 @@ define half @v_fmaximum3_f16_fneg_all(half %a, half %b, half %c) { ; GFX12-NEXT: v_maximum3_f16 v0, -v0, -v1, -v2 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fmaximum3_f16_fneg_all: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_max_f16_e64 v3, -v0, -v1 -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -v0, -v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_max_f16_e64 v1, v0, -v2 -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, -v2 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fmaximum3_f16_fneg_all: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_max_f16_e64 v3, -v0, -v1 +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, -v0, -v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_max_f16_e64 v1, v0, -v2 +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, v0, -v2 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fmaximum3_f16_fneg_all: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_xor_b32_e32 v0, 0x8000, v0 +; GFX950-NEXT: v_xor_b32_e32 v1, 0x8000, v1 +; GFX950-NEXT: v_xor_b32_e32 v2, 0x8000, v2 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v2 +; 
GFX950-NEXT: s_setpc_b64 s[30:31] %a.fneg = fneg half %a %b.fneg = fneg half %b %c.fneg = fneg half %c @@ -1500,19 +1562,28 @@ define half @v_fmaximum3_f16_fneg_fabs_all(half %a, half %b, half %c) { ; GFX12-NEXT: v_maximum3_f16 v0, -|v0|, -|v1|, -|v2| ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fmaximum3_f16_fneg_fabs_all: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_max_f16_e64 v3, -|v0|, -|v1| -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -|v0|, -|v1| -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_max_f16_e64 v1, v0, -|v2| -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, -|v2| -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fmaximum3_f16_fneg_fabs_all: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_max_f16_e64 v3, -|v0|, -|v1| +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, -|v0|, -|v1| +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_max_f16_e64 v1, v0, -|v2| +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, v0, -|v2| +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fmaximum3_f16_fneg_fabs_all: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_or_b32_e32 v0, 0x8000, v0 +; GFX950-NEXT: v_or_b32_e32 v1, 0x8000, v1 +; GFX950-NEXT: v_or_b32_e32 v2, 0x8000, v2 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v2 +; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fabs = call half @llvm.fabs.f16(half %a) %b.fabs = call half @llvm.fabs.f16(half %b) %c.fabs = call half @llvm.fabs.f16(half %c) @@ -1535,19 +1606,26 @@ define half @v_fmaximum3_f16_fneg0(half %a, half %b, half %c) { ; GFX12-NEXT: v_maximum3_f16 v0, -v0, v1, v2 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; 
GFX9-LABEL: v_fmaximum3_f16_fneg0: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_max_f16_e64 v3, -v0, v1 -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -v0, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_max_f16_e32 v1, v0, v2 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fmaximum3_f16_fneg0: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_max_f16_e64 v3, -v0, v1 +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, -v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_max_f16_e32 v1, v0, v2 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fmaximum3_f16_fneg0: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_xor_b32_e32 v0, 0x8000, v0 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v2 +; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fneg = fneg half %a %max0 = call half @llvm.maximum.f16(half %a.fneg, half %b) %max1 = call half @llvm.maximum.f16(half %max0, half %c) @@ -1565,19 +1643,26 @@ define half @v_fmaximum3_f16_fneg1(half %a, half %b, half %c) { ; GFX12-NEXT: v_maximum3_f16 v0, v0, -v1, v2 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fmaximum3_f16_fneg1: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_max_f16_e64 v3, v0, -v1 -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, -v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_max_f16_e32 v1, v0, v2 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, 
v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fmaximum3_f16_fneg1: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_max_f16_e64 v3, v0, -v1 +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, v0, -v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_max_f16_e32 v1, v0, v2 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fmaximum3_f16_fneg1: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_xor_b32_e32 v1, 0x8000, v1 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v2 +; GFX950-NEXT: s_setpc_b64 s[30:31] %b.fneg = fneg half %b %max0 = call half @llvm.maximum.f16(half %a, half %b.fneg) %max1 = call half @llvm.maximum.f16(half %max0, half %c) @@ -1595,19 +1680,26 @@ define half @v_fmaximum3_f16_fneg2(half %a, half %b, half %c) { ; GFX12-NEXT: v_maximum3_f16 v0, v0, v1, -v2 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fmaximum3_f16_fneg2: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_max_f16_e32 v3, v0, v1 -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_max_f16_e64 v1, v0, -v2 -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, -v2 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fmaximum3_f16_fneg2: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_max_f16_e32 v3, v0, v1 +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_max_f16_e64 v1, v0, -v2 +; GFX942-NEXT: v_cmp_o_f16_e64 
vcc, v0, -v2 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fmaximum3_f16_fneg2: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_xor_b32_e32 v2, 0x8000, v2 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v2 +; GFX950-NEXT: s_setpc_b64 s[30:31] %c.fneg = fneg half %c %max0 = call half @llvm.maximum.f16(half %a, half %b) %max1 = call half @llvm.maximum.f16(half %max0, half %c.fneg) @@ -1625,19 +1717,26 @@ define half @v_fmaximum3_f16_const0(half %b, half %c) { ; GFX12-NEXT: v_maximum3_f16 v0, v0, 0x4800, v1 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fmaximum3_f16_const0: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_max_f16_e32 v2, 0x4800, v0 -; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc -; GFX9-NEXT: v_max_f16_e32 v2, v0, v1 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fmaximum3_f16_const0: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_max_f16_e32 v2, 0x4800, v0 +; GFX942-NEXT: v_mov_b32_e32 v3, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc +; GFX942-NEXT: v_max_f16_e32 v2, v0, v1 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fmaximum3_f16_const0: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: s_movk_i32 s0, 0x4800 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, s0, v1 +; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call half @llvm.maximum.f16(half 8.0, half %b) %max1 = call 
half @llvm.maximum.f16(half %max0, half %c) ret half %max1 @@ -1654,19 +1753,26 @@ define half @v_fmaximum3_f16__const2(half %a, half %b) { ; GFX12-NEXT: v_maximum3_f16 v0, v0, v1, 0x4800 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fmaximum3_f16__const2: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_max_f16_e32 v2, v0, v1 -; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc -; GFX9-NEXT: v_max_f16_e32 v1, 0x4800, v0 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fmaximum3_f16__const2: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_max_f16_e32 v2, v0, v1 +; GFX942-NEXT: v_mov_b32_e32 v3, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc +; GFX942-NEXT: v_max_f16_e32 v1, 0x4800, v0 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fmaximum3_f16__const2: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: s_movk_i32 s0, 0x4800 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, s0 +; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call half @llvm.maximum.f16(half %a, half %b) %max1 = call half @llvm.maximum.f16(half %max0, half 8.0) ret half %max1 @@ -1683,19 +1789,25 @@ define half @v_fmaximum3_f16_inlineimm0(half %b, half %c) { ; GFX12-NEXT: v_maximum3_f16 v0, v0, 4.0, v1 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fmaximum3_f16_inlineimm0: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_max_f16_e32 v2, 4.0, v0 -; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 
vcc, v0, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc -; GFX9-NEXT: v_max_f16_e32 v2, v0, v1 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fmaximum3_f16_inlineimm0: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_max_f16_e32 v2, 4.0, v0 +; GFX942-NEXT: v_mov_b32_e32 v3, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc +; GFX942-NEXT: v_max_f16_e32 v2, v0, v1 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fmaximum3_f16_inlineimm0: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, 4.0, v1 +; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call half @llvm.maximum.f16(half 4.0, half %b) %max1 = call half @llvm.maximum.f16(half %max0, half %c) ret half %max1 @@ -1712,19 +1824,25 @@ define half @v_fmaximum3_f16__inlineimm(half %a, half %b) { ; GFX12-NEXT: v_maximum3_f16 v0, v0, v1, 4.0 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fmaximum3_f16__inlineimm: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_max_f16_e32 v2, v0, v1 -; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc -; GFX9-NEXT: v_max_f16_e32 v1, 4.0, v0 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fmaximum3_f16__inlineimm: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_max_f16_e32 v2, v0, v1 +; GFX942-NEXT: v_mov_b32_e32 v3, 0x7e00 +; 
GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc +; GFX942-NEXT: v_max_f16_e32 v1, 4.0, v0 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fmaximum3_f16__inlineimm: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, 4.0 +; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call half @llvm.maximum.f16(half %a, half %b) %max1 = call half @llvm.maximum.f16(half %max0, half 4.0) ret half %max1 @@ -1743,19 +1861,27 @@ define half @v_fmaximum3_f16_const1_const2(half %a) { ; GFX12-NEXT: v_maximum3_f16 v0, v0, s0, 0x4c00 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fmaximum3_f16_const1_const2: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_max_f16_e32 v1, 0x4800, v0 -; GFX9-NEXT: v_mov_b32_e32 v2, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc -; GFX9-NEXT: v_max_f16_e32 v1, 0x4c00, v0 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fmaximum3_f16_const1_const2: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_max_f16_e32 v1, 0x4800, v0 +; GFX942-NEXT: v_mov_b32_e32 v2, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc +; GFX942-NEXT: v_max_f16_e32 v1, 0x4c00, v0 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fmaximum3_f16_const1_const2: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: s_movk_i32 
s0, 0x4800 +; GFX950-NEXT: v_mov_b32_e32 v1, 0x4c00 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, s0, v1 +; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call half @llvm.maximum.f16(half %a, half 8.0) %max1 = call half @llvm.maximum.f16(half %max0, half 16.0) ret half %max1 @@ -1802,9 +1928,7 @@ define <2 x half> @v_fmaximum3_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c ; GFX950-LABEL: v_fmaximum3_v2f16: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v1 -; GFX950-NEXT: s_nop 0 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v2, v0, v0 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v2, v0, v1 ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %a, <2 x half> %b) %max1 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %c, <2 x half> %max0) @@ -1852,9 +1976,7 @@ define <2 x half> @v_fmaximum3_v2f16_commute(<2 x half> %a, <2 x half> %b, <2 x ; GFX950-LABEL: v_fmaximum3_v2f16_commute: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v1 -; GFX950-NEXT: s_nop 0 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v2 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v2 ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %a, <2 x half> %b) %max1 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %max0, <2 x half> %c) @@ -1910,9 +2032,7 @@ define <2 x half> @v_fmaximum3_v2f16__fabs_all(<2 x half> %a, <2 x half> %b, <2 ; GFX950-NEXT: v_and_b32_e32 v0, 0x7fff7fff, v0 ; GFX950-NEXT: v_and_b32_e32 v1, 0x7fff7fff, v1 ; GFX950-NEXT: v_and_b32_e32 v2, 0x7fff7fff, v2 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v1 -; GFX950-NEXT: s_nop 0 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v2 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v2 ; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %a) %b.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %b) @@ -1963,9 
+2083,7 @@ define <2 x half> @v_fmaximum3_v2f16__fneg_all(<2 x half> %a, <2 x half> %b, <2 ; GFX950-LABEL: v_fmaximum3_v2f16__fneg_all: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v1 neg_lo:[1,1,1] neg_hi:[1,1,1] -; GFX950-NEXT: s_nop 0 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v2 neg_lo:[0,1,1] neg_hi:[0,1,1] +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v2 neg_lo:[1,1,1] neg_hi:[1,1,1] ; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fneg = fneg <2 x half> %a %b.fneg = fneg <2 x half> %b @@ -2016,9 +2134,7 @@ define <2 x half> @v_fmaximum3_v2f16__inlineimm1(<2 x half> %a, <2 x half> %c) { ; GFX950-LABEL: v_fmaximum3_v2f16__inlineimm1: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, 2.0, 2.0 op_sel_hi:[1,0,0] -; GFX950-NEXT: s_nop 0 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v1 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, 2.0, v1 op_sel_hi:[1,0,1] ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %a, <2 x half> ) %max1 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %max0, <2 x half> %c) @@ -2066,9 +2182,7 @@ define <2 x half> @v_fmaximum3_v2f16__inlineimm2(<2 x half> %a, <2 x half> %b) { ; GFX950-LABEL: v_fmaximum3_v2f16__inlineimm2: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v1 -; GFX950-NEXT: s_nop 0 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, 4.0, 4.0 op_sel_hi:[1,0,0] +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, 4.0 op_sel_hi:[1,1,0] ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %a, <2 x half> %b) %max1 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %max0, <2 x half> ) @@ -2130,10 +2244,8 @@ define <3 x half> @v_fmaximum3_v3f16(<3 x half> %a, <3 x half> %b, <3 x half> %c ; GFX950-LABEL: v_fmaximum3_v3f16: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) 
expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v2 -; GFX950-NEXT: v_pk_maximum3_f16 v1, v5, v1, v1 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v4, v0, v0 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v4, v0, v2 +; GFX950-NEXT: v_pk_maximum3_f16 v1, v5, v1, v3 ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %a, <3 x half> %b) %max1 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %c, <3 x half> %max0) @@ -2195,10 +2307,8 @@ define <3 x half> @v_fmaximum3_v3f16_commute(<3 x half> %a, <3 x half> %b, <3 x ; GFX950-LABEL: v_fmaximum3_v3f16_commute: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v2 -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v5, v5 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v4, v4 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v4 +; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, v5 ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %a, <3 x half> %b) %max1 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %max0, <3 x half> %c) @@ -2271,16 +2381,14 @@ define <3 x half> @v_fmaximum3_v3f16__fabs_all(<3 x half> %a, <3 x half> %b, <3 ; GFX950-LABEL: v_fmaximum3_v3f16__fabs_all: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_and_b32_e32 v0, 0x7fff7fff, v0 ; GFX950-NEXT: v_and_b32_e32 v1, 0x7fff7fff, v1 -; GFX950-NEXT: v_and_b32_e32 v2, 0x7fff7fff, v2 +; GFX950-NEXT: v_and_b32_e32 v0, 0x7fff7fff, v0 ; GFX950-NEXT: v_and_b32_e32 v3, 0x7fff7fff, v3 +; GFX950-NEXT: v_and_b32_e32 v2, 0x7fff7fff, v2 ; GFX950-NEXT: v_and_b32_e32 v5, 0x7fff7fff, v5 ; GFX950-NEXT: v_and_b32_e32 v4, 0x7fff7fff, v4 -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v2 -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v5, v5 -; GFX950-NEXT: v_pk_maximum3_f16 v0, 
v0, v4, v4 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v4 +; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, v5 ; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fabs = call <3 x half> @llvm.fabs.v3f16(<3 x half> %a) %b.fabs = call <3 x half> @llvm.fabs.v3f16(<3 x half> %b) @@ -2345,10 +2453,8 @@ define <3 x half> @v_fmaximum3_v3f16__fneg_all(<3 x half> %a, <3 x half> %b, <3 ; GFX950-LABEL: v_fmaximum3_v3f16__fneg_all: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, v3 neg_lo:[1,1,1] neg_hi:[1,1,1] -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v2 neg_lo:[1,1,1] neg_hi:[1,1,1] -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v5, v5 neg_lo:[0,1,1] neg_hi:[0,1,1] -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v4, v4 neg_lo:[0,1,1] neg_hi:[0,1,1] +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v4 neg_lo:[1,1,1] neg_hi:[1,1,1] +; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, v5 neg_lo:[1,1,1] neg_hi:[1,1,1] ; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fneg = fneg <3 x half> %a %b.fneg = fneg <3 x half> %b @@ -2410,10 +2516,8 @@ define <3 x half> @v_fmaximum3_v3f16__inlineimm1(<3 x half> %a, <3 x half> %c) { ; GFX950-LABEL: v_fmaximum3_v3f16__inlineimm1: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, 2.0, 2.0 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, 2.0, 2.0 op_sel_hi:[1,0,0] -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v2 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, 2.0, v2 op_sel_hi:[1,0,1] +; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, 2.0, v3 ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %a, <3 x half> ) %max1 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %max0, <3 x half> %c) @@ -2475,10 +2579,8 @@ define <3 x half> @v_fmaximum3_v3f16__inlineimm2(<3 x half> %a, <3 x half> %b) { ; GFX950-LABEL: v_fmaximum3_v3f16__inlineimm2: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v2 -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, 4.0, 4.0 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, 4.0, 4.0 op_sel_hi:[1,0,0] +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, 4.0 op_sel_hi:[1,1,0] +; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, 4.0 ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %a, <3 x half> %b) %max1 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %max0, <3 x half> ) @@ -2546,10 +2648,8 @@ define <4 x half> @v_fmaximum3_v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c ; GFX950-LABEL: v_fmaximum3_v4f16: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v2 -; GFX950-NEXT: v_pk_maximum3_f16 v1, v5, v1, v1 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v4, v0, v0 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v4, v0, v2 +; GFX950-NEXT: v_pk_maximum3_f16 v1, v5, v1, v3 ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %a, <4 x half> %b) %max1 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %c, <4 x half> %max0) @@ -2617,10 +2717,8 @@ define <4 x half> @v_fmaximum3_v4f16_commute(<4 x half> %a, <4 x half> %b, <4 x ; GFX950-LABEL: v_fmaximum3_v4f16_commute: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v2 -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v5, v5 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v4, v4 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v4 +; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, v5 ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %a, <4 x half> %b) %max1 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %max0, <4 x half> %c) @@ -2699,16 +2797,14 @@ define <4 x half> 
@v_fmaximum3_v4f16__fabs_all(<4 x half> %a, <4 x half> %b, <4 ; GFX950-LABEL: v_fmaximum3_v4f16__fabs_all: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_and_b32_e32 v0, 0x7fff7fff, v0 ; GFX950-NEXT: v_and_b32_e32 v1, 0x7fff7fff, v1 -; GFX950-NEXT: v_and_b32_e32 v2, 0x7fff7fff, v2 +; GFX950-NEXT: v_and_b32_e32 v0, 0x7fff7fff, v0 ; GFX950-NEXT: v_and_b32_e32 v3, 0x7fff7fff, v3 +; GFX950-NEXT: v_and_b32_e32 v2, 0x7fff7fff, v2 ; GFX950-NEXT: v_and_b32_e32 v5, 0x7fff7fff, v5 ; GFX950-NEXT: v_and_b32_e32 v4, 0x7fff7fff, v4 -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v2 -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v5, v5 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v4, v4 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v4 +; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, v5 ; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fabs = call <4 x half> @llvm.fabs.v4f16(<4 x half> %a) %b.fabs = call <4 x half> @llvm.fabs.v4f16(<4 x half> %b) @@ -2779,10 +2875,8 @@ define <4 x half> @v_fmaximum3_v4f16__fneg_all(<4 x half> %a, <4 x half> %b, <4 ; GFX950-LABEL: v_fmaximum3_v4f16__fneg_all: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, v3 neg_lo:[1,1,1] neg_hi:[1,1,1] -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v2 neg_lo:[1,1,1] neg_hi:[1,1,1] -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v5, v5 neg_lo:[0,1,1] neg_hi:[0,1,1] -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v4, v4 neg_lo:[0,1,1] neg_hi:[0,1,1] +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v4 neg_lo:[1,1,1] neg_hi:[1,1,1] +; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, v5 neg_lo:[1,1,1] neg_hi:[1,1,1] ; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fneg = fneg <4 x half> %a %b.fneg = fneg <4 x half> %b @@ -2851,10 +2945,8 @@ define <4 x half> @v_fmaximum3_v4f16__inlineimm1(<4 x half> %a, <4 x half> %c) { ; GFX950-LABEL: v_fmaximum3_v4f16__inlineimm1: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, 2.0, 2.0 op_sel_hi:[1,0,0] -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, 2.0, 2.0 op_sel_hi:[1,0,0] -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v2 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, 2.0, v2 op_sel_hi:[1,0,1] +; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, 2.0, v3 op_sel_hi:[1,0,1] ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %a, <4 x half> ) %max1 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %max0, <4 x half> %c) @@ -2922,10 +3014,8 @@ define <4 x half> @v_fmaximum3_v4f16__inlineimm2(<4 x half> %a, <4 x half> %b) { ; GFX950-LABEL: v_fmaximum3_v4f16__inlineimm2: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, v2 -; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, 4.0, 4.0 op_sel_hi:[1,0,0] -; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, 4.0, 4.0 op_sel_hi:[1,0,0] +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v2, 4.0 op_sel_hi:[1,1,0] +; GFX950-NEXT: v_pk_maximum3_f16 v1, v1, v3, 4.0 op_sel_hi:[1,1,0] ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %a, <4 x half> %b) %max1 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %max0, <4 x half> ) @@ -3623,20 +3713,30 @@ define <2 x half> @v_no_fmaximum3_f16__multi_use(half %a, half %b, half %c) { ; GFX12-NEXT: v_pack_b32_f16 v0, v0, v1 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_no_fmaximum3_f16__multi_use: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_max_f16_e32 v3, v0, v1 -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_max_f16_e32 v1, v0, v2 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, 
v1, vcc -; GFX9-NEXT: v_pack_b32_f16 v0, v0, v1 -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_no_fmaximum3_f16__multi_use: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_max_f16_e32 v3, v0, v1 +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_max_f16_e32 v1, v0, v2 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc +; GFX942-NEXT: v_pack_b32_f16 v0, v0, v1 +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_no_fmaximum3_f16__multi_use: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v1 +; GFX950-NEXT: s_nop 0 +; GFX950-NEXT: v_pk_maximum3_f16 v1, v0, v2, v2 +; GFX950-NEXT: s_nop 0 +; GFX950-NEXT: v_pack_b32_f16 v0, v0, v1 +; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call half @llvm.maximum.f16(half %a, half %b) %max1 = call half @llvm.maximum.f16(half %max0, half %c) %insert.0 = insertelement <2 x half> poison, half %max0, i32 0 @@ -3654,23 +3754,35 @@ define amdgpu_ps <2 x i32> @s_no_fmaximum3_f16__multi_use(half inreg %a, half in ; GFX12-NEXT: s_and_b32 s1, 0xffff, s1 ; GFX12-NEXT: ; return to shader part epilog ; -; GFX9-LABEL: s_no_fmaximum3_f16__multi_use: -; GFX9: ; %bb.0: -; GFX9-NEXT: v_mov_b32_e32 v0, s1 -; GFX9-NEXT: v_max_f16_e32 v1, s0, v0 -; GFX9-NEXT: v_mov_b32_e32 v2, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, s0, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc -; GFX9-NEXT: v_max_f16_e32 v1, s2, v0 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, s2, v0 -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc -; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; GFX9-NEXT: v_readfirstlane_b32 s0, v0 -; GFX9-NEXT: v_readfirstlane_b32 s1, v1 -; GFX9-NEXT: ; return to 
shader part epilog +; GFX942-LABEL: s_no_fmaximum3_f16__multi_use: +; GFX942: ; %bb.0: +; GFX942-NEXT: v_mov_b32_e32 v0, s1 +; GFX942-NEXT: v_max_f16_e32 v1, s0, v0 +; GFX942-NEXT: v_mov_b32_e32 v2, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, s0, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc +; GFX942-NEXT: v_max_f16_e32 v1, s2, v0 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, s2, v0 +; GFX942-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX942-NEXT: s_nop 0 +; GFX942-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc +; GFX942-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX942-NEXT: v_readfirstlane_b32 s0, v0 +; GFX942-NEXT: v_readfirstlane_b32 s1, v1 +; GFX942-NEXT: ; return to shader part epilog +; +; GFX950-LABEL: s_no_fmaximum3_f16__multi_use: +; GFX950: ; %bb.0: +; GFX950-NEXT: v_mov_b32_e32 v0, s0 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, s1, s1 +; GFX950-NEXT: s_nop 0 +; GFX950-NEXT: v_pk_maximum3_f16 v1, v0, s2, s2 +; GFX950-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX950-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX950-NEXT: v_readfirstlane_b32 s0, v0 +; GFX950-NEXT: v_readfirstlane_b32 s1, v1 +; GFX950-NEXT: ; return to shader part epilog %max0 = call half @llvm.maximum.f16(half %a, half %b) %max1 = call half @llvm.maximum.f16(half %max0, half %c) %cast0 = bitcast half %max0 to i16 diff --git a/llvm/test/CodeGen/AMDGPU/fminimum3.ll b/llvm/test/CodeGen/AMDGPU/fminimum3.ll index 56e0b2c2f06ce..34d7e5acb7896 100644 --- a/llvm/test/CodeGen/AMDGPU/fminimum3.ll +++ b/llvm/test/CodeGen/AMDGPU/fminimum3.ll @@ -1252,19 +1252,25 @@ define half @v_fminimum3_f16(half %a, half %b, half %c) { ; GFX12-NEXT: v_minimum3_f16 v0, v0, v1, v2 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fminimum3_f16: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_min_f16_e32 v3, v0, v1 -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: 
v_min_f16_e32 v1, v0, v2 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fminimum3_f16: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_min_f16_e32 v3, v0, v1 +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_min_f16_e32 v1, v0, v2 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fminimum3_f16: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v2 +; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call half @llvm.minimum.f16(half %a, half %b) %max1 = call half @llvm.minimum.f16(half %max0, half %c) ret half %max1 @@ -1281,19 +1287,25 @@ define half @v_fminimum3_f16_commute(half %a, half %b, half %c) { ; GFX12-NEXT: v_minimum3_f16 v0, v2, v0, v1 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fminimum3_f16_commute: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_min_f16_e32 v3, v0, v1 -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_min_f16_e32 v1, v2, v0 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v2, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fminimum3_f16_commute: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_min_f16_e32 v3, v0, v1 +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: 
v_min_f16_e32 v1, v2, v0 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v2, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fminimum3_f16_commute: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_pk_minimum3_f16 v0, v2, v0, v1 +; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call half @llvm.minimum.f16(half %a, half %b) %max1 = call half @llvm.minimum.f16(half %c, half %max0) ret half %max1 @@ -1311,22 +1323,33 @@ define amdgpu_ps i32 @s_fminimum3_f16(half inreg %a, half inreg %b, half inreg % ; GFX12-NEXT: s_wait_alu 0xf1ff ; GFX12-NEXT: ; return to shader part epilog ; -; GFX9-LABEL: s_fminimum3_f16: -; GFX9: ; %bb.0: -; GFX9-NEXT: v_mov_b32_e32 v0, s1 -; GFX9-NEXT: v_min_f16_e32 v1, s0, v0 -; GFX9-NEXT: v_mov_b32_e32 v2, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, s0, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc -; GFX9-NEXT: v_min_f16_e32 v1, s2, v0 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, s2, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: v_readfirstlane_b32 s0, v0 -; GFX9-NEXT: ; return to shader part epilog +; GFX942-LABEL: s_fminimum3_f16: +; GFX942: ; %bb.0: +; GFX942-NEXT: v_mov_b32_e32 v0, s1 +; GFX942-NEXT: v_min_f16_e32 v1, s0, v0 +; GFX942-NEXT: v_mov_b32_e32 v2, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, s0, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc +; GFX942-NEXT: v_min_f16_e32 v1, s2, v0 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, s2, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc +; GFX942-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX942-NEXT: s_nop 0 +; GFX942-NEXT: v_readfirstlane_b32 s0, v0 +; GFX942-NEXT: ; return to shader part epilog +; +; GFX950-LABEL: s_fminimum3_f16: +; GFX950: ; %bb.0: +; GFX950-NEXT: v_mov_b32_e32 v0, s1 +; GFX950-NEXT: 
v_mov_b32_e32 v1, s2 +; GFX950-NEXT: v_pk_minimum3_f16 v0, s0, v0, v1 +; GFX950-NEXT: s_nop 0 +; GFX950-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX950-NEXT: s_nop 0 +; GFX950-NEXT: v_readfirstlane_b32 s0, v0 +; GFX950-NEXT: ; return to shader part epilog %max0 = call half @llvm.minimum.f16(half %a, half %b) %max1 = call half @llvm.minimum.f16(half %max0, half %c) %cast = bitcast half %max1 to i16 @@ -1346,19 +1369,26 @@ define half @v_fminimum3_f16_fabs0(half %a, half %b, half %c) { ; GFX12-NEXT: v_minimum3_f16 v0, |v0|, v1, v2 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fminimum3_f16_fabs0: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_min_f16_e64 v3, |v0|, v1 -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, |v0|, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_min_f16_e32 v1, v0, v2 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fminimum3_f16_fabs0: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_min_f16_e64 v3, |v0|, v1 +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, |v0|, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_min_f16_e32 v1, v0, v2 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fminimum3_f16_fabs0: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v0, 0x7fff, v0 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v2 +; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fabs = call half @llvm.fabs.f16(half %a) %max0 = call half @llvm.minimum.f16(half %a.fabs, half %b) %max1 = call half @llvm.minimum.f16(half %max0, half %c) @@ -1376,19 
+1406,26 @@ define half @v_fminimum3_f16_fabs1(half %a, half %b, half %c) { ; GFX12-NEXT: v_minimum3_f16 v0, v0, |v1|, v2 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fminimum3_f16_fabs1: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_min_f16_e64 v3, v0, |v1| -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, |v1| -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_min_f16_e32 v1, v0, v2 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fminimum3_f16_fabs1: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_min_f16_e64 v3, v0, |v1| +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, v0, |v1| +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_min_f16_e32 v1, v0, v2 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fminimum3_f16_fabs1: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v1, 0x7fff, v1 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v2 +; GFX950-NEXT: s_setpc_b64 s[30:31] %b.fabs = call half @llvm.fabs.f16(half %b) %max0 = call half @llvm.minimum.f16(half %a, half %b.fabs) %max1 = call half @llvm.minimum.f16(half %max0, half %c) @@ -1406,19 +1443,26 @@ define half @v_fminimum3_f16_fabs2(half %a, half %b, half %c) { ; GFX12-NEXT: v_minimum3_f16 v0, v0, v1, |v2| ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fminimum3_f16_fabs2: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_min_f16_e32 v3, v0, v1 -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX9-NEXT: s_nop 1 
-; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_min_f16_e64 v1, v0, |v2| -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, |v2| -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fminimum3_f16_fabs2: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_min_f16_e32 v3, v0, v1 +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_min_f16_e64 v1, v0, |v2| +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, v0, |v2| +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fminimum3_f16_fabs2: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v2, 0x7fff, v2 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v2 +; GFX950-NEXT: s_setpc_b64 s[30:31] %c.fabs = call half @llvm.fabs.f16(half %c) %max0 = call half @llvm.minimum.f16(half %a, half %b) %max1 = call half @llvm.minimum.f16(half %max0, half %c.fabs) @@ -1436,19 +1480,28 @@ define half @v_fminimum3_f16_fabs_all(half %a, half %b, half %c) { ; GFX12-NEXT: v_minimum3_f16 v0, |v0|, |v1|, |v2| ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fminimum3_f16_fabs_all: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_min_f16_e64 v3, |v0|, |v1| -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, |v0|, |v1| -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_min_f16_e64 v1, v0, |v2| -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, |v2| -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fminimum3_f16_fabs_all: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_min_f16_e64 v3, 
|v0|, |v1| +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, |v0|, |v1| +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_min_f16_e64 v1, v0, |v2| +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, v0, |v2| +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fminimum3_f16_fabs_all: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v0, 0x7fff, v0 +; GFX950-NEXT: v_and_b32_e32 v1, 0x7fff, v1 +; GFX950-NEXT: v_and_b32_e32 v2, 0x7fff, v2 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v2 +; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fabs = call half @llvm.fabs.f16(half %a) %b.fabs = call half @llvm.fabs.f16(half %b) %c.fabs = call half @llvm.fabs.f16(half %c) @@ -1468,19 +1521,28 @@ define half @v_fminimum3_f16_fneg_all(half %a, half %b, half %c) { ; GFX12-NEXT: v_minimum3_f16 v0, -v0, -v1, -v2 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fminimum3_f16_fneg_all: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_min_f16_e64 v3, -v0, -v1 -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -v0, -v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_min_f16_e64 v1, v0, -v2 -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, -v2 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fminimum3_f16_fneg_all: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_min_f16_e64 v3, -v0, -v1 +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, -v0, -v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_min_f16_e64 v1, v0, -v2 +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, v0, -v2 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, 
vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fminimum3_f16_fneg_all: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_xor_b32_e32 v0, 0x8000, v0 +; GFX950-NEXT: v_xor_b32_e32 v1, 0x8000, v1 +; GFX950-NEXT: v_xor_b32_e32 v2, 0x8000, v2 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v2 +; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fneg = fneg half %a %b.fneg = fneg half %b %c.fneg = fneg half %c @@ -1500,19 +1562,28 @@ define half @v_fminimum3_f16_fneg_fabs_all(half %a, half %b, half %c) { ; GFX12-NEXT: v_minimum3_f16 v0, -|v0|, -|v1|, -|v2| ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fminimum3_f16_fneg_fabs_all: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_min_f16_e64 v3, -|v0|, -|v1| -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -|v0|, -|v1| -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_min_f16_e64 v1, v0, -|v2| -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, -|v2| -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fminimum3_f16_fneg_fabs_all: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_min_f16_e64 v3, -|v0|, -|v1| +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, -|v0|, -|v1| +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_min_f16_e64 v1, v0, -|v2| +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, v0, -|v2| +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fminimum3_f16_fneg_fabs_all: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_or_b32_e32 v0, 0x8000, v0 +; GFX950-NEXT: v_or_b32_e32 v1, 0x8000, v1 +; GFX950-NEXT: v_or_b32_e32 v2, 0x8000, v2 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v2 +; 
GFX950-NEXT: s_setpc_b64 s[30:31] %a.fabs = call half @llvm.fabs.f16(half %a) %b.fabs = call half @llvm.fabs.f16(half %b) %c.fabs = call half @llvm.fabs.f16(half %c) @@ -1535,19 +1606,26 @@ define half @v_fminimum3_f16_fneg0(half %a, half %b, half %c) { ; GFX12-NEXT: v_minimum3_f16 v0, -v0, v1, v2 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fminimum3_f16_fneg0: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_min_f16_e64 v3, -v0, v1 -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -v0, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_min_f16_e32 v1, v0, v2 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fminimum3_f16_fneg0: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_min_f16_e64 v3, -v0, v1 +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, -v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_min_f16_e32 v1, v0, v2 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fminimum3_f16_fneg0: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_xor_b32_e32 v0, 0x8000, v0 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v2 +; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fneg = fneg half %a %max0 = call half @llvm.minimum.f16(half %a.fneg, half %b) %max1 = call half @llvm.minimum.f16(half %max0, half %c) @@ -1565,19 +1643,26 @@ define half @v_fminimum3_f16_fneg1(half %a, half %b, half %c) { ; GFX12-NEXT: v_minimum3_f16 v0, v0, -v1, v2 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fminimum3_f16_fneg1: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) 
lgkmcnt(0) -; GFX9-NEXT: v_min_f16_e64 v3, v0, -v1 -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, -v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_min_f16_e32 v1, v0, v2 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fminimum3_f16_fneg1: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_min_f16_e64 v3, v0, -v1 +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, v0, -v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_min_f16_e32 v1, v0, v2 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fminimum3_f16_fneg1: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_xor_b32_e32 v1, 0x8000, v1 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v2 +; GFX950-NEXT: s_setpc_b64 s[30:31] %b.fneg = fneg half %b %max0 = call half @llvm.minimum.f16(half %a, half %b.fneg) %max1 = call half @llvm.minimum.f16(half %max0, half %c) @@ -1595,19 +1680,26 @@ define half @v_fminimum3_f16_fneg2(half %a, half %b, half %c) { ; GFX12-NEXT: v_minimum3_f16 v0, v0, v1, -v2 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fminimum3_f16_fneg2: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_min_f16_e32 v3, v0, v1 -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_min_f16_e64 v1, v0, -v2 -; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, -v2 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fminimum3_f16_fneg2: +; 
GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_min_f16_e32 v3, v0, v1 +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_min_f16_e64 v1, v0, -v2 +; GFX942-NEXT: v_cmp_o_f16_e64 vcc, v0, -v2 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fminimum3_f16_fneg2: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_xor_b32_e32 v2, 0x8000, v2 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v2 +; GFX950-NEXT: s_setpc_b64 s[30:31] %c.fneg = fneg half %c %max0 = call half @llvm.minimum.f16(half %a, half %b) %max1 = call half @llvm.minimum.f16(half %max0, half %c.fneg) @@ -1625,19 +1717,26 @@ define half @v_fminimum3_f16_const0(half %b, half %c) { ; GFX12-NEXT: v_minimum3_f16 v0, v0, 0x4800, v1 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fminimum3_f16_const0: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_min_f16_e32 v2, 0x4800, v0 -; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc -; GFX9-NEXT: v_min_f16_e32 v2, v0, v1 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fminimum3_f16_const0: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_min_f16_e32 v2, 0x4800, v0 +; GFX942-NEXT: v_mov_b32_e32 v3, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc +; GFX942-NEXT: v_min_f16_e32 v2, v0, v1 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc +; 
GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fminimum3_f16_const0: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: s_movk_i32 s0, 0x4800 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, s0, v1 +; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call half @llvm.minimum.f16(half 8.0, half %b) %max1 = call half @llvm.minimum.f16(half %max0, half %c) ret half %max1 @@ -1654,19 +1753,26 @@ define half @v_fminimum3_f16__const2(half %a, half %b) { ; GFX12-NEXT: v_minimum3_f16 v0, v0, v1, 0x4800 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fminimum3_f16__const2: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_min_f16_e32 v2, v0, v1 -; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc -; GFX9-NEXT: v_min_f16_e32 v1, 0x4800, v0 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fminimum3_f16__const2: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_min_f16_e32 v2, v0, v1 +; GFX942-NEXT: v_mov_b32_e32 v3, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc +; GFX942-NEXT: v_min_f16_e32 v1, 0x4800, v0 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fminimum3_f16__const2: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: s_movk_i32 s0, 0x4800 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, s0 +; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call half @llvm.minimum.f16(half %a, half %b) %max1 = call half @llvm.minimum.f16(half %max0, half 8.0) ret half %max1 @@ -1683,19 +1789,25 @@ define half 
@v_fminimum3_f16_inlineimm0(half %b, half %c) { ; GFX12-NEXT: v_minimum3_f16 v0, v0, 4.0, v1 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fminimum3_f16_inlineimm0: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_min_f16_e32 v2, 4.0, v0 -; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc -; GFX9-NEXT: v_min_f16_e32 v2, v0, v1 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fminimum3_f16_inlineimm0: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_min_f16_e32 v2, 4.0, v0 +; GFX942-NEXT: v_mov_b32_e32 v3, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc +; GFX942-NEXT: v_min_f16_e32 v2, v0, v1 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fminimum3_f16_inlineimm0: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, 4.0, v1 +; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call half @llvm.minimum.f16(half 4.0, half %b) %max1 = call half @llvm.minimum.f16(half %max0, half %c) ret half %max1 @@ -1712,19 +1824,25 @@ define half @v_fminimum3_f16__inlineimm(half %a, half %b) { ; GFX12-NEXT: v_minimum3_f16 v0, v0, v1, 4.0 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fminimum3_f16__inlineimm: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_min_f16_e32 v2, v0, v1 -; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc -; GFX9-NEXT: v_min_f16_e32 v1, 4.0, v0 -; 
GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fminimum3_f16__inlineimm: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_min_f16_e32 v2, v0, v1 +; GFX942-NEXT: v_mov_b32_e32 v3, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc +; GFX942-NEXT: v_min_f16_e32 v1, 4.0, v0 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fminimum3_f16__inlineimm: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, 4.0 +; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call half @llvm.minimum.f16(half %a, half %b) %max1 = call half @llvm.minimum.f16(half %max0, half 4.0) ret half %max1 @@ -1743,19 +1861,27 @@ define half @v_fminimum3_f16_const1_const2(half %a) { ; GFX12-NEXT: v_minimum3_f16 v0, v0, s0, 0x4c00 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_fminimum3_f16_const1_const2: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_min_f16_e32 v1, 0x4800, v0 -; GFX9-NEXT: v_mov_b32_e32 v2, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc -; GFX9-NEXT: v_min_f16_e32 v1, 0x4c00, v0 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_fminimum3_f16_const1_const2: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_min_f16_e32 v1, 0x4800, v0 +; GFX942-NEXT: v_mov_b32_e32 v2, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc +; 
GFX942-NEXT: v_min_f16_e32 v1, 0x4c00, v0 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fminimum3_f16_const1_const2: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: s_movk_i32 s0, 0x4800 +; GFX950-NEXT: v_mov_b32_e32 v1, 0x4c00 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, s0, v1 +; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call half @llvm.minimum.f16(half %a, half 8.0) %max1 = call half @llvm.minimum.f16(half %max0, half 16.0) ret half %max1 @@ -1802,9 +1928,7 @@ define <2 x half> @v_fminimum3_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c ; GFX950-LABEL: v_fminimum3_v2f16: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v1 -; GFX950-NEXT: s_nop 0 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v2, v0, v0 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v2, v0, v1 ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %a, <2 x half> %b) %max1 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %c, <2 x half> %max0) @@ -1852,9 +1976,7 @@ define <2 x half> @v_fminimum3_v2f16_commute(<2 x half> %a, <2 x half> %b, <2 x ; GFX950-LABEL: v_fminimum3_v2f16_commute: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v1 -; GFX950-NEXT: s_nop 0 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v2 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v2 ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %a, <2 x half> %b) %max1 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %max0, <2 x half> %c) @@ -1910,9 +2032,7 @@ define <2 x half> @v_fminimum3_v2f16__fabs_all(<2 x half> %a, <2 x half> %b, <2 ; GFX950-NEXT: v_and_b32_e32 v0, 0x7fff7fff, v0 ; GFX950-NEXT: v_and_b32_e32 v1, 0x7fff7fff, v1 ; GFX950-NEXT: v_and_b32_e32 v2, 
0x7fff7fff, v2 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v1 -; GFX950-NEXT: s_nop 0 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v2 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v2 ; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %a) %b.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %b) @@ -1963,9 +2083,7 @@ define <2 x half> @v_fminimum3_v2f16__fneg_all(<2 x half> %a, <2 x half> %b, <2 ; GFX950-LABEL: v_fminimum3_v2f16__fneg_all: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v1 neg_lo:[1,1,1] neg_hi:[1,1,1] -; GFX950-NEXT: s_nop 0 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v2 neg_lo:[0,1,1] neg_hi:[0,1,1] +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v2 neg_lo:[1,1,1] neg_hi:[1,1,1] ; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fneg = fneg <2 x half> %a %b.fneg = fneg <2 x half> %b @@ -2016,9 +2134,7 @@ define <2 x half> @v_fminimum3_v2f16__inlineimm1(<2 x half> %a, <2 x half> %c) { ; GFX950-LABEL: v_fminimum3_v2f16__inlineimm1: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, 2.0, 2.0 op_sel_hi:[1,0,0] -; GFX950-NEXT: s_nop 0 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v1 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, 2.0, v1 op_sel_hi:[1,0,1] ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %a, <2 x half> ) %max1 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %max0, <2 x half> %c) @@ -2066,9 +2182,7 @@ define <2 x half> @v_fminimum3_v2f16__inlineimm2(<2 x half> %a, <2 x half> %b) { ; GFX950-LABEL: v_fminimum3_v2f16__inlineimm2: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v1 -; GFX950-NEXT: s_nop 0 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, 4.0, 4.0 op_sel_hi:[1,0,0] +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, 4.0 op_sel_hi:[1,1,0] ; GFX950-NEXT: s_setpc_b64 
s[30:31] %max0 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %a, <2 x half> %b) %max1 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %max0, <2 x half> ) @@ -2130,10 +2244,8 @@ define <3 x half> @v_fminimum3_v3f16(<3 x half> %a, <3 x half> %b, <3 x half> %c ; GFX950-LABEL: v_fminimum3_v3f16: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v2 -; GFX950-NEXT: v_pk_minimum3_f16 v1, v5, v1, v1 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v4, v0, v0 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v4, v0, v2 +; GFX950-NEXT: v_pk_minimum3_f16 v1, v5, v1, v3 ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %a, <3 x half> %b) %max1 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %c, <3 x half> %max0) @@ -2195,10 +2307,8 @@ define <3 x half> @v_fminimum3_v3f16_commute(<3 x half> %a, <3 x half> %b, <3 x ; GFX950-LABEL: v_fminimum3_v3f16_commute: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v2 -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v5, v5 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v4, v4 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v4 +; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v3, v5 ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %a, <3 x half> %b) %max1 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %max0, <3 x half> %c) @@ -2271,16 +2381,14 @@ define <3 x half> @v_fminimum3_v3f16__fabs_all(<3 x half> %a, <3 x half> %b, <3 ; GFX950-LABEL: v_fminimum3_v3f16__fabs_all: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_and_b32_e32 v0, 0x7fff7fff, v0 ; GFX950-NEXT: v_and_b32_e32 v1, 0x7fff7fff, v1 -; GFX950-NEXT: v_and_b32_e32 v2, 0x7fff7fff, v2 +; GFX950-NEXT: v_and_b32_e32 v0, 0x7fff7fff, v0 ; GFX950-NEXT: v_and_b32_e32 
v3, 0x7fff7fff, v3 +; GFX950-NEXT: v_and_b32_e32 v2, 0x7fff7fff, v2 ; GFX950-NEXT: v_and_b32_e32 v5, 0x7fff7fff, v5 ; GFX950-NEXT: v_and_b32_e32 v4, 0x7fff7fff, v4 -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v2 -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v5, v5 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v4, v4 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v4 +; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v3, v5 ; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fabs = call <3 x half> @llvm.fabs.v3f16(<3 x half> %a) %b.fabs = call <3 x half> @llvm.fabs.v3f16(<3 x half> %b) @@ -2345,10 +2453,8 @@ define <3 x half> @v_fminimum3_v3f16__fneg_all(<3 x half> %a, <3 x half> %b, <3 ; GFX950-LABEL: v_fminimum3_v3f16__fneg_all: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v3, v3 neg_lo:[1,1,1] neg_hi:[1,1,1] -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v2 neg_lo:[1,1,1] neg_hi:[1,1,1] -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v5, v5 neg_lo:[0,1,1] neg_hi:[0,1,1] -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v4, v4 neg_lo:[0,1,1] neg_hi:[0,1,1] +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v4 neg_lo:[1,1,1] neg_hi:[1,1,1] +; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v3, v5 neg_lo:[1,1,1] neg_hi:[1,1,1] ; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fneg = fneg <3 x half> %a %b.fneg = fneg <3 x half> %b @@ -2410,10 +2516,8 @@ define <3 x half> @v_fminimum3_v3f16__inlineimm1(<3 x half> %a, <3 x half> %c) { ; GFX950-LABEL: v_fminimum3_v3f16__inlineimm1: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, 2.0, 2.0 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, 2.0, 2.0 op_sel_hi:[1,0,0] -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v2 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, 2.0, v2 op_sel_hi:[1,0,1] +; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, 2.0, v3 ; GFX950-NEXT: s_setpc_b64 s[30:31] 
%max0 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %a, <3 x half> ) %max1 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %max0, <3 x half> %c) @@ -2475,10 +2579,8 @@ define <3 x half> @v_fminimum3_v3f16__inlineimm2(<3 x half> %a, <3 x half> %b) { ; GFX950-LABEL: v_fminimum3_v3f16__inlineimm2: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v2 -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, 4.0, 4.0 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, 4.0, 4.0 op_sel_hi:[1,0,0] +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, 4.0 op_sel_hi:[1,1,0] +; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v3, 4.0 ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %a, <3 x half> %b) %max1 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %max0, <3 x half> ) @@ -2546,10 +2648,8 @@ define <4 x half> @v_fminimum3_v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c ; GFX950-LABEL: v_fminimum3_v4f16: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v2 -; GFX950-NEXT: v_pk_minimum3_f16 v1, v5, v1, v1 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v4, v0, v0 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v4, v0, v2 +; GFX950-NEXT: v_pk_minimum3_f16 v1, v5, v1, v3 ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %a, <4 x half> %b) %max1 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %c, <4 x half> %max0) @@ -2617,10 +2717,8 @@ define <4 x half> @v_fminimum3_v4f16_commute(<4 x half> %a, <4 x half> %b, <4 x ; GFX950-LABEL: v_fminimum3_v4f16_commute: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v2 -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v5, v5 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, 
v4, v4 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v4 +; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v3, v5 ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %a, <4 x half> %b) %max1 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %max0, <4 x half> %c) @@ -2699,16 +2797,14 @@ define <4 x half> @v_fminimum3_v4f16__fabs_all(<4 x half> %a, <4 x half> %b, <4 ; GFX950-LABEL: v_fminimum3_v4f16__fabs_all: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_and_b32_e32 v0, 0x7fff7fff, v0 ; GFX950-NEXT: v_and_b32_e32 v1, 0x7fff7fff, v1 -; GFX950-NEXT: v_and_b32_e32 v2, 0x7fff7fff, v2 +; GFX950-NEXT: v_and_b32_e32 v0, 0x7fff7fff, v0 ; GFX950-NEXT: v_and_b32_e32 v3, 0x7fff7fff, v3 +; GFX950-NEXT: v_and_b32_e32 v2, 0x7fff7fff, v2 ; GFX950-NEXT: v_and_b32_e32 v5, 0x7fff7fff, v5 ; GFX950-NEXT: v_and_b32_e32 v4, 0x7fff7fff, v4 -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v2 -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v5, v5 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v4, v4 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v4 +; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v3, v5 ; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fabs = call <4 x half> @llvm.fabs.v4f16(<4 x half> %a) %b.fabs = call <4 x half> @llvm.fabs.v4f16(<4 x half> %b) @@ -2779,10 +2875,8 @@ define <4 x half> @v_fminimum3_v4f16__fneg_all(<4 x half> %a, <4 x half> %b, <4 ; GFX950-LABEL: v_fminimum3_v4f16__fneg_all: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v3, v3 neg_lo:[1,1,1] neg_hi:[1,1,1] -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v2 neg_lo:[1,1,1] neg_hi:[1,1,1] -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v5, v5 neg_lo:[0,1,1] neg_hi:[0,1,1] -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v4, v4 neg_lo:[0,1,1] neg_hi:[0,1,1] +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v4 neg_lo:[1,1,1] neg_hi:[1,1,1] +; GFX950-NEXT: v_pk_minimum3_f16 
v1, v1, v3, v5 neg_lo:[1,1,1] neg_hi:[1,1,1] ; GFX950-NEXT: s_setpc_b64 s[30:31] %a.fneg = fneg <4 x half> %a %b.fneg = fneg <4 x half> %b @@ -2851,10 +2945,8 @@ define <4 x half> @v_fminimum3_v4f16__inlineimm1(<4 x half> %a, <4 x half> %c) { ; GFX950-LABEL: v_fminimum3_v4f16__inlineimm1: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, 2.0, 2.0 op_sel_hi:[1,0,0] -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, 2.0, 2.0 op_sel_hi:[1,0,0] -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v2 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, 2.0, v2 op_sel_hi:[1,0,1] +; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, 2.0, v3 op_sel_hi:[1,0,1] ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %a, <4 x half> ) %max1 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %max0, <4 x half> %c) @@ -2922,10 +3014,8 @@ define <4 x half> @v_fminimum3_v4f16__inlineimm2(<4 x half> %a, <4 x half> %b) { ; GFX950-LABEL: v_fminimum3_v4f16__inlineimm2: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v3, v3 -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, v2 -; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, 4.0, 4.0 op_sel_hi:[1,0,0] -; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, 4.0, 4.0 op_sel_hi:[1,0,0] +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v2, 4.0 op_sel_hi:[1,1,0] +; GFX950-NEXT: v_pk_minimum3_f16 v1, v1, v3, 4.0 op_sel_hi:[1,1,0] ; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %a, <4 x half> %b) %max1 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %max0, <4 x half> ) @@ -3623,20 +3713,30 @@ define <2 x half> @v_no_fminimum3_f16__multi_use(half %a, half %b, half %c) { ; GFX12-NEXT: v_pack_b32_f16 v0, v0, v1 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_no_fminimum3_f16__multi_use: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) 
-; GFX9-NEXT: v_min_f16_e32 v3, v0, v1 -; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc -; GFX9-NEXT: v_min_f16_e32 v1, v0, v2 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc -; GFX9-NEXT: v_pack_b32_f16 v0, v0, v1 -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: v_no_fminimum3_f16__multi_use: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_min_f16_e32 v3, v0, v1 +; GFX942-NEXT: v_mov_b32_e32 v4, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc +; GFX942-NEXT: v_min_f16_e32 v1, v0, v2 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, v0, v2 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc +; GFX942-NEXT: v_pack_b32_f16 v0, v0, v1 +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_no_fminimum3_f16__multi_use: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v1 +; GFX950-NEXT: s_nop 0 +; GFX950-NEXT: v_pk_minimum3_f16 v1, v0, v2, v2 +; GFX950-NEXT: s_nop 0 +; GFX950-NEXT: v_pack_b32_f16 v0, v0, v1 +; GFX950-NEXT: s_setpc_b64 s[30:31] %max0 = call half @llvm.minimum.f16(half %a, half %b) %max1 = call half @llvm.minimum.f16(half %max0, half %c) %insert.0 = insertelement <2 x half> poison, half %max0, i32 0 @@ -3654,23 +3754,35 @@ define amdgpu_ps <2 x i32> @s_no_fminimum3_f16__multi_use(half inreg %a, half in ; GFX12-NEXT: s_and_b32 s1, 0xffff, s1 ; GFX12-NEXT: ; return to shader part epilog ; -; GFX9-LABEL: s_no_fminimum3_f16__multi_use: -; GFX9: ; %bb.0: -; GFX9-NEXT: v_mov_b32_e32 v0, s1 -; GFX9-NEXT: v_min_f16_e32 v1, s0, v0 -; GFX9-NEXT: v_mov_b32_e32 v2, 0x7e00 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, s0, v0 -; GFX9-NEXT: s_nop 1 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc -; GFX9-NEXT: 
v_min_f16_e32 v1, s2, v0 -; GFX9-NEXT: v_cmp_o_f16_e32 vcc, s2, v0 -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc -; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; GFX9-NEXT: v_readfirstlane_b32 s0, v0 -; GFX9-NEXT: v_readfirstlane_b32 s1, v1 -; GFX9-NEXT: ; return to shader part epilog +; GFX942-LABEL: s_no_fminimum3_f16__multi_use: +; GFX942: ; %bb.0: +; GFX942-NEXT: v_mov_b32_e32 v0, s1 +; GFX942-NEXT: v_min_f16_e32 v1, s0, v0 +; GFX942-NEXT: v_mov_b32_e32 v2, 0x7e00 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, s0, v0 +; GFX942-NEXT: s_nop 1 +; GFX942-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc +; GFX942-NEXT: v_min_f16_e32 v1, s2, v0 +; GFX942-NEXT: v_cmp_o_f16_e32 vcc, s2, v0 +; GFX942-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX942-NEXT: s_nop 0 +; GFX942-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc +; GFX942-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX942-NEXT: v_readfirstlane_b32 s0, v0 +; GFX942-NEXT: v_readfirstlane_b32 s1, v1 +; GFX942-NEXT: ; return to shader part epilog +; +; GFX950-LABEL: s_no_fminimum3_f16__multi_use: +; GFX950: ; %bb.0: +; GFX950-NEXT: v_mov_b32_e32 v0, s0 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, s1, s1 +; GFX950-NEXT: s_nop 0 +; GFX950-NEXT: v_pk_minimum3_f16 v1, v0, s2, s2 +; GFX950-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX950-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX950-NEXT: v_readfirstlane_b32 s0, v0 +; GFX950-NEXT: v_readfirstlane_b32 s1, v1 +; GFX950-NEXT: ; return to shader part epilog %max0 = call half @llvm.minimum.f16(half %a, half %b) %max1 = call half @llvm.minimum.f16(half %max0, half %c) %cast0 = bitcast half %max0 to i16 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll index a0ba97d3b639c..1ab27337632b6 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll @@ -1282,3 +1282,1162 @@ define i32 
@test_cvt_scalef32_fp4_bf16_byte3(<2 x bfloat> %src0, float %scale, i %ret = tail call i32 @llvm.amdgcn.cvt.scalef32.pk.fp4.bf16(i32 %old, <2 x bfloat> %src0, float %scale, i32 3) ret i32 %ret } + +define amdgpu_ps void @test_scalef32_pk32_fp6_f32_vv_inreg_src(<16 x float> inreg %src, float %scale, ptr addrspace(1) %out) { +; GFX950-SDAG-LABEL: test_scalef32_pk32_fp6_f32_vv_inreg_src: +; GFX950-SDAG: ; %bb.0: +; GFX950-SDAG-NEXT: v_mov_b32_e32 v19, v2 +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[16:17], s[14:15] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[14:15], s[12:13] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[12:13], s[10:11] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[10:11], s[8:9] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX950-SDAG-NEXT: v_mov_b32_e32 v18, v1 +; GFX950-SDAG-NEXT: v_cvt_scalef32_2xpk16_fp6_f32 v[0:5], v[2:17], v[2:17], v0 +; GFX950-SDAG-NEXT: global_store_dwordx2 v[18:19], v[4:5], off offset:16 +; GFX950-SDAG-NEXT: global_store_dwordx4 v[18:19], v[0:3], off +; GFX950-SDAG-NEXT: s_endpgm +; +; GFX950-GISEL-LABEL: test_scalef32_pk32_fp6_f32_vv_inreg_src: +; GFX950-GISEL: ; %bb.0: +; GFX950-GISEL-NEXT: v_mov_b32_e32 v19, v2 +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[16:17], s[14:15] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[14:15], s[12:13] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[12:13], s[10:11] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[10:11], s[8:9] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX950-GISEL-NEXT: v_mov_b32_e32 v18, v1 +; GFX950-GISEL-NEXT: v_cvt_scalef32_2xpk16_fp6_f32 v[0:5], v[2:17], v[2:17], v0 +; GFX950-GISEL-NEXT: global_store_dwordx4 v[18:19], v[0:3], off +; GFX950-GISEL-NEXT: global_store_dwordx2 v[18:19], v[4:5], off offset:16 +; 
GFX950-GISEL-NEXT: s_endpgm + %cvt = tail call <6 x i32> @llvm.amdgcn.cvt.scalef32.2xpk16.fp6.f32(<16 x float> %src, <16 x float> %src, float %scale) + store <6 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk32_fp6_f32_sl_inreg_src(<16 x float> inreg %src, ptr addrspace(1) %out) { +; GFX950-SDAG-LABEL: test_scalef32_pk32_fp6_f32_sl_inreg_src: +; GFX950-SDAG: ; %bb.0: +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[16:17], s[14:15] +; GFX950-SDAG-NEXT: s_mov_b32 s16, 0x42c80000 +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[14:15], s[12:13] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[12:13], s[10:11] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[10:11], s[8:9] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX950-SDAG-NEXT: v_cvt_scalef32_2xpk16_fp6_f32 v[2:7], v[2:17], v[2:17], s16 +; GFX950-SDAG-NEXT: global_store_dwordx2 v[0:1], v[6:7], off offset:16 +; GFX950-SDAG-NEXT: global_store_dwordx4 v[0:1], v[2:5], off +; GFX950-SDAG-NEXT: s_endpgm +; +; GFX950-GISEL-LABEL: test_scalef32_pk32_fp6_f32_sl_inreg_src: +; GFX950-GISEL: ; %bb.0: +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[16:17], s[14:15] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[14:15], s[12:13] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[12:13], s[10:11] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[10:11], s[8:9] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX950-GISEL-NEXT: v_mov_b32_e32 v18, 0x42c80000 +; GFX950-GISEL-NEXT: v_cvt_scalef32_2xpk16_fp6_f32 v[2:7], v[2:17], v[2:17], v18 +; GFX950-GISEL-NEXT: global_store_dwordx4 v[0:1], v[2:5], off +; GFX950-GISEL-NEXT: global_store_dwordx2 v[0:1], v[6:7], off offset:16 +; GFX950-GISEL-NEXT: s_endpgm + %cvt = tail call <6 x i32> 
@llvm.amdgcn.cvt.scalef32.2xpk16.fp6.f32(<16 x float> %src, <16 x float> %src, float 100.0) + store <6 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk32_bf6_f32_vv_inreg_src(<16 x float> inreg %src, float %scale, ptr addrspace(1) %out) { +; GFX950-SDAG-LABEL: test_scalef32_pk32_bf6_f32_vv_inreg_src: +; GFX950-SDAG: ; %bb.0: +; GFX950-SDAG-NEXT: v_mov_b32_e32 v19, v2 +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[16:17], s[14:15] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[14:15], s[12:13] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[12:13], s[10:11] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[10:11], s[8:9] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX950-SDAG-NEXT: v_mov_b32_e32 v18, v1 +; GFX950-SDAG-NEXT: v_cvt_scalef32_2xpk16_bf6_f32 v[0:5], v[2:17], v[2:17], v0 +; GFX950-SDAG-NEXT: global_store_dwordx2 v[18:19], v[4:5], off offset:16 +; GFX950-SDAG-NEXT: global_store_dwordx4 v[18:19], v[0:3], off +; GFX950-SDAG-NEXT: s_endpgm +; +; GFX950-GISEL-LABEL: test_scalef32_pk32_bf6_f32_vv_inreg_src: +; GFX950-GISEL: ; %bb.0: +; GFX950-GISEL-NEXT: v_mov_b32_e32 v19, v2 +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[16:17], s[14:15] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[14:15], s[12:13] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[12:13], s[10:11] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[10:11], s[8:9] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX950-GISEL-NEXT: v_mov_b32_e32 v18, v1 +; GFX950-GISEL-NEXT: v_cvt_scalef32_2xpk16_bf6_f32 v[0:5], v[2:17], v[2:17], v0 +; GFX950-GISEL-NEXT: global_store_dwordx4 v[18:19], v[0:3], off +; GFX950-GISEL-NEXT: global_store_dwordx2 v[18:19], v[4:5], off offset:16 +; GFX950-GISEL-NEXT: s_endpgm + %cvt = tail 
call <6 x i32> @llvm.amdgcn.cvt.scalef32.2xpk16.bf6.f32(<16 x float> %src, <16 x float> %src, float %scale) + store <6 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk32_bf6_f32_sl_inreg_src(<16 x float> inreg %src, ptr addrspace(1) %out) { +; GFX950-SDAG-LABEL: test_scalef32_pk32_bf6_f32_sl_inreg_src: +; GFX950-SDAG: ; %bb.0: +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[16:17], s[14:15] +; GFX950-SDAG-NEXT: s_mov_b32 s16, 0x42c80000 +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[14:15], s[12:13] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[12:13], s[10:11] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[10:11], s[8:9] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX950-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX950-SDAG-NEXT: v_cvt_scalef32_2xpk16_bf6_f32 v[2:7], v[2:17], v[2:17], s16 +; GFX950-SDAG-NEXT: global_store_dwordx2 v[0:1], v[6:7], off offset:16 +; GFX950-SDAG-NEXT: global_store_dwordx4 v[0:1], v[2:5], off +; GFX950-SDAG-NEXT: s_endpgm +; +; GFX950-GISEL-LABEL: test_scalef32_pk32_bf6_f32_sl_inreg_src: +; GFX950-GISEL: ; %bb.0: +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[16:17], s[14:15] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[14:15], s[12:13] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[12:13], s[10:11] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[10:11], s[8:9] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX950-GISEL-NEXT: v_mov_b32_e32 v18, 0x42c80000 +; GFX950-GISEL-NEXT: v_cvt_scalef32_2xpk16_bf6_f32 v[2:7], v[2:17], v[2:17], v18 +; GFX950-GISEL-NEXT: global_store_dwordx4 v[0:1], v[2:5], off +; GFX950-GISEL-NEXT: global_store_dwordx2 v[0:1], v[6:7], off offset:16 +; GFX950-GISEL-NEXT: s_endpgm + %cvt = tail call <6 x i32> @llvm.amdgcn.cvt.scalef32.2xpk16.bf6.f32(<16 x float> %src,
<16 x float> %src, float 100.0) + store <6 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define <2 x half> @test_cvt_scalef32_f16_fp8_byte0_dst_lo_inreg_src(i32 inreg %src, float %scale, <2 x half> %old) { +; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte0_dst_lo_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 0, i1 false) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scalef32_f16_fp8_byte1_dst_lo_inreg_src(i32 inreg %src, float %scale, <2 x half> %old) { +; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte1_dst_lo_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[0,1,0] +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 false) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scalef32_f16_fp8_byte2_dst_lo_inreg_src(i32 inreg %src, float %scale, <2 x half> %old) { +; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte2_dst_lo_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,0,0] +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 false) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scalef32_f16_fp8_byte3_dst_lo_inreg_src(i32 inreg %src, float %scale, <2 x half> %old) { +; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte3_dst_lo_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,1,0] +; 
GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 3, i1 false) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scalef32_f16_fp8_byte0_dst_hi_inreg_src(i32 inreg %src, float %scale, <2 x half> %old) { +; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte0_dst_hi_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[0,0,1] +; GCN-NEXT: s_nop 0 +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 0, i1 true) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scalef32_f16_fp8_byte1_dst_hi_inreg_src(i32 inreg %src, float %scale, <2 x half> %old) { +; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte1_dst_hi_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[0,1,1] +; GCN-NEXT: s_nop 0 +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 true) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scalef32_f16_fp8_byte2_dst_hi_inreg_src(i32 inreg %src, float %scale, <2 x half> %old) { +; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte2_dst_hi_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,0,1] +; GCN-NEXT: s_nop 0 +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 true) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scalef32_f16_fp8_byte3_dst_hi_inreg_src(i32 inreg %src, float %scale, <2 x half> %old) { +; GCN-LABEL: 
test_cvt_scalef32_f16_fp8_byte3_dst_hi_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,1,1] +; GCN-NEXT: s_nop 0 +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 3, i1 true) + ret <2 x half> %ret +} + +define float @test_cvt_scalef32_f32_fp8_byte0_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 0) + ret float %ret +} + +define float @test_cvt_scalef32_f32_fp8_byte1_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 op_sel:[0,1,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 1) + ret float %ret +} + +define float @test_cvt_scalef32_f32_fp8_byte2_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte2_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 op_sel:[1,0,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 2) + ret float %ret +} + +define float @test_cvt_scalef32_f32_fp8_byte3_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte3_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 op_sel:[1,1,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = 
tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 3) + ret float %ret +} + +define <2 x half> @test_cvt_scalef32_f16_bf8_byte0_dst_lo_inreg_src(i32 inreg %src, float %scale, <2 x half> %old) { +; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte0_dst_lo_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 0, i1 false) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scalef32_f16_bf8_byte1_dst_lo_inreg_src(i32 inreg %src, float %scale, <2 x half> %old) { +; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte1_dst_lo_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[0,1,0] +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 false) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scalef32_f16_bf8_byte2_dst_lo_inreg_src(i32 inreg %src, float %scale, <2 x half> %old) { +; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte2_dst_lo_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,0,0] +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 false) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scalef32_f16_bf8_byte3_dst_lo_inreg_src(i32 inreg %src, float %scale, <2 x half> %old) { +; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte3_dst_lo_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,1,0] +; 
GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 3, i1 false) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scalef32_f16_bf8_byte0_dst_hi_inreg_src(i32 inreg %src, float %scale, <2 x half> %old) { +; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte0_dst_hi_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[0,0,1] +; GCN-NEXT: s_nop 0 +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 0, i1 true) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scalef32_f16_bf8_byte1_dst_hi_inreg_src(i32 inreg %src, float %scale, <2 x half> %old) { +; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte1_dst_hi_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[0,1,1] +; GCN-NEXT: s_nop 0 +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 true) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scalef32_f16_bf8_byte2_dst_hi_inreg_src(i32 inreg %src, float %scale, <2 x half> %old) { +; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte2_dst_hi_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,0,1] +; GCN-NEXT: s_nop 0 +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 true) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scalef32_f16_bf8_byte3_dst_hi_inreg_src(i32 inreg %src, float %scale, <2 x half> %old) { +; GCN-LABEL: 
test_cvt_scalef32_f16_bf8_byte3_dst_hi_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,1,1] +; GCN-NEXT: s_nop 0 +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 3, i1 true) + ret <2 x half> %ret +} + +define float @test_cvt_scalef32_f32_bf8_byte0_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 0) + ret float %ret +} + +define float @test_cvt_scalef32_f32_bf8_byte1_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 op_sel:[0,1,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 1) + ret float %ret +} + +define float @test_cvt_scalef32_f32_bf8_byte2_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte2_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 op_sel:[1,0,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 2) + ret float %ret +} + +define float @test_cvt_scalef32_f32_bf8_byte3_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte3_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 op_sel:[1,1,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = 
tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 3) + ret float %ret +} + +define <2 x i16> @test_cvt_scalef32_pk_fp8_f32_word0_inreg_src(<2 x i16> inreg %old, float %src0, float %src1, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_fp8_f32_word0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_mov_b32_e32 v3, s0 +; GCN-NEXT: v_cvt_scalef32_pk_fp8_f32 v3, v0, v1, v2 +; GCN-NEXT: v_mov_b32_e32 v0, v3 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x i16> @llvm.amdgcn.cvt.scalef32.pk.fp8.f32(<2 x i16> %old, float %src0, float %src1, float %scale, i1 false) + ret <2 x i16> %ret +} + +define <2 x i16> @test_cvt_scalef32_pk_fp8_f32_word1_inreg_src(<2 x i16> inreg %old, float %src0, float %src1, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_fp8_f32_word1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_mov_b32_e32 v3, s0 +; GCN-NEXT: v_cvt_scalef32_pk_fp8_f32 v3, v0, v1, v2 op_sel:[0,0,0,1] +; GCN-NEXT: s_nop 0 +; GCN-NEXT: v_mov_b32_e32 v0, v3 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x i16> @llvm.amdgcn.cvt.scalef32.pk.fp8.f32(<2 x i16> %old, float %src0, float %src1, float %scale, i1 true) + ret <2 x i16> %ret +} + +define <2 x i16> @test_cvt_scalef32_pk_bf8_f32_word0_inreg_src(<2 x i16> inreg %old, float %src0, float %src1, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_bf8_f32_word0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_mov_b32_e32 v3, s0 +; GCN-NEXT: v_cvt_scalef32_pk_bf8_f32 v3, v0, v1, v2 +; GCN-NEXT: v_mov_b32_e32 v0, v3 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x i16> @llvm.amdgcn.cvt.scalef32.pk.bf8.f32(<2 x i16> %old, float %src0, float %src1, float %scale, i1 false) + ret <2 x i16> %ret +} + +define <2 x i16> @test_cvt_scalef32_pk_bf8_f32_word1_inreg_src(<2 x i16> %old, float inreg %src0, float %src1, float %scale) { +; GCN-LABEL: 
test_cvt_scalef32_pk_bf8_f32_word1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_bf8_f32 v0, s0, v1, v2 op_sel:[0,0,0,1] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x i16> @llvm.amdgcn.cvt.scalef32.pk.bf8.f32(<2 x i16> %old, float %src0, float %src1, float %scale, i1 true) + ret <2 x i16> %ret +} + +define <2 x float> @test_cvt_scalef32_pk_f32_fp8_word0_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_f32_fp8_word0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_f32_fp8 v[0:1], s0, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp8(i32 %src, float %scale, i1 false) + ret <2 x float> %ret +} + +define <2 x float> @test_cvt_scalef32_pk_f32_fp8_word1_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_f32_fp8_word1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_f32_fp8 v[0:1], s0, v0 op_sel:[1,0,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp8(i32 %src, float %scale, i1 true) + ret <2 x float> %ret +} + +define <2 x float> @test_cvt_scalef32_pk_f32_bf8_word0_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_f32_bf8_word0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_f32_bf8 v[0:1], s0, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.bf8(i32 %src, float %scale, i1 false) + ret <2 x float> %ret +} + +define <2 x float> @test_cvt_scalef32_pk_f32_bf8_word1_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_f32_bf8_word1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_f32_bf8 
v[0:1], s0, v0 op_sel:[1,0,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.bf8(i32 %src, float %scale, i1 true) + ret <2 x float> %ret +} + +define <2 x i16> @test_cvt_scalef32_pk_fp8_f16_word0_inreg_src(<2 x i16> %old, <2 x half> inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_fp8_f16_word0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_fp8_f16 v0, s0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x i16> @llvm.amdgcn.cvt.scalef32.pk.fp8.f16(<2 x i16> %old, <2 x half> %src, float %scale, i1 false) + ret <2 x i16> %ret +} + +define <2 x i16> @test_cvt_scalef32_pk_fp8_f16_word1_inreg_src(<2 x i16> %old, <2 x half> inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_fp8_f16_word1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_fp8_f16 v0, s0, v1 op_sel:[0,0,1] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x i16> @llvm.amdgcn.cvt.scalef32.pk.fp8.f16(<2 x i16> %old, <2 x half> %src, float %scale, i1 true) + ret <2 x i16> %ret +} + +define <2 x i16> @test_cvt_scalef32_pk_fp8_bf16_word0_inreg_src(<2 x i16> %old, <2 x bfloat> inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_fp8_bf16_word0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_fp8_bf16 v0, s0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x i16> @llvm.amdgcn.cvt.scalef32.pk.fp8.bf16(<2 x i16> %old, <2 x bfloat> %src, float %scale, i1 false) + ret <2 x i16> %ret +} + +define <2 x i16> @test_cvt_scalef32_pk_fp8_bf16_word1_inreg_src(<2 x i16> %old, <2 x bfloat> inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_fp8_bf16_word1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_fp8_bf16 v0, s0, v1 op_sel:[0,0,1] +; GCN-NEXT: 
s_setpc_b64 s[30:31] + %ret = tail call <2 x i16> @llvm.amdgcn.cvt.scalef32.pk.fp8.bf16(<2 x i16> %old, <2 x bfloat> %src, float %scale, i1 true) + ret <2 x i16> %ret +} + +define <2 x i16> @test_cvt_scalef32_pk_bf8_f16_word0_inreg_src(<2 x i16> %old, <2 x half> inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_bf8_f16_word0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_bf8_f16 v0, s0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x i16> @llvm.amdgcn.cvt.scalef32.pk.bf8.f16(<2 x i16> %old, <2 x half> %src, float %scale, i1 false) + ret <2 x i16> %ret +} + +define <2 x i16> @test_cvt_scalef32_pk_bf8_f16_word1_inreg_src(<2 x i16> %old, <2 x half> inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_bf8_f16_word1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_bf8_f16 v0, s0, v1 op_sel:[0,0,1] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x i16> @llvm.amdgcn.cvt.scalef32.pk.bf8.f16(<2 x i16> %old, <2 x half> %src, float %scale, i1 true) + ret <2 x i16> %ret +} + +define <2 x i16> @test_cvt_scalef32_pk_bf8_bf16_word0_inreg_src(<2 x i16> %old, <2 x bfloat> inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_bf8_bf16_word0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_bf8_bf16 v0, s0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x i16> @llvm.amdgcn.cvt.scalef32.pk.bf8.bf16(<2 x i16> %old, <2 x bfloat> %src, float %scale, i1 false) + ret <2 x i16> %ret +} + +define <2 x i16> @test_cvt_scalef32_pk_bf8_bf16_word1_inreg_src(<2 x i16> %old, <2 x bfloat> inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_bf8_bf16_word1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_bf8_bf16 v0, s0, v1 op_sel:[0,0,1] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret 
= tail call <2 x i16> @llvm.amdgcn.cvt.scalef32.pk.bf8.bf16(<2 x i16> %old, <2 x bfloat> %src, float %scale, i1 true) + ret <2 x i16> %ret +} + +define <2 x float> @test_cvt_scale_f32_fp4_byte0_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scale_f32_fp4_byte0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 0) + ret <2 x float> %ret +} + +define <2 x float> @test_cvt_scale_f32_fp4_byte1_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scale_f32_fp4_byte1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 op_sel:[0,1,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 1) + ret <2 x float> %ret +} + +define <2 x float> @test_cvt_scale_f32_fp4_byte2_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scale_f32_fp4_byte2_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 op_sel:[1,0,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 2) + ret <2 x float> %ret +} + +define <2 x float> @test_cvt_scale_f32_fp4_byte3_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scale_f32_fp4_byte3_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 op_sel:[1,1,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 3) + ret <2 x float> %ret +} + +define i32 @test_cvt_scale_fp4_f32_byte0_inreg_src(i32 %old, float inreg %src0, float 
%src1, float %scale) { +; GCN-LABEL: test_cvt_scale_fp4_f32_byte0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_fp4_f32 v0, s0, v1, v2 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.cvt.scalef32.pk.fp4.f32(i32 %old, float %src0, float %src1, float %scale, i32 0) + ret i32 %ret +} + +define i32 @test_cvt_scale_fp4_f32_byte1_inreg_src(i32 %old, float inreg %src0, float %src1, float %scale) { +; GCN-LABEL: test_cvt_scale_fp4_f32_byte1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_fp4_f32 v0, s0, v1, v2 op_sel:[0,0,1,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.cvt.scalef32.pk.fp4.f32(i32 %old, float %src0, float %src1, float %scale, i32 1) + ret i32 %ret +} + +define i32 @test_cvt_scale_fp4_f32_byte2_inreg_src(i32 %old, float inreg %src0, float %src1, float %scale) { +; GCN-LABEL: test_cvt_scale_fp4_f32_byte2_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_fp4_f32 v0, s0, v1, v2 op_sel:[0,0,0,1] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.cvt.scalef32.pk.fp4.f32(i32 %old, float %src0, float %src1, float %scale, i32 2) + ret i32 %ret +} + +define i32 @test_cvt_scale_fp4_f32_byte3_inreg_src(i32 %old, float inreg %src0, float %src1, float %scale) { +; GCN-LABEL: test_cvt_scale_fp4_f32_byte3_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_fp4_f32 v0, s0, v1, v2 op_sel:[0,0,1,1] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.cvt.scalef32.pk.fp4.f32(i32 %old, float %src0, float %src1, float %scale, i32 3) + ret i32 %ret +} + +define <2 x half> @test_cvt_scale_f16_fp4_byte0_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scale_f16_fp4_byte0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) 
expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 0) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scale_f16_fp4_byte1_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scale_f16_fp4_byte1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 op_sel:[0,1,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 1) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scale_f16_fp4_byte2_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scale_f16_fp4_byte2_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 op_sel:[1,0,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 2) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scale_f16_fp4_byte3_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scale_f16_fp4_byte3_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 op_sel:[1,1,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 3) + ret <2 x half> %ret +} + +define <2 x bfloat> @test_cvt_scale_bf16_fp4_byte0_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scale_bf16_fp4_byte0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 0) + ret <2 x bfloat> %ret +} + +define <2 x 
bfloat> @test_cvt_scale_bf16_fp4_byte1_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scale_bf16_fp4_byte1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 op_sel:[0,1,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 1) + ret <2 x bfloat> %ret +} + +define <2 x bfloat> @test_cvt_scale_bf16_fp4_byte2_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scale_bf16_fp4_byte2_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 op_sel:[1,0,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 2) + ret <2 x bfloat> %ret +} + +define <2 x bfloat> @test_cvt_scale_bf16_fp4_byte3_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scale_bf16_fp4_byte3_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 op_sel:[1,1,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 3) + ret <2 x bfloat> %ret +} + +define <32 x float> @test_cvt_scale_pk32_f32_fp6_inreg_src(<6 x i32> inreg %src, float %scale) { +; GFX950-SDAG-LABEL: test_cvt_scale_pk32_f32_fp6_inreg_src: +; GFX950-SDAG: ; %bb.0: +; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-SDAG-NEXT: v_mov_b32_e32 v32, v0 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v34, s0 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v35, s1 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v36, s2 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v37, s3 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v38, s16 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v39, s17 +; GFX950-SDAG-NEXT: v_cvt_scalef32_pk32_f32_fp6 v[0:31], v[34:39], v32 +; GFX950-SDAG-NEXT: s_setpc_b64 s[30:31] +; 
+; GFX950-GISEL-LABEL: test_cvt_scale_pk32_f32_fp6_inreg_src: +; GFX950-GISEL: ; %bb.0: +; GFX950-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-GISEL-NEXT: s_mov_b32 s4, s16 +; GFX950-GISEL-NEXT: s_mov_b32 s5, s17 +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[4:5] +; GFX950-GISEL-NEXT: v_mov_b32_e32 v32, v0 +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[2:3] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[0:1] +; GFX950-GISEL-NEXT: v_cvt_scalef32_pk32_f32_fp6 v[0:31], v[34:39], v32 +; GFX950-GISEL-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <32 x float> @llvm.amdgcn.cvt.scalef32.pk32.f32.fp6(<6 x i32> %src, float %scale) + ret <32 x float> %ret +} + +define <32 x float> @test_cvt_scale_pk32_f32_bf6_inreg_src(<6 x i32> inreg %src, float %scale) { +; GFX950-SDAG-LABEL: test_cvt_scale_pk32_f32_bf6_inreg_src: +; GFX950-SDAG: ; %bb.0: +; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-SDAG-NEXT: v_mov_b32_e32 v32, v0 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v34, s0 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v35, s1 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v36, s2 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v37, s3 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v38, s16 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v39, s17 +; GFX950-SDAG-NEXT: v_cvt_scalef32_pk32_f32_bf6 v[0:31], v[34:39], v32 +; GFX950-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-GISEL-LABEL: test_cvt_scale_pk32_f32_bf6_inreg_src: +; GFX950-GISEL: ; %bb.0: +; GFX950-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-GISEL-NEXT: s_mov_b32 s4, s16 +; GFX950-GISEL-NEXT: s_mov_b32 s5, s17 +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[4:5] +; GFX950-GISEL-NEXT: v_mov_b32_e32 v32, v0 +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[2:3] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[0:1] +; GFX950-GISEL-NEXT: v_cvt_scalef32_pk32_f32_bf6 v[0:31], v[34:39], v32 +; GFX950-GISEL-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <32 x float> @llvm.amdgcn.cvt.scalef32.pk32.f32.bf6(<6 x i32> %src, float %scale) + ret 
<32 x float> %ret +} + +define <32 x half> @test_cvt_scalef32_pk32_f16_fp6_vv_inreg_src(<6 x i32> inreg %src, float %scale) { +; GFX950-SDAG-LABEL: test_cvt_scalef32_pk32_f16_fp6_vv_inreg_src: +; GFX950-SDAG: ; %bb.0: +; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-SDAG-NEXT: v_mov_b32_e32 v16, v0 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v18, s0 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v19, s1 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v20, s2 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v21, s3 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v22, s16 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v23, s17 +; GFX950-SDAG-NEXT: v_cvt_scalef32_pk32_f16_fp6 v[0:15], v[18:23], v16 +; GFX950-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-GISEL-LABEL: test_cvt_scalef32_pk32_f16_fp6_vv_inreg_src: +; GFX950-GISEL: ; %bb.0: +; GFX950-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-GISEL-NEXT: s_mov_b32 s4, s16 +; GFX950-GISEL-NEXT: s_mov_b32 s5, s17 +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[22:23], s[4:5] +; GFX950-GISEL-NEXT: v_mov_b32_e32 v16, v0 +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[20:21], s[2:3] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[18:19], s[0:1] +; GFX950-GISEL-NEXT: v_cvt_scalef32_pk32_f16_fp6 v[0:15], v[18:23], v16 +; GFX950-GISEL-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <32 x half> @llvm.amdgcn.cvt.scalef32.pk32.f16.fp6(<6 x i32> %src, float %scale) + ret <32 x half> %ret +} + +define <32 x half> @test_cvt_scalef32_pk32_f16_fp6_sl_inreg_src(<6 x i32> inreg inreg %src) { +; GFX950-SDAG-LABEL: test_cvt_scalef32_pk32_f16_fp6_sl_inreg_src: +; GFX950-SDAG: ; %bb.0: +; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-SDAG-NEXT: v_mov_b32_e32 v16, s0 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v17, s1 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v18, s2 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v19, s3 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v20, s16 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v21, s17 +; GFX950-SDAG-NEXT: s_mov_b32 s0, 0x42c80000 +; GFX950-SDAG-NEXT: v_cvt_scalef32_pk32_f16_fp6 v[0:15], 
v[16:21], s0 +; GFX950-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-GISEL-LABEL: test_cvt_scalef32_pk32_f16_fp6_sl_inreg_src: +; GFX950-GISEL: ; %bb.0: +; GFX950-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-GISEL-NEXT: s_mov_b32 s4, s16 +; GFX950-GISEL-NEXT: s_mov_b32 s5, s17 +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[20:21], s[4:5] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[18:19], s[2:3] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[16:17], s[0:1] +; GFX950-GISEL-NEXT: v_mov_b32_e32 v22, 0x42c80000 +; GFX950-GISEL-NEXT: v_cvt_scalef32_pk32_f16_fp6 v[0:15], v[16:21], v22 +; GFX950-GISEL-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <32 x half> @llvm.amdgcn.cvt.scalef32.pk32.f16.fp6(<6 x i32> %src, float 100.0) + ret <32 x half> %ret +} + +define <32 x bfloat> @test_cvt_scalef32_pk32_bf16_fp6_vv_inreg_src(<6 x i32> inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk32_bf16_fp6_vv_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_mov_b32_e32 v16, v0 +; GCN-NEXT: v_mov_b32_e32 v18, s0 +; GCN-NEXT: v_mov_b32_e32 v19, s1 +; GCN-NEXT: v_mov_b32_e32 v20, s2 +; GCN-NEXT: v_mov_b32_e32 v21, s3 +; GCN-NEXT: v_mov_b32_e32 v22, s16 +; GCN-NEXT: v_mov_b32_e32 v23, s17 +; GCN-NEXT: v_cvt_scalef32_pk32_bf16_fp6 v[0:15], v[18:23], v16 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <32 x bfloat> @llvm.amdgcn.cvt.scalef32.pk32.bf16.fp6(<6 x i32> %src, float %scale) + ret <32 x bfloat> %ret +} + +define <32 x bfloat> @test_cvt_scalef32_pk32_bf16_fp6_sl_inreg_src(<6 x i32> inreg inreg %src) { +; GCN-LABEL: test_cvt_scalef32_pk32_bf16_fp6_sl_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_mov_b32_e32 v16, s0 +; GCN-NEXT: v_mov_b32_e32 v17, s1 +; GCN-NEXT: v_mov_b32_e32 v18, s2 +; GCN-NEXT: v_mov_b32_e32 v19, s3 +; GCN-NEXT: v_mov_b32_e32 v20, s16 +; GCN-NEXT: v_mov_b32_e32 v21, s17 +; GCN-NEXT: s_mov_b32 s0, 0x42c80000 +; GCN-NEXT: v_cvt_scalef32_pk32_bf16_fp6 v[0:15], 
v[16:21], s0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <32 x bfloat> @llvm.amdgcn.cvt.scalef32.pk32.bf16.fp6(<6 x i32> %src, float 100.0) + ret <32 x bfloat> %ret +} + +define <32 x half> @test_cvt_scalef32_pk32_f16_bf6_vv_inreg_src(<6 x i32> inreg %src, float %scale) { +; GFX950-SDAG-LABEL: test_cvt_scalef32_pk32_f16_bf6_vv_inreg_src: +; GFX950-SDAG: ; %bb.0: +; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-SDAG-NEXT: v_mov_b32_e32 v16, v0 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v18, s0 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v19, s1 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v20, s2 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v21, s3 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v22, s16 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v23, s17 +; GFX950-SDAG-NEXT: v_cvt_scalef32_pk32_f16_bf6 v[0:15], v[18:23], v16 +; GFX950-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-GISEL-LABEL: test_cvt_scalef32_pk32_f16_bf6_vv_inreg_src: +; GFX950-GISEL: ; %bb.0: +; GFX950-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-GISEL-NEXT: s_mov_b32 s4, s16 +; GFX950-GISEL-NEXT: s_mov_b32 s5, s17 +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[22:23], s[4:5] +; GFX950-GISEL-NEXT: v_mov_b32_e32 v16, v0 +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[20:21], s[2:3] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[18:19], s[0:1] +; GFX950-GISEL-NEXT: v_cvt_scalef32_pk32_f16_bf6 v[0:15], v[18:23], v16 +; GFX950-GISEL-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <32 x half> @llvm.amdgcn.cvt.scalef32.pk32.f16.bf6(<6 x i32> %src, float %scale) + ret <32 x half> %ret +} + +define <32 x half> @test_cvt_scalef32_pk32_f16_bf6_sl_inreg_src(<6 x i32> inreg inreg %src) { +; GFX950-SDAG-LABEL: test_cvt_scalef32_pk32_f16_bf6_sl_inreg_src: +; GFX950-SDAG: ; %bb.0: +; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-SDAG-NEXT: v_mov_b32_e32 v16, s0 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v17, s1 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v18, s2 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v19, s3 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v20, 
s16 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v21, s17 +; GFX950-SDAG-NEXT: s_mov_b32 s0, 0x42c80000 +; GFX950-SDAG-NEXT: v_cvt_scalef32_pk32_f16_bf6 v[0:15], v[16:21], s0 +; GFX950-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-GISEL-LABEL: test_cvt_scalef32_pk32_f16_bf6_sl_inreg_src: +; GFX950-GISEL: ; %bb.0: +; GFX950-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-GISEL-NEXT: s_mov_b32 s4, s16 +; GFX950-GISEL-NEXT: s_mov_b32 s5, s17 +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[20:21], s[4:5] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[18:19], s[2:3] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[16:17], s[0:1] +; GFX950-GISEL-NEXT: v_mov_b32_e32 v22, 0x42c80000 +; GFX950-GISEL-NEXT: v_cvt_scalef32_pk32_f16_bf6 v[0:15], v[16:21], v22 +; GFX950-GISEL-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <32 x half> @llvm.amdgcn.cvt.scalef32.pk32.f16.bf6(<6 x i32> %src, float 100.0) + ret <32 x half> %ret +} + +define <32 x bfloat> @test_cvt_scalef32_pk32_bf16_bf6_vv_inreg_src(<6 x i32> inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk32_bf16_bf6_vv_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_mov_b32_e32 v16, v0 +; GCN-NEXT: v_mov_b32_e32 v18, s0 +; GCN-NEXT: v_mov_b32_e32 v19, s1 +; GCN-NEXT: v_mov_b32_e32 v20, s2 +; GCN-NEXT: v_mov_b32_e32 v21, s3 +; GCN-NEXT: v_mov_b32_e32 v22, s16 +; GCN-NEXT: v_mov_b32_e32 v23, s17 +; GCN-NEXT: v_cvt_scalef32_pk32_bf16_bf6 v[0:15], v[18:23], v16 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <32 x bfloat> @llvm.amdgcn.cvt.scalef32.pk32.bf16.bf6(<6 x i32> %src, float %scale) + ret <32 x bfloat> %ret +} + +define <32 x bfloat> @test_cvt_scalef32_pk32_bf16_bf6_sl_inreg_src(<6 x i32> inreg inreg %src) { +; GCN-LABEL: test_cvt_scalef32_pk32_bf16_bf6_sl_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_mov_b32_e32 v16, s0 +; GCN-NEXT: v_mov_b32_e32 v17, s1 +; GCN-NEXT: v_mov_b32_e32 v18, s2 +; GCN-NEXT: v_mov_b32_e32 v19, s3 +; GCN-NEXT: 
v_mov_b32_e32 v20, s16 +; GCN-NEXT: v_mov_b32_e32 v21, s17 +; GCN-NEXT: s_mov_b32 s0, 0x42c80000 +; GCN-NEXT: v_cvt_scalef32_pk32_bf16_bf6 v[0:15], v[16:21], s0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <32 x bfloat> @llvm.amdgcn.cvt.scalef32.pk32.bf16.bf6(<6 x i32> %src, float 100.0) + ret <32 x bfloat> %ret +} + +define <2 x half> @test_cvt_scalef32_pk_f16_fp8_word0_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_f16_fp8_word0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_f16_fp8 v0, s0, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp8(i32 %src, float %scale, i1 false) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scalef32_pk_f16_fp8_word1_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_f16_fp8_word1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_f16_fp8 v0, s0, v0 op_sel:[1,0,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp8(i32 %src, float %scale, i1 true) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scalef32_pk_f16_bf8_word0_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_f16_bf8_word0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_f16_bf8 v0, s0, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.bf8(i32 %src, float %scale, i1 false) + ret <2 x half> %ret +} + +define <2 x half> @test_cvt_scalef32_pk_f16_bf8_word1_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_f16_bf8_word1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_f16_bf8 v0, s0, v0 op_sel:[1,0,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail 
call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.bf8(i32 %src, float %scale, i1 true) + ret <2 x half> %ret +} + +define <2 x bfloat> @test_cvt_scalef32_pk_bf16_fp8_word0_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_bf16_fp8_word0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp8 v0, s0, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp8(i32 %src, float %scale, i1 false) + ret <2 x bfloat> %ret +} + +define <2 x bfloat> @test_cvt_scalef32_pk_bf16_fp8_word1_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_bf16_fp8_word1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp8 v0, s0, v0 op_sel:[1,0,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp8(i32 %src, float %scale, i1 true) + ret <2 x bfloat> %ret +} + +define <2 x bfloat> @test_cvt_scalef32_pk_bf16_bf8_word0_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_bf16_bf8_word0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_bf16_bf8 v0, s0, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.bf8(i32 %src, float %scale, i1 false) + ret <2 x bfloat> %ret +} + +define <2 x bfloat> @test_cvt_scalef32_pk_bf16_bf8_word1_inreg_src(i32 inreg %src, float %scale) { +; GCN-LABEL: test_cvt_scalef32_pk_bf16_bf8_word1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_bf16_bf8 v0, s0, v0 op_sel:[1,0,0] +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.bf8(i32 %src, float %scale, i1 true) + ret <2 x bfloat> %ret +} + +define i32 @test_cvt_scalef32_fp4_f16_byte0_inreg_src(<2 x 
half> inreg %src0, float %scale, i32 %old) { +; GCN-LABEL: test_cvt_scalef32_fp4_f16_byte0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_fp4_f16 v1, s0, v0 +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.cvt.scalef32.pk.fp4.f16(i32 %old, <2 x half> %src0, float %scale, i32 0) + ret i32 %ret +} + +define i32 @test_cvt_scalef32_fp4_f16_byte1_inreg_src(<2 x half> inreg %src0, float %scale, i32 %old) { +; GCN-LABEL: test_cvt_scalef32_fp4_f16_byte1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_fp4_f16 v1, s0, v0 op_sel:[0,0,1,0] +; GCN-NEXT: s_nop 0 +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.cvt.scalef32.pk.fp4.f16(i32 %old, <2 x half> %src0, float %scale, i32 1) + ret i32 %ret +} + +define i32 @test_cvt_scalef32_fp4_f16_byte2_inreg_src(<2 x half> inreg %src0, float %scale, i32 %old) { +; GCN-LABEL: test_cvt_scalef32_fp4_f16_byte2_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_fp4_f16 v1, s0, v0 op_sel:[0,0,0,1] +; GCN-NEXT: s_nop 0 +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.cvt.scalef32.pk.fp4.f16(i32 %old, <2 x half> %src0, float %scale, i32 2) + ret i32 %ret +} + +define i32 @test_cvt_scalef32_fp4_f16_byte3_inreg_src(<2 x half> inreg %src0, float %scale, i32 %old) { +; GCN-LABEL: test_cvt_scalef32_fp4_f16_byte3_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_fp4_f16 v1, s0, v0 op_sel:[0,0,1,1] +; GCN-NEXT: s_nop 0 +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.cvt.scalef32.pk.fp4.f16(i32 %old, <2 x half> %src0, float %scale, i32 3) + ret i32 %ret +} + +define i32 
@test_cvt_scalef32_fp4_bf16_byte0_inreg_src(<2 x bfloat> inreg %src0, float %scale, i32 %old) { +; GCN-LABEL: test_cvt_scalef32_fp4_bf16_byte0_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_fp4_bf16 v1, s0, v0 +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.cvt.scalef32.pk.fp4.bf16(i32 %old, <2 x bfloat> %src0, float %scale, i32 0) + ret i32 %ret +} + +define i32 @test_cvt_scalef32_fp4_bf16_byte1_inreg_src(<2 x bfloat> inreg %src0, float %scale, i32 %old) { +; GCN-LABEL: test_cvt_scalef32_fp4_bf16_byte1_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_fp4_bf16 v1, s0, v0 op_sel:[0,0,1,0] +; GCN-NEXT: s_nop 0 +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.cvt.scalef32.pk.fp4.bf16(i32 %old, <2 x bfloat> %src0, float %scale, i32 1) + ret i32 %ret +} + +define i32 @test_cvt_scalef32_fp4_bf16_byte2_inreg_src(<2 x bfloat> inreg %src0, float %scale, i32 %old) { +; GCN-LABEL: test_cvt_scalef32_fp4_bf16_byte2_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_fp4_bf16 v1, s0, v0 op_sel:[0,0,0,1] +; GCN-NEXT: s_nop 0 +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.cvt.scalef32.pk.fp4.bf16(i32 %old, <2 x bfloat> %src0, float %scale, i32 2) + ret i32 %ret +} + +define i32 @test_cvt_scalef32_fp4_bf16_byte3_inreg_src(<2 x bfloat> inreg %src0, float %scale, i32 %old) { +; GCN-LABEL: test_cvt_scalef32_fp4_bf16_byte3_inreg_src: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_scalef32_pk_fp4_bf16 v1, s0, v0 op_sel:[0,0,1,1] +; GCN-NEXT: s_nop 0 +; GCN-NEXT: v_mov_b32_e32 v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.cvt.scalef32.pk.fp4.bf16(i32 %old, <2 x 
bfloat> %src0, float %scale, i32 3) + ret i32 %ret +} diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.ll index 517c87193598d..4e5b85344197a 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.ll @@ -294,3 +294,404 @@ define amdgpu_ps void @test_scalef32_pk32_fp6_f16_sl(<32 x half> inreg %src, ptr store <6 x i32> %cvt, ptr addrspace(1) %out, align 8 ret void } + +define amdgpu_ps void @test_scalef32_pk32_bf6_bf16_vv_inreg_src(<32 x bfloat> inreg %src, float %scale, ptr addrspace(1) %out) { +; GFX950-SDAG-LABEL: test_scalef32_pk32_bf6_bf16_vv_inreg_src: +; GFX950-SDAG: ; %bb.0: +; GFX950-SDAG-NEXT: v_mov_b32_e32 v25, v2 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v2, s0 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v3, s1 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v4, s2 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v5, s3 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v6, s4 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v7, s5 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v8, s6 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v9, s7 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v10, s8 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v11, s9 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v12, s10 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v13, s11 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v14, s12 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v15, s13 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v16, s14 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v17, s15 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v24, v1 +; GFX950-SDAG-NEXT: v_cvt_scalef32_pk32_bf6_bf16 v[18:23], v[2:17], v0 +; GFX950-SDAG-NEXT: global_store_dwordx2 v[24:25], v[22:23], off offset:16 +; GFX950-SDAG-NEXT: global_store_dwordx4 v[24:25], v[18:21], off +; GFX950-SDAG-NEXT: s_endpgm +; +; GFX950-GISEL-LABEL: test_scalef32_pk32_bf6_bf16_vv_inreg_src: +; GFX950-GISEL: ; %bb.0: +; GFX950-GISEL-NEXT: v_mov_b32_e32 v25, v2 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v2, s0 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v3, s1 +; GFX950-GISEL-NEXT: 
v_mov_b32_e32 v4, s2 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v5, s3 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v6, s4 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v7, s5 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v8, s6 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v9, s7 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v10, s8 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v11, s9 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v12, s10 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v13, s11 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v14, s12 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v15, s13 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v16, s14 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v17, s15 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v24, v1 +; GFX950-GISEL-NEXT: v_cvt_scalef32_pk32_bf6_bf16 v[18:23], v[2:17], v0 +; GFX950-GISEL-NEXT: global_store_dwordx2 v[24:25], v[22:23], off offset:16 +; GFX950-GISEL-NEXT: global_store_dwordx4 v[24:25], v[18:21], off +; GFX950-GISEL-NEXT: s_endpgm + %cvt = tail call <6 x i32> @llvm.amdgcn.cvt.scalef32.pk32.bf6.bf16(<32 x bfloat> %src, float %scale) + store <6 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk32_bf6_bf16_sl_inreg_src(<32 x bfloat> inreg %src, ptr addrspace(1) %out) { +; GFX950-SDAG-LABEL: test_scalef32_pk32_bf6_bf16_sl_inreg_src: +; GFX950-SDAG: ; %bb.0: +; GFX950-SDAG-NEXT: v_mov_b32_e32 v2, s0 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v3, s1 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v4, s2 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v5, s3 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v6, s4 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v7, s5 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v8, s6 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v9, s7 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v10, s8 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v11, s9 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v12, s10 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v13, s11 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v14, s12 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v15, s13 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v16, s14 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v17, s15 +; GFX950-SDAG-NEXT: s_mov_b32 s0, 0x42c80000 +; 
GFX950-SDAG-NEXT: v_cvt_scalef32_pk32_bf6_bf16 v[18:23], v[2:17], s0 +; GFX950-SDAG-NEXT: global_store_dwordx2 v[0:1], v[22:23], off offset:16 +; GFX950-SDAG-NEXT: global_store_dwordx4 v[0:1], v[18:21], off +; GFX950-SDAG-NEXT: s_endpgm +; +; GFX950-GISEL-LABEL: test_scalef32_pk32_bf6_bf16_sl_inreg_src: +; GFX950-GISEL: ; %bb.0: +; GFX950-GISEL-NEXT: v_mov_b32_e32 v2, s0 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v3, s1 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v4, s2 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v5, s3 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v6, s4 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v7, s5 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v8, s6 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v9, s7 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v10, s8 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v11, s9 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v12, s10 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v13, s11 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v14, s12 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v15, s13 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v16, s14 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v17, s15 +; GFX950-GISEL-NEXT: s_mov_b32 s0, 0x42c80000 +; GFX950-GISEL-NEXT: v_cvt_scalef32_pk32_bf6_bf16 v[18:23], v[2:17], s0 +; GFX950-GISEL-NEXT: global_store_dwordx2 v[0:1], v[22:23], off offset:16 +; GFX950-GISEL-NEXT: global_store_dwordx4 v[0:1], v[18:21], off +; GFX950-GISEL-NEXT: s_endpgm + %cvt = tail call <6 x i32> @llvm.amdgcn.cvt.scalef32.pk32.bf6.bf16(<32 x bfloat> %src, float 100.0) + store <6 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk32_bf6_f16_vv_inreg_src(<32 x half> inreg %src, float %scale, ptr addrspace(1) %out) { +; GFX950-SDAG-LABEL: test_scalef32_pk32_bf6_f16_vv_inreg_src: +; GFX950-SDAG: ; %bb.0: +; GFX950-SDAG-NEXT: v_mov_b32_e32 v25, v2 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v2, s0 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v3, s1 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v4, s2 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v5, s3 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v6, s4 +; GFX950-SDAG-NEXT: v_mov_b32_e32 
v7, s5 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v8, s6 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v9, s7 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v10, s8 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v11, s9 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v12, s10 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v13, s11 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v14, s12 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v15, s13 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v16, s14 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v17, s15 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v24, v1 +; GFX950-SDAG-NEXT: v_cvt_scalef32_pk32_bf6_f16 v[18:23], v[2:17], v0 +; GFX950-SDAG-NEXT: global_store_dwordx2 v[24:25], v[22:23], off offset:16 +; GFX950-SDAG-NEXT: global_store_dwordx4 v[24:25], v[18:21], off +; GFX950-SDAG-NEXT: s_endpgm +; +; GFX950-GISEL-LABEL: test_scalef32_pk32_bf6_f16_vv_inreg_src: +; GFX950-GISEL: ; %bb.0: +; GFX950-GISEL-NEXT: v_mov_b32_e32 v25, v2 +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[16:17], s[14:15] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[14:15], s[12:13] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[12:13], s[10:11] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[10:11], s[8:9] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX950-GISEL-NEXT: v_mov_b32_e32 v24, v1 +; GFX950-GISEL-NEXT: v_cvt_scalef32_pk32_bf6_f16 v[18:23], v[2:17], v0 +; GFX950-GISEL-NEXT: global_store_dwordx4 v[24:25], v[18:21], off +; GFX950-GISEL-NEXT: global_store_dwordx2 v[24:25], v[22:23], off offset:16 +; GFX950-GISEL-NEXT: s_endpgm + %cvt = tail call <6 x i32> @llvm.amdgcn.cvt.scalef32.pk32.bf6.f16(<32 x half> %src, float %scale) + store <6 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk32_bf6_f16_sl_inreg_src(<32 x half> inreg %src, ptr addrspace(1) %out) { +; GFX950-SDAG-LABEL: test_scalef32_pk32_bf6_f16_sl_inreg_src: +; GFX950-SDAG: ; %bb.0: +; GFX950-SDAG-NEXT: v_mov_b32_e32 v2, s0 +; 
GFX950-SDAG-NEXT: v_mov_b32_e32 v3, s1 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v4, s2 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v5, s3 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v6, s4 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v7, s5 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v8, s6 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v9, s7 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v10, s8 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v11, s9 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v12, s10 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v13, s11 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v14, s12 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v15, s13 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v16, s14 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v17, s15 +; GFX950-SDAG-NEXT: s_mov_b32 s0, 0x42c80000 +; GFX950-SDAG-NEXT: v_cvt_scalef32_pk32_bf6_f16 v[18:23], v[2:17], s0 +; GFX950-SDAG-NEXT: global_store_dwordx2 v[0:1], v[22:23], off offset:16 +; GFX950-SDAG-NEXT: global_store_dwordx4 v[0:1], v[18:21], off +; GFX950-SDAG-NEXT: s_endpgm +; +; GFX950-GISEL-LABEL: test_scalef32_pk32_bf6_f16_sl_inreg_src: +; GFX950-GISEL: ; %bb.0: +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[16:17], s[14:15] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[14:15], s[12:13] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[12:13], s[10:11] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[10:11], s[8:9] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX950-GISEL-NEXT: v_mov_b32_e32 v24, 0x42c80000 +; GFX950-GISEL-NEXT: v_cvt_scalef32_pk32_bf6_f16 v[18:23], v[2:17], v24 +; GFX950-GISEL-NEXT: global_store_dwordx4 v[0:1], v[18:21], off +; GFX950-GISEL-NEXT: global_store_dwordx2 v[0:1], v[22:23], off offset:16 +; GFX950-GISEL-NEXT: s_endpgm + %cvt = tail call <6 x i32> @llvm.amdgcn.cvt.scalef32.pk32.bf6.f16(<32 x half> %src, float 100.0) + store <6 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk32_fp6_bf16_vv_inreg_src(<32 x bfloat> inreg %src, 
float %scale, ptr addrspace(1) %out) { +; GFX950-SDAG-LABEL: test_scalef32_pk32_fp6_bf16_vv_inreg_src: +; GFX950-SDAG: ; %bb.0: +; GFX950-SDAG-NEXT: v_mov_b32_e32 v25, v2 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v2, s0 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v3, s1 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v4, s2 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v5, s3 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v6, s4 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v7, s5 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v8, s6 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v9, s7 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v10, s8 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v11, s9 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v12, s10 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v13, s11 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v14, s12 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v15, s13 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v16, s14 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v17, s15 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v24, v1 +; GFX950-SDAG-NEXT: v_cvt_scalef32_pk32_fp6_bf16 v[18:23], v[2:17], v0 +; GFX950-SDAG-NEXT: global_store_dwordx2 v[24:25], v[22:23], off offset:16 +; GFX950-SDAG-NEXT: global_store_dwordx4 v[24:25], v[18:21], off +; GFX950-SDAG-NEXT: s_endpgm +; +; GFX950-GISEL-LABEL: test_scalef32_pk32_fp6_bf16_vv_inreg_src: +; GFX950-GISEL: ; %bb.0: +; GFX950-GISEL-NEXT: v_mov_b32_e32 v25, v2 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v2, s0 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v3, s1 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v4, s2 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v5, s3 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v6, s4 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v7, s5 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v8, s6 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v9, s7 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v10, s8 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v11, s9 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v12, s10 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v13, s11 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v14, s12 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v15, s13 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v16, s14 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v17, s15 +; 
GFX950-GISEL-NEXT: v_mov_b32_e32 v24, v1 +; GFX950-GISEL-NEXT: v_cvt_scalef32_pk32_fp6_bf16 v[18:23], v[2:17], v0 +; GFX950-GISEL-NEXT: global_store_dwordx2 v[24:25], v[22:23], off offset:16 +; GFX950-GISEL-NEXT: global_store_dwordx4 v[24:25], v[18:21], off +; GFX950-GISEL-NEXT: s_endpgm + %cvt = tail call <6 x i32> @llvm.amdgcn.cvt.scalef32.pk32.fp6.bf16(<32 x bfloat> %src, float %scale) + store <6 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk32_fp6_bf16_sl_inreg_src(<32 x bfloat> inreg %src, ptr addrspace(1) %out) { +; GFX950-SDAG-LABEL: test_scalef32_pk32_fp6_bf16_sl_inreg_src: +; GFX950-SDAG: ; %bb.0: +; GFX950-SDAG-NEXT: v_mov_b32_e32 v2, s0 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v3, s1 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v4, s2 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v5, s3 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v6, s4 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v7, s5 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v8, s6 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v9, s7 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v10, s8 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v11, s9 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v12, s10 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v13, s11 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v14, s12 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v15, s13 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v16, s14 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v17, s15 +; GFX950-SDAG-NEXT: s_mov_b32 s0, 0x42c80000 +; GFX950-SDAG-NEXT: v_cvt_scalef32_pk32_fp6_bf16 v[18:23], v[2:17], s0 +; GFX950-SDAG-NEXT: global_store_dwordx2 v[0:1], v[22:23], off offset:16 +; GFX950-SDAG-NEXT: global_store_dwordx4 v[0:1], v[18:21], off +; GFX950-SDAG-NEXT: s_endpgm +; +; GFX950-GISEL-LABEL: test_scalef32_pk32_fp6_bf16_sl_inreg_src: +; GFX950-GISEL: ; %bb.0: +; GFX950-GISEL-NEXT: v_mov_b32_e32 v2, s0 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v3, s1 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v4, s2 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v5, s3 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v6, s4 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v7, s5 +; 
GFX950-GISEL-NEXT: v_mov_b32_e32 v8, s6 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v9, s7 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v10, s8 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v11, s9 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v12, s10 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v13, s11 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v14, s12 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v15, s13 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v16, s14 +; GFX950-GISEL-NEXT: v_mov_b32_e32 v17, s15 +; GFX950-GISEL-NEXT: s_mov_b32 s0, 0x42c80000 +; GFX950-GISEL-NEXT: v_cvt_scalef32_pk32_fp6_bf16 v[18:23], v[2:17], s0 +; GFX950-GISEL-NEXT: global_store_dwordx2 v[0:1], v[22:23], off offset:16 +; GFX950-GISEL-NEXT: global_store_dwordx4 v[0:1], v[18:21], off +; GFX950-GISEL-NEXT: s_endpgm + %cvt = tail call <6 x i32> @llvm.amdgcn.cvt.scalef32.pk32.fp6.bf16(<32 x bfloat> %src, float 100.0) + store <6 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk32_fp6_f16_vv_inreg_src(<32 x half> inreg %src, float %scale, ptr addrspace(1) %out) { +; GFX950-SDAG-LABEL: test_scalef32_pk32_fp6_f16_vv_inreg_src: +; GFX950-SDAG: ; %bb.0: +; GFX950-SDAG-NEXT: v_mov_b32_e32 v25, v2 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v2, s0 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v3, s1 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v4, s2 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v5, s3 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v6, s4 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v7, s5 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v8, s6 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v9, s7 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v10, s8 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v11, s9 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v12, s10 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v13, s11 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v14, s12 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v15, s13 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v16, s14 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v17, s15 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v24, v1 +; GFX950-SDAG-NEXT: v_cvt_scalef32_pk32_fp6_f16 v[18:23], v[2:17], v0 +; GFX950-SDAG-NEXT: 
global_store_dwordx2 v[24:25], v[22:23], off offset:16 +; GFX950-SDAG-NEXT: global_store_dwordx4 v[24:25], v[18:21], off +; GFX950-SDAG-NEXT: s_endpgm +; +; GFX950-GISEL-LABEL: test_scalef32_pk32_fp6_f16_vv_inreg_src: +; GFX950-GISEL: ; %bb.0: +; GFX950-GISEL-NEXT: v_mov_b32_e32 v25, v2 +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[16:17], s[14:15] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[14:15], s[12:13] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[12:13], s[10:11] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[10:11], s[8:9] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX950-GISEL-NEXT: v_mov_b32_e32 v24, v1 +; GFX950-GISEL-NEXT: v_cvt_scalef32_pk32_fp6_f16 v[18:23], v[2:17], v0 +; GFX950-GISEL-NEXT: global_store_dwordx4 v[24:25], v[18:21], off +; GFX950-GISEL-NEXT: global_store_dwordx2 v[24:25], v[22:23], off offset:16 +; GFX950-GISEL-NEXT: s_endpgm + %cvt = tail call <6 x i32> @llvm.amdgcn.cvt.scalef32.pk32.fp6.f16(<32 x half> %src, float %scale) + store <6 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk32_fp6_f16_sl_inreg_src(<32 x half> inreg %src, ptr addrspace(1) %out) { +; GFX950-SDAG-LABEL: test_scalef32_pk32_fp6_f16_sl_inreg_src: +; GFX950-SDAG: ; %bb.0: +; GFX950-SDAG-NEXT: v_mov_b32_e32 v2, s0 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v3, s1 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v4, s2 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v5, s3 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v6, s4 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v7, s5 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v8, s6 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v9, s7 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v10, s8 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v11, s9 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v12, s10 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v13, s11 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v14, s12 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v15, s13 +; GFX950-SDAG-NEXT: 
v_mov_b32_e32 v16, s14 +; GFX950-SDAG-NEXT: v_mov_b32_e32 v17, s15 +; GFX950-SDAG-NEXT: s_mov_b32 s0, 0x42c80000 +; GFX950-SDAG-NEXT: v_cvt_scalef32_pk32_fp6_f16 v[18:23], v[2:17], s0 +; GFX950-SDAG-NEXT: global_store_dwordx2 v[0:1], v[22:23], off offset:16 +; GFX950-SDAG-NEXT: global_store_dwordx4 v[0:1], v[18:21], off +; GFX950-SDAG-NEXT: s_endpgm +; +; GFX950-GISEL-LABEL: test_scalef32_pk32_fp6_f16_sl_inreg_src: +; GFX950-GISEL: ; %bb.0: +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[16:17], s[14:15] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[14:15], s[12:13] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[12:13], s[10:11] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[10:11], s[8:9] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX950-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX950-GISEL-NEXT: v_mov_b32_e32 v24, 0x42c80000 +; GFX950-GISEL-NEXT: v_cvt_scalef32_pk32_fp6_f16 v[18:23], v[2:17], v24 +; GFX950-GISEL-NEXT: global_store_dwordx4 v[0:1], v[18:21], off +; GFX950-GISEL-NEXT: global_store_dwordx2 v[0:1], v[22:23], off offset:16 +; GFX950-GISEL-NEXT: s_endpgm + %cvt = tail call <6 x i32> @llvm.amdgcn.cvt.scalef32.pk32.fp6.f16(<32 x half> %src, float 100.0) + store <6 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + diff --git a/llvm/test/CodeGen/AMDGPU/llvm.maximum.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.maximum.f16.ll index 4532571d5cf2a..e828a12442fb8 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.maximum.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.maximum.f16.ll @@ -42,11 +42,7 @@ define half @v_maximum_f16(half %src0, half %src1) { ; GFX950-LABEL: v_maximum_f16: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_max_f16_e32 v2, v0, v1 -; GFX950-NEXT: v_mov_b32_e32 v3, 0x7e00 -; GFX950-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX950-NEXT: s_nop 1 -; GFX950-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc +; GFX950-NEXT: v_pk_maximum3_f16 v0, 
v0, v1, v1 ; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_maximum_f16: @@ -96,11 +92,17 @@ define half @v_maximum_f16__nnan(half %src0, half %src1) { ; GFX8-NEXT: v_max_f16_e32 v0, v0, v1 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_maximum_f16__nnan: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_max_f16_e32 v0, v0, v1 -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX900-LABEL: v_maximum_f16__nnan: +; GFX900: ; %bb.0: +; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: v_max_f16_e32 v0, v0, v1 +; GFX900-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_maximum_f16__nnan: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v1 +; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_maximum_f16__nnan: ; GFX10: ; %bb.0: @@ -162,11 +164,7 @@ define half @v_maximum_f16__nsz(half %src0, half %src1) { ; GFX950-LABEL: v_maximum_f16__nsz: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_max_f16_e32 v2, v0, v1 -; GFX950-NEXT: v_mov_b32_e32 v3, 0x7e00 -; GFX950-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX950-NEXT: s_nop 1 -; GFX950-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v1 ; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_maximum_f16__nsz: @@ -216,11 +214,17 @@ define half @v_maximum_f16__nnan_nsz(half %src0, half %src1) { ; GFX8-NEXT: v_max_f16_e32 v0, v0, v1 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_maximum_f16__nnan_nsz: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_max_f16_e32 v0, v0, v1 -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX900-LABEL: v_maximum_f16__nnan_nsz: +; GFX900: ; %bb.0: +; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: v_max_f16_e32 v0, v0, v1 +; GFX900-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_maximum_f16__nnan_nsz: +; GFX950: ; %bb.0: +; GFX950-NEXT: 
s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v1 +; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_maximum_f16__nnan_nsz: ; GFX10: ; %bb.0: @@ -286,11 +290,7 @@ define half @v_maximum_f16__nnan_src0(half %arg0, half %src1) { ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX950-NEXT: v_add_f16_e32 v0, 1.0, v0 -; GFX950-NEXT: v_max_f16_e32 v2, v0, v1 -; GFX950-NEXT: v_mov_b32_e32 v3, 0x7e00 -; GFX950-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX950-NEXT: s_nop 1 -; GFX950-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v1 ; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_maximum_f16__nnan_src0: @@ -367,11 +367,7 @@ define half @v_maximum_f16__nnan_src1(half %src0, half %arg1) { ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX950-NEXT: v_add_f16_e32 v1, 1.0, v1 -; GFX950-NEXT: v_max_f16_e32 v2, v0, v1 -; GFX950-NEXT: v_mov_b32_e32 v3, 0x7e00 -; GFX950-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX950-NEXT: s_nop 1 -; GFX950-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, v1, v1 ; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_maximum_f16__nnan_src1: @@ -458,12 +454,9 @@ define void @s_maximum_f16(half inreg %src0, half inreg %src1) { ; GFX950-LABEL: s_maximum_f16: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_mov_b32_e32 v0, s1 -; GFX950-NEXT: v_max_f16_e32 v1, s0, v0 -; GFX950-NEXT: v_mov_b32_e32 v2, 0x7e00 -; GFX950-NEXT: v_cmp_o_f16_e32 vcc, s0, v0 -; GFX950-NEXT: s_nop 1 -; GFX950-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc +; GFX950-NEXT: v_mov_b32_e32 v0, s0 +; GFX950-NEXT: v_pk_maximum3_f16 v0, v0, s1, s1 +; GFX950-NEXT: s_nop 0 ; GFX950-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX950-NEXT: ;;#ASMSTART ; GFX950-NEXT: ; use v0 @@ -2505,3 +2498,4 @@ define <16 x half> @v_maximum_v16f16(<16 x half> %src0, <16 x half> %src1) { } ;; NOTE: These prefixes 
are unused and the list is autogenerated. Do not add tests below this line: ; GCN: {{.*}} +; GFX9: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/llvm.minimum.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.minimum.f16.ll index 0b9cb9682ea5f..9a2ef15737308 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.minimum.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.minimum.f16.ll @@ -30,11 +30,7 @@ define half @v_minimum_f16(half %src0, half %src1) { ; GFX950-LABEL: v_minimum_f16: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_min_f16_e32 v2, v0, v1 -; GFX950-NEXT: v_mov_b32_e32 v3, 0x7e00 -; GFX950-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX950-NEXT: s_nop 1 -; GFX950-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v1 ; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_minimum_f16: @@ -74,11 +70,17 @@ define half @v_minimum_f16__nnan(half %src0, half %src1) { ; GFX8-NEXT: v_min_f16_e32 v0, v0, v1 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_minimum_f16__nnan: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_min_f16_e32 v0, v0, v1 -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX900-LABEL: v_minimum_f16__nnan: +; GFX900: ; %bb.0: +; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: v_min_f16_e32 v0, v0, v1 +; GFX900-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_minimum_f16__nnan: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v1 +; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_minimum_f16__nnan: ; GFX10: ; %bb.0: @@ -127,11 +129,7 @@ define half @v_minimum_f16__nsz(half %src0, half %src1) { ; GFX950-LABEL: v_minimum_f16__nsz: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_min_f16_e32 v2, v0, v1 -; GFX950-NEXT: v_mov_b32_e32 v3, 0x7e00 -; GFX950-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX950-NEXT: s_nop 1 -; GFX950-NEXT: 
v_cndmask_b32_e32 v0, v3, v2, vcc +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v1 ; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_minimum_f16__nsz: @@ -171,11 +169,17 @@ define half @v_minimum_f16__nnan_nsz(half %src0, half %src1) { ; GFX8-NEXT: v_min_f16_e32 v0, v0, v1 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: v_minimum_f16__nnan_nsz: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_min_f16_e32 v0, v0, v1 -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX900-LABEL: v_minimum_f16__nnan_nsz: +; GFX900: ; %bb.0: +; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: v_min_f16_e32 v0, v0, v1 +; GFX900-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_minimum_f16__nnan_nsz: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v1 +; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_minimum_f16__nnan_nsz: ; GFX10: ; %bb.0: @@ -227,11 +231,7 @@ define half @v_minimum_f16__nnan_src0(half %arg0, half %src1) { ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX950-NEXT: v_add_f16_e32 v0, 1.0, v0 -; GFX950-NEXT: v_min_f16_e32 v2, v0, v1 -; GFX950-NEXT: v_mov_b32_e32 v3, 0x7e00 -; GFX950-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX950-NEXT: s_nop 1 -; GFX950-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v1 ; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_minimum_f16__nnan_src0: @@ -294,11 +294,7 @@ define half @v_minimum_f16__nnan_src1(half %src0, half %arg1) { ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX950-NEXT: v_add_f16_e32 v1, 1.0, v1 -; GFX950-NEXT: v_min_f16_e32 v2, v0, v1 -; GFX950-NEXT: v_mov_b32_e32 v3, 0x7e00 -; GFX950-NEXT: v_cmp_o_f16_e32 vcc, v0, v1 -; GFX950-NEXT: s_nop 1 -; GFX950-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, v1, v1 ; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: 
v_minimum_f16__nnan_src1: @@ -368,12 +364,9 @@ define void @s_minimum_f16(half inreg %src0, half inreg %src1) { ; GFX950-LABEL: s_minimum_f16: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_mov_b32_e32 v0, s1 -; GFX950-NEXT: v_min_f16_e32 v1, s0, v0 -; GFX950-NEXT: v_mov_b32_e32 v2, 0x7e00 -; GFX950-NEXT: v_cmp_o_f16_e32 vcc, s0, v0 -; GFX950-NEXT: s_nop 1 -; GFX950-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc +; GFX950-NEXT: v_mov_b32_e32 v0, s0 +; GFX950-NEXT: v_pk_minimum3_f16 v0, v0, s1, s1 +; GFX950-NEXT: s_nop 0 ; GFX950-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX950-NEXT: ;;#ASMSTART ; GFX950-NEXT: ; use v0 @@ -1924,3 +1917,4 @@ define <16 x half> @v_minimum_v16f16(<16 x half> %src0, <16 x half> %src1) { } ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: ; GCN: {{.*}} +; GFX9: {{.*}} diff --git a/llvm/test/CodeGen/MIR/NVPTX/expected-floating-point-literal.mir b/llvm/test/CodeGen/MIR/NVPTX/expected-floating-point-literal.mir index d6f792b354e04..62ede3b9eef3b 100644 --- a/llvm/test/CodeGen/MIR/NVPTX/expected-floating-point-literal.mir +++ b/llvm/test/CodeGen/MIR/NVPTX/expected-floating-point-literal.mir @@ -16,7 +16,7 @@ registers: - { id: 1, class: float32regs } body: | bb.0.entry: - %0 = LD_f32_avar 0, 4, 1, 2, 32, &test_param_0 + %0 = LD_f32_asi 0, 4, 1, 2, 32, &test_param_0, 0 ; CHECK: [[@LINE+1]]:33: expected a floating point literal %1 = FADD_rnf32ri %0, float 3 StoreRetvalF32 %1, 0 diff --git a/llvm/test/CodeGen/MIR/NVPTX/floating-point-immediate-operands.mir b/llvm/test/CodeGen/MIR/NVPTX/floating-point-immediate-operands.mir index 9122ef7e60136..69c1e25a06024 100644 --- a/llvm/test/CodeGen/MIR/NVPTX/floating-point-immediate-operands.mir +++ b/llvm/test/CodeGen/MIR/NVPTX/floating-point-immediate-operands.mir @@ -40,9 +40,9 @@ registers: - { id: 7, class: float32regs } body: | bb.0.entry: - %0 = LD_f32_avar 0, 0, 4, 1, 2, 32, &test_param_0 + %0 = LD_f32_asi 0, 0, 4, 
1, 2, 32, &test_param_0, 0 %1 = CVT_f64_f32 %0, 0 - %2 = LD_i32_avar 0, 0, 4, 1, 0, 32, &test_param_1 + %2 = LD_i32_asi 0, 0, 4, 1, 0, 32, &test_param_1, 0 ; CHECK: %3:float64regs = FADD_rnf64ri %1, double 3.250000e+00 %3 = FADD_rnf64ri %1, double 3.250000e+00 %4 = CVT_f32_f64 %3, 5 @@ -66,9 +66,9 @@ registers: - { id: 7, class: float32regs } body: | bb.0.entry: - %0 = LD_f32_avar 0, 0, 4, 1, 2, 32, &test2_param_0 + %0 = LD_f32_asi 0, 0, 4, 1, 2, 32, &test2_param_0, 0 %1 = CVT_f64_f32 %0, 0 - %2 = LD_i32_avar 0, 0, 4, 1, 0, 32, &test2_param_1 + %2 = LD_i32_asi 0, 0, 4, 1, 0, 32, &test2_param_1, 0 ; CHECK: %3:float64regs = FADD_rnf64ri %1, double 0x7FF8000000000000 %3 = FADD_rnf64ri %1, double 0x7FF8000000000000 %4 = CVT_f32_f64 %3, 5 diff --git a/llvm/test/CodeGen/MIR/NVPTX/floating-point-invalid-type-error.mir b/llvm/test/CodeGen/MIR/NVPTX/floating-point-invalid-type-error.mir index 6280d4e90ebf1..cc9a36509db33 100644 --- a/llvm/test/CodeGen/MIR/NVPTX/floating-point-invalid-type-error.mir +++ b/llvm/test/CodeGen/MIR/NVPTX/floating-point-invalid-type-error.mir @@ -16,7 +16,7 @@ registers: - { id: 1, class: float32regs } body: | bb.0.entry: - %0 = LD_f32_avar 0, 4, 1, 2, 32, &test_param_0 + %0 = LD_f32_asi 0, 4, 1, 2, 32, &test_param_0, 0 ; CHECK: [[@LINE+1]]:33: floating point constant does not have type 'float' %1 = FADD_rnf32ri %0, float 0xH3C00 StoreRetvalF32 %1, 0 diff --git a/llvm/test/CodeGen/NVPTX/variadics-backend.ll b/llvm/test/CodeGen/NVPTX/variadics-backend.ll index f7ed690efabcf..eda4121fee702 100644 --- a/llvm/test/CodeGen/NVPTX/variadics-backend.ll +++ b/llvm/test/CodeGen/NVPTX/variadics-backend.ll @@ -215,21 +215,18 @@ define dso_local i32 @bar() { ; CHECK-PTX-NEXT: .reg .b64 %SPL; ; CHECK-PTX-NEXT: .reg .b16 %rs<10>; ; CHECK-PTX-NEXT: .reg .b32 %r<4>; -; CHECK-PTX-NEXT: .reg .b64 %rd<7>; +; CHECK-PTX-NEXT: .reg .b64 %rd<4>; ; CHECK-PTX-EMPTY: ; CHECK-PTX-NEXT: // %bb.0: // %entry ; CHECK-PTX-NEXT: mov.u64 %SPL, __local_depot3; ; CHECK-PTX-NEXT: 
cvta.local.u64 %SP, %SPL; ; CHECK-PTX-NEXT: mov.u64 %rd1, __const_$_bar_$_s1; -; CHECK-PTX-NEXT: add.s64 %rd2, %rd1, 7; -; CHECK-PTX-NEXT: ld.global.nc.u8 %rs1, [%rd2]; +; CHECK-PTX-NEXT: ld.global.nc.u8 %rs1, [%rd1+7]; ; CHECK-PTX-NEXT: cvt.u16.u8 %rs2, %rs1; ; CHECK-PTX-NEXT: st.u8 [%SP+2], %rs2; -; CHECK-PTX-NEXT: add.s64 %rd3, %rd1, 5; -; CHECK-PTX-NEXT: ld.global.nc.u8 %rs3, [%rd3]; +; CHECK-PTX-NEXT: ld.global.nc.u8 %rs3, [%rd1+5]; ; CHECK-PTX-NEXT: cvt.u16.u8 %rs4, %rs3; -; CHECK-PTX-NEXT: add.s64 %rd4, %rd1, 6; -; CHECK-PTX-NEXT: ld.global.nc.u8 %rs5, [%rd4]; +; CHECK-PTX-NEXT: ld.global.nc.u8 %rs5, [%rd1+6]; ; CHECK-PTX-NEXT: cvt.u16.u8 %rs6, %rs5; ; CHECK-PTX-NEXT: shl.b16 %rs7, %rs6, 8; ; CHECK-PTX-NEXT: or.b16 %rs8, %rs7, %rs4; @@ -238,14 +235,14 @@ define dso_local i32 @bar() { ; CHECK-PTX-NEXT: st.u32 [%SP+8], %r1; ; CHECK-PTX-NEXT: mov.b16 %rs9, 1; ; CHECK-PTX-NEXT: st.u8 [%SP+12], %rs9; -; CHECK-PTX-NEXT: mov.b64 %rd5, 1; -; CHECK-PTX-NEXT: st.u64 [%SP+16], %rd5; -; CHECK-PTX-NEXT: add.u64 %rd6, %SP, 8; +; CHECK-PTX-NEXT: mov.b64 %rd2, 1; +; CHECK-PTX-NEXT: st.u64 [%SP+16], %rd2; +; CHECK-PTX-NEXT: add.u64 %rd3, %SP, 8; ; CHECK-PTX-NEXT: { // callseq 1, 0 ; CHECK-PTX-NEXT: .param .b32 param0; ; CHECK-PTX-NEXT: st.param.b32 [param0], 1; ; CHECK-PTX-NEXT: .param .b64 param1; -; CHECK-PTX-NEXT: st.param.b64 [param1], %rd6; +; CHECK-PTX-NEXT: st.param.b64 [param1], %rd3; ; CHECK-PTX-NEXT: .param .b32 retval0; ; CHECK-PTX-NEXT: call.uni (retval0), ; CHECK-PTX-NEXT: variadics2, @@ -384,7 +381,7 @@ define dso_local void @qux() { ; CHECK-PTX-NEXT: .reg .b64 %SP; ; CHECK-PTX-NEXT: .reg .b64 %SPL; ; CHECK-PTX-NEXT: .reg .b32 %r<3>; -; CHECK-PTX-NEXT: .reg .b64 %rd<7>; +; CHECK-PTX-NEXT: .reg .b64 %rd<6>; ; CHECK-PTX-EMPTY: ; CHECK-PTX-NEXT: // %bb.0: // %entry ; CHECK-PTX-NEXT: mov.u64 %SPL, __local_depot7; @@ -392,18 +389,17 @@ define dso_local void @qux() { ; CHECK-PTX-NEXT: ld.global.nc.u64 %rd1, [__const_$_qux_$_s]; ; CHECK-PTX-NEXT: st.u64 [%SP], %rd1; ; 
CHECK-PTX-NEXT: mov.u64 %rd2, __const_$_qux_$_s; -; CHECK-PTX-NEXT: add.s64 %rd3, %rd2, 8; -; CHECK-PTX-NEXT: ld.global.nc.u64 %rd4, [%rd3]; -; CHECK-PTX-NEXT: st.u64 [%SP+8], %rd4; -; CHECK-PTX-NEXT: mov.b64 %rd5, 1; -; CHECK-PTX-NEXT: st.u64 [%SP+16], %rd5; -; CHECK-PTX-NEXT: add.u64 %rd6, %SP, 16; +; CHECK-PTX-NEXT: ld.global.nc.u64 %rd3, [%rd2+8]; +; CHECK-PTX-NEXT: st.u64 [%SP+8], %rd3; +; CHECK-PTX-NEXT: mov.b64 %rd4, 1; +; CHECK-PTX-NEXT: st.u64 [%SP+16], %rd4; +; CHECK-PTX-NEXT: add.u64 %rd5, %SP, 16; ; CHECK-PTX-NEXT: { // callseq 3, 0 ; CHECK-PTX-NEXT: .param .align 8 .b8 param0[16]; ; CHECK-PTX-NEXT: st.param.b64 [param0], %rd1; -; CHECK-PTX-NEXT: st.param.b64 [param0+8], %rd4; +; CHECK-PTX-NEXT: st.param.b64 [param0+8], %rd3; ; CHECK-PTX-NEXT: .param .b64 param1; -; CHECK-PTX-NEXT: st.param.b64 [param1], %rd6; +; CHECK-PTX-NEXT: st.param.b64 [param1], %rd5; ; CHECK-PTX-NEXT: .param .b32 retval0; ; CHECK-PTX-NEXT: call.uni (retval0), ; CHECK-PTX-NEXT: variadics4, diff --git a/llvm/test/MC/AMDGPU/gfx950_err.s b/llvm/test/MC/AMDGPU/gfx950_err.s index e0b832d8fe297..099916f48b5e7 100644 --- a/llvm/test/MC/AMDGPU/gfx950_err.s +++ b/llvm/test/MC/AMDGPU/gfx950_err.s @@ -434,3 +434,66 @@ v_cvt_scalef32_sr_pk32_bf6_f32 v[0:5], v[6:37], v38, v39 clamp // GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid operand for instruction v_cvt_scalef32_sr_pk32_fp6_f32 v[0:5], v[6:37], v38, v39 clamp + +// GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid operand for instruction +v_cvt_scalef32_pk32_f32_fp6 v[0:31], s[32:37], v6 + +// GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid operand for instruction +v_cvt_scalef32_pk32_f32_bf6 v[0:31], s[32:37], v6 + +// GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid operand for instruction +v_cvt_scalef32_pk32_f16_fp6 v[0:15], s[20:25], v8 + +// GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid operand for instruction +v_cvt_scalef32_pk32_bf16_fp6 v[0:15], s[20:25], v8 + +// GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid operand for 
instruction +v_cvt_scalef32_pk32_f16_bf6 v[0:15], s[20:25], v8 + +// GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid operand for instruction +v_cvt_scalef32_pk32_bf16_bf6 v[0:15], s[20:25], v8 + +// GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid operand for instruction +v_cvt_scalef32_pk32_fp6_f16 v[18:23], s[0:15], v16 + +// GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid operand for instruction +v_cvt_scalef32_pk32_bf6_f16 v[18:23], s[0:15], v16 + +// GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid operand for instruction +v_cvt_scalef32_pk32_fp6_bf16 v[18:23], s[0:15], v16 + +// GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid operand for instruction +v_cvt_scalef32_pk32_bf6_bf16 v[18:23], s[0:15], v16 + +// GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid operand for instruction +v_cvt_scalef32_sr_pk32_bf6_bf16 v[20:25], s[0:15], v16, v17 + +// GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid operand for instruction +v_cvt_scalef32_sr_pk32_bf6_f16 v[20:25], s[0:15], v16, v17 + +// GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid or unsupported register size +v_cvt_scalef32_sr_pk32_bf6_f32 v[36:41], s[0:31], v32, v33 + +// GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid operand for instruction +v_cvt_scalef32_sr_pk32_fp6_bf16 v[20:25], s[0:15], v16, v17 + +// GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid operand for instruction +v_cvt_scalef32_sr_pk32_fp6_f16 v[20:25], s[0:15], v16, v17 + +// GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid or unsupported register size +v_cvt_scalef32_sr_pk32_fp6_f32 v[36:41], s[0:31], v32, v33 + +// GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid operand for instruction +v_cvt_scalef32_2xpk16_fp6_f32 v[0:5], s[0:15], v[6:21], v16 + +// GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid operand for instruction +v_cvt_scalef32_2xpk16_fp6_f32 v[0:5], v[6:21], s[0:15], v16 + +// GFX950: :[[@LINE+1]]:{{[0-9]+}}: error: invalid operand for instruction +v_cvt_scalef32_2xpk16_bf6_f32 v[0:5], s[0:15], v[6:21], v16 + +// GFX950: 
:[[@LINE+1]]:{{[0-9]+}}: error: invalid operand for instruction +v_cvt_scalef32_2xpk16_bf6_f32 v[0:5], v[6:21], s[0:15], v16 + +// GFX950: v_cvt_scalef32_sr_pk_fp4_f32 v0, s[2:3]/*Invalid register, operand has 'VReg_64' register class*/, v4, v5 +v_cvt_scalef32_sr_pk_fp4_f32 v0, s[2:3], v4, v5 diff --git a/llvm/test/Transforms/SandboxVectorizer/bottomup_basic.ll b/llvm/test/Transforms/SandboxVectorizer/bottomup_basic.ll index fc5795708c7d8..531ed8cb618fc 100644 --- a/llvm/test/Transforms/SandboxVectorizer/bottomup_basic.ll +++ b/llvm/test/Transforms/SandboxVectorizer/bottomup_basic.ll @@ -386,3 +386,33 @@ define void @vecInstrsPlacement(ptr %ptr0) { store double %add1, ptr %ptr1 ret void } + +; During the bottom-up traversal we form bundle {ldA0,ldA1} but later when we +; visit the RHS operands of the additions we try to form {ldA1,ldA2} +; which is not allowed. +define void @instrsInMultipleBundles(ptr noalias %ptr) { +; CHECK-LABEL: define void @instrsInMultipleBundles( +; CHECK-SAME: ptr noalias [[PTR:%.*]]) { +; CHECK-NEXT: [[GEP0:%.*]] = getelementptr i8, ptr [[PTR]], i64 0 +; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 2 +; CHECK-NEXT: [[LDA2:%.*]] = load i8, ptr [[GEP2]], align 1 +; CHECK-NEXT: [[VECL:%.*]] = load <2 x i8>, ptr [[GEP0]], align 1 +; CHECK-NEXT: [[VEXT:%.*]] = extractelement <2 x i8> [[VECL]], i32 1 +; CHECK-NEXT: [[VINS:%.*]] = insertelement <2 x i8> poison, i8 [[VEXT]], i32 0 +; CHECK-NEXT: [[VINS1:%.*]] = insertelement <2 x i8> [[VINS]], i8 [[LDA2]], i32 1 +; CHECK-NEXT: [[VEC:%.*]] = add <2 x i8> [[VECL]], [[VINS1]] +; CHECK-NEXT: store <2 x i8> [[VEC]], ptr [[GEP0]], align 1 +; CHECK-NEXT: ret void +; + %gep0 = getelementptr i8, ptr %ptr, i64 0 + %gep1 = getelementptr i8, ptr %ptr, i64 1 + %gep2 = getelementptr i8, ptr %ptr, i64 2 + %ldA0 = load i8, ptr %gep0 + %ldA1 = load i8, ptr %gep1 + %ldA2 = load i8, ptr %gep2 + %add0 = add i8 %ldA0, %ldA1 + %add1 = add i8 %ldA1, %ldA2 + store i8 %add0, ptr %gep0 + store i8 %add1, 
ptr %gep1 + ret void +} diff --git a/llvm/test/tools/llvm-dlltool/Inputs/gnu_foo_lib_h.yaml b/llvm/test/tools/llvm-dlltool/Inputs/gnu_foo_lib_h.yaml new file mode 100644 index 0000000000000..26f3493d62143 --- /dev/null +++ b/llvm/test/tools/llvm-dlltool/Inputs/gnu_foo_lib_h.yaml @@ -0,0 +1,133 @@ +--- !COFF +header: + Machine: IMAGE_FILE_MACHINE_I386 + Characteristics: [ IMAGE_FILE_LINE_NUMS_STRIPPED, IMAGE_FILE_32BIT_MACHINE ] +sections: + - Name: .text + Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ] + Alignment: 4 + SectionData: '' + - Name: .data + Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 4 + SectionData: '' + - Name: .bss + Characteristics: [ IMAGE_SCN_CNT_UNINITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 4 + SectionData: '' + - Name: '.idata$2' + Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 4 + SectionData: '0000000000000000000000000000000000000000' + SizeOfRawData: 20 + Relocations: + - VirtualAddress: 0 + SymbolName: '.idata$4' + Type: IMAGE_REL_I386_DIR32NB + - VirtualAddress: 12 + SymbolName: __foo_lib_iname + Type: IMAGE_REL_I386_DIR32NB + - VirtualAddress: 16 + SymbolName: '.idata$5' + Type: IMAGE_REL_I386_DIR32NB + - Name: '.idata$5' + Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 4 + SectionData: '' + - Name: '.idata$4' + Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 4 + SectionData: '' +symbols: + - Name: .file + Value: 0 + SectionNumber: -2 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_FILE + File: fake + - Name: hname + Value: 0 + SectionNumber: 6 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + - Name: fthunk + Value: 0 + 
SectionNumber: 5 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + - Name: .text + Value: 0 + SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + SectionDefinition: + Length: 0 + NumberOfRelocations: 0 + NumberOfLinenumbers: 0 + CheckSum: 0 + Number: 0 + - Name: .data + Value: 0 + SectionNumber: 2 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + SectionDefinition: + Length: 0 + NumberOfRelocations: 0 + NumberOfLinenumbers: 0 + CheckSum: 0 + Number: 0 + - Name: .bss + Value: 0 + SectionNumber: 3 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + SectionDefinition: + Length: 0 + NumberOfRelocations: 0 + NumberOfLinenumbers: 0 + CheckSum: 0 + Number: 0 + - Name: '.idata$2' + Value: 0 + SectionNumber: 4 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + SectionDefinition: + Length: 20 + NumberOfRelocations: 3 + NumberOfLinenumbers: 0 + CheckSum: 0 + Number: 0 + - Name: '.idata$4' + Value: 0 + SectionNumber: 6 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + - Name: '.idata$5' + Value: 0 + SectionNumber: 5 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + - Name: __head_foo_lib + Value: 0 + SectionNumber: 4 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_EXTERNAL + - Name: __foo_lib_iname + Value: 0 + SectionNumber: 0 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_EXTERNAL +... 
diff --git a/llvm/test/tools/llvm-dlltool/Inputs/gnu_foo_lib_s00000.yaml b/llvm/test/tools/llvm-dlltool/Inputs/gnu_foo_lib_s00000.yaml new file mode 100644 index 0000000000000..f09437fc99255 --- /dev/null +++ b/llvm/test/tools/llvm-dlltool/Inputs/gnu_foo_lib_s00000.yaml @@ -0,0 +1,116 @@ +--- !COFF +header: + Machine: IMAGE_FILE_MACHINE_I386 + Characteristics: [ IMAGE_FILE_LINE_NUMS_STRIPPED, IMAGE_FILE_32BIT_MACHINE ] +sections: + - Name: .text + Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ] + Alignment: 4 + SectionData: FF25000000009090 + SizeOfRawData: 8 + Relocations: + - VirtualAddress: 2 + SymbolName: '.idata$5' + Type: IMAGE_REL_I386_DIR32 + - Name: .data + Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 4 + SectionData: '' + - Name: .bss + Characteristics: [ IMAGE_SCN_CNT_UNINITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 4 + SectionData: '' + - Name: '.idata$7' + Characteristics: [ IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 4 + SectionData: '00000000' + SizeOfRawData: 4 + Relocations: + - VirtualAddress: 0 + SymbolName: __head_foo_lib + Type: IMAGE_REL_I386_DIR32NB + - Name: '.idata$5' + Characteristics: [ IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 4 + SectionData: '00000000' + SizeOfRawData: 4 + Relocations: + - VirtualAddress: 0 + SymbolName: '.idata$6' + Type: IMAGE_REL_I386_DIR32NB + - Name: '.idata$4' + Characteristics: [ IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 4 + SectionData: '00000000' + SizeOfRawData: 4 + Relocations: + - VirtualAddress: 0 + SymbolName: '.idata$6' + Type: IMAGE_REL_I386_DIR32NB + - Name: '.idata$6' + Characteristics: [ IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 2 + SectionData: '010066756E633100' + SizeOfRawData: 8 +symbols: + - Name: .text + Value: 0 + SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: 
IMAGE_SYM_CLASS_STATIC + - Name: .data + Value: 0 + SectionNumber: 2 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + - Name: .bss + Value: 0 + SectionNumber: 3 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + - Name: '.idata$7' + Value: 0 + SectionNumber: 4 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + - Name: '.idata$5' + Value: 0 + SectionNumber: 5 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + - Name: '.idata$4' + Value: 0 + SectionNumber: 6 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + - Name: '.idata$6' + Value: 0 + SectionNumber: 7 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + - Name: _func1 + Value: 0 + SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_EXTERNAL + - Name: __imp__func1 + Value: 0 + SectionNumber: 5 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_EXTERNAL + - Name: __head_foo_lib + Value: 0 + SectionNumber: 0 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_EXTERNAL +... 
diff --git a/llvm/test/tools/llvm-dlltool/Inputs/gnu_foo_lib_t.yaml b/llvm/test/tools/llvm-dlltool/Inputs/gnu_foo_lib_t.yaml new file mode 100644 index 0000000000000..e4465293bec1a --- /dev/null +++ b/llvm/test/tools/llvm-dlltool/Inputs/gnu_foo_lib_t.yaml @@ -0,0 +1,119 @@ +--- !COFF +header: + Machine: IMAGE_FILE_MACHINE_I386 + Characteristics: [ IMAGE_FILE_RELOCS_STRIPPED, IMAGE_FILE_LINE_NUMS_STRIPPED, IMAGE_FILE_32BIT_MACHINE ] +sections: + - Name: .text + Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ] + Alignment: 4 + SectionData: '' + - Name: .data + Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 4 + SectionData: '' + - Name: .bss + Characteristics: [ IMAGE_SCN_CNT_UNINITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 4 + SectionData: '' + - Name: '.idata$4' + Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 4 + SectionData: '00000000' + SizeOfRawData: 4 + - Name: '.idata$5' + Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 4 + SectionData: '00000000' + SizeOfRawData: 4 + - Name: '.idata$7' + Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 4 + SectionData: 666F6F2E646C6C00 + SizeOfRawData: 8 +symbols: + - Name: .file + Value: 0 + SectionNumber: -2 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_FILE + File: fake + - Name: .text + Value: 0 + SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + SectionDefinition: + Length: 0 + NumberOfRelocations: 0 + NumberOfLinenumbers: 0 + CheckSum: 0 + Number: 0 + - Name: .data + Value: 0 + SectionNumber: 2 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC 
+ SectionDefinition: + Length: 0 + NumberOfRelocations: 0 + NumberOfLinenumbers: 0 + CheckSum: 0 + Number: 0 + - Name: .bss + Value: 0 + SectionNumber: 3 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + SectionDefinition: + Length: 0 + NumberOfRelocations: 0 + NumberOfLinenumbers: 0 + CheckSum: 0 + Number: 0 + - Name: '.idata$4' + Value: 0 + SectionNumber: 4 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + SectionDefinition: + Length: 4 + NumberOfRelocations: 0 + NumberOfLinenumbers: 0 + CheckSum: 0 + Number: 0 + - Name: '.idata$5' + Value: 0 + SectionNumber: 5 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + SectionDefinition: + Length: 4 + NumberOfRelocations: 0 + NumberOfLinenumbers: 0 + CheckSum: 0 + Number: 0 + - Name: '.idata$7' + Value: 0 + SectionNumber: 6 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + SectionDefinition: + Length: 8 + NumberOfRelocations: 0 + NumberOfLinenumbers: 0 + CheckSum: 0 + Number: 0 + - Name: __foo_lib_iname + Value: 0 + SectionNumber: 6 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_EXTERNAL +... 
diff --git a/llvm/test/tools/llvm-dlltool/Inputs/llvm_foo_dll_1.yaml b/llvm/test/tools/llvm-dlltool/Inputs/llvm_foo_dll_1.yaml new file mode 100644 index 0000000000000..f3f669d63bcad --- /dev/null +++ b/llvm/test/tools/llvm-dlltool/Inputs/llvm_foo_dll_1.yaml @@ -0,0 +1,69 @@ +--- !COFF +header: + Machine: IMAGE_FILE_MACHINE_AMD64 + Characteristics: [ ] +sections: + - Name: '.idata$2' + Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 4 + SectionData: '0000000000000000000000000000000000000000' + SizeOfRawData: 20 + Relocations: + - VirtualAddress: 12 + SymbolName: '.idata$6' + Type: IMAGE_REL_AMD64_ADDR32NB + - VirtualAddress: 0 + SymbolName: '.idata$4' + Type: IMAGE_REL_AMD64_ADDR32NB + - VirtualAddress: 16 + SymbolName: '.idata$5' + Type: IMAGE_REL_AMD64_ADDR32NB + - Name: '.idata$6' + Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 2 + SectionData: 666F6F2E646C6C00 + SizeOfRawData: 8 +symbols: + - Name: __IMPORT_DESCRIPTOR_foo + Value: 0 + SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_EXTERNAL + - Name: '.idata$2' + Value: 0 + SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_SECTION + - Name: '.idata$6' + Value: 0 + SectionNumber: 2 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + - Name: '.idata$4' + Value: 0 + SectionNumber: 0 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_SECTION + - Name: '.idata$5' + Value: 0 + SectionNumber: 0 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_SECTION + - Name: __NULL_IMPORT_DESCRIPTOR + Value: 0 + SectionNumber: 0 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: 
IMAGE_SYM_CLASS_EXTERNAL + - Name: "foo_NULL_THUNK_DATA" + Value: 0 + SectionNumber: 0 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_EXTERNAL +... diff --git a/llvm/test/tools/llvm-dlltool/Inputs/llvm_foo_dll_2.yaml b/llvm/test/tools/llvm-dlltool/Inputs/llvm_foo_dll_2.yaml new file mode 100644 index 0000000000000..26b601fb74c54 --- /dev/null +++ b/llvm/test/tools/llvm-dlltool/Inputs/llvm_foo_dll_2.yaml @@ -0,0 +1,18 @@ +--- !COFF +header: + Machine: IMAGE_FILE_MACHINE_AMD64 + Characteristics: [ ] +sections: + - Name: '.idata$3' + Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 4 + SectionData: '0000000000000000000000000000000000000000' + SizeOfRawData: 20 +symbols: + - Name: __NULL_IMPORT_DESCRIPTOR + Value: 0 + SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_EXTERNAL +... diff --git a/llvm/test/tools/llvm-dlltool/Inputs/llvm_foo_dll_3.yaml b/llvm/test/tools/llvm-dlltool/Inputs/llvm_foo_dll_3.yaml new file mode 100644 index 0000000000000..68248597cbaeb --- /dev/null +++ b/llvm/test/tools/llvm-dlltool/Inputs/llvm_foo_dll_3.yaml @@ -0,0 +1,23 @@ +--- !COFF +header: + Machine: IMAGE_FILE_MACHINE_AMD64 + Characteristics: [ ] +sections: + - Name: '.idata$5' + Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 8 + SectionData: '0000000000000000' + SizeOfRawData: 8 + - Name: '.idata$4' + Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ] + Alignment: 8 + SectionData: '0000000000000000' + SizeOfRawData: 8 +symbols: + - Name: "foo_NULL_THUNK_DATA" + Value: 0 + SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_EXTERNAL +... 
diff --git a/llvm/test/tools/llvm-dlltool/identify.test b/llvm/test/tools/llvm-dlltool/identify.test new file mode 100644 index 0000000000000..eb2792a8e41ae --- /dev/null +++ b/llvm/test/tools/llvm-dlltool/identify.test @@ -0,0 +1,69 @@ +Test the -I / --identify option. + +Test with both GNU style and LLVM style import libraries; using +sources from yaml to preserve the checking behaviour even if the +output of llvm-dlltool itself would change. + +RUN: rm -rf %t && mkdir -p %t +RUN: split-file %s %t + +RUN: yaml2obj %S/Inputs/gnu_foo_lib_h.yaml > %t/gnu_foo_lib_h.o +RUN: yaml2obj %S/Inputs/gnu_foo_lib_s00000.yaml > %t/gnu_foo_lib_s00000.o +RUN: yaml2obj %S/Inputs/gnu_foo_lib_t.yaml > %t/gnu_foo_lib_t.o +RUN: llvm-ar rcs %t/gnu.a %t/gnu_foo_lib_h.o %t/gnu_foo_lib_s00000.o %t/gnu_foo_lib_t.o + +RUN: yaml2obj %S/Inputs/llvm_foo_dll_1.yaml > %t/llvm_foo_dll_1.o +RUN: yaml2obj %S/Inputs/llvm_foo_dll_2.yaml > %t/llvm_foo_dll_2.o +RUN: yaml2obj %S/Inputs/llvm_foo_dll_3.yaml > %t/llvm_foo_dll_3.o +RUN: llvm-ar rcs %t/llvm.a %t/llvm_foo_dll_1.o %t/llvm_foo_dll_2.o %t/llvm_foo_dll_3.o + + +Check that we can identify the DLL name from a GNU style import library. + +RUN: llvm-dlltool -I %t/gnu.a | FileCheck --check-prefix=FOO %s +RUN: llvm-dlltool --identify %t/gnu.a | count 1 + +FOO: foo.dll + + +Check that we successfully can identify run while passing the +--identify-strict option. + +RUN: llvm-dlltool -I %t/gnu.a --identify-strict | FileCheck --check-prefix=FOO %s + + +Check that we can identify the DLL name from an LLVM style import library. + +RUN: llvm-dlltool -I %t/llvm.a | FileCheck --check-prefix=FOO %s +RUN: llvm-dlltool -I %t/llvm.a | count 1 + + +Check that we can identify the DLL names from an import library that +contains imports for multiple DLLs. 
+ +RUN: llvm-dlltool -m i386:x86-64 -d %t/lib1.def -l %t/lib1.a +RUN: llvm-dlltool -m i386:x86-64 -d %t/lib2.def -l %t/lib2.a +RUN: llvm-ar qcsL %t/merged.a %t/lib1.a %t/lib2.a + +RUN: llvm-dlltool -I %t/merged.a | FileCheck --check-prefix=MERGED %s + +MERGED-DAG: lib1.dll +MERGED-DAG: lib2.dll + +Check that --identify-strict fails this case, when there are multiple +outputs. + +RUN: not llvm-dlltool -I %t/merged.a --identify-strict 2>&1 | FileCheck --check-prefix=ERROR %s + +ERROR: contains imports for two or more DLLs + + +#--- lib1.def +LIBRARY lib1.dll +EXPORTS + func1 + +#--- lib2.def +LIBRARY lib2.dll +EXPORTS + func2 diff --git a/llvm/tools/llvm-jitlink/llvm-jitlink.cpp b/llvm/tools/llvm-jitlink/llvm-jitlink.cpp index 9e6d3df297fc7..d6fd06c52c212 100644 --- a/llvm/tools/llvm-jitlink/llvm-jitlink.cpp +++ b/llvm/tools/llvm-jitlink/llvm-jitlink.cpp @@ -427,6 +427,23 @@ bool lazyLinkingRequested() { return false; } +static Error applyLibraryLinkModifiers(Session &S, LinkGraph &G) { + // If there are hidden archives and this graph is an archive + // member then apply hidden modifier. 
+ if (!S.HiddenArchives.empty()) { + StringRef ObjName(G.getName()); + if (ObjName.ends_with(')')) { + auto LibName = ObjName.split('(').first; + if (S.HiddenArchives.count(LibName)) { + for (auto *Sym : G.defined_symbols()) + Sym->setScope(std::max(Sym->getScope(), Scope::Hidden)); + } + } + } + + return Error::success(); +} + static Error applyHarnessPromotions(Session &S, LinkGraph &G) { std::lock_guard Lock(S.M); @@ -1328,6 +1345,8 @@ void Session::modifyPassConfig(LinkGraph &G, PassConfiguration &PassConfig) { ++ActiveLinks; return Error::success(); }); + PassConfig.PrePrunePasses.push_back( + [this](LinkGraph &G) { return applyLibraryLinkModifiers(*this, G); }); PassConfig.PrePrunePasses.push_back( [this](LinkGraph &G) { return applyHarnessPromotions(*this, G); }); @@ -2182,11 +2201,6 @@ static Error addLibraries(Session &S, LibraryLoadQueue.push_back(std::move(LL)); } - // If there are any load- options then turn on flag overrides - // to avoid flag mismatch errors. - if (!LibrariesHidden.empty() || !LoadHidden.empty()) - S.ObjLayer.setOverrideObjectFlagsWithResponsibilityFlags(true); - // Sort library loads by position in the argument list. 
llvm::sort(LibraryLoadQueue, [](const LibraryLoad &LHS, const LibraryLoad &RHS) { @@ -2204,6 +2218,7 @@ static Error addLibraries(Session &S, break; case LibraryLoad::Hidden: GetObjFileInterface = getObjectFileInterfaceHidden; + S.HiddenArchives.insert(Path); break; } diff --git a/llvm/tools/llvm-jitlink/llvm-jitlink.h b/llvm/tools/llvm-jitlink/llvm-jitlink.h index 92d667e797b88..ca5162f1a2d52 100644 --- a/llvm/tools/llvm-jitlink/llvm-jitlink.h +++ b/llvm/tools/llvm-jitlink/llvm-jitlink.h @@ -136,6 +136,8 @@ struct Session { StringSet<> HarnessDefinitions; DenseMap CanonicalWeakDefs; + StringSet<> HiddenArchives; + std::optional ShowGraphsRegex; private: diff --git a/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/SchedulerTest.cpp b/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/SchedulerTest.cpp index 97724100ba341..f827bd7424a06 100644 --- a/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/SchedulerTest.cpp +++ b/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/SchedulerTest.cpp @@ -145,6 +145,78 @@ define void @foo(ptr %ptr, i8 %v0, i8 %v1) { testing::ElementsAre(SN0, SN1)); } +// Check that when we erase a DAG node its SchedBundle gets updated. 
+TEST_F(SchedulerTest, SchedBundleEraseDGNode) { + parseIR(C, R"IR( +define void @foo(ptr %ptr, i8 %v0, i8 %v1, i8 %v2, i8 %v3) { + store i8 %v0, ptr %ptr + store i8 %v1, ptr %ptr + store i8 %v2, ptr %ptr + store i8 %v3, ptr %ptr + ret void +} +)IR"); + llvm::Function *LLVMF = &*M->getFunction("foo"); + sandboxir::Context Ctx(C); + auto *F = Ctx.createFunction(LLVMF); + auto *BB = &*F->begin(); + auto It = BB->begin(); + auto *S0 = cast(&*It++); + auto *S1 = cast(&*It++); + auto *S2 = cast(&*It++); + auto *S3 = cast(&*It++); + + sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx); + DAG.extend({&*BB->begin(), BB->getTerminator()}); + auto *SN0 = DAG.getNode(S0); + auto *SN1 = DAG.getNode(S1); + auto *SN2 = DAG.getNode(S2); + auto *SN3 = DAG.getNode(S3); + { + // Check the common case, when the bundle contains unique nodes. + sandboxir::SchedBundle Bndl({SN0, SN1}); + S0->eraseFromParent(); + EXPECT_THAT(Bndl, testing::ElementsAre(SN1)); + } + { + // Check corner case when the node appears more than once. + sandboxir::SchedBundle Bndl({SN2, SN3, SN2}); + S2->eraseFromParent(); + EXPECT_THAT(Bndl, testing::ElementsAre(SN3)); + } +} + +// Check that assigning a bundle to a DAG Node that is already assigned to a +// bundle, removes the node from the old bundle. 
+TEST_F(SchedulerTest, SchedBundleReassign) { + parseIR(C, R"IR( +define void @foo(ptr %ptr, i8 %v0, i8 %v1, i8 %v2) { + store i8 %v0, ptr %ptr + store i8 %v1, ptr %ptr + store i8 %v2, ptr %ptr + ret void +} +)IR"); + llvm::Function *LLVMF = &*M->getFunction("foo"); + sandboxir::Context Ctx(C); + auto *F = Ctx.createFunction(LLVMF); + auto *BB = &*F->begin(); + auto It = BB->begin(); + auto *S0 = cast(&*It++); + auto *S1 = cast(&*It++); + auto *S2 = cast(&*It++); + + sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx); + DAG.extend({&*BB->begin(), BB->getTerminator()}); + auto *SN0 = DAG.getNode(S0); + auto *SN1 = DAG.getNode(S1); + auto *SN2 = DAG.getNode(S2); + sandboxir::SchedBundle BndlOld({SN0, SN1}); + sandboxir::SchedBundle BndlNew({SN0, SN2}); + EXPECT_THAT(BndlOld, testing::ElementsAre(SN1)); + EXPECT_THAT(BndlNew, testing::ElementsAre(SN0, SN2)); +} + TEST_F(SchedulerTest, Basic) { parseIR(C, R"IR( define void @foo(ptr %ptr, i8 %v0, i8 %v1) { @@ -210,9 +282,10 @@ define void @foo(ptr noalias %ptr0, ptr noalias %ptr1) { EXPECT_TRUE(Sched.trySchedule({L0, L1})); } -TEST_F(SchedulerTest, RescheduleAlreadyScheduled) { +TEST_F(SchedulerTest, TrimSchedule) { parseIR(C, R"IR( -define void @foo(ptr noalias %ptr0, ptr noalias %ptr1) { +define void @foo(ptr noalias %ptr0, ptr noalias %ptr1, i8 %arg) { + %zext = zext i8 0 to i32 %ld0 = load i8, ptr %ptr0 %ld1 = load i8, ptr %ptr1 %add0 = add i8 %ld0, %ld0 @@ -227,6 +300,7 @@ define void @foo(ptr noalias %ptr0, ptr noalias %ptr1) { auto *F = Ctx.createFunction(LLVMF); auto *BB = &*F->begin(); auto It = BB->begin(); + auto *Z = cast(&*It++); auto *L0 = cast(&*It++); auto *L1 = cast(&*It++); auto *Add0 = cast(&*It++); @@ -240,10 +314,224 @@ define void @foo(ptr noalias %ptr0, ptr noalias %ptr1) { EXPECT_TRUE(Sched.trySchedule({S0, S1})); EXPECT_TRUE(Sched.trySchedule({L0, L1})); // At this point Add0 and Add1 should have been individually scheduled - // as single bundles. 
+ // as singleton bundles, but {S0,S1} and {L0,L1} as vector bundles. // Check if rescheduling works. EXPECT_TRUE(Sched.trySchedule({Add0, Add1})); + // These should fail because {L0,L1} is a vector bundle. + EXPECT_FALSE(Sched.trySchedule({L0, Z})); + EXPECT_FALSE(Sched.trySchedule({L1, Z})); + // This should succeed because it matches the original vec bundle. + EXPECT_TRUE(Sched.trySchedule({L0, L1})); +} + +// Test that an instruction can't belong in two bundles! +TEST_F(SchedulerTest, CheckBundles) { + parseIR(C, R"IR( +define void @foo(ptr noalias %ptr0, ptr noalias %ptr1, ptr noalias %ptr2) { + %L0 = load i8, ptr %ptr0 + %L1 = load i8, ptr %ptr1 ; This belongs in 2 bundles! + %L2 = load i8, ptr %ptr2 + %add0 = add i8 %L0, %L1 + %add1 = add i8 %L1, %L2 + store i8 %add0, ptr %ptr0 + store i8 %add1, ptr %ptr1 + ret void +} +)IR"); + llvm::Function *LLVMF = &*M->getFunction("foo"); + sandboxir::Context Ctx(C); + auto *F = Ctx.createFunction(LLVMF); + auto *BB = &*F->begin(); + auto It = BB->begin(); + auto *L0 = cast(&*It++); + auto *L1 = cast(&*It++); + auto *L2 = cast(&*It++); + auto *Add0 = cast(&*It++); + auto *Add1 = cast(&*It++); + auto *S0 = cast(&*It++); + auto *S1 = cast(&*It++); + + sandboxir::Scheduler Sched(getAA(*LLVMF), Ctx); + EXPECT_TRUE(Sched.trySchedule({S0, S1})); + EXPECT_TRUE(Sched.trySchedule({Add0, Add1})); + EXPECT_TRUE(Sched.trySchedule({L0, L1})); + // This should fail because L1 is already part of {L0,L1} + EXPECT_FALSE(Sched.trySchedule({L1, L2})); + EXPECT_FALSE(Sched.trySchedule({L2, L1})); +} + +// Try schedule a bundle {L1,L2} where L1 is already scheduled in {L0,L1} +// but L2 is not in the DAG at all +TEST_F(SchedulerTest, CheckBundles2) { + parseIR(C, R"IR( +define void @foo(ptr noalias %ptr0, ptr noalias %ptr1, ptr noalias %ptr2) { + %L2 = load i8, ptr %ptr2 ; This is not in the DAG + %L1 = load i8, ptr %ptr1 ; This belongs in 2 bundles! 
+ %L0 = load i8, ptr %ptr0 + %add1 = add i8 %L1, %L2 + %add0 = add i8 %L0, %L1 + store i8 %add1, ptr %ptr1 + store i8 %add0, ptr %ptr0 + ret void +} +)IR"); + llvm::Function *LLVMF = &*M->getFunction("foo"); + sandboxir::Context Ctx(C); + auto *F = Ctx.createFunction(LLVMF); + auto *BB = &*F->begin(); + auto It = BB->begin(); + auto *L2 = cast(&*It++); + auto *L1 = cast(&*It++); + auto *L0 = cast(&*It++); + auto *Add1 = cast(&*It++); + auto *Add0 = cast(&*It++); + auto *S1 = cast(&*It++); + auto *S0 = cast(&*It++); + + sandboxir::Scheduler Sched(getAA(*LLVMF), Ctx); + EXPECT_TRUE(Sched.trySchedule({S0, S1})); + EXPECT_TRUE(Sched.trySchedule({Add0, Add1})); + EXPECT_TRUE(Sched.trySchedule({L0, L1})); + // This should fail because L1 is already part of {L0,L1}. + EXPECT_FALSE(Sched.trySchedule({L1, L2})); + EXPECT_FALSE(Sched.trySchedule({L2, L1})); +} + +// Try schedule a bundle {L1,L2} where L1 is already scheduled in {L0,L1} +// but L2 is in the DAG but isn't scheduled. +TEST_F(SchedulerTest, CheckBundles3) { + parseIR(C, R"IR( +define void @foo(ptr noalias %ptr0, ptr noalias %ptr1, ptr noalias %ptr2) { + %L2 = load i8, ptr %ptr2 ; This is not in the DAG + %L1 = load i8, ptr %ptr1 ; This belongs in 2 bundles! + %L0 = load i8, ptr %ptr0 + %add1 = add i8 %L1, %L2 + %add0 = add i8 %L0, %L1 + store i8 %add1, ptr %ptr1 + store i8 %add0, ptr %ptr0 + ret void +} +)IR"); + llvm::Function *LLVMF = &*M->getFunction("foo"); + sandboxir::Context Ctx(C); + auto *F = Ctx.createFunction(LLVMF); + auto *BB = &*F->begin(); + auto It = BB->begin(); + auto *L2 = cast(&*It++); + auto *L1 = cast(&*It++); + auto *L0 = cast(&*It++); + auto *Add1 = cast(&*It++); + auto *Add0 = cast(&*It++); + auto *S1 = cast(&*It++); + auto *S0 = cast(&*It++); + + sandboxir::Scheduler Sched(getAA(*LLVMF), Ctx); + EXPECT_TRUE(Sched.trySchedule({S0, S1})); + EXPECT_TRUE(Sched.trySchedule({Add0, Add1})); + EXPECT_TRUE(Sched.trySchedule({L0, L1})); + // Add L2 to the DAG, but don't schedule it. 
+ auto &DAG = sandboxir::SchedulerInternalsAttorney::getDAG(Sched); + DAG.extend(L2); + // This should fail because L1 is already part of {L0,L1}. + EXPECT_FALSE(Sched.trySchedule({L1, L2})); + EXPECT_FALSE(Sched.trySchedule({L2, L1})); +} + +// Check that Scheduler::getBndlSchedState() works correctly. +TEST_F(SchedulerTest, GetBndlSchedState) { + parseIR(C, R"IR( +define void @foo(ptr noalias %ptr0, ptr noalias %ptr1, ptr noalias %ptr2) { + %L2 = load i8, ptr %ptr2 ; This is not in the DAG + %L1 = load i8, ptr %ptr1 ; This belongs in 2 bundles! + %L0 = load i8, ptr %ptr0 + %add1 = add i8 %L1, %L2 + %add0 = add i8 %L0, %L1 + store i8 %add1, ptr %ptr1 + store i8 %add0, ptr %ptr0 + ret void +} +)IR"); + llvm::Function *LLVMF = &*M->getFunction("foo"); + sandboxir::Context Ctx(C); + auto *F = Ctx.createFunction(LLVMF); + auto *BB = &*F->begin(); + auto It = BB->begin(); + auto *L2 = cast(&*It++); + auto *L1 = cast(&*It++); + auto *L0 = cast(&*It++); + auto *Add1 = cast(&*It++); + auto *Add0 = cast(&*It++); + auto *S1 = cast(&*It++); + auto *S0 = cast(&*It++); + + sandboxir::Scheduler Sched(getAA(*LLVMF), Ctx); + auto &DAG = sandboxir::SchedulerInternalsAttorney::getDAG(Sched); + auto GetBndlSchedState = [&Sched](ArrayRef Instrs) { + return sandboxir::SchedulerInternalsAttorney::getBndlSchedState(Sched, + Instrs); + }; + using BndlSchedState = sandboxir::SchedulerInternalsAttorney::BndlSchedState; + // Check when instructions are not in the DAG. + EXPECT_EQ(GetBndlSchedState({S0}), BndlSchedState::NoneScheduled); + EXPECT_EQ(GetBndlSchedState({S0, S1}), BndlSchedState::NoneScheduled); + EXPECT_EQ(GetBndlSchedState({S0, S1}), BndlSchedState::NoneScheduled); + // Check when instructions are in the DAG. 
+ DAG.extend({S0, S1}); + EXPECT_EQ(GetBndlSchedState({S0}), BndlSchedState::NoneScheduled); + EXPECT_EQ(GetBndlSchedState({S0, S1}), BndlSchedState::NoneScheduled); + EXPECT_EQ(GetBndlSchedState({S0, S1}), BndlSchedState::NoneScheduled); + // One instruction in the DAG and the other not in the DAG. + EXPECT_EQ(GetBndlSchedState({S0, Add0}), BndlSchedState::NoneScheduled); + + // Check with scheduled instructions. + Sched.clear(); // Manually extending the DAG messes with the scheduler. + EXPECT_TRUE(Sched.trySchedule({S0, S1})); + // Check fully scheduled. + EXPECT_EQ(GetBndlSchedState({S0, S1}), BndlSchedState::FullyScheduled); + // Check scheduled + not in DAG. + EXPECT_EQ(GetBndlSchedState({S0, Add0}), BndlSchedState::AlreadyScheduled); + EXPECT_EQ(GetBndlSchedState({Add0, S0}), BndlSchedState::AlreadyScheduled); + EXPECT_EQ(GetBndlSchedState({Add0, S1}), BndlSchedState::AlreadyScheduled); + EXPECT_EQ(GetBndlSchedState({Add0, Add1}), BndlSchedState::NoneScheduled); + // Extend DAG such that Add0 and Add1 are in the DAG but are not scheduled. + DAG.extend({Add0, Add1}); + // Check both in DAG but not scheduled. + EXPECT_EQ(GetBndlSchedState({Add0, Add1}), BndlSchedState::NoneScheduled); + // Check scheduled + in DAG but not scheduled. + EXPECT_EQ(GetBndlSchedState({S0, Add0}), BndlSchedState::AlreadyScheduled); + EXPECT_EQ(GetBndlSchedState({Add0, S0}), BndlSchedState::AlreadyScheduled); + EXPECT_EQ(GetBndlSchedState({Add0, S1}), BndlSchedState::AlreadyScheduled); + + Sched.clear(); // Manually extending the DAG messes with the scheduler. + // Schedule instructions towards the top so that intermediate instructions + // (namely Add0, Add1) get temporarily scheduled in singleton bundles. + EXPECT_TRUE(Sched.trySchedule({S0, S1})); EXPECT_TRUE(Sched.trySchedule({L0, L1})); + // Check fully scheduled. + EXPECT_EQ(GetBndlSchedState({L0, L1}), BndlSchedState::FullyScheduled); + // Check both singletons. 
+ EXPECT_EQ(GetBndlSchedState({Add0, Add1}), + BndlSchedState::TemporarilyScheduled); + // Check single singleton. + EXPECT_EQ(GetBndlSchedState({Add0}), BndlSchedState::TemporarilyScheduled); + EXPECT_EQ(GetBndlSchedState({Add1}), BndlSchedState::TemporarilyScheduled); + // Check singleton + scheduled. + EXPECT_EQ(GetBndlSchedState({L0, S1}), BndlSchedState::AlreadyScheduled); + EXPECT_EQ(GetBndlSchedState({S1, L0}), BndlSchedState::AlreadyScheduled); + EXPECT_EQ(GetBndlSchedState({L0, Add1}), BndlSchedState::AlreadyScheduled); + EXPECT_EQ(GetBndlSchedState({Add1, L0}), BndlSchedState::AlreadyScheduled); + // Check singleton + not in DAG. + EXPECT_EQ(GetBndlSchedState({Add1, L2}), + BndlSchedState::TemporarilyScheduled); + EXPECT_EQ(GetBndlSchedState({L2, Add0}), + BndlSchedState::TemporarilyScheduled); + + // Check duplicates. + // TODO: Should duplicates be allowed? + EXPECT_EQ(GetBndlSchedState({L2, L2}), BndlSchedState::NoneScheduled); + EXPECT_EQ(GetBndlSchedState({S0, S0}), BndlSchedState::FullyScheduled); + EXPECT_EQ(GetBndlSchedState({Add0, Add1}), + BndlSchedState::TemporarilyScheduled); } // Check scheduling in the following order: {A0,A1},{B0,B1},{C0,C1},{D0,D1} diff --git a/llvm/utils/lit/lit/llvm/config.py b/llvm/utils/lit/lit/llvm/config.py index e40a422d2db6c..58556b819d4fc 100644 --- a/llvm/utils/lit/lit/llvm/config.py +++ b/llvm/utils/lit/lit/llvm/config.py @@ -175,6 +175,8 @@ def __init__(self, lit_config, config): features.add("target-riscv64") elif re.match(r"^riscv32-.*-elf.", target_triple): features.add("target-riscv32") + elif re.match(r"^loongarch64.*", target_triple): + features.add("target-loongarch64") if not user_is_root(): features.add("non-root-user") diff --git a/mlir/lib/Analysis/DataFlowFramework.cpp b/mlir/lib/Analysis/DataFlowFramework.cpp index 028decbae31c3..29f57c602f9cb 100644 --- a/mlir/lib/Analysis/DataFlowFramework.cpp +++ b/mlir/lib/Analysis/DataFlowFramework.cpp @@ -118,21 +118,17 @@ LogicalResult 
DataFlowSolver::initializeAndRun(Operation *top) { } // Run the analysis until fixpoint. - do { - // Exhaust the worklist. - while (!worklist.empty()) { - auto [point, analysis] = worklist.front(); - worklist.pop(); - - DATAFLOW_DEBUG(llvm::dbgs() << "Invoking '" << analysis->debugName - << "' on: " << point << "\n"); - if (failed(analysis->visit(point))) - return failure(); - } - - // Iterate until all states are in some initialized state and the worklist - // is exhausted. - } while (!worklist.empty()); + // Iterate until all states are in some initialized state and the worklist + // is exhausted. + while (!worklist.empty()) { + auto [point, analysis] = worklist.front(); + worklist.pop(); + + DATAFLOW_DEBUG(llvm::dbgs() << "Invoking '" << analysis->debugName + << "' on: " << point << "\n"); + if (failed(analysis->visit(point))) + return failure(); + } return success(); } diff --git a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp index c3b3a78abe7f7..8b6c62ca2e36d 100644 --- a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp +++ b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp @@ -624,7 +624,8 @@ LogicalResult impl::scalarizeVectorOp(Operation *op, ValueRange operands, const LLVMTypeConverter &converter) { TypeRange operandTypes(operands); if (llvm::any_of(operandTypes, llvm::IsaPred)) { - VectorType vectorType = cast(op->getResultTypes()[0]); + VectorType vectorType = + cast(converter.convertType(op->getResultTypes()[0])); rewriter.replaceOp(op, scalarizeVectorOpHelper(op, operands, vectorType, rewriter, converter)); return success(); diff --git a/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp b/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp index 85ec288268aeb..97da96afac4cd 100644 --- a/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp +++ b/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp @@ -18,6 +18,8 @@ #include "mlir/IR/TypeUtilities.h" #include "mlir/Pass/Pass.h" +#include "llvm/ADT/FloatingPointMode.h" + 
namespace mlir { #define GEN_PASS_DEF_CONVERTMATHTOLLVMPASS #include "mlir/Conversion/Passes.h.inc" @@ -286,6 +288,40 @@ struct RsqrtOpLowering : public ConvertOpToLLVMPattern { } }; +struct IsNaNOpLowering : public ConvertOpToLLVMPattern { + using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern; + + LogicalResult + matchAndRewrite(math::IsNaNOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto operandType = adaptor.getOperand().getType(); + + if (!operandType || !LLVM::isCompatibleType(operandType)) + return failure(); + + rewriter.replaceOpWithNewOp( + op, op.getType(), adaptor.getOperand(), llvm::fcNan); + return success(); + } +}; + +struct IsFiniteOpLowering : public ConvertOpToLLVMPattern { + using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern; + + LogicalResult + matchAndRewrite(math::IsFiniteOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto operandType = adaptor.getOperand().getType(); + + if (!operandType || !LLVM::isCompatibleType(operandType)) + return failure(); + + rewriter.replaceOpWithNewOp( + op, op.getType(), adaptor.getOperand(), llvm::fcFinite); + return success(); + } +}; + struct ConvertMathToLLVMPass : public impl::ConvertMathToLLVMPassBase { using Base::Base; @@ -309,6 +345,8 @@ void mlir::populateMathToLLVMConversionPatterns( patterns.add(converter, benefit); // clang-format off patterns.add< + IsNaNOpLowering, + IsFiniteOpLowering, AbsFOpLowering, AbsIOpLowering, CeilOpLowering, diff --git a/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir b/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir index 45a37af293890..974743a55932b 100644 --- a/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir +++ b/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir @@ -263,6 +263,26 @@ func.func @ctpop_scalable_vector(%arg0 : vector<[4]xi32>) -> vector<[4]xi32> { // ----- +// CHECK-LABEL: func @isnan_double( +// CHECK-SAME: f64 +func.func @isnan_double(%arg0 : f64) { + // CHECK: 
"llvm.intr.is.fpclass"(%arg0) <{bit = 3 : i32}> : (f64) -> i1 + %0 = math.isnan %arg0 : f64 + func.return +} + +// ----- + +// CHECK-LABEL: func @isfinite_double( +// CHECK-SAME: f64 +func.func @isfinite_double(%arg0 : f64) { + // CHECK: "llvm.intr.is.fpclass"(%arg0) <{bit = 504 : i32}> : (f64) -> i1 + %0 = math.isfinite %arg0 : f64 + func.return +} + +// ----- + // CHECK-LABEL: func @rsqrt_double( // CHECK-SAME: f64 func.func @rsqrt_double(%arg0 : f64) { diff --git a/mlir/test/Conversion/MathToROCDL/math-to-rocdl.mlir b/mlir/test/Conversion/MathToROCDL/math-to-rocdl.mlir index 9448304f11dbd..313d7b086731e 100644 --- a/mlir/test/Conversion/MathToROCDL/math-to-rocdl.mlir +++ b/mlir/test/Conversion/MathToROCDL/math-to-rocdl.mlir @@ -516,6 +516,20 @@ module { // ----- +module @test_module { + // CHECK: llvm.func @__ocml_sin_f16(f16) -> f16 + // CHECK-LABEL: func @math_sin_vector_0d + func.func @math_sin_vector_0d(%arg : vector) -> vector { + // CHECK: llvm.extractelement {{.*}} : vector<1xf16> + // CHECK: llvm.call @__ocml_sin_f16(%{{.*}}) : (f16) -> f16 + // CHECK: llvm.insertelement {{.*}} : vector<1xf16> + %result = math.sin %arg : vector + func.return %result : vector + } +} + +// ----- + module @test_module { // CHECK: llvm.func @__ocml_sin_f16(f16) -> f16 // CHECK-LABEL: func @math_sin_vector_1d diff --git a/mlir/test/Dialect/Tosa/ops.mlir b/mlir/test/Dialect/Tosa/ops.mlir index 860883e135750..383512e6fc454 100644 --- a/mlir/test/Dialect/Tosa/ops.mlir +++ b/mlir/test/Dialect/Tosa/ops.mlir @@ -346,7 +346,15 @@ func.func @test_mul_scalar_with_unranked_output(%arg0: tensor, %arg1: tenso } // ----- -// CHECK-LABEL: mul +// CHECK-LABEL: test_mul_scalar +func.func @test_mul_scalar(%arg0: tensor, %arg1: tensor) -> tensor { + %shift = "tosa.const"() <{value = dense<0> : tensor<1xi8>}> : () -> tensor<1xi8> + %0 = tosa.mul %arg0, %arg1, %shift : (tensor, tensor, tensor<1xi8>) -> tensor + return %0 : tensor +} + +// ----- +// CHECK-LABEL: test_mul func.func 
@test_mul(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x1x3xf32>) -> tensor<13x21x3xf32> { %shift = "tosa.const"() <{value = dense<0> : tensor<1xi8>}> : () -> tensor<1xi8> %0 = tosa.mul %arg0, %arg1, %shift : (tensor<13x21x3xf32>, tensor<13x1x3xf32>, tensor<1xi8>) -> tensor<13x21x3xf32> diff --git a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir index ba0169c156c3d..fdd6d16b3f1d0 100644 --- a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir +++ b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir @@ -1481,3 +1481,13 @@ func.func @test_multiple_non_inferrable_consumers(%arg0: tensor<1x2x8xf32>) { %expanded_1 = tensor.expand_shape %0 [[0], [1, 2], [3]] output_shape [%dim, 1, 4, 8] : tensor into tensor return } + +// ----- +// CHECK-LABEL: test_mul_scalar +func.func @test_mul_scalar(%arg0: tensor, %arg1: tensor) -> tensor<*xf32> { + // CHECK: %[[SHIFT:.*]] = "tosa.const"() <{value = dense<0> : tensor<1xi8>}> : () -> tensor<1xi8> + // CHECK: tosa.mul %arg0, %arg1, %[[SHIFT]] : (tensor, tensor, tensor<1xi8>) -> tensor + %shift = "tosa.const"() <{value = dense<0> : tensor<1xi8>}> : () -> tensor<1xi8> + %0 = tosa.mul %arg0, %arg1, %shift : (tensor, tensor, tensor<1xi8>) -> tensor<*xf32> + return %0 : tensor<*xf32> +} diff --git a/mlir/utils/generate-test-checks.py b/mlir/utils/generate-test-checks.py index 8faa425beace1..749bfa13fe734 100755 --- a/mlir/utils/generate-test-checks.py +++ b/mlir/utils/generate-test-checks.py @@ -159,7 +159,7 @@ def get_num_ssa_results(input_line): # Process a line of input that has been split at each SSA identifier '%'. -def process_line(line_chunks, variable_namer): +def process_line(line_chunks, variable_namer, strict_name_re=False): output_line = "" # Process the rest that contained an SSA value name. @@ -180,7 +180,14 @@ def process_line(line_chunks, variable_namer): else: # Otherwise, generate a new variable. 
variable = variable_namer.generate_name(ssa_name) - output_line += "%[[" + variable + ":.*]]" + if strict_name_re: + # Use stricter regexp for the variable name, if requested. + # Greedy matching may cause issues with the generic '.*' + # regexp when the checks are split across several + # lines (e.g. for CHECK-SAME). + output_line += "%[[" + variable + ":" + SSA_RE_STR + "]]" + else: + output_line += "%[[" + variable + ":.*]]" # Append the non named group. output_line += chunk[len(ssa_name) :] @@ -390,7 +397,9 @@ def main(): output_line += " " * len(ssa_split[0]) # Process the rest of the line. - output_line += process_line([argument], variable_namer) + output_line += process_line( + [argument], variable_namer, strict_name_re=True + ) # Append the output line. output_segments[-1].append(output_line) diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel index 4695489ba8fa4..e4b1cffcea782 100644 --- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel @@ -8963,6 +8963,7 @@ cc_library( ":LLVMDialect", ":MathDialect", ":Pass", + "//llvm:Support", ], ) @@ -12243,7 +12244,7 @@ gentbl_cc_library( ) gentbl_cc_library( - name = "MLIRTosaEnumsIncGen", + name = "TosaEnumsIncGen", tbl_outs = [ ( ["-gen-enum-decls"], @@ -12256,10 +12257,11 @@ gentbl_cc_library( ], tblgen = ":mlir-tblgen", td_file = "include/mlir/Dialect/Tosa/IR/TosaOpBase.td", + deps = [":TosaDialectTdFiles"], ) gentbl_cc_library( - name = "MLIRTosaAvailabilityIncGen", + name = "TosaAvailabilityIncGen", tbl_outs = [ ( ["-gen-avail-interface-decls"], @@ -12276,6 +12278,7 @@ gentbl_cc_library( ], tblgen = ":mlir-tblgen", td_file = "include/mlir/Dialect/Tosa/IR/TosaOps.td", + deps = [":TosaDialectTdFiles"], ) gentbl_cc_library( @@ -12377,8 +12380,10 @@ cc_library( ":SideEffectInterfaces", ":Support", ":TensorDialect", + ":TosaAvailabilityIncGen", ":TosaDialectBytecodeGen", ":TosaDialectIncGen", + 
":TosaEnumsIncGen", ":TosaInterfacesIncGen", ":TosaPassIncGen", ":TransformUtils",