diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 9e8944d1114b8..6e162746bb424 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -170,6 +170,7 @@ struct MissingFeatures { static bool alignCXXRecordDecl() { return false; } static bool armComputeVolatileBitfields() { return false; } static bool asmLabelAttr() { return false; } + static bool assignMemcpyizer() { return false; } static bool astVarDeclInterface() { return false; } static bool attributeNoBuiltin() { return false; } static bool bitfields() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp index d3888baea5d5e..7c720681e0006 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp @@ -85,13 +85,13 @@ RValue CIRGenFunction::emitCXXMemberOrOperatorMemberCallExpr( return RValue::get(nullptr); } - bool trivialForCodegen = - md->isTrivial() || (md->isDefaulted() && md->getParent()->isUnion()); - bool trivialAssignment = - trivialForCodegen && - (md->isCopyAssignmentOperator() || md->isMoveAssignmentOperator()) && - !md->getParent()->mayInsertExtraPadding(); - (void)trivialAssignment; + // Note on trivial assignment + // -------------------------- + // Classic codegen avoids generating the trivial copy/move assignment operator + // when it isn't necessary, choosing instead to just produce IR with an + // equivalent effect. We have chosen not to do that in CIR, instead emitting + // trivial copy/move assignment operators and allowing later transformations + // to optimize them away if appropriate. // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment // operator before the LHS. 
@@ -99,9 +99,10 @@ RValue CIRGenFunction::emitCXXMemberOrOperatorMemberCallExpr( CallArgList *rtlArgs = nullptr; if (auto *oce = dyn_cast<CXXOperatorCallExpr>(ce)) { if (oce->isAssignmentOp()) { - cgm.errorNYI( - oce->getSourceRange(), - "emitCXXMemberOrOperatorMemberCallExpr: assignment operator"); + rtlArgs = &rtlArgStorage; + emitCallArgs(*rtlArgs, md->getType()->castAs<FunctionProtoType>(), + drop_begin(ce->arguments(), 1), ce->getDirectCallee(), + /*ParamsToSkip*/ 0); } } @@ -121,19 +122,9 @@ RValue CIRGenFunction::emitCXXMemberOrOperatorMemberCallExpr( return RValue::get(nullptr); } - if (trivialForCodegen) { - if (isa<CXXDestructorDecl>(md)) - return RValue::get(nullptr); - - if (trivialAssignment) { - cgm.errorNYI(ce->getSourceRange(), - "emitCXXMemberOrOperatorMemberCallExpr: trivial assignment"); - return RValue::get(nullptr); - } - - assert(md->getParent()->mayInsertExtraPadding() && - "unknown trivial member function"); - } + if ((md->isTrivial() || (md->isDefaulted() && md->getParent()->isUnion())) && + isa<CXXDestructorDecl>(md)) + return RValue::get(nullptr); // Compute the function type we're calling const CXXMethodDecl *calleeDecl = md; diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 278cc8931f308..da8166a596d42 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -258,6 +258,31 @@ void CIRGenFunction::emitDelegateCXXConstructorCall( /*Delegating=*/true, thisAddr, delegateArgs, loc); } +void CIRGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &args) { + const auto *assignOp = cast<CXXMethodDecl>(curGD.getDecl()); + assert(assignOp->isCopyAssignmentOperator() || + assignOp->isMoveAssignmentOperator()); + const Stmt *rootS = assignOp->getBody(); + assert(isa<CompoundStmt>(rootS) && + "Body of an implicit assignment operator should be compound stmt."); + const auto *rootCS = cast<CompoundStmt>(rootS); + + assert(!cir::MissingFeatures::incrementProfileCounter()); + assert(!cir::MissingFeatures::runCleanupsScope()); + + // Classic codegen uses a special class to
attempt to replace member + // initializers with memcpy. We could possibly defer that to the + // lowering or optimization phases to keep the memory accesses more + // explicit. For now, we don't insert memcpy at all, though in some + // cases the AST contains a call to memcpy. + assert(!cir::MissingFeatures::assignMemcpyizer()); + for (Stmt *s : rootCS->body()) + if (emitStmt(s, /*useCurrentScope=*/true).failed()) + cgm.errorNYI(s->getSourceRange(), + std::string("emitImplicitAssignmentOperatorBody: ") + + s->getStmtClassName()); +} + void CIRGenFunction::emitDelegatingCXXConstructorCall( const CXXConstructorDecl *ctor, const FunctionArgList &args) { assert(ctor->isDelegatingConstructor()); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index c029853929a58..c4efabd6b12ab 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -462,21 +462,23 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn, startFunction(gd, retTy, fn, funcType, args, loc, bodyRange.getBegin()); - if (isa<CXXDestructorDecl>(funcDecl)) + if (isa<CXXDestructorDecl>(funcDecl)) { getCIRGenModule().errorNYI(bodyRange, "C++ destructor definition"); - else if (isa<CXXConstructorDecl>(funcDecl)) + } else if (isa<CXXConstructorDecl>(funcDecl)) { emitConstructorBody(args); - else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice && - funcDecl->hasAttr<CUDAGlobalAttr>()) + } else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice && + funcDecl->hasAttr<CUDAGlobalAttr>()) { getCIRGenModule().errorNYI(bodyRange, "CUDA kernel"); - else if (isa<CXXMethodDecl>(funcDecl) && - cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker()) + } else if (isa<CXXMethodDecl>(funcDecl) && + cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker()) { getCIRGenModule().errorNYI(bodyRange, "Lambda static invoker"); - else if (funcDecl->isDefaulted() && isa<CXXMethodDecl>(funcDecl) && - (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator() || - cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator())) - getCIRGenModule().errorNYI(bodyRange, "Default assignment operator"); - else if (body) { + } else if
(funcDecl->isDefaulted() && isa<CXXMethodDecl>(funcDecl) && + (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator() || + cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator())) { + // Implicit copy-assignment gets the same special treatment as implicit + // copy-constructors. + emitImplicitAssignmentOperatorBody(args); + } else if (body) { if (mlir::failed(emitFunctionBody(body))) { fn.erase(); return nullptr; diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 2e54243f18cff..6bfbe3ef5516a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -867,6 +867,8 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::LogicalResult emitFunctionBody(const clang::Stmt *body); + void emitImplicitAssignmentOperatorBody(FunctionArgList &args); + void emitInitializerForField(clang::FieldDecl *field, LValue lhs, clang::Expr *init); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index f24bee44f26a7..4f52a670c0039 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -392,12 +392,6 @@ void CIRGenModule::emitGlobal(clang::GlobalDecl gd) { void CIRGenModule::emitGlobalFunctionDefinition(clang::GlobalDecl gd, mlir::Operation *op) { auto const *funcDecl = cast<FunctionDecl>(gd.getDecl()); - if (funcDecl->getIdentifier() == nullptr) { - errorNYI(funcDecl->getSourceRange().getBegin(), - "function definition with a non-identifier for a name"); - return; - } - const CIRGenFunctionInfo &fi = getTypes().arrangeGlobalDeclaration(gd); cir::FuncType funcType = getTypes().getFunctionType(fi); cir::FuncOp funcOp = dyn_cast_if_present<cir::FuncOp>(op); diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp new file mode 100644 index 0000000000000..3e509f59368b6 --- /dev/null +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -0,0 +1,144 @@ +// RUN: %clang_cc1 -std=c++11 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s
-o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -std=c++11 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t-cir.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t-cir.ll %s +// RUN: %clang_cc1 -std=c++11 -triple aarch64-none-linux-android21 -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ll %s + +class x { +public: int operator=(int); +}; +void a() { + x a; + a = 1u; +} + +// CIR: cir.func private @_ZN1xaSEi(!cir.ptr, !s32i) +// CIR: cir.func{{.*}} @_Z1av() +// CIR: %[[A_ADDR:.*]] = cir.alloca !rec_x, !cir.ptr, ["a"] +// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CIR: %[[ONE_CAST:.*]] = cir.cast(integral, %[[ONE]] : !u32i), !s32i +// CIR: %[[RET:.*]] = cir.call @_ZN1xaSEi(%[[A_ADDR]], %[[ONE_CAST]]) : (!cir.ptr, !s32i) -> !s32i + +// LLVM: define{{.*}} @_Z1av() +// OGCG: define{{.*}} @_Z1av() + +void f(int i, int j) { + (i += j) = 17; +} + +// CIR: cir.func{{.*}} @_Z1fii(%arg0: !s32i {{.*}}, %arg1: !s32i {{.*}}) +// CIR: %[[I_ADDR:.*]] = cir.alloca !s32i, !cir.ptr, ["i", init] +// CIR: %[[J_ADDR:.*]] = cir.alloca !s32i, !cir.ptr, ["j", init] +// CIR: cir.store %arg0, %[[I_ADDR]] : !s32i, !cir.ptr +// CIR: cir.store %arg1, %[[J_ADDR]] : !s32i, !cir.ptr +// CIR: %[[SEVENTEEN:.*]] = cir.const #cir.int<17> : !s32i +// CIR: %[[J_LOAD:.*]] = cir.load align(4) %[[J_ADDR]] : !cir.ptr, !s32i +// CIR: %[[I_LOAD:.*]] = cir.load align(4) %[[I_ADDR]] : !cir.ptr, !s32i +// CIR: %[[ADD:.*]] = cir.binop(add, %[[I_LOAD]], %[[J_LOAD]]) nsw : !s32i +// CIR: cir.store align(4) %[[ADD]], %[[I_ADDR]] : !s32i, !cir.ptr +// CIR: cir.store align(4) %[[SEVENTEEN]], %[[I_ADDR]] : !s32i, !cir.ptr +// CIR: cir.return + +// Ensure that we use memcpy when we would have selected a trivial assignment +// operator, even for a non-trivially-copyable type. 
+struct A { + A &operator=(const A&); +}; +struct B { + B(const B&); + B &operator=(const B&) = default; + int n; +}; +struct C { + A a; + B b[16]; +}; +void copy_c(C &c1, C &c2) { + c1 = c2; +} + +// CIR: cir.func private @_ZN1AaSERKS_(!cir.ptr<!rec_A>, !cir.ptr<!rec_A>) -> !cir.ptr<!rec_A> +// CIR: cir.func private @memcpy(!cir.ptr<!void>, !cir.ptr<!void>, !u64i) -> !cir.ptr<!void> + +// Implicit assignment operator for C. + +// CIR: cir.func comdat linkonce_odr @_ZN1CaSERKS_(%arg0: !cir.ptr<!rec_C> {{.*}}, %arg1: !cir.ptr<!rec_C> {{.*}}) -> !cir.ptr<!rec_C> +// CIR: %[[THIS_ADDR:.*]] = cir.alloca !cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!rec_C>>, ["this", init] +// CIR: %[[ARG1_ADDR:.*]] = cir.alloca !cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!rec_C>>, ["", init, const] +// CIR: %[[RET_ADDR:.*]] = cir.alloca !cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!rec_C>>, ["__retval"] +// CIR: cir.store %arg0, %[[THIS_ADDR]] +// CIR: cir.store %arg1, %[[ARG1_ADDR]] +// CIR: %[[THIS:.*]] = cir.load{{.*}} %[[THIS_ADDR]] +// CIR: %[[A_MEMBER:.*]] = cir.get_member %[[THIS]][0] {name = "a"} +// CIR: %[[ARG1_LOAD:.*]] = cir.load{{.*}} %[[ARG1_ADDR]] +// CIR: %[[A_MEMBER_2:.*]] = cir.get_member %[[ARG1_LOAD]][0] {name = "a"} +// CIR: %[[C_A:.*]] = cir.call @_ZN1AaSERKS_(%[[A_MEMBER]], %[[A_MEMBER_2]]) +// CIR: %[[B_MEMBER:.*]] = cir.get_member %[[THIS]][1] {name = "b"} +// CIR: %[[B_VOID_PTR:.*]] = cir.cast(bitcast, %[[B_MEMBER]] : !cir.ptr<!cir.array<!rec_B x 16>>), !cir.ptr<!void> +// CIR: %[[RET_LOAD:.*]] = cir.load %[[ARG1_ADDR]] +// CIR: %[[B_MEMBER_2:.*]] = cir.get_member %[[RET_LOAD]][1] {name = "b"} +// CIR: %[[B_VOID_PTR_2:.*]] = cir.cast(bitcast, %[[B_MEMBER_2]] : !cir.ptr<!cir.array<!rec_B x 16>>), !cir.ptr<!void> +// CIR: %[[SIZE:.*]] = cir.const #cir.int<64> : !u64i +// CIR: %[[COUNT:.*]] = cir.call @memcpy(%[[B_VOID_PTR]], %[[B_VOID_PTR_2]], %[[SIZE]]) +// CIR: cir.store %[[THIS]], %[[RET_ADDR]] +// CIR: %[[RET_VAL:.*]] = cir.load{{.*}} %[[RET_ADDR]] +// CIR: cir.return %[[RET_VAL]] + +// CIR: cir.func{{.*}} @_Z6copy_cR1CS0_(%arg0: !cir.ptr<!rec_C> {{.*}}, %arg1: !cir.ptr<!rec_C> {{.*}}) +// CIR: %[[C1_ADDR:.*]] = cir.alloca !cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!rec_C>>, ["c1", init, const] +// CIR: %[[C2_ADDR:.*]] = cir.alloca
!cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!rec_C>>, ["c2", init, const] +// CIR: cir.store %arg0, %[[C1_ADDR]] +// CIR: cir.store %arg1, %[[C2_ADDR]] +// CIR: %[[C2_LOAD:.*]] = cir.load{{.*}} %[[C2_ADDR]] +// CIR: %[[C1_LOAD:.*]] = cir.load{{.*}} %[[C1_ADDR]] +// CIR: %[[RET:.*]] = cir.call @_ZN1CaSERKS_(%[[C1_LOAD]], %[[C2_LOAD]]) + +struct D { + D &operator=(const D&); +}; +struct E { + D &get_d_ref() { return d; } +private: + D d; +}; + +void copy_ref_to_ref(E &e1, E &e2) { + e1.get_d_ref() = e2.get_d_ref(); +} + +// The call to e2.get_d_ref() must occur before the call to e1.get_d_ref(). + +// CIR: cir.func{{.*}} @_Z15copy_ref_to_refR1ES0_(%arg0: !cir.ptr<!rec_E> {{.*}}, %arg1: !cir.ptr<!rec_E> {{.*}}) +// CIR: %[[E1_ADDR:.*]] = cir.alloca !cir.ptr<!rec_E>, !cir.ptr<!cir.ptr<!rec_E>>, ["e1", init, const] +// CIR: %[[E2_ADDR:.*]] = cir.alloca !cir.ptr<!rec_E>, !cir.ptr<!cir.ptr<!rec_E>>, ["e2", init, const] +// CIR: cir.store %arg0, %[[E1_ADDR]] : !cir.ptr<!rec_E>, !cir.ptr<!cir.ptr<!rec_E>> +// CIR: cir.store %arg1, %[[E2_ADDR]] : !cir.ptr<!rec_E>, !cir.ptr<!cir.ptr<!rec_E>> +// CIR: %[[E2:.*]] = cir.load %[[E2_ADDR]] +// CIR: %[[D2_REF:.*]] = cir.call @_ZN1E9get_d_refEv(%[[E2]]) +// CIR: %[[E1:.*]] = cir.load %[[E1_ADDR]] +// CIR: %[[D1_REF:.*]] = cir.call @_ZN1E9get_d_refEv(%[[E1]]) +// CIR: %[[D1_REF_2:.*]] = cir.call @_ZN1DaSERKS_(%[[D1_REF]], %[[D2_REF]]) +// CIR: cir.return + +// LLVM: define{{.*}} void @_Z15copy_ref_to_refR1ES0_(ptr %[[ARG0:.*]], ptr %[[ARG1:.*]]) { +// LLVM: %[[E1_ADDR:.*]] = alloca ptr +// LLVM: %[[E2_ADDR:.*]] = alloca ptr +// LLVM: store ptr %[[ARG0]], ptr %[[E1_ADDR]] +// LLVM: store ptr %[[ARG1]], ptr %[[E2_ADDR]] +// LLVM: %[[E2:.*]] = load ptr, ptr %[[E2_ADDR]] +// LLVM: %[[D2_REF:.*]] = call ptr @_ZN1E9get_d_refEv(ptr %[[E2]]) +// LLVM: %[[E1:.*]] = load ptr, ptr %[[E1_ADDR]] +// LLVM: %[[D1_REF:.*]] = call ptr @_ZN1E9get_d_refEv(ptr %[[E1]]) +// LLVM: %[[D1_REF_2:.*]] = call ptr @_ZN1DaSERKS_(ptr %[[D1_REF]], ptr %[[D2_REF]]) + +// OGCG: define{{.*}} void @_Z15copy_ref_to_refR1ES0_(ptr{{.*}} %[[ARG0:.*]], ptr{{.*}} %[[ARG1:.*]]) +// OGCG: %[[E1_ADDR:.*]] = alloca ptr +//
OGCG: %[[E2_ADDR:.*]] = alloca ptr +// OGCG: store ptr %[[ARG0]], ptr %[[E1_ADDR]] +// OGCG: store ptr %[[ARG1]], ptr %[[E2_ADDR]] +// OGCG: %[[E2:.*]] = load ptr, ptr %[[E2_ADDR]] +// OGCG: %[[D2_REF:.*]] = call{{.*}} ptr @_ZN1E9get_d_refEv(ptr{{.*}} %[[E2]]) +// OGCG: %[[E1:.*]] = load ptr, ptr %[[E1_ADDR]] +// OGCG: %[[D1_REF:.*]] = call{{.*}} ptr @_ZN1E9get_d_refEv(ptr{{.*}} %[[E1]]) +// OGCG: %[[D1_REF_2:.*]] = call{{.*}} ptr @_ZN1DaSERKS_(ptr{{.*}} %[[D1_REF]], ptr{{.*}} %[[D2_REF]])