From ae3ed3605218e684ed3888290eca31bf88a53554 Mon Sep 17 00:00:00 2001 From: Quan Anh Mai Date: Sun, 27 Apr 2025 19:55:13 +0700 Subject: [PATCH 1/5] remove larval InlineTypeNode --- src/hotspot/share/opto/callGenerator.cpp | 2 +- src/hotspot/share/opto/compile.cpp | 55 ++++++- src/hotspot/share/opto/doCall.cpp | 140 ++++++++++------ src/hotspot/share/opto/graphKit.cpp | 77 +++------ src/hotspot/share/opto/graphKit.hpp | 9 +- src/hotspot/share/opto/inlinetypenode.cpp | 154 +++--------------- src/hotspot/share/opto/inlinetypenode.hpp | 29 +--- src/hotspot/share/opto/library_call.cpp | 62 +++---- src/hotspot/share/opto/macro.cpp | 17 +- src/hotspot/share/opto/memnode.cpp | 1 - src/hotspot/share/opto/parse.hpp | 5 +- src/hotspot/share/opto/parse1.cpp | 80 +++++---- src/hotspot/share/opto/parse2.cpp | 12 +- src/hotspot/share/opto/parse3.cpp | 76 ++------- src/hotspot/share/opto/parseHelper.cpp | 5 - test/hotspot/jtreg/ProblemList.txt | 1 - .../inlinetypes/TestValueConstruction.java | 135 +++++++++++---- 17 files changed, 419 insertions(+), 441 deletions(-) diff --git a/src/hotspot/share/opto/callGenerator.cpp b/src/hotspot/share/opto/callGenerator.cpp index 7890becf037..0bcb3afc80a 100644 --- a/src/hotspot/share/opto/callGenerator.cpp +++ b/src/hotspot/share/opto/callGenerator.cpp @@ -722,7 +722,7 @@ void CallGenerator::do_late_inline_helper() { Node* buffer_oop = nullptr; ciMethod* inline_method = inline_cg()->method(); ciType* return_type = inline_method->return_type(); - if (!call->tf()->returns_inline_type_as_fields() && is_mh_late_inline() && + if (!call->tf()->returns_inline_type_as_fields() && return_type->is_inlinetype() && return_type->as_inline_klass()->can_be_returned_as_fields()) { // Allocate a buffer for the inline type returned as fields because the caller expects an oop return. // Do this before the method handle call in case the buffer allocation triggers deoptimization and diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp index 17fe4ff8269..389404a4da7 100644 --- a/src/hotspot/share/opto/compile.cpp +++ b/src/hotspot/share/opto/compile.cpp @@ -2843,16 +2843,47 @@ void Compile::Optimize() { if (failing()) return; + { + // Eliminate some macro nodes before EA to reduce analysis pressure + PhaseMacroExpand mexp(igvn); + mexp.eliminate_macro_nodes(); + if (failing()) { + return; + } + igvn.set_delay_transform(false); + igvn.optimize(); + if (failing()) { + return; + } + print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2); + } + // Perform escape analysis if (do_escape_analysis() && ConnectionGraph::has_candidates(this)) { if (has_loops()) { // Cleanup graph (remove dead nodes). 
TracePhase tp(_t_idealLoop); PhaseIdealLoop::optimize(igvn, LoopOptsMaxUnroll); - if (failing()) return; + if (failing()) { + return; + } + print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2); + + // Eliminate some macro nodes before EA to reduce analysis pressure + PhaseMacroExpand mexp(igvn); + mexp.eliminate_macro_nodes(); + if (failing()) { + return; + } + igvn.set_delay_transform(false); + igvn.optimize(); + if (failing()) { + return; + } + print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2); } + bool progress; - print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2); do { ConnectionGraph::do_analysis(this, &igvn); @@ -2870,12 +2901,15 @@ void Compile::Optimize() { TracePhase tp(_t_macroEliminate); PhaseMacroExpand mexp(igvn); mexp.eliminate_macro_nodes(); - if (failing()) return; + if (failing()) { + return; + } igvn.set_delay_transform(false); igvn.optimize(); - if (failing()) return; - + if (failing()) { + return; + } print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2); } @@ -2976,8 +3010,17 @@ void Compile::Optimize() { { TracePhase tp(_t_macroExpand); + PhaseMacroExpand mex(igvn); + // Last attempt to eliminate macro nodes. + mex.eliminate_macro_nodes(); + if (failing()) { + return; + } + igvn.set_delay_transform(false); + igvn.optimize(); + igvn.set_delay_transform(true); + print_method(PHASE_BEFORE_MACRO_EXPANSION, 3); - PhaseMacroExpand mex(igvn); if (mex.expand_macro_nodes()) { assert(failing(), "must bail out w/ explicit message"); return; diff --git a/src/hotspot/share/opto/doCall.cpp b/src/hotspot/share/opto/doCall.cpp index 4580d0ee6b7..5093ad10648 100644 --- a/src/hotspot/share/opto/doCall.cpp +++ b/src/hotspot/share/opto/doCall.cpp @@ -25,6 +25,7 @@ #include "ci/ciCallSite.hpp" #include "ci/ciMethodHandle.hpp" #include "ci/ciSymbols.hpp" +#include "classfile/vmIntrinsics.hpp" #include "classfile/vmSymbols.hpp" #include "compiler/compileBroker.hpp" #include "compiler/compileLog.hpp" @@ -88,6 +89,64 @@ static void trace_type_profile(Compile* C, ciMethod* method, JVMState* jvms, } } +static bool arg_can_be_larval(ciMethod* callee, int arg_idx) { + if (callee->is_object_constructor() && arg_idx == 0) { + return true; + } + + if (arg_idx != 1) { + return false; + } + + switch (callee->intrinsic_id()) { + case vmIntrinsicID::_finishPrivateBuffer: + case vmIntrinsicID::_putBoolean: + case vmIntrinsicID::_putBooleanOpaque: + case vmIntrinsicID::_putBooleanRelease: + case vmIntrinsicID::_putBooleanVolatile: + case vmIntrinsicID::_putByte: + case vmIntrinsicID::_putByteOpaque: + case vmIntrinsicID::_putByteRelease: + case vmIntrinsicID::_putByteVolatile: + case vmIntrinsicID::_putChar: + case vmIntrinsicID::_putCharOpaque: + case vmIntrinsicID::_putCharRelease: + case vmIntrinsicID::_putCharUnaligned: + case vmIntrinsicID::_putCharVolatile: + case vmIntrinsicID::_putShort: + case vmIntrinsicID::_putShortOpaque: + case vmIntrinsicID::_putShortRelease: + case vmIntrinsicID::_putShortUnaligned: + case vmIntrinsicID::_putShortVolatile: + case vmIntrinsicID::_putInt: + case vmIntrinsicID::_putIntOpaque: + case vmIntrinsicID::_putIntRelease: + case vmIntrinsicID::_putIntUnaligned: + case vmIntrinsicID::_putIntVolatile: + case vmIntrinsicID::_putLong: + case vmIntrinsicID::_putLongOpaque: + case vmIntrinsicID::_putLongRelease: + case vmIntrinsicID::_putLongUnaligned: + case vmIntrinsicID::_putLongVolatile: + case vmIntrinsicID::_putFloat: + case vmIntrinsicID::_putFloatOpaque: + case vmIntrinsicID::_putFloatRelease: + case vmIntrinsicID::_putFloatVolatile: + case vmIntrinsicID::_putDouble: + case 
vmIntrinsicID::_putDoubleOpaque:
+    case vmIntrinsicID::_putDoubleRelease:
+    case vmIntrinsicID::_putDoubleVolatile:
+    case vmIntrinsicID::_putReference:
+    case vmIntrinsicID::_putReferenceOpaque:
+    case vmIntrinsicID::_putReferenceRelease:
+    case vmIntrinsicID::_putReferenceVolatile:
+    case vmIntrinsicID::_putValue:
+      return true;
+    default:
+      return false;
+  }
+}
+
 CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch,
                                        JVMState* jvms, bool allow_inline,
                                        float prof_factor, ciKlass* speculative_receiver_type,
@@ -645,6 +704,15 @@ void Parse::do_call() {
     set_stack(sp() - nargs, casted_receiver);
   }
 
+  // Scalarize value objects passed into this invocation because we know that they are not larval
+  for (int arg_idx = 0; arg_idx < nargs; arg_idx++) {
+    if (arg_can_be_larval(callee, arg_idx)) {
+      continue;
+    }
+
+    cast_non_larval(peek(nargs - 1 - arg_idx));
+  }
+
   // Note:  It's OK to try to inline a virtual call.
   // The call generator will not attempt to inline a polymorphic call
   // unless it knows how to optimize the receiver dispatch.
@@ -807,57 +875,31 @@ void Parse::do_call() {
   if (is_reference_type(ct)) {
     record_profiled_return_for_speculation();
   }
-  if (rtype->is_inlinetype() && !peek()->is_InlineType()) {
-    Node* retnode = pop();
-    retnode = InlineTypeNode::make_from_oop(this, retnode, rtype->as_inline_klass());
-    push_node(T_OBJECT, retnode);
+
+  if (!rtype->is_void() && cg->method()->intrinsic_id() != vmIntrinsicID::_makePrivateBuffer) {
+    Node* retnode = peek();
+    const Type* rettype = gvn().type(retnode);
+    if (rettype->is_inlinetypeptr() && !retnode->is_InlineType()) {
+      retnode = InlineTypeNode::make_from_oop(this, retnode, rettype->inline_klass());
+      dec_sp(1);
+      push(retnode);
+    }
   }
-  // Note that:
-  // - The caller map is the state just before the call of the currently parsed method with all arguments
-  //   on the stack. Therefore, we have caller_map->arg(0) == this.
-  // - local(0) contains the updated receiver after calling an inline type constructor.
-  // - Abstract value classes are not ciInlineKlass instances and thus abstract_value_klass->is_inlinetype() is false.
-  //   We use the bottom type of the receiver node to determine if we have a value class or not.
-  const bool is_current_method_inline_type_constructor =
-      // Is current method a constructor (i.e. <init>)?
-      _method->is_object_constructor() &&
-      // Is the holder of the current constructor method an inline type?
-      _caller->map()->argument(_caller, 0)->bottom_type()->is_inlinetypeptr();
-  assert(!is_current_method_inline_type_constructor || !cg->method()->is_object_constructor() || receiver != nullptr,
-         "must have valid receiver after calling another constructor");
-  if (is_current_method_inline_type_constructor &&
-      // Is the just called method an inline type constructor?
-      cg->method()->is_object_constructor() && receiver->bottom_type()->is_inlinetypeptr() &&
-      // AND:
-      // 1) ... invoked on the same receiver? Then it's another constructor on the same object doing the initialization.
-      (receiver == _caller->map()->argument(_caller, 0) ||
-       // 2) ... abstract? Then it's the call to the super constructor which eventually calls Object.<init> to
-       //    finish the initialization of this larval.
-       cg->method()->holder()->is_abstract() ||
-       // 3) ... Object.<init>? Then we know it's the final call to finish the larval initialization. Other
-       //    Object.<init> calls would have a non-inline-type receiver which we already excluded in the check above.
-       cg->method()->holder()->is_java_lang_Object())
-      ) {
-    assert(local(0)->is_InlineType() && receiver->bottom_type()->is_inlinetypeptr() && receiver->is_InlineType() &&
-           _caller->map()->argument(_caller, 0)->bottom_type()->inline_klass() == receiver->bottom_type()->inline_klass(),
-           "Unexpected receiver");
-    InlineTypeNode* updated_receiver = local(0)->as_InlineType();
-    InlineTypeNode* cloned_updated_receiver = updated_receiver->clone_if_required(&_gvn, _map);
-    cloned_updated_receiver->set_is_larval(false);
-    cloned_updated_receiver = _gvn.transform(cloned_updated_receiver)->as_InlineType();
-    // Receiver updated by the just called constructor. We need to update the map to make the effect visible. After
-    // the super() call, only the updated receiver in local(0) will be used from now on. Therefore, we do not need
-    // to update the original receiver 'receiver' but only the 'updated_receiver'.
-    replace_in_map(updated_receiver, cloned_updated_receiver);
-
-    if (_caller->has_method()) {
-      // If the current method is inlined, we also need to update the exit map to propagate the updated receiver
-      // to the caller map.
-      Node* receiver_in_caller = _caller->map()->argument(_caller, 0);
-      assert(receiver_in_caller->bottom_type()->inline_klass() == receiver->bottom_type()->inline_klass(),
-             "Receiver type mismatch");
-      _exits.map()->replace_edge(receiver_in_caller, cloned_updated_receiver, &_gvn);
+  if (cg->method()->is_object_constructor() && receiver != nullptr && gvn().type(receiver)->is_inlinetypeptr()) {
+    InlineTypeNode* non_larval = InlineTypeNode::make_from_oop(this, receiver, gvn().type(receiver)->inline_klass());
+    // Relinquish the oop input; we will delay the allocation to the point where it is needed
+    non_larval = non_larval->clone_if_required(&gvn(), nullptr);
+    non_larval->set_oop(gvn(), null());
+    non_larval->set_is_buffered(gvn(), false);
+    non_larval = gvn().transform(non_larval)->as_InlineType();
+
+    // Replace the larval object with the non-larval one in all call frames. This avoids keeping the
+    // oop alive until the outermost constructor exits.
+ JVMState* current = map()->jvms(); + while (current != nullptr) { + current->map()->replace_edge(receiver, non_larval); + current = current->caller(); } } } diff --git a/src/hotspot/share/opto/graphKit.cpp b/src/hotspot/share/opto/graphKit.cpp index 3f4ece1867c..b2f0d7f8068 100644 --- a/src/hotspot/share/opto/graphKit.cpp +++ b/src/hotspot/share/opto/graphKit.cpp @@ -990,14 +990,7 @@ void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) { out_jvms->set_locoff(p); if (!can_prune_locals) { for (j = 0; j < l; j++) { - Node* val = in_map->in(k + j); - // Check if there's a larval that has been written in the callee state (constructor) and update it in the caller state - if (callee_jvms != nullptr && val->is_InlineType() && val->as_InlineType()->is_larval() && - callee_jvms->method()->is_object_constructor() && val == in_map->argument(in_jvms, 0) && - val->bottom_type()->is_inlinetypeptr()) { - val = callee_jvms->map()->local(callee_jvms, 0); // Receiver - } - call->set_req(p++, val); + call->set_req(p++, in_map->in(k + j)); } } else { p += l; // already set to top above by add_req_batch @@ -1009,14 +1002,7 @@ void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) { out_jvms->set_stkoff(p); if (!can_prune_locals) { for (j = 0; j < l; j++) { - Node* val = in_map->in(k + j); - // Check if there's a larval that has been written in the callee state (constructor) and update it in the caller state - if (callee_jvms != nullptr && val->is_InlineType() && val->as_InlineType()->is_larval() && - callee_jvms->method()->is_object_constructor() && val == in_map->argument(in_jvms, 0) && - val->bottom_type()->is_inlinetypeptr()) { - val = callee_jvms->map()->local(callee_jvms, 0); // Receiver - } - call->set_req(p++, val); + call->set_req(p++, in_map->in(k + j)); } } else if (can_prune_locals && stack_slots_not_pruned != 0) { // Divide stack into {S0,...,S1}, where S0 is set to top. @@ -1515,6 +1501,21 @@ Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) { return cast; // Return casted value } +Node* GraphKit::cast_non_larval(Node* obj) { + if (obj->is_InlineType()) { + return obj; + } + + const Type* obj_type = gvn().type(obj); + if (!obj_type->is_inlinetypeptr()) { + return obj; + } + + Node* new_obj = InlineTypeNode::make_from_oop(this, obj, obj_type->inline_klass()); + replace_in_map(obj, new_obj); + return new_obj; +} + // Sometimes in intrinsics, we implicitly know an object is not null // (there's no actual null check) so we can cast it to not null. In // the course of optimizations, the input to the cast can become null. @@ -1920,28 +1921,7 @@ void GraphKit::set_arguments_for_java_call(CallJavaNode* call, bool is_late_inli continue; } else if (arg->is_InlineType()) { // Pass inline type argument via oop to callee - InlineTypeNode* inline_type = arg->as_InlineType(); - const ciMethod* method = call->method(); - ciInstanceKlass* holder = method->holder(); - const bool is_receiver = (i == TypeFunc::Parms); - const bool is_abstract_or_object_klass_constructor = method->is_object_constructor() && - (holder->is_abstract() || holder->is_java_lang_Object()); - const bool is_larval_receiver_on_super_constructor = is_receiver && is_abstract_or_object_klass_constructor; - bool must_init_buffer = true; - // We always need to buffer inline types when they are escaping. However, we can skip the actual initialization - // of the buffer if the inline type is a larval because we are going to update the buffer anyway which requires - // us to create a new one. 
But there is one special case where we are still required to initialize the buffer: - // When we have a larval receiver invoked on an abstract (value class) constructor or the Object constructor (that - // is not going to be inlined). After this call, the larval is completely initialized and thus not a larval anymore. - // We therefore need to force an initialization of the buffer to not lose all the field writes so far in case the - // buffer needs to be used (e.g. to read from when deoptimizing at runtime) or further updated in abstract super - // value class constructors which could have more fields to be initialized. Note that we do not need to - // initialize the buffer when invoking another constructor in the same class on a larval receiver because we - // have not initialized any fields, yet (this is done completely by the other constructor call). - if (inline_type->is_larval() && !is_larval_receiver_on_super_constructor) { - must_init_buffer = false; - } - arg = inline_type->buffer(this, true, must_init_buffer); + arg = arg->as_InlineType()->buffer(this, true); } if (t != Type::HALF) { arg_num++; @@ -2018,20 +1998,6 @@ Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_p } } - // We just called the constructor on a value type receiver. Reload it from the buffer - ciMethod* method = call->method(); - if (method->is_object_constructor() && !method->holder()->is_java_lang_Object()) { - InlineTypeNode* inline_type_receiver = call->in(TypeFunc::Parms)->isa_InlineType(); - if (inline_type_receiver != nullptr) { - assert(inline_type_receiver->is_larval(), "must be larval"); - assert(inline_type_receiver->is_allocated(&gvn()), "larval must be buffered"); - InlineTypeNode* reloaded = InlineTypeNode::make_from_oop(this, inline_type_receiver->get_oop(), - inline_type_receiver->bottom_type()->inline_klass()); - assert(!reloaded->is_larval(), "should not be larval anymore"); - replace_in_map(inline_type_receiver, reloaded); - } - } - return ret; } @@ -3458,7 +3424,7 @@ Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replac // If failure_control is supplied and not null, it is filled in with // the control edge for the cast failure. Otherwise, an appropriate // uncommon trap or exception is thrown. 
-Node* GraphKit::gen_checkcast(Node* obj, Node* superklass, Node* *failure_control, bool null_free) {
+Node* GraphKit::gen_checkcast(Node* obj, Node* superklass, Node* *failure_control, bool null_free, bool maybe_larval) {
   kill_dead_locals();             // Benefit all the uncommon traps
   const TypeKlassPtr* klass_ptr_type = _gvn.type(superklass)->is_klassptr();
   const Type* obj_type = _gvn.type(obj);
@@ -3474,6 +3440,9 @@ Node* GraphKit::gen_checkcast(Node* obj, Node* superklass, Node* *failure_contro
     return obj;
   }
 
+  // Else it must be a non-larval object
+  obj = cast_non_larval(obj);
+
   const TypeKlassPtr* improved_klass_ptr_type = klass_ptr_type->try_improve();
   const TypeOopPtr* toop = improved_klass_ptr_type->cast_to_exactness(false)->as_instance_type();
   bool safe_for_replace = (failure_control == nullptr);
@@ -3704,7 +3673,7 @@ Node* GraphKit::gen_checkcast(Node* obj, Node* superklass, Node* *failure_contro
 
   if (!stopped() && !res->is_InlineType()) {
     res = record_profiled_receiver_for_speculation(res);
-    if (toop->is_inlinetypeptr()) {
+    if (toop->is_inlinetypeptr() && !maybe_larval) {
       Node* vt = InlineTypeNode::make_from_oop(this, res, toop->inline_klass());
       res = vt;
       if (safe_for_replace) {
diff --git a/src/hotspot/share/opto/graphKit.hpp b/src/hotspot/share/opto/graphKit.hpp
index 0d2c8400cfd..0f06e78ec19 100644
--- a/src/hotspot/share/opto/graphKit.hpp
+++ b/src/hotspot/share/opto/graphKit.hpp
@@ -450,6 +450,13 @@ class GraphKit : public Phase {
   // Cast obj to not-null on this path
   Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
 
+  // If a larval object appears multiple times in the JVMS and we encounter a loop, it may be split
+  // into multiple Phis, and we cannot change all of them to non-larval when we invoke the
+  // constructor on one. The other case is that we don't know whether a parameter of an OSR
+  // compilation is larval or not. If such a maybe-larval object is passed into an operation that
+  // does not permit larval objects, we can be sure that it is not larval and scalarize it if it
+  // is a value object.
+  Node* cast_non_larval(Node* obj);
 
   // Replace all occurrences of one node by another.
   void replace_in_map(Node* old, Node* neww);
@@ -814,7 +821,7 @@ class GraphKit : public Phase {
 
   // Generate a check-cast idiom.  Used by both the check-cast bytecode
   // and the array-store bytecode
-  Node* gen_checkcast(Node *subobj, Node* superkls, Node* *failure_control = nullptr, bool null_free = false);
+  Node* gen_checkcast(Node *subobj, Node* superkls, Node* *failure_control = nullptr, bool null_free = false, bool maybe_larval = false);
 
   // Inline types
   Node* mark_word_test(Node* obj, uintptr_t mask_val, bool eq, bool check_lock = true);
diff --git a/src/hotspot/share/opto/inlinetypenode.cpp b/src/hotspot/share/opto/inlinetypenode.cpp
index 7a5c8da7bdd..0e29b81b14c 100644
--- a/src/hotspot/share/opto/inlinetypenode.cpp
+++ b/src/hotspot/share/opto/inlinetypenode.cpp
@@ -328,18 +328,6 @@ uint InlineTypeNode::add_fields_to_safepoint(Unique_Node_List& worklist, Node_Li
 }
 
 void InlineTypeNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt) {
-  // We should not scalarize larvals in debug info of their constructor calls because their fields could still be
-  // updated. If we scalarize and update the fields in the constructor, the updates won't be visible in the caller after
-  // deoptimization because the scalarized field values are local to the caller. We need to use a buffer to make the
-  // updates visible to the outside.
- if (is_larval() && sfpt->is_CallJava() && sfpt->as_CallJava()->method() != nullptr && - sfpt->as_CallJava()->method()->is_object_constructor() && bottom_type()->is_inlinetypeptr() && - sfpt->in(TypeFunc::Parms) == this) { - // Receiver is always buffered because it's passed as oop, see special case in CompiledEntrySignature::compute_calling_conventions(). - assert(is_allocated(igvn), "receiver must be allocated"); - return; - } - JVMState* jvms = sfpt->jvms(); assert(jvms != nullptr, "missing JVMS"); uint first_ind = (sfpt->req() - jvms->scloff()); @@ -902,7 +890,7 @@ void InlineTypeNode::store(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass } } -InlineTypeNode* InlineTypeNode::buffer(GraphKit* kit, bool safe_for_replace, bool must_init) { +InlineTypeNode* InlineTypeNode::buffer(GraphKit* kit, bool safe_for_replace) { if (kit->gvn().find_int_con(get_is_buffered(), 0) == 1) { // Already buffered return this; @@ -953,23 +941,13 @@ InlineTypeNode* InlineTypeNode::buffer(GraphKit* kit, bool safe_for_replace, boo kit->kill_dead_locals(); Node* klass_node = kit->makecon(TypeKlassPtr::make(vk)); Node* alloc_oop = kit->new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true, this); + store(kit, alloc_oop, alloc_oop, vk); - if (must_init) { - // Either not a larval or a larval receiver on which we are about to invoke an abstract value class constructor - // or the Object constructor which is not inlined. It is therefore escaping, and we must initialize the buffer - // because we have not done this, yet, for larvals (see else case). - store(kit, alloc_oop, alloc_oop, vk); - - // Do not let stores that initialize this buffer be reordered with a subsequent - // store that would make this buffer accessible by other threads. - AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop); - assert(alloc != nullptr, "must have an allocation node"); - kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress)); - } else { - // We do not need to initialize the buffer because a larval could still be updated which will create a new buffer. - // Once the larval escapes, we will initialize the buffer (must_init set). - assert(is_larval(), "only larvals can possibly skip the initialization of their buffer"); - } + // Do not let stores that initialize this buffer be reordered with a subsequent + // store that would make this buffer accessible by other threads. 
+  AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop);
+  assert(alloc != nullptr, "must have an allocation node");
+  kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
 
   oop->init_req(3, alloc_oop);
   region->init_req(3, kit->control());
   io    ->init_req(3, kit->i_o());
@@ -1099,6 +1077,8 @@ static void replace_allocation(PhaseIterGVN* igvn, Node* res, Node* dom) {
 
 Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
   Node* oop = get_oop();
+  Node* is_buffered = get_is_buffered();
+
   if (oop->isa_InlineType() && !phase->type(oop)->maybe_null()) {
     InlineTypeNode* vtptr = oop->as_InlineType();
     set_oop(*phase, vtptr->get_oop());
@@ -1110,12 +1090,16 @@ Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
     return this;
   }
 
-  // Use base oop if fields are loaded from memory
+  // Use the base oop if fields are loaded from memory, but don't do so if the base is the CheckCastPP
+  // of an allocation: the only case in which we load from a naked CheckCastPP is when we exit a
+  // constructor of an inline type and want to relinquish the larval oop there
   Node* base = is_loaded(phase);
-  if (base != nullptr && get_oop() != base && !phase->type(base)->maybe_null()) {
-    set_oop(*phase, base);
-    assert(is_allocated(phase), "should now be allocated");
-    return this;
+  if (base != nullptr && !base->is_InlineType() && !phase->type(base)->maybe_null() && AllocateNode::Ideal_allocation(base) == nullptr) {
+    if (oop != base || phase->type(is_buffered) != TypeInt::ONE) {
+      set_oop(*phase, base);
+      set_is_buffered(*phase);
+      return this;
+    }
   }
 
   if (can_reshape) {
@@ -1152,18 +1136,17 @@ InlineTypeNode* InlineTypeNode::make_uninitialized(PhaseGVN& gvn, ciInlineKlass*
   return vt;
 }
 
-InlineTypeNode* InlineTypeNode::make_all_zero(PhaseGVN& gvn, ciInlineKlass* vk, bool is_larval) {
+InlineTypeNode* InlineTypeNode::make_all_zero(PhaseGVN& gvn, ciInlineKlass* vk) {
   GrowableArray<ciType*> visited;
   visited.push(vk);
-  return make_all_zero_impl(gvn, vk, visited, is_larval);
+  return make_all_zero_impl(gvn, vk, visited);
 }
 
-InlineTypeNode* InlineTypeNode::make_all_zero_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool is_larval) {
+InlineTypeNode* InlineTypeNode::make_all_zero_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
   // Create a new InlineTypeNode initialized with all zero
   InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ true);
   vt->set_is_buffered(gvn, false);
   vt->set_is_init(gvn);
-  vt->set_is_larval(is_larval);
   for (uint i = 0; i < vt->field_count(); ++i) {
     ciType* ft = vt->field_type(i);
     Node* value = gvn.zerocon(ft->basic_type());
@@ -1215,13 +1198,13 @@ bool InlineTypeNode::is_all_zero(PhaseGVN* gvn, bool flat) const {
   return true;
 }
 
-InlineTypeNode* InlineTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk, bool is_larval) {
+InlineTypeNode* InlineTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk) {
   GrowableArray<ciType*> visited;
   visited.push(vk);
-  return make_from_oop_impl(kit, oop, vk, visited, is_larval);
+  return make_from_oop_impl(kit, oop, vk, visited);
 }
 
-InlineTypeNode* InlineTypeNode::make_from_oop_impl(GraphKit* kit, Node* oop, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool is_larval) {
+InlineTypeNode* InlineTypeNode::make_from_oop_impl(GraphKit* kit, Node* oop, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
   PhaseGVN& gvn = kit->gvn();
 
   // Create and initialize an InlineTypeNode by loading all field
@@ -1229,16 +1212,10 @@ InlineTypeNode*
InlineTypeNode::make_from_oop_impl(GraphKit* kit, Node* oop, ciI InlineTypeNode* vt = nullptr; if (oop->isa_InlineType()) { - // TODO 8335256 Re-enable assert and fix OSR code - // Issue triggers with TestValueConstruction.java and -XX:Tier0BackedgeNotifyFreqLog=0 -XX:Tier2BackedgeNotifyFreqLog=0 -XX:Tier3BackedgeNotifyFreqLog=0 -XX:Tier2BackEdgeThreshold=1 -XX:Tier3BackEdgeThreshold=1 -XX:Tier4BackEdgeThreshold=1 -Xbatch -XX:-TieredCompilation - // assert(!is_larval || oop->as_InlineType()->is_larval(), "must be larval"); - if (is_larval && !oop->as_InlineType()->is_larval()) { - vt = oop->clone()->as_InlineType(); - vt->set_is_larval(true); - return gvn.transform(vt)->as_InlineType(); - } return oop->as_InlineType(); - } else if (gvn.type(oop)->maybe_null()) { + } + + if (gvn.type(oop)->maybe_null()) { // Add a null check because the oop may be null Node* null_ctl = kit->top(); Node* not_null_oop = kit->null_check_oop(oop, &null_ctl); @@ -1252,7 +1229,6 @@ InlineTypeNode* InlineTypeNode::make_from_oop_impl(GraphKit* kit, Node* oop, ciI vt = new InlineTypeNode(vk, not_null_oop, /* null_free= */ false); vt->set_is_buffered(gvn); vt->set_is_init(gvn); - vt->set_is_larval(is_larval); vt->load(kit, not_null_oop, not_null_oop, vk, visited); if (null_ctl != kit->top()) { @@ -1271,7 +1247,6 @@ InlineTypeNode* InlineTypeNode::make_from_oop_impl(GraphKit* kit, Node* oop, ciI Node* init_ctl = kit->control(); vt->set_is_buffered(gvn); vt->set_is_init(gvn); - vt->set_is_larval(is_larval); vt->load(kit, oop, oop, vk, visited); // TODO 8284443 // assert(!null_free || vt->as_InlineType()->is_all_zero(&gvn) || init_ctl != kit->control() || !gvn.type(oop)->is_inlinetypeptr() || oop->is_Con() || oop->Opcode() == Op_InlineType || @@ -1468,70 +1443,7 @@ InlineTypeNode* InlineTypeNode::make_from_multi(GraphKit* kit, MultiNode* multi, return kit->gvn().transform(vt)->as_InlineType(); } -InlineTypeNode* InlineTypeNode::make_larval(GraphKit* kit, bool allocate) const { - ciInlineKlass* vk = inline_klass(); - InlineTypeNode* res = make_uninitialized(kit->gvn(), vk); - for (uint i = 1; i < req(); ++i) { - res->set_req(i, in(i)); - } - - if (allocate) { - // Re-execute if buffering triggers deoptimization - PreserveReexecuteState preexecs(kit); - kit->jvms()->set_should_reexecute(true); - Node* klass_node = kit->makecon(TypeKlassPtr::make(vk)); - Node* alloc_oop = kit->new_instance(klass_node, nullptr, nullptr, true); - AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop); - alloc->_larval = true; - - store(kit, alloc_oop, alloc_oop, vk); - res->set_oop(kit->gvn(), alloc_oop); - } - // TODO 8239003 - //res->set_type(TypeInlineType::make(vk, true)); - res = kit->gvn().transform(res)->as_InlineType(); - assert(!allocate || res->is_allocated(&kit->gvn()), "must be allocated"); - return res; -} - -InlineTypeNode* InlineTypeNode::finish_larval(GraphKit* kit) const { - Node* obj = get_oop(); - Node* mark_addr = kit->basic_plus_adr(obj, oopDesc::mark_offset_in_bytes()); - Node* mark = kit->make_load(nullptr, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered); - mark = kit->gvn().transform(new AndXNode(mark, kit->MakeConX(~markWord::larval_bit_in_place))); - kit->store_to_memory(kit->control(), mark_addr, mark, TypeX_X->basic_type(), MemNode::unordered); - - // Do not let stores that initialize this buffer be reordered with a subsequent - // store that would make this buffer accessible by other threads. 
- AllocateNode* alloc = AllocateNode::Ideal_allocation(obj); - assert(alloc != nullptr, "must have an allocation node"); - kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress)); - - ciInlineKlass* vk = inline_klass(); - InlineTypeNode* res = make_uninitialized(kit->gvn(), vk); - for (uint i = 1; i < req(); ++i) { - res->set_req(i, in(i)); - } - // TODO 8239003 - //res->set_type(TypeInlineType::make(vk, false)); - res = kit->gvn().transform(res)->as_InlineType(); - return res; -} - -bool InlineTypeNode::is_larval(PhaseGVN* gvn) const { - if (!is_allocated(gvn)) { - return false; - } - - Node* oop = get_oop(); - AllocateNode* alloc = AllocateNode::Ideal_allocation(oop); - return alloc != nullptr && alloc->_larval; -} - Node* InlineTypeNode::is_loaded(PhaseGVN* phase, ciInlineKlass* vk, Node* base, int holder_offset) { - if (is_larval() || is_larval(phase)) { - return nullptr; - } if (vk == nullptr) { vk = inline_klass(); } @@ -1736,10 +1648,6 @@ void InlineTypeNode::initialize_fields(GraphKit* kit, MultiNode* multi, uint& ba // Search for multiple allocations of this inline type and try to replace them by dominating allocations. // Equivalent InlineTypeNodes are merged by GVN, so we just need to search for AllocateNode users to find redundant allocations. void InlineTypeNode::remove_redundant_allocations(PhaseIdealLoop* phase) { - // TODO 8332886 Really needed? GVN is disabled anyway. - if (is_larval()) { - return; - } PhaseIterGVN* igvn = &phase->igvn(); // Search for allocations of this inline type. Ignore scalar replaceable ones, they // will be removed anyway and changing the memory chain will confuse other optimizations. @@ -1836,11 +1744,3 @@ const Type* InlineTypeNode::Value(PhaseGVN* phase) const { } return t; } - -#ifndef PRODUCT -void InlineTypeNode::dump_spec(outputStream* st) const { - if (_is_larval) { - st->print(" #larval"); - } -} -#endif // NOT PRODUCT diff --git a/src/hotspot/share/opto/inlinetypenode.hpp b/src/hotspot/share/opto/inlinetypenode.hpp index 9ca8e326bb5..7a7750b28ff 100644 --- a/src/hotspot/share/opto/inlinetypenode.hpp +++ b/src/hotspot/share/opto/inlinetypenode.hpp @@ -41,7 +41,6 @@ class InlineTypeNode : public TypeNode { init_class_id(Class_InlineType); init_req(Oop, oop); Compile::current()->add_inline_type(this); - _is_larval = false; } enum { Control, // Control input. @@ -52,13 +51,6 @@ class InlineTypeNode : public TypeNode { // Nodes are connected in increasing order of the index of the field they correspond to. 
};
 
-  bool _is_larval;
-
-  virtual uint hash() const { return TypeNode::hash() + _is_larval; }
-  // Don't GVN larvals because the inputs might be updated
-  virtual bool cmp(const Node &n) const { return TypeNode::cmp(n) && !(n.isa_InlineType()->_is_larval || _is_larval); }
-  virtual uint size_of() const { return sizeof(*this); }
-
   // Get the klass defining the field layout of the inline type
   ciInlineKlass* inline_klass() const { return type()->inline_klass(); }
 
@@ -67,9 +59,6 @@ class InlineTypeNode : public TypeNode {
   const TypePtr* field_adr_type(Node* base, int offset, ciInstanceKlass* holder, DecoratorSet decorators, PhaseGVN& gvn) const;
 
-  // Checks if the inline type oop is an allocated buffer with larval state
-  bool is_larval(PhaseGVN* gvn) const;
-
   // Checks if the inline type is loaded from memory and if so returns the oop
   Node* is_loaded(PhaseGVN* phase, ciInlineKlass* vk = nullptr, Node* base = nullptr, int holder_offset = 0);
 
@@ -78,8 +67,8 @@ class InlineTypeNode : public TypeNode {
   InlineTypeNode* adjust_scalarization_depth_impl(GraphKit* kit, GrowableArray<ciType*>& visited);
 
-  static InlineTypeNode* make_all_zero_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool is_larval = false);
-  static InlineTypeNode* make_from_oop_impl(GraphKit* kit, Node* oop, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool is_larval = false);
+  static InlineTypeNode* make_all_zero_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited);
+  static InlineTypeNode* make_from_oop_impl(GraphKit* kit, Node* oop, ciInlineKlass* vk, GrowableArray<ciType*>& visited);
   static InlineTypeNode* make_null_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool transform = true);
   static InlineTypeNode* make_from_flat_impl(GraphKit* kit, ciInlineKlass* vk, Node* obj, Node* ptr, Node* idx, ciInstanceKlass* holder, int holder_offset, bool atomic, int null_marker_offset, DecoratorSet decorators, GrowableArray<ciType*>& visited);
 
 public:
   // Create with all-zero field values
-  static InlineTypeNode* make_all_zero(PhaseGVN& gvn, ciInlineKlass* vk, bool is_larval = false);
+  static InlineTypeNode* make_all_zero(PhaseGVN& gvn, ciInlineKlass* vk);
   // Create uninitialized
   static InlineTypeNode* make_uninitialized(PhaseGVN& gvn, ciInlineKlass* vk, bool null_free = true);
   // Create and initialize by loading the field values from an oop
-  static InlineTypeNode* make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk, bool is_larval = false);
+  static InlineTypeNode* make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk);
   // Create and initialize by loading the field values from a flat field or array
   static InlineTypeNode* make_from_flat(GraphKit* kit, ciInlineKlass* vk, Node* obj, Node* ptr, Node* idx, ciInstanceKlass* holder = nullptr, int holder_offset = 0, bool atomic = false, int null_marker_offset = -1, DecoratorSet decorators = IN_HEAP | MO_UNORDERED);
@@ -115,9 +104,6 @@ class InlineTypeNode : public TypeNode {
   Node* get_is_buffered() const { return in(IsBuffered); }
   void set_is_buffered(PhaseGVN& gvn, bool buffered = true) { set_req_X(IsBuffered, gvn.intcon(buffered ?
1 : 0), &gvn); } - void set_is_larval(bool is_larval) { _is_larval = is_larval; } - bool is_larval() const { return _is_larval; } - // Checks if the inline type fields are all set to zero bool is_all_zero(PhaseGVN* gvn, bool flat = false) const; @@ -149,7 +135,7 @@ class InlineTypeNode : public TypeNode { InlineTypeNode* adjust_scalarization_depth(GraphKit* kit); // Allocates the inline type (if not yet allocated) - InlineTypeNode* buffer(GraphKit* kit, bool safe_for_replace = true, bool must_init = true); + InlineTypeNode* buffer(GraphKit* kit, bool safe_for_replace = true); bool is_allocated(PhaseGVN* phase) const; void replace_call_results(GraphKit* kit, CallNode* call, Compile* C); @@ -165,9 +151,6 @@ class InlineTypeNode : public TypeNode { // Pass inline type as fields at a call or return void pass_fields(GraphKit* kit, Node* n, uint& base_input, bool in, bool null_free = true); - InlineTypeNode* make_larval(GraphKit* kit, bool allocate) const; - InlineTypeNode* finish_larval(GraphKit* kit) const; - // Allocation optimizations void remove_redundant_allocations(PhaseIdealLoop* phase); @@ -178,8 +161,6 @@ class InlineTypeNode : public TypeNode { virtual Node* Ideal(PhaseGVN* phase, bool can_reshape); virtual int Opcode() const; - - NOT_PRODUCT(void dump_spec(outputStream* st) const;) }; #endif // SHARE_VM_OPTO_INLINETYPENODE_HPP diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp index 368fbca747d..1cd65f678f2 100644 --- a/src/hotspot/share/opto/library_call.cpp +++ b/src/hotspot/share/opto/library_call.cpp @@ -2425,44 +2425,38 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c } if (base->is_InlineType()) { + assert(!is_store, "InlineTypeNodes are non-larval value objects"); InlineTypeNode* vt = base->as_InlineType(); - if (is_store) { - if (!vt->is_allocated(&_gvn)) { + if (offset->is_Con()) { + long off = find_long_con(offset, 0); + ciInlineKlass* vk = vt->type()->inline_klass(); + if ((long)(int)off != off || !vk->contains_field_offset(off)) { return false; } - base = vt->get_oop(); - } else { - if (offset->is_Con()) { - long off = find_long_con(offset, 0); - ciInlineKlass* vk = vt->type()->inline_klass(); - if ((long)(int)off != off || !vk->contains_field_offset(off)) { - return false; - } - ciField* field = vk->get_non_flat_field_by_offset(off); - if (field != nullptr) { - BasicType bt = type2field[field->type()->basic_type()]; - if (bt == T_ARRAY || bt == T_NARROWOOP) { - bt = T_OBJECT; - } - if (bt == type && (!field->is_flat() || field->type() == inline_klass)) { - Node* value = vt->field_value_by_offset(off, false); - if (value->is_InlineType()) { - value = value->as_InlineType()->adjust_scalarization_depth(this); - } - set_result(value); - return true; + ciField* field = vk->get_non_flat_field_by_offset(off); + if (field != nullptr) { + BasicType bt = type2field[field->type()->basic_type()]; + if (bt == T_ARRAY || bt == T_NARROWOOP) { + bt = T_OBJECT; + } + if (bt == type && (!field->is_flat() || field->type() == inline_klass)) { + Node* value = vt->field_value_by_offset(off, false); + if (value->is_InlineType()) { + value = value->as_InlineType()->adjust_scalarization_depth(this); } + set_result(value); + return true; } } - { - // Re-execute the unsafe access if allocation triggers deoptimization. 
-        PreserveReexecuteState preexecs(this);
-        jvms()->set_should_reexecute(true);
-        vt = vt->buffer(this);
-      }
-      base = vt->get_oop();
     }
+    {
+      // Re-execute the unsafe access if allocation triggers deoptimization.
+      PreserveReexecuteState preexecs(this);
+      jvms()->set_should_reexecute(true);
+      vt = vt->buffer(this);
+    }
+    base = vt->get_oop();
   }
 
   // 32-bit machines ignore the high half!
@@ -2696,12 +2690,6 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c
     }
   }
 
-  if (argument(1)->is_InlineType() && is_store) {
-    InlineTypeNode* value = InlineTypeNode::make_from_oop(this, base, _gvn.type(argument(1))->inline_klass());
-    value = value->make_larval(this, false);
-    replace_in_map(argument(1), value);
-  }
-
   return true;
 }
 
@@ -2766,7 +2754,7 @@ bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
   // We must ensure that the buffer is properly published
   insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
   assert(!type->maybe_null(), "result of an allocation should not be null");
-  set_result(InlineTypeNode::make_from_oop(this, buffer, type->inline_klass(), false));
+  set_result(InlineTypeNode::make_from_oop(this, buffer, type->inline_klass()));
   return true;
 }
 
diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp
index 065a0880106..44f56e9f1c6 100644
--- a/src/hotspot/share/opto/macro.cpp
+++ b/src/hotspot/share/opto/macro.cpp
@@ -1031,8 +1031,6 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray is_inlinetypeptr() || C->has_circular_inline_type(),
-         "Inline type allocations should have been scalarized earlier");
   Unique_Node_List value_worklist;
   while (safepoints.length() > 0) {
     SafePointNode* sfpt = safepoints.pop();
@@ -1291,8 +1289,6 @@ bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
     // are already replaced with SafePointScalarObject because
     // we can't search for a fields value without instance_id.
     if (safepoints.length() > 0) {
-      assert(!inline_alloc || C->has_circular_inline_type(),
-             "Inline type allocations should have been scalarized earlier");
       return false;
     }
   }
@@ -2998,6 +2994,16 @@ void PhaseMacroExpand::eliminate_macro_nodes() {
       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
       progress = progress || success;
     }
+
+    // If an allocation is used only in safepoints, elimination of other macro nodes can remove
+    // all these safepoints, allowing the allocation to be removed. Hence, we run IGVN to remove
+    // all the excessive uses.
+    _igvn.set_delay_transform(false);
+    _igvn.optimize();
+    if (C->failing()) {
+      return;
+    }
+    _igvn.set_delay_transform(true);
   }
 #ifndef PRODUCT
   if (PrintOptoStatistics) {
@@ -3015,9 +3021,6 @@ bool PhaseMacroExpand::expand_macro_nodes() {
   if (StressMacroExpansion) {
     C->shuffle_macro_nodes();
   }
-  // Last attempt to eliminate macro nodes.
-  eliminate_macro_nodes();
-  if (C->failing())  return true;
 
   // Eliminate Opaque and LoopLimit nodes. Do it after all loop optimizations.
bool progress = true; diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp index 7f3123eccc3..312828f2b21 100644 --- a/src/hotspot/share/opto/memnode.cpp +++ b/src/hotspot/share/opto/memnode.cpp @@ -1101,7 +1101,6 @@ Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const { static Node* see_through_inline_type(PhaseValues* phase, const MemNode* load, Node* base, int offset) { if (!load->is_mismatched_access() && base != nullptr && base->is_InlineType() && offset > oopDesc::klass_offset_in_bytes()) { InlineTypeNode* vt = base->as_InlineType(); - assert(!vt->is_larval(), "must not load from a larval object"); Node* value = vt->field_value_by_offset(offset, true); assert(value != nullptr, "must see some value"); return value; diff --git a/src/hotspot/share/opto/parse.hpp b/src/hotspot/share/opto/parse.hpp index 90bcf88d85f..b21119d3133 100644 --- a/src/hotspot/share/opto/parse.hpp +++ b/src/hotspot/share/opto/parse.hpp @@ -481,8 +481,8 @@ class Parse : public GraphKit { // Helper: Merge the current mapping into the given basic block void merge_common(Block* target, int pnum); // Helper functions for merging individual cells. - PhiNode *ensure_phi( int idx, bool nocreate = false); - PhiNode *ensure_memory_phi(int idx, bool nocreate = false); + Node* ensure_phi( int idx, bool nocreate = false); + PhiNode* ensure_memory_phi(int idx, bool nocreate = false); // Helper to merge the current memory state into the given basic block void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi); @@ -554,7 +554,6 @@ class Parse : public GraphKit { // common code for actually performing the load or store void do_get_xxx(Node* obj, ciField* field); void do_put_xxx(Node* obj, ciField* field, bool is_field); - void set_inline_type_field(Node* obj, ciField* field, Node* val); ciType* improve_abstract_inline_type_klass(ciType* field_klass); diff --git a/src/hotspot/share/opto/parse1.cpp b/src/hotspot/share/opto/parse1.cpp index dbbd81139c5..c7fac35db42 100644 --- a/src/hotspot/share/opto/parse1.cpp +++ b/src/hotspot/share/opto/parse1.cpp @@ -183,7 +183,7 @@ Node* Parse::check_interpreter_type(Node* l, const Type* type, l = null_check_oop(l, &bad_type_ctrl); bad_type_exit->control()->add_req(bad_type_ctrl); } - l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl); + l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl, false, true); bad_type_exit->control()->add_req(bad_type_ctrl); } @@ -622,10 +622,11 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses) Node* parm = local(i); const Type* t = _gvn.type(parm); if (t->is_inlinetypeptr()) { - // Create InlineTypeNode from the oop and replace the parameter - bool is_larval = (i == 0) && method()->is_object_constructor() && !method()->holder()->is_java_lang_Object(); - Node* vt = InlineTypeNode::make_from_oop(this, parm, t->inline_klass(), is_larval); - replace_in_map(parm, vt); + if (!is_osr_parse() && !(method()->is_object_constructor() && i == 0)) { + // Create InlineTypeNode from the oop and replace the parameter + Node* vt = InlineTypeNode::make_from_oop(this, parm, t->inline_klass()); + replace_in_map(parm, vt); + } } else if (UseTypeSpeculation && (i == (arg_size - 1)) && !is_osr_parse() && method()->has_vararg() && t->isa_aryptr() != nullptr && !t->is_aryptr()->is_null_free() && !t->is_aryptr()->is_flat() && (!t->is_aryptr()->is_not_null_free() || !t->is_aryptr()->is_not_flat())) { @@ -1209,11 +1210,7 @@ 
SafePointNode* Parse::create_entry_map() {
     Node* receiver = kit.argument(0);
     Node* null_free = kit.null_check_receiver_before_call(method());
     _caller = kit.transfer_exceptions_into_jvms();
-    if (receiver->is_InlineType() && receiver->as_InlineType()->is_larval()) {
-      // Replace the larval inline type receiver in the exit map as well to make sure that
-      // we can find and update it in Parse::do_call when we are done with the initialization.
-      _exits.map()->replace_edge(receiver, null_free);
-    }
+
     if (kit.stopped()) {
       _exits.add_exception_states_from(_caller);
       _exits.set_jvms(_caller);
@@ -1782,13 +1779,33 @@ void Parse::merge_common(Parse::Block* target, int pnum) {
           t = target->stack_type_at(j - tmp_jvms->stkoff());
         }
         if (t != nullptr && t != Type::BOTTOM) {
-          if (n->is_InlineType() && !t->is_inlinetypeptr()) {
-            // Allocate inline type in src block to be able to merge it with oop in target block
-            map()->set_req(j, n->as_InlineType()->buffer(this));
-          } else if (!n->is_InlineType() && t->is_inlinetypeptr()) {
-            // Scalarize null in src block to be able to merge it with inline type in target block
-            assert(gvn().type(n)->is_zero_type(), "Should have been scalarized");
-            map()->set_req(j, InlineTypeNode::make_null(gvn(), t->inline_klass()));
+          if (!t->is_inlinetypeptr()) {
+            if (n->is_InlineType()) {
+              // The merge is an oop phi; we need to buffer the inline type
+              map()->set_req(j, n->as_InlineType()->buffer(this));
+            }
+          } else {
+            Node* phi = nullptr;
+            if (target->is_merged()) {
+              phi = target->start_map()->in(j);
+            }
+            if (phi != nullptr && !phi->is_InlineType()) {
+              if (n->is_InlineType()) {
+                // The merge is an oop phi; we need to buffer the inline type
+                map()->set_req(j, n->as_InlineType()->buffer(this));
+              }
+            } else {
+              if (!n->is_InlineType()) {
+                // We cannot blindly expand an inline type here since it may be larval
+                if (gvn().type(n)->is_zero_type()) {
+                  // A null constant implies that this is not a larval object
+                  map()->set_req(j, InlineTypeNode::make_null(gvn(), t->inline_klass()));
+                } else if (phi != nullptr && phi->is_InlineType()) {
+                  // Larval oops cannot be merged with non-larval ones
+                  map()->set_req(j, InlineTypeNode::make_from_oop(this, n, t->inline_klass()));
+                }
+              }
+            }
           }
         }
       }
@@ -1890,11 +1907,11 @@ void Parse::merge_common(Parse::Block* target, int pnum) {
     for (uint j = 1; j < newin->req(); j++) {
       Node* m = map()->in(j);   // Current state of target.
      Node* n = newin->in(j);   // Incoming change to target state.
-      PhiNode* phi;
+      Node* phi;
       if (m->is_Phi() && m->as_Phi()->region() == r) {
-        phi = m->as_Phi();
+        phi = m;
       } else if (m->is_InlineType() && m->as_InlineType()->has_phi_inputs(r)) {
-        phi = m->as_InlineType()->get_oop()->as_Phi();
+        phi = m;
       } else {
         phi = nullptr;
       }
@@ -1945,22 +1962,24 @@ void Parse::merge_common(Parse::Block* target, int pnum) {
 
       // It is a bug if we create a phi which sees a garbage value on a live path.
 
       // Merging two inline types?
- if (phi != nullptr && phi->bottom_type()->is_inlinetypeptr()) { + if (phi != nullptr && phi->is_InlineType()) { // Reload current state because it may have been updated by ensure_phi - m = map()->in(j); - InlineTypeNode* vtm = m->as_InlineType(); // Current inline type + assert(phi == map()->in(j), "unexpected value in map"); + assert(phi->as_InlineType()->has_phi_inputs(r), ""); + InlineTypeNode* vtm = phi->as_InlineType(); // Current inline type InlineTypeNode* vtn = n->as_InlineType(); // Incoming inline type - assert(vtm->get_oop() == phi, "Inline type should have Phi input"); - if (TraceOptoParse) { + assert(vtm == phi, "Inline type should have Phi input"); + #ifdef ASSERT + if (TraceOptoParse) { tty->print_cr("\nMerging inline types"); tty->print_cr("Current:"); vtm->dump(2); tty->print_cr("Incoming:"); vtn->dump(2); tty->cr(); -#endif } +#endif // Do the merge vtm->merge_with(&_gvn, vtn, pnum, last_merge); if (last_merge) { @@ -1969,7 +1988,7 @@ void Parse::merge_common(Parse::Block* target, int pnum) { } } else if (phi != nullptr) { assert(n != top() || r->in(pnum) == top(), "live value must not be garbage"); - assert(phi->region() == r, ""); + assert(phi->as_Phi()->region() == r, ""); phi->set_req(pnum, n); // Then add 'n' to the merge if (last_merge) { // Last merge for this Phi. @@ -2150,7 +2169,7 @@ int Parse::Block::add_new_path() { //------------------------------ensure_phi------------------------------------- // Turn the idx'th entry of the current map into a Phi -PhiNode *Parse::ensure_phi(int idx, bool nocreate) { +Node* Parse::ensure_phi(int idx, bool nocreate) { SafePointNode* map = this->map(); Node* region = map->control(); assert(region->is_Region(), ""); @@ -2165,7 +2184,7 @@ PhiNode *Parse::ensure_phi(int idx, bool nocreate) { } InlineTypeNode* vt = o->isa_InlineType(); if (vt != nullptr && vt->has_phi_inputs(region)) { - return vt->get_oop()->as_Phi(); + return vt; } // Now use a Phi here for merging @@ -2206,7 +2225,7 @@ PhiNode *Parse::ensure_phi(int idx, bool nocreate) { // represents the merged inline type and update the map. 
vt = vt->clone_with_phis(&_gvn, region); map->set_req(idx, vt); - return vt->get_oop()->as_Phi(); + return vt; } else { PhiNode* phi = PhiNode::make(region, o, t); gvn().set_type(phi, t); @@ -2348,7 +2367,6 @@ void Parse::return_current(Node* value) { Node* phi = _exits.argument(0); const Type* return_type = phi->bottom_type(); const TypeInstPtr* tr = return_type->isa_instptr(); - assert(!value->is_InlineType() || !value->as_InlineType()->is_larval(), "returning a larval"); if ((tf()->returns_inline_type_as_fields() || (_caller->has_method() && !Compile::current()->inlining_incrementally())) && return_type->is_inlinetypeptr()) { // Inline type is returned as fields, make sure it is scalarized diff --git a/src/hotspot/share/opto/parse2.cpp b/src/hotspot/share/opto/parse2.cpp index 2ca21eb1cac..c1893b39f90 100644 --- a/src/hotspot/share/opto/parse2.cpp +++ b/src/hotspot/share/opto/parse2.cpp @@ -3406,11 +3406,9 @@ void Parse::do_one_bytecode() { case Bytecodes::_ireturn: case Bytecodes::_areturn: case Bytecodes::_freturn: - return_current(pop()); + return_current(cast_non_larval(pop())); break; case Bytecodes::_lreturn: - return_current(pop_pair()); - break; case Bytecodes::_dreturn: return_current(pop_pair()); break; @@ -3463,7 +3461,7 @@ void Parse::do_one_bytecode() { // If this is a backwards branch in the bytecodes, add Safepoint maybe_add_safepoint(iter().get_dest()); a = null(); - b = pop(); + b = cast_non_larval(pop()); if (b->is_InlineType()) { // Null checking a scalarized but nullable inline type. Check the IsInit // input instead of the oop input to avoid keeping buffer allocations alive @@ -3492,8 +3490,8 @@ void Parse::do_one_bytecode() { handle_if_acmp: // If this is a backwards branch in the bytecodes, add Safepoint maybe_add_safepoint(iter().get_dest()); - a = pop(); - b = pop(); + a = cast_non_larval(pop()); + b = cast_non_larval(pop()); do_acmp(btest, b, a); break; @@ -3599,7 +3597,7 @@ void Parse::do_one_bytecode() { if (C->should_print_igv(perBytecode)) { IdealGraphPrinter* printer = C->igv_printer(); char buffer[256]; - jio_snprintf(buffer, sizeof(buffer), "Bytecode %d: %s", bci(), Bytecodes::name(bc())); + jio_snprintf(buffer, sizeof(buffer), "Bytecode %d: %s, map: %d", bci(), Bytecodes::name(bc()), map() == nullptr ? 
-1 : map()->_idx); bool old = printer->traverse_outs(); printer->set_traverse_outs(true); printer->print_graph(buffer); diff --git a/src/hotspot/share/opto/parse3.cpp b/src/hotspot/share/opto/parse3.cpp index 56687e35424..6503cd1c498 100644 --- a/src/hotspot/share/opto/parse3.cpp +++ b/src/hotspot/share/opto/parse3.cpp @@ -22,6 +22,7 @@ * */ +#include "ci/ciInstanceKlass.hpp" #include "compiler/compileLog.hpp" #include "interpreter/linkResolver.hpp" #include "memory/universe.hpp" @@ -47,20 +48,6 @@ void Parse::do_field_access(bool is_get, bool is_field) { ciField* field = iter().get_field(will_link); assert(will_link, "getfield: typeflow responsibility"); - ciInstanceKlass* field_holder = field->holder(); - - if (is_get && is_field && field_holder->is_inlinetype() && peek()->is_InlineType()) { - InlineTypeNode* vt = peek()->as_InlineType(); - null_check(vt); - Node* value = vt->field_value_by_offset(field->offset_in_bytes()); - if (value->is_InlineType()) { - value = value->as_InlineType()->adjust_scalarization_depth(this); - } - pop(); - push_node(field->layout_type(), value); - return; - } - if (is_field == field->is_static()) { // Interpreter will throw java_lang_IncompatibleClassChangeError // Check this before allowing methods to access static fields @@ -70,6 +57,7 @@ void Parse::do_field_access(bool is_get, bool is_field) { } // Deoptimize on putfield writes to call site target field outside of CallSite ctor. + ciInstanceKlass* field_holder = field->holder(); if (!is_get && field->is_call_site_target() && !(method()->holder() == field_holder && method()->is_object_constructor())) { uncommon_trap(Deoptimization::Reason_unhandled, @@ -122,6 +110,7 @@ void Parse::do_field_access(bool is_get, bool is_field) { } void Parse::do_get_xxx(Node* obj, ciField* field) { + obj = cast_non_larval(obj); BasicType bt = field->layout_type(); // Does this field have a constant value? If so, just push the value. if (field->is_constant() && !field->is_flat() && @@ -139,6 +128,16 @@ void Parse::do_get_xxx(Node* obj, ciField* field) { } } + if (obj->is_InlineType()) { + InlineTypeNode* vt = obj->as_InlineType(); + Node* value = vt->field_value_by_offset(field->offset_in_bytes()); + if (value->is_InlineType()) { + value = value->as_InlineType()->adjust_scalarization_depth(this); + } + push_node(field->layout_type(), value); + return; + } + ciType* field_klass = field->type(); field_klass = improve_abstract_inline_type_klass(field_klass); int offset = field->offset_in_bytes(); @@ -244,9 +243,9 @@ ciType* Parse::improve_abstract_inline_type_klass(ciType* field_klass) { void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) { bool is_vol = field->is_volatile(); int offset = field->offset_in_bytes(); + BasicType bt = field->layout_type(); Node* val = type2size[bt] == 1 ? pop() : pop_pair(); - if (field->is_null_free()) { PreserveReexecuteState preexecs(this); jvms()->set_should_reexecute(true); @@ -256,10 +255,9 @@ void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) { return; } } - if (obj->is_InlineType()) { - set_inline_type_field(obj, field, val); - return; - } + val = cast_non_larval(val); + + assert(!obj->is_InlineType(), "InlineTypeNodes are non-larval value objects"); if (field->is_null_free() && field->type()->as_inline_klass()->is_empty() && (!method()->is_object_constructor() || field->is_flat())) { // Storing to a field of an empty, null-free inline type that is already initialized. Ignore. 
return; @@ -329,46 +327,6 @@ void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) { } } -void Parse::set_inline_type_field(Node* obj, ciField* field, Node* val) { - assert(_method->is_object_constructor(), "inline type is initialized outside of constructor"); - assert(obj->as_InlineType()->is_larval(), "must be larval"); - assert(!_gvn.type(obj)->maybe_null(), "should never be null"); - - // Re-execute if buffering in below code triggers deoptimization. - PreserveReexecuteState preexecs(this); - jvms()->set_should_reexecute(true); - inc_sp(1); - - if (!val->is_InlineType() && field->type()->is_inlinetype()) { - // Scalarize inline type field value - val = InlineTypeNode::make_from_oop(this, val, field->type()->as_inline_klass()); - } else if (val->is_InlineType() && !field->is_flat()) { - // Field value needs to be allocated because it can be merged with a non-inline type. - val = val->as_InlineType()->buffer(this); - } - - // Clone the inline type node and set the new field value - InlineTypeNode* new_vt = obj->as_InlineType()->clone_if_required(&_gvn, _map); - new_vt->set_field_value_by_offset(field->offset_in_bytes(), val); - new_vt = new_vt->adjust_scalarization_depth(this); - - // If the inline type is buffered and the caller might use the buffer, update it. - if (new_vt->is_allocated(&gvn()) && (!_caller->has_method() || C->inlining_incrementally() || _caller->method()->is_object_constructor())) { - new_vt->store(this, new_vt->get_oop(), new_vt->get_oop(), new_vt->bottom_type()->inline_klass(), 0, field->offset_in_bytes()); - - // Preserve allocation ptr to create precedent edge to it in membar - // generated on exit from constructor. - AllocateNode* alloc = AllocateNode::Ideal_allocation(new_vt->get_oop()); - if (alloc != nullptr) { - set_alloc_with_final_or_stable(new_vt->get_oop()); - } - set_wrote_final(true); - } - - replace_in_map(obj, _gvn.transform(new_vt)); - return; -} - //============================================================================= void Parse::do_newarray() { diff --git a/src/hotspot/share/opto/parseHelper.cpp b/src/hotspot/share/opto/parseHelper.cpp index 952ffaf7766..c53919f35ce 100644 --- a/src/hotspot/share/opto/parseHelper.cpp +++ b/src/hotspot/share/opto/parseHelper.cpp @@ -315,11 +315,6 @@ void Parse::do_new() { if (stopped()) return; } - if (klass->is_inlinetype()) { - push(InlineTypeNode::make_all_zero(_gvn, klass->as_inline_klass(), /* is_larval */ true)); - return; - } - Node* kls = makecon(TypeKlassPtr::make(klass)); Node* obj = new_instance(kls); diff --git a/test/hotspot/jtreg/ProblemList.txt b/test/hotspot/jtreg/ProblemList.txt index 33f106e965f..cdf690b706b 100644 --- a/test/hotspot/jtreg/ProblemList.txt +++ b/test/hotspot/jtreg/ProblemList.txt @@ -83,7 +83,6 @@ compiler/ciReplay/TestIncrementalInlining.java 8349191 generic-all compiler/startup/StartupOutput.java 8354404 generic-all compiler/types/TestSubTypeCheckUniqueSubclass.java 8354408 generic-all -compiler/valhalla/inlinetypes/TestAllocationMergeAndFolding.java 8354283 generic-all ############################################################################# diff --git a/test/hotspot/jtreg/compiler/valhalla/inlinetypes/TestValueConstruction.java b/test/hotspot/jtreg/compiler/valhalla/inlinetypes/TestValueConstruction.java index 617fedc59b4..b0b4b23b68c 100644 --- a/test/hotspot/jtreg/compiler/valhalla/inlinetypes/TestValueConstruction.java +++ b/test/hotspot/jtreg/compiler/valhalla/inlinetypes/TestValueConstruction.java @@ -23,19 +23,24 @@ package 
compiler.valhalla.inlinetypes; +import java.lang.classfile.Label; +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; import java.util.Random; import jdk.test.lib.Asserts; import jdk.test.lib.Utils; import jdk.test.whitebox.WhiteBox; +import test.java.lang.invoke.lib.InstructionHelper; /** * @test id=Xbatch * @summary Test construction of value objects. * @key randomness - * @library /testlibrary /test/lib /compiler/whitebox / + * @library /testlibrary /test/lib /compiler/whitebox /test/jdk/java/lang/invoke/common / * @enablePreview - * @build jdk.test.whitebox.WhiteBox + * @build jdk.test.whitebox.WhiteBox test.java.lang.invoke.lib.InstructionHelper * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbatch * -XX:CompileCommand=inline,TestValueConstruction::checkDeopt @@ -45,9 +50,9 @@ /* * @test id=DeoptimizeALot * @key randomness - * @library /testlibrary /test/lib /compiler/whitebox / + * @library /testlibrary /test/lib /compiler/whitebox /test/jdk/java/lang/invoke/common / * @enablePreview - * @build jdk.test.whitebox.WhiteBox + * @build jdk.test.whitebox.WhiteBox test.java.lang.invoke.lib.InstructionHelper * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+DeoptimizeALot * -XX:CompileCommand=inline,TestValueConstruction::checkDeopt @@ -57,9 +62,9 @@ /* * @test id=CompileonlyTest * @key randomness - * @library /testlibrary /test/lib /compiler/whitebox / + * @library /testlibrary /test/lib /compiler/whitebox /test/jdk/java/lang/invoke/common / * @enablePreview - * @build jdk.test.whitebox.WhiteBox + * @build jdk.test.whitebox.WhiteBox test.java.lang.invoke.lib.InstructionHelper * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI * -XX:CompileCommand=compileonly,*TestValueConstruction::test* -Xbatch @@ -71,9 +76,9 @@ * @test id=DontInlineHelper * @summary Test construction of value objects. * @key randomness - * @library /testlibrary /test/lib /compiler/whitebox / + * @library /testlibrary /test/lib /compiler/whitebox /test/jdk/java/lang/invoke/common / * @enablePreview - * @build jdk.test.whitebox.WhiteBox + * @build jdk.test.whitebox.WhiteBox test.java.lang.invoke.lib.InstructionHelper * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbatch * -XX:CompileCommand=dontinline,compiler*::helper* @@ -84,9 +89,9 @@ /* * @test id=DontInlineMyValueInit * @key randomness - * @library /testlibrary /test/lib /compiler/whitebox / + * @library /testlibrary /test/lib /compiler/whitebox /test/jdk/java/lang/invoke/common / * @enablePreview - * @build jdk.test.whitebox.WhiteBox + * @build jdk.test.whitebox.WhiteBox test.java.lang.invoke.lib.InstructionHelper * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI * -XX:CompileCommand=dontinline,*MyValue*::<init> -Xbatch @@ -97,9 +102,9 @@ /* * @test id=DontInlineObjectInit * @key randomness - * @library /testlibrary /test/lib /compiler/whitebox / + * @library /testlibrary /test/lib /compiler/whitebox /test/jdk/java/lang/invoke/common / * @enablePreview - * @build jdk.test.whitebox.WhiteBox + * @build jdk.test.whitebox.WhiteBox test.java.lang.invoke.lib.InstructionHelper * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI * -XX:CompileCommand=dontinline,*Object::<init> -Xbatch @@ -110,9 +115,9 @@ /* * @test id=DontInlineObjectInitDeoptimizeALot * @key randomness - * @library /testlibrary /test/lib /compiler/whitebox / + * @library /testlibrary /test/lib /compiler/whitebox /test/jdk/java/lang/invoke/common / * @enablePreview - * @build jdk.test.whitebox.WhiteBox + * @build jdk.test.whitebox.WhiteBox test.java.lang.invoke.lib.InstructionHelper * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+IgnoreUnrecognizedVMOptions * -XX:+DeoptimizeALot -XX:CompileCommand=dontinline,*Object::<init> -Xbatch @@ -123,9 +128,9 @@ /* * @test id=DontInlineMyAbstractInit * @key randomness - * @library /testlibrary /test/lib /compiler/whitebox / + * @library /testlibrary /test/lib /compiler/whitebox /test/jdk/java/lang/invoke/common / * @enablePreview - * @build jdk.test.whitebox.WhiteBox + * @build jdk.test.whitebox.WhiteBox test.java.lang.invoke.lib.InstructionHelper * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI * -XX:CompileCommand=dontinline,*MyAbstract::<init> -Xbatch @@ -136,9 +141,9 @@ /* * @test id=StressIncrementalInlining * @key randomness - * @library /testlibrary /test/lib /compiler/whitebox / + * @library /testlibrary /test/lib /compiler/whitebox /test/jdk/java/lang/invoke/common / * @enablePreview - * @build jdk.test.whitebox.WhiteBox + * @build jdk.test.whitebox.WhiteBox test.java.lang.invoke.lib.InstructionHelper * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbatch * -XX:-TieredCompilation -XX:+StressIncrementalInlining @@ -149,9 +154,9 @@ /* * @test id=StressIncrementalInliningCompileOnlyTest * @key randomness - * @library /testlibrary /test/lib /compiler/whitebox / + * @library /testlibrary /test/lib /compiler/whitebox /test/jdk/java/lang/invoke/common / * @enablePreview - * @build jdk.test.whitebox.WhiteBox + * @build jdk.test.whitebox.WhiteBox test.java.lang.invoke.lib.InstructionHelper * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox * @run main/othervm -Xbootclasspath/a:.
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI * -XX:-TieredCompilation -XX:+StressIncrementalInlining @@ -162,9 +167,9 @@ /* @test id=StressIncrementalInliningDontInlineMyValueInit * @key randomness - * @library /testlibrary /test/lib /compiler/whitebox / + * @library /testlibrary /test/lib /compiler/whitebox /test/jdk/java/lang/invoke/common / * @enablePreview - * @build jdk.test.whitebox.WhiteBox + * @build jdk.test.whitebox.WhiteBox test.java.lang.invoke.lib.InstructionHelper * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI * -XX:-TieredCompilation -XX:+StressIncrementalInlining @@ -176,9 +181,9 @@ /* * @test id=StressIncrementalInliningDontInlineObjectInit * @key randomness - * @library /testlibrary /test/lib /compiler/whitebox / + * @library /testlibrary /test/lib /compiler/whitebox /test/jdk/java/lang/invoke/common / * @enablePreview - * @build jdk.test.whitebox.WhiteBox + * @build jdk.test.whitebox.WhiteBox test.java.lang.invoke.lib.InstructionHelper * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI * -XX:-TieredCompilation -XX:+StressIncrementalInlining @@ -190,9 +195,9 @@ /* * @test id=StressIncrementalInliningDontInlineMyAbstractInit * @key randomness - * @library /testlibrary /test/lib /compiler/whitebox / + * @library /testlibrary /test/lib /compiler/whitebox /test/jdk/java/lang/invoke/common / * @enablePreview - * @build jdk.test.whitebox.WhiteBox + * @build jdk.test.whitebox.WhiteBox test.java.lang.invoke.lib.InstructionHelper * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI * -XX:-TieredCompilation -XX:+StressIncrementalInlining @@ -201,6 +206,20 @@ * compiler.valhalla.inlinetypes.TestValueConstruction */ +/* + * @test id=StressIncrementalInliningOnStackReplacement + * @key randomness + * @library /testlibrary /test/lib /compiler/whitebox /test/jdk/java/lang/invoke/common / + * @enablePreview + * @build jdk.test.whitebox.WhiteBox test.java.lang.invoke.lib.InstructionHelper + * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * -XX:-TieredCompilation -XX:+StressIncrementalInlining + * -XX:Tier0BackedgeNotifyFreqLog=0 -XX:Tier2BackedgeNotifyFreqLog=0 -XX:Tier3BackedgeNotifyFreqLog=0 + * -XX:Tier2BackEdgeThreshold=1 -XX:Tier3BackEdgeThreshold=1 -XX:Tier4BackEdgeThreshold=1 -Xbatch + * compiler.valhalla.inlinetypes.TestValueConstruction + */ + public class TestValueConstruction { static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox(); @@ -1638,7 +1657,65 @@ public static MyValue16 testBackAndForthAbstract2(int x) { return new MyValue16(x); } - public static void main(String[] args) throws Exception { + private static final MethodHandle MULTIPLE_OCCURRENCES_IN_JVMS_RETURN_STACK = InstructionHelper.buildMethodHandle(MethodHandles.lookup(), + "multipleOccurrencesInJVMSReturnStack", + MethodType.methodType(MyValue1.class, int.class), + CODE -> { + Label loopHead = CODE.newLabel(); + Label loopExit = CODE.newLabel(); + CODE. + new_(MyValue1.class.describeConstable().get()). + dup(). + iconst_0(). + labelBinding(loopHead). + dup(). + ldc(100). + if_icmpge(loopExit). + iconst_1(). + iadd(). + goto_(loopHead). 
+ labelBinding(loopExit). + pop(). + iload(0). + invokespecial(MyValue1.class.describeConstable().get(), "<init>", MethodType.methodType(void.class, int.class).describeConstable().get()). + areturn(); + }); + + public static MyValue1 testMultipleOccurrencesInJVMSReturnStack(int x) throws Throwable { + return (MyValue1) MULTIPLE_OCCURRENCES_IN_JVMS_RETURN_STACK.invokeExact(x); + } + + private static final MethodHandle MULTIPLE_OCCURRENCES_IN_JVMS_RETURN_LOCAL = InstructionHelper.buildMethodHandle(MethodHandles.lookup(), + "multipleOccurrencesInJVMSReturnLocal", + MethodType.methodType(MyValue1.class, int.class), + CODE -> { + Label loopHead = CODE.newLabel(); + Label loopExit = CODE.newLabel(); + CODE. + new_(MyValue1.class.describeConstable().get()). + dup(). + astore(1). + iconst_0(). + labelBinding(loopHead). + dup(). + ldc(100). + if_icmpge(loopExit). + iconst_1(). + iadd(). + goto_(loopHead). + labelBinding(loopExit). + pop(). + iload(0). + invokespecial(MyValue1.class.describeConstable().get(), "<init>", MethodType.methodType(void.class, int.class).describeConstable().get()). + aload(1). + areturn(); + }); + + public static MyValue1 testMultipleOccurrencesInJVMSReturnLocal(int x) throws Throwable { + return (MyValue1) MULTIPLE_OCCURRENCES_IN_JVMS_RETURN_LOCAL.invokeExact(x); + } + + public static void main(String[] args) throws Throwable { Random rand = Utils.getRandomInstance(); // Randomly exclude some constructors from inlining via the WhiteBox API because CompileCommands don't match on different signatures. @@ -1704,7 +1781,7 @@ public static void main(String[] args) throws Exception { } } - private static void run(int x, boolean doCheck) { + private static void run(int x, boolean doCheck) throws Throwable { check(test1(x), x, doCheck); check(test1a(x), x, doCheck); check(test1b(x), x, doCheck); @@ -1765,6 +1842,8 @@ private static void run(int x, boolean doCheck) { check(testCallAsConstructorArgument(), new MyValue14(o), doCheck); check(testBackAndForthAbstract(x), new MyValue15(x), doCheck); check(testBackAndForthAbstract2(x), new MyValue16(x), doCheck); + check(testMultipleOccurrencesInJVMSReturnStack(x), new MyValue1(x), doCheck); + check(testMultipleOccurrencesInJVMSReturnLocal(x), new MyValue1(x), doCheck); } private static void check(Object testResult, Object expectedResult, boolean check) { From d91fcf99343ae604a67ead29c569691a697e2e1b Mon Sep 17 00:00:00 2001 From: Quan Anh Mai Date: Wed, 30 Apr 2025 02:48:56 +0700 Subject: [PATCH 2/5] fix test failures --- src/hotspot/share/opto/compile.cpp | 16 --- src/hotspot/share/opto/doCall.cpp | 9 +- src/hotspot/share/opto/macro.cpp | 78 ++++++-------- src/hotspot/share/opto/macro.hpp | 7 +- src/hotspot/share/opto/memnode.cpp | 101 +++++------------- .../inlinetypes/TestValueConstruction.java | 48 ++------- 6 files changed, 75 insertions(+), 184 deletions(-) diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp index 389404a4da7..a6822062b6e 100644 --- a/src/hotspot/share/opto/compile.cpp +++ b/src/hotspot/share/opto/compile.cpp @@ -2851,10 +2851,6 @@ void Compile::Optimize() { return; } igvn.set_delay_transform(false); - igvn.optimize(); - if (failing()) { - return; - } print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2); } @@ -2876,10 +2872,6 @@ void Compile::Optimize() { return; } igvn.set_delay_transform(false); - igvn.optimize(); - if (failing()) { - return; - } print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2); } @@ -2904,12 +2896,7 @@ void Compile::Optimize() { if (failing()) { return; } - 
igvn.set_delay_transform(false); - igvn.optimize(); - if (failing()) { - return; - } print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2); } @@ -3016,9 +3003,6 @@ void Compile::Optimize() { if (failing()) { return; } - igvn.set_delay_transform(false); - igvn.optimize(); - igvn.set_delay_transform(true); print_method(PHASE_BEFORE_MACRO_EXPANSION, 3); if (mex.expand_macro_nodes()) { diff --git a/src/hotspot/share/opto/doCall.cpp b/src/hotspot/share/opto/doCall.cpp index 5093ad10648..c3107dfe039 100644 --- a/src/hotspot/share/opto/doCall.cpp +++ b/src/hotspot/share/opto/doCall.cpp @@ -893,14 +893,7 @@ void Parse::do_call() { non_larval->set_oop(gvn(), null()); non_larval->set_is_buffered(gvn(), false); non_larval = gvn().transform(non_larval)->as_InlineType(); - - // Replace the larval object with the non-larval one in all call frames. This avoids the oop - // alive until the outermost constructor exits. - JVMState* current = map()->jvms(); - while (current != nullptr) { - current->map()->replace_edge(receiver, non_larval); - current = current->caller(); - } + map()->replace_edge(receiver, non_larval); } } diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp index 44f56e9f1c6..5c888f3f347 100644 --- a/src/hotspot/share/opto/macro.cpp +++ b/src/hotspot/share/opto/macro.cpp @@ -2896,54 +2896,38 @@ void PhaseMacroExpand::expand_flatarraycheck_node(FlatArrayCheckNode* check) { //---------------------------eliminate_macro_nodes---------------------- // Eliminate scalar replaced allocations and associated locks. void PhaseMacroExpand::eliminate_macro_nodes() { - if (C->macro_count() == 0) + if (C->macro_count() == 0) { return; + } NOT_PRODUCT(int membar_before = count_MemBar(C);) - // Before elimination may re-mark (change to Nested or NonEscObj) - // all associated (same box and obj) lock and unlock nodes. - int cnt = C->macro_count(); - for (int i=0; i < cnt; i++) { - Node *n = C->macro_node(i); - if (n->is_AbstractLock()) { // Lock and Unlock nodes - mark_eliminated_locking_nodes(n->as_AbstractLock()); + int iteration = 0; + while (C->macro_count() > 0) { + if (iteration++ > 100) { + assert(false, "Too slow convergence of macro elimination"); + break; } - } - // Re-marking may break consistency of Coarsened locks. - if (!C->coarsened_locks_consistent()) { - return; // recompile without Coarsened locks if broken - } else { - // After coarsened locks are eliminated locking regions - // become unbalanced. We should not execute any more - // locks elimination optimizations on them. - C->mark_unbalanced_boxes(); - } - // First, attempt to eliminate locks - bool progress = true; - while (progress) { - progress = false; - for (int i = C->macro_count(); i > 0; i = MIN2(i - 1, C->macro_count())) { // more than 1 element can be eliminated at once - Node* n = C->macro_node(i - 1); - bool success = false; - DEBUG_ONLY(int old_macro_count = C->macro_count();) - if (n->is_AbstractLock()) { - success = eliminate_locking_node(n->as_AbstractLock()); -#ifndef PRODUCT - if (success && PrintOptoStatistics) { - Atomic::inc(&PhaseMacroExpand::_monitor_objects_removed_counter); - } -#endif + // Before elimination may re-mark (change to Nested or NonEscObj) + // all associated (same box and obj) lock and unlock nodes. 
+ int cnt = C->macro_count(); + for (int i=0; i < cnt; i++) { + Node *n = C->macro_node(i); + if (n->is_AbstractLock()) { // Lock and Unlock nodes + mark_eliminated_locking_nodes(n->as_AbstractLock()); } - assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count"); - progress = progress || success; } - } - // Next, attempt to eliminate allocations - _has_locks = false; - progress = true; - while (progress) { - progress = false; + // Re-marking may break consistency of Coarsened locks. + if (!C->coarsened_locks_consistent()) { + return; // recompile without Coarsened locks if broken + } else { + // After coarsened locks are eliminated locking regions + // become unbalanced. We should not execute any more + // locks elimination optimizations on them. + C->mark_unbalanced_boxes(); + } + + bool progress = false; for (int i = C->macro_count(); i > 0; i = MIN2(i - 1, C->macro_count())) { // more than 1 element can be eliminated at once Node* n = C->macro_node(i - 1); bool success = false; @@ -2967,8 +2951,12 @@ void PhaseMacroExpand::eliminate_macro_nodes() { } case Node::Class_Lock: case Node::Class_Unlock: - assert(!n->as_AbstractLock()->is_eliminated(), "sanity"); - _has_locks = true; + success = eliminate_locking_node(n->as_AbstractLock()); +#ifndef PRODUCT + if (success && PrintOptoStatistics) { + Atomic::inc(&PhaseMacroExpand::_monitor_objects_removed_counter); + } +#endif break; case Node::Class_ArrayCopy: break; @@ -2995,6 +2983,10 @@ void PhaseMacroExpand::eliminate_macro_nodes() { progress = progress || success; } + if (!progress) { + break; + } + // If an allocation is used only in safepoints, elimination of another macro nodes can remove // all these safepoints, allowing the allocation to be removed. Hence we do igvn to remove // all the excessive uses. diff --git a/src/hotspot/share/opto/macro.hpp b/src/hotspot/share/opto/macro.hpp index 632fd5b7105..c0e74f3f387 100644 --- a/src/hotspot/share/opto/macro.hpp +++ b/src/hotspot/share/opto/macro.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -83,9 +83,6 @@ class PhaseMacroExpand : public Phase { // projections extracted from a call node CallProjections* _callprojs; - // Additional data collected during macro expansion - bool _has_locks; - void expand_allocate(AllocateNode *alloc); void expand_allocate_array(AllocateArrayNode *alloc); void expand_allocate_common(AllocateNode* alloc, @@ -214,7 +211,7 @@ class PhaseMacroExpand : public Phase { Node* make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, Node* ctl, Node* mem, BasicType ft, const Type *ftype, AllocateNode *alloc); public: - PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn), _has_locks(false) { + PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn) { _igvn.set_delay_transform(true); } void eliminate_macro_nodes(); diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp index f2ef2b1e788..27ea45df9c8 100644 --- a/src/hotspot/share/opto/memnode.cpp +++ b/src/hotspot/share/opto/memnode.cpp @@ -143,32 +143,6 @@ extern void print_alias_types(); #endif -// If call is a constructor call on receiver, returns the class which declares the target method, -// else returns nullptr. 
This information can then be used to deduce if call modifies a field of -// receiver. Specifically, if the field is declared in a class that is a subclass of the one -// declaring the constructor, then the field is set inside the constructor, else the field must be -// set before the constructor invocation. E.g. A field Super.x will be set during the execution of -// Sub::<init>, while a field Sub.y must be set before Super::<init> is invoked. -static ciInstanceKlass* find_constructor_call_method_holder(Node* call, Node* receiver) { - if (!call->is_CallJava()) { - return nullptr; - } - - ciMethod* target = call->as_CallJava()->method(); - if (target == nullptr || !target->is_object_constructor()) { - return nullptr; - } - - assert(call->req() > TypeFunc::Parms, "constructor must have at least 1 argument"); - Node* parm = call->in(TypeFunc::Parms)->uncast(); - receiver = receiver->uncast(); - if (parm == receiver || (parm->is_InlineType() && parm->as_InlineType()->get_oop()->uncast() == receiver)) { - return target->holder(); - } - - return nullptr; -} - // Find the memory output corresponding to the fall-through path of a call static Node* find_call_fallthrough_mem_output(CallNode* call) { ResourceMark rm; @@ -178,42 +152,6 @@ static Node* find_call_fallthrough_mem_output(CallNode* call) { return res; } -// Try to find a better memory input for a load from a strict final field of an object that is -// allocated in the current compilation unit, or is the first parameter when we are in a -// constructor -static Node* optimize_strict_final_load_memory_from_local_object(ciField* field, ProjNode* base_uncasted) { - if (!EnableValhalla) { - // In this method we depends on the fact that strict fields are set before the invocation of - // super(), I'm not sure if this is true without Valhalla - return nullptr; - } - - // The node that can be passed into a constructor - Node* base = base_uncasted; - if (!base_uncasted->is_Parm()) { - assert(base_uncasted->_con == AllocateNode::RawAddress && base_uncasted->in(0)->is_Allocate(), "must be the RawAddress of an AllocateNode"); - base = base_uncasted->in(0)->as_Allocate()->result_cast(); - assert(base != nullptr && base->in(1) == base_uncasted, "must find a valid base"); - } - - // Try to see if there is a constructor call on the base - for (DUIterator_Fast imax, i = base->fast_outs(imax); i < imax; i++) { - Node* out = base->fast_out(i); - ciInstanceKlass* target_holder = find_constructor_call_method_holder(out, base); - if (target_holder == nullptr) { - continue; - } else if (target_holder->is_subclass_of(field->holder())) { - return find_call_fallthrough_mem_output(out->as_CallJava()); - } else { - Node* res = out->in(TypeFunc::Memory); - assert(res != nullptr, "should have a memory input"); - return res; - } - } - - return nullptr; -} - // Try to find a better memory input for a load from a strict final field static Node* try_optimize_strict_final_load_memory(PhaseGVN* phase, ciField* field, Node* adr, ProjNode*& base_local) { intptr_t offset = 0; @@ -226,9 +164,8 @@ static Node* try_optimize_strict_final_load_memory(PhaseGVN* phase, ciField* fie if (base_uncasted->is_Proj()) { MultiNode* multi = base_uncasted->in(0)->as_Multi(); if (multi->is_Allocate()) { - // The result of an AllocateNode, try to find the constructor call base_local = base_uncasted->as_Proj(); - return optimize_strict_final_load_memory_from_local_object(field, base_uncasted->as_Proj()); + return nullptr; } else if (multi->is_Call()) { // The oop is returned from a call, the memory can be the 
fallthrough output of the call return find_call_fallthrough_mem_output(multi->as_Call()); @@ -237,7 +174,7 @@ static Node* try_optimize_strict_final_load_memory(PhaseGVN* phase, ciField* fie if (phase->C->method()->is_object_constructor() && base_uncasted->as_Proj()->_con == TypeFunc::Parms) { // The receiver of a constructor is similar to the result of an AllocateNode base_local = base_uncasted->as_Proj(); - return optimize_strict_final_load_memory_from_local_object(field, base_uncasted->as_Proj()); + return nullptr; } else { // Use the start memory otherwise return multi->proj_out(TypeFunc::Memory); @@ -248,14 +185,30 @@ static Node* try_optimize_strict_final_load_memory(PhaseGVN* phase, ciField* fie return nullptr; } -// Whether a call can modify a strict final field of base_local, given that base_local is allocated -// inside the current compilation unit, or is the first parameter when the compilation root is a -// constructor. This is equivalent to asking whether base_local is the receiver of the constructor -// invocation call and the class declaring the target method is a subclass of the class declaring -// field. -static bool call_can_modify_local_object(ciField* field, CallNode* call, Node* base_local) { - ciInstanceKlass* target_holder = find_constructor_call_method_holder(call, base_local); - return target_holder != nullptr && target_holder->is_subclass_of(field->holder()); +// Whether a call can modify a strict final field, given that the object is allocated inside the +// current compilation unit, or is the first parameter when the compilation root is a constructor. +// This is equivalent to asking whether 'call' is a constructor invocation and the class declaring +// the target method is a subclass of the class declaring 'field'. +static bool call_can_modify_local_object(ciField* field, CallNode* call) { + if (!call->is_CallJava()) { + return false; + } + + ciMethod* target = call->as_CallJava()->method(); + if (target == nullptr || !target->is_object_constructor()) { + return false; + } + + // If the class declaring the constructor is a subclass of the one declaring 'field', then the + // field is set inside the constructor, else the field must be set before the + // constructor invocation. E.g. A field Super.x will be set during the execution of Sub::<init>, + // while a field Sub.y must be set before Super::<init> is invoked. + // We can try to be more heroic and decide if the receiver of the constructor invocation is the + // object from which we are loading. This, however, may be problematic as deciding if 2 + // nodes are definitely different may not be trivial, especially if the graph is not canonical. + // As a result, it is made more conservative for now. 
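For illustration, a hypothetical hierarchy matching the Super/Sub example in the comment above (Java-like pseudocode only; it assumes the strict-field rule stated earlier, namely that fields of a value class are assigned in the constructor before super() is invoked, and the names Super and Sub are taken from the comment, everything else is illustrative):

    abstract value class Super { final int x; Super(int x) { this.x = x; } }
    value class Sub extends Super { final int y; Sub(int x, int y) { this.y = y; super(x); } }

A call to Sub::<init> assigns y and then invokes Super::<init>, which assigns x, so it may write Super.x: the constructor's class Sub is a subclass of the field's class Super, and the check below returns true. A call to Super::<init> can never write Sub.y, because y is already assigned by the time super(x) runs: Super is not a subclass of Sub, and the check returns false.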
+ assert(call->req() > TypeFunc::Parms, "constructor must have at least 1 argument"); + return target->holder()->is_subclass_of(field->holder()); } Node* MemNode::optimize_simple_memory_chain(Node* mchain, const TypeOopPtr* t_oop, Node* load, PhaseGVN* phase) { @@ -320,7 +273,7 @@ Node* MemNode::optimize_simple_memory_chain(Node* mchain, const TypeOopPtr* t_oo CallNode* call = proj_in->as_Call(); if (!call->may_modify(t_oop, phase)) { result = call->in(TypeFunc::Memory); - } else if (is_strict_final_load && base_local != nullptr && !call_can_modify_local_object(field, call, base_local)) { + } else if (is_strict_final_load && base_local != nullptr && !call_can_modify_local_object(field, call)) { result = call->in(TypeFunc::Memory); } } else if (proj_in->is_Initialize()) { diff --git a/test/hotspot/jtreg/compiler/valhalla/inlinetypes/TestValueConstruction.java b/test/hotspot/jtreg/compiler/valhalla/inlinetypes/TestValueConstruction.java index b0b4b23b68c..f6fe6dcfa33 100644 --- a/test/hotspot/jtreg/compiler/valhalla/inlinetypes/TestValueConstruction.java +++ b/test/hotspot/jtreg/compiler/valhalla/inlinetypes/TestValueConstruction.java @@ -1657,37 +1657,9 @@ public static MyValue16 testBackAndForthAbstract2(int x) { return new MyValue16(x); } - private static final MethodHandle MULTIPLE_OCCURRENCES_IN_JVMS_RETURN_STACK = InstructionHelper.buildMethodHandle(MethodHandles.lookup(), + private static final MethodHandle MULTIPLE_OCCURRENCES_IN_JVMS = InstructionHelper.buildMethodHandle(MethodHandles.lookup(), "multipleOccurrencesInJVMSReturnStack", MethodType.methodType(MyValue1.class, int.class), - CODE -> { - Label loopHead = CODE.newLabel(); - Label loopExit = CODE.newLabel(); - CODE. - new_(MyValue1.class.describeConstable().get()). - dup(). - iconst_0(). - labelBinding(loopHead). - dup(). - ldc(100). - if_icmpge(loopExit). - iconst_1(). - iadd(). - goto_(loopHead). - labelBinding(loopExit). - pop(). - iload(0). - invokespecial(MyValue1.class.describeConstable().get(), "<init>", MethodType.methodType(void.class, int.class).describeConstable().get()). - areturn(); - }); - - public static MyValue1 testMultipleOccurrencesInJVMSReturnStack(int x) throws Throwable { - return (MyValue1) MULTIPLE_OCCURRENCES_IN_JVMS_RETURN_STACK.invokeExact(x); - } - - private static final MethodHandle MULTIPLE_OCCURRENCES_IN_JVMS_RETURN_LOCAL = InstructionHelper.buildMethodHandle(MethodHandles.lookup(), - "multipleOccurrencesInJVMSReturnLocal", - MethodType.methodType(MyValue1.class, int.class), CODE -> { Label loopHead = CODE.newLabel(); Label loopExit = CODE.newLabel(); @@ -1695,24 +1667,25 @@ public static MyValue1 testMultipleOccurrencesInJVMSReturnStack(int x) throws Th new_(MyValue1.class.describeConstable().get()). dup(). astore(1). + astore(2). iconst_0(). + istore(3). labelBinding(loopHead). - dup(). + iload(3). ldc(100). if_icmpge(loopExit). - iconst_1(). - iadd(). + iinc(3, 1). goto_(loopHead). labelBinding(loopExit). - pop(). + aload(2). iload(0). invokespecial(MyValue1.class.describeConstable().get(), "<init>", MethodType.methodType(void.class, int.class).describeConstable().get()). - aload(1). + aload(2). 
areturn(); }); - public static MyValue1 testMultipleOccurrencesInJVMSReturnLocal(int x) throws Throwable { - return (MyValue1) MULTIPLE_OCCURRENCES_IN_JVMS_RETURN_LOCAL.invokeExact(x); + public static MyValue1 testMultipleOccurrencesInJVMS(int x) throws Throwable { + return (MyValue1) MULTIPLE_OCCURRENCES_IN_JVMS.invokeExact(x); } public static void main(String[] args) throws Throwable { @@ -1842,8 +1815,7 @@ private static void run(int x, boolean doCheck) throws Throwable { check(testCallAsConstructorArgument(), new MyValue14(o), doCheck); check(testBackAndForthAbstract(x), new MyValue15(x), doCheck); check(testBackAndForthAbstract2(x), new MyValue16(x), doCheck); - check(testMultipleOccurrencesInJVMSReturnStack(x), new MyValue1(x), doCheck); - check(testMultipleOccurrencesInJVMSReturnLocal(x), new MyValue1(x), doCheck); + check(testMultipleOccurrencesInJVMS(x), new MyValue1(x), doCheck); } private static void check(Object testResult, Object expectedResult, boolean check) { From 3be7b97bdd744252736ee21b19a1a21e372a65a7 Mon Sep 17 00:00:00 2001 From: Quan Anh Mai Date: Wed, 30 Apr 2025 03:09:31 +0700 Subject: [PATCH 3/5] fast path for non intrinsics --- src/hotspot/share/opto/doCall.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hotspot/share/opto/doCall.cpp b/src/hotspot/share/opto/doCall.cpp index c3107dfe039..5dd03c5a52d 100644 --- a/src/hotspot/share/opto/doCall.cpp +++ b/src/hotspot/share/opto/doCall.cpp @@ -94,7 +94,7 @@ static bool arg_can_be_larval(ciMethod* callee, int arg_idx) { return true; } - if (arg_idx != 1) { + if (arg_idx != 1 || callee->intrinsic_id() == vmIntrinsicID::_none) { return false; } From bdc01f44887dd7db0548675e2b988e3112d934e6 Mon Sep 17 00:00:00 2001 From: Quan Anh Mai Date: Tue, 6 May 2025 07:06:31 +0700 Subject: [PATCH 4/5] move progress check --- src/hotspot/share/opto/macro.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp index b005d30ef25..db03537dcae 100644 --- a/src/hotspot/share/opto/macro.cpp +++ b/src/hotspot/share/opto/macro.cpp @@ -2983,10 +2983,6 @@ void PhaseMacroExpand::eliminate_macro_nodes() { progress = progress || success; } - if (!progress) { - break; - } - // If an allocation is used only in safepoints, elimination of another macro nodes can remove // all these safepoints, allowing the allocation to be removed. Hence we do igvn to remove // all the excessive uses. 
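This hunk removes the early progress check; the next hunk reinstates it after the IGVN pass, so IGVN also runs on the final, unproductive sweep and the graph is left canonical when the loop exits. A minimal compilable sketch of the resulting loop shape (the counter and helpers here are hypothetical stand-ins, not the HotSpot sources):

    #include <cstdio>

    // Hypothetical stand-ins so the sketch compiles; they only model the real loop.
    static int  macro_count_ = 3;
    static bool eliminate_one_pass() {  // one sweep over the macro node list
      if (macro_count_ == 0) return false;
      macro_count_--;                   // pretend each sweep eliminates one node
      return true;
    }
    static void run_igvn() { /* leave the graph canonical between sweeps */ }

    int main() {
      while (macro_count_ > 0) {
        bool progress = eliminate_one_pass();
        run_igvn();                     // runs even when the sweep made no progress
        if (!progress) {
          break;                        // the check now sits after IGVN (hunk below)
        }
      }
      std::printf("remaining macro nodes: %d\n", macro_count_);
      return 0;
    }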
@@ -2996,6 +2992,10 @@ void PhaseMacroExpand::eliminate_macro_nodes() { return; } _igvn.set_delay_transform(true); + + if (!progress) { + break; + } } #ifndef PRODUCT if (PrintOptoStatistics) { From 4b16f6d047ecf497d703684165f58e2f3ef5c6a7 Mon Sep 17 00:00:00 2001 From: Quan Anh Mai Date: Sat, 10 May 2025 00:50:47 +0700 Subject: [PATCH 5/5] Tobias' comments --- src/hotspot/share/opto/doCall.cpp | 7 +-- src/hotspot/share/opto/graphKit.cpp | 10 ++-- src/hotspot/share/opto/graphKit.hpp | 2 +- src/hotspot/share/opto/inlinetypenode.cpp | 10 +++- src/hotspot/share/opto/macro.cpp | 8 +-- src/hotspot/share/opto/parse1.cpp | 52 +++++++++++++------ src/hotspot/share/opto/parse2.cpp | 8 +-- src/hotspot/share/opto/parse3.cpp | 5 +- .../inlinetypes/TestBasicFunctionality.java | 4 +- 9 files changed, 66 insertions(+), 40 deletions(-) diff --git a/src/hotspot/share/opto/doCall.cpp b/src/hotspot/share/opto/doCall.cpp index 5dd03c5a52d..b7a6f7d8438 100644 --- a/src/hotspot/share/opto/doCall.cpp +++ b/src/hotspot/share/opto/doCall.cpp @@ -704,13 +704,13 @@ void Parse::do_call() { set_stack(sp() - nargs, casted_receiver); } - // Scalarize value objects passed into this invocation because we know that they are not larval + // Scalarize value objects passed into this invocation if we know that they are not larval for (int arg_idx = 0; arg_idx < nargs; arg_idx++) { if (arg_can_be_larval(callee, arg_idx)) { continue; } - cast_non_larval(peek(nargs - 1 - arg_idx)); + cast_to_non_larval(peek(nargs - 1 - arg_idx)); } // Note: It's OK to try to inline a virtual call. @@ -888,7 +888,8 @@ void Parse::do_call() { if (cg->method()->is_object_constructor() && receiver != nullptr && gvn().type(receiver)->is_inlinetypeptr()) { InlineTypeNode* non_larval = InlineTypeNode::make_from_oop(this, receiver, gvn().type(receiver)->inline_klass()); - // Relinquish the oop input, we will delay the allocation to the point it is needed + // Relinquish the oop input, we will delay the allocation to the point it is needed, see the + // comments in InlineTypeNode::Ideal for more details non_larval = non_larval->clone_if_required(&gvn(), nullptr); non_larval->set_oop(gvn(), null()); non_larval->set_is_buffered(gvn(), false); diff --git a/src/hotspot/share/opto/graphKit.cpp b/src/hotspot/share/opto/graphKit.cpp index c7d4b784a63..d4956280d28 100644 --- a/src/hotspot/share/opto/graphKit.cpp +++ b/src/hotspot/share/opto/graphKit.cpp @@ -1501,13 +1501,9 @@ Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) { return cast; // Return casted value } -Node* GraphKit::cast_non_larval(Node* obj) { - if (obj->is_InlineType()) { - return obj; - } - +Node* GraphKit::cast_to_non_larval(Node* obj) { const Type* obj_type = gvn().type(obj); - if (!obj_type->is_inlinetypeptr()) { + if (obj->is_InlineType() || !obj_type->is_inlinetypeptr()) { return obj; } @@ -3441,7 +3437,7 @@ Node* GraphKit::gen_checkcast(Node* obj, Node* superklass, Node* *failure_contro } // Else it must be a non-larval object - obj = cast_non_larval(obj); + obj = cast_to_non_larval(obj); const TypeKlassPtr* improved_klass_ptr_type = klass_ptr_type->try_improve(); const TypeOopPtr* toop = improved_klass_ptr_type->cast_to_exactness(false)->as_instance_type(); diff --git a/src/hotspot/share/opto/graphKit.hpp b/src/hotspot/share/opto/graphKit.hpp index 0f06e78ec19..793762110c0 100644 --- a/src/hotspot/share/opto/graphKit.hpp +++ b/src/hotspot/share/opto/graphKit.hpp @@ -456,7 +456,7 @@ class GraphKit : public Phase { // compilation is larval or not. 
If such a maybe-larval object is passed into an operation that // does not permit larval objects, we can be sure that it is not larval and scalarize it if it // is a value object. - Node* cast_non_larval(Node* obj); + Node* cast_to_non_larval(Node* obj); // Replace all occurrences of one node by another. void replace_in_map(Node* old, Node* neww); diff --git a/src/hotspot/share/opto/inlinetypenode.cpp b/src/hotspot/share/opto/inlinetypenode.cpp index 1586461993d..f0b01bce794 100644 --- a/src/hotspot/share/opto/inlinetypenode.cpp +++ b/src/hotspot/share/opto/inlinetypenode.cpp @@ -1094,7 +1094,15 @@ Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) { // Use base oop if fields are loaded from memory, don't do so if base is the CheckCastPP of an // allocation because the only case we load from a naked CheckCastPP is when we exit a - // constructor of an inline type and we want to relinquish the larval oop there + // constructor of an inline type and we want to relinquish the larval oop there. This has a + // couple of benefits: + // - The allocation is likely to be elided earlier if it is not an input of an InlineTypeNode. + // - The InlineTypeNode without an allocation input is more likely to be GVN-ed. This may emerge + // when we try to clone a value object. + // - The buffering, if needed, is delayed until it is required. This new allocation, since it is + // created from an InlineTypeNode, is recognized as not having a unique identity and in the + // future, we can move them around more freely such as hoisting out of loops. This is not true + // for the old allocation since larval value objects do have unique identities. Node* base = is_loaded(phase); if (base != nullptr && !base->is_InlineType() && !phase->type(base)->maybe_null() && AllocateNode::Ideal_allocation(base) == nullptr) { if (oop != base || phase->type(is_buffered) != TypeInt::ONE) { diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp index db03537dcae..9a6af6f10e0 100644 --- a/src/hotspot/share/opto/macro.cpp +++ b/src/hotspot/share/opto/macro.cpp @@ -2983,9 +2983,11 @@ void PhaseMacroExpand::eliminate_macro_nodes() { progress = progress || success; } - // If an allocation is used only in safepoints, elimination of another macro nodes can remove - // all these safepoints, allowing the allocation to be removed. Hence we do igvn to remove - // all the excessive uses. + // Ensure the graph after PhaseMacroExpand::eliminate_macro_nodes is canonical (no igvn + // transformation is pending). If an allocation is used only in safepoints, elimination of + // other macro nodes can remove all these safepoints, allowing the allocation to be removed. + // Hence after igvn we retry removing macro nodes if some progress that has been made in this + // iteration. _igvn.set_delay_transform(false); _igvn.optimize(); if (C->failing()) { diff --git a/src/hotspot/share/opto/parse1.cpp b/src/hotspot/share/opto/parse1.cpp index c7fac35db42..cde1b412cc7 100644 --- a/src/hotspot/share/opto/parse1.cpp +++ b/src/hotspot/share/opto/parse1.cpp @@ -183,6 +183,10 @@ Node* Parse::check_interpreter_type(Node* l, const Type* type, l = null_check_oop(l, &bad_type_ctrl); bad_type_exit->control()->add_req(bad_type_ctrl); } + + // In an OSR compilation, we cannot know if a value object in the incoming state is larval or + // not. As a result, we must pass maybe_larval == true to not eagerly scalarize the result if + // the target type is a value class. 
l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl, false, true); bad_type_exit->control()->add_req(bad_type_ctrl); } @@ -622,6 +626,12 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses) Node* parm = local(i); const Type* t = _gvn.type(parm); if (t->is_inlinetypeptr()) { + // If the parameter is a value object, try to scalarize it if we know that it is not larval. + // There are 2 cases when a parameter may be larval: + // - In an OSR compilation, we do not know if a value object in the incoming state is larval + // or not. We must be conservative and not eagerly scalarize them. + // - In a normal compilation, all parameters are non-larval except the receiver of a + // constructor, which must be a larval object. if (!is_osr_parse() && !(method()->is_object_constructor() && i == 0)) { // Create InlineTypeNode from the oop and replace the parameter Node* vt = InlineTypeNode::make_from_oop(this, parm, t->inline_klass()); @@ -1779,31 +1789,41 @@ void Parse::merge_common(Parse::Block* target, int pnum) { t = target->stack_type_at(j - tmp_jvms->stkoff()); } if (t != nullptr && t != Type::BOTTOM) { + // An object can appear in the JVMS as either an oop or an InlineTypeNode. If the merge is + // an InlineTypeNode, we need all the merge inputs to be InlineTypeNodes. Else, if the + // merge is an oop, each merge input needs to be either an oop or a buffered + // InlineTypeNode. if (!t->is_inlinetypeptr()) { + // The merge cannot be an InlineTypeNode, ensure the input is buffered if it is an + // InlineTypeNode if (n->is_InlineType()) { - // The merge is an oop phi, we need to buffer the inline type map()->set_req(j, n->as_InlineType()->buffer(this)); } } else { - Node* phi = nullptr; - if (target->is_merged()) { - phi = target->start_map()->in(j); - } - if (phi != nullptr && !phi->is_InlineType()) { - if (n->is_InlineType()) { - // The merge is an oop phi, we need to buffer the inline type - map()->set_req(j, n->as_InlineType()->buffer(this)); + // Since the merge is a value object, it can either be an oop or an InlineTypeNode + if (!target->is_merged()) { + // This is the first processed input of the merge. If it is an InlineTypeNode, the + // merge will be an InlineTypeNode. Else, try to scalarize so the merge can be + // scalarized as well. However, we cannot blindly scalarize an inline type oop here + // since it may be larval + if (!n->is_InlineType() && gvn().type(n)->is_zero_type()) { + // Null constant implies that this is not a larval object + map()->set_req(j, InlineTypeNode::make_null(gvn(), t->inline_klass())); } } else { - if (!n->is_InlineType()) { - // We cannot blindly expand an inline type here since it may be larval - if (gvn().type(n)->is_zero_type()) { - // Null constant implies that this is not a larval objects - map()->set_req(j, InlineTypeNode::make_null(gvn(), t->inline_klass())); - } else if (phi != nullptr && phi->is_InlineType()) { - // Larval oops cannot be merged with non-larval ones + Node* phi = target->start_map()->in(j); + if (phi->is_InlineType()) { + // Larval oops cannot be merged with non-larval ones, and since the merge point is + // non-larval, n must be non-larval as well. 
As a result, we can scalarize n to merge + // into phi + if (!n->is_InlineType()) { map()->set_req(j, InlineTypeNode::make_from_oop(this, n, t->inline_klass())); } + } else { + // The merge is an oop phi, ensure the input is buffered if it is an InlineTypeNode + if (n->is_InlineType()) { + map()->set_req(j, n->as_InlineType()->buffer(this)); + } } } } diff --git a/src/hotspot/share/opto/parse2.cpp b/src/hotspot/share/opto/parse2.cpp index 12aa5704c92..6ff8342ad3e 100644 --- a/src/hotspot/share/opto/parse2.cpp +++ b/src/hotspot/share/opto/parse2.cpp @@ -3406,7 +3406,7 @@ void Parse::do_one_bytecode() { case Bytecodes::_ireturn: case Bytecodes::_areturn: case Bytecodes::_freturn: - return_current(cast_non_larval(pop())); + return_current(cast_to_non_larval(pop())); break; case Bytecodes::_lreturn: case Bytecodes::_dreturn: @@ -3461,7 +3461,7 @@ void Parse::do_one_bytecode() { // If this is a backwards branch in the bytecodes, add Safepoint maybe_add_safepoint(iter().get_dest()); a = null(); - b = cast_non_larval(pop()); + b = cast_to_non_larval(pop()); if (b->is_InlineType()) { // Null checking a scalarized but nullable inline type. Check the IsInit // input instead of the oop input to avoid keeping buffer allocations alive @@ -3490,8 +3490,8 @@ void Parse::do_one_bytecode() { handle_if_acmp: // If this is a backwards branch in the bytecodes, add Safepoint maybe_add_safepoint(iter().get_dest()); - a = cast_non_larval(pop()); - b = cast_non_larval(pop()); + a = cast_to_non_larval(pop()); + b = cast_to_non_larval(pop()); do_acmp(btest, b, a); break; diff --git a/src/hotspot/share/opto/parse3.cpp b/src/hotspot/share/opto/parse3.cpp index 6503cd1c498..2271ec4e6fb 100644 --- a/src/hotspot/share/opto/parse3.cpp +++ b/src/hotspot/share/opto/parse3.cpp @@ -110,7 +110,7 @@ void Parse::do_field_access(bool is_get, bool is_field) { } void Parse::do_get_xxx(Node* obj, ciField* field) { - obj = cast_non_larval(obj); + obj = cast_to_non_larval(obj); BasicType bt = field->layout_type(); // Does this field have a constant value? If so, just push the value. if (field->is_constant() && !field->is_flat() && @@ -255,8 +255,9 @@ void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) { return; } } - val = cast_non_larval(val); + val = cast_to_non_larval(val); + // We cannot store into a non-larval object, so obj must not be an InlineTypeNode assert(!obj->is_InlineType(), "InlineTypeNodes are non-larval value objects"); if (field->is_null_free() && field->type()->as_inline_klass()->is_empty() && (!method()->is_object_constructor() || field->is_flat())) { // Storing to a field of an empty, null-free inline type that is already initialized. Ignore. diff --git a/test/hotspot/jtreg/compiler/valhalla/inlinetypes/TestBasicFunctionality.java b/test/hotspot/jtreg/compiler/valhalla/inlinetypes/TestBasicFunctionality.java index 97f63480ee9..4f69e015bde 100644 --- a/test/hotspot/jtreg/compiler/valhalla/inlinetypes/TestBasicFunctionality.java +++ b/test/hotspot/jtreg/compiler/valhalla/inlinetypes/TestBasicFunctionality.java @@ -649,9 +649,7 @@ public void test27_verifier(RunInfo info) { // Check elimination of redundant value class allocations @Test - // TODO 8332886 Remove the AlwaysIncrementalInline=false condition - @IR(applyIf = {"AlwaysIncrementalInline", "false"}, - counts = {ALLOC, "= 1"}) + @IR(counts = {ALLOC, "= 1"}) public MyValue3 test28(MyValue3[] va) { // Create value object and force allocation MyValue3 vt = MyValue3.create();