src/hotspot/share/opto/macro.cpp
} else {
assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
}
mem = mem->in(MemNode::Memory);
} else if (mem->is_ClearArray()) {
+ intptr_t offset;
+ AllocateNode* alloc = AllocateNode::Ideal_allocation(mem->in(3), phase, offset);
+
+ if (alloc == NULL) {
+ return start_mem;
+ }
+
if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
// Can not bypass the initialization of the instance
// we are looking at.
-       debug_only(intptr_t offset;)
assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
}
}
}
#ifndef PRODUCT
- if (PrintEliminateAllocations) {
+ if (print_eliminate_allocations()) {
if (can_eliminate) {
tty->print("Scalar ");
if (res == NULL)
alloc->dump();
else
}
#endif
return can_eliminate;
}
+ void PhaseMacroExpand::adjust_safepoint_jvms(SafePointNode* sfpt, Node* res, SafePointScalarObjectNode* sobj) {
+ JVMState *jvms = sfpt->jvms();
+ jvms->set_endoff(sfpt->req());
+
+ // Now make a pass over the debug information replacing any references
+ // to the allocated object with "sobj"
+ int start = jvms->debug_start();
+ int end = jvms->debug_end();
+ sfpt->replace_edges_in_range(res, sobj, start, end);
+ _igvn._worklist.push(sfpt);
+ }
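For reference, the replace_edges_in_range() call used by the new helper amounts to a bounded rewrite of a node's input edges. A simplified sketch (the real Node method also keeps the IGVN hash table consistent):

    // Sketch only: within inputs [start, end) of n, redirect every edge
    // that points at `from` so it points at `to` instead.
    void replace_edges_in_range(Node* n, Node* from, Node* to,
                                int start, int end) {
      for (int i = start; i < end; i++) {
        if (n->in(i) == from) {
          n->set_req(i, to);
        }
      }
    }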
+
// Do scalar replacement.
bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
GrowableArray <SafePointNode *> safepoints_done;
ciKlass* klass = NULL;
}
}
_igvn._worklist.push(sfpt_done);
}
#ifndef PRODUCT
- if (PrintEliminateAllocations) {
+ if (print_eliminate_allocations()) {
if (field != NULL) {
tty->print("=== At SafePoint node %d can't find value of Field: ",
sfpt->_idx);
field->print();
int field_idx = C->get_alias_index(field_addr_type);
field_val = transform_later(new DecodeNNode(field_val, field_val->get_ptr_type()));
}
}
sfpt->add_req(field_val);
}
- JVMState *jvms = sfpt->jvms();
- jvms->set_endoff(sfpt->req());
- // Now make a pass over the debug information replacing any references
- // to the allocated object with "sobj"
- int start = jvms->debug_start();
- int end = jvms->debug_end();
- sfpt->replace_edges_in_range(res, sobj, start, end);
- _igvn._worklist.push(sfpt);
+ adjust_safepoint_jvms(sfpt, res, sobj);
safepoints_done.append_if_missing(sfpt); // keep it for rollback
}
return true;
}
}
assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
_igvn.remove_dead_node(res);
}
+ eliminate_unused_allocation_edges(alloc);
+ }
+
+ void PhaseMacroExpand::eliminate_unused_allocation_edges(CallNode* alloc) {
//
// Process other users of allocation's projections
//
if (_resproj != NULL && _resproj->outcnt() != 0) {
// First disconnect stores captured by Initialize node.
if (_catchallcatchproj != NULL) {
_igvn.replace_node(_catchallcatchproj, C->top());
}
}
+ #define STACK_REG_BUFFER 4
+
+ bool PhaseMacroExpand::stack_allocation_location_representable(int slot_location) {
+ // TODO: This is likely not enough, as there are values on the stack above the fixed slots.
+ // Revisit to see if there is a better check.
+ OptoReg::Name stack_reg = OptoReg::stack2reg(slot_location + STACK_REG_BUFFER);
+ return RegMask::can_represent(stack_reg);
+ }
+
+ #undef STACK_REG_BUFFER
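stack_allocation_location_representable() guards against stack slot indices that a fixed-width RegMask cannot encode. A standalone sketch of the idea, with assumed constants in place of HotSpot's RegMask (kMaskBits and slot_representable are hypothetical names):

    // Sketch only: a register mask with a fixed bit capacity can name only
    // so many stack slots, so any slot index past that bound, plus some
    // headroom, must be rejected up front.
    constexpr int kMaskBits = 256;       // assumed mask capacity
    constexpr int kStackRegBuffer = 4;   // headroom above the fixed slots

    bool slot_representable(int slot) {
      return slot + kStackRegBuffer < kMaskBits;
    }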
+
+ int PhaseMacroExpand::next_stack_allocated_object(int num_slots) {
+ int current = C->fixed_slots();
+ int next = current + num_slots;
+ if (!stack_allocation_location_representable(next)) {
+ return -1;
+ }
+ // Keep the toplevel high water mark current:
+ if (C->fixed_slots() < next) C->set_fixed_slots(next);
+ return current;
+ }
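next_stack_allocated_object() is effectively a bump allocator over the frame's fixed-slot area: it returns the current high-water mark as the new object's base and advances the mark. A minimal sketch under the same assumptions as the previous one:

    // Sketch only: reserve num_slots at the current mark, or return -1
    // when the new mark would no longer be representable.
    int fixed_slots = 0;                 // stands in for C->fixed_slots()

    int reserve_slots(int num_slots) {
      int base = fixed_slots;
      if (!slot_representable(base + num_slots)) return -1;
      fixed_slots = base + num_slots;    // advance the high-water mark
      return base;                       // caller's object starts here
    }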
+
+ bool PhaseMacroExpand::process_write_barriers_on_stack_allocated_objects(AllocateNode* alloc) {
+ GrowableArray<Node*> barriers;
+ Node *res = alloc->result_cast();
+ assert(res != NULL, "result node must not be null");
+
+ // Find direct barriers on the stack allocated objects.
+ // Those we can simply eliminate.
+ for (DUIterator_Fast imax, i = res->fast_outs(imax); i < imax; i++) {
+ Node *use = res->fast_out(i);
+ if (use->Opcode() == Op_CastP2X) {
+ barriers.append_if_missing(use);
+ } else if (use->is_AddP()) {
+ for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
+ Node *addp_out = use->fast_out(j);
+ if (addp_out->Opcode() == Op_CastP2X) {
+ barriers.append_if_missing(addp_out);
+ }
+ }
+ }
+ }
+
+ while (barriers.length() != 0) {
+ eliminate_gc_barrier(barriers.pop());
+ }
+
+ // After removing the direct barriers, the result may no longer be used.
+ if (alloc->result_cast() == NULL) {
+ return true;
+ }
+
+ // Next, walk all uses of the allocate to discover the barriers that
+ // might be reachable from it. If a barrier is reachable from the
+ // stack allocated object, we unregister it so that the check
+ // elimination code doesn't run on it.
+ VectorSet visited(Thread::current()->resource_area());
+ GrowableArray<Node*> node_worklist;
+
+ BarrierSetC2 *bs = BarrierSet::barrier_set()->barrier_set_c2();
+
+ node_worklist.push(res);
+
+ while (node_worklist.length() != 0) {
+ Node* n = node_worklist.pop();
+
+ if (visited.test_set(n->_idx)) {
+ continue; // already processed
+ }
+
+ for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+ Node *use = n->fast_out(i);
+ if (use->Opcode() == Op_CastP2X) {
+ bs->unregister_potential_barrier_node(use);
+ } else if (use->is_Phi() ||
+ use->is_CheckCastPP() ||
+ use->is_EncodeP() ||
+ use->is_DecodeN() ||
+ use->is_SafePoint() ||
+ use->is_Proj() ||
+ (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
+ // Find barriers beyond our current result
+ node_worklist.push(use);
+ } else if (use->is_Store() && use->Opcode() == Op_StoreP) {
+ if (n != use->in(MemNode::ValueIn)) {
+ continue;
+ }
+ // TODO code copied from escape.cpp::ConnectionGraph::get_addp_base.
+ // Common up this code into a helper
+ Node *memory = use->in(MemNode::Address);
+ if (memory->is_AddP()) {
+ Node *base = memory->in(AddPNode::Base);
+ if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
+ base = memory->in(AddPNode::Address);
+ while (base->is_AddP()) {
+ // Case #6 (unsafe access) may have several chained AddP nodes.
+ assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
+ base = base->in(AddPNode::Address);
+ }
+ if (base->Opcode() == Op_CheckCastPP &&
+ base->bottom_type()->isa_rawptr() &&
+ _igvn.type(base->in(1))->isa_oopptr()) {
+ base = base->in(1); // Case #9
+ }
+ }
+ node_worklist.push(base);
+ }
+ } else if (use->is_AddP() ||
+ (use->is_Load() && use->Opcode() == Op_LoadP)) {
+ // Find barriers for loads
+ node_worklist.push(use);
+ }
+ }
+ }
+ return false;
+ }
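This function and register_stack_allocated_object_with_safepoints() below share the same worklist-plus-visited-set walk over def-use edges. A self-contained sketch of that traversal pattern, using a hypothetical MiniNode rather than HotSpot's Node:

    #include <vector>
    #include <unordered_set>

    struct MiniNode { int idx; std::vector<MiniNode*> outs; };

    // Visit every node reachable through use edges exactly once, even in a
    // cyclic graph, by test-and-setting a visited mark before expansion.
    template <typename Visit>
    void walk_uses(MiniNode* root, Visit visit) {
      std::vector<MiniNode*> worklist{root};
      std::unordered_set<int> visited;
      while (!worklist.empty()) {
        MiniNode* n = worklist.back();
        worklist.pop_back();
        if (!visited.insert(n->idx).second) continue;  // already processed
        for (MiniNode* use : n->outs) {
          visit(use);               // classify the use (barrier, safepoint, ...)
          worklist.push_back(use);  // the real code pushes only selected uses
        }
      }
    }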
+
+ bool PhaseMacroExpand::register_stack_allocated_object_with_safepoints(AllocateNode* alloc, Node* stack_oop) {
+ VectorSet visited(Thread::current()->resource_area());
+ GrowableArray<Node*> node_worklist;
+ GrowableArray<SafePointNode*> temp;
+ Dict* safepoint_map = new Dict(cmpkey, hashkey);
+ bool found_non_direct_safepoint = false;
+ Node *res = alloc->result_cast();
+
+ assert(res != NULL, "result node must not be null");
+
+ node_worklist.push(res);
+
+ while (node_worklist.length() != 0) {
+ Node* n = node_worklist.pop();
+
+ if (visited.test_set(n->_idx)) {
+ continue; // already processed
+ }
+
+ for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+ Node *use = n->fast_out(i);
+ if (use->is_SafePoint()) {
+ SafePointNode* sfpt = use->as_SafePoint();
+ if (sfpt->jvms() != NULL) {
+ temp.push(sfpt);
+ }
+ } else if (use->is_Phi() ||
+ use->is_CheckCastPP() ||
+ use->is_EncodeP() ||
+ use->is_DecodeN() ||
+ use->is_Proj() ||
+ (use->Opcode() == Op_CastP2X) ||
+ use->is_MergeMem() ||
+ use->is_MemBar() ||
+ (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
+ // Find safepoints beyond our current result
+ node_worklist.push(use);
+ } else if (use->is_Store() && use->Opcode() == Op_StoreP) {
+ node_worklist.push(use);
+ if (n != use->in(MemNode::ValueIn)) {
+ continue;
+ }
+ // TODO code copied from escape.cpp::ConnectionGraph::get_addp_base.
+ // Common up this code into a helper
+ Node *memory = use->in(MemNode::Address);
+ if (memory->is_AddP()) {
+ Node *base = memory->in(AddPNode::Base);
+ if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
+ base = memory->in(AddPNode::Address);
+ while (base->is_AddP()) {
+ // Case #6 (unsafe access) may have several chained AddP nodes.
+ assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
+ base = base->in(AddPNode::Address);
+ }
+ if (base->Opcode() == Op_CheckCastPP &&
+ base->bottom_type()->isa_rawptr() &&
+ _igvn.type(base->in(1))->isa_oopptr()) {
+ base = base->in(1); // Case #9
+ }
+ }
+ node_worklist.push(base);
+ }
+ } else if (use->is_AddP() ||
+ (use->is_Load() && use->Opcode() == Op_LoadP)) {
+ // Find safepoints for arrays
+ node_worklist.push(use);
+ }
+ }
+
+ while (temp.length() != 0) {
+ SafePointNode* sfpt = temp.pop();
+ if (res != n) {
+ found_non_direct_safepoint = true;
+ }
+ handle_safepoint_for_stack_allocation(safepoint_map, alloc, stack_oop, n, sfpt);
+ }
+ }
+
+ return found_non_direct_safepoint;
+ }
+
+ void PhaseMacroExpand::handle_safepoint_for_stack_allocation(Dict* safepoint_map, AllocateNode* alloc, Node* oop_node, Node* parent, SafePointNode* sfpt) {
+ Node* res = alloc->result_cast();
+ assert(res->is_CheckCastPP(), "unexpected AllocateNode result");
+ const TypeOopPtr* res_type = _igvn.type(res)->isa_oopptr();
+ ciKlass* klass = res_type->klass();
+ int nfields = 0;
+ if (res_type->isa_instptr()) {
+ // find the fields of the class which will be needed for safepoint debug information
+ assert(klass->is_instance_klass(), "must be an instance klass.");
+ ciInstanceKlass* iklass = klass->as_instance_klass();
+ nfields = iklass->nof_nonstatic_fields();
+ } else {
+ // find the array's elements which will be needed for safepoint debug information
+ nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1);
+ }
+
+ assert(nfields >= 0, "Sanity");
+
+ SafePointScalarObjectNode* sobj = NULL;
+ Node *result = (Node *)(*safepoint_map)[sfpt];
+ if (result != NULL) {
+ assert(result->is_SafePointScalarObject(), "Has to be a SafePointScalarObject");
+ sobj = result->as_SafePointScalarObject();
+ } else {
+ //
+ // Process the safepoint uses
+ //
+ Node* mem = sfpt->memory();
+ Node* ctl = sfpt->control();
+ assert(sfpt->jvms() != NULL, "missed JVMS");
+ // Fields of scalar objs are referenced only at the end
+ // of regular debuginfo at the last (youngest) JVMS.
+ // Record relative start index.
+ uint first_ind = (sfpt->req() - sfpt->jvms()->scloff());
+ sobj = new SafePointScalarObjectNode(res_type,
+ #ifdef ASSERT
+ alloc,
+ #endif
+ first_ind, nfields);
+ sobj->init_req(0, C->root());
+ sobj->add_req(oop_node);
+ transform_later(sobj);
+ sobj->set_stack_allocated(true);
+
+ JVMState *jvms = sfpt->jvms();
+ sfpt->add_req(sobj);
+ jvms->set_endoff(sfpt->req());
+ _igvn._worklist.push(sfpt);
+ safepoint_map->Insert(sfpt, sobj);
+ }
+
+ if (parent == res) {
+ adjust_safepoint_jvms(sfpt, parent, sobj);
+ }
+ }
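The Dict keyed by safepoint guarantees that each safepoint receives at most one SafePointScalarObjectNode for a given allocation, no matter how many paths reach it. A get-or-create sketch of that memoization, with std::unordered_map standing in for HotSpot's Dict and placeholder types for the nodes:

    #include <unordered_map>

    struct Sfpt { };       // stands in for SafePointNode
    struct ScalarObj { };  // stands in for SafePointScalarObjectNode

    ScalarObj* get_or_create(std::unordered_map<Sfpt*, ScalarObj*>& map,
                             Sfpt* sfpt) {
      auto it = map.find(sfpt);
      if (it != map.end()) return it->second;  // reuse the cached descriptor
      ScalarObj* sobj = new ScalarObj();       // build and register once
      map.emplace(sfpt, sobj);
      return sobj;
    }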
+
+ bool PhaseMacroExpand::can_stack_allocate(AllocateNode* alloc, Node* res, intptr_t size_of_object) {
+ return ((res != NULL) && alloc->_is_stack_allocateable && (size_of_object != -1) && should_stack_allocate());
+ }
+
+ void PhaseMacroExpand::estimate_stack_allocation_size(AllocateNode* alloc) {
+ Node* res = alloc->result_cast();
+ Node* size_in_bytes = alloc->in(AllocateNode::AllocSize);
+ intptr_t size_of_object = _igvn.find_intptr_t_con(size_in_bytes, -1);
+
+ if (alloc->_is_scalar_replaceable && !alloc->_is_stack_allocateable) {
+ C->set_fail_stack_allocation_with_references(true);
+ return;
+ }
+
+ bool can_sa = can_stack_allocate(alloc, res, size_of_object);
+ if (alloc->_is_stack_allocateable && !can_sa) {
+ // If we marked the object as stack allocateable in EA but now cannot stack allocate it, fail.
+ C->set_fail_stack_allocation_with_references(true);
+ return;
+ }
+
+ if (!alloc->_is_stack_allocateable) {
+ // If we cannot stack allocate because EA said no, then there is no need to count the size.
+ return;
+ }
+
+ int current = C->stack_allocated_slots();
+ C->set_stack_allocated_slots(current + (size_of_object >> LogBytesPerInt));
+ }
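The slot accounting above converts the object size in bytes to stack slots with size_of_object >> LogBytesPerInt, i.e. a division by the slot size. A worked sketch assuming the usual 4-byte slots:

    constexpr int kLogBytesPerInt = 2;   // assumed: 4-byte stack slots

    int slots_for(long size_in_bytes) {
      return static_cast<int>(size_in_bytes >> kLogBytesPerInt);
    }
    // e.g. a 24-byte object occupies 24 >> 2 == 6 slots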
+
+ // Do stack allocation
+ bool PhaseMacroExpand::stack_allocation(AllocateNode* alloc) {
+ Node* klass = alloc->in(AllocateNode::KlassNode);
+ const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
+ Node *length = (alloc->is_AllocateArray()) ? alloc->in(AllocateNode::ALength) : NULL;
+ Node* size_in_bytes = alloc->in(AllocateNode::AllocSize);
+ Node* res = alloc->result_cast();
+ Node* ctrl = alloc->in(TypeFunc::Control);
+ Node* mem = alloc->in(TypeFunc::Memory);
+
+ intptr_t size_of_object = _igvn.find_intptr_t_con(size_in_bytes, -1);
+
+ if (!can_stack_allocate(alloc, res, size_of_object)) {
+ return false;
+ }
+
+ if (C->fail_stack_allocation_with_references()) {
+ if (alloc->_is_referenced_stack_allocation) {
+ #ifndef PRODUCT
+ if (print_stack_allocation()) {
+ tty->print_cr("---- Avoiding stack allocation on node %d because it is referenced by another alloc and SCR/SA failed in method %s", alloc->_idx, _igvn.C->method()->get_Method()->name_and_sig_as_C_string());
+ }
+ #endif
+ return false;
+ }
+ }
+
+ int next_stack_allocation_slot = next_stack_allocated_object(size_of_object >> LogBytesPerInt);
+ if (next_stack_allocation_slot < 0) {
+ #ifndef PRODUCT
+ if (print_stack_allocation()) {
+ tty->print_cr("---- Avoiding stack allocation on node %d with size %ld for method %s because of insufficient stack space", alloc->_idx, size_of_object, _igvn.C->method()->get_Method()->name_and_sig_as_C_string());
+ }
+ #endif
+ return false;
+ }
+
+ if (mem->is_MergeMem()) {
+ mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
+ }
+
+ extract_call_projections(alloc);
+
+ // Process barriers as this may result in result_cast() becoming NULL
+ if (process_write_barriers_on_stack_allocated_objects(alloc)) {
+ #ifndef PRODUCT
+ if (print_stack_allocation()) {
+ tty->print_cr("---- Allocation %d result_cast is no longer used so yank the alloc instead", alloc->_idx);
+ }
+ #endif
+ InitializeNode* init = alloc->initialization();
+ if (init != NULL) {
+ init->remove(&_igvn);
+ }
+ yank_alloc_node(alloc);
+ return true;
+ }
+
+ assert(res == alloc->result_cast(), "values must match");
+
+ Node* stack_oop = transform_later(new BoxLockNode(next_stack_allocation_slot));
+ Node* new_raw_mem = initialize_object(alloc, ctrl, mem, stack_oop, klass, length, size_in_bytes);
+
+ bool non_direct_safepoints = register_stack_allocated_object_with_safepoints(alloc, stack_oop);
+ if (non_direct_safepoints) {
+ if (length != NULL) {
+ stack_allocation_init_array_length_on_entry(alloc, length, stack_oop);
+ }
+ #ifndef PRODUCT
+ stack_allocation_clear_object_data(alloc, stack_oop);
+ #endif
+ }
+
+ _igvn.replace_node(_resproj, stack_oop);
+
+ for (DUIterator_Fast imax, i = _memproj_fallthrough->fast_outs(imax); i < imax; i++) {
+ Node *use = _memproj_fallthrough->fast_out(i);
+ _igvn.rehash_node_delayed(use);
+ imax -= replace_input(use, _memproj_fallthrough, new_raw_mem);
+ // back up iterator
+ --i;
+ }
+
+ eliminate_unused_allocation_edges(alloc);
+
+ assert(_resproj->outcnt() == 0, "all uses of the original allocate result projection must be deleted");
+ _igvn.remove_dead_node(_resproj);
+
+ #ifndef PRODUCT
+ if (print_stack_allocation()) {
+ tty->print_cr("++++ Performing stack allocation on node %d with size %ld for method %s", alloc->_idx, size_of_object, _igvn.C->method()->get_Method()->name_and_sig_as_C_string());
+ }
+ #endif
+
+ return true;
+ }
+
+ /*
+ Initialize the stack allocated array length on entry to the method.
+ This is required for deopt so it can verify array lengths, and so
+ that GCs that happen after deopt will not crash on uninitialized
+ arrays.
+ */
+ void PhaseMacroExpand::stack_allocation_init_array_length_on_entry(AllocateNode *alloc, Node *length, Node *stack_oop) {
+ Node* start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
+ assert(length != NULL, "Length can not be NULL");
+
+ if (C->is_osr_compilation()) {
+ for (DUIterator_Fast imax, i = start_mem->fast_outs(imax); i < imax; i++) {
+ Node *child = start_mem->fast_out(i);
+ if (child->is_CallLeaf() && child->as_CallLeaf()->is_call_to_osr_migration_end()) {
+ CallLeafNode* call_leaf = child->as_CallLeaf();
+ start_mem = call_leaf->proj_out_or_null(TypeFunc::Memory);
+ break;
+ }
+ }
+ }
+ assert(start_mem != NULL, "Must find start mem");
+ Node* init_mem = start_mem;
+
+ // need to set the length field for arrays for deopt
+ init_mem = make_store(C->start()->proj_out_or_null(TypeFunc::Control),
+ init_mem, stack_oop, arrayOopDesc::length_offset_in_bytes(),
+ length, T_INT);
+
+
+ if (init_mem != start_mem) {
+ for (DUIterator_Fast imax, i = start_mem->fast_outs(imax); i < imax; i++) {
+ Node *use = start_mem->fast_out(i);
+ // Compressed refs can make a new store which adjusts the start
+ // offset and is sourced by start_mem. Make sure we don't create a cycle.
+ if (use == init_mem || (init_mem->find_edge(use) >= 0)) {
+ continue;
+ }
+ _igvn.rehash_node_delayed(use);
+ imax -= replace_input(use, start_mem, init_mem);
+ // back up iterator
+ --i;
+ }
+ }
+ }
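The rewiring loop here, and its twin in stack_allocation_clear_object_data() below, follow one pattern: splice the new initializing store in front of all existing users of start_mem while skipping edges that would close a cycle. A sketch as a hypothetical standalone helper; replace_input() is assumed, as in the patch, to return the number of edges it swapped:

    void splice_memory(PhaseIterGVN& igvn, Node* old_mem, Node* new_mem) {
      for (DUIterator_Fast imax, i = old_mem->fast_outs(imax); i < imax; i++) {
        Node* use = old_mem->fast_out(i);
        // Skip uses that are, or feed, new_mem itself; rewiring those
        // would create a cycle in the memory graph.
        if (use == new_mem || new_mem->find_edge(use) >= 0) continue;
        igvn.rehash_node_delayed(use);
        imax -= replace_input(use, old_mem, new_mem);
        --i;  // back up the iterator after edges were removed
      }
    }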
+
+ #ifndef PRODUCT
+ /*
+ Initialize the stack allocated object on entry to the method to ensure
+ it has defined contents at safepoints that may only be reachable
+ through phis, where the object may not actually have been initialized.
+ */
+ void PhaseMacroExpand::stack_allocation_clear_object_data(AllocateNode *alloc, Node *stack_oop) {
+ Node* klass = alloc->in(AllocateNode::KlassNode);
+ Node *length = (alloc->is_AllocateArray()) ? alloc->in(AllocateNode::ALength) : NULL;
+ Node* size_in_bytes = alloc->in(AllocateNode::AllocSize);
+ Node* start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
+ if (C->is_osr_compilation()) {
+ for (DUIterator_Fast imax, i = start_mem->fast_outs(imax); i < imax; i++) {
+ Node *child = start_mem->fast_out(i);
+ if (child->is_CallLeaf() && child->as_CallLeaf()->is_call_to_osr_migration_end()) {
+ CallLeafNode* call_leaf = child->as_CallLeaf();
+ start_mem = call_leaf->proj_out_or_null(TypeFunc::Memory);
+ break;
+ }
+ }
+ }
+ assert(start_mem != NULL, "Must find start mem");
+ int header_size = alloc->minimum_header_size();
+ Node* init_mem = start_mem;
+ if (length != NULL) {
+ // conservatively small header size:
+ header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
+ ciKlass* k = _igvn.type(klass)->is_klassptr()->klass();
+ if (k->is_array_klass()) { // we know the exact header size in most cases:
+ header_size = Klass::layout_helper_header_size(k->layout_helper());
+ }
+ }
+ init_mem = ClearArrayNode::clear_memory(C->start()->proj_out_or_null(TypeFunc::Control),
+ init_mem, stack_oop, header_size, size_in_bytes,
+ &_igvn);
+ if (init_mem != start_mem) {
+ for (DUIterator_Fast imax, i = start_mem->fast_outs(imax); i < imax; i++) {
+ Node *use = start_mem->fast_out(i);
+ // Compressed refs can make a new store which adjusts the start
+ // offset and is sourced by start_mem. Make sure we don't create a cycle.
+ if (use == init_mem || (init_mem->find_edge(use) >= 0)) {
+ continue;
+ }
+ _igvn.rehash_node_delayed(use);
+ imax -= replace_input(use, start_mem, init_mem);
+ // back up iterator
+ --i;
+ }
+ }
+ }
+ #endif
+
bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
// Don't do scalar replacement if the frame can be popped by JVMTI:
// if reallocation fails during deoptimization we'll pop all
// interpreter frames for this compiled frame and that won't play
// nice with JVMTI popframe.
}
process_users_of_allocation(alloc);
#ifndef PRODUCT
- if (PrintEliminateAllocations) {
+ if (print_eliminate_allocations()) {
if (alloc->is_AllocateArray())
tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
else
tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
}
}
process_users_of_allocation(boxing);
#ifndef PRODUCT
- if (PrintEliminateAllocations) {
+ if (print_eliminate_allocations()) {
tty->print("++++ Eliminated: %d ", boxing->_idx);
boxing->method()->print_short_name(tty);
tty->cr();
}
#endif
_igvn.optimize();
if (C->failing()) return true;
_igvn.set_delay_transform(true);
}
+ for (int i = C->macro_count(); i > 0; i--) {
+ Node* n = C->macro_node(i - 1);
+ assert(n->is_macro(), "only macro nodes expected here");
+
+ switch (n->class_id()) {
+ case Node::Class_Allocate:
+ case Node::Class_AllocateArray:
+ estimate_stack_allocation_size(n->as_Allocate());
+ break;
+ default:
+ assert(false, "unknown node type in macro list");
+ }
+ }
+
+ // Check whether the total stack allocation size is too large before macro
+ // expansion, so we can reject stack allocations up front.
+ if (!stack_allocation_location_representable(C->fixed_slots() + C->stack_allocated_slots())) {
+ C->set_fail_stack_allocation_with_references(true);
+ }
+
// All nodes except Allocate nodes are expanded now. There could be
// new optimization opportunities (such as folding newly created
// load from a just allocated object). Run IGVN.
// expand "macro" nodes
if (C->check_node_count(300, "out of nodes before macro expansion")) {
return true;
}
switch (n->class_id()) {
case Node::Class_Allocate:
- expand_allocate(n->as_Allocate());
+ if (!stack_allocation(n->as_Allocate())) {
+ expand_allocate(n->as_Allocate());
+ }
break;
case Node::Class_AllocateArray:
- expand_allocate_array(n->as_AllocateArray());
+ if (!stack_allocation(n->as_AllocateArray())) {
+ expand_allocate_array(n->as_AllocateArray());
+ }
break;
default:
assert(false, "unknown node type in macro list");
}
assert(C->macro_count() < macro_count, "must have deleted a node from macro list");