
src/hotspot/share/opto/macro.cpp

 286         mem = in->in(TypeFunc::Memory);
 287       } else {
 288         assert(false, "unexpected projection");
 289       }
 290     } else if (mem->is_Store()) {
 291       const TypePtr* atype = mem->as_Store()->adr_type();
 292       int adr_idx = phase->C->get_alias_index(atype);
 293       if (adr_idx == alias_idx) {
 294         assert(atype->isa_oopptr(), "address type must be oopptr");
 295         int adr_offset = atype->offset();
 296         uint adr_iid = atype->is_oopptr()->instance_id();
 297         // Array element references have the same alias_idx
 298         // but different offsets and different instance_ids.
 299         if (adr_offset == offset && adr_iid == alloc->_idx)
 300           return mem;
 301       } else {
 302         assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
 303       }
 304       mem = mem->in(MemNode::Memory);
 305     } else if (mem->is_ClearArray()) {
 306       if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
 307         // Cannot bypass initialization of the instance
 308         // we are looking for.
 309         debug_only(intptr_t offset;)
 310         assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
 311         InitializeNode* init = alloc->as_Allocate()->initialization();
 312         // We are looking for stored value, return Initialize node
 313         // or memory edge from Allocate node.
 314         if (init != NULL)
 315           return init;
 316         else
 317           return alloc->in(TypeFunc::Memory); // It will produce zero value (see callers).
 318       }
 319       // Otherwise skip it (the call updated 'mem' value).
 320     } else if (mem->Opcode() == Op_SCMemProj) {
 321       mem = mem->in(0);
 322       Node* adr = NULL;
 323       if (mem->is_LoadStore()) {
 324         adr = mem->in(MemNode::Address);
 325       } else {

 710           if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) {
 711             NOT_PRODUCT(fail_eliminate = "Object is return value";)
 712           } else {
 713             NOT_PRODUCT(fail_eliminate = "Object is referenced by Phi";)
 714           }
 715           DEBUG_ONLY(disq_node = use;)
 716         } else {
 717           if (use->Opcode() == Op_Return) {
 718             NOT_PRODUCT(fail_eliminate = "Object is return value";)
 719           } else {
 720             NOT_PRODUCT(fail_eliminate = "Object is referenced by node";)
 721           }
 722           DEBUG_ONLY(disq_node = use;)
 723         }
 724         can_eliminate = false;
 725       }
 726     }
 727   }
 728 
 729 #ifndef PRODUCT
 730   if (PrintEliminateAllocations) {
 731     if (can_eliminate) {
 732       tty->print("Scalar ");
 733       if (res == NULL)
 734         alloc->dump();
 735       else
 736         res->dump();
 737     } else if (alloc->_is_scalar_replaceable) {
 738       tty->print("NotScalar (%s)", fail_eliminate);
 739       if (res == NULL)
 740         alloc->dump();
 741       else
 742         res->dump();
 743 #ifdef ASSERT
 744       if (disq_node != NULL) {
 745           tty->print("  >>>> ");
 746           disq_node->dump();
 747       }
 748 #endif /*ASSERT*/
 749     }
 750   }
 751 #endif
 752   return can_eliminate;
 753 }
 754 
 755 // Do scalar replacement.
 756 bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
 757   GrowableArray <SafePointNode *> safepoints_done;
 758 
 759   ciKlass* klass = NULL;
 760   ciInstanceKlass* iklass = NULL;
 761   int nfields = 0;
 762   int array_base = 0;
 763   int element_size = 0;
 764   BasicType basic_elem_type = T_ILLEGAL;
 765   ciType* elem_type = NULL;
 766 
 767   Node* res = alloc->result_cast();
 768   assert(res == NULL || res->is_CheckCastPP(), "unexpected AllocateNode result");
 769   const TypeOopPtr* res_type = NULL;
 770   if (res != NULL) { // Could be NULL when there are no users
 771     res_type = _igvn.type(res)->isa_oopptr();
 772   }
 773 
 774   if (res != NULL) {

 867           }
 868           JVMState *jvms = sfpt_done->jvms();
 869           jvms->set_endoff(sfpt_done->req());
 870           // Now make a pass over the debug information replacing any references
 871           // to SafePointScalarObjectNode with the allocated object.
 872           int start = jvms->debug_start();
 873           int end   = jvms->debug_end();
 874           for (int i = start; i < end; i++) {
 875             if (sfpt_done->in(i)->is_SafePointScalarObject()) {
 876               SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject();
 877               if (scobj->first_index(jvms) == sfpt_done->req() &&
 878                   scobj->n_fields() == (uint)nfields) {
 879                 assert(scobj->alloc() == alloc, "sanity");
 880                 sfpt_done->set_req(i, res);
 881               }
 882             }
 883           }
 884           _igvn._worklist.push(sfpt_done);
 885         }
 886 #ifndef PRODUCT
 887         if (PrintEliminateAllocations) {
 888           if (field != NULL) {
 889             tty->print("=== At SafePoint node %d can't find value of Field: ",
 890                        sfpt->_idx);
 891             field->print();
 892             int field_idx = C->get_alias_index(field_addr_type);
 893             tty->print(" (alias_idx=%d)", field_idx);
 894           } else { // Array's element
 895             tty->print("=== At SafePoint node %d can't find value of array element [%d]",
 896                        sfpt->_idx, j);
 897           }
 898           tty->print(", which prevents elimination of: ");
 899           if (res == NULL)
 900             alloc->dump();
 901           else
 902             res->dump();
 903         }
 904 #endif
 905         return false;
 906       }
 907       if (UseCompressedOops && field_type->isa_narrowoop()) {
 908         // Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
 909         // to be able to scalar-replace the allocation.
 910         if (field_val->is_EncodeP()) {
 911           field_val = field_val->in(1);
 912         } else {
 913           field_val = transform_later(new DecodeNNode(field_val, field_val->get_ptr_type()));
 914         }
 915       }
 916       sfpt->add_req(field_val);
 917     }
 918     JVMState *jvms = sfpt->jvms();
 919     jvms->set_endoff(sfpt->req());
 920     // Now make a pass over the debug information replacing any references
 921     // to the allocated object with "sobj"
 922     int start = jvms->debug_start();
 923     int end   = jvms->debug_end();
 924     sfpt->replace_edges_in_range(res, sobj, start, end);
 925     _igvn._worklist.push(sfpt);
 926     safepoints_done.append_if_missing(sfpt); // keep it for rollback
 927   }
 928   return true;
 929 }
 930 
 931 static void disconnect_projections(MultiNode* n, PhaseIterGVN& igvn) {
 932   Node* ctl_proj = n->proj_out_or_null(TypeFunc::Control);
 933   Node* mem_proj = n->proj_out_or_null(TypeFunc::Memory);
 934   if (ctl_proj != NULL) {
 935     igvn.replace_node(ctl_proj, n->in(0));
 936   }
 937   if (mem_proj != NULL) {
 938     igvn.replace_node(mem_proj, n->in(TypeFunc::Memory));
 939   }
 940 }
 941 
 942 // Process users of eliminated allocation.
 943 void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
 944   Node* res = alloc->result_cast();
 945   if (res != NULL) {

1001           // Disconnect src right away: it can help find new
1002           // opportunities for allocation elimination
1003           Node* src = ac->in(ArrayCopyNode::Src);
1004           ac->replace_edge(src, top());
1005           // src can be top at this point if src and dest of the
1006           // arraycopy were the same
1007           if (src->outcnt() == 0 && !src->is_top()) {
1008             _igvn.remove_dead_node(src);
1009           }
1010         }
1011         _igvn._worklist.push(ac);
1012       } else {
1013         eliminate_gc_barrier(use);
1014       }
1015       j -= (oc1 - res->outcnt());
1016     }
1017     assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
1018     _igvn.remove_dead_node(res);
1019   }
1020 
1021   //
1022   // Process other users of allocation's projections
1023   //
1024   if (_resproj != NULL && _resproj->outcnt() != 0) {
1025     // First disconnect stores captured by Initialize node.
1026     // If Initialize node is eliminated first in the following code,
1027     // it will kill such stores and DUIterator_Last will assert.
1028     for (DUIterator_Fast jmax, j = _resproj->fast_outs(jmax);  j < jmax; j++) {
1029       Node *use = _resproj->fast_out(j);
1030       if (use->is_AddP()) {
1031         // raw memory addresses used only by the initialization
1032         _igvn.replace_node(use, C->top());
1033         --j; --jmax;
1034       }
1035     }
1036     for (DUIterator_Last jmin, j = _resproj->last_outs(jmin); j >= jmin; ) {
1037       Node *use = _resproj->last_out(j);
1038       uint oc1 = _resproj->outcnt();
1039       if (use->is_Initialize()) {
1040         // Eliminate Initialize node.

1069   if (_fallthroughcatchproj != NULL) {
1070     _igvn.replace_node(_fallthroughcatchproj, alloc->in(TypeFunc::Control));
1071   }
1072   if (_memproj_fallthrough != NULL) {
1073     _igvn.replace_node(_memproj_fallthrough, alloc->in(TypeFunc::Memory));
1074   }
1075   if (_memproj_catchall != NULL) {
1076     _igvn.replace_node(_memproj_catchall, C->top());
1077   }
1078   if (_ioproj_fallthrough != NULL) {
1079     _igvn.replace_node(_ioproj_fallthrough, alloc->in(TypeFunc::I_O));
1080   }
1081   if (_ioproj_catchall != NULL) {
1082     _igvn.replace_node(_ioproj_catchall, C->top());
1083   }
1084   if (_catchallcatchproj != NULL) {
1085     _igvn.replace_node(_catchallcatchproj, C->top());
1086   }
1087 }
1088 
1089 bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
1090   // Don't do scalar replacement if the frame can be popped by JVMTI:
1091   // if reallocation fails during deoptimization we'll pop all
1092   // interpreter frames for this compiled frame and that won't play
1093   // nice with JVMTI popframe.
1094   if (!EliminateAllocations || JvmtiExport::can_pop_frame() || !alloc->_is_non_escaping) {
1095     return false;
1096   }
1097   Node* klass = alloc->in(AllocateNode::KlassNode);
1098   const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
1099   Node* res = alloc->result_cast();
1100   // Eliminate boxing allocations which are not used
 1101   // regardless of scalar-replaceable status.
1102   bool boxing_alloc = C->eliminate_boxing() &&
1103                       tklass->klass()->is_instance_klass()  &&
1104                       tklass->klass()->as_instance_klass()->is_box_klass();
1105   if (!alloc->_is_scalar_replaceable && (!boxing_alloc || (res != NULL))) {
1106     return false;
1107   }
1108 

1125 
1126   if (!scalar_replacement(alloc, safepoints)) {
1127     return false;
1128   }
1129 
1130   CompileLog* log = C->log();
1131   if (log != NULL) {
1132     log->head("eliminate_allocation type='%d'",
1133               log->identify(tklass->klass()));
1134     JVMState* p = alloc->jvms();
1135     while (p != NULL) {
1136       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
1137       p = p->caller();
1138     }
1139     log->tail("eliminate_allocation");
1140   }
1141 
1142   process_users_of_allocation(alloc);
1143 
1144 #ifndef PRODUCT
1145   if (PrintEliminateAllocations) {
1146     if (alloc->is_AllocateArray())
1147       tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
1148     else
1149       tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
1150   }
1151 #endif
1152 
1153   return true;
1154 }
1155 
1156 bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) {
1157   // EA should remove all uses of non-escaping boxing node.
1158   if (!C->eliminate_boxing() || boxing->proj_out_or_null(TypeFunc::Parms) != NULL) {
1159     return false;
1160   }
1161 
1162   assert(boxing->result_cast() == NULL, "unexpected boxing node result");
1163 
1164   extract_call_projections(boxing);
1165 
1166   const TypeTuple* r = boxing->tf()->range();
1167   assert(r->cnt() > TypeFunc::Parms, "sanity");
1168   const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr();
1169   assert(t != NULL, "sanity");
1170 
1171   CompileLog* log = C->log();
1172   if (log != NULL) {
1173     log->head("eliminate_boxing type='%d'",
1174               log->identify(t->klass()));
1175     JVMState* p = boxing->jvms();
1176     while (p != NULL) {
1177       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
1178       p = p->caller();
1179     }
1180     log->tail("eliminate_boxing");
1181   }
1182 
1183   process_users_of_allocation(boxing);
1184 
1185 #ifndef PRODUCT
1186   if (PrintEliminateAllocations) {
1187     tty->print("++++ Eliminated: %d ", boxing->_idx);
1188     boxing->method()->print_short_name(tty);
1189     tty->cr();
1190   }
1191 #endif
1192 
1193   return true;
1194 }
1195 
1196 //---------------------------set_eden_pointers-------------------------
1197 void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr) {
1198   if (UseTLAB) {                // Private allocation: load from TLS
1199     Node* thread = transform_later(new ThreadLocalNode());
1200     int tlab_top_offset = in_bytes(JavaThread::tlab_top_offset());
1201     int tlab_end_offset = in_bytes(JavaThread::tlab_end_offset());
1202     eden_top_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_top_offset);
1203     eden_end_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_end_offset);
1204   } else {                      // Shared allocation: load from globals
1205     CollectedHeap* ch = Universe::heap();
1206     address top_adr = (address)ch->top_addr();

2762       assert(C->macro_count() == (old_macro_count - 1), "expansion must have deleted one node from macro list");
2763       break;
2764     case Node::Class_SubTypeCheck:
2765       expand_subtypecheck_node(n->as_SubTypeCheck());
2766       assert(C->macro_count() == (old_macro_count - 1), "expansion must have deleted one node from macro list");
2767       break;
2768     default:
2769       assert(false, "unknown node type in macro list");
2770     }
2771     assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
2772     if (C->failing())  return true;
2773 
2774     // Clean up the graph so we're less likely to hit the maximum node
2775     // limit
2776     _igvn.set_delay_transform(false);
2777     _igvn.optimize();
2778     if (C->failing())  return true;
2779     _igvn.set_delay_transform(true);
2780   }
2781 
2782   // All nodes except Allocate nodes are expanded now. There could be
 2783   // new optimization opportunities (such as folding a newly created
 2784   // load from a just-allocated object). Run IGVN.
2785 
2786   // expand "macro" nodes
2787   // nodes are removed from the macro list as they are processed
2788   while (C->macro_count() > 0) {
2789     int macro_count = C->macro_count();
2790     Node * n = C->macro_node(macro_count-1);
2791     assert(n->is_macro(), "only macro nodes expected here");
2792     if (_igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
2793       // node is unreachable, so don't try to expand it
2794       C->remove_macro_node(n);
2795       continue;
2796     }
2797     // Make sure expansion will not cause node limit to be exceeded.
2798     // Worst case is a macro node gets expanded into about 200 nodes.
2799     // Allow 50% more for optimization.
2800     if (C->check_node_count(300, "out of nodes before macro expansion")) {
2801       return true;
2802     }
2803     switch (n->class_id()) {
2804     case Node::Class_Allocate:
2805       expand_allocate(n->as_Allocate());
2806       break;
2807     case Node::Class_AllocateArray:
2808       expand_allocate_array(n->as_AllocateArray());
2809       break;
2810     default:
2811       assert(false, "unknown node type in macro list");
2812     }
2813     assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
2814     if (C->failing())  return true;
2815 
2816     // Clean up the graph so we're less likely to hit the maximum node
2817     // limit
2818     _igvn.set_delay_transform(false);
2819     _igvn.optimize();
2820     if (C->failing())  return true;
2821     _igvn.set_delay_transform(true);
2822   }
2823 
2824   _igvn.set_delay_transform(false);
2825   return false;
2826 }

 286         mem = in->in(TypeFunc::Memory);
 287       } else {
 288         assert(false, "unexpected projection");
 289       }
 290     } else if (mem->is_Store()) {
 291       const TypePtr* atype = mem->as_Store()->adr_type();
 292       int adr_idx = phase->C->get_alias_index(atype);
 293       if (adr_idx == alias_idx) {
 294         assert(atype->isa_oopptr(), "address type must be oopptr");
 295         int adr_offset = atype->offset();
 296         uint adr_iid = atype->is_oopptr()->instance_id();
 297         // Array element references have the same alias_idx
 298         // but different offsets and different instance_ids.
 299         if (adr_offset == offset && adr_iid == alloc->_idx)
 300           return mem;
 301       } else {
 302         assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
 303       }
 304       mem = mem->in(MemNode::Memory);
 305     } else if (mem->is_ClearArray()) {
 306       intptr_t offset;
 307       AllocateNode* alloc = AllocateNode::Ideal_allocation(mem->in(3), phase, offset);
 308 
 309       if (alloc == NULL) {
 310         return start_mem;
 311       }
 312 
 313       if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
 314         // Cannot bypass initialization of the instance
 315         // we are looking for.
 316         debug_only(intptr_t offset;)
 317         assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
 318         InitializeNode* init = alloc->as_Allocate()->initialization();
 319         // We are looking for stored value, return Initialize node
 320         // or memory edge from Allocate node.
 321         if (init != NULL)
 322           return init;
 323         else
 324           return alloc->in(TypeFunc::Memory); // It will produce zero value (see callers).
 325       }
 326       // Otherwise skip it (the call updated 'mem' value).
 327     } else if (mem->Opcode() == Op_SCMemProj) {
 328       mem = mem->in(0);
 329       Node* adr = NULL;
 330       if (mem->is_LoadStore()) {
 331         adr = mem->in(MemNode::Address);
 332       } else {

 717           if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) {
 718             NOT_PRODUCT(fail_eliminate = "Object is return value";)
 719           } else {
 720             NOT_PRODUCT(fail_eliminate = "Object is referenced by Phi";)
 721           }
 722           DEBUG_ONLY(disq_node = use;)
 723         } else {
 724           if (use->Opcode() == Op_Return) {
 725             NOT_PRODUCT(fail_eliminate = "Object is return value";)
 726           } else {
 727             NOT_PRODUCT(fail_eliminate = "Object is referenced by node";)
 728           }
 729           DEBUG_ONLY(disq_node = use;)
 730         }
 731         can_eliminate = false;
 732       }
 733     }
 734   }
 735 
 736 #ifndef PRODUCT
 737   if (print_eliminate_allocations()) {
 738     if (can_eliminate) {
 739       tty->print("Scalar ");
 740       if (res == NULL)
 741         alloc->dump();
 742       else
 743         res->dump();
 744     } else if (alloc->_is_scalar_replaceable) {
 745       tty->print("NotScalar (%s)", fail_eliminate);
 746       if (res == NULL)
 747         alloc->dump();
 748       else
 749         res->dump();
 750 #ifdef ASSERT
 751       if (disq_node != NULL) {
 752           tty->print("  >>>> ");
 753           disq_node->dump();
 754       }
 755 #endif /*ASSERT*/
 756     }
 757   }
 758 #endif
 759   return can_eliminate;
 760 }
 761 
 762 void PhaseMacroExpand::adjust_safepoint_jvms(SafePointNode* sfpt, Node* res, SafePointScalarObjectNode* sobj) {
 763   JVMState *jvms = sfpt->jvms();
 764   jvms->set_endoff(sfpt->req());
 765 
 766   // Now make a pass over the debug information replacing any references
 767   // to the allocated object with "sobj"
 768   int start = jvms->debug_start();
 769   int end   = jvms->debug_end();
 770   sfpt->replace_edges_in_range(res, sobj, start, end);
 771   _igvn._worklist.push(sfpt);
 772 }
 773 
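The helper above relies on the JVMState convention that a trailing window of the SafePoint's inputs holds debug info: set_endoff(req()) widens that window after new edges are appended, and replace_edges_in_range only rewrites edges inside it. A rough standalone model of the pattern (hypothetical minimal types, not HotSpot's real classes):

    #include <cstddef>
    #include <vector>

    struct Node {};

    // Minimal stand-in for SafePointNode + JVMState, for illustration only.
    struct MiniSafePoint {
      std::vector<Node*> in;        // all inputs; debug info sits at the tail
      size_t debug_start = 0;
      size_t debug_end   = 0;       // window is [debug_start, debug_end)

      size_t req() const { return in.size(); }
      void add_req(Node* n) { in.push_back(n); }
      void set_endoff(size_t off) { debug_end = off; }

      // Models Node::replace_edges_in_range: rewrite old_ref -> new_ref,
      // but only inside the given range of inputs.
      void replace_edges_in_range(Node* old_ref, Node* new_ref,
                                  size_t start, size_t end) {
        for (size_t i = start; i < end && i < in.size(); i++) {
          if (in[i] == old_ref) in[i] = new_ref;
        }
      }
    };

In these terms, adjust_safepoint_jvms is: widen the window to cover the edges just appended with add_req, then swap references to the allocation result for the scalar-object node within it.
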
 774 // Do scalar replacement.
 775 bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
 776   GrowableArray <SafePointNode *> safepoints_done;
 777 
 778   ciKlass* klass = NULL;
 779   ciInstanceKlass* iklass = NULL;
 780   int nfields = 0;
 781   int array_base = 0;
 782   int element_size = 0;
 783   BasicType basic_elem_type = T_ILLEGAL;
 784   ciType* elem_type = NULL;
 785 
 786   Node* res = alloc->result_cast();
 787   assert(res == NULL || res->is_CheckCastPP(), "unexpected AllocateNode result");
 788   const TypeOopPtr* res_type = NULL;
 789   if (res != NULL) { // Could be NULL when there are no users
 790     res_type = _igvn.type(res)->isa_oopptr();
 791   }
 792 
 793   if (res != NULL) {

 886           }
 887           JVMState *jvms = sfpt_done->jvms();
 888           jvms->set_endoff(sfpt_done->req());
 889           // Now make a pass over the debug information replacing any references
 890           // to SafePointScalarObjectNode with the allocated object.
 891           int start = jvms->debug_start();
 892           int end   = jvms->debug_end();
 893           for (int i = start; i < end; i++) {
 894             if (sfpt_done->in(i)->is_SafePointScalarObject()) {
 895               SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject();
 896               if (scobj->first_index(jvms) == sfpt_done->req() &&
 897                   scobj->n_fields() == (uint)nfields) {
 898                 assert(scobj->alloc() == alloc, "sanity");
 899                 sfpt_done->set_req(i, res);
 900               }
 901             }
 902           }
 903           _igvn._worklist.push(sfpt_done);
 904         }
 905 #ifndef PRODUCT
 906         if (print_eliminate_allocations()) {
 907           if (field != NULL) {
 908             tty->print("=== At SafePoint node %d can't find value of Field: ",
 909                        sfpt->_idx);
 910             field->print();
 911             int field_idx = C->get_alias_index(field_addr_type);
 912             tty->print(" (alias_idx=%d)", field_idx);
 913           } else { // Array's element
 914             tty->print("=== At SafePoint node %d can't find value of array element [%d]",
 915                        sfpt->_idx, j);
 916           }
 917           tty->print(", which prevents elimination of: ");
 918           if (res == NULL)
 919             alloc->dump();
 920           else
 921             res->dump();
 922         }
 923 #endif
 924         return false;
 925       }
 926       if (UseCompressedOops && field_type->isa_narrowoop()) {
 927         // Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
 928         // to be able to scalar-replace the allocation.
 929         if (field_val->is_EncodeP()) {
 930           field_val = field_val->in(1);
 931         } else {
 932           field_val = transform_later(new DecodeNNode(field_val, field_val->get_ptr_type()));
 933         }
 934       }
 935       sfpt->add_req(field_val);
 936     }
 937     adjust_safepoint_jvms(sfpt, res, sobj);
 938     safepoints_done.append_if_missing(sfpt); // keep it for rollback
 939   }
 940   return true;
 941 }
 942 
 943 static void disconnect_projections(MultiNode* n, PhaseIterGVN& igvn) {
 944   Node* ctl_proj = n->proj_out_or_null(TypeFunc::Control);
 945   Node* mem_proj = n->proj_out_or_null(TypeFunc::Memory);
 946   if (ctl_proj != NULL) {
 947     igvn.replace_node(ctl_proj, n->in(0));
 948   }
 949   if (mem_proj != NULL) {
 950     igvn.replace_node(mem_proj, n->in(TypeFunc::Memory));
 951   }
 952 }
 953 
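A hypothetical use of the helper above (not taken from this patch), splicing a barrier that has been proven redundant out of the graph:

    // 'membar' is assumed to be a MultiNode whose side effects are no longer
    // needed: control users of its Control projection get rewired to
    // membar->in(0), memory users of its Memory projection to
    // membar->in(TypeFunc::Memory), after which the node itself is dead.
    disconnect_projections(membar, _igvn);
    _igvn.remove_dead_node(membar);
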
 954 // Process users of eliminated allocation.
 955 void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
 956   Node* res = alloc->result_cast();
 957   if (res != NULL) {

1013           // Disconnect src right away: it can help find new
1014           // opportunities for allocation elimination
1015           Node* src = ac->in(ArrayCopyNode::Src);
1016           ac->replace_edge(src, top());
1017           // src can be top at this point if src and dest of the
1018           // arraycopy were the same
1019           if (src->outcnt() == 0 && !src->is_top()) {
1020             _igvn.remove_dead_node(src);
1021           }
1022         }
1023         _igvn._worklist.push(ac);
1024       } else {
1025         eliminate_gc_barrier(use);
1026       }
1027       j -= (oc1 - res->outcnt());
1028     }
1029     assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
1030     _igvn.remove_dead_node(res);
1031   }
1032 
1033   eliminate_unused_allocation_edges(alloc);
1034 }
1035 
1036 void PhaseMacroExpand::eliminate_unused_allocation_edges(CallNode* alloc) {
1037   //
1038   // Process other users of allocation's projections
1039   //
1040   if (_resproj != NULL && _resproj->outcnt() != 0) {
1041     // First disconnect stores captured by Initialize node.
1042     // If Initialize node is eliminated first in the following code,
1043     // it will kill such stores and DUIterator_Last will assert.
1044     for (DUIterator_Fast jmax, j = _resproj->fast_outs(jmax);  j < jmax; j++) {
1045       Node *use = _resproj->fast_out(j);
1046       if (use->is_AddP()) {
1047         // raw memory addresses used only by the initialization
1048         _igvn.replace_node(use, C->top());
1049         --j; --jmax;
1050       }
1051     }
1052     for (DUIterator_Last jmin, j = _resproj->last_outs(jmin); j >= jmin; ) {
1053       Node *use = _resproj->last_out(j);
1054       uint oc1 = _resproj->outcnt();
1055       if (use->is_Initialize()) {
1056         // Eliminate Initialize node.

1085   if (_fallthroughcatchproj != NULL) {
1086     _igvn.replace_node(_fallthroughcatchproj, alloc->in(TypeFunc::Control));
1087   }
1088   if (_memproj_fallthrough != NULL) {
1089     _igvn.replace_node(_memproj_fallthrough, alloc->in(TypeFunc::Memory));
1090   }
1091   if (_memproj_catchall != NULL) {
1092     _igvn.replace_node(_memproj_catchall, C->top());
1093   }
1094   if (_ioproj_fallthrough != NULL) {
1095     _igvn.replace_node(_ioproj_fallthrough, alloc->in(TypeFunc::I_O));
1096   }
1097   if (_ioproj_catchall != NULL) {
1098     _igvn.replace_node(_ioproj_catchall, C->top());
1099   }
1100   if (_catchallcatchproj != NULL) {
1101     _igvn.replace_node(_catchallcatchproj, C->top());
1102   }
1103 }
1104 
1105 #define STACK_REG_BUFFER 4
1106 
1107 bool PhaseMacroExpand::stack_allocation_location_representable(int slot_location) {
 1108   // TODO: this is likely not enough, as there are values on the stack above the fixed slots.
 1109   // Revisit to see if there is a better check.
1110   OptoReg::Name stack_reg = OptoReg::stack2reg(slot_location + STACK_REG_BUFFER);
 1111   return RegMask::can_represent(stack_reg);
1116 }
1117 
1118 #undef STACK_REG_BUFFER
1119 
1120 int PhaseMacroExpand::next_stack_allocated_object(int num_slots) {
1121   int current = C->fixed_slots();
1122   int next    = current + num_slots;
1123   if (!stack_allocation_location_representable(next)) {
1124     return -1;
1125   }
1126   // Keep the toplevel high water mark current:
1127   if (C->fixed_slots() < next) C->set_fixed_slots(next);
1128   return current;
1129 }
1130 
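The function above is a bump allocator over the method's fixed-slot area: it returns the current high-water mark and advances it by num_slots 32-bit slots, or returns -1 when the resulting stack location would not be representable. A self-contained sketch of the same idea, with a plain capacity constant standing in for the RegMask::can_represent check:

    #include <cstdio>

    const int kMaxRepresentableSlots = 128;  // assumed stand-in for RegMask limits

    struct SlotAllocator {
      int fixed_slots = 0;  // high-water mark, as in C->fixed_slots()

      // Reserve num_slots; return the base slot, or -1 if it would not fit.
      int next(int num_slots) {
        int current   = fixed_slots;
        int next_mark = current + num_slots;
        if (next_mark > kMaxRepresentableSlots) {
          return -1;
        }
        if (fixed_slots < next_mark) fixed_slots = next_mark;  // keep mark current
        return current;
      }
    };

    int main() {
      SlotAllocator a;
      printf("%d\n", a.next(6));   // 0 -- a 24-byte object needs 24 >> 2 = 6 slots
      printf("%d\n", a.next(10));  // 6 -- a 40-byte array lands right after
      return 0;
    }
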
1131 bool PhaseMacroExpand::process_write_barriers_on_stack_allocated_objects(AllocateNode* alloc) {
1132   GrowableArray<Node*> barriers;
1133   Node *res = alloc->result_cast();
1134   assert(res != NULL, "result node must not be null");
1135 
1136   // Find direct barriers on the stack allocated objects.
1137   // Those we can simply eliminate.
1138   for (DUIterator_Fast imax, i = res->fast_outs(imax); i < imax; i++) {
1139     Node *use = res->fast_out(i);
1140     if (use->Opcode() == Op_CastP2X) {
1141       barriers.append_if_missing(use);
1142     } else if (use->is_AddP()) {
1143       for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
1144         Node *addp_out = use->fast_out(j);
1145         if (addp_out->Opcode() == Op_CastP2X) {
1146           barriers.append_if_missing(addp_out);
1147         }
1148       }
1149     }
1150   }
1151 
1152   while (barriers.length() != 0) {
1153     eliminate_gc_barrier(barriers.pop());
1154   }
1155 
 1156   // After removing the direct barriers, the result may no longer be used
1157   if (alloc->result_cast() == NULL) {
1158     return true;
1159   }
1160 
 1161   // Next, walk all uses of the allocation to discover the barriers that
 1162   // might be reachable from it. If a barrier is reachable from the
 1163   // stack allocated object, we unregister it so that the check
 1164   // elimination code doesn't run on it.
1165   VectorSet visited(Thread::current()->resource_area());
1166   GrowableArray<Node*> node_worklist;
1167 
1168   BarrierSetC2 *bs = BarrierSet::barrier_set()->barrier_set_c2();
1169 
1170   node_worklist.push(res);
1171 
 1172   while (node_worklist.length() != 0) {
1173     Node* n = node_worklist.pop();
1174 
1175     if (visited.test_set(n->_idx)) {
1176       continue;  // already processed
1177     }
1178 
1179     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1180       Node *use = n->fast_out(i);
1181       if (use->Opcode() == Op_CastP2X) {
1182         bs->unregister_potential_barrier_node(use);
1183       } else if (use->is_Phi() ||
1184                  use->is_CheckCastPP() ||
1185                  use->is_EncodeP() ||
1186                  use->is_DecodeN() ||
1187                  use->is_SafePoint() ||
1188                  use->is_Proj() ||
1189                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
1190         // Find barriers beyond our current result
1191         node_worklist.push(use);
1192       } else if (use->is_Store() && use->Opcode() == Op_StoreP) {
1193         if (n != use->in(MemNode::ValueIn)) {
1194           continue;
1195         }
 1196         // TODO: code copied from escape.cpp::ConnectionGraph::get_addp_base.
 1197         // Common this code up into a helper.
1198         Node *memory = use->in(MemNode::Address);
1199         if (memory->is_AddP()) {
1200           Node *base = memory->in(AddPNode::Base);
1201           if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
1202             base = memory->in(AddPNode::Address);
1203             while (base->is_AddP()) {
1204               // Case #6 (unsafe access) may have several chained AddP nodes.
1205               assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
1206               base = base->in(AddPNode::Address);
1207             }
1208             if (base->Opcode() == Op_CheckCastPP &&
1209                 base->bottom_type()->isa_rawptr() &&
1210                 _igvn.type(base->in(1))->isa_oopptr()) {
1211               base = base->in(1); // Case #9
1212             }
1213           }
1214           node_worklist.push(base);
1215         }
1216       } else if (use->is_AddP() ||
1217            (use->is_Load() && use->Opcode() == Op_LoadP)) {
1218         // Find barriers for loads
1219         node_worklist.push(use);
1220       }
1221     }
1222   }
1223   return false;
1224 }
1225 
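Both the barrier walk above and the safepoint walk below share one traversal shape: a worklist seeded with the allocation result, a visited set keyed by node index, and a filter that pushes only uses able to forward the reference (Phi, CheckCastPP, AddP, pointer loads and stores, and so on). A generic model with standard containers (hypothetical Node type, for illustration only):

    #include <unordered_set>
    #include <vector>

    struct Node {
      int idx;
      std::vector<Node*> outs;  // def-use edges, i.e. fast_outs in HotSpot
    };

    // Visit every use reachable from root exactly once. 'recurse' decides
    // which uses can forward the reference; 'visit' is the per-use action.
    template <typename Recurse, typename Visit>
    void walk_uses(Node* root, Recurse recurse, Visit visit) {
      std::unordered_set<int> visited;     // HotSpot uses VectorSet::test_set
      std::vector<Node*> worklist{root};
      while (!worklist.empty()) {
        Node* n = worklist.back();
        worklist.pop_back();
        if (!visited.insert(n->idx).second) continue;  // already processed
        for (Node* use : n->outs) {
          visit(use);
          if (recurse(use)) worklist.push_back(use);
        }
      }
    }
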
1226 bool PhaseMacroExpand::register_stack_allocated_object_with_safepoints(AllocateNode* alloc, Node* stack_oop) {
1227   VectorSet visited(Thread::current()->resource_area());
1228   GrowableArray<Node*> node_worklist;
1229   GrowableArray<SafePointNode*> temp;
1230   Dict* safepoint_map = new Dict(cmpkey, hashkey);
1231   bool found_non_direct_safepoint = false;
1232   Node *res = alloc->result_cast();
1233 
1234   assert(res != NULL, "result node must not be null");
1235 
1236   node_worklist.push(res);
1237 
 1238   while (node_worklist.length() != 0) {
1239     Node* n = node_worklist.pop();
1240 
1241     if (visited.test_set(n->_idx)) {
1242       continue;  // already processed
1243     }
1244 
1245     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1246       Node *use = n->fast_out(i);
1247       if (use->is_SafePoint()) {
1248         SafePointNode* sfpt = use->as_SafePoint();
1249         if (sfpt->jvms() != NULL) {
1250           temp.push(sfpt);
1251         }
1252       } else if (use->is_Phi() ||
1253           use->is_CheckCastPP() ||
1254           use->is_EncodeP() ||
1255           use->is_DecodeN() ||
1256           use->is_Proj() ||
1257           (use->Opcode() == Op_CastP2X) ||
1258           use->is_MergeMem() ||
1259           use->is_MemBar() ||
1260           (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
1261         // Find safepoints beyond our current result
1262         node_worklist.push(use);
1263       } else if (use->is_Store() && use->Opcode() == Op_StoreP) {
1264         node_worklist.push(use);
1265         if (n != use->in(MemNode::ValueIn)) {
1266           continue;
1267         }
 1268         // TODO: code copied from escape.cpp::ConnectionGraph::get_addp_base.
 1269         // Common this code up into a helper.
1270         Node *memory = use->in(MemNode::Address);
1271         if (memory->is_AddP()) {
1272           Node *base = memory->in(AddPNode::Base);
1273           if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
1274             base = memory->in(AddPNode::Address);
1275             while (base->is_AddP()) {
1276               // Case #6 (unsafe access) may have several chained AddP nodes.
1277               assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
1278               base = base->in(AddPNode::Address);
1279             }
1280             if (base->Opcode() == Op_CheckCastPP &&
1281                 base->bottom_type()->isa_rawptr() &&
1282                 _igvn.type(base->in(1))->isa_oopptr()) {
1283               base = base->in(1); // Case #9
1284             }
1285           }
1286           node_worklist.push(base);
1287         }
1288       } else if (use->is_AddP() ||
1289         (use->is_Load() && use->Opcode() == Op_LoadP)) {
1290         // Find safepoints for arrays
1291         node_worklist.push(use);
1292       }
1293     }
1294 
1295     while (temp.length() != 0) {
1296       SafePointNode* sfpt = temp.pop();
1297       if (res != n) {
1298         found_non_direct_safepoint = true;
1299       }
1300       handle_safepoint_for_stack_allocation(safepoint_map, alloc, stack_oop, n, sfpt);
1301     }
1302   }
1303 
1304   return found_non_direct_safepoint;
1305 }
1306 
1307 void PhaseMacroExpand::handle_safepoint_for_stack_allocation(Dict* safepoint_map, AllocateNode* alloc, Node* oop_node, Node* parent, SafePointNode* sfpt) {
1308   Node* res = alloc->result_cast();
1309   assert(res->is_CheckCastPP(), "unexpected AllocateNode result");
1310   const TypeOopPtr* res_type = _igvn.type(res)->isa_oopptr();
1311   ciKlass* klass = res_type->klass();
1312   int nfields = 0;
1313   if (res_type->isa_instptr()) {
1314     // find the fields of the class which will be needed for safepoint debug information
1315     assert(klass->is_instance_klass(), "must be an instance klass.");
1316     ciInstanceKlass* iklass = klass->as_instance_klass();
1317     nfields = iklass->nof_nonstatic_fields();
1318   } else {
1319     // find the array's elements which will be needed for safepoint debug information
1320     nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1);
1321   }
1322 
1323   assert(nfields >= 0, "Sanity");
1324 
1325   SafePointScalarObjectNode* sobj = NULL;
1326   Node *result = (Node *)(*safepoint_map)[sfpt];
1327   if (result != NULL) {
1328     assert(result->is_SafePointScalarObject(), "Has to be a safepointscalarobject");
1329     sobj = result->as_SafePointScalarObject();
1330   } else {
1331     //
1332     // Process the safepoint uses
1333     //
1334     Node* mem = sfpt->memory();
1335     Node* ctl = sfpt->control();
1336     assert(sfpt->jvms() != NULL, "missed JVMS");
1337     // Fields of scalar objs are referenced only at the end
1338     // of regular debuginfo at the last (youngest) JVMS.
1339     // Record relative start index.
1340     uint first_ind = (sfpt->req() - sfpt->jvms()->scloff());
1341     sobj = new SafePointScalarObjectNode(res_type,
1342 #ifdef ASSERT
1343                                                 alloc,
1344 #endif
1345                                                 first_ind, nfields);
1346     sobj->init_req(0, C->root());
1347     sobj->add_req(oop_node);
1348     transform_later(sobj);
1349     sobj->set_stack_allocated(true);
1350 
1351     JVMState *jvms = sfpt->jvms();
1352     sfpt->add_req(sobj);
1353     jvms->set_endoff(sfpt->req());
1354     _igvn._worklist.push(sfpt);
1355     safepoint_map->Insert(sfpt, sobj);
1356   }
1357 
1358   if (parent == res) {
1359     adjust_safepoint_jvms(sfpt, parent, sobj);
1360   }
1361 }
1362 
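handle_safepoint_for_stack_allocation above memoizes one SafePointScalarObjectNode per safepoint in a Dict, so an object that reaches the same safepoint along several paths is described in its debug info only once. The same caching shape with standard containers (illustrative types only):

    #include <map>

    struct SafePoint {};
    struct ScalarObj {};

    // Return the cached description for 'sfpt', creating it on first use.
    ScalarObj* get_or_create(std::map<SafePoint*, ScalarObj*>& cache,
                             SafePoint* sfpt) {
      auto it = cache.find(sfpt);
      if (it != cache.end()) {
        return it->second;                // reuse the existing description
      }
      ScalarObj* sobj = new ScalarObj();  // build and register exactly once
      cache[sfpt] = sobj;
      return sobj;
    }
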
1363 bool PhaseMacroExpand::can_stack_allocate(AllocateNode* alloc, Node* res, intptr_t size_of_object) {
1364   return ((res != NULL) && alloc->_is_stack_allocateable && (size_of_object != -1) && should_stack_allocate());
1365 }
1366 
1367 void PhaseMacroExpand::estimate_stack_allocation_size(AllocateNode* alloc) {
1368   Node* res                  = alloc->result_cast();
1369   Node* size_in_bytes        = alloc->in(AllocateNode::AllocSize);
1370   intptr_t size_of_object    = _igvn.find_intptr_t_con(size_in_bytes, -1);
1371 
1372   if (alloc->_is_scalar_replaceable && !alloc->_is_stack_allocateable) {
1373     C->set_fail_stack_allocation_with_references(true);
1374     return;
1375   }
1376 
1377   bool can_sa = can_stack_allocate(alloc, res, size_of_object);
1378   if (alloc->_is_stack_allocateable && !can_sa) {
 1379     // If EA marked the object as stack allocateable but now we cannot, fail
1380     C->set_fail_stack_allocation_with_references(true);
1381     return;
1382   }
1383 
1384   if (!alloc->_is_stack_allocateable) {
 1385     // If we cannot stack allocate because EA said no, there is no need to count the size
1386     return;
1387   }
1388 
1389   int current = C->stack_allocated_slots();
1390   C->set_stack_allocated_slots(current + (size_of_object >> LogBytesPerInt));
1391 }
1392 
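The estimate above feeds a pre-pass over the macro list (the loop near new line 3280 at the bottom of this page): per-allocation demand is accumulated in 32-bit slots (size_of_object >> LogBytesPerInt) so that a single representability check can veto stack allocation before any expansion happens. A condensed model of that two-phase budget, with an assumed capacity in place of the register-mask check:

    const int kLogBytesPerInt = 2;    // 32-bit slots, as in HotSpot
    const int kCapacitySlots  = 128;  // assumed stand-in for representability

    struct StackBudget {
      int slots = 0;

      // Phase 1: accumulate per-allocation demand, bytes -> slots.
      void account(long size_in_bytes) {
        slots += (int)(size_in_bytes >> kLogBytesPerInt);
      }

      // Phase 2: one global check before expansion commits to anything.
      bool fits(int fixed_slots) const {
        return fixed_slots + slots <= kCapacitySlots;
      }
    };
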
1393 // Do stack allocation
1394 bool PhaseMacroExpand::stack_allocation(AllocateNode* alloc) {
1395   Node* klass                = alloc->in(AllocateNode::KlassNode);
1396   const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
1397   Node *length               = (alloc->is_AllocateArray()) ? alloc->in(AllocateNode::ALength) : NULL;
1398   Node* size_in_bytes        = alloc->in(AllocateNode::AllocSize);
1399   Node* res                  = alloc->result_cast();
1400   Node* ctrl                 = alloc->in(TypeFunc::Control);
1401   Node* mem                  = alloc->in(TypeFunc::Memory);
1402 
1403   intptr_t size_of_object = _igvn.find_intptr_t_con(size_in_bytes, -1);
1404 
1405   if (!can_stack_allocate(alloc, res, size_of_object)) {
1406     return false;
1407   }
1408 
1409   if (C->fail_stack_allocation_with_references()) {
1410     if (alloc->_is_referenced_stack_allocation) {
1411 #ifndef PRODUCT
1412       if (print_stack_allocation()) {
1413         tty->print_cr("---- Avoiding stack allocation on node %d because it is referenced by another alloc and SCR/SA failed in method %s", alloc->_idx, _igvn.C->method()->get_Method()->name_and_sig_as_C_string());
1414       }
1415 #endif
 1416       return false;
1417     }
1418   }
1419 
1420   int next_stack_allocation_slot = next_stack_allocated_object(size_of_object >> LogBytesPerInt);
1421   if (next_stack_allocation_slot < 0) {
1422 #ifndef PRODUCT
1423     if (print_stack_allocation()) {
1424       tty->print_cr("---- Avoiding stack allocation on node %d with size %ld for method %s because of insufficient stack space", alloc->_idx, size_of_object, _igvn.C->method()->get_Method()->name_and_sig_as_C_string());
1425     }
1426 #endif
1427     return false;
1428   }
1429 
1430   if (mem->is_MergeMem()) {
1431     mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
1432   }
1433 
1434   extract_call_projections(alloc);
1435 
1436   // Process barriers as this may result in result_cast() becoming NULL
1437   if (process_write_barriers_on_stack_allocated_objects(alloc)) {
1438 #ifndef PRODUCT
1439     if (print_stack_allocation()) {
1440       tty->print_cr("---- Allocation %d result_cast is no longer used so yank the alloc instead", alloc->_idx);
1441     }
1442 #endif
1443     InitializeNode* init = alloc->initialization();
1444     if (init != NULL) {
1445       init->remove(&_igvn);
1446     }
1447     yank_alloc_node(alloc);
1448     return true;
1449   }
1450 
 1451   assert(res == alloc->result_cast(), "values must match");
1452 
1453   Node* stack_oop = transform_later(new BoxLockNode(next_stack_allocation_slot));
1454   Node* new_raw_mem = initialize_object(alloc, ctrl, mem, stack_oop, klass, length, size_in_bytes);
1455 
1456   bool non_direct_safepoints = register_stack_allocated_object_with_safepoints(alloc, stack_oop);
1457   if (non_direct_safepoints) {
1458     if (length != NULL) {
1459       stack_allocation_init_array_length_on_entry(alloc, length, stack_oop);
1460     }
1461 #ifndef PRODUCT
1462     stack_allocation_clear_object_data(alloc, stack_oop);
1463 #endif
1464   }
1465 
1466   _igvn.replace_node(_resproj, stack_oop);
1467 
1468   for (DUIterator_Fast imax, i = _memproj_fallthrough->fast_outs(imax); i < imax; i++) {
1469     Node *use = _memproj_fallthrough->fast_out(i);
1470     _igvn.rehash_node_delayed(use);
1471     imax -= replace_input(use, _memproj_fallthrough, new_raw_mem);
1472     // back up iterator
1473     --i;
1474   }
1475 
1476   eliminate_unused_allocation_edges(alloc);
1477 
1478   assert(_resproj->outcnt() == 0, "all uses of the original allocate result projection must be deleted");
1479   _igvn.remove_dead_node(_resproj);
1480 
1481 #ifndef PRODUCT
1482   if (print_stack_allocation()) {
1483     tty->print_cr("++++ Performing stack allocation on node %d with size %ld for method %s", alloc->_idx, size_of_object, _igvn.C->method()->get_Method()->name_and_sig_as_C_string());
1484   }
1485 #endif
1486 
1487   return true;
1488 }
1489 
1490 /*
1491   Initialize stack allocated array length on entry to the method.
 1492   This is required for deopt, so it can verify array lengths, and so
 1493   that GCs that happen after deopt will not crash on uninitialized
1494   arrays.
1495 */
1496 void PhaseMacroExpand::stack_allocation_init_array_length_on_entry(AllocateNode *alloc, Node *length, Node *stack_oop) {
1497   Node* start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
 1498   assert(length != NULL, "Length cannot be NULL");
1499 
1500   if (C->is_osr_compilation()) {
1501     for (DUIterator_Fast imax, i = start_mem->fast_outs(imax); i < imax; i++) {
1502       Node *child = start_mem->fast_out(i);
1503       if (child->is_CallLeaf() && child->as_CallLeaf()->is_call_to_osr_migration_end()) {
1504         CallLeafNode* call_leaf = child->as_CallLeaf();
1505         start_mem = call_leaf->proj_out_or_null(TypeFunc::Memory);
1506         break;
1507       }
1508     }
1509   }
1510   assert(start_mem != NULL, "Must find start mem");
1511   Node* init_mem = start_mem;
1512 
1513   // need to set the length field for arrays for deopt
1514   init_mem = make_store(C->start()->proj_out_or_null(TypeFunc::Control),
1515                         init_mem, stack_oop, arrayOopDesc::length_offset_in_bytes(),
1516                         length, T_INT);
1517 
1518 
1519   if (init_mem != start_mem) {
1520     for (DUIterator_Fast imax, i = start_mem->fast_outs(imax); i < imax; i++) {
1521       Node *use = start_mem->fast_out(i);
1522       // Compressed refs can make a new store which adjusts the start
 1523       // offset and is sourced by start_mem. Make sure we don't create a cycle.
1524       if (use == init_mem || (init_mem->find_edge(use) >= 0)) {
1525         continue;
1526       }
1527       _igvn.rehash_node_delayed(use);
1528       imax -= replace_input(use, start_mem, init_mem);
1529       // back up iterator
1530       --i;
1531     }
1532   }
1533 }
1534 
1535 #ifndef PRODUCT
1536 /*
 1537   Initialize the stack allocated object's data on entry to the method, to
 1538   ensure it is initialized before safepoints that may only be reachable
 1539   through phis, where the object may not actually have been initialized.
1540 */
1541 void PhaseMacroExpand::stack_allocation_clear_object_data(AllocateNode *alloc, Node *stack_oop) {
1542   Node* klass                = alloc->in(AllocateNode::KlassNode);
1543   Node *length               = (alloc->is_AllocateArray()) ? alloc->in(AllocateNode::ALength) : NULL;
1544   Node* size_in_bytes        = alloc->in(AllocateNode::AllocSize);
1545   Node* start_mem            = C->start()->proj_out_or_null(TypeFunc::Memory);
1546   if (C->is_osr_compilation()) {
1547     for (DUIterator_Fast imax, i = start_mem->fast_outs(imax); i < imax; i++) {
1548       Node *child = start_mem->fast_out(i);
1549       if (child->is_CallLeaf() && child->as_CallLeaf()->is_call_to_osr_migration_end()) {
1550         CallLeafNode* call_leaf = child->as_CallLeaf();
1551         start_mem = call_leaf->proj_out_or_null(TypeFunc::Memory);
1552         break;
1553       }
1554     }
1555   }
1556   assert(start_mem != NULL, "Must find start mem");
1557   int header_size = alloc->minimum_header_size();
1558   Node* init_mem = start_mem;
1559   if (length != NULL) {
1560     // conservatively small header size:
1561     header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1562     ciKlass* k = _igvn.type(klass)->is_klassptr()->klass();
1563     if (k->is_array_klass()) {   // we know the exact header size in most cases:
1564       header_size = Klass::layout_helper_header_size(k->layout_helper());
1565     }
1566   }
1567   init_mem = ClearArrayNode::clear_memory(C->start()->proj_out_or_null(TypeFunc::Control),
1568                                           init_mem, stack_oop, header_size, size_in_bytes,
1569                                           &_igvn);
1570   if (init_mem != start_mem) {
1571     for (DUIterator_Fast imax, i = start_mem->fast_outs(imax); i < imax; i++) {
1572       Node *use = start_mem->fast_out(i);
1573       // Compressed refs can make a new store which adjusts the start
 1574       // offset and is sourced by start_mem. Make sure we don't create a cycle.
1575       if (use == init_mem || (init_mem->find_edge(use) >= 0)) {
1576         continue;
1577       }
1578       _igvn.rehash_node_delayed(use);
1579       imax -= replace_input(use, start_mem, init_mem);
1580       // back up iterator
1581       --i;
1582     }
1583   }
1584 }
1585 #endif
1586 
1587 bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
1588   // Don't do scalar replacement if the frame can be popped by JVMTI:
1589   // if reallocation fails during deoptimization we'll pop all
1590   // interpreter frames for this compiled frame and that won't play
1591   // nice with JVMTI popframe.
1592   if (!EliminateAllocations || JvmtiExport::can_pop_frame() || !alloc->_is_non_escaping) {
1593     return false;
1594   }
1595   Node* klass = alloc->in(AllocateNode::KlassNode);
1596   const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
1597   Node* res = alloc->result_cast();
1598   // Eliminate boxing allocations which are not used
 1599   // regardless of scalar-replaceable status.
1600   bool boxing_alloc = C->eliminate_boxing() &&
1601                       tklass->klass()->is_instance_klass()  &&
1602                       tklass->klass()->as_instance_klass()->is_box_klass();
1603   if (!alloc->_is_scalar_replaceable && (!boxing_alloc || (res != NULL))) {
1604     return false;
1605   }
1606 

1623 
1624   if (!scalar_replacement(alloc, safepoints)) {
1625     return false;
1626   }
1627 
1628   CompileLog* log = C->log();
1629   if (log != NULL) {
1630     log->head("eliminate_allocation type='%d'",
1631               log->identify(tklass->klass()));
1632     JVMState* p = alloc->jvms();
1633     while (p != NULL) {
1634       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
1635       p = p->caller();
1636     }
1637     log->tail("eliminate_allocation");
1638   }
1639 
1640   process_users_of_allocation(alloc);
1641 
1642 #ifndef PRODUCT
1643   if (print_eliminate_allocations()) {
1644     if (alloc->is_AllocateArray())
1645       tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
1646     else
1647       tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
1648   }
1649 #endif
1650 
1651   return true;
1652 }
1653 
1654 bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) {
1655   // EA should remove all uses of non-escaping boxing node.
1656   if (!C->eliminate_boxing() || boxing->proj_out_or_null(TypeFunc::Parms) != NULL) {
1657     return false;
1658   }
1659 
1660   assert(boxing->result_cast() == NULL, "unexpected boxing node result");
1661 
1662   extract_call_projections(boxing);
1663 
1664   const TypeTuple* r = boxing->tf()->range();
1665   assert(r->cnt() > TypeFunc::Parms, "sanity");
1666   const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr();
1667   assert(t != NULL, "sanity");
1668 
1669   CompileLog* log = C->log();
1670   if (log != NULL) {
1671     log->head("eliminate_boxing type='%d'",
1672               log->identify(t->klass()));
1673     JVMState* p = boxing->jvms();
1674     while (p != NULL) {
1675       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
1676       p = p->caller();
1677     }
1678     log->tail("eliminate_boxing");
1679   }
1680 
1681   process_users_of_allocation(boxing);
1682 
1683 #ifndef PRODUCT
1684   if (print_eliminate_allocations()) {
1685     tty->print("++++ Eliminated: %d ", boxing->_idx);
1686     boxing->method()->print_short_name(tty);
1687     tty->cr();
1688   }
1689 #endif
1690 
1691   return true;
1692 }
1693 
1694 //---------------------------set_eden_pointers-------------------------
1695 void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr) {
1696   if (UseTLAB) {                // Private allocation: load from TLS
1697     Node* thread = transform_later(new ThreadLocalNode());
1698     int tlab_top_offset = in_bytes(JavaThread::tlab_top_offset());
1699     int tlab_end_offset = in_bytes(JavaThread::tlab_end_offset());
1700     eden_top_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_top_offset);
1701     eden_end_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_end_offset);
1702   } else {                      // Shared allocation: load from globals
1703     CollectedHeap* ch = Universe::heap();
1704     address top_adr = (address)ch->top_addr();

3260       assert(C->macro_count() == (old_macro_count - 1), "expansion must have deleted one node from macro list");
3261       break;
3262     case Node::Class_SubTypeCheck:
3263       expand_subtypecheck_node(n->as_SubTypeCheck());
3264       assert(C->macro_count() == (old_macro_count - 1), "expansion must have deleted one node from macro list");
3265       break;
3266     default:
3267       assert(false, "unknown node type in macro list");
3268     }
3269     assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
3270     if (C->failing())  return true;
3271 
3272     // Clean up the graph so we're less likely to hit the maximum node
3273     // limit
3274     _igvn.set_delay_transform(false);
3275     _igvn.optimize();
3276     if (C->failing())  return true;
3277     _igvn.set_delay_transform(true);
3278   }
3279 
 3280   for (int i = C->macro_count(); i > 0; i--) {
3281     Node * n = C->macro_node(i-1);
3282     assert(n->is_macro(), "only macro nodes expected here");
3283 
3284     switch (n->class_id()) {
3285     case Node::Class_Allocate:
3286     case Node::Class_AllocateArray:
3287       estimate_stack_allocation_size(n->as_Allocate());
3288       break;
3289     default:
3290       assert(false, "unknown node type in macro list");
3291     }
3292   }
3293 
 3294   // Check whether the total stack allocation size is too large before macro
 3295   // expansion, so we can reject required stack allocations up front.
3296   if (!stack_allocation_location_representable(C->fixed_slots() + C->stack_allocated_slots())) {
3297     C->set_fail_stack_allocation_with_references(true);
3298   }
3299 
3300   // All nodes except Allocate nodes are expanded now. There could be
 3301   // new optimization opportunities (such as folding a newly created
 3302   // load from a just-allocated object). Run IGVN.
3303 
3304   // expand "macro" nodes
3305   // nodes are removed from the macro list as they are processed
3306   while (C->macro_count() > 0) {
3307     int macro_count = C->macro_count();
3308     Node * n = C->macro_node(macro_count-1);
3309     assert(n->is_macro(), "only macro nodes expected here");
3310     if (_igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
3311       // node is unreachable, so don't try to expand it
3312       C->remove_macro_node(n);
3313       continue;
3314     }
3315     // Make sure expansion will not cause node limit to be exceeded.
3316     // Worst case is a macro node gets expanded into about 200 nodes.
3317     // Allow 50% more for optimization.
3318     if (C->check_node_count(300, "out of nodes before macro expansion")) {
3319       return true;
3320     }
3321     switch (n->class_id()) {
3322     case Node::Class_Allocate:
3323       if (!stack_allocation(n->as_Allocate())) {
3324         expand_allocate(n->as_Allocate());
3325       }
3326       break;
3327     case Node::Class_AllocateArray:
3328       if (!stack_allocation(n->as_AllocateArray())) {
3329         expand_allocate_array(n->as_AllocateArray());
3330       }
3331       break;
3332     default:
3333       assert(false, "unknown node type in macro list");
3334     }
3335     assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
3336     if (C->failing())  return true;
3337 
3338     // Clean up the graph so we're less likely to hit the maximum node
3339     // limit
3340     _igvn.set_delay_transform(false);
3341     _igvn.optimize();
3342     if (C->failing())  return true;
3343     _igvn.set_delay_transform(true);
3344   }
3345 
3346   _igvn.set_delay_transform(false);
3347   return false;
3348 }