#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _has_locks(false),
  _compile(C),
  _igvn(igvn),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  // Add ConP(#NULL) and ConN(#NULL) nodes.
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
  _pcmp_neq = NULL; // Should be initialized
  _pcmp_eq = NULL;
}

// ...
               (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
      // Collect pointer compare nodes.
      ptr_cmp_worklist.append(n);
    } else if (n->is_MemBarStoreStore()) {
      // Collect all MemBarStoreStore nodes so that depending on the
      // escape status of the associated Allocate node some of them
      // may be eliminated.
      storestore_worklist.append(n);
    } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
               (n->req() > MemBarNode::Precedent)) {
      record_for_optimizer(n);
#ifdef ASSERT
    } else if (n->is_AddP()) {
      // Collect address nodes for graph verification.
      addp_worklist.append(n);
#endif
    } else if (n->is_ArrayCopy()) {
      // Keep a list of ArrayCopy nodes so that if one of its inputs is
      // non-escaping we can record a unique type.
      arraycopy_worklist.append(n->as_ArrayCopy());
    } else if (n->is_Lock()) {
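      // Remember that this method locks on something other than a
      // parameter or a constant; stack allocation (step 4 below) is
      // only attempted when such locks can be eliminated.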
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con())) {
        _has_locks = true;
      }
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i); // Get user
      ideal_nodes.push(m);
    }
  }
  if (non_escaped_worklist.length() == 0) {
    _collecting = false;
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while (delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }
  int ptnodes_length = ptnodes_worklist.length();

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // ...

  // 3. Adjust scalar_replaceable state of nonescaping objects and push
  // scalar replaceable allocations on alloc_worklist for processing
  // in split_unique_types().
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (n->is_CallStaticJava()) {
      n->as_CallStaticJava()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn);
      if (ptn->scalar_replaceable()) {
        alloc_worklist.append(ptn->ideal_node());
      }
    } else {
      // Set scalar_replaceable to false for the stack allocation analysis below.
      ptn->set_scalar_replaceable(false);
    }
  }

  // 4. Perform stack allocation analysis
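  // Each non-escaping Allocate is first checked on its own by
  // eligible_for_stack_allocation(), then object chains are re-checked
  // to a fixed point so that no stack allocated object ends up referenced
  // by an object that is neither stack allocated nor scalar replaced.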
  if (C->do_stack_allocation() && (!_has_locks || (EliminateLocks && EliminateNestedLocks))) {
    if (non_escaped_length > 0) {
      for (int next = 0; next < non_escaped_length; next++) {
        JavaObjectNode* ptn = non_escaped_worklist.at(next);
        PointsToNode::EscapeState es = ptn->escape_state();
        assert(es < PointsToNode::GlobalEscape, "list can not contain GlobalEscape objects");
        if (es == PointsToNode::ArgEscape) {
#ifndef PRODUCT
          if (print_escape_analysis() || print_stack_allocation()) {
            tty->print_cr("---- Alloc node %d can not be stack allocated as it escapes as an argument", ptn->ideal_node()->_idx);
          }
#endif
          continue;
        }

        Node* n = ptn->ideal_node();
        if (!n->is_Allocate()) {
          continue;
        }

        n->as_Allocate()->_is_stack_allocateable = eligible_for_stack_allocation(ptn);
      }
    }

    // 4.1 Verify that object chains don't contain heap objects pointing
    // to stack allocated objects. Loop until there are no more changes
    // in the state of which objects are allowed to be stack allocated.
    bool more_work = non_escaped_length > 0;
    while (more_work) {
      more_work = verify_stack_allocated_object_chains(non_escaped_worklist, non_escaped_length);
    }

#ifndef PRODUCT
    if (print_escape_analysis() || print_stack_allocation()) {
      print_stack_allocated_candidates(non_escaped_worklist, non_escaped_length);
    }
#endif
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 5. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (print_escape_analysis()) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }
#endif

  // 6. Separate memory graph for scalar replaceable allocations.
  if (has_scalar_replaceable_candidates &&
      C->AliasLevel() >= 3 && EliminateAllocations) {
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist);
    if (C->failing()) return false;
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (print_escape_analysis() || print_eliminate_allocations())) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    } else if (C->AliasLevel() < 3) {
      tty->print(" since AliasLevel < 3 ===");
    }
    tty->cr();
#endif
  }
  return has_non_escaping_obj;
}

// If an allocation is dominated by a loop, check to see if the lifetimes of
// two instances may overlap. If they do, this allocation is not eligible for
// stack allocation.
bool ConnectionGraph::allocation_lifetime_overlap(AllocateNode *alloc, PhiNode *phi) {
  Node *child0 = phi->in(0);
  if (!child0->is_Loop()) {
    return false;
  }
  // This is very pessimistic... but correct. It could be optimized.
  VectorSet visited(Thread::current()->resource_area());
  GrowableArray<Node*> node_worklist;

  for (DUIterator_Fast imax, i = phi->fast_outs(imax); i < imax; i++) {
    node_worklist.push(phi->fast_out(i));
  }

  while (node_worklist.length() != 0) {
    Node* node = node_worklist.pop();
    if (visited.test_set(node->_idx)) {
      continue; // already processed
    }

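    // Reaching the starting phi again means a reference from a previous
    // loop iteration can flow back into the merge point, so two
    // instances of the allocation may be live at the same time.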
    if (node->is_Phi()) {
      if (phi == node) {
        return true;
      }
    }
    for (DUIterator_Fast imax, i = node->fast_outs(imax); i < imax; i++) {
      node_worklist.push(node->fast_out(i));
    }
  }
  return false;
}

// Find if an allocate result may reach an EncodeP
bool ConnectionGraph::oop_may_be_compressed(Node* alloc_result) {
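  // Compressed oops are encoded relative to the heap base, which a stack
  // allocated oop is not, so a candidate whose reference can reach an
  // EncodeP must be rejected (see eligible_for_stack_allocation()).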
  VectorSet visited(Thread::current()->resource_area());
  GrowableArray<Node*> node_worklist;

  node_worklist.push(alloc_result);
  visited.set(alloc_result->_idx);

  while (node_worklist.length() != 0) {
    Node* node = node_worklist.pop();

    for (DUIterator_Fast imax, i = node->fast_outs(imax); i < imax; i++) {
      Node *use = node->fast_out(i);
      if (use->is_Phi()) {
        if (!visited.test_set(use->_idx)) {
          node_worklist.push(use);
        }
      } else if (use->is_EncodeP()) {
        return true;
      }
    }
  }

  return false;
}

// Various checks to determine if an alloc is a candidate for stack allocation
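// The allocation must produce its result through a CheckCastPP, have a
// known constant size within StackAllocSizeLimit (and a constant length
// if it is an array), never have its oop compressed, and every one of
// its uses must pass all_uses_eligible_for_stack_allocation().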
bool ConnectionGraph::eligible_for_stack_allocation(PointsToNode* ptn) {
  assert(ptn->ideal_node()->is_Allocate(), "Must be called on allocate or allocate array node");

  AllocateNode *alloc = ptn->ideal_node()->as_Allocate();
  Node* res = alloc->result_cast();
  if (res == NULL) {
#ifndef PRODUCT
    if (print_escape_analysis() || print_stack_allocation()) {
      tty->print_cr("---- Alloc node %d can not be stack allocated due to NULL result_cast", alloc->_idx);
    }
#endif
    return false;
  } else if (!res->is_CheckCastPP()) {
#ifndef PRODUCT
    if (print_escape_analysis() || print_stack_allocation()) {
      tty->print_cr("---- Alloc node %d can not be stack allocated due to an invalid result_cast", alloc->_idx);
    }
#endif
    return false;
  }

  Node* size_in_bytes = alloc->in(AllocateNode::AllocSize);
  intptr_t size_of_object = _igvn->find_intptr_t_con(size_in_bytes, -1);
  if ((size_of_object == -1) || (size_of_object > AllocateNode::StackAllocSizeLimit)) {
    // Object has an unknown size or is too big, so it can not be stack allocated.
#ifndef PRODUCT
    if (print_escape_analysis() || print_stack_allocation()) {
      tty->print_cr("---- Alloc node %d can not be stack allocated due to an invalid size", alloc->_idx);
    }
#endif
    return false;
  }

  if (alloc->is_AllocateArray()) {
    int length = alloc->in(AllocateNode::ALength)->find_int_con(-1);
    if (length < 0 || length > EliminateAllocationArraySizeLimit) {
      // Array does not have a constant length, or is too long, so it can
      // not be stack allocated.
#ifndef PRODUCT
      if (print_escape_analysis() || print_stack_allocation()) {
        tty->print_cr("---- Alloc node %d can not be stack allocated as it is an array with an invalid length", alloc->_idx);
      }
#endif
      return false;
    }
  }

  if (UseCompressedOops && oop_may_be_compressed(res)) {
#ifndef PRODUCT
    if (print_escape_analysis() || print_stack_allocation()) {
      tty->print_cr("---- Alloc node %d can not be stack allocated due to a compress operation on the stack oop", alloc->_idx);
    }
#endif
    return false;
  }

  return all_uses_eligible_for_stack_allocation(ptn);
}

// Check if the alloc has uses that make it ineligible for stack allocation
bool ConnectionGraph::all_uses_eligible_for_stack_allocation(PointsToNode *ptn) {
  assert(ptn->ideal_node()->is_Allocate(), "Must be called on allocate or allocate array node");

  AllocateNode *alloc = ptn->ideal_node()->as_Allocate();
  Node* res = alloc->result_cast();

  assert(res != NULL, "Result cast must not be NULL at this point");

  for (int uses = 0; uses < ptn->use_count(); uses++) {
    PointsToNode *use = ptn->use(uses);
    if (use->is_LocalVar()) {
      LocalVarNode *local = use->as_LocalVar();
      Node *node = local->ideal_node();
      if (node->is_Phi()) {
        if (allocation_lifetime_overlap(alloc, node->as_Phi())) {
#ifndef PRODUCT
          if (print_escape_analysis() || print_stack_allocation()) {
            tty->print_cr("---- Alloc node %d can not be stack allocated as it may overlap with older versions of itself", alloc->_idx);
          }
#endif
          return false;
        }
      } else if (node->is_Load() && node->Opcode() == Op_LoadP) {
        Node *in1 = node->in(1);
        if ((in1 != NULL) && in1->is_Phi()) {
          if (allocation_lifetime_overlap(alloc, in1->as_Phi())) {
#ifndef PRODUCT
            if (print_escape_analysis() || print_stack_allocation()) {
              tty->print_cr("---- Alloc node %d can not be stack allocated as it may overlap with older versions of itself", alloc->_idx);
            }
#endif
            return false;
          }
        }
      }
    } else if (use->is_Field()) {
      if (UseCompressedOops) {
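        // Storing the stack oop into another object's field would
        // require compressing it (see oop_may_be_compressed()), so be
        // conservative when compressed oops are enabled.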
#ifndef PRODUCT
        if (print_escape_analysis() || print_stack_allocation()) {
          tty->print_cr("---- Alloc node %d can not be stack allocated as it is referenced by another object", alloc->_idx);
        }
#endif
        return false;
      }
    } else if (use->is_Arraycopy()) {
      if (ptn->arraycopy_dst() && alloc->is_AllocateArray()) {
        Node* klass = alloc->in(AllocateNode::KlassNode);
        ciKlass* k = _igvn->type(klass)->is_klassptr()->klass();
        if (k->is_obj_array_klass()) {
          // The System.arraycopy helper has a post store barrier which
          // does not handle stack allocated objects.
#ifndef PRODUCT
          if (print_escape_analysis() || print_stack_allocation()) {
            tty->print_cr("---- Alloc node %d can not be stack allocated as it is referenced from an arraycopy", alloc->_idx);
          }
#endif
          return false;
        }
      }
    }
  }

  return true;
}

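// Check that no stack allocated candidate is referenced from an object
// that is neither stack allocated nor scalar replaceable. Returns true
// when a candidate had to be invalidated, in which case the caller
// iterates until a fixed point is reached.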
bool ConnectionGraph::verify_stack_allocated_object_chains(GrowableArray<JavaObjectNode*> &non_escaped_worklist, int non_escaped_length) {
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    if (ptn->escape_state() != PointsToNode::NoEscape) {
      continue;
    }
    Node* n = ptn->ideal_node();
    if (!n->is_Allocate()) {
      continue;
    }
    AllocateNode *alloc = n->as_Allocate();
    if (!alloc->_is_stack_allocateable) {
      continue;
    }
    for (int uses = 0; uses < ptn->use_count(); uses++) {
      PointsToNode *use = ptn->use(uses);
      if (use->is_Field()) {
        for (BaseIterator i(use->as_Field()); i.has_next(); i.next()) {
          PointsToNode* base = i.get();
          if (base->is_JavaObject()) {
            JavaObjectNode *new_obj = base->as_JavaObject();
            if (new_obj == ptn) {
              continue;
            }
            if (!new_obj->ideal_node()->is_Allocate()) {
              if (new_obj->ideal_node()->Opcode() == Op_ConP) {
                TypeNode *tn = new_obj->ideal_node()->as_Type();
                if (tn->type() == TypePtr::NULL_PTR) {
                  // Allow NULL ptr ConP
                  continue;
                }
              }
              alloc->_is_stack_allocateable = false;
              alloc->_is_referenced_stack_allocation = false;
#ifndef PRODUCT
              if (print_escape_analysis() || print_stack_allocation()) {
                tty->print_cr("---- Alloc node %d can not be stack allocated, it is referenced by a non allocate object", alloc->_idx);
              }
#endif
              return true;
            }
            AllocateNode *new_alloc = new_obj->ideal_node()->as_Allocate();
            if (!new_alloc->_is_stack_allocateable && !new_obj->scalar_replaceable()) {
              alloc->_is_stack_allocateable = false;
              alloc->_is_referenced_stack_allocation = false;
#ifndef PRODUCT
              if (print_escape_analysis() || print_stack_allocation()) {
                tty->print_cr("---- Alloc node %d can not be stack allocated, it is referenced by another non SCR/SA object %d", alloc->_idx, new_alloc->_idx);
              }
#endif
              return true;
            } else {
              assert(alloc->_is_stack_allocateable, "has to be stack allocateable");
              alloc->_is_referenced_stack_allocation = true;
            }
          }
        }
      }
    }
  }

  return false;
}

#ifndef PRODUCT
void ConnectionGraph::print_stack_allocated_candidates(GrowableArray<JavaObjectNode*> &non_escaped_worklist, int non_escaped_length) {
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    Node* n = ptn->ideal_node();
    if (!n->is_Allocate()) {
      continue;
    }
    AllocateNode *alloc = n->as_Allocate();
    if (alloc->_is_stack_allocateable) {
      tty->print_cr("++++ Alloc node %d is marked as stack allocateable, is_scalar_replaceable (%d)", n->_idx, ptn->scalar_replaceable());
    }
  }
}
#endif

// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != NULL) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == NULL ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape,
                           adr, delayed_worklist);
  }
}

// ...
    } else {
      new_edges = 0; // Bailout
    }
  } while (new_edges > 0);

  // Bailout if passed limits.
  if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {
    Compile* C = _compile;
    if (C->log() != NULL) {
      C->log()->begin_elem("connectionGraph_bailout reason='reached ");
      C->log()->text("%s", timeout ? "time" : "iterations");
      C->log()->end_elem(" limit'");
    }
    assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
           time.seconds(), iterations, nodes_size(), ptnodes_worklist.length());
    // Possible infinite build_connection_graph loop,
    // bailout (no changes to ideal graph were made).
    return false;
  }
#ifdef ASSERT
  if (Verbose && print_escape_analysis()) {
    tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d",
                  iterations, nodes_size(), ptnodes_worklist.length());
  }
#endif

#undef CG_BUILD_ITER_LIMIT

  // Find fields initialized by NULL for non-escaping Allocations.
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    PointsToNode::EscapeState es = ptn->escape_state();
    assert(es <= PointsToNode::ArgEscape, "sanity");
    if (es == PointsToNode::NoEscape) {
      if (find_init_values(ptn, null_obj, _igvn) > 0) {
        // Adding references to NULL object does not change escape states
        // since it does not escape. Also no fields are added to NULL object.
        add_java_object_edges(null_obj, false);
      }
    }

// ...
    result = step_through_mergemem(mmem, alias_idx, toop);
    if (result == mmem->base_memory()) {
      // Didn't find instance memory, search through general slice recursively.
      result = mmem->memory_at(C->get_general_index(alias_idx));
      result = find_inst_mem(result, alias_idx, orig_phis);
      if (C->failing()) {
        return NULL;
      }
      mmem->set_memory_at(alias_idx, result);
    }
  } else if (result->is_Phi() &&
             C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
    Node *un = result->as_Phi()->unique_input(igvn);
    if (un != NULL) {
      orig_phis.append_if_missing(result->as_Phi());
      result = un;
    } else {
      break;
    }
  } else if (result->is_ClearArray()) {
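    // Only step through the ClearArray when its destination is a known
    // allocation node; otherwise stay conservative and stop the search.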
    intptr_t offset;
    AllocateNode* alloc = AllocateNode::Ideal_allocation(result->in(3), igvn, offset);

    if ((alloc == NULL) || !ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
      // Can not bypass initialization of the instance
      // we are looking for.
      break;
    }
    // Otherwise skip it (the call updated 'result' value).
  } else if (result->Opcode() == Op_SCMemProj) {
    Node* mem = result->in(0);
    Node* adr = NULL;
    if (mem->is_LoadStore()) {
      adr = mem->in(MemNode::Address);
    } else {
      assert(mem->Opcode() == Op_EncodeISOArray ||
             mem->Opcode() == Op_StrCompressedCopy, "sanity");
      adr = mem->in(3); // Memory edge corresponds to destination array
    }
    const Type *at = igvn->type(adr);
    if (at != Type::TOP) {
      assert(at->isa_ptr() != NULL, "pointer type required.");
      int idx = C->get_alias_index(at->is_ptr());
      if (idx == alias_idx) {