1 /*
2 * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "ci/ciMethodData.hpp"
27 #include "compiler/compileLog.hpp"
28 #include "gc/shared/barrierSet.hpp"
29 #include "gc/shared/c2/barrierSetC2.hpp"
30 #include "libadt/vectset.hpp"
31 #include "memory/allocation.inline.hpp"
32 #include "memory/resourceArea.hpp"
33 #include "opto/addnode.hpp"
34 #include "opto/arraycopynode.hpp"
35 #include "opto/callnode.hpp"
36 #include "opto/connode.hpp"
37 #include "opto/convertnode.hpp"
38 #include "opto/divnode.hpp"
39 #include "opto/idealGraphPrinter.hpp"
40 #include "opto/loopnode.hpp"
41 #include "opto/movenode.hpp"
42 #include "opto/mulnode.hpp"
43 #include "opto/rootnode.hpp"
44 #include "opto/superword.hpp"
45 #include "utilities/powerOfTwo.hpp"
46
47 //=============================================================================
48 //--------------------------is_cloop_ind_var-----------------------------------
49 // Determine if a node is a counted loop induction variable.
50 // NOTE: The method is declared in "node.hpp".
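// For illustration: in the graph built for a simple counted loop such as
//   for (int i = 0; i < n; i++) { ... }
// the Phi at the CountedLoop head that merges the initial value of i with its
// incremented value is the induction variable this predicate matches.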
51 bool Node::is_cloop_ind_var() const {
52 return (is_Phi() && !as_Phi()->is_copy() &&
53 as_Phi()->region()->is_CountedLoop() &&
54 as_Phi()->region()->as_CountedLoop()->phi() == this);
55 }
56
57 //=============================================================================
58 //------------------------------dump_spec--------------------------------------
59 // Dump special per-node info
60 #ifndef PRODUCT
61 void LoopNode::dump_spec(outputStream *st) const {
62 if (is_inner_loop()) st->print( "inner " );
63 if (is_partial_peel_loop()) st->print( "partial_peel " );
64 if (partial_peel_has_failed()) st->print( "partial_peel_failed " );
65 }
66 #endif
67
68 //------------------------------is_valid_counted_loop-------------------------
69 bool LoopNode::is_valid_counted_loop() const {
70 if (is_CountedLoop()) {
71 CountedLoopNode* l = as_CountedLoop();
72 CountedLoopEndNode* le = l->loopexit_or_null();
73 if (le != NULL &&
74 le->proj_out_or_null(1 /* true */) == l->in(LoopNode::LoopBackControl)) {
75 Node* phi = l->phi();
76 Node* exit = le->proj_out_or_null(0 /* false */);
77 if (exit != NULL && exit->Opcode() == Op_IfFalse &&
78 phi != NULL && phi->is_Phi() &&
79 phi->in(LoopNode::LoopBackControl) == l->incr() &&
80 le->loopnode() == l && le->stride_is_con()) {
81 return true;
82 }
83 }
84 }
85 return false;
86 }
87
88 //------------------------------get_early_ctrl---------------------------------
89 // Compute earliest legal control
90 Node *PhaseIdealLoop::get_early_ctrl( Node *n ) {
91 assert( !n->is_Phi() && !n->is_CFG(), "this code only handles data nodes" );
92 uint i;
93 Node *early;
94 if (n->in(0) && !n->is_expensive()) {
95 early = n->in(0);
96 if (!early->is_CFG()) // Might be a non-CFG multi-def
97 early = get_ctrl(early); // So treat input as a straight data input
98 i = 1;
99 } else {
100 early = get_ctrl(n->in(1));
101 i = 2;
102 }
103 uint e_d = dom_depth(early);
104 assert( early, "" );
105 for (; i < n->req(); i++) {
106 Node *cin = get_ctrl(n->in(i));
107 assert( cin, "" );
108 // Keep deepest dominator depth
109 uint c_d = dom_depth(cin);
110 if (c_d > e_d) { // Deeper guy?
111 early = cin; // Keep deepest found so far
112 e_d = c_d;
113 } else if (c_d == e_d && // Same depth?
114 early != cin) { // If not equal, must use slower algorithm
115 // If same depth but not equal, one _must_ dominate the other
116 // and we want the deeper (i.e., dominated) guy.
117 Node *n1 = early;
118 Node *n2 = cin;
119 while (1) {
120 n1 = idom(n1); // Walk up until break cycle
121 n2 = idom(n2);
122 if (n1 == cin || // Walked early up to cin
123 dom_depth(n2) < c_d)
124 break; // early is deeper; keep him
125 if (n2 == early || // Walked cin up to early
126 dom_depth(n1) < c_d) {
127 early = cin; // cin is deeper; keep him
128 break;
129 }
130 }
131 e_d = dom_depth(early); // Reset depth register cache
132 }
133 }
134
135 // Return earliest legal location
136 assert(early == find_non_split_ctrl(early), "unexpected early control");
137
138 if (n->is_expensive() && !_verify_only && !_verify_me) {
139 assert(n->in(0), "should have control input");
140 early = get_early_ctrl_for_expensive(n, early);
141 }
142
143 return early;
144 }
145
146 //------------------------------get_early_ctrl_for_expensive---------------------------------
147 // Move node up the dominator tree as high as legal while still beneficial
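// For example (illustrative): an expensive node pinned under one projection of
// an If is only hoisted above that If when the other projection leads to an
// uncommon trap, i.e. when hoisting keeps the node on what is, in practice,
// the same code path.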
148 Node *PhaseIdealLoop::get_early_ctrl_for_expensive(Node *n, Node* earliest) {
149 assert(n->in(0) && n->is_expensive(), "expensive node with control input here");
150 assert(OptimizeExpensiveOps, "optimization off?");
151
152 Node* ctl = n->in(0);
153 assert(ctl->is_CFG(), "expensive input 0 must be cfg");
154 uint min_dom_depth = dom_depth(earliest);
155 #ifdef ASSERT
156 if (!is_dominator(ctl, earliest) && !is_dominator(earliest, ctl)) {
157 dump_bad_graph("Bad graph detected in get_early_ctrl_for_expensive", n, earliest, ctl);
158 assert(false, "Bad graph detected in get_early_ctrl_for_expensive");
159 }
160 #endif
161 if (dom_depth(ctl) < min_dom_depth) {
162 return earliest;
163 }
164
165 while (1) {
166 Node *next = ctl;
    // Moving the node out of a loop on the projection of an If
    // confuses loop predication. So once we hit a Loop in an If branch
    // that doesn't branch to an UNC, we stop. The code that processes
    // expensive nodes will notice the loop and skip over it to try to
    // move the node further up.
172 if (ctl->is_CountedLoop() && ctl->in(1) != NULL && ctl->in(1)->in(0) != NULL && ctl->in(1)->in(0)->is_If()) {
173 if (!ctl->in(1)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
174 break;
175 }
176 next = idom(ctl->in(1)->in(0));
177 } else if (ctl->is_Proj()) {
      // We only move it up along a projection if the projection is
      // the single control projection for its parent: same code path,
      // i.e. an If with an uncommon trap or the fallthrough of a call.
181 Node* parent_ctl = ctl->in(0);
182 if (parent_ctl == NULL) {
183 break;
184 } else if (parent_ctl->is_CountedLoopEnd() && parent_ctl->as_CountedLoopEnd()->loopnode() != NULL) {
185 next = parent_ctl->as_CountedLoopEnd()->loopnode()->init_control();
186 } else if (parent_ctl->is_If()) {
187 if (!ctl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
188 break;
189 }
190 assert(idom(ctl) == parent_ctl, "strange");
191 next = idom(parent_ctl);
192 } else if (ctl->is_CatchProj()) {
193 if (ctl->as_Proj()->_con != CatchProjNode::fall_through_index) {
194 break;
195 }
196 assert(parent_ctl->in(0)->in(0)->is_Call(), "strange graph");
197 next = parent_ctl->in(0)->in(0)->in(0);
198 } else {
199 // Check if parent control has a single projection (this
200 // control is the only possible successor of the parent
201 // control). If so, we can try to move the node above the
202 // parent control.
203 int nb_ctl_proj = 0;
204 for (DUIterator_Fast imax, i = parent_ctl->fast_outs(imax); i < imax; i++) {
205 Node *p = parent_ctl->fast_out(i);
206 if (p->is_Proj() && p->is_CFG()) {
207 nb_ctl_proj++;
208 if (nb_ctl_proj > 1) {
209 break;
210 }
211 }
212 }
213
214 if (nb_ctl_proj > 1) {
215 break;
216 }
217 assert(parent_ctl->is_Start() || parent_ctl->is_MemBar() || parent_ctl->is_Call() ||
218 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(parent_ctl), "unexpected node");
219 assert(idom(ctl) == parent_ctl, "strange");
220 next = idom(parent_ctl);
221 }
222 } else {
223 next = idom(ctl);
224 }
225 if (next->is_Root() || next->is_Start() || dom_depth(next) < min_dom_depth) {
226 break;
227 }
228 ctl = next;
229 }
230
231 if (ctl != n->in(0)) {
232 _igvn.replace_input_of(n, 0, ctl);
233 _igvn.hash_insert(n);
234 }
235
236 return ctl;
237 }
238
239
240 //------------------------------set_early_ctrl---------------------------------
241 // Set earliest legal control
242 void PhaseIdealLoop::set_early_ctrl( Node *n ) {
243 Node *early = get_early_ctrl(n);
244
245 // Record earliest legal location
246 set_ctrl(n, early);
247 }
248
249 //------------------------------set_subtree_ctrl-------------------------------
250 // set missing _ctrl entries on new nodes
251 void PhaseIdealLoop::set_subtree_ctrl( Node *n ) {
252 // Already set? Get out.
253 if( _nodes[n->_idx] ) return;
254 // Recursively set _nodes array to indicate where the Node goes
255 uint i;
256 for( i = 0; i < n->req(); ++i ) {
257 Node *m = n->in(i);
258 if( m && m != C->root() )
259 set_subtree_ctrl( m );
260 }
261
262 // Fixup self
263 set_early_ctrl( n );
264 }
265
266 IdealLoopTree* PhaseIdealLoop::insert_outer_loop(IdealLoopTree* loop, LoopNode* outer_l, Node* outer_ift) {
267 IdealLoopTree* outer_ilt = new IdealLoopTree(this, outer_l, outer_ift);
268 IdealLoopTree* parent = loop->_parent;
269 IdealLoopTree* sibling = parent->_child;
270 if (sibling == loop) {
271 parent->_child = outer_ilt;
272 } else {
273 while (sibling->_next != loop) {
274 sibling = sibling->_next;
275 }
276 sibling->_next = outer_ilt;
277 }
278 outer_ilt->_next = loop->_next;
279 outer_ilt->_parent = parent;
280 outer_ilt->_child = loop;
281 outer_ilt->_nest = loop->_nest;
282 loop->_parent = outer_ilt;
283 loop->_next = NULL;
284 loop->_nest++;
285 return outer_ilt;
286 }
287
288 // Create a skeleton strip mined outer loop: a Loop head before the
289 // inner strip mined loop, a safepoint and an exit condition guarded
290 // by an opaque node after the inner strip mined loop with a backedge
291 // to the loop head. The inner strip mined loop is left as it is. Only
292 // once loop optimizations are over, do we adjust the inner loop exit
293 // condition to limit its number of iterations, set the outer loop
294 // exit condition and add Phis to the outer loop head. Some loop
295 // optimizations that operate on the inner strip mined loop need to be
296 // aware of the outer strip mined loop: loop unswitching needs to
297 // clone the outer loop as well as the inner, unrolling needs to only
298 // clone the inner loop etc. No optimizations need to change the outer
299 // strip mined loop as it is only a skeleton.
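// As a rough source-level sketch (illustrative only, not taken from this code),
// a loop
//   for (int i = start; i < limit; i++) { body(i); }
// ends up shaped as
//   outer: while (true) {            // OuterStripMinedLoop (skeleton built here)
//     for (a bounded chunk) {        // inner CountedLoop, left untouched for now
//       body(i); i++;
//     }
//     safepoint();
//     if (done) break;               // outer exit, guarded by an opaque node
//   }
// so safepoints are taken between chunks while the inner loop itself runs
// without one. The chunk size and the real outer exit test are only filled in
// once loop optimizations are over, as described above.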
300 IdealLoopTree* PhaseIdealLoop::create_outer_strip_mined_loop(BoolNode *test, Node *cmp, Node *init_control,
301 IdealLoopTree* loop, float cl_prob, float le_fcnt,
302 Node*& entry_control, Node*& iffalse) {
303 Node* outer_test = _igvn.intcon(0);
304 set_ctrl(outer_test, C->root());
305 Node *orig = iffalse;
306 iffalse = iffalse->clone();
307 _igvn.register_new_node_with_optimizer(iffalse);
308 set_idom(iffalse, idom(orig), dom_depth(orig));
309
310 IfNode *outer_le = new OuterStripMinedLoopEndNode(iffalse, outer_test, cl_prob, le_fcnt);
311 Node *outer_ift = new IfTrueNode (outer_le);
312 Node* outer_iff = orig;
313 _igvn.replace_input_of(outer_iff, 0, outer_le);
314
315 LoopNode *outer_l = new OuterStripMinedLoopNode(C, init_control, outer_ift);
316 entry_control = outer_l;
317
318 IdealLoopTree* outer_ilt = insert_outer_loop(loop, outer_l, outer_ift);
319
320 set_loop(iffalse, outer_ilt);
321 // When this code runs, loop bodies have not yet been populated.
322 const bool body_populated = false;
323 register_control(outer_le, outer_ilt, iffalse, body_populated);
324 register_control(outer_ift, outer_ilt, outer_le, body_populated);
325 set_idom(outer_iff, outer_le, dom_depth(outer_le));
326 _igvn.register_new_node_with_optimizer(outer_l);
327 set_loop(outer_l, outer_ilt);
328 set_idom(outer_l, init_control, dom_depth(init_control)+1);
329
330 return outer_ilt;
331 }
332
333 void PhaseIdealLoop::insert_loop_limit_check(ProjNode* limit_check_proj, Node* cmp_limit, Node* bol) {
334 Node* new_predicate_proj = create_new_if_for_predicate(limit_check_proj, NULL,
335 Deoptimization::Reason_loop_limit_check,
336 Op_If);
337 Node* iff = new_predicate_proj->in(0);
338 assert(iff->Opcode() == Op_If, "bad graph shape");
339 Node* conv = iff->in(1);
340 assert(conv->Opcode() == Op_Conv2B, "bad graph shape");
341 Node* opaq = conv->in(1);
342 assert(opaq->Opcode() == Op_Opaque1, "bad graph shape");
343 cmp_limit = _igvn.register_new_node_with_optimizer(cmp_limit);
344 bol = _igvn.register_new_node_with_optimizer(bol);
345 set_subtree_ctrl(bol);
346 _igvn.replace_input_of(iff, 1, bol);
347
348 #ifndef PRODUCT
  // Report that the loop limit check has actually been generated
  // for this loop
351 if (TraceLoopLimitCheck) {
352 tty->print_cr("Counted Loop Limit Check generated:");
353 debug_only( bol->dump(2); )
354 }
355 #endif
356 }
357
358 Node* PhaseIdealLoop::loop_exit_control(Node* x, IdealLoopTree* loop) {
  // Counted loop head must be a good RegionNode with only 3 non-NULL
  // control input edges: Self, Entry, LoopBack.
361 if (x->in(LoopNode::Self) == NULL || x->req() != 3 || loop->_irreducible) {
362 return NULL;
363 }
364 Node *init_control = x->in(LoopNode::EntryControl);
365 Node *back_control = x->in(LoopNode::LoopBackControl);
366 if (init_control == NULL || back_control == NULL) { // Partially dead
367 return NULL;
368 }
369 // Must also check for TOP when looking for a dead loop
370 if (init_control->is_top() || back_control->is_top()) {
371 return NULL;
372 }
373
374 // Allow funny placement of Safepoint
375 if (back_control->Opcode() == Op_SafePoint) {
376 back_control = back_control->in(TypeFunc::Control);
377 }
378
379 // Controlling test for loop
380 Node *iftrue = back_control;
381 uint iftrue_op = iftrue->Opcode();
382 if (iftrue_op != Op_IfTrue &&
383 iftrue_op != Op_IfFalse) {
384 // I have a weird back-control. Probably the loop-exit test is in
385 // the middle of the loop and I am looking at some trailing control-flow
386 // merge point. To fix this I would have to partially peel the loop.
387 return NULL; // Obscure back-control
388 }
389
390 // Get boolean guarding loop-back test
391 Node *iff = iftrue->in(0);
392 if (get_loop(iff) != loop || !iff->in(1)->is_Bool()) {
393 return NULL;
394 }
395 return iftrue;
396 }
397
398 Node* PhaseIdealLoop::loop_exit_test(Node* back_control, IdealLoopTree* loop, Node*& incr, Node*& limit, BoolTest::mask& bt, float& cl_prob) {
399 Node* iftrue = back_control;
400 uint iftrue_op = iftrue->Opcode();
401 Node* iff = iftrue->in(0);
402 BoolNode* test = iff->in(1)->as_Bool();
403 bt = test->_test._test;
404 cl_prob = iff->as_If()->_prob;
405 if (iftrue_op == Op_IfFalse) {
406 bt = BoolTest(bt).negate();
407 cl_prob = 1.0 - cl_prob;
408 }
409 // Get backedge compare
410 Node* cmp = test->in(1);
411 if (!cmp->is_Cmp()) {
412 return NULL;
413 }
414
415 // Find the trip-counter increment & limit. Limit must be loop invariant.
416 incr = cmp->in(1);
417 limit = cmp->in(2);
418
419 // ---------
420 // need 'loop()' test to tell if limit is loop invariant
421 // ---------
422
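  // For example (illustrative): a source test written as "limit > i" can show
  // up as CmpI(limit, i). Its first input is then loop invariant rather than
  // the trip counter, so the operands are swapped below and the test is
  // commuted (gt becomes lt) so that 'incr' always names the loop-variant side.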
423 if (!is_member(loop, get_ctrl(incr))) { // Swapped trip counter and limit?
424 Node* tmp = incr; // Then reverse order into the CmpI
425 incr = limit;
426 limit = tmp;
427 bt = BoolTest(bt).commute(); // And commute the exit test
428 }
429 if (is_member(loop, get_ctrl(limit))) { // Limit must be loop-invariant
430 return NULL;
431 }
432 if (!is_member(loop, get_ctrl(incr))) { // Trip counter must be loop-variant
433 return NULL;
434 }
435 return cmp;
436 }
437
438 Node* PhaseIdealLoop::loop_iv_incr(Node* incr, Node* x, IdealLoopTree* loop, Node*& phi_incr) {
439 if (incr->is_Phi()) {
440 if (incr->as_Phi()->region() != x || incr->req() != 3) {
441 return NULL; // Not simple trip counter expression
442 }
443 phi_incr = incr;
444 incr = phi_incr->in(LoopNode::LoopBackControl); // Assume incr is on backedge of Phi
445 if (!is_member(loop, get_ctrl(incr))) { // Trip counter must be loop-variant
446 return NULL;
447 }
448 }
449 return incr;
450 }
451
452 Node* PhaseIdealLoop::loop_iv_stride(Node* incr, IdealLoopTree* loop, Node*& xphi) {
453 assert(incr->Opcode() == Op_AddI || incr->Opcode() == Op_AddL, "caller resp.");
454 // Get merge point
455 xphi = incr->in(1);
456 Node *stride = incr->in(2);
457 if (!stride->is_Con()) { // Oops, swap these
458 if (!xphi->is_Con()) { // Is the other guy a constant?
459 return NULL; // Nope, unknown stride, bail out
460 }
461 Node *tmp = xphi; // 'incr' is commutative, so ok to swap
462 xphi = stride;
463 stride = tmp;
464 }
465 return stride;
466 }
467
468 PhiNode* PhaseIdealLoop::loop_iv_phi(Node* xphi, Node* phi_incr, Node* x, IdealLoopTree* loop) {
469 if (!xphi->is_Phi()) {
470 return NULL; // Too much math on the trip counter
471 }
472 if (phi_incr != NULL && phi_incr != xphi) {
473 return NULL;
474 }
475 PhiNode *phi = xphi->as_Phi();
476
477 // Phi must be of loop header; backedge must wrap to increment
478 if (phi->region() != x) {
479 return NULL;
480 }
481 return phi;
482 }
483
484 // Return 0 if it won't overflow, -1 if it must overflow, and 1 otherwise.
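// For example (illustrative), with stride_con == 2:
//   limit type [0, 100]               -> 0 (never overflows)
//   limit type [0, max_jint]          -> 1 (may overflow: only the high bound is too big)
//   limit type [max_jint-1, max_jint] -> -1 (must overflow)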
485 static int check_stride_overflow(jint stride_con, const TypeInt* limit_t) {
486 if (stride_con > 0) {
487 if (limit_t->_lo > (max_jint - stride_con)) {
488 return -1;
489 }
490 if (limit_t->_hi > (max_jint - stride_con)) {
491 return 1;
492 }
493 } else {
494 if (limit_t->_hi < (min_jint - stride_con)) {
495 return -1;
496 }
497 if (limit_t->_lo < (min_jint - stride_con)) {
498 return 1;
499 }
500 }
501 return 0;
502 }
503
504 //------------------------------is_counted_loop--------------------------------
505 bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*& loop) {
506 PhaseGVN *gvn = &_igvn;
507
508 Node* back_control = loop_exit_control(x, loop);
509 if (back_control == NULL) {
510 return false;
511 }
512
513 BoolTest::mask bt = BoolTest::illegal;
514 float cl_prob = 0;
515 Node* incr = NULL;
516 Node* limit = NULL;
517
518 Node* cmp = loop_exit_test(back_control, loop, incr, limit, bt, cl_prob);
519 if (cmp == NULL || cmp->Opcode() != Op_CmpI) {
520 return false; // Avoid pointer & float & 64-bit compares
521 }
522
523 // Trip-counter increment must be commutative & associative.
524 if (incr->Opcode() == Op_CastII) {
525 incr = incr->in(1);
526 }
527
528 Node* phi_incr = NULL;
529 incr = loop_iv_incr(incr, x, loop, phi_incr);
530 if (incr == NULL) {
531 return false;
532 }
533
534 Node* trunc1 = NULL;
535 Node* trunc2 = NULL;
536 const TypeInt* iv_trunc_t = NULL;
537 Node* orig_incr = incr;
538 if (!(incr = CountedLoopNode::match_incr_with_optional_truncation(incr, &trunc1, &trunc2, &iv_trunc_t))) {
539 return false; // Funny increment opcode
540 }
541 assert(incr->Opcode() == Op_AddI, "wrong increment code");
542
543 Node* xphi = NULL;
544 Node* stride = loop_iv_stride(incr, loop, xphi);
545
546 if (stride == NULL) {
547 return false;
548 }
549
550 if (xphi->Opcode() == Op_CastII) {
551 xphi = xphi->in(1);
552 }
553
554 // Stride must be constant
555 int stride_con = stride->get_int();
556 assert(stride_con != 0, "missed some peephole opt");
557
558 PhiNode* phi = loop_iv_phi(xphi, phi_incr, x, loop);
559
560 if (phi == NULL ||
561 (trunc1 == NULL && phi->in(LoopNode::LoopBackControl) != incr) ||
562 (trunc1 != NULL && phi->in(LoopNode::LoopBackControl) != trunc1)) {
563 return false;
564 }
565
566 if (x->in(LoopNode::LoopBackControl)->Opcode() == Op_SafePoint &&
567 LoopStripMiningIter != 0) {
568 // Leaving the safepoint on the backedge and creating a
569 // CountedLoop will confuse optimizations. We can't move the
570 // safepoint around because its jvm state wouldn't match a new
571 // location. Give up on that loop.
572 return false;
573 }
574
575 Node* iftrue = back_control;
576 uint iftrue_op = iftrue->Opcode();
577 Node* iff = iftrue->in(0);
578 BoolNode* test = iff->in(1)->as_Bool();
579
580 const TypeInt* limit_t = gvn->type(limit)->is_int();
581 if (trunc1 != NULL) {
    // When there is a truncation, we must be sure that after the truncation
    // the trip counter will end up higher than the limit, otherwise we are looking
    // at an endless loop. Can happen with range checks.

    // Example:
    // int i = 0;
    // while (true) {
    //    sum += array[i];
    //    i++;
    //    i = i & 0x7fff;
    // }
    //
    // If the array is shorter than 0x8000 this exits through an AIOOBE
    //  - Counted loop transformation is ok
    // If the array is longer, then this is an endless loop
    //  - No transformation can be done.
598
599 const TypeInt* incr_t = gvn->type(orig_incr)->is_int();
600 if (limit_t->_hi > incr_t->_hi) {
601 // if the limit can have a higher value than the increment (before the phi)
602 return false;
603 }
604 }
605
606 Node *init_trip = phi->in(LoopNode::EntryControl);
607
608 // If iv trunc type is smaller than int, check for possible wrap.
609 if (!TypeInt::INT->higher_equal(iv_trunc_t)) {
610 assert(trunc1 != NULL, "must have found some truncation");
611
612 // Get a better type for the phi (filtered thru if's)
613 const TypeInt* phi_ft = filtered_type(phi);
614
615 // Can iv take on a value that will wrap?
616 //
617 // Ensure iv's limit is not within "stride" of the wrap value.
618 //
619 // Example for "short" type
620 // Truncation ensures value is in the range -32768..32767 (iv_trunc_t)
621 // If the stride is +10, then the last value of the induction
622 // variable before the increment (phi_ft->_hi) must be
623 // <= 32767 - 10 and (phi_ft->_lo) must be >= -32768 to
624 // ensure no truncation occurs after the increment.
625
626 if (stride_con > 0) {
627 if (iv_trunc_t->_hi - phi_ft->_hi < stride_con ||
628 iv_trunc_t->_lo > phi_ft->_lo) {
629 return false; // truncation may occur
630 }
631 } else if (stride_con < 0) {
632 if (iv_trunc_t->_lo - phi_ft->_lo > stride_con ||
633 iv_trunc_t->_hi < phi_ft->_hi) {
634 return false; // truncation may occur
635 }
636 }
637 // No possibility of wrap so truncation can be discarded
638 // Promote iv type to Int
639 } else {
640 assert(trunc1 == NULL && trunc2 == NULL, "no truncation for int");
641 }
642
643 // If the condition is inverted and we will be rolling
644 // through MININT to MAXINT, then bail out.
645 if (bt == BoolTest::eq || // Bail out, but this loop trips at most twice!
646 // Odd stride
647 (bt == BoolTest::ne && stride_con != 1 && stride_con != -1) ||
648 // Count down loop rolls through MAXINT
649 ((bt == BoolTest::le || bt == BoolTest::lt) && stride_con < 0) ||
650 // Count up loop rolls through MININT
651 ((bt == BoolTest::ge || bt == BoolTest::gt) && stride_con > 0)) {
652 return false; // Bail out
653 }
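  // For example (illustrative): for (int i = 0; i != limit; i += 2) is rejected
  // above; if limit is odd the trip counter never equals it and the loop does
  // not terminate through this test.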
654
655 const TypeInt* init_t = gvn->type(init_trip)->is_int();
656
657 if (stride_con > 0) {
658 jlong init_p = (jlong)init_t->_lo + stride_con;
659 if (init_p > (jlong)max_jint || init_p > (jlong)limit_t->_hi)
660 return false; // cyclic loop or this loop trips only once
661 } else {
662 jlong init_p = (jlong)init_t->_hi + stride_con;
663 if (init_p < (jlong)min_jint || init_p < (jlong)limit_t->_lo)
664 return false; // cyclic loop or this loop trips only once
665 }
666
667 if (phi_incr != NULL && bt != BoolTest::ne) {
    // check if there is a possibility of the IV overflowing after the first increment
669 if (stride_con > 0) {
670 if (init_t->_hi > max_jint - stride_con) {
671 return false;
672 }
673 } else {
674 if (init_t->_lo < min_jint - stride_con) {
675 return false;
676 }
677 }
678 }
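  // For example (illustrative): in i = init; do { } while (i++ < limit); the
  // compare uses the phi, and the loop is later rewritten to test the
  // incremented value against limit+1. If init could be as large as max_jint,
  // init + stride would already wrap, so such a loop is rejected above.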
679
680 // =================================================
681 // ---- SUCCESS! Found A Trip-Counted Loop! -----
682 //
683 assert(x->Opcode() == Op_Loop, "regular loops only");
684 C->print_method(PHASE_BEFORE_CLOOPS, 3);
685
686 Node *hook = new Node(6);
687
688 // ===================================================
689 // Generate loop limit check to avoid integer overflow
  // in cases like the following (cyclic loops):
691 //
692 // for (i=0; i <= max_jint; i++) {}
693 // for (i=0; i < max_jint; i+=2) {}
694 //
695 //
696 // Limit check predicate depends on the loop test:
697 //
698 // for(;i != limit; i++) --> limit <= (max_jint)
699 // for(;i < limit; i+=stride) --> limit <= (max_jint - stride + 1)
700 // for(;i <= limit; i+=stride) --> limit <= (max_jint - stride )
701 //
702
  // Check if the limit is excluded, to do a more precise int overflow check.
704 bool incl_limit = (bt == BoolTest::le || bt == BoolTest::ge);
705 int stride_m = stride_con - (incl_limit ? 0 : (stride_con > 0 ? 1 : -1));
706
  // If the compare points directly to the phi we need to adjust
  // the compare so that it points to the incr. The limit has
  // to be adjusted to keep the trip count the same and the
  // adjusted limit should be checked for int overflow.
711 Node* adjusted_limit = limit;
712 if (phi_incr != NULL) {
713 stride_m += stride_con;
714 }
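  // Worked example (illustrative): for (; i < limit; i += 2) uses an exclusive
  // test, so stride_m == 1 and the predicate generated below checks
  // limit <= max_jint - 1, matching "limit <= (max_jint - stride + 1)" above.
  // If the compare is against the phi (i++ < limit style), stride_m is bumped
  // by the stride because the limit itself is later adjusted by +stride.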
715
716 Node *init_control = x->in(LoopNode::EntryControl);
717
718 int sov = check_stride_overflow(stride_m, limit_t);
719 // If sov==0, limit's type always satisfies the condition, for
720 // example, when it is an array length.
721 if (sov != 0) {
722 if (sov < 0) {
723 return false; // Bailout: integer overflow is certain.
724 }
725 // Generate loop's limit check.
726 // Loop limit check predicate should be near the loop.
727 ProjNode *limit_check_proj = find_predicate_insertion_point(init_control, Deoptimization::Reason_loop_limit_check);
728 if (!limit_check_proj) {
729 // The limit check predicate is not generated if this method trapped here before.
730 #ifdef ASSERT
731 if (TraceLoopLimitCheck) {
732 tty->print("missing loop limit check:");
733 loop->dump_head();
734 x->dump(1);
735 }
736 #endif
737 return false;
738 }
739
740 IfNode* check_iff = limit_check_proj->in(0)->as_If();
741
742 if (!is_dominator(get_ctrl(limit), check_iff->in(0))) {
743 return false;
744 }
745
746 Node* cmp_limit;
747 Node* bol;
748
749 if (stride_con > 0) {
750 cmp_limit = new CmpINode(limit, _igvn.intcon(max_jint - stride_m));
751 bol = new BoolNode(cmp_limit, BoolTest::le);
752 } else {
753 cmp_limit = new CmpINode(limit, _igvn.intcon(min_jint - stride_m));
754 bol = new BoolNode(cmp_limit, BoolTest::ge);
755 }
756
757 insert_loop_limit_check(limit_check_proj, cmp_limit, bol);
758 }
759
  // Now we need to canonicalize the loop condition.
761 if (bt == BoolTest::ne) {
762 assert(stride_con == 1 || stride_con == -1, "simple increment only");
763 if (stride_con > 0 && init_t->_hi < limit_t->_lo) {
764 // 'ne' can be replaced with 'lt' only when init < limit.
765 bt = BoolTest::lt;
766 } else if (stride_con < 0 && init_t->_lo > limit_t->_hi) {
767 // 'ne' can be replaced with 'gt' only when init > limit.
768 bt = BoolTest::gt;
769 } else {
770 ProjNode *limit_check_proj = find_predicate_insertion_point(init_control, Deoptimization::Reason_loop_limit_check);
771 if (!limit_check_proj) {
772 // The limit check predicate is not generated if this method trapped here before.
773 #ifdef ASSERT
774 if (TraceLoopLimitCheck) {
775 tty->print("missing loop limit check:");
776 loop->dump_head();
777 x->dump(1);
778 }
779 #endif
780 return false;
781 }
782 IfNode* check_iff = limit_check_proj->in(0)->as_If();
783
784 if (!is_dominator(get_ctrl(limit), check_iff->in(0)) ||
785 !is_dominator(get_ctrl(init_trip), check_iff->in(0))) {
786 return false;
787 }
788
789 Node* cmp_limit;
790 Node* bol;
791
792 if (stride_con > 0) {
793 cmp_limit = new CmpINode(init_trip, limit);
794 bol = new BoolNode(cmp_limit, BoolTest::lt);
795 } else {
796 cmp_limit = new CmpINode(init_trip, limit);
797 bol = new BoolNode(cmp_limit, BoolTest::gt);
798 }
799
800 insert_loop_limit_check(limit_check_proj, cmp_limit, bol);
801
802 if (stride_con > 0) {
803 // 'ne' can be replaced with 'lt' only when init < limit.
804 bt = BoolTest::lt;
805 } else if (stride_con < 0) {
806 // 'ne' can be replaced with 'gt' only when init > limit.
807 bt = BoolTest::gt;
808 }
809 }
810 }
811
812 if (phi_incr != NULL) {
    // If the compare points directly to the phi we need to adjust
    // the compare so that it points to the incr. The limit has
    // to be adjusted to keep the trip count the same and we
    // should avoid int overflow.
817 //
818 // i = init; do {} while(i++ < limit);
819 // is converted to
820 // i = init; do {} while(++i < limit+1);
821 //
822 adjusted_limit = gvn->transform(new AddINode(limit, stride));
823 }
824
825 if (incl_limit) {
    // The limit check guarantees that 'limit <= (max_jint - stride)' so
    // we can convert 'i <= limit' to 'i < limit+1' since stride != 0.
828 //
829 Node* one = (stride_con > 0) ? gvn->intcon( 1) : gvn->intcon(-1);
830 adjusted_limit = gvn->transform(new AddINode(adjusted_limit, one));
831 if (bt == BoolTest::le)
832 bt = BoolTest::lt;
833 else if (bt == BoolTest::ge)
834 bt = BoolTest::gt;
835 else
836 ShouldNotReachHere();
837 }
838 set_subtree_ctrl(adjusted_limit);
839
840 if (LoopStripMiningIter == 0) {
841 // Check for SafePoint on backedge and remove
842 Node *sfpt = x->in(LoopNode::LoopBackControl);
843 if (sfpt->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt)) {
844 lazy_replace( sfpt, iftrue );
845 if (loop->_safepts != NULL) {
846 loop->_safepts->yank(sfpt);
847 }
848 loop->_tail = iftrue;
849 }
850 }
851
852 // Build a canonical trip test.
853 // Clone code, as old values may be in use.
854 incr = incr->clone();
855 incr->set_req(1,phi);
856 incr->set_req(2,stride);
857 incr = _igvn.register_new_node_with_optimizer(incr);
858 set_early_ctrl( incr );
859 _igvn.rehash_node_delayed(phi);
860 phi->set_req_X( LoopNode::LoopBackControl, incr, &_igvn );
861
  // If the phi type is more restrictive than Int, raise it to
  // Int to prevent (almost) infinite recursion in igvn,
  // which can only handle integer types for constants or minint..maxint.
865 if (!TypeInt::INT->higher_equal(phi->bottom_type())) {
866 Node* nphi = PhiNode::make(phi->in(0), phi->in(LoopNode::EntryControl), TypeInt::INT);
867 nphi->set_req(LoopNode::LoopBackControl, phi->in(LoopNode::LoopBackControl));
868 nphi = _igvn.register_new_node_with_optimizer(nphi);
869 set_ctrl(nphi, get_ctrl(phi));
870 _igvn.replace_node(phi, nphi);
871 phi = nphi->as_Phi();
872 }
873 cmp = cmp->clone();
874 cmp->set_req(1,incr);
875 cmp->set_req(2, adjusted_limit);
876 cmp = _igvn.register_new_node_with_optimizer(cmp);
877 set_ctrl(cmp, iff->in(0));
878
879 test = test->clone()->as_Bool();
880 (*(BoolTest*)&test->_test)._test = bt;
881 test->set_req(1,cmp);
882 _igvn.register_new_node_with_optimizer(test);
883 set_ctrl(test, iff->in(0));
884
885 // Replace the old IfNode with a new LoopEndNode
886 Node *lex = _igvn.register_new_node_with_optimizer(new CountedLoopEndNode( iff->in(0), test, cl_prob, iff->as_If()->_fcnt ));
887 IfNode *le = lex->as_If();
888 uint dd = dom_depth(iff);
889 set_idom(le, le->in(0), dd); // Update dominance for loop exit
890 set_loop(le, loop);
891
892 // Get the loop-exit control
893 Node *iffalse = iff->as_If()->proj_out(!(iftrue_op == Op_IfTrue));
894
895 // Need to swap loop-exit and loop-back control?
896 if (iftrue_op == Op_IfFalse) {
897 Node *ift2=_igvn.register_new_node_with_optimizer(new IfTrueNode (le));
898 Node *iff2=_igvn.register_new_node_with_optimizer(new IfFalseNode(le));
899
900 loop->_tail = back_control = ift2;
901 set_loop(ift2, loop);
902 set_loop(iff2, get_loop(iffalse));
903
904 // Lazy update of 'get_ctrl' mechanism.
905 lazy_replace(iffalse, iff2);
906 lazy_replace(iftrue, ift2);
907
908 // Swap names
909 iffalse = iff2;
910 iftrue = ift2;
911 } else {
912 _igvn.rehash_node_delayed(iffalse);
913 _igvn.rehash_node_delayed(iftrue);
914 iffalse->set_req_X( 0, le, &_igvn );
915 iftrue ->set_req_X( 0, le, &_igvn );
916 }
917
918 set_idom(iftrue, le, dd+1);
919 set_idom(iffalse, le, dd+1);
920 assert(iff->outcnt() == 0, "should be dead now");
921 lazy_replace( iff, le ); // fix 'get_ctrl'
922
923 Node *sfpt2 = le->in(0);
924
925 Node* entry_control = init_control;
926 bool strip_mine_loop = LoopStripMiningIter > 1 && loop->_child == NULL &&
927 sfpt2->Opcode() == Op_SafePoint && !loop->_has_call;
928 IdealLoopTree* outer_ilt = NULL;
929 if (strip_mine_loop) {
930 outer_ilt = create_outer_strip_mined_loop(test, cmp, init_control, loop,
931 cl_prob, le->_fcnt, entry_control,
932 iffalse);
933 }
934
935 // Now setup a new CountedLoopNode to replace the existing LoopNode
936 CountedLoopNode *l = new CountedLoopNode(entry_control, back_control);
937 l->set_unswitch_count(x->as_Loop()->unswitch_count()); // Preserve
938 // The following assert is approximately true, and defines the intention
939 // of can_be_counted_loop. It fails, however, because phase->type
940 // is not yet initialized for this loop and its parts.
941 //assert(l->can_be_counted_loop(this), "sanity");
942 _igvn.register_new_node_with_optimizer(l);
943 set_loop(l, loop);
944 loop->_head = l;
945 // Fix all data nodes placed at the old loop head.
946 // Uses the lazy-update mechanism of 'get_ctrl'.
947 lazy_replace( x, l );
948 set_idom(l, entry_control, dom_depth(entry_control) + 1);
949
950 if (LoopStripMiningIter == 0 || strip_mine_loop) {
951 // Check for immediately preceding SafePoint and remove
952 if (sfpt2->Opcode() == Op_SafePoint && (LoopStripMiningIter != 0 || is_deleteable_safept(sfpt2))) {
953 if (strip_mine_loop) {
954 Node* outer_le = outer_ilt->_tail->in(0);
955 Node* sfpt = sfpt2->clone();
956 sfpt->set_req(0, iffalse);
957 outer_le->set_req(0, sfpt);
958 // When this code runs, loop bodies have not yet been populated.
959 const bool body_populated = false;
960 register_control(sfpt, outer_ilt, iffalse, body_populated);
961 set_idom(outer_le, sfpt, dom_depth(sfpt));
962 }
963 lazy_replace( sfpt2, sfpt2->in(TypeFunc::Control));
964 if (loop->_safepts != NULL) {
965 loop->_safepts->yank(sfpt2);
966 }
967 }
968 }
969
970 // Free up intermediate goo
971 _igvn.remove_dead_node(hook);
972
973 #ifdef ASSERT
974 assert(l->is_valid_counted_loop(), "counted loop shape is messed up");
975 assert(l == loop->_head && l->phi() == phi && l->loopexit_or_null() == lex, "" );
976 #endif
977 #ifndef PRODUCT
978 if (TraceLoopOpts) {
979 tty->print("Counted ");
980 loop->dump_head();
981 }
982 #endif
983
984 C->print_method(PHASE_AFTER_CLOOPS, 3);
985
986 // Capture bounds of the loop in the induction variable Phi before
987 // subsequent transformation (iteration splitting) obscures the
988 // bounds
989 l->phi()->as_Phi()->set_type(l->phi()->Value(&_igvn));
990
991 if (strip_mine_loop) {
992 l->mark_strip_mined();
993 l->verify_strip_mined(1);
994 outer_ilt->_head->as_Loop()->verify_strip_mined(1);
995 loop = outer_ilt;
996 }
997
998 return true;
999 }
1000
1001 //----------------------exact_limit-------------------------------------------
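// Return the exact final value of the induction variable (the value at loop
// exit). For example (illustrative), for (int i = 0; i < 10; i += 3) the exact
// limit is 12, not 10: the iv takes the values 0, 3, 6, 9 and exits at 12.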
1002 Node* PhaseIdealLoop::exact_limit( IdealLoopTree *loop ) {
1003 assert(loop->_head->is_CountedLoop(), "");
1004 CountedLoopNode *cl = loop->_head->as_CountedLoop();
1005 assert(cl->is_valid_counted_loop(), "");
1006
1007 if (ABS(cl->stride_con()) == 1 ||
1008 cl->limit()->Opcode() == Op_LoopLimit) {
    // Old code has an exact limit (it could be incorrect in case of int overflow).
    // The loop limit is exact with stride == 1. And the loop may already have an exact limit.
1011 return cl->limit();
1012 }
1013 Node *limit = NULL;
1014 #ifdef ASSERT
1015 BoolTest::mask bt = cl->loopexit()->test_trip();
1016 assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
1017 #endif
1018 if (cl->has_exact_trip_count()) {
1019 // Simple case: loop has constant boundaries.
1020 // Use jlongs to avoid integer overflow.
1021 int stride_con = cl->stride_con();
1022 jlong init_con = cl->init_trip()->get_int();
1023 jlong limit_con = cl->limit()->get_int();
1024 julong trip_cnt = cl->trip_count();
1025 jlong final_con = init_con + trip_cnt*stride_con;
1026 int final_int = (int)final_con;
1027 // The final value should be in integer range since the loop
1028 // is counted and the limit was checked for overflow.
1029 assert(final_con == (jlong)final_int, "final value should be integer");
1030 limit = _igvn.intcon(final_int);
1031 } else {
1032 // Create new LoopLimit node to get exact limit (final iv value).
1033 limit = new LoopLimitNode(C, cl->init_trip(), cl->limit(), cl->stride());
1034 register_new_node(limit, cl->in(LoopNode::EntryControl));
1035 }
1036 assert(limit != NULL, "sanity");
1037 return limit;
1038 }
1039
1040 //------------------------------Ideal------------------------------------------
1041 // Return a node which is more "ideal" than the current node.
1042 // Attempt to convert into a counted-loop.
1043 Node *LoopNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1044 if (!can_be_counted_loop(phase) && !is_OuterStripMinedLoop()) {
1045 phase->C->set_major_progress();
1046 }
1047 return RegionNode::Ideal(phase, can_reshape);
1048 }
1049
1050 #ifdef ASSERT
1051 void LoopNode::verify_strip_mined(int expect_skeleton) const {
1052 const OuterStripMinedLoopNode* outer = NULL;
1053 const CountedLoopNode* inner = NULL;
1054 if (is_strip_mined()) {
1055 if (!is_valid_counted_loop()) {
1056 return; // Skip malformed counted loop
1057 }
1058 assert(is_CountedLoop(), "no Loop should be marked strip mined");
1059 inner = as_CountedLoop();
1060 outer = inner->in(LoopNode::EntryControl)->as_OuterStripMinedLoop();
1061 } else if (is_OuterStripMinedLoop()) {
1062 outer = this->as_OuterStripMinedLoop();
1063 inner = outer->unique_ctrl_out()->as_CountedLoop();
1064 assert(inner->is_valid_counted_loop() && inner->is_strip_mined(), "OuterStripMinedLoop should have been removed");
1065 assert(!is_strip_mined(), "outer loop shouldn't be marked strip mined");
1066 }
1067 if (inner != NULL || outer != NULL) {
1068 assert(inner != NULL && outer != NULL, "missing loop in strip mined nest");
1069 Node* outer_tail = outer->in(LoopNode::LoopBackControl);
1070 Node* outer_le = outer_tail->in(0);
1071 assert(outer_le->Opcode() == Op_OuterStripMinedLoopEnd, "tail of outer loop should be an If");
1072 Node* sfpt = outer_le->in(0);
1073 assert(sfpt->Opcode() == Op_SafePoint, "where's the safepoint?");
1074 Node* inner_out = sfpt->in(0);
1075 if (inner_out->outcnt() != 1) {
1076 ResourceMark rm;
1077 Unique_Node_List wq;
1078
1079 for (DUIterator_Fast imax, i = inner_out->fast_outs(imax); i < imax; i++) {
1080 Node* u = inner_out->fast_out(i);
1081 if (u == sfpt) {
1082 continue;
1083 }
1084 wq.clear();
1085 wq.push(u);
1086 bool found_sfpt = false;
1087 for (uint next = 0; next < wq.size() && !found_sfpt; next++) {
1088 Node* n = wq.at(next);
1089 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !found_sfpt; i++) {
1090 Node* u = n->fast_out(i);
1091 if (u == sfpt) {
1092 found_sfpt = true;
1093 }
1094 if (!u->is_CFG()) {
1095 wq.push(u);
1096 }
1097 }
1098 }
1099 assert(found_sfpt, "no node in loop that's not input to safepoint");
1100 }
1101 }
1102
1103 CountedLoopEndNode* cle = inner_out->in(0)->as_CountedLoopEnd();
1104 assert(cle == inner->loopexit_or_null(), "mismatch");
1105 bool has_skeleton = outer_le->in(1)->bottom_type()->singleton() && outer_le->in(1)->bottom_type()->is_int()->get_con() == 0;
1106 if (has_skeleton) {
1107 assert(expect_skeleton == 1 || expect_skeleton == -1, "unexpected skeleton node");
1108 assert(outer->outcnt() == 2, "only phis");
1109 } else {
1110 assert(expect_skeleton == 0 || expect_skeleton == -1, "no skeleton node?");
1111 uint phis = 0;
1112 for (DUIterator_Fast imax, i = inner->fast_outs(imax); i < imax; i++) {
1113 Node* u = inner->fast_out(i);
1114 if (u->is_Phi()) {
1115 phis++;
1116 }
1117 }
1118 for (DUIterator_Fast imax, i = outer->fast_outs(imax); i < imax; i++) {
1119 Node* u = outer->fast_out(i);
1120 assert(u == outer || u == inner || u->is_Phi(), "nothing between inner and outer loop");
1121 }
1122 uint stores = 0;
1123 for (DUIterator_Fast imax, i = inner_out->fast_outs(imax); i < imax; i++) {
1124 Node* u = inner_out->fast_out(i);
1125 if (u->is_Store()) {
1126 stores++;
1127 }
1128 }
1129 assert(outer->outcnt() >= phis + 2 && outer->outcnt() <= phis + 2 + stores + 1, "only phis");
1130 }
1131 assert(sfpt->outcnt() == 1, "no data node");
1132 assert(outer_tail->outcnt() == 1 || !has_skeleton, "no data node");
1133 }
1134 }
1135 #endif
1136
1137 //=============================================================================
1138 //------------------------------Ideal------------------------------------------
1139 // Return a node which is more "ideal" than the current node.
1140 // Attempt to convert into a counted-loop.
1141 Node *CountedLoopNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1142 return RegionNode::Ideal(phase, can_reshape);
1143 }
1144
1145 //------------------------------dump_spec--------------------------------------
1146 // Dump special per-node info
1147 #ifndef PRODUCT
1148 void CountedLoopNode::dump_spec(outputStream *st) const {
1149 LoopNode::dump_spec(st);
1150 if (stride_is_con()) {
1151 st->print("stride: %d ",stride_con());
1152 }
1153 if (is_pre_loop ()) st->print("pre of N%d" , _main_idx);
1154 if (is_main_loop()) st->print("main of N%d", _idx);
1155 if (is_post_loop()) st->print("post of N%d", _main_idx);
1156 if (is_strip_mined()) st->print(" strip mined");
1157 }
1158 #endif
1159
1160 //=============================================================================
1161 int CountedLoopEndNode::stride_con() const {
1162 return stride()->bottom_type()->is_int()->get_con();
1163 }
1164
1165 //=============================================================================
1166 //------------------------------Value-----------------------------------------
1167 const Type* LoopLimitNode::Value(PhaseGVN* phase) const {
1168 const Type* init_t = phase->type(in(Init));
1169 const Type* limit_t = phase->type(in(Limit));
1170 const Type* stride_t = phase->type(in(Stride));
1171 // Either input is TOP ==> the result is TOP
1172 if (init_t == Type::TOP) return Type::TOP;
1173 if (limit_t == Type::TOP) return Type::TOP;
1174 if (stride_t == Type::TOP) return Type::TOP;
1175
1176 int stride_con = stride_t->is_int()->get_con();
1177 if (stride_con == 1)
1178 return NULL; // Identity
1179
1180 if (init_t->is_int()->is_con() && limit_t->is_int()->is_con()) {
1181 // Use jlongs to avoid integer overflow.
1182 jlong init_con = init_t->is_int()->get_con();
1183 jlong limit_con = limit_t->is_int()->get_con();
1184 int stride_m = stride_con - (stride_con > 0 ? 1 : -1);
1185 jlong trip_count = (limit_con - init_con + stride_m)/stride_con;
1186 jlong final_con = init_con + stride_con*trip_count;
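    // Worked example (illustrative): init=0, limit=10, stride=3 gives
    // stride_m=2, trip_count=(10-0+2)/3=4 and final_con=0+3*4=12, so this
    // LoopLimit node folds to the constant 12 (the exact final iv value).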
1187 int final_int = (int)final_con;
1188 // The final value should be in integer range since the loop
1189 // is counted and the limit was checked for overflow.
1190 assert(final_con == (jlong)final_int, "final value should be integer");
1191 return TypeInt::make(final_int);
1192 }
1193
1194 return bottom_type(); // TypeInt::INT
1195 }
1196
1197 //------------------------------Ideal------------------------------------------
1198 // Return a node which is more "ideal" than the current node.
1199 Node *LoopLimitNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1200 if (phase->type(in(Init)) == Type::TOP ||
1201 phase->type(in(Limit)) == Type::TOP ||
1202 phase->type(in(Stride)) == Type::TOP)
1203 return NULL; // Dead
1204
1205 int stride_con = phase->type(in(Stride))->is_int()->get_con();
1206 if (stride_con == 1)
1207 return NULL; // Identity
1208
1209 if (in(Init)->is_Con() && in(Limit)->is_Con())
1210 return NULL; // Value
1211
  // Delay the following optimizations until all loop optimizations
  // are done, to keep the Ideal graph simple.
1214 if (!can_reshape || phase->C->major_progress())
1215 return NULL;
1216
1217 const TypeInt* init_t = phase->type(in(Init) )->is_int();
1218 const TypeInt* limit_t = phase->type(in(Limit))->is_int();
1219 int stride_p;
1220 jlong lim, ini;
1221 julong max;
1222 if (stride_con > 0) {
1223 stride_p = stride_con;
1224 lim = limit_t->_hi;
1225 ini = init_t->_lo;
1226 max = (julong)max_jint;
1227 } else {
1228 stride_p = -stride_con;
1229 lim = init_t->_hi;
1230 ini = limit_t->_lo;
1231 max = (julong)min_jint;
1232 }
1233 julong range = lim - ini + stride_p;
1234 if (range <= max) {
    // Convert to an integer expression if it does not overflow.
1236 Node* stride_m = phase->intcon(stride_con - (stride_con > 0 ? 1 : -1));
1237 Node *range = phase->transform(new SubINode(in(Limit), in(Init)));
1238 Node *bias = phase->transform(new AddINode(range, stride_m));
1239 Node *trip = phase->transform(new DivINode(0, bias, in(Stride)));
1240 Node *span = phase->transform(new MulINode(trip, in(Stride)));
1241 return new AddINode(span, in(Init)); // exact limit
1242 }
1243
1244 if (is_power_of_2(stride_p) || // divisor is 2^n
1245 !Matcher::has_match_rule(Op_LoopLimit)) { // or no specialized Mach node?
    // Convert to a long expression to avoid integer overflow
    // and let the igvn optimizer convert this division.
1248 //
1249 Node* init = phase->transform( new ConvI2LNode(in(Init)));
1250 Node* limit = phase->transform( new ConvI2LNode(in(Limit)));
1251 Node* stride = phase->longcon(stride_con);
1252 Node* stride_m = phase->longcon(stride_con - (stride_con > 0 ? 1 : -1));
1253
1254 Node *range = phase->transform(new SubLNode(limit, init));
1255 Node *bias = phase->transform(new AddLNode(range, stride_m));
1256 Node *span;
1257 if (stride_con > 0 && is_power_of_2(stride_p)) {
      // bias >= 0 if stride > 0, so if stride is 2^n we can use &(-stride)
      // and avoid generating rounding for the division. The zero trip guard should
      // guarantee that init < limit, but sometimes the guard is missing and
      // we can get a situation where init > limit. Note: for the empty loop
      // optimization the zero trip guard is generated explicitly, which leaves
      // only the RCE predicate where the exact limit is used, and the predicate
      // will simply fail, forcing recompilation.
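      // Worked example (illustrative): init=0, limit=10, stride=4 gives
      // bias = 10 - 0 + 3 = 13 and span = 13 & -4 = 12, so the exact limit is
      // 0 + 12 = 12 (the iv runs 0, 4, 8 and exits at 12).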
1265 Node* neg_stride = phase->longcon(-stride_con);
1266 span = phase->transform(new AndLNode(bias, neg_stride));
1267 } else {
1268 Node *trip = phase->transform(new DivLNode(0, bias, stride));
1269 span = phase->transform(new MulLNode(trip, stride));
1270 }
1271 // Convert back to int
1272 Node *span_int = phase->transform(new ConvL2INode(span));
1273 return new AddINode(span_int, in(Init)); // exact limit
1274 }
1275
1276 return NULL; // No progress
1277 }
1278
1279 //------------------------------Identity---------------------------------------
1280 // If stride == 1 return limit node.
1281 Node* LoopLimitNode::Identity(PhaseGVN* phase) {
1282 int stride_con = phase->type(in(Stride))->is_int()->get_con();
1283 if (stride_con == 1 || stride_con == -1)
1284 return in(Limit);
1285 return this;
1286 }
1287
1288 //=============================================================================
1289 //----------------------match_incr_with_optional_truncation--------------------
1290 // Match increment with optional truncation:
1291 // CHAR: (i+1)&0x7fff, BYTE: ((i+1)<<8)>>8, or SHORT: ((i+1)<<16)>>16
1292 // Return NULL for failure. Success returns the increment node.
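// For example (illustrative): a loop over a short induction variable,
//   for (short s = 0; s < n; s++) { ... }
// increments via ((s+1)<<16)>>16 in the ideal graph. The RShiftI/LShiftI pair
// is returned through *trunc1/*trunc2, *trunc_type becomes TypeInt::SHORT, and
// the underlying AddI is returned as the increment.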
1293 Node* CountedLoopNode::match_incr_with_optional_truncation(
1294 Node* expr, Node** trunc1, Node** trunc2, const TypeInt** trunc_type) {
1295 // Quick cutouts:
1296 if (expr == NULL || expr->req() != 3) return NULL;
1297
1298 Node *t1 = NULL;
1299 Node *t2 = NULL;
1300 const TypeInt* trunc_t = TypeInt::INT;
1301 Node* n1 = expr;
1302 int n1op = n1->Opcode();
1303
1304 // Try to strip (n1 & M) or (n1 << N >> N) from n1.
1305 if (n1op == Op_AndI &&
1306 n1->in(2)->is_Con() &&
1307 n1->in(2)->bottom_type()->is_int()->get_con() == 0x7fff) {
1308 // %%% This check should match any mask of 2**K-1.
1309 t1 = n1;
1310 n1 = t1->in(1);
1311 n1op = n1->Opcode();
1312 trunc_t = TypeInt::CHAR;
1313 } else if (n1op == Op_RShiftI &&
1314 n1->in(1) != NULL &&
1315 n1->in(1)->Opcode() == Op_LShiftI &&
1316 n1->in(2) == n1->in(1)->in(2) &&
1317 n1->in(2)->is_Con()) {
1318 jint shift = n1->in(2)->bottom_type()->is_int()->get_con();
1319 // %%% This check should match any shift in [1..31].
1320 if (shift == 16 || shift == 8) {
1321 t1 = n1;
1322 t2 = t1->in(1);
1323 n1 = t2->in(1);
1324 n1op = n1->Opcode();
1325 if (shift == 16) {
1326 trunc_t = TypeInt::SHORT;
1327 } else if (shift == 8) {
1328 trunc_t = TypeInt::BYTE;
1329 }
1330 }
1331 }
1332
1333 // If (maybe after stripping) it is an AddI, we won:
1334 if (n1op == Op_AddI) {
1335 *trunc1 = t1;
1336 *trunc2 = t2;
1337 *trunc_type = trunc_t;
1338 return n1;
1339 }
1340
1341 // failed
1342 return NULL;
1343 }
1344
1345 LoopNode* CountedLoopNode::skip_strip_mined(int expect_skeleton) {
1346 if (is_strip_mined() && is_valid_counted_loop()) {
1347 verify_strip_mined(expect_skeleton);
1348 return in(EntryControl)->as_Loop();
1349 }
1350 return this;
1351 }
1352
1353 OuterStripMinedLoopNode* CountedLoopNode::outer_loop() const {
1354 assert(is_strip_mined(), "not a strip mined loop");
1355 Node* c = in(EntryControl);
1356 if (c == NULL || c->is_top() || !c->is_OuterStripMinedLoop()) {
1357 return NULL;
1358 }
1359 return c->as_OuterStripMinedLoop();
1360 }
1361
1362 IfTrueNode* OuterStripMinedLoopNode::outer_loop_tail() const {
1363 Node* c = in(LoopBackControl);
1364 if (c == NULL || c->is_top()) {
1365 return NULL;
1366 }
1367 return c->as_IfTrue();
1368 }
1369
1370 IfTrueNode* CountedLoopNode::outer_loop_tail() const {
1371 LoopNode* l = outer_loop();
1372 if (l == NULL) {
1373 return NULL;
1374 }
1375 return l->outer_loop_tail();
1376 }
1377
1378 OuterStripMinedLoopEndNode* OuterStripMinedLoopNode::outer_loop_end() const {
1379 IfTrueNode* proj = outer_loop_tail();
1380 if (proj == NULL) {
1381 return NULL;
1382 }
1383 Node* c = proj->in(0);
1384 if (c == NULL || c->is_top() || c->outcnt() != 2) {
1385 return NULL;
1386 }
1387 return c->as_OuterStripMinedLoopEnd();
1388 }
1389
1390 OuterStripMinedLoopEndNode* CountedLoopNode::outer_loop_end() const {
1391 LoopNode* l = outer_loop();
1392 if (l == NULL) {
1393 return NULL;
1394 }
1395 return l->outer_loop_end();
1396 }
1397
1398 IfFalseNode* OuterStripMinedLoopNode::outer_loop_exit() const {
1399 IfNode* le = outer_loop_end();
1400 if (le == NULL) {
1401 return NULL;
1402 }
1403 Node* c = le->proj_out_or_null(false);
1404 if (c == NULL) {
1405 return NULL;
1406 }
1407 return c->as_IfFalse();
1408 }
1409
1410 IfFalseNode* CountedLoopNode::outer_loop_exit() const {
1411 LoopNode* l = outer_loop();
1412 if (l == NULL) {
1413 return NULL;
1414 }
1415 return l->outer_loop_exit();
1416 }
1417
1418 SafePointNode* OuterStripMinedLoopNode::outer_safepoint() const {
1419 IfNode* le = outer_loop_end();
1420 if (le == NULL) {
1421 return NULL;
1422 }
1423 Node* c = le->in(0);
1424 if (c == NULL || c->is_top()) {
1425 return NULL;
1426 }
1427 assert(c->Opcode() == Op_SafePoint, "broken outer loop");
1428 return c->as_SafePoint();
1429 }
1430
1431 SafePointNode* CountedLoopNode::outer_safepoint() const {
1432 LoopNode* l = outer_loop();
1433 if (l == NULL) {
1434 return NULL;
1435 }
1436 return l->outer_safepoint();
1437 }
1438
1439 Node* CountedLoopNode::skip_predicates_from_entry(Node* ctrl) {
1440 while (ctrl != NULL && ctrl->is_Proj() && ctrl->in(0)->is_If() &&
1441 ctrl->in(0)->as_If()->proj_out(1-ctrl->as_Proj()->_con)->outcnt() == 1 &&
1442 ctrl->in(0)->as_If()->proj_out(1-ctrl->as_Proj()->_con)->unique_out()->Opcode() == Op_Halt) {
1443 ctrl = ctrl->in(0)->in(0);
1444 }
1445
1446 return ctrl;
1447 }
1448
1449 Node* CountedLoopNode::skip_predicates() {
1450 if (is_main_loop()) {
1451 Node* ctrl = skip_strip_mined()->in(LoopNode::EntryControl);
1452
1453 return skip_predicates_from_entry(ctrl);
1454 }
1455 return in(LoopNode::EntryControl);
1456 }
1457
1458 void OuterStripMinedLoopNode::adjust_strip_mined_loop(PhaseIterGVN* igvn) {
1459 // Look for the outer & inner strip mined loop, reduce number of
1460 // iterations of the inner loop, set exit condition of outer loop,
1461 // construct required phi nodes for outer loop.
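  // Rough sizing (illustrative, assuming stride == 1): if the iv type proves at
  // most LoopStripMiningIterShortLoop iterations, both the outer loop and the
  // safepoint are dropped below; at most LoopStripMiningIter iterations keeps
  // only the safepoint; otherwise the full nest is kept and the inner loop is
  // later limited to chunks of at most LoopStripMiningIter iterations.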
1462 CountedLoopNode* inner_cl = unique_ctrl_out()->as_CountedLoop();
1463 assert(inner_cl->is_strip_mined(), "inner loop should be strip mined");
1464 Node* inner_iv_phi = inner_cl->phi();
1465 if (inner_iv_phi == NULL) {
1466 IfNode* outer_le = outer_loop_end();
1467 Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
1468 igvn->replace_node(outer_le, iff);
1469 inner_cl->clear_strip_mined();
1470 return;
1471 }
1472 CountedLoopEndNode* inner_cle = inner_cl->loopexit();
1473
1474 int stride = inner_cl->stride_con();
1475 jlong scaled_iters_long = ((jlong)LoopStripMiningIter) * ABS(stride);
1476 int scaled_iters = (int)scaled_iters_long;
1477 int short_scaled_iters = LoopStripMiningIterShortLoop* ABS(stride);
1478 const TypeInt* inner_iv_t = igvn->type(inner_iv_phi)->is_int();
1479 jlong iter_estimate = (jlong)inner_iv_t->_hi - (jlong)inner_iv_t->_lo;
1480 assert(iter_estimate > 0, "broken");
1481 if ((jlong)scaled_iters != scaled_iters_long || iter_estimate <= short_scaled_iters) {
1482 // Remove outer loop and safepoint (too few iterations)
1483 Node* outer_sfpt = outer_safepoint();
1484 Node* outer_out = outer_loop_exit();
1485 igvn->replace_node(outer_out, outer_sfpt->in(0));
1486 igvn->replace_input_of(outer_sfpt, 0, igvn->C->top());
1487 inner_cl->clear_strip_mined();
1488 return;
1489 }
1490 if (iter_estimate <= scaled_iters_long) {
1491 // We would only go through one iteration of
1492 // the outer loop: drop the outer loop but
1493 // keep the safepoint so we don't run for
1494 // too long without a safepoint
1495 IfNode* outer_le = outer_loop_end();
1496 Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
1497 igvn->replace_node(outer_le, iff);
1498 inner_cl->clear_strip_mined();
1499 return;
1500 }
1501
1502 Node* cle_tail = inner_cle->proj_out(true);
1503 ResourceMark rm;
1504 Node_List old_new;
1505 if (cle_tail->outcnt() > 1) {
1506 // Look for nodes on backedge of inner loop and clone them
1507 Unique_Node_List backedge_nodes;
1508 for (DUIterator_Fast imax, i = cle_tail->fast_outs(imax); i < imax; i++) {
1509 Node* u = cle_tail->fast_out(i);
1510 if (u != inner_cl) {
1511 assert(!u->is_CFG(), "control flow on the backedge?");
1512 backedge_nodes.push(u);
1513 }
1514 }
1515 uint last = igvn->C->unique();
1516 for (uint next = 0; next < backedge_nodes.size(); next++) {
1517 Node* n = backedge_nodes.at(next);
1518 old_new.map(n->_idx, n->clone());
1519 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1520 Node* u = n->fast_out(i);
1521 assert(!u->is_CFG(), "broken");
1522 if (u->_idx >= last) {
1523 continue;
1524 }
1525 if (!u->is_Phi()) {
1526 backedge_nodes.push(u);
1527 } else {
1528 assert(u->in(0) == inner_cl, "strange phi on the backedge");
1529 }
1530 }
1531 }
1532 // Put the clones on the outer loop backedge
1533 Node* le_tail = outer_loop_tail();
1534 for (uint next = 0; next < backedge_nodes.size(); next++) {
1535 Node *n = old_new[backedge_nodes.at(next)->_idx];
1536 for (uint i = 1; i < n->req(); i++) {
1537 if (n->in(i) != NULL && old_new[n->in(i)->_idx] != NULL) {
1538 n->set_req(i, old_new[n->in(i)->_idx]);
1539 }
1540 }
1541 if (n->in(0) != NULL && n->in(0) == cle_tail) {
1542 n->set_req(0, le_tail);
1543 }
1544 igvn->register_new_node_with_optimizer(n);
1545 }
1546 }
1547
1548 Node* iv_phi = NULL;
1549 // Make a clone of each phi in the inner loop
1550 // for the outer loop
1551 for (uint i = 0; i < inner_cl->outcnt(); i++) {
1552 Node* u = inner_cl->raw_out(i);
1553 if (u->is_Phi()) {
1554 assert(u->in(0) == inner_cl, "inconsistent");
1555 Node* phi = u->clone();
1556 phi->set_req(0, this);
1557 Node* be = old_new[phi->in(LoopNode::LoopBackControl)->_idx];
1558 if (be != NULL) {
1559 phi->set_req(LoopNode::LoopBackControl, be);
1560 }
1561 phi = igvn->transform(phi);
1562 igvn->replace_input_of(u, LoopNode::EntryControl, phi);
1563 if (u == inner_iv_phi) {
1564 iv_phi = phi;
1565 }
1566 }
1567 }
1568 Node* cle_out = inner_cle->proj_out(false);
1569 if (cle_out->outcnt() > 1) {
1570 // Look for chains of stores that were sunk
1571 // out of the inner loop and are in the outer loop
1572 for (DUIterator_Fast imax, i = cle_out->fast_outs(imax); i < imax; i++) {
1573 Node* u = cle_out->fast_out(i);
1574 if (u->is_Store()) {
1575 Node* first = u;
1576 for(;;) {
1577 Node* next = first->in(MemNode::Memory);
1578 if (!next->is_Store() || next->in(0) != cle_out) {
1579 break;
1580 }
1581 first = next;
1582 }
1583 Node* last = u;
1584 for(;;) {
1585 Node* next = NULL;
1586 for (DUIterator_Fast jmax, j = last->fast_outs(jmax); j < jmax; j++) {
1587 Node* uu = last->fast_out(j);
1588 if (uu->is_Store() && uu->in(0) == cle_out) {
1589 assert(next == NULL, "only one in the outer loop");
1590 next = uu;
1591 }
1592 }
1593 if (next == NULL) {
1594 break;
1595 }
1596 last = next;
1597 }
1598 Node* phi = NULL;
1599 for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
1600 Node* uu = fast_out(j);
1601 if (uu->is_Phi()) {
1602 Node* be = uu->in(LoopNode::LoopBackControl);
1603 if (be->is_Store() && old_new[be->_idx] != NULL) {
1604 assert(false, "store on the backedge + sunk stores: unsupported");
1605 // drop outer loop
1606 IfNode* outer_le = outer_loop_end();
1607 Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
1608 igvn->replace_node(outer_le, iff);
1609 inner_cl->clear_strip_mined();
1610 return;
1611 }
1612 if (be == last || be == first->in(MemNode::Memory)) {
1613 assert(phi == NULL, "only one phi");
1614 phi = uu;
1615 }
1616 }
1617 }
1618 #ifdef ASSERT
1619 for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
1620 Node* uu = fast_out(j);
1621 if (uu->is_Phi() && uu->bottom_type() == Type::MEMORY) {
1622 if (uu->adr_type() == igvn->C->get_adr_type(igvn->C->get_alias_index(u->adr_type()))) {
1623 assert(phi == uu, "what's that phi?");
1624 } else if (uu->adr_type() == TypePtr::BOTTOM) {
1625 Node* n = uu->in(LoopNode::LoopBackControl);
1626 uint limit = igvn->C->live_nodes();
1627 uint i = 0;
1628 while (n != uu) {
1629 i++;
1630 assert(i < limit, "infinite loop");
1631 if (n->is_Proj()) {
1632 n = n->in(0);
1633 } else if (n->is_SafePoint() || n->is_MemBar()) {
1634 n = n->in(TypeFunc::Memory);
1635 } else if (n->is_Phi()) {
1636 n = n->in(1);
1637 } else if (n->is_MergeMem()) {
1638 n = n->as_MergeMem()->memory_at(igvn->C->get_alias_index(u->adr_type()));
1639 } else if (n->is_Store() || n->is_LoadStore() || n->is_ClearArray()) {
1640 n = n->in(MemNode::Memory);
1641 } else {
1642 n->dump();
1643 ShouldNotReachHere();
1644 }
1645 }
1646 }
1647 }
1648 }
1649 #endif
1650 if (phi == NULL) {
1651 // If an entire chain of stores was sunk, the
1652 // inner loop has no phi for that memory
1653 // slice; create one for the outer loop
1654 phi = PhiNode::make(this, first->in(MemNode::Memory), Type::MEMORY,
1655 igvn->C->get_adr_type(igvn->C->get_alias_index(u->adr_type())));
1656 phi->set_req(LoopNode::LoopBackControl, last);
1657 phi = igvn->transform(phi);
1658 igvn->replace_input_of(first, MemNode::Memory, phi);
1659 } else {
1660 // Otherwise, fix the outer loop's phi to include
1661 // that chain of stores.
1662 Node* be = phi->in(LoopNode::LoopBackControl);
1663 assert(!(be->is_Store() && old_new[be->_idx] != NULL), "store on the backedge + sunk stores: unsupported");
1664 if (be == first->in(MemNode::Memory)) {
1665 if (be == phi->in(LoopNode::LoopBackControl)) {
1666 igvn->replace_input_of(phi, LoopNode::LoopBackControl, last);
1667 } else {
1668 igvn->replace_input_of(be, MemNode::Memory, last);
1669 }
1670 } else {
1671 #ifdef ASSERT
1672 if (be == phi->in(LoopNode::LoopBackControl)) {
1673 assert(phi->in(LoopNode::LoopBackControl) == last, "");
1674 } else {
1675 assert(be->in(MemNode::Memory) == last, "");
1676 }
1677 #endif
1678 }
1679 }
1680 }
1681 }
1682 }
1683
1684 if (iv_phi != NULL) {
1685 // Now adjust the inner loop's exit condition
1686 Node* limit = inner_cl->limit();
1687 // If limit < init for stride > 0 (or limit > init for stride < 0),
1688 // the loop body is run only once. Given limit - init (init - limit resp.)
1689 // would be negative, the unsigned comparison below would cause
1690 // the loop body to be run for LoopStripMiningIter iterations.
1691 Node* max = NULL;
1692 if (stride > 0) {
1693 max = MaxNode::max_diff_with_zero(limit, iv_phi, TypeInt::INT, *igvn);
1694 } else {
1695 max = MaxNode::max_diff_with_zero(iv_phi, limit, TypeInt::INT, *igvn);
1696 }
1697 // sub is positive and can be larger than the max signed int
1698 // value. Use an unsigned min.
1699 Node* const_iters = igvn->intcon(scaled_iters);
1700 Node* min = MaxNode::unsigned_min(max, const_iters, TypeInt::make(0, scaled_iters, Type::WidenMin), *igvn);
1701 // min is the number of iterations for the next inner loop execution:
1702 // unsigned_min(max(limit - iv_phi, 0), scaled_iters) if stride > 0
1703 // unsigned_min(max(iv_phi - limit, 0), scaled_iters) if stride < 0
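// Illustrative example (made-up values, not taken from any particular
// compilation): with stride == +1, iv_phi == 90, limit == 1000 and
// scaled_iters == 250,
//   max == max(1000 - 90, 0) == 910
//   min == unsigned_min(910, 250) == 250
// so new_limit below becomes 90 + 250 == 340 and the next inner loop
// execution runs 250 iterations.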
1704
1705 Node* new_limit = NULL;
1706 if (stride > 0) {
1707 new_limit = igvn->transform(new AddINode(min, iv_phi));
1708 } else {
1709 new_limit = igvn->transform(new SubINode(iv_phi, min));
1710 }
1711 Node* inner_cmp = inner_cle->cmp_node();
1712 Node* inner_bol = inner_cle->in(CountedLoopEndNode::TestValue);
1713 Node* outer_bol = inner_bol;
1714 // cmp node for inner loop may be shared
1715 inner_cmp = inner_cmp->clone();
1716 inner_cmp->set_req(2, new_limit);
1717 inner_bol = inner_bol->clone();
1718 inner_bol->set_req(1, igvn->transform(inner_cmp));
1719 igvn->replace_input_of(inner_cle, CountedLoopEndNode::TestValue, igvn->transform(inner_bol));
1720 // Set the outer loop's exit condition too
1721 igvn->replace_input_of(outer_loop_end(), 1, outer_bol);
1722 } else {
1723 assert(false, "should be able to adjust outer loop");
1724 IfNode* outer_le = outer_loop_end();
1725 Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
1726 igvn->replace_node(outer_le, iff);
1727 inner_cl->clear_strip_mined();
1728 }
1729 }
1730
1731 const Type* OuterStripMinedLoopEndNode::Value(PhaseGVN* phase) const {
1732 if (!in(0)) return Type::TOP;
1733 if (phase->type(in(0)) == Type::TOP)
1734 return Type::TOP;
1735
1736 return TypeTuple::IFBOTH;
1737 }
1738
1739 Node *OuterStripMinedLoopEndNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1740 if (remove_dead_region(phase, can_reshape)) return this;
1741
1742 return NULL;
1743 }
1744
1745 //------------------------------filtered_type--------------------------------
1746 // Return a type based on condition control flow
1747 // A successful return will be a type that is restricted due
1748 // to a series of dominating if-tests, such as:
1749 // if (i < 10) {
1750 // if (i > 0) {
1751 // here: "i" type is [1..10)
1752 // }
1753 // }
1754 // or a control flow merge
1755 // if (i < 10) {
1756 // do {
1757 // phi( , ) -- at top of loop type is [min_int..10)
1758 // i = ?
1759 // } while ( i < 10)
1760 //
1761 const TypeInt* PhaseIdealLoop::filtered_type( Node *n, Node* n_ctrl) {
1762 assert(n && n->bottom_type()->is_int(), "must be int");
1763 const TypeInt* filtered_t = NULL;
1764 if (!n->is_Phi()) {
1765 assert(n_ctrl != NULL || n_ctrl == C->top(), "valid control");
1766 filtered_t = filtered_type_from_dominators(n, n_ctrl);
1767
1768 } else {
1769 Node* phi = n->as_Phi();
1770 Node* region = phi->in(0);
1771 assert(n_ctrl == NULL || n_ctrl == region, "ctrl parameter must be region");
1772 if (region && region != C->top()) {
1773 for (uint i = 1; i < phi->req(); i++) {
1774 Node* val = phi->in(i);
1775 Node* use_c = region->in(i);
1776 const TypeInt* val_t = filtered_type_from_dominators(val, use_c);
1777 if (val_t != NULL) {
1778 if (filtered_t == NULL) {
1779 filtered_t = val_t;
1780 } else {
1781 filtered_t = filtered_t->meet(val_t)->is_int();
1782 }
1783 }
1784 }
1785 }
1786 }
1787 const TypeInt* n_t = _igvn.type(n)->is_int();
1788 if (filtered_t != NULL) {
1789 n_t = n_t->join(filtered_t)->is_int();
1790 }
1791 return n_t;
1792 }
1793
1794
1795 //------------------------------filtered_type_from_dominators--------------------------------
1796 // Return a possibly more restrictive type for val based on condition control flow of dominators
1797 const TypeInt* PhaseIdealLoop::filtered_type_from_dominators( Node* val, Node *use_ctrl) {
1798 if (val->is_Con()) {
1799 return val->bottom_type()->is_int();
1800 }
1801 uint if_limit = 10; // Max number of dominating if's visited
1802 const TypeInt* rtn_t = NULL;
1803
1804 if (use_ctrl && use_ctrl != C->top()) {
1805 Node* val_ctrl = get_ctrl(val);
1806 uint val_dom_depth = dom_depth(val_ctrl);
1807 Node* pred = use_ctrl;
1808 uint if_cnt = 0;
1809 while (if_cnt < if_limit) {
1810 if ((pred->Opcode() == Op_IfTrue || pred->Opcode() == Op_IfFalse)) {
1811 if_cnt++;
1812 const TypeInt* if_t = IfNode::filtered_int_type(&_igvn, val, pred);
1813 if (if_t != NULL) {
1814 if (rtn_t == NULL) {
1815 rtn_t = if_t;
1816 } else {
1817 rtn_t = rtn_t->join(if_t)->is_int();
1818 }
1819 }
1820 }
1821 pred = idom(pred);
1822 if (pred == NULL || pred == C->top()) {
1823 break;
1824 }
1825 // Stop if going beyond definition block of val
1826 if (dom_depth(pred) < val_dom_depth) {
1827 break;
1828 }
1829 }
1830 }
1831 return rtn_t;
1832 }
1833
1834
1835 //------------------------------dump_spec--------------------------------------
1836 // Dump special per-node info
1837 #ifndef PRODUCT
1838 void CountedLoopEndNode::dump_spec(outputStream *st) const {
1839 if( in(TestValue) != NULL && in(TestValue)->is_Bool() ) {
1840 BoolTest bt( test_trip()); // Added this for g++.
1841
1842 st->print("[");
1843 bt.dump_on(st);
1844 st->print("]");
1845 }
1846 st->print(" ");
1847 IfNode::dump_spec(st);
1848 }
1849 #endif
1850
1851 //=============================================================================
1852 //------------------------------is_member--------------------------------------
1853 // Is 'l' a member of 'this'?
1854 bool IdealLoopTree::is_member(const IdealLoopTree *l) const {
1855 while( l->_nest > _nest ) l = l->_parent;
1856 return l == this;
1857 }
1858
1859 //------------------------------set_nest---------------------------------------
1860 // Set loop tree nesting depth. Accumulate _has_call bits.
1861 int IdealLoopTree::set_nest( uint depth ) {
1862 _nest = depth;
1863 int bits = _has_call;
1864 if( _child ) bits |= _child->set_nest(depth+1);
1865 if( bits ) _has_call = 1;
1866 if( _next ) bits |= _next ->set_nest(depth );
1867 return bits;
1868 }
1869
1870 //------------------------------split_fall_in----------------------------------
1871 // Split out multiple fall-in edges from the loop header. Move them to a
1872 // private RegionNode before the loop. This becomes the loop landing pad.
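// For example (sketch only): a loop header with fall-in edges
// { fall_in_A, fall_in_B } and a backedge keeps only the backedge plus one new
// edge from a landing-pad Region { fall_in_A, fall_in_B }; each Phi at the
// header is split the same way via a blank Phi placed on the landing pad.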
1873 void IdealLoopTree::split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt ) {
1874 PhaseIterGVN &igvn = phase->_igvn;
1875 uint i;
1876
1877 // Make a new RegionNode to be the landing pad.
1878 Node *landing_pad = new RegionNode( fall_in_cnt+1 );
1879 phase->set_loop(landing_pad,_parent);
1880 // Gather all the fall-in control paths into the landing pad
1881 uint icnt = fall_in_cnt;
1882 uint oreq = _head->req();
1883 for( i = oreq-1; i>0; i-- )
1884 if( !phase->is_member( this, _head->in(i) ) )
1885 landing_pad->set_req(icnt--,_head->in(i));
1886
1887 // Peel off PhiNode edges as well
1888 for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) {
1889 Node *oj = _head->fast_out(j);
1890 if( oj->is_Phi() ) {
1891 PhiNode* old_phi = oj->as_Phi();
1892 assert( old_phi->region() == _head, "" );
1893 igvn.hash_delete(old_phi); // Yank from hash before hacking edges
1894 Node *p = PhiNode::make_blank(landing_pad, old_phi);
1895 uint icnt = fall_in_cnt;
1896 for( i = oreq-1; i>0; i-- ) {
1897 if( !phase->is_member( this, _head->in(i) ) ) {
1898 p->init_req(icnt--, old_phi->in(i));
1899 // Go ahead and clean out old edges from old phi
1900 old_phi->del_req(i);
1901 }
1902 }
1903 // Search for CSE's here, because ZKM.jar does a lot of
1904 // loop hackery and we need to be a little incremental
1905 // with the CSE to avoid O(N^2) node blow-up.
1906 Node *p2 = igvn.hash_find_insert(p); // Look for a CSE
1907 if( p2 ) { // Found CSE
1908 p->destruct(); // Recover useless new node
1909 p = p2; // Use old node
1910 } else {
1911 igvn.register_new_node_with_optimizer(p, old_phi);
1912 }
1913 // Make old Phi refer to new Phi.
1914 old_phi->add_req(p);
1915 // Check for the special case of making the old phi useless and
1916 // disappear it. In JavaGrande I have a case where this useless
1917 // Phi is the loop limit and prevents recognizing a CountedLoop
1918 // which in turn prevents removing an empty loop.
1919 Node *id_old_phi = old_phi->Identity(&igvn);
1920 if( id_old_phi != old_phi ) { // Found a simple identity?
1921 // Note that I cannot call 'replace_node' here, because
1922 // that will yank the edge from old_phi to the Region and
1923 // I'm mid-iteration over the Region's uses.
1924 for (DUIterator_Last imin, i = old_phi->last_outs(imin); i >= imin; ) {
1925 Node* use = old_phi->last_out(i);
1926 igvn.rehash_node_delayed(use);
1927 uint uses_found = 0;
1928 for (uint j = 0; j < use->len(); j++) {
1929 if (use->in(j) == old_phi) {
1930 if (j < use->req()) use->set_req (j, id_old_phi);
1931 else use->set_prec(j, id_old_phi);
1932 uses_found++;
1933 }
1934 }
1935 i -= uses_found; // we deleted 1 or more copies of this edge
1936 }
1937 }
1938 igvn._worklist.push(old_phi);
1939 }
1940 }
1941 // Finally clean out the fall-in edges from the RegionNode
1942 for( i = oreq-1; i>0; i-- ) {
1943 if( !phase->is_member( this, _head->in(i) ) ) {
1944 _head->del_req(i);
1945 }
1946 }
1947 igvn.rehash_node_delayed(_head);
1948 // Transform landing pad
1949 igvn.register_new_node_with_optimizer(landing_pad, _head);
1950 // Insert landing pad into the header
1951 _head->add_req(landing_pad);
1952 }
1953
1954 //------------------------------split_outer_loop-------------------------------
1955 // Split out the outermost loop from this shared header.
1956 void IdealLoopTree::split_outer_loop( PhaseIdealLoop *phase ) {
1957 PhaseIterGVN &igvn = phase->_igvn;
1958
1959 // Find index of outermost loop; it should also be my tail.
1960 uint outer_idx = 1;
1961 while( _head->in(outer_idx) != _tail ) outer_idx++;
1962
1963 // Make a LoopNode for the outermost loop.
1964 Node *ctl = _head->in(LoopNode::EntryControl);
1965 Node *outer = new LoopNode( ctl, _head->in(outer_idx) );
1966 outer = igvn.register_new_node_with_optimizer(outer, _head);
1967 phase->set_created_loop_node();
1968
1969 // Outermost loop falls into '_head' loop
1970 _head->set_req(LoopNode::EntryControl, outer);
1971 _head->del_req(outer_idx);
1972 // Split all the Phis up between '_head' loop and 'outer' loop.
1973 for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) {
1974 Node *out = _head->fast_out(j);
1975 if( out->is_Phi() ) {
1976 PhiNode *old_phi = out->as_Phi();
1977 assert( old_phi->region() == _head, "" );
1978 Node *phi = PhiNode::make_blank(outer, old_phi);
1979 phi->init_req(LoopNode::EntryControl, old_phi->in(LoopNode::EntryControl));
1980 phi->init_req(LoopNode::LoopBackControl, old_phi->in(outer_idx));
1981 phi = igvn.register_new_node_with_optimizer(phi, old_phi);
1982 // Make old Phi point to new Phi on the fall-in path
1983 igvn.replace_input_of(old_phi, LoopNode::EntryControl, phi);
1984 old_phi->del_req(outer_idx);
1985 }
1986 }
1987
1988 // Use the new loop head instead of the old shared one
1989 _head = outer;
1990 phase->set_loop(_head, this);
1991 }
1992
1993 //------------------------------fix_parent-------------------------------------
1994 static void fix_parent( IdealLoopTree *loop, IdealLoopTree *parent ) {
1995 loop->_parent = parent;
1996 if( loop->_child ) fix_parent( loop->_child, loop );
1997 if( loop->_next ) fix_parent( loop->_next , parent );
1998 }
1999
2000 //------------------------------estimate_path_freq-----------------------------
2001 static float estimate_path_freq( Node *n ) {
2002 // Try to extract some path frequency info
2003 IfNode *iff;
2004 for( int i = 0; i < 50; i++ ) { // Skip through a bunch of uncommon tests
2005 uint nop = n->Opcode();
2006 if( nop == Op_SafePoint ) { // Skip any safepoint
2007 n = n->in(0);
2008 continue;
2009 }
2010 if( nop == Op_CatchProj ) { // Get count from a prior call
2011 // Assume call does not always throw exceptions: this means the call-site
2012 // count is also the frequency of the fall-through path.
2013 assert( n->is_CatchProj(), "" );
2014 if( ((CatchProjNode*)n)->_con != CatchProjNode::fall_through_index )
2015 return 0.0f; // Assume call exception path is rare
2016 Node *call = n->in(0)->in(0)->in(0);
2017 assert( call->is_Call(), "expect a call here" );
2018 const JVMState *jvms = ((CallNode*)call)->jvms();
2019 ciMethodData* methodData = jvms->method()->method_data();
2020 if (!methodData->is_mature()) return 0.0f; // No call-site data
2021 ciProfileData* data = methodData->bci_to_data(jvms->bci());
2022 if ((data == NULL) || !data->is_CounterData()) {
2023 // no call profile available, try call's control input
2024 n = n->in(0);
2025 continue;
2026 }
2027 return data->as_CounterData()->count()/FreqCountInvocations;
2028 }
2029 // See if there's a gating IF test
2030 Node *n_c = n->in(0);
2031 if( !n_c->is_If() ) break; // No estimate available
2032 iff = n_c->as_If();
2033 if( iff->_fcnt != COUNT_UNKNOWN ) // Have a valid count?
2034 // Compute how much count comes on this path
2035 return ((nop == Op_IfTrue) ? iff->_prob : 1.0f - iff->_prob) * iff->_fcnt;
2036 // Have no count info. Skip dull uncommon-trap-like branches.
2037 if( (nop == Op_IfTrue && iff->_prob < PROB_LIKELY_MAG(5)) ||
2038 (nop == Op_IfFalse && iff->_prob > PROB_UNLIKELY_MAG(5)) )
2039 break;
2040 // Skip through never-taken branch; look for a real loop exit.
2041 n = iff->in(0);
2042 }
2043 return 0.0f; // No estimate available
2044 }
2045
2046 //------------------------------merge_many_backedges---------------------------
2047 // Merge all the backedges from the shared header into a private Region.
2048 // Feed that region as the one backedge to this loop.
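// For example (sketch only): a shared header with inputs
// { fall_in, backedge_1, backedge_2, backedge_3 }, where backedge_3 is much
// hotter than the others, becomes { fall_in, r, backedge_3 } with a new
// Region r merging backedge_1 and backedge_2; keeping the hot backedge
// separate lets split_outer_loop() later carve out an inner loop around it.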
2049 void IdealLoopTree::merge_many_backedges( PhaseIdealLoop *phase ) {
2050 uint i;
2051
2052 // Scan for the top 2 hottest backedges
2053 float hotcnt = 0.0f;
2054 float warmcnt = 0.0f;
2055 uint hot_idx = 0;
2056 // Loop starts at 2 because slot 1 is the fall-in path
2057 for( i = 2; i < _head->req(); i++ ) {
2058 float cnt = estimate_path_freq(_head->in(i));
2059 if( cnt > hotcnt ) { // Grab hottest path
2060 warmcnt = hotcnt;
2061 hotcnt = cnt;
2062 hot_idx = i;
2063 } else if( cnt > warmcnt ) { // And 2nd hottest path
2064 warmcnt = cnt;
2065 }
2066 }
2067
2068 // See if the hottest backedge is worthy of being an inner loop
2069 // by being much hotter than the next hottest backedge.
2070 if( hotcnt <= 0.0001 ||
2071 hotcnt < 2.0*warmcnt ) hot_idx = 0;// No hot backedge
2072
2073 // Peel out the backedges into a private merge point; peel
2074 // them all except optionally hot_idx.
2075 PhaseIterGVN &igvn = phase->_igvn;
2076
2077 Node *hot_tail = NULL;
2078 // Make a Region for the merge point
2079 Node *r = new RegionNode(1);
2080 for( i = 2; i < _head->req(); i++ ) {
2081 if( i != hot_idx )
2082 r->add_req( _head->in(i) );
2083 else hot_tail = _head->in(i);
2084 }
2085 igvn.register_new_node_with_optimizer(r, _head);
2086 // Plug region into end of loop _head, followed by hot_tail
2087 while( _head->req() > 3 ) _head->del_req( _head->req()-1 );
2088 igvn.replace_input_of(_head, 2, r);
2089 if( hot_idx ) _head->add_req(hot_tail);
2090
2091 // Split all the Phis up between '_head' loop and the Region 'r'
2092 for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) {
2093 Node *out = _head->fast_out(j);
2094 if( out->is_Phi() ) {
2095 PhiNode* n = out->as_Phi();
2096 igvn.hash_delete(n); // Delete from hash before hacking edges
2097 Node *hot_phi = NULL;
2098 Node *phi = new PhiNode(r, n->type(), n->adr_type());
2099 // Check all inputs for the ones to peel out
2100 uint j = 1;
2101 for( uint i = 2; i < n->req(); i++ ) {
2102 if( i != hot_idx )
2103 phi->set_req( j++, n->in(i) );
2104 else hot_phi = n->in(i);
2105 }
2106 // Register the phi but do not transform until whole place transforms
2107 igvn.register_new_node_with_optimizer(phi, n);
2108 // Add the merge phi to the old Phi
2109 while( n->req() > 3 ) n->del_req( n->req()-1 );
2110 igvn.replace_input_of(n, 2, phi);
2111 if( hot_idx ) n->add_req(hot_phi);
2112 }
2113 }
2114
2115
2116 // Insert a new IdealLoopTree below me. Turn it into a clone
2117 // of self's loop tree. Turn self into a loop headed by _head and with
2118 // tail being the new merge point.
2119 IdealLoopTree *ilt = new IdealLoopTree( phase, _head, _tail );
2120 phase->set_loop(_tail,ilt); // Adjust tail
2121 _tail = r; // Self's tail is new merge point
2122 phase->set_loop(r,this);
2123 ilt->_child = _child; // New guy has my children
2124 _child = ilt; // Self has new guy as only child
2125 ilt->_parent = this; // new guy has self for parent
2126 ilt->_nest = _nest; // Same nesting depth (for now)
2127
2128 // Starting with 'ilt', look for child loop trees using the same shared
2129 // header. Flatten these out; they will no longer be loops in the end.
2130 IdealLoopTree **pilt = &_child;
2131 while( ilt ) {
2132 if( ilt->_head == _head ) {
2133 uint i;
2134 for( i = 2; i < _head->req(); i++ )
2135 if( _head->in(i) == ilt->_tail )
2136 break; // Still a loop
2137 if( i == _head->req() ) { // No longer a loop
2138 // Flatten ilt. Hang ilt's "_next" list from the end of
2139 // ilt's '_child' list. Move the ilt's _child up to replace ilt.
2140 IdealLoopTree **cp = &ilt->_child;
2141 while( *cp ) cp = &(*cp)->_next; // Find end of child list
2142 *cp = ilt->_next; // Hang next list at end of child list
2143 *pilt = ilt->_child; // Move child up to replace ilt
2144 ilt->_head = NULL; // Flag as a loop UNIONED into parent
2145 ilt = ilt->_child; // Repeat using new ilt
2146 continue; // do not advance over ilt->_child
2147 }
2148 assert( ilt->_tail == hot_tail, "expected to only find the hot inner loop here" );
2149 phase->set_loop(_head,ilt);
2150 }
2151 pilt = &ilt->_child; // Advance to next
2152 ilt = *pilt;
2153 }
2154
2155 if( _child ) fix_parent( _child, this );
2156 }
2157
2158 //------------------------------beautify_loops---------------------------------
2159 // Split shared headers and insert loop landing pads.
2160 // Insert a LoopNode to replace the RegionNode.
2161 // Return TRUE if loop tree is structurally changed.
2162 bool IdealLoopTree::beautify_loops( PhaseIdealLoop *phase ) {
2163 bool result = false;
2164 // Cache parts in locals for easy access
2165 PhaseIterGVN &igvn = phase->_igvn;
2166
2167 igvn.hash_delete(_head); // Yank from hash before hacking edges
2168
2169 // Check for multiple fall-in paths. Peel off a landing pad if need be.
2170 int fall_in_cnt = 0;
2171 for( uint i = 1; i < _head->req(); i++ )
2172 if( !phase->is_member( this, _head->in(i) ) )
2173 fall_in_cnt++;
2174 assert( fall_in_cnt, "at least 1 fall-in path" );
2175 if( fall_in_cnt > 1 ) // Need a loop landing pad to merge fall-ins
2176 split_fall_in( phase, fall_in_cnt );
2177
2178 // Swap inputs to the _head and all Phis to move the fall-in edge to
2179 // the left.
2180 fall_in_cnt = 1;
2181 while( phase->is_member( this, _head->in(fall_in_cnt) ) )
2182 fall_in_cnt++;
2183 if( fall_in_cnt > 1 ) {
2184 // Since I am just swapping inputs I do not need to update def-use info
2185 Node *tmp = _head->in(1);
2186 igvn.rehash_node_delayed(_head);
2187 _head->set_req( 1, _head->in(fall_in_cnt) );
2188 _head->set_req( fall_in_cnt, tmp );
2189 // Swap also all Phis
2190 for (DUIterator_Fast imax, i = _head->fast_outs(imax); i < imax; i++) {
2191 Node* phi = _head->fast_out(i);
2192 if( phi->is_Phi() ) {
2193 igvn.rehash_node_delayed(phi); // Yank from hash before hacking edges
2194 tmp = phi->in(1);
2195 phi->set_req( 1, phi->in(fall_in_cnt) );
2196 phi->set_req( fall_in_cnt, tmp );
2197 }
2198 }
2199 }
2200 assert( !phase->is_member( this, _head->in(1) ), "left edge is fall-in" );
2201 assert( phase->is_member( this, _head->in(2) ), "right edge is loop" );
2202
2203 // If I am a shared header (multiple backedges), peel off the many
2204 // backedges into a private merge point and use the merge point as
2205 // the one true backedge.
2206 if (_head->req() > 3) {
2207 // Merge the many backedges into a single backedge but leave
2208 // the hottest backedge as separate edge for the following peel.
2209 if (!_irreducible) {
2210 merge_many_backedges( phase );
2211 }
2212
2213 // When recursively beautifying my children, split_fall_in can change
2214 // the loop tree structure when I am an irreducible loop. Then the head
2215 // of my children may have a req() no bigger than 3. Here we need to set
2216 // result to true to catch that case in order to tell the caller to
2217 // rebuild the loop tree. See issue JDK-8244407 for details.
2218 result = true;
2219 }
2220
2221 // If I have one hot backedge, peel off my own loop.
2222 // I better be the outermost loop.
2223 if (_head->req() > 3 && !_irreducible) {
2224 split_outer_loop( phase );
2225 result = true;
2226
2227 } else if (!_head->is_Loop() && !_irreducible) {
2228 // Make a new LoopNode to replace the old loop head
2229 Node *l = new LoopNode( _head->in(1), _head->in(2) );
2230 l = igvn.register_new_node_with_optimizer(l, _head);
2231 phase->set_created_loop_node();
2232 // Go ahead and replace _head
2233 phase->_igvn.replace_node( _head, l );
2234 _head = l;
2235 phase->set_loop(_head, this);
2236 }
2237
2238 // Now recursively beautify nested loops
2239 if( _child ) result |= _child->beautify_loops( phase );
2240 if( _next ) result |= _next ->beautify_loops( phase );
2241 return result;
2242 }
2243
2244 //------------------------------allpaths_check_safepts----------------------------
2245 // Allpaths backwards scan from loop tail, terminating each path at first safepoint
2246 // encountered. Helper for check_safepts.
2247 void IdealLoopTree::allpaths_check_safepts(VectorSet &visited, Node_List &stack) {
2248 assert(stack.size() == 0, "empty stack");
2249 stack.push(_tail);
2250 visited.clear();
2251 visited.set(_tail->_idx);
2252 while (stack.size() > 0) {
2253 Node* n = stack.pop();
2254 if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) {
2255 // Terminate this path
2256 } else if (n->Opcode() == Op_SafePoint) {
2257 if (_phase->get_loop(n) != this) {
2258 if (_required_safept == NULL) _required_safept = new Node_List();
2259 _required_safept->push(n); // save the one closest to the tail
2260 }
2261 // Terminate this path
2262 } else {
2263 uint start = n->is_Region() ? 1 : 0;
2264 uint end = n->is_Region() && !n->is_Loop() ? n->req() : start + 1;
2265 for (uint i = start; i < end; i++) {
2266 Node* in = n->in(i);
2267 assert(in->is_CFG(), "must be");
2268 if (!visited.test_set(in->_idx) && is_member(_phase->get_loop(in))) {
2269 stack.push(in);
2270 }
2271 }
2272 }
2273 }
2274 }
2275
2276 //------------------------------check_safepts----------------------------
2277 // Given dominators, try to find loops with calls that must always be
2278 // executed (call dominates loop tail). These loops do not need non-call
2279 // safepoints (ncsfpt).
2280 //
2281 // A complication is that a safepoint in an inner loop may be needed
2282 // by an outer loop. In the following, the inner loop sees it has a
2283 // call (block 3) on every path from the head (block 2) to the
2284 // backedge (arc 3->2). So it deletes the ncsfpt (non-call safepoint)
2285 // in block 2, _but_ this leaves the outer loop without a safepoint.
2286 //
2287 // entry 0
2288 // |
2289 // v
2290 // outer 1,2 +->1
2291 // | |
2292 // | v
2293 // | 2<---+ ncsfpt in 2
2294 // |_/|\ |
2295 // | v |
2296 // inner 2,3 / 3 | call in 3
2297 // / | |
2298 // v +--+
2299 // exit 4
2300 //
2301 //
2302 // This method creates, for each loop, a list (_required_safept) of ncsfpt
2303 // nodes that must be protected. When an ncsfpt may be deleted, it is first
2304 // looked for in the lists of the outer loops of the current loop.
2305 //
2306 // The insights into the problem:
2307 // A) counted loops are okay
2308 // B) innermost loops are okay (only an inner loop can delete
2309 // a ncsfpt needed by an outer loop)
2310 // C) a loop is immune from an inner loop deleting a safepoint
2311 // if the loop has a call on the idom-path
2312 // D) a loop is also immune if it has a ncsfpt (non-call safepoint) on the
2313 // idom-path that is not in a nested loop
2314 // E) otherwise, an ncsfpt on the idom-path that is nested in an inner
2315 // loop needs to be prevented from deletion by an inner loop
2316 //
2317 // There are two analyses:
2318 // 1) The first, and cheaper one, scans the loop body from
2319 // tail to head following the idom (immediate dominator)
2320 // chain, looking for the cases (C,D,E) above.
2321 // Since inner loops are scanned before outer loops, there is summary
2322 // information about inner loops. Inner loops can be skipped over
2323 // when the tail of an inner loop is encountered.
2324 //
2325 // 2) The second, invoked if the first fails to find a call or ncsfpt on
2326 // the idom path (which is rare), scans all predecessor control paths
2327 // from the tail to the head, terminating a path when a call or sfpt
2328 // is encountered, to find the ncsfpt's that are closest to the tail.
2329 //
2330 void IdealLoopTree::check_safepts(VectorSet &visited, Node_List &stack) {
2331 // Bottom up traversal
2332 IdealLoopTree* ch = _child;
2333 if (_child) _child->check_safepts(visited, stack);
2334 if (_next) _next ->check_safepts(visited, stack);
2335
2336 if (!_head->is_CountedLoop() && !_has_sfpt && _parent != NULL && !_irreducible) {
2337 bool has_call = false; // call on dom-path
2338 bool has_local_ncsfpt = false; // ncsfpt on dom-path at this loop depth
2339 Node* nonlocal_ncsfpt = NULL; // ncsfpt on dom-path at a deeper depth
2340 // Scan the dom-path nodes from tail to head
2341 for (Node* n = tail(); n != _head; n = _phase->idom(n)) {
2342 if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) {
2343 has_call = true;
2344 _has_sfpt = 1; // Then no need for a safept!
2345 break;
2346 } else if (n->Opcode() == Op_SafePoint) {
2347 if (_phase->get_loop(n) == this) {
2348 has_local_ncsfpt = true;
2349 break;
2350 }
2351 if (nonlocal_ncsfpt == NULL) {
2352 nonlocal_ncsfpt = n; // save the one closest to the tail
2353 }
2354 } else {
2355 IdealLoopTree* nlpt = _phase->get_loop(n);
2356 if (this != nlpt) {
2357 // If at an inner loop tail, see if the inner loop has already
2358 // recorded seeing a call on the dom-path (and stop.) If not,
2359 // jump to the head of the inner loop.
2360 assert(is_member(nlpt), "nested loop");
2361 Node* tail = nlpt->_tail;
2362 if (tail->in(0)->is_If()) tail = tail->in(0);
2363 if (n == tail) {
2364 // If inner loop has call on dom-path, so does outer loop
2365 if (nlpt->_has_sfpt) {
2366 has_call = true;
2367 _has_sfpt = 1;
2368 break;
2369 }
2370 // Skip to head of inner loop
2371 assert(_phase->is_dominator(_head, nlpt->_head), "inner head dominated by outer head");
2372 n = nlpt->_head;
2373 }
2374 }
2375 }
2376 }
2377 // Record safepoints that this loop needs preserved when an
2378 // inner loop attempts to delete its safepoints.
2379 if (_child != NULL && !has_call && !has_local_ncsfpt) {
2380 if (nonlocal_ncsfpt != NULL) {
2381 if (_required_safept == NULL) _required_safept = new Node_List();
2382 _required_safept->push(nonlocal_ncsfpt);
2383 } else {
2384 // Failed to find a suitable safept on the dom-path. Now use
2385 // an all paths walk from tail to head, looking for safepoints to preserve.
2386 allpaths_check_safepts(visited, stack);
2387 }
2388 }
2389 }
2390 }
2391
2392 //---------------------------is_deleteable_safept----------------------------
2393 // Is safept not required by an outer loop?
2394 bool PhaseIdealLoop::is_deleteable_safept(Node* sfpt) {
2395 assert(sfpt->Opcode() == Op_SafePoint, "");
2396 IdealLoopTree* lp = get_loop(sfpt)->_parent;
2397 while (lp != NULL) {
2398 Node_List* sfpts = lp->_required_safept;
2399 if (sfpts != NULL) {
2400 for (uint i = 0; i < sfpts->size(); i++) {
2401 if (sfpt == sfpts->at(i))
2402 return false;
2403 }
2404 }
2405 lp = lp->_parent;
2406 }
2407 return true;
2408 }
2409
2410 //---------------------------replace_parallel_iv-------------------------------
2411 // Replace parallel induction variable (parallel to trip counter)
2412 void PhaseIdealLoop::replace_parallel_iv(IdealLoopTree *loop) {
2413 assert(loop->_head->is_CountedLoop(), "");
2414 CountedLoopNode *cl = loop->_head->as_CountedLoop();
2415 if (!cl->is_valid_counted_loop())
2416 return; // skip malformed counted loop
2417 Node *incr = cl->incr();
2418 if (incr == NULL)
2419 return; // Dead loop?
2420 Node *init = cl->init_trip();
2421 Node *phi = cl->phi();
2422 int stride_con = cl->stride_con();
2423
2424 // Visit all children, looking for Phis
2425 for (DUIterator i = cl->outs(); cl->has_out(i); i++) {
2426 Node *out = cl->out(i);
2427 // Look for other phis (secondary IVs). Skip dead ones
2428 if (!out->is_Phi() || out == phi || !has_node(out))
2429 continue;
2430 PhiNode* phi2 = out->as_Phi();
2431 Node *incr2 = phi2->in( LoopNode::LoopBackControl );
2432 // Look for induction variables of the form: X += constant
2433 if (phi2->region() != loop->_head ||
2434 incr2->req() != 3 ||
2435 incr2->in(1) != phi2 ||
2436 incr2 == incr ||
2437 incr2->Opcode() != Op_AddI ||
2438 !incr2->in(2)->is_Con())
2439 continue;
2440
2441 // Check for parallel induction variable (parallel to trip counter)
2442 // via an affine function. In particular, count-down loops with
2443 // count-up array indices are common. We only RCE references off
2444 // the trip-counter, so we need to convert all these to trip-counter
2445 // expressions.
2446 Node *init2 = phi2->in( LoopNode::EntryControl );
2447 int stride_con2 = incr2->in(2)->get_int();
2448
2449 // The ratio of the two strides cannot be represented as an int
2450 // if stride_con2 is min_int and stride_con is -1.
2451 if (stride_con2 == min_jint && stride_con == -1) {
2452 continue;
2453 }
2454
2455 // The general case here gets a little tricky. We want to find the
2456 // GCD of all possible parallel IV's and make a new IV using this
2457 // GCD for the loop. Then all possible IVs are simple multiples of
2458 // the GCD. In practice, this will cover very few extra loops.
2459 // Instead we require 'stride_con2' to be a multiple of 'stride_con',
2460 // where +/-1 is the common case, but other integer multiples are
2461 // also easy to handle.
2462 int ratio_con = stride_con2/stride_con;
2463
2464 if ((ratio_con * stride_con) == stride_con2) { // Check for exact
2465 #ifndef PRODUCT
2466 if (TraceLoopOpts) {
2467 tty->print("Parallel IV: %d ", phi2->_idx);
2468 loop->dump_head();
2469 }
2470 #endif
2471 // Convert to using the trip counter. The parallel induction
2472 // variable differs from the trip counter by a loop-invariant
2473 // amount, the difference between their respective initial values.
2474 // It is scaled by the 'ratio_con'.
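// Illustrative example (made-up values): with init == 0, stride_con == +1,
// init2 == 5 and stride_con2 == +2, ratio_con is 2 and phi2 is replaced by
//   add == 2 * phi + (5 - 2 * 0) == 2 * phi + 5
// which yields the same sequence 5, 7, 9, ... purely in terms of the
// trip counter phi.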
2475 Node* ratio = _igvn.intcon(ratio_con);
2476 set_ctrl(ratio, C->root());
2477 Node* ratio_init = new MulINode(init, ratio);
2478 _igvn.register_new_node_with_optimizer(ratio_init, init);
2479 set_early_ctrl(ratio_init);
2480 Node* diff = new SubINode(init2, ratio_init);
2481 _igvn.register_new_node_with_optimizer(diff, init2);
2482 set_early_ctrl(diff);
2483 Node* ratio_idx = new MulINode(phi, ratio);
2484 _igvn.register_new_node_with_optimizer(ratio_idx, phi);
2485 set_ctrl(ratio_idx, cl);
2486 Node* add = new AddINode(ratio_idx, diff);
2487 _igvn.register_new_node_with_optimizer(add);
2488 set_ctrl(add, cl);
2489 _igvn.replace_node( phi2, add );
2490 // Sometimes an induction variable is unused
2491 if (add->outcnt() == 0) {
2492 _igvn.remove_dead_node(add);
2493 }
2494 --i; // deleted this phi; rescan starting with next position
2495 continue;
2496 }
2497 }
2498 }
2499
2500 void IdealLoopTree::remove_safepoints(PhaseIdealLoop* phase, bool keep_one) {
2501 Node* keep = NULL;
2502 if (keep_one) {
2503 // Look for a safepoint on the idom-path.
2504 for (Node* i = tail(); i != _head; i = phase->idom(i)) {
2505 if (i->Opcode() == Op_SafePoint && phase->get_loop(i) == this) {
2506 keep = i;
2507 break; // Found one
2508 }
2509 }
2510 }
2511
2512 // Don't remove any safepoints if it is requested to keep a single safepoint and
2513 // no safepoint was found on idom-path. It is not safe to remove any safepoint
2514 // in this case since there's no safepoint dominating all paths in the loop body.
2515 bool prune = !keep_one || keep != NULL;
2516
2517 // Delete other safepoints in this loop.
2518 Node_List* sfpts = _safepts;
2519 if (prune && sfpts != NULL) {
2520 assert(keep == NULL || keep->Opcode() == Op_SafePoint, "not safepoint");
2521 for (uint i = 0; i < sfpts->size(); i++) {
2522 Node* n = sfpts->at(i);
2523 assert(phase->get_loop(n) == this, "");
2524 if (n != keep && phase->is_deleteable_safept(n)) {
2525 phase->lazy_replace(n, n->in(TypeFunc::Control));
2526 }
2527 }
2528 }
2529 }
2530
2531 //------------------------------counted_loop-----------------------------------
2532 // Convert to counted loops where possible
2533 void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) {
2534
2535 // For grins, set the inner-loop flag here
2536 if (!_child) {
2537 if (_head->is_Loop()) _head->as_Loop()->set_inner_loop();
2538 }
2539
2540 IdealLoopTree* loop = this;
2541 if (_head->is_CountedLoop() ||
2542 phase->is_counted_loop(_head, loop)) {
2543
2544 if (LoopStripMiningIter == 0 || (LoopStripMiningIter > 1 && _child == NULL)) {
2545 // Indicate we do not need a safepoint here
2546 _has_sfpt = 1;
2547 }
2548
2549 // Remove safepoints
2550 bool keep_one_sfpt = !(_has_call || _has_sfpt);
2551 remove_safepoints(phase, keep_one_sfpt);
2552
2553 // Look for induction variables
2554 phase->replace_parallel_iv(this);
2555
2556 } else if (_parent != NULL && !_irreducible) {
2557 // Not a counted loop. Keep one safepoint.
2558 bool keep_one_sfpt = true;
2559 remove_safepoints(phase, keep_one_sfpt);
2560 }
2561
2562 // Recursively process child and sibling loops
2563 assert(loop->_child != this || (loop->_head->as_Loop()->is_OuterStripMinedLoop() && _head->as_CountedLoop()->is_strip_mined()), "what kind of loop was added?");
2564 assert(loop->_child != this || (loop->_child->_child == NULL && loop->_child->_next == NULL), "would miss some loops");
2565 if (loop->_child && loop->_child != this) loop->_child->counted_loop(phase);
2566 if (loop->_next) loop->_next ->counted_loop(phase);
2567 }
2568
2569
2570 // The Estimated Loop Clone Size:
2571 // CloneFactor * (~112% * BodySize + BC) + CC + FanOutTerm,
2572 // where BC and CC are totally ad-hoc/magic "body" and "clone" constants,
2573 // respectively, used to ensure that the node usage estimates made are on the
2574 // safe side, for the most part. The FanOutTerm is an attempt to estimate the
2575 // possible additional/excessive nodes generated due to data and control flow
2576 // merging, for edges reaching outside the loop.
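// As an illustration (values made up): for a loop body of 100 nodes and
// factor == 2, sz == 100 + (100 + 7) / 8 == 113 and the base estimate is
// 2 * (113 + 13) + 17 == 269 nodes, before adding est_loop_flow_merge_sz().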
2577 uint IdealLoopTree::est_loop_clone_sz(uint factor) const {
2578
2579 precond(0 < factor && factor < 16);
2580
2581 uint const bc = 13;
2582 uint const cc = 17;
2583 uint const sz = _body.size() + (_body.size() + 7) / 8;
2584 uint estimate = factor * (sz + bc) + cc;
2585
2586 assert((estimate - cc) / factor == sz + bc, "overflow");
2587
2588 return estimate + est_loop_flow_merge_sz();
2589 }
2590
2591 // The Estimated Loop (full-) Unroll Size:
2592 // UnrollFactor * (~106% * BodySize) + CC + FanOutTerm,
2593 // where CC is a (totally) ad-hoc/magic "clone" constant, used to ensure that
2594 // node usage estimates made are on the safe side, for the most part. This is
2595 // a "light" version of the loop clone size calculation (above), based on the
2596 // assumption that most of the loop-construct overhead will be unraveled when
2597 // (fully) unrolled. Defined for unroll factors larger or equal to one (>=1),
2598 // including an overflow check and returning UINT_MAX in case of an overflow.
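// As an illustration (values made up): assuming the body minus EMPTY_LOOP_SIZE
// leaves b0 == 64 nodes, sz == 64 + (64 + 15) / 16 == 68, so unrolling with
// factor == 8 gives 8 * 68 + 7 == 551 nodes, plus est_loop_flow_merge_sz().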
2599 uint IdealLoopTree::est_loop_unroll_sz(uint factor) const {
2600
2601 precond(factor > 0);
2602
2603 // Take into account that after unrolling, conjoined heads and tails will fold.
2604 uint const b0 = _body.size() - EMPTY_LOOP_SIZE;
2605 uint const cc = 7;
2606 uint const sz = b0 + (b0 + 15) / 16;
2607 uint estimate = factor * sz + cc;
2608
2609 if ((estimate - cc) / factor != sz) {
2610 return UINT_MAX;
2611 }
2612
2613 return estimate + est_loop_flow_merge_sz();
2614 }
2615
2616 // Estimate the growth effect (in nodes) of merging control and data flow when
2617 // cloning a loop body, based on the amount of control and data flow reaching
2618 // outside of the (current) loop body.
2619 uint IdealLoopTree::est_loop_flow_merge_sz() const {
2620
2621 uint ctrl_edge_out_cnt = 0;
2622 uint data_edge_out_cnt = 0;
2623
2624 for (uint i = 0; i < _body.size(); i++) {
2625 Node* node = _body.at(i);
2626 uint outcnt = node->outcnt();
2627
2628 for (uint k = 0; k < outcnt; k++) {
2629 Node* out = node->raw_out(k);
2630 if (out == NULL) continue;
2631 if (out->is_CFG()) {
2632 if (!is_member(_phase->get_loop(out))) {
2633 ctrl_edge_out_cnt++;
2634 }
2635 } else if (_phase->has_ctrl(out)) {
2636 Node* ctrl = _phase->get_ctrl(out);
2637 assert(ctrl != NULL, "must be");
2638 assert(ctrl->is_CFG(), "must be");
2639 if (!is_member(_phase->get_loop(ctrl))) {
2640 data_edge_out_cnt++;
2641 }
2642 }
2643 }
2644 }
2645 // Use data and control count (x2.0) in estimate iff both are > 0. This is
2646 // a rather pessimistic estimate for the most part, in particular for some
2647 // complex loops, but still not enough to capture all loops.
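// For example (illustrative only): 3 control edges and 5 data edges leaving
// the loop body give a term of 2 * (3 + 5) == 16; if either count is zero,
// no flow-merge overhead is added.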
2648 if (ctrl_edge_out_cnt > 0 && data_edge_out_cnt > 0) {
2649 return 2 * (ctrl_edge_out_cnt + data_edge_out_cnt);
2650 }
2651 return 0;
2652 }
2653
2654 #ifndef PRODUCT
2655 //------------------------------dump_head--------------------------------------
2656 // Dump 1 liner for loop header info
2657 void IdealLoopTree::dump_head() const {
2658 tty->sp(2 * _nest);
2659 tty->print("Loop: N%d/N%d ", _head->_idx, _tail->_idx);
2660 if (_irreducible) tty->print(" IRREDUCIBLE");
2661 Node* entry = _head->is_Loop() ? _head->as_Loop()->skip_strip_mined(-1)->in(LoopNode::EntryControl) : _head->in(LoopNode::EntryControl);
2662 Node* predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
2663 if (predicate != NULL ) {
2664 tty->print(" limit_check");
2665 entry = PhaseIdealLoop::skip_loop_predicates(entry);
2666 }
2667 if (UseProfiledLoopPredicate) {
2668 predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate);
2669 if (predicate != NULL) {
2670 tty->print(" profile_predicated");
2671 entry = PhaseIdealLoop::skip_loop_predicates(entry);
2672 }
2673 }
2674 if (UseLoopPredicate) {
2675 predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
2676 if (predicate != NULL) {
2677 tty->print(" predicated");
2678 }
2679 }
2680 if (_head->is_CountedLoop()) {
2681 CountedLoopNode *cl = _head->as_CountedLoop();
2682 tty->print(" counted");
2683
2684 Node* init_n = cl->init_trip();
2685 if (init_n != NULL && init_n->is_Con())
2686 tty->print(" [%d,", cl->init_trip()->get_int());
2687 else
2688 tty->print(" [int,");
2689 Node* limit_n = cl->limit();
2690 if (limit_n != NULL && limit_n->is_Con())
2691 tty->print("%d),", cl->limit()->get_int());
2692 else
2693 tty->print("int),");
2694 int stride_con = cl->stride_con();
2695 if (stride_con > 0) tty->print("+");
2696 tty->print("%d", stride_con);
2697
2698 tty->print(" (%0.f iters) ", cl->profile_trip_cnt());
2699
2700 if (cl->is_pre_loop ()) tty->print(" pre" );
2701 if (cl->is_main_loop()) tty->print(" main");
2702 if (cl->is_post_loop()) tty->print(" post");
2703 if (cl->is_vectorized_loop()) tty->print(" vector");
2704 if (cl->range_checks_present()) tty->print(" rc ");
2705 if (cl->is_multiversioned()) tty->print(" multi ");
2706 }
2707 if (_has_call) tty->print(" has_call");
2708 if (_has_sfpt) tty->print(" has_sfpt");
2709 if (_rce_candidate) tty->print(" rce");
2710 if (_safepts != NULL && _safepts->size() > 0) {
2711 tty->print(" sfpts={"); _safepts->dump_simple(); tty->print(" }");
2712 }
2713 if (_required_safept != NULL && _required_safept->size() > 0) {
2714 tty->print(" req={"); _required_safept->dump_simple(); tty->print(" }");
2715 }
2716 if (Verbose) {
2717 tty->print(" body={"); _body.dump_simple(); tty->print(" }");
2718 }
2719 if (_head->is_Loop() && _head->as_Loop()->is_strip_mined()) {
2720 tty->print(" strip_mined");
2721 }
2722 tty->cr();
2723 }
2724
2725 //------------------------------dump-------------------------------------------
2726 // Dump loops by loop tree
2727 void IdealLoopTree::dump() const {
2728 dump_head();
2729 if (_child) _child->dump();
2730 if (_next) _next ->dump();
2731 }
2732
2733 #endif
2734
2735 static void log_loop_tree(IdealLoopTree* root, IdealLoopTree* loop, CompileLog* log) {
2736 if (loop == root) {
2737 if (loop->_child != NULL) {
2738 log->begin_head("loop_tree");
2739 log->end_head();
2740 if( loop->_child ) log_loop_tree(root, loop->_child, log);
2741 log->tail("loop_tree");
2742 assert(loop->_next == NULL, "what?");
2743 }
2744 } else {
2745 Node* head = loop->_head;
2746 log->begin_head("loop");
2747 log->print(" idx='%d' ", head->_idx);
2748 if (loop->_irreducible) log->print("irreducible='1' ");
2749 if (head->is_Loop()) {
2750 if (head->as_Loop()->is_inner_loop()) log->print("inner_loop='1' ");
2751 if (head->as_Loop()->is_partial_peel_loop()) log->print("partial_peel_loop='1' ");
2752 }
2753 if (head->is_CountedLoop()) {
2754 CountedLoopNode* cl = head->as_CountedLoop();
2755 if (cl->is_pre_loop()) log->print("pre_loop='%d' ", cl->main_idx());
2756 if (cl->is_main_loop()) log->print("main_loop='%d' ", cl->_idx);
2757 if (cl->is_post_loop()) log->print("post_loop='%d' ", cl->main_idx());
2758 }
2759 log->end_head();
2760 if( loop->_child ) log_loop_tree(root, loop->_child, log);
2761 log->tail("loop");
2762 if( loop->_next ) log_loop_tree(root, loop->_next, log);
2763 }
2764 }
2765
2766 //---------------------collect_potentially_useful_predicates-----------------------
2767 // Helper function to collect potentially useful predicates to prevent them from
2768 // being eliminated by PhaseIdealLoop::eliminate_useless_predicates
2769 void PhaseIdealLoop::collect_potentially_useful_predicates(
2770 IdealLoopTree * loop, Unique_Node_List &useful_predicates) {
2771 if (loop->_child) { // child
2772 collect_potentially_useful_predicates(loop->_child, useful_predicates);
2773 }
2774
2775 // self (only loops that we can apply loop predication may use their predicates)
2776 if (loop->_head->is_Loop() &&
2777 !loop->_irreducible &&
2778 !loop->tail()->is_top()) {
2779 LoopNode* lpn = loop->_head->as_Loop();
2780 Node* entry = lpn->in(LoopNode::EntryControl);
2781 Node* predicate_proj = find_predicate(entry); // loop_limit_check first
2782 if (predicate_proj != NULL) { // right pattern that can be used by loop predication
2783 assert(entry->in(0)->in(1)->in(1)->Opcode() == Op_Opaque1, "must be");
2784 useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one
2785 entry = skip_loop_predicates(entry);
2786 }
2787 if (UseProfiledLoopPredicate) {
2788 predicate_proj = find_predicate(entry); // Predicate
2789 if (predicate_proj != NULL) {
2790 useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one
2791 entry = skip_loop_predicates(entry);
2792 }
2793 }
2794 predicate_proj = find_predicate(entry); // Predicate
2795 if (predicate_proj != NULL) {
2796 useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one
2797 }
2798 }
2799
2800 if (loop->_next) { // sibling
2801 collect_potentially_useful_predicates(loop->_next, useful_predicates);
2802 }
2803 }
2804
2805 //------------------------eliminate_useless_predicates-----------------------------
2806 // Eliminate all inserted predicates if they could not be used by loop predication.
2807 // Note: it will also eliminate the loop limit check predicate since it also uses
2808 // an Opaque1 node (see Parse::add_predicate()).
2809 void PhaseIdealLoop::eliminate_useless_predicates() {
2810 if (C->predicate_count() == 0)
2811 return; // no predicate left
2812
2813 Unique_Node_List useful_predicates; // to store useful predicates
2814 if (C->has_loops()) {
2815 collect_potentially_useful_predicates(_ltree_root->_child, useful_predicates);
2816 }
2817
2818 for (int i = C->predicate_count(); i > 0; i--) {
2819 Node * n = C->predicate_opaque1_node(i-1);
2820 assert(n->Opcode() == Op_Opaque1, "must be");
2821 if (!useful_predicates.member(n)) { // not in the useful list
2822 _igvn.replace_node(n, n->in(1));
2823 }
2824 }
2825 }
2826
2827 //------------------------process_expensive_nodes-----------------------------
2828 // Expensive nodes have their control input set to prevent the GVN
2829 // from commoning them and, as a result, forcing the resulting node
2830 // onto a more frequently executed path. Use CFG information here to change
2831 // the control inputs so that some expensive nodes can be commoned while
2832 // not being executed more frequently.
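// For instance (an illustrative scenario, not from a specific benchmark): if
// both arms of an If compute the same expensive node pinned to the IfTrue and
// IfFalse projections, moving both control inputs up to the If's dominator
// lets the GVN common the two nodes without either one being executed more
// frequently than before.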
2833 bool PhaseIdealLoop::process_expensive_nodes() {
2834 assert(OptimizeExpensiveOps, "optimization off?");
2835
2836 // Sort nodes to bring similar nodes together
2837 C->sort_expensive_nodes();
2838
2839 bool progress = false;
2840
2841 for (int i = 0; i < C->expensive_count(); ) {
2842 Node* n = C->expensive_node(i);
2843 int start = i;
2844 // Find nodes similar to n
2845 i++;
2846 for (; i < C->expensive_count() && Compile::cmp_expensive_nodes(n, C->expensive_node(i)) == 0; i++);
2847 int end = i;
2848 // And compare them two by two
2849 for (int j = start; j < end; j++) {
2850 Node* n1 = C->expensive_node(j);
2851 if (is_node_unreachable(n1)) {
2852 continue;
2853 }
2854 for (int k = j+1; k < end; k++) {
2855 Node* n2 = C->expensive_node(k);
2856 if (is_node_unreachable(n2)) {
2857 continue;
2858 }
2859
2860 assert(n1 != n2, "should be pair of nodes");
2861
2862 Node* c1 = n1->in(0);
2863 Node* c2 = n2->in(0);
2864
2865 Node* parent_c1 = c1;
2866 Node* parent_c2 = c2;
2867
2868 // The call to get_early_ctrl_for_expensive() moves the
2869 // expensive nodes up but stops at loops that are in an if
2870 // branch. See whether we can exit the loop and move above the
2871 // If.
2872 if (c1->is_Loop()) {
2873 parent_c1 = c1->in(1);
2874 }
2875 if (c2->is_Loop()) {
2876 parent_c2 = c2->in(1);
2877 }
2878
2879 if (parent_c1 == parent_c2) {
2880 _igvn._worklist.push(n1);
2881 _igvn._worklist.push(n2);
2882 continue;
2883 }
2884
2885 // Look for identical expensive node up the dominator chain.
2886 if (is_dominator(c1, c2)) {
2887 c2 = c1;
2888 } else if (is_dominator(c2, c1)) {
2889 c1 = c2;
2890 } else if (parent_c1->is_Proj() && parent_c1->in(0)->is_If() &&
2891 parent_c2->is_Proj() && parent_c1->in(0) == parent_c2->in(0)) {
2892 // Both branches have the same expensive node so move it up
2893 // before the if.
2894 c1 = c2 = idom(parent_c1->in(0));
2895 }
2896 // Do the actual moves
2897 if (n1->in(0) != c1) {
2898 _igvn.hash_delete(n1);
2899 n1->set_req(0, c1);
2900 _igvn.hash_insert(n1);
2901 _igvn._worklist.push(n1);
2902 progress = true;
2903 }
2904 if (n2->in(0) != c2) {
2905 _igvn.hash_delete(n2);
2906 n2->set_req(0, c2);
2907 _igvn.hash_insert(n2);
2908 _igvn._worklist.push(n2);
2909 progress = true;
2910 }
2911 }
2912 }
2913 }
2914
2915 return progress;
2916 }
2917
2918
2919 //=============================================================================
2920 //----------------------------build_and_optimize-------------------------------
2921 // Create a PhaseLoop. Build the ideal Loop tree. Map each Ideal Node to
2922 // its corresponding LoopNode. If 'optimize' is true, do some loop cleanups.
2923 void PhaseIdealLoop::build_and_optimize(LoopOptsMode mode) {
2924 bool do_split_ifs = (mode == LoopOptsDefault);
2925 bool skip_loop_opts = (mode == LoopOptsNone);
2926
2927 int old_progress = C->major_progress();
2928 uint orig_worklist_size = _igvn._worklist.size();
2929
2930 // Reset major-progress flag for the driver's heuristics
2931 C->clear_major_progress();
2932
2933 #ifndef PRODUCT
2934 // Capture for later assert
2935 uint unique = C->unique();
2936 _loop_invokes++;
2937 _loop_work += unique;
2938 #endif
2939
2940 // True if the method has at least 1 irreducible loop
2941 _has_irreducible_loops = false;
2942
2943 _created_loop_node = false;
2944
2945 Arena *a = Thread::current()->resource_area();
2946 VectorSet visited(a);
2947 // Pre-grow the mapping from Nodes to IdealLoopTrees.
2948 _nodes.map(C->unique(), NULL);
2949 memset(_nodes.adr(), 0, wordSize * C->unique());
2950
2951 // Pre-build the top-level outermost loop tree entry
2952 _ltree_root = new IdealLoopTree( this, C->root(), C->root() );
2953 // Do not need a safepoint at the top level
2954 _ltree_root->_has_sfpt = 1;
2955
2956 // Initialize Dominators.
2957 // Checked in clone_loop_predicate() during beautify_loops().
2958 _idom_size = 0;
2959 _idom = NULL;
2960 _dom_depth = NULL;
2961 _dom_stk = NULL;
2962
2963 // Empty pre-order array
2964 allocate_preorders();
2965
2966 // Build a loop tree on the fly. Build a mapping from CFG nodes to
2967 // IdealLoopTree entries. Data nodes are NOT walked.
2968 build_loop_tree();
2969 // Check for bailout, and return
2970 if (C->failing()) {
2971 return;
2972 }
2973
2974 // No loops after all
2975 if( !_ltree_root->_child && !_verify_only ) C->set_has_loops(false);
2976
2977 // There should always be an outer loop containing the Root and Return nodes.
2978 // If not, we have a degenerate empty program. Bail out in this case.
2979 if (!has_node(C->root())) {
2980 if (!_verify_only) {
2981 C->clear_major_progress();
2982 C->record_method_not_compilable("empty program detected during loop optimization");
2983 }
2984 return;
2985 }
2986
2987 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2988 // Nothing to do, so get out
2989 bool stop_early = !C->has_loops() && !skip_loop_opts && !do_split_ifs && !_verify_me && !_verify_only &&
2990 !bs->is_gc_specific_loop_opts_pass(mode);
2991 bool do_expensive_nodes = C->should_optimize_expensive_nodes(_igvn);
2992 bool strip_mined_loops_expanded = bs->strip_mined_loops_expanded(mode);
2993 if (stop_early && !do_expensive_nodes) {
2994 _igvn.optimize(); // Cleanup NeverBranches
2995 return;
2996 }
2997
2998 // Set loop nesting depth
2999 _ltree_root->set_nest( 0 );
3000
3001 // Split shared headers and insert loop landing pads.
3002 // Do not bother doing this on the Root loop of course.
3003 if( !_verify_me && !_verify_only && _ltree_root->_child ) {
3004 C->print_method(PHASE_BEFORE_BEAUTIFY_LOOPS, 3);
3005 if( _ltree_root->_child->beautify_loops( this ) ) {
3006 // Re-build loop tree!
3007 _ltree_root->_child = NULL;
3008 _nodes.clear();
3009 reallocate_preorders();
3010 build_loop_tree();
3011 // Check for bailout, and return
3012 if (C->failing()) {
3013 return;
3014 }
3015 // Reset loop nesting depth
3016 _ltree_root->set_nest( 0 );
3017
3018 C->print_method(PHASE_AFTER_BEAUTIFY_LOOPS, 3);
3019 }
3020 }
3021
3022 // Build Dominators for elision of NULL checks & loop finding.
3023 // Since nodes do not have a slot for immediate dominator, make
3024 // a persistent side array for that info indexed on node->_idx.
3025 _idom_size = C->unique();
3026 _idom = NEW_RESOURCE_ARRAY( Node*, _idom_size );
3027 _dom_depth = NEW_RESOURCE_ARRAY( uint, _idom_size );
3028 _dom_stk = NULL; // Allocated on demand in recompute_dom_depth
3029 memset( _dom_depth, 0, _idom_size * sizeof(uint) );
3030
3031 Dominators();
3032
3033 if (!_verify_only) {
3034 // As a side effect, Dominators removed any unreachable CFG paths
3035 // into RegionNodes. It doesn't do this test against Root, so
3036 // we do it here.
3037 for( uint i = 1; i < C->root()->req(); i++ ) {
3038 if( !_nodes[C->root()->in(i)->_idx] ) { // Dead path into Root?
3039 _igvn.delete_input_of(C->root(), i);
3040 i--; // Rerun same iteration on compressed edges
3041 }
3042 }
3043
3044 // Given dominators, try to find inner loops with calls that must
3045 // always be executed (call dominates loop tail). These loops do
3046 // not need a separate safepoint.
3047 Node_List cisstack(a);
3048 _ltree_root->check_safepts(visited, cisstack);
3049 }
3050
3051 // Walk the DATA nodes and place into loops. Find earliest control
3052 // node. For CFG nodes, the _nodes array starts out and remains
3053 // holding the associated IdealLoopTree pointer. For DATA nodes, the
3054 // _nodes array holds the earliest legal controlling CFG node.
3055
3056 // Allocate stack with enough space to avoid frequent realloc
3057 int stack_size = (C->live_nodes() >> 1) + 16; // (live_nodes>>1)+16 from Java2D stats
3058 Node_Stack nstack( a, stack_size );
3059
3060 visited.clear();
3061 Node_List worklist(a);
3062 // Don't need C->root() on worklist since
3063 // it will be processed among C->top() inputs
3064 worklist.push(C->top());
3065 visited.set(C->top()->_idx); // Set C->top() as visited now
3066 build_loop_early( visited, worklist, nstack );
3067
3068 // Given early legal placement, try finding counted loops. This placement
3069 // is good enough to discover most loop invariants.
3070 if (!_verify_me && !_verify_only && !strip_mined_loops_expanded) {
3071 _ltree_root->counted_loop( this );
3072 }
3073
3074 // Find latest loop placement. Find ideal loop placement.
3075 visited.clear();
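  // The LCA tags are consumed by dom_lca_for_get_late_ctrl() while
  // computing the latest legal placements below.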
3076 init_dom_lca_tags();
3077 // Need C->root() on worklist when processing outs
3078 worklist.push(C->root());
3079 NOT_PRODUCT( C->verify_graph_edges(); )
3080 worklist.push(C->top());
3081 build_loop_late( visited, worklist, nstack );
3082
3083 if (_verify_only) {
3084 C->restore_major_progress(old_progress);
3085 assert(C->unique() == unique, "verification mode made Nodes? ? ?");
3086 assert(_igvn._worklist.size() == orig_worklist_size, "shouldn't push anything");
3087 return;
3088 }
3089
3090 // clear out the dead code after build_loop_late
3091 while (_deadlist.size()) {
3092 _igvn.remove_globally_dead_node(_deadlist.pop());
3093 }
3094
3095 if (stop_early) {
3096 assert(do_expensive_nodes, "why are we here?");
3097 if (process_expensive_nodes()) {
3098 // If we made some progress when processing expensive nodes then
3099 // the IGVN may modify the graph in a way that will allow us to
3100 // make some more progress: we need to try processing expensive
3101 // nodes again.
3102 C->set_major_progress();
3103 }
3104 _igvn.optimize();
3105 return;
3106 }
3107
3108 // Some parser-inserted loop predicates could never be used by loop
3109 // predication or they were moved away from loop during some optimizations.
3110 // For example, peeling. Eliminate them before next loop optimizations.
3111 eliminate_useless_predicates();
3112
3113 #ifndef PRODUCT
3114 C->verify_graph_edges();
3115 if (_verify_me) { // Nested verify pass?
3116 // Check to see if the verify mode is broken
3117 assert(C->unique() == unique, "non-optimize mode made Nodes? ? ?");
3118 return;
3119 }
3120 if (VerifyLoopOptimizations) verify();
3121 if (TraceLoopOpts && C->has_loops()) {
3122 _ltree_root->dump();
3123 }
3124 #endif
3125
3126 if (skip_loop_opts) {
3127 // restore major progress flag
3128 C->restore_major_progress(old_progress);
3129
3130 // Cleanup any modified bits
3131 _igvn.optimize();
3132
3133 if (C->log() != NULL) {
3134 log_loop_tree(_ltree_root, _ltree_root, C->log());
3135 }
3136 return;
3137 }
3138
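  // LoopOptsMaxUnroll mode: only attempt to maximally unroll innermost
  // counted loops without calls, then clean up and return.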
3139 if (mode == LoopOptsMaxUnroll) {
3140 for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
3141 IdealLoopTree* lpt = iter.current();
3142 if (lpt->is_innermost() && lpt->_allow_optimizations && !lpt->_has_call && lpt->is_counted()) {
3143 lpt->compute_trip_count(this);
3144 if (!lpt->do_one_iteration_loop(this) &&
3145 !lpt->do_remove_empty_loop(this)) {
3146 AutoNodeBudget node_budget(this);
3147 if (lpt->_head->as_CountedLoop()->is_normal_loop() &&
3148 lpt->policy_maximally_unroll(this)) {
3149 memset( worklist.adr(), 0, worklist.Size()*sizeof(Node*) );
3150 do_maximally_unroll(lpt, worklist);
3151 }
3152 }
3153 }
3154 }
3155
3156 C->restore_major_progress(old_progress);
3157
3158 _igvn.optimize();
3159
3160 if (C->log() != NULL) {
3161 log_loop_tree(_ltree_root, _ltree_root, C->log());
3162 }
3163 return;
3164 }
3165
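  // Give the GC barrier set a chance to run its own loop optimizations
  // for this mode; if it did, clean up and return.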
3166 if (bs->optimize_loops(this, mode, visited, nstack, worklist)) {
3167 _igvn.optimize();
3168 if (C->log() != NULL) {
3169 log_loop_tree(_ltree_root, _ltree_root, C->log());
3170 }
3171 return;
3172 }
3173
3174 if (ReassociateInvariants) {
3175 // Reassociate invariants and prep for split_thru_phi
3176 for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
3177 IdealLoopTree* lpt = iter.current();
3178 bool is_counted = lpt->is_counted();
3179 if (!is_counted || !lpt->is_innermost()) continue;
3180
      // Check for vectorized loops; any reassociation of invariants was already done.
3182 if (is_counted && lpt->_head->as_CountedLoop()->is_unroll_only()) {
3183 continue;
3184 } else {
3185 AutoNodeBudget node_budget(this);
3186 lpt->reassociate_invariants(this);
3187 }
3188 // Because RCE opportunities can be masked by split_thru_phi,
3189 // look for RCE candidates and inhibit split_thru_phi
3190 // on just their loop-phi's for this pass of loop opts
3191 if (SplitIfBlocks && do_split_ifs) {
3192 AutoNodeBudget node_budget(this, AutoNodeBudget::NO_BUDGET_CHECK);
3193 if (lpt->policy_range_check(this)) {
3194 lpt->_rce_candidate = 1; // = true
3195 }
3196 }
3197 }
3198 }
3199
3200 // Check for aggressive application of split-if and other transforms
3201 // that require basic-block info (like cloning through Phi's)
3202 if( SplitIfBlocks && do_split_ifs ) {
3203 visited.clear();
3204 split_if_with_blocks( visited, nstack);
3205 NOT_PRODUCT( if( VerifyLoopOptimizations ) verify(); );
3206 }
3207
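  // If nothing else changed the graph, try to make progress on expensive
  // nodes; success counts as major progress so loop opts run again.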
3208 if (!C->major_progress() && do_expensive_nodes && process_expensive_nodes()) {
3209 C->set_major_progress();
3210 }
3211
3212 // Perform loop predication before iteration splitting
3213 if (C->has_loops() && !C->major_progress() && (C->predicate_count() > 0)) {
3214 _ltree_root->_child->loop_predication(this);
3215 }
3216
3217 if (OptimizeFill && UseLoopPredicate && C->has_loops() && !C->major_progress()) {
3218 if (do_intrinsify_fill()) {
3219 C->set_major_progress();
3220 }
3221 }
3222
3223 // Perform iteration-splitting on inner loops. Split iterations to avoid
3224 // range checks or one-shot null checks.
3225
  // If split-ifs didn't hack the graph too badly (no CFG changes)
  // then do loop opts.
3228 if (C->has_loops() && !C->major_progress()) {
3229 memset( worklist.adr(), 0, worklist.Size()*sizeof(Node*) );
3230 _ltree_root->_child->iteration_split( this, worklist );
3231 // No verify after peeling! GCM has hoisted code out of the loop.
3232 // After peeling, the hoisted code could sink inside the peeled area.
3233 // The peeling code does not try to recompute the best location for
3234 // all the code before the peeled area, so the verify pass will always
3235 // complain about it.
3236 }
3237 // Do verify graph edges in any case
3238 NOT_PRODUCT( C->verify_graph_edges(); );
3239
3240 if (!do_split_ifs) {
    // We saw major progress in Split-If to get here. We forced a
    // pass with unrolling and not split-if; however, more split-ifs
    // might make progress. If the unrolling didn't make progress,
    // then the major-progress flag got cleared and we won't try
    // another round of Split-If. In particular, the ever-common
    // instance-of/check-cast pattern requires at least 2 rounds of
    // Split-If to clear out.
3248 C->set_major_progress();
3249 }
3250
3251 // Repeat loop optimizations if new loops were seen
3252 if (created_loop_node()) {
3253 C->set_major_progress();
3254 }
3255
3256 // Keep loop predicates and perform optimizations with them
3257 // until no more loop optimizations could be done.
3258 // After that switch predicates off and do more loop optimizations.
3259 if (!C->major_progress() && (C->predicate_count() > 0)) {
3260 C->cleanup_loop_predicates(_igvn);
3261 if (TraceLoopOpts) {
3262 tty->print_cr("PredicatesOff");
3263 }
3264 C->set_major_progress();
3265 }
3266
3267 // Convert scalar to superword operations at the end of all loop opts.
3268 if (UseSuperWord && C->has_loops() && !C->major_progress()) {
3269 // SuperWord transform
3270 SuperWord sw(this);
3271 for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
3272 IdealLoopTree* lpt = iter.current();
3273 if (lpt->is_counted()) {
3274 CountedLoopNode *cl = lpt->_head->as_CountedLoop();
3275
3276 if (PostLoopMultiversioning && cl->is_rce_post_loop() && !cl->is_vectorized_loop()) {
          // Check that the rce'd post loop is encountered first; multiversion after all
          // major main loop optimizations have concluded.
3279 if (!C->major_progress()) {
3280 IdealLoopTree *lpt_next = lpt->_next;
3281 if (lpt_next && lpt_next->is_counted()) {
3282 CountedLoopNode *cl = lpt_next->_head->as_CountedLoop();
3283 has_range_checks(lpt_next);
3284 if (cl->is_post_loop() && cl->range_checks_present()) {
3285 if (!cl->is_multiversioned()) {
3286 if (multi_version_post_loops(lpt, lpt_next) == false) {
3287 // Cause the rce loop to be optimized away if we fail
3288 cl->mark_is_multiversioned();
3289 cl->set_slp_max_unroll(0);
3290 poison_rce_post_loop(lpt);
3291 }
3292 }
3293 }
3294 }
3295 sw.transform_loop(lpt, true);
3296 }
3297 } else if (cl->is_main_loop()) {
3298 sw.transform_loop(lpt, true);
3299 }
3300 }
3301 }
3302 }
3303
3304 // Cleanup any modified bits
3305 _igvn.optimize();
3306
3307 // disable assert until issue with split_flow_path is resolved (6742111)
3308 // assert(!_has_irreducible_loops || C->parsed_irreducible_loop() || C->is_osr_compilation(),
3309 // "shouldn't introduce irreducible loops");
3310
3311 if (C->log() != NULL) {
3312 log_loop_tree(_ltree_root, _ltree_root, C->log());
3313 }
3314 }
3315
3316 #ifndef PRODUCT
3317 //------------------------------print_statistics-------------------------------
3318 int PhaseIdealLoop::_loop_invokes=0;// Count of PhaseIdealLoop invokes
3319 int PhaseIdealLoop::_loop_work=0; // Sum of PhaseIdealLoop x unique
3320 void PhaseIdealLoop::print_statistics() {
3321 tty->print_cr("PhaseIdealLoop=%d, sum _unique=%d", _loop_invokes, _loop_work);
3322 }
3323
3324 //------------------------------verify-----------------------------------------
3325 // Build a verify-only PhaseIdealLoop, and see that it agrees with me.
static int fail;                      // debug only, so we don't care that it is not multi-thread safe
3327 void PhaseIdealLoop::verify() const {
3328 int old_progress = C->major_progress();
3329 ResourceMark rm;
3330 PhaseIdealLoop loop_verify( _igvn, this );
3331 VectorSet visited(Thread::current()->resource_area());
3332
3333 fail = 0;
3334 verify_compare( C->root(), &loop_verify, visited );
3335 assert( fail == 0, "verify loops failed" );
3336 // Verify loop structure is the same
3337 _ltree_root->verify_tree(loop_verify._ltree_root, NULL);
3338 // Reset major-progress. It was cleared by creating a verify version of
3339 // PhaseIdealLoop.
3340 C->restore_major_progress(old_progress);
3341 }
3342
3343 //------------------------------verify_compare---------------------------------
3344 // Make sure me and the given PhaseIdealLoop agree on key data structures
3345 void PhaseIdealLoop::verify_compare( Node *n, const PhaseIdealLoop *loop_verify, VectorSet &visited ) const {
3346 if( !n ) return;
3347 if( visited.test_set( n->_idx ) ) return;
3348 if( !_nodes[n->_idx] ) { // Unreachable
3349 assert( !loop_verify->_nodes[n->_idx], "both should be unreachable" );
3350 return;
3351 }
3352
3353 uint i;
3354 for( i = 0; i < n->req(); i++ )
3355 verify_compare( n->in(i), loop_verify, visited );
3356
3357 // Check the '_nodes' block/loop structure
3358 i = n->_idx;
3359 if( has_ctrl(n) ) { // We have control; verify has loop or ctrl
3360 if( _nodes[i] != loop_verify->_nodes[i] &&
3361 get_ctrl_no_update(n) != loop_verify->get_ctrl_no_update(n) ) {
3362 tty->print("Mismatched control setting for: ");
3363 n->dump();
3364 if( fail++ > 10 ) return;
3365 Node *c = get_ctrl_no_update(n);
3366 tty->print("We have it as: ");
3367 if( c->in(0) ) c->dump();
3368 else tty->print_cr("N%d",c->_idx);
3369 tty->print("Verify thinks: ");
3370 if( loop_verify->has_ctrl(n) )
3371 loop_verify->get_ctrl_no_update(n)->dump();
3372 else
3373 loop_verify->get_loop_idx(n)->dump();
3374 tty->cr();
3375 }
3376 } else { // We have a loop
3377 IdealLoopTree *us = get_loop_idx(n);
3378 if( loop_verify->has_ctrl(n) ) {
3379 tty->print("Mismatched loop setting for: ");
3380 n->dump();
3381 if( fail++ > 10 ) return;
3382 tty->print("We have it as: ");
3383 us->dump();
3384 tty->print("Verify thinks: ");
3385 loop_verify->get_ctrl_no_update(n)->dump();
3386 tty->cr();
3387 } else if (!C->major_progress()) {
3388 // Loop selection can be messed up if we did a major progress
3389 // operation, like split-if. Do not verify in that case.
3390 IdealLoopTree *them = loop_verify->get_loop_idx(n);
3391 if( us->_head != them->_head || us->_tail != them->_tail ) {
        tty->print("Unequal loops for: ");
3393 n->dump();
3394 if( fail++ > 10 ) return;
3395 tty->print("We have it as: ");
3396 us->dump();
3397 tty->print("Verify thinks: ");
3398 them->dump();
3399 tty->cr();
3400 }
3401 }
3402 }
3403
3404 // Check for immediate dominators being equal
3405 if( i >= _idom_size ) {
3406 if( !n->is_CFG() ) return;
3407 tty->print("CFG Node with no idom: ");
3408 n->dump();
3409 return;
3410 }
3411 if( !n->is_CFG() ) return;
3412 if( n == C->root() ) return; // No IDOM here
3413
3414 assert(n->_idx == i, "sanity");
3415 Node *id = idom_no_update(n);
3416 if( id != loop_verify->idom_no_update(n) ) {
    tty->print("Unequal idoms for: ");
3418 n->dump();
3419 if( fail++ > 10 ) return;
3420 tty->print("We have it as: ");
3421 id->dump();
3422 tty->print("Verify thinks: ");
3423 loop_verify->idom_no_update(n)->dump();
3424 tty->cr();
3425 }
3426
3427 }
3428
3429 //------------------------------verify_tree------------------------------------
3430 // Verify that tree structures match. Because the CFG can change, siblings
3431 // within the loop tree can be reordered. We attempt to deal with that by
3432 // reordering the verify's loop tree if possible.
3433 void IdealLoopTree::verify_tree(IdealLoopTree *loop, const IdealLoopTree *parent) const {
3434 assert( _parent == parent, "Badly formed loop tree" );
3435
3436 // Siblings not in same order? Attempt to re-order.
3437 if( _head != loop->_head ) {
3438 // Find _next pointer to update
3439 IdealLoopTree **pp = &loop->_parent->_child;
3440 while( *pp != loop )
3441 pp = &((*pp)->_next);
3442 // Find proper sibling to be next
3443 IdealLoopTree **nn = &loop->_next;
3444 while( (*nn) && (*nn)->_head != _head )
3445 nn = &((*nn)->_next);
3446
3447 // Check for no match.
3448 if( !(*nn) ) {
3449 // Annoyingly, irreducible loops can pick different headers
3450 // after a major_progress operation, so the rest of the loop
3451 // tree cannot be matched.
3452 if (_irreducible && Compile::current()->major_progress()) return;
3453 assert( 0, "failed to match loop tree" );
3454 }
3455
3456 // Move (*nn) to (*pp)
3457 IdealLoopTree *hit = *nn;
3458 *nn = hit->_next;
3459 hit->_next = loop;
3460 *pp = loop;
3461 loop = hit;
3462 // Now try again to verify
3463 }
3464
3465 assert( _head == loop->_head , "mismatched loop head" );
3466 Node *tail = _tail; // Inline a non-updating version of
3467 while( !tail->in(0) ) // the 'tail()' call.
3468 tail = tail->in(1);
3469 assert( tail == loop->_tail, "mismatched loop tail" );
3470
3471 // Counted loops that are guarded should be able to find their guards
3472 if( _head->is_CountedLoop() && _head->as_CountedLoop()->is_main_loop() ) {
3473 CountedLoopNode *cl = _head->as_CountedLoop();
3474 Node *init = cl->init_trip();
3475 Node *ctrl = cl->in(LoopNode::EntryControl);
3476 assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" );
3477 Node *iff = ctrl->in(0);
3478 assert( iff->Opcode() == Op_If, "" );
3479 Node *bol = iff->in(1);
3480 assert( bol->Opcode() == Op_Bool, "" );
3481 Node *cmp = bol->in(1);
3482 assert( cmp->Opcode() == Op_CmpI, "" );
3483 Node *add = cmp->in(1);
3484 Node *opaq;
3485 if( add->Opcode() == Op_Opaque1 ) {
3486 opaq = add;
3487 } else {
3488 assert( add->Opcode() == Op_AddI || add->Opcode() == Op_ConI , "" );
3489 assert( add == init, "" );
3490 opaq = cmp->in(2);
3491 }
3492 assert( opaq->Opcode() == Op_Opaque1, "" );
3493
3494 }
3495
3496 if (_child != NULL) _child->verify_tree(loop->_child, this);
3497 if (_next != NULL) _next ->verify_tree(loop->_next, parent);
3498 // Innermost loops need to verify loop bodies,
3499 // but only if no 'major_progress'
3500 int fail = 0;
3501 if (!Compile::current()->major_progress() && _child == NULL) {
3502 for( uint i = 0; i < _body.size(); i++ ) {
3503 Node *n = _body.at(i);
3504 if (n->outcnt() == 0) continue; // Ignore dead
3505 uint j;
3506 for( j = 0; j < loop->_body.size(); j++ )
3507 if( loop->_body.at(j) == n )
3508 break;
3509 if( j == loop->_body.size() ) { // Not found in loop body
        // Last-ditch effort to avoid the assertion: it's possible that we
        // have some users (so outcnt is not zero) but are still dead.
        // Try to find the node from the root.
3513 if (Compile::current()->root()->find(n->_idx)) {
3514 fail++;
3515 tty->print("We have that verify does not: ");
3516 n->dump();
3517 }
3518 }
3519 }
3520 for( uint i2 = 0; i2 < loop->_body.size(); i2++ ) {
3521 Node *n = loop->_body.at(i2);
3522 if (n->outcnt() == 0) continue; // Ignore dead
3523 uint j;
3524 for( j = 0; j < _body.size(); j++ )
3525 if( _body.at(j) == n )
3526 break;
3527 if( j == _body.size() ) { // Not found in loop body
        // Last-ditch effort to avoid the assertion: it's possible that we
        // have some users (so outcnt is not zero) but are still dead.
        // Try to find the node from the root.
3531 if (Compile::current()->root()->find(n->_idx)) {
3532 fail++;
3533 tty->print("Verify has that we do not: ");
3534 n->dump();
3535 }
3536 }
3537 }
3538 assert( !fail, "loop body mismatch" );
3539 }
3540 }
3541
3542 #endif
3543
3544 //------------------------------set_idom---------------------------------------
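// Record 'n' as the immediate dominator of 'd' at the given dominator-tree
// depth, growing the side arrays on demand.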
3545 void PhaseIdealLoop::set_idom(Node* d, Node* n, uint dom_depth) {
3546 uint idx = d->_idx;
3547 if (idx >= _idom_size) {
3548 uint newsize = next_power_of_2(idx);
3549 _idom = REALLOC_RESOURCE_ARRAY( Node*, _idom,_idom_size,newsize);
3550 _dom_depth = REALLOC_RESOURCE_ARRAY( uint, _dom_depth,_idom_size,newsize);
3551 memset( _dom_depth + _idom_size, 0, (newsize - _idom_size) * sizeof(uint) );
3552 _idom_size = newsize;
3553 }
3554 _idom[idx] = n;
3555 _dom_depth[idx] = dom_depth;
3556 }
3557
3558 //------------------------------recompute_dom_depth---------------------------------------
3559 // The dominator tree is constructed with only parent pointers.
// This recomputes the depth in the tree by first tagging all
// nodes with a "no depth yet" marker. The next pass then runs up
// the dom tree from each node marked "no depth yet" and computes
3563 // the depth on the way back down.
3564 void PhaseIdealLoop::recompute_dom_depth() {
3565 uint no_depth_marker = C->unique();
3566 uint i;
3567 // Initialize depth to "no depth yet" and realize all lazy updates
3568 for (i = 0; i < _idom_size; i++) {
    // Only indices with a _dom_depth have a Node* or NULL (otherwise uninitialized).
3570 if (_dom_depth[i] > 0 && _idom[i] != NULL) {
3571 _dom_depth[i] = no_depth_marker;
3572
3573 // heal _idom if it has a fwd mapping in _nodes
3574 if (_idom[i]->in(0) == NULL) {
3575 idom(i);
3576 }
3577 }
3578 }
3579 if (_dom_stk == NULL) {
3580 uint init_size = C->live_nodes() / 100; // Guess that 1/100 is a reasonable initial size.
3581 if (init_size < 10) init_size = 10;
3582 _dom_stk = new GrowableArray<uint>(init_size);
3583 }
3584 // Compute new depth for each node.
3585 for (i = 0; i < _idom_size; i++) {
3586 uint j = i;
3587 // Run up the dom tree to find a node with a depth
3588 while (_dom_depth[j] == no_depth_marker) {
3589 _dom_stk->push(j);
3590 j = _idom[j]->_idx;
3591 }
3592 // Compute the depth on the way back down this tree branch
3593 uint dd = _dom_depth[j] + 1;
3594 while (_dom_stk->length() > 0) {
3595 uint j = _dom_stk->pop();
3596 _dom_depth[j] = dd;
3597 dd++;
3598 }
3599 }
3600 }
3601
3602 //------------------------------sort-------------------------------------------
3603 // Insert 'loop' into the existing loop tree. 'innermost' is a leaf of the
3604 // loop tree, not the root.
3605 IdealLoopTree *PhaseIdealLoop::sort( IdealLoopTree *loop, IdealLoopTree *innermost ) {
3606 if( !innermost ) return loop; // New innermost loop
3607
3608 int loop_preorder = get_preorder(loop->_head); // Cache pre-order number
3609 assert( loop_preorder, "not yet post-walked loop" );
3610 IdealLoopTree **pp = &innermost; // Pointer to previous next-pointer
3611 IdealLoopTree *l = *pp; // Do I go before or after 'l'?
3612
3613 // Insert at start of list
3614 while( l ) { // Insertion sort based on pre-order
3615 if( l == loop ) return innermost; // Already on list!
3616 int l_preorder = get_preorder(l->_head); // Cache pre-order number
3617 assert( l_preorder, "not yet post-walked l" );
3618 // Check header pre-order number to figure proper nesting
3619 if( loop_preorder > l_preorder )
3620 break; // End of insertion
3621 // If headers tie (e.g., shared headers) check tail pre-order numbers.
3622 // Since I split shared headers, you'd think this could not happen.
3623 // BUT: I must first do the preorder numbering before I can discover I
3624 // have shared headers, so the split headers all get the same preorder
3625 // number as the RegionNode they split from.
3626 if( loop_preorder == l_preorder &&
3627 get_preorder(loop->_tail) < get_preorder(l->_tail) )
3628 break; // Also check for shared headers (same pre#)
3629 pp = &l->_parent; // Chain up list
3630 l = *pp;
3631 }
3632 // Link into list
3633 // Point predecessor to me
3634 *pp = loop;
3635 // Point me to successor
3636 IdealLoopTree *p = loop->_parent;
3637 loop->_parent = l; // Point me to successor
3638 if( p ) sort( p, innermost ); // Insert my parents into list as well
3639 return innermost;
3640 }
3641
3642 //------------------------------build_loop_tree--------------------------------
// I use a modified Vick/Tarjan algorithm. I need pre- and post-visit
3644 // bits. The _nodes[] array is mapped by Node index and holds a NULL for
3645 // not-yet-pre-walked, pre-order # for pre-but-not-post-walked and holds the
3646 // tightest enclosing IdealLoopTree for post-walked.
3647 //
3648 // During my forward walk I do a short 1-layer lookahead to see if I can find
// a loop backedge that doesn't have any work on the backedge. This
3650 // helps me construct nested loops with shared headers better.
3651 //
3652 // Once I've done the forward recursion, I do the post-work. For each child
3653 // I check to see if there is a backedge. Backedges define a loop! I
3654 // insert an IdealLoopTree at the target of the backedge.
3655 //
3656 // During the post-work I also check to see if I have several children
3657 // belonging to different loops. If so, then this Node is a decision point
3658 // where control flow can choose to change loop nests. It is at this
3659 // decision point where I can figure out how loops are nested. At this
3660 // time I can properly order the different loop nests from my children.
3661 // Note that there may not be any backedges at the decision point!
3662 //
3663 // Since the decision point can be far removed from the backedges, I can't
3664 // order my loops at the time I discover them. Thus at the decision point
3665 // I need to inspect loop header pre-order numbers to properly nest my
// loops. This means I need to sort my children's loops by pre-order.
3667 // The sort is of size number-of-control-children, which generally limits
3668 // it to size 2 (i.e., I just choose between my 2 target loops).
3669 void PhaseIdealLoop::build_loop_tree() {
3670 // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
3671 GrowableArray <Node *> bltstack(C->live_nodes() >> 1);
3672 Node *n = C->root();
3673 bltstack.push(n);
3674 int pre_order = 1;
3675 int stack_size;
3676
3677 while ( ( stack_size = bltstack.length() ) != 0 ) {
3678 n = bltstack.top(); // Leave node on stack
3679 if ( !is_visited(n) ) {
3680 // ---- Pre-pass Work ----
3681 // Pre-walked but not post-walked nodes need a pre_order number.
3682
3683 set_preorder_visited( n, pre_order ); // set as visited
3684
3685 // ---- Scan over children ----
3686 // Scan first over control projections that lead to loop headers.
3687 // This helps us find inner-to-outer loops with shared headers better.
3688
3689 // Scan children's children for loop headers.
3690 for ( int i = n->outcnt() - 1; i >= 0; --i ) {
3691 Node* m = n->raw_out(i); // Child
3692 if( m->is_CFG() && !is_visited(m) ) { // Only for CFG children
3693 // Scan over children's children to find loop
3694 for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
3695 Node* l = m->fast_out(j);
3696 if( is_visited(l) && // Been visited?
3697 !is_postvisited(l) && // But not post-visited
3698 get_preorder(l) < pre_order ) { // And smaller pre-order
3699 // Found! Scan the DFS down this path before doing other paths
3700 bltstack.push(m);
3701 break;
3702 }
3703 }
3704 }
3705 }
3706 pre_order++;
3707 }
3708 else if ( !is_postvisited(n) ) {
3709 // Note: build_loop_tree_impl() adds out edges on rare occasions,
3710 // such as com.sun.rsasign.am::a.
3711 // For non-recursive version, first, process current children.
3712 // On next iteration, check if additional children were added.
3713 for ( int k = n->outcnt() - 1; k >= 0; --k ) {
3714 Node* u = n->raw_out(k);
3715 if ( u->is_CFG() && !is_visited(u) ) {
3716 bltstack.push(u);
3717 }
3718 }
3719 if ( bltstack.length() == stack_size ) {
3720 // There were no additional children, post visit node now
3721 (void)bltstack.pop(); // Remove node from stack
3722 pre_order = build_loop_tree_impl( n, pre_order );
3723 // Check for bailout
3724 if (C->failing()) {
3725 return;
3726 }
3727 // Check to grow _preorders[] array for the case when
3728 // build_loop_tree_impl() adds new nodes.
3729 check_grow_preorders();
3730 }
3731 }
3732 else {
3733 (void)bltstack.pop(); // Remove post-visited node from stack
3734 }
3735 }
3736 }
3737
3738 //------------------------------build_loop_tree_impl---------------------------
3739 int PhaseIdealLoop::build_loop_tree_impl( Node *n, int pre_order ) {
3740 // ---- Post-pass Work ----
3741 // Pre-walked but not post-walked nodes need a pre_order number.
3742
3743 // Tightest enclosing loop for this Node
3744 IdealLoopTree *innermost = NULL;
3745
3746 // For all children, see if any edge is a backedge. If so, make a loop
3747 // for it. Then find the tightest enclosing loop for the self Node.
3748 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3749 Node* m = n->fast_out(i); // Child
3750 if( n == m ) continue; // Ignore control self-cycles
3751 if( !m->is_CFG() ) continue;// Ignore non-CFG edges
3752
3753 IdealLoopTree *l; // Child's loop
3754 if( !is_postvisited(m) ) { // Child visited but not post-visited?
3755 // Found a backedge
3756 assert( get_preorder(m) < pre_order, "should be backedge" );
3757 // Check for the RootNode, which is already a LoopNode and is allowed
3758 // to have multiple "backedges".
3759 if( m == C->root()) { // Found the root?
3760 l = _ltree_root; // Root is the outermost LoopNode
3761 } else { // Else found a nested loop
        // Insert an IdealLoopTree to mark this loop.
3763 l = new IdealLoopTree(this, m, n);
3764 } // End of Else found a nested loop
3765 if( !has_loop(m) ) // If 'm' does not already have a loop set
3766 set_loop(m, l); // Set loop header to loop now
3767
3768 } else { // Else not a nested loop
3769 if( !_nodes[m->_idx] ) continue; // Dead code has no loop
3770 l = get_loop(m); // Get previously determined loop
3771 // If successor is header of a loop (nest), move up-loop till it
3772 // is a member of some outer enclosing loop. Since there are no
3773 // shared headers (I've split them already) I only need to go up
3774 // at most 1 level.
3775 while( l && l->_head == m ) // Successor heads loop?
3776 l = l->_parent; // Move up 1 for me
3777 // If this loop is not properly parented, then this loop
      // has no exit path out, i.e., it's an infinite loop.
3779 if( !l ) {
3780 // Make loop "reachable" from root so the CFG is reachable. Basically
3781 // insert a bogus loop exit that is never taken. 'm', the loop head,
3782 // points to 'n', one (of possibly many) fall-in paths. There may be
3783 // many backedges as well.
3784
3785 // Here I set the loop to be the root loop. I could have, after
3786 // inserting a bogus loop exit, restarted the recursion and found my
3787 // new loop exit. This would make the infinite loop a first-class
3788 // loop and it would then get properly optimized. What's the use of
3789 // optimizing an infinite loop?
3790 l = _ltree_root; // Oops, found infinite loop
3791
3792 if (!_verify_only) {
          // Insert the NeverBranch between 'm' and its control user.
3794 NeverBranchNode *iff = new NeverBranchNode( m );
3795 _igvn.register_new_node_with_optimizer(iff);
3796 set_loop(iff, l);
3797 Node *if_t = new CProjNode( iff, 0 );
3798 _igvn.register_new_node_with_optimizer(if_t);
3799 set_loop(if_t, l);
3800
3801 Node* cfg = NULL; // Find the One True Control User of m
3802 for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
3803 Node* x = m->fast_out(j);
3804 if (x->is_CFG() && x != m && x != iff)
3805 { cfg = x; break; }
3806 }
3807 assert(cfg != NULL, "must find the control user of m");
3808 uint k = 0; // Probably cfg->in(0)
          while( cfg->in(k) != m ) k++; // But check in case cfg is a Region
3810 cfg->set_req( k, if_t ); // Now point to NeverBranch
3811 _igvn._worklist.push(cfg);
3812
3813 // Now create the never-taken loop exit
3814 Node *if_f = new CProjNode( iff, 1 );
3815 _igvn.register_new_node_with_optimizer(if_f);
3816 set_loop(if_f, l);
3817 // Find frame ptr for Halt. Relies on the optimizer
3818 // V-N'ing. Easier and quicker than searching through
3819 // the program structure.
3820 Node *frame = new ParmNode( C->start(), TypeFunc::FramePtr );
3821 _igvn.register_new_node_with_optimizer(frame);
3822 // Halt & Catch Fire
3823 Node* halt = new HaltNode(if_f, frame, "never-taken loop exit reached");
3824 _igvn.register_new_node_with_optimizer(halt);
3825 set_loop(halt, l);
3826 C->root()->add_req(halt);
3827 }
3828 set_loop(C->root(), _ltree_root);
3829 }
3830 }
3831 // Weeny check for irreducible. This child was already visited (this
3832 // IS the post-work phase). Is this child's loop header post-visited
3833 // as well? If so, then I found another entry into the loop.
3834 if (!_verify_only) {
3835 while( is_postvisited(l->_head) ) {
3836 // found irreducible
3837 l->_irreducible = 1; // = true
3838 l = l->_parent;
3839 _has_irreducible_loops = true;
3840 // Check for bad CFG here to prevent crash, and bailout of compile
3841 if (l == NULL) {
3842 C->record_method_not_compilable("unhandled CFG detected during loop optimization");
3843 return pre_order;
3844 }
3845 }
3846 C->set_has_irreducible_loop(_has_irreducible_loops);
3847 }
3848
    // This Node might be a decision point for loops. It is one only if
    // its children belong to several different loops. The sort call
3851 // does a trivial amount of work if there is only 1 child or all
3852 // children belong to the same loop. If however, the children
3853 // belong to different loops, the sort call will properly set the
3854 // _parent pointers to show how the loops nest.
3855 //
3856 // In any case, it returns the tightest enclosing loop.
3857 innermost = sort( l, innermost );
3858 }
3859
3860 // Def-use info will have some dead stuff; dead stuff will have no
3861 // loop decided on.
3862
3863 // Am I a loop header? If so fix up my parent's child and next ptrs.
3864 if( innermost && innermost->_head == n ) {
3865 assert( get_loop(n) == innermost, "" );
3866 IdealLoopTree *p = innermost->_parent;
3867 IdealLoopTree *l = innermost;
3868 while( p && l->_head == n ) {
      l->_next = p->_child;   // Put self on parent's 'next child' list
      p->_child = l;          // Make self the first child of the parent
3871 l = p; // Now walk up the parent chain
3872 p = l->_parent;
3873 }
3874 } else {
3875 // Note that it is possible for a LoopNode to reach here, if the
3876 // backedge has been made unreachable (hence the LoopNode no longer
3877 // denotes a Loop, and will eventually be removed).
3878
3879 // Record tightest enclosing loop for self. Mark as post-visited.
3880 set_loop(n, innermost);
3881 // Also record has_call flag early on
3882 if( innermost ) {
3883 if( n->is_Call() && !n->is_CallLeaf() && !n->is_macro() ) {
3884 // Do not count uncommon calls
3885 if( !n->is_CallStaticJava() || !n->as_CallStaticJava()->_name ) {
3886 Node *iff = n->in(0)->in(0);
          // No calls allowed in vectorized loops.
3888 if( UseSuperWord || !iff->is_If() ||
3889 (n->in(0)->Opcode() == Op_IfFalse &&
3890 (1.0 - iff->as_If()->_prob) >= 0.01) ||
3891 (iff->as_If()->_prob >= 0.01) )
3892 innermost->_has_call = 1;
3893 }
3894 } else if( n->is_Allocate() && (n->as_Allocate()->_is_scalar_replaceable || n->as_Allocate()->_is_stack_allocateable) ) {
3895 // Disable loop optimizations if the loop has a scalar replaceable
        // allocation. This disabling may cause a performance loss
        // if the allocation is not eliminated for some reason.
3898 innermost->_allow_optimizations = false;
3899 innermost->_has_call = 1; // = true
3900 } else if (n->Opcode() == Op_SafePoint) {
3901 // Record all safepoints in this loop.
3902 if (innermost->_safepts == NULL) innermost->_safepts = new Node_List();
3903 innermost->_safepts->push(n);
3904 }
3905 }
3906 }
3907
3908 // Flag as post-visited now
3909 set_postvisited(n);
3910 return pre_order;
3911 }
3912
3913
3914 //------------------------------build_loop_early-------------------------------
3915 // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping.
3916 // First pass computes the earliest controlling node possible. This is the
3917 // controlling input with the deepest dominating depth.
3918 void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) {
3919 while (worklist.size() != 0) {
3920 // Use local variables nstack_top_n & nstack_top_i to cache values
3921 // on nstack's top.
3922 Node *nstack_top_n = worklist.pop();
3923 uint nstack_top_i = 0;
3924 //while_nstack_nonempty:
3925 while (true) {
3926 // Get parent node and next input's index from stack's top.
3927 Node *n = nstack_top_n;
3928 uint i = nstack_top_i;
3929 uint cnt = n->req(); // Count of inputs
3930 if (i == 0) { // Pre-process the node.
3931 if( has_node(n) && // Have either loop or control already?
3932 !has_ctrl(n) ) { // Have loop picked out already?
3933 // During "merge_many_backedges" we fold up several nested loops
3934 // into a single loop. This makes the members of the original
          // loop bodies point to dead loops; they need to move up
3936 // to the new UNION'd larger loop. I set the _head field of these
3937 // dead loops to NULL and the _parent field points to the owning
3938 // loop. Shades of UNION-FIND algorithm.
3939 IdealLoopTree *ilt;
3940 while( !(ilt = get_loop(n))->_head ) {
3941 // Normally I would use a set_loop here. But in this one special
3942 // case, it is legal (and expected) to change what loop a Node
3943 // belongs to.
3944 _nodes.map(n->_idx, (Node*)(ilt->_parent) );
3945 }
3946 // Remove safepoints ONLY if I've already seen I don't need one.
3947 // (the old code here would yank a 2nd safepoint after seeing a
3948 // first one, even though the 1st did not dominate in the loop body
3949 // and thus could be avoided indefinitely)
3950 if( !_verify_only && !_verify_me && ilt->_has_sfpt && n->Opcode() == Op_SafePoint &&
3951 is_deleteable_safept(n)) {
3952 Node *in = n->in(TypeFunc::Control);
3953 lazy_replace(n,in); // Pull safepoint now
3954 if (ilt->_safepts != NULL) {
3955 ilt->_safepts->yank(n);
3956 }
3957 // Carry on with the recursion "as if" we are walking
3958 // only the control input
3959 if( !visited.test_set( in->_idx ) ) {
3960 worklist.push(in); // Visit this guy later, using worklist
3961 }
3962 // Get next node from nstack:
3963 // - skip n's inputs processing by setting i > cnt;
3964 // - we also will not call set_early_ctrl(n) since
3965 // has_node(n) == true (see the condition above).
3966 i = cnt + 1;
3967 }
3968 }
3969 } // if (i == 0)
3970
3971 // Visit all inputs
3972 bool done = true; // Assume all n's inputs will be processed
3973 while (i < cnt) {
3974 Node *in = n->in(i);
3975 ++i;
3976 if (in == NULL) continue;
3977 if (in->pinned() && !in->is_CFG())
3978 set_ctrl(in, in->in(0));
3979 int is_visited = visited.test_set( in->_idx );
3980 if (!has_node(in)) { // No controlling input yet?
3981 assert( !in->is_CFG(), "CFG Node with no controlling input?" );
3982 assert( !is_visited, "visit only once" );
3983 nstack.push(n, i); // Save parent node and next input's index.
3984 nstack_top_n = in; // Process current input now.
3985 nstack_top_i = 0;
3986 done = false; // Not all n's inputs processed.
3987 break; // continue while_nstack_nonempty;
3988 } else if (!is_visited) {
3989 // This guy has a location picked out for him, but has not yet
3990 // been visited. Happens to all CFG nodes, for instance.
3991 // Visit him using the worklist instead of recursion, to break
3992 // cycles. Since he has a location already we do not need to
3993 // find his location before proceeding with the current Node.
3994 worklist.push(in); // Visit this guy later, using worklist
3995 }
3996 }
3997 if (done) {
3998 // All of n's inputs have been processed, complete post-processing.
3999
4000 // Compute earliest point this Node can go.
4001 // CFG, Phi, pinned nodes already know their controlling input.
4002 if (!has_node(n)) {
4003 // Record earliest legal location
4004 set_early_ctrl( n );
4005 }
4006 if (nstack.is_empty()) {
4007 // Finished all nodes on stack.
4008 // Process next node on the worklist.
4009 break;
4010 }
4011 // Get saved parent node and next input's index.
4012 nstack_top_n = nstack.node();
4013 nstack_top_i = nstack.index();
4014 nstack.pop();
4015 }
4016 } // while (true)
4017 }
4018 }
4019
4020 //------------------------------dom_lca_internal--------------------------------
4021 // Pair-wise LCA
4022 Node *PhaseIdealLoop::dom_lca_internal( Node *n1, Node *n2 ) const {
4023 if( !n1 ) return n2; // Handle NULL original LCA
4024 assert( n1->is_CFG(), "" );
4025 assert( n2->is_CFG(), "" );
4026 // find LCA of all uses
4027 uint d1 = dom_depth(n1);
4028 uint d2 = dom_depth(n2);
4029 while (n1 != n2) {
4030 if (d1 > d2) {
4031 n1 = idom(n1);
4032 d1 = dom_depth(n1);
4033 } else if (d1 < d2) {
4034 n2 = idom(n2);
4035 d2 = dom_depth(n2);
4036 } else {
4037 // Here d1 == d2. Due to edits of the dominator-tree, sections
4038 // of the tree might have the same depth. These sections have
4039 // to be searched more carefully.
4040
4041 // Scan up all the n1's with equal depth, looking for n2.
4042 Node *t1 = idom(n1);
4043 while (dom_depth(t1) == d1) {
4044 if (t1 == n2) return n2;
4045 t1 = idom(t1);
4046 }
4047 // Scan up all the n2's with equal depth, looking for n1.
4048 Node *t2 = idom(n2);
4049 while (dom_depth(t2) == d2) {
4050 if (t2 == n1) return n1;
4051 t2 = idom(t2);
4052 }
4053 // Move up to a new dominator-depth value as well as up the dom-tree.
4054 n1 = t1;
4055 n2 = t2;
4056 d1 = dom_depth(n1);
4057 d2 = dom_depth(n2);
4058 }
4059 }
4060 return n1;
4061 }
4062
4063 //------------------------------compute_idom-----------------------------------
4064 // Locally compute IDOM using dom_lca call. Correct only if the incoming
4065 // IDOMs are correct.
4066 Node *PhaseIdealLoop::compute_idom( Node *region ) const {
4067 assert( region->is_Region(), "" );
4068 Node *LCA = NULL;
4069 for( uint i = 1; i < region->req(); i++ ) {
4070 if( region->in(i) != C->top() )
4071 LCA = dom_lca( LCA, region->in(i) );
4072 }
4073 return LCA;
4074 }
4075
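// Debug-only check that 'early' is reachable from 'LCA' by walking up the
// dominator tree; otherwise report that the use is not dominated by the def.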
4076 bool PhaseIdealLoop::verify_dominance(Node* n, Node* use, Node* LCA, Node* early) {
4077 bool had_error = false;
4078 #ifdef ASSERT
4079 if (early != C->root()) {
4080 // Make sure that there's a dominance path from LCA to early
4081 Node* d = LCA;
4082 while (d != early) {
4083 if (d == C->root()) {
4084 dump_bad_graph("Bad graph detected in compute_lca_of_uses", n, early, LCA);
4085 tty->print_cr("*** Use %d isn't dominated by def %d ***", use->_idx, n->_idx);
4086 had_error = true;
4087 break;
4088 }
4089 d = idom(d);
4090 }
4091 }
4092 #endif
4093 return had_error;
4094 }
4095
4096
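// Compute the dominator-tree LCA of all uses of 'n', stopping early once
// the LCA reaches 'early'.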
4097 Node* PhaseIdealLoop::compute_lca_of_uses(Node* n, Node* early, bool verify) {
4098 // Compute LCA over list of uses
4099 bool had_error = false;
4100 Node *LCA = NULL;
4101 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && LCA != early; i++) {
4102 Node* c = n->fast_out(i);
4103 if (_nodes[c->_idx] == NULL)
4104 continue; // Skip the occasional dead node
4105 if( c->is_Phi() ) { // For Phis, we must land above on the path
4106 for( uint j=1; j<c->req(); j++ ) {// For all inputs
4107 if( c->in(j) == n ) { // Found matching input?
4108 Node *use = c->in(0)->in(j);
4109 if (_verify_only && use->is_top()) continue;
4110 LCA = dom_lca_for_get_late_ctrl( LCA, use, n );
4111 if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error;
4112 }
4113 }
4114 } else {
4115 // For CFG data-users, use is in the block just prior
4116 Node *use = has_ctrl(c) ? get_ctrl(c) : c->in(0);
4117 LCA = dom_lca_for_get_late_ctrl( LCA, use, n );
4118 if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error;
4119 }
4120 }
4121 assert(!had_error, "bad dominance");
4122 return LCA;
4123 }
4124
4125 // Check the shape of the graph at the loop entry. In some cases,
4126 // the shape of the graph does not match the shape outlined below.
// That happens when the Opaque1 node "protecting" the shape of
// the graph has been removed by, for example, the IGVN performed
// in PhaseIdealLoop::build_and_optimize().
4130 //
4131 // After the Opaque1 node has been removed, optimizations (e.g., split-if,
4132 // loop unswitching, and IGVN, or a combination of them) can freely change
4133 // the graph's shape. As a result, the graph shape outlined below cannot
4134 // be guaranteed anymore.
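// The shape checked below is: skip_predicates() -> If projection -> If ->
// Bool -> Cmp, with an Opaque1 node feeding the Cmp.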
4135 bool PhaseIdealLoop::is_canonical_loop_entry(CountedLoopNode* cl) {
4136 if (!cl->is_main_loop() && !cl->is_post_loop()) {
4137 return false;
4138 }
4139 Node* ctrl = cl->skip_predicates();
4140
4141 if (ctrl == NULL || (!ctrl->is_IfTrue() && !ctrl->is_IfFalse())) {
4142 return false;
4143 }
4144 Node* iffm = ctrl->in(0);
4145 if (iffm == NULL || !iffm->is_If()) {
4146 return false;
4147 }
4148 Node* bolzm = iffm->in(1);
4149 if (bolzm == NULL || !bolzm->is_Bool()) {
4150 return false;
4151 }
4152 Node* cmpzm = bolzm->in(1);
4153 if (cmpzm == NULL || !cmpzm->is_Cmp()) {
4154 return false;
4155 }
4156 // compares can get conditionally flipped
4157 bool found_opaque = false;
4158 for (uint i = 1; i < cmpzm->req(); i++) {
4159 Node* opnd = cmpzm->in(i);
4160 if (opnd && opnd->Opcode() == Op_Opaque1) {
4161 found_opaque = true;
4162 break;
4163 }
4164 }
4165 if (!found_opaque) {
4166 return false;
4167 }
4168 return true;
4169 }
4170
4171 //------------------------------get_late_ctrl----------------------------------
4172 // Compute latest legal control.
4173 Node *PhaseIdealLoop::get_late_ctrl( Node *n, Node *early ) {
4174 assert(early != NULL, "early control should not be NULL");
4175
4176 Node* LCA = compute_lca_of_uses(n, early);
4177 #ifdef ASSERT
4178 if (LCA == C->root() && LCA != early) {
4179 // def doesn't dominate uses so print some useful debugging output
4180 compute_lca_of_uses(n, early, true);
4181 }
4182 #endif
4183
4184 // if this is a load, check for anti-dependent stores
4185 // We use a conservative algorithm to identify potential interfering
4186 // instructions and for rescheduling the load. The users of the memory
4187 // input of this load are examined. Any use which is not a load and is
4188 // dominated by early is considered a potentially interfering store.
4189 // This can produce false positives.
4190 if (n->is_Load() && LCA != early) {
4191 int load_alias_idx = C->get_alias_index(n->adr_type());
4192 if (C->alias_type(load_alias_idx)->is_rewritable()) {
4193
4194 Node_List worklist;
4195
4196 Node *mem = n->in(MemNode::Memory);
4197 for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
4198 Node* s = mem->fast_out(i);
4199 worklist.push(s);
4200 }
4201 while(worklist.size() != 0 && LCA != early) {
4202 Node* s = worklist.pop();
4203 if (s->is_Load() || s->Opcode() == Op_SafePoint ||
4204 (s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0)) {
4205 continue;
4206 } else if (s->is_MergeMem()) {
4207 for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) {
4208 Node* s1 = s->fast_out(i);
4209 worklist.push(s1);
4210 }
4211 } else {
4212 Node *sctrl = has_ctrl(s) ? get_ctrl(s) : s->in(0);
4213 const TypePtr* adr_type = s->adr_type();
4214 if (s->is_ArrayCopy()) {
4215 // Copy to known instance needs destination type to test for aliasing
4216 const TypePtr* dest_type = s->as_ArrayCopy()->_dest_type;
4217 if (dest_type != TypeOopPtr::BOTTOM) {
4218 adr_type = dest_type;
4219 }
4220 }
4221 assert(sctrl != NULL || !s->is_reachable_from_root(), "must have control");
4222 if (sctrl != NULL && !sctrl->is_top() && C->can_alias(adr_type, load_alias_idx) && is_dominator(early, sctrl)) {
4223 LCA = dom_lca_for_get_late_ctrl(LCA, sctrl, n);
4224 }
4225 }
4226 }
4227 }
4228 }
4229
4230 assert(LCA == find_non_split_ctrl(LCA), "unexpected late control");
4231 return LCA;
4232 }
4233
4234 // true if CFG node d dominates CFG node n
4235 bool PhaseIdealLoop::is_dominator(Node *d, Node *n) {
4236 if (d == n)
4237 return true;
4238 assert(d->is_CFG() && n->is_CFG(), "must have CFG nodes");
4239 uint dd = dom_depth(d);
4240 while (dom_depth(n) >= dd) {
4241 if (n == d)
4242 return true;
4243 n = idom(n);
4244 }
4245 return false;
4246 }
4247
4248 //------------------------------dom_lca_for_get_late_ctrl_internal-------------
4249 // Pair-wise LCA with tags.
4250 // Tag each index with the node 'tag' currently being processed
4251 // before advancing up the dominator chain using idom().
4252 // Later calls that find a match to 'tag' know that this path has already
4253 // been considered in the current LCA (which is input 'n1' by convention).
4254 // Since get_late_ctrl() is only called once for each node, the tag array
4255 // does not need to be cleared between calls to get_late_ctrl().
4256 // Algorithm trades a larger constant factor for better asymptotic behavior
4257 //
4258 Node *PhaseIdealLoop::dom_lca_for_get_late_ctrl_internal( Node *n1, Node *n2, Node *tag ) {
4259 uint d1 = dom_depth(n1);
4260 uint d2 = dom_depth(n2);
4261
4262 do {
4263 if (d1 > d2) {
4264 // current lca is deeper than n2
4265 _dom_lca_tags.map(n1->_idx, tag);
4266 n1 = idom(n1);
4267 d1 = dom_depth(n1);
4268 } else if (d1 < d2) {
4269 // n2 is deeper than current lca
4270 Node *memo = _dom_lca_tags[n2->_idx];
4271 if( memo == tag ) {
4272 return n1; // Return the current LCA
4273 }
4274 _dom_lca_tags.map(n2->_idx, tag);
4275 n2 = idom(n2);
4276 d2 = dom_depth(n2);
4277 } else {
4278 // Here d1 == d2. Due to edits of the dominator-tree, sections
4279 // of the tree might have the same depth. These sections have
4280 // to be searched more carefully.
4281
4282 // Scan up all the n1's with equal depth, looking for n2.
4283 _dom_lca_tags.map(n1->_idx, tag);
4284 Node *t1 = idom(n1);
4285 while (dom_depth(t1) == d1) {
4286 if (t1 == n2) return n2;
4287 _dom_lca_tags.map(t1->_idx, tag);
4288 t1 = idom(t1);
4289 }
4290 // Scan up all the n2's with equal depth, looking for n1.
4291 _dom_lca_tags.map(n2->_idx, tag);
4292 Node *t2 = idom(n2);
4293 while (dom_depth(t2) == d2) {
4294 if (t2 == n1) return n1;
4295 _dom_lca_tags.map(t2->_idx, tag);
4296 t2 = idom(t2);
4297 }
4298 // Move up to a new dominator-depth value as well as up the dom-tree.
4299 n1 = t1;
4300 n2 = t2;
4301 d1 = dom_depth(n1);
4302 d2 = dom_depth(n2);
4303 }
4304 } while (n1 != n2);
4305 return n1;
4306 }
4307
4308 //------------------------------init_dom_lca_tags------------------------------
// Tag could be a node's integer index, 32 bits instead of 64 bits in some cases
4310 // Intended use does not involve any growth for the array, so it could
4311 // be of fixed size.
4312 void PhaseIdealLoop::init_dom_lca_tags() {
4313 uint limit = C->unique() + 1;
4314 _dom_lca_tags.map( limit, NULL );
4315 #ifdef ASSERT
4316 for( uint i = 0; i < limit; ++i ) {
4317 assert(_dom_lca_tags[i] == NULL, "Must be distinct from each node pointer");
4318 }
4319 #endif // ASSERT
4320 }
4321
4322 //------------------------------clear_dom_lca_tags------------------------------
// Tag could be a node's integer index, 32 bits instead of 64 bits in some cases
4324 // Intended use does not involve any growth for the array, so it could
4325 // be of fixed size.
4326 void PhaseIdealLoop::clear_dom_lca_tags() {
4327 uint limit = C->unique() + 1;
4328 _dom_lca_tags.map( limit, NULL );
4329 _dom_lca_tags.clear();
4330 #ifdef ASSERT
4331 for( uint i = 0; i < limit; ++i ) {
4332 assert(_dom_lca_tags[i] == NULL, "Must be distinct from each node pointer");
4333 }
4334 #endif // ASSERT
4335 }
4336
4337 //------------------------------build_loop_late--------------------------------
4338 // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping.
4339 // Second pass finds latest legal placement, and ideal loop placement.
4340 void PhaseIdealLoop::build_loop_late( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) {
4341 while (worklist.size() != 0) {
4342 Node *n = worklist.pop();
4343 // Only visit once
4344 if (visited.test_set(n->_idx)) continue;
4345 uint cnt = n->outcnt();
4346 uint i = 0;
4347 while (true) {
4348 assert( _nodes[n->_idx], "no dead nodes" );
4349 // Visit all children
4350 if (i < cnt) {
4351 Node* use = n->raw_out(i);
4352 ++i;
4353 // Check for dead uses. Aggressively prune such junk. It might be
4354 // dead in the global sense, but still have local uses so I cannot
4355 // easily call 'remove_dead_node'.
4356 if( _nodes[use->_idx] != NULL || use->is_top() ) { // Not dead?
4357 // Due to cycles, we might not hit the same fixed point in the verify
4358 // pass as we do in the regular pass. Instead, visit such phis as
4359 // simple uses of the loop head.
4360 if( use->in(0) && (use->is_CFG() || use->is_Phi()) ) {
4361 if( !visited.test(use->_idx) )
4362 worklist.push(use);
4363 } else if( !visited.test_set(use->_idx) ) {
4364 nstack.push(n, i); // Save parent and next use's index.
4365 n = use; // Process all children of current use.
4366 cnt = use->outcnt();
4367 i = 0;
4368 }
4369 } else {
4370 // Do not visit around the backedge of loops via data edges.
4371 // push dead code onto a worklist
4372 _deadlist.push(use);
4373 }
4374 } else {
4375 // All of n's children have been processed, complete post-processing.
4376 build_loop_late_post(n);
4377 if (nstack.is_empty()) {
4378 // Finished all nodes on stack.
4379 // Process next node on the worklist.
4380 break;
4381 }
4382 // Get saved parent node and next use's index. Visit the rest of uses.
4383 n = nstack.node();
4384 cnt = n->outcnt();
4385 i = nstack.index();
4386 nstack.pop();
4387 }
4388 }
4389 }
4390 }
4391
4392 // Verify that no data node is scheduled in the outer loop of a strip
4393 // mined loop.
4394 void PhaseIdealLoop::verify_strip_mined_scheduling(Node *n, Node* least) {
4395 #ifdef ASSERT
4396 if (get_loop(least)->_nest == 0) {
4397 return;
4398 }
4399 IdealLoopTree* loop = get_loop(least);
4400 Node* head = loop->_head;
4401 if (head->is_OuterStripMinedLoop() &&
4402 // Verification can't be applied to fully built strip mined loops
4403 head->as_Loop()->outer_loop_end()->in(1)->find_int_con(-1) == 0) {
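    // A data node placed in the outer strip-mined loop is only legal if it
    // is reachable from the outer safepoint through inputs in the outer loop.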
4404 Node* sfpt = head->as_Loop()->outer_safepoint();
4405 ResourceMark rm;
4406 Unique_Node_List wq;
4407 wq.push(sfpt);
4408 for (uint i = 0; i < wq.size(); i++) {
4409 Node *m = wq.at(i);
4410 for (uint i = 1; i < m->req(); i++) {
4411 Node* nn = m->in(i);
4412 if (nn == n) {
4413 return;
4414 }
4415 if (nn != NULL && has_ctrl(nn) && get_loop(get_ctrl(nn)) == loop) {
4416 wq.push(nn);
4417 }
4418 }
4419 }
4420 ShouldNotReachHere();
4421 }
4422 #endif
4423 }
4424
4425
4426 //------------------------------build_loop_late_post---------------------------
4427 // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping.
4428 // Second pass finds latest legal placement, and ideal loop placement.
4429 void PhaseIdealLoop::build_loop_late_post(Node *n) {
4430 build_loop_late_post_work(n, true);
4431 }
4432
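// 'pinned' is an initial guess: it is cleared below for loads and other
// operations that are free to float out of their control block.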
4433 void PhaseIdealLoop::build_loop_late_post_work(Node *n, bool pinned) {
4434
4435 if (n->req() == 2 && (n->Opcode() == Op_ConvI2L || n->Opcode() == Op_CastII) && !C->major_progress() && !_verify_only) {
4436 _igvn._worklist.push(n); // Maybe we'll normalize it, if no more loops.
4437 }
4438
4439 #ifdef ASSERT
4440 if (_verify_only && !n->is_CFG()) {
4441 // Check def-use domination.
4442 compute_lca_of_uses(n, get_ctrl(n), true /* verify */);
4443 }
4444 #endif
4445
4446 // CFG and pinned nodes already handled
4447 if( n->in(0) ) {
4448 if( n->in(0)->is_top() ) return; // Dead?
4449
4450 // We'd like +VerifyLoopOptimizations to not believe that Mod's/Loads
4451 // _must_ be pinned (they have to observe their control edge of course).
4452 // Unlike Stores (which modify an unallocable resource, the memory
4453 // state), Mods/Loads can float around. So free them up.
4454 switch( n->Opcode() ) {
4455 case Op_DivI:
4456 case Op_DivF:
4457 case Op_DivD:
4458 case Op_ModI:
4459 case Op_ModF:
4460 case Op_ModD:
4461 case Op_LoadB: // Same with Loads; they can sink
4462 case Op_LoadUB: // during loop optimizations.
4463 case Op_LoadUS:
4464 case Op_LoadD:
4465 case Op_LoadF:
4466 case Op_LoadI:
4467 case Op_LoadKlass:
4468 case Op_LoadNKlass:
4469 case Op_LoadL:
4470 case Op_LoadS:
4471 case Op_LoadP:
4472 case Op_LoadN:
4473 case Op_LoadRange:
4474 case Op_LoadD_unaligned:
4475 case Op_LoadL_unaligned:
4476 case Op_StrComp: // Does a bunch of load-like effects
4477 case Op_StrEquals:
4478 case Op_StrIndexOf:
4479 case Op_StrIndexOfChar:
4480 case Op_AryEq:
4481 case Op_HasNegatives:
4482 pinned = false;
4483 }
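    // CMove nodes can likewise float; don't treat them as pinned.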
4484 if (n->is_CMove()) {
4485 pinned = false;
4486 }
4487 if( pinned ) {
4488 IdealLoopTree *chosen_loop = get_loop(n->is_CFG() ? n : get_ctrl(n));
4489 if( !chosen_loop->_child ) // Inner loop?
4490 chosen_loop->_body.push(n); // Collect inner loops
4491 return;
4492 }
4493 } else { // No slot zero
4494 if( n->is_CFG() ) { // CFG with no slot 0 is dead
4495 _nodes.map(n->_idx,0); // No block setting, it's globally dead
4496 return;
4497 }
4498 assert(!n->is_CFG() || n->outcnt() == 0, "");
4499 }
4500
4501 // Do I have a "safe range" I can select over?
4502 Node *early = get_ctrl(n);// Early location already computed
4503
4504 // Compute latest point this Node can go
4505 Node *LCA = get_late_ctrl( n, early );
4506 // LCA is NULL due to uses being dead
4507 if( LCA == NULL ) {
4508 #ifdef ASSERT
4509 for (DUIterator i1 = n->outs(); n->has_out(i1); i1++) {
4510 assert( _nodes[n->out(i1)->_idx] == NULL, "all uses must also be dead");
4511 }
4512 #endif
4513 _nodes.map(n->_idx, 0); // This node is useless
4514 _deadlist.push(n);
4515 return;
4516 }
4517 assert(LCA != NULL && !LCA->is_top(), "no dead nodes");
4518
  Node *legal = LCA;            // Walk 'legal' up the IDOM chain
  Node *least = legal;          // Best legal position so far
  while( early != legal ) {     // While not at earliest legal
#ifdef ASSERT
    if (legal->is_Start() && !early->is_Root()) {
      // Bad graph. Print idom path and fail.
      dump_bad_graph("Bad graph detected in build_loop_late", n, early, LCA);
      assert(false, "Bad graph detected in build_loop_late");
    }
#endif
    // Find least loop nesting depth
    legal = idom(legal);        // Bump up the IDOM tree
    // Check for lower nesting depth
    if( get_loop(legal)->_nest < get_loop(least)->_nest )
      least = legal;
  }
  assert(early == legal || legal != C->root(), "bad dominance of inputs");

  // Try not to place code on a loop entry projection
  // which can inhibit range check elimination.
  if (least != early) {
    Node* ctrl_out = least->unique_ctrl_out();
    if (ctrl_out && ctrl_out->is_Loop() &&
        least == ctrl_out->in(LoopNode::EntryControl)) {
      // Move the node above predicates as far up as possible so a
      // following pass of loop predication doesn't hoist a predicate
      // that depends on it above that node.
      Node* new_ctrl = least;
      for (;;) {
        if (!new_ctrl->is_Proj()) {
          break;
        }
        CallStaticJavaNode* call = new_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
        if (call == NULL) {
          break;
        }
        int req = call->uncommon_trap_request();
        Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req);
        if (trap_reason != Deoptimization::Reason_loop_limit_check &&
            trap_reason != Deoptimization::Reason_predicate &&
            trap_reason != Deoptimization::Reason_profile_predicate) {
          break;
        }
        Node* c = new_ctrl->in(0)->in(0);
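        // Do not climb above the earliest legal control.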
        if (is_dominator(c, early) && c != early) {
          break;
        }
        new_ctrl = c;
      }
      least = new_ctrl;
    }
  }

#ifdef ASSERT
  // If verifying, verify that 'verify_me' has a legal location
  // and choose it as our location.
  if( _verify_me ) {
    Node *v_ctrl = _verify_me->get_ctrl_no_update(n);
    Node *legal = LCA;
    while( early != legal ) {       // While not at earliest legal
      if( legal == v_ctrl ) break;  // Check for prior good location
      legal = idom(legal);          // Bump up the IDOM tree
    }
    // Check for prior good location
    if( legal == v_ctrl ) least = legal; // Keep prior if found
  }
#endif

  // Assign discovered "here or above" point
  least = find_non_split_ctrl(least);
  verify_strip_mined_scheduling(n, least);
  set_ctrl(n, least);

  // Collect inner loop bodies
  IdealLoopTree *chosen_loop = get_loop(least);
  if( !chosen_loop->_child )    // Inner loop?
    chosen_loop->_body.push(n); // Collect inner loops
}

#ifdef ASSERT
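// Dump the context around a node whose late placement failed: the node, its
// inputs (two levels deep) with their early controls, its uses, and the idom
// chain starting at the offending LCA.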
void PhaseIdealLoop::dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA) {
  tty->print_cr("%s", msg);
  tty->print("n: "); n->dump();
  tty->print("early(n): "); early->dump();
  if (n->in(0) != NULL && !n->in(0)->is_top() &&
      n->in(0) != early && !n->in(0)->is_Root()) {
    tty->print("n->in(0): "); n->in(0)->dump();
  }
  for (uint i = 1; i < n->req(); i++) {
    Node* in1 = n->in(i);
    if (in1 != NULL && in1 != n && !in1->is_top()) {
      tty->print("n->in(%d): ", i); in1->dump();
      Node* in1_early = get_ctrl(in1);
      tty->print("early(n->in(%d)): ", i); in1_early->dump();
      if (in1->in(0) != NULL && !in1->in(0)->is_top() &&
          in1->in(0) != in1_early && !in1->in(0)->is_Root()) {
        tty->print("n->in(%d)->in(0): ", i); in1->in(0)->dump();
      }
      for (uint j = 1; j < in1->req(); j++) {
        Node* in2 = in1->in(j);
        if (in2 != NULL && in2 != n && in2 != in1 && !in2->is_top()) {
          tty->print("n->in(%d)->in(%d): ", i, j); in2->dump();
          Node* in2_early = get_ctrl(in2);
          tty->print("early(n->in(%d)->in(%d)): ", i, j); in2_early->dump();
          if (in2->in(0) != NULL && !in2->in(0)->is_top() &&
              in2->in(0) != in2_early && !in2->in(0)->is_Root()) {
            tty->print("n->in(%d)->in(%d)->in(0): ", i, j); in2->in(0)->dump();
          }
        }
      }
    }
  }
  tty->cr();
  tty->print("LCA(n): "); LCA->dump();
  for (uint i = 0; i < n->outcnt(); i++) {
    Node* u1 = n->raw_out(i);
    if (u1 == n)
      continue;
    tty->print("n->out(%d): ", i); u1->dump();
    if (u1->is_CFG()) {
      for (uint j = 0; j < u1->outcnt(); j++) {
        Node* u2 = u1->raw_out(j);
        if (u2 != u1 && u2 != n && u2->is_CFG()) {
          tty->print("n->out(%d)->out(%d): ", i, j); u2->dump();
        }
      }
    } else {
      Node* u1_later = get_ctrl(u1);
      tty->print("later(n->out(%d)): ", i); u1_later->dump();
      if (u1->in(0) != NULL && !u1->in(0)->is_top() &&
          u1->in(0) != u1_later && !u1->in(0)->is_Root()) {
        tty->print("n->out(%d)->in(0): ", i); u1->in(0)->dump();
      }
      for (uint j = 0; j < u1->outcnt(); j++) {
        Node* u2 = u1->raw_out(j);
        if (u2 == n || u2 == u1)
          continue;
        tty->print("n->out(%d)->out(%d): ", i, j); u2->dump();
        if (!u2->is_CFG()) {
          Node* u2_later = get_ctrl(u2);
          tty->print("later(n->out(%d)->out(%d)): ", i, j); u2_later->dump();
          if (u2->in(0) != NULL && !u2->in(0)->is_top() &&
              u2->in(0) != u2_later && !u2->in(0)->is_Root()) {
            tty->print("n->out(%d)->out(%d)->in(0): ", i, j); u2->in(0)->dump();
          }
        }
      }
    }
  }
  tty->cr();
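  // Walk the idom chain up from the LCA, capping the walk since the chain
  // may be broken in a bad graph.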
  int ct = 0;
  Node *dbg_legal = LCA;
  while(!dbg_legal->is_Start() && ct < 100) {
    tty->print("idom[%d] ",ct); dbg_legal->dump();
    ct++;
    dbg_legal = idom(dbg_legal);
  }
  tty->cr();
}
#endif

#ifndef PRODUCT
//------------------------------dump-------------------------------------------
void PhaseIdealLoop::dump() const {
  ResourceMark rm;
  Arena* arena = Thread::current()->resource_area();
  Node_Stack stack(arena, C->live_nodes() >> 2);
  Node_List rpo_list;
  VectorSet visited(arena);
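  // Mark 'top' as visited up front so the RPO walk never pushes it.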
  visited.set(C->top()->_idx);
  rpo(C->root(), stack, visited, rpo_list);
  // Dump root loop indexed by last element in PO order
  dump(_ltree_root, rpo_list.size(), rpo_list);
}

void PhaseIdealLoop::dump(IdealLoopTree* loop, uint idx, Node_List &rpo_list) const {
  loop->dump_head();

  // Now scan for CFG nodes in the same loop
  for (uint j = idx; j > 0; j--) {
    Node* n = rpo_list[j-1];
    if (!_nodes[n->_idx])       // Skip dead nodes
      continue;

    if (get_loop(n) != loop) {  // Wrong loop nest
      if (get_loop(n)->_head == n &&    // Found nested loop?
          get_loop(n)->_parent == loop)
        dump(get_loop(n), rpo_list.size(), rpo_list);   // Print it nested-ly
      continue;
    }

    // Dump controlling node
    tty->sp(2 * loop->_nest);
    tty->print("C");
    if (n == C->root()) {
      n->dump();
    } else {
      Node* cached_idom = idom_no_update(n);
      Node* computed_idom = n->in(0);
      if (n->is_Region()) {
        computed_idom = compute_idom(n);
        // compute_idom() will return n->in(0) when idom(n) is an IfNode (or
        // any MultiBranch ctrl node), so apply a similar transform to
        // the cached idom returned from idom_no_update.
        cached_idom = find_non_split_ctrl(cached_idom);
      }
      tty->print(" ID:%d", computed_idom->_idx);
      n->dump();
      if (cached_idom != computed_idom) {
        tty->print_cr("*** BROKEN IDOM! Computed as: %d, cached as: %d",
                      computed_idom->_idx, cached_idom->_idx);
      }
    }
    // Dump nodes it controls
    for (uint k = 0; k < _nodes.Size(); k++) {
      // (k < C->unique() && get_ctrl(find(k)) == n)
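      // A data node's _nodes entry is its control node with the low bit set
      // (see set_ctrl/has_ctrl), hence the '+ 1' tag in this comparison.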
      if (k < C->unique() && _nodes[k] == (Node*)((intptr_t)n + 1)) {
        Node* m = C->root()->find(k);
        if (m && m->outcnt() > 0) {
          if (!(has_ctrl(m) && get_ctrl_no_update(m) == n)) {
            tty->print_cr("*** BROKEN CTRL ACCESSOR! _nodes[k] is %p, ctrl is %p",
                          _nodes[k], has_ctrl(m) ? get_ctrl_no_update(m) : NULL);
          }
          tty->sp(2 * loop->_nest + 1);
          m->dump();
        }
      }
    }
  }
}
#endif

// Collect an RPO for the whole CFG.
// Result list is in post-order (scan backwards for RPO)
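// A node is appended to 'rpo_list' only after all of its CFG successors have
// been visited, so walking the list from the back yields a reverse post-order.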
void PhaseIdealLoop::rpo(Node* start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list) const {
  stk.push(start, 0);
  visited.set(start->_idx);

  while (stk.is_nonempty()) {
    Node* m = stk.node();
    uint idx = stk.index();
    if (idx < m->outcnt()) {
      stk.set_index(idx + 1);
      Node* n = m->raw_out(idx);
      if (n->is_CFG() && !visited.test_set(n->_idx)) {
        stk.push(n, 0);
      }
    } else {
      rpo_list.push(m);
      stk.pop();
    }
  }
}


//=============================================================================
//------------------------------LoopTreeIterator-------------------------------

// Advance to next loop tree using a preorder, left-to-right traversal.
void LoopTreeIterator::next() {
  assert(!done(), "must not be done.");
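  // Preorder: take the first child if there is one, else the next sibling,
  // else climb to the nearest ancestor that still has an unvisited sibling.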
  if (_curnt->_child != NULL) {
    _curnt = _curnt->_child;
  } else if (_curnt->_next != NULL) {
    _curnt = _curnt->_next;
  } else {
    while (_curnt != _root && _curnt->_next == NULL) {
      _curnt = _curnt->_parent;
    }
    if (_curnt == _root) {
      _curnt = NULL;
      assert(done(), "must be done.");
    } else {
      assert(_curnt->_next != NULL, "must be more to do");
      _curnt = _curnt->_next;
    }
  }
}