/*
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/c2/g1BarrierSetC2.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/rootnode.hpp"
#include "opto/type.hpp"
#include "utilities/macros.hpp"

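// A brief note on the two TypeFunc definitions that follow: they describe the
// call signatures used for the leaf calls emitted further down in this file.
// Roughly (a sketch based on the make_leaf_call uses below):
//   write_ref_field_pre_entry(pre_val, thread)   - called when the thread's SATB buffer is full
//   write_ref_field_post_entry(card_adr, thread) - called when the thread's dirty card queue buffer is full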
const TypeFunc *G1BarrierSetC2::write_ref_field_pre_entry_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *G1BarrierSetC2::write_ref_field_post_entry_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;  // Card addr
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}

#define __ ideal.
/*
 * Determine if the G1 pre-barrier can be removed. The pre-barrier is
 * required by SATB to make sure all objects live at the start of the
 * marking are kept alive; to achieve this, every reference update needs
 * to log the previous value of the field before the new reference is
 * written.
 *
 * If the previous value is NULL there is no need to save the old value.
 * References that are NULL are filtered at runtime by the barrier code
 * to avoid unnecessary queuing.
 *
 * However, in the case of newly allocated objects it might be possible to
 * prove at compile time that the reference about to be overwritten is NULL
 * and avoid emitting the barrier code completely.
 *
 * The compiler needs to determine that the object in which the field is about
 * to be written is newly allocated, and that no prior store to the same field
 * has happened since the allocation.
 *
 * Returns true if the pre-barrier can be removed.
 */
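// For reference, the barrier whose elision is attempted here corresponds
// roughly to the following pseudocode (a sketch of what pre_barrier() below
// emits, not the exact IR; the names are informal placeholders for the
// thread-local SATB queue fields):
//
//   if (satb_queue_active) {                          // only during marking
//     pre_val = *adr;                                 // previous value, unless already known
//     if (pre_val != NULL) {
//       if (index != 0) {                             // room left in the thread-local buffer
//         index -= sizeof(intptr_t);
//         buffer[index] = pre_val;                    // log the previous value
//       } else {
//         write_ref_field_pre_entry(pre_val, thread); // buffer full -> runtime call
//       }
//     }
//   }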
bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit,
                                               PhaseTransform* phase,
                                               Node* adr,
                                               BasicType bt,
                                               uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = kit->memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == NULL) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so break.
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success:  The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects.  We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base, phase),
                                              phase)) {
        // Success:  The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed to not be null here from the earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization stores NULL, i.e. that no previous store
        // has been moved up into the initialization that directly writes a reference.
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == NULL || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

// G1 pre/post barriers
void G1BarrierSetC2::pre_barrier(GraphKit* kit,
                                 bool do_load,
                                 Node* ctl,
                                 Node* obj,
                                 Node* adr,
                                 uint alias_idx,
                                 Node* val,
                                 const TypeOopPtr* val_type,
                                 Node* pre_val,
                                 BasicType bt) const {
  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(obj != NULL, "must have a base");
    assert(adr != NULL, "where are we loading from?");
    assert(pre_val == NULL, "loaded already?");
    assert(val_type != NULL, "need a type");

    if (use_ReduceInitialCardMarks()
        && g1_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
      return;
    }

  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != NULL, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely  = PROB_LIKELY(0.999);
  float unlikely  = PROB_UNLIKELY(0.999);

  BasicType active_type = in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
  assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 || in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "flag width");

  // Offsets into the thread
  const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  const int index_offset   = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  const int buffer_offset  = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());

  // Now the actual pointers into the thread
  Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);

  // if (marking != 0), i.e. SATB marking is active
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 SATBMarkQueue::_index with wrong size.");
    Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      // alias_idx correct??
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }

    // if (pre_val != NULL)
    __ if_then(pre_val, BoolTest::ne, kit->null()); {
      Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // is the queue for this thread full?
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node *log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc *tf = write_ref_field_pre_entry_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), "write_ref_field_pre_entry", pre_val, tls);
      } __ end_if();  // (index != 0)
    } __ end_if();  // (pre_val != NULL)
  } __ end_if();  // (marking != 0)

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

/*
 * G1, like any GC with a young generation, requires a way to keep track of
 * references from the old generation to the young generation to make sure all
 * live objects are found. G1 also needs to keep track of object references
 * between different regions to enable evacuation of old regions, which is done
 * as part of mixed collections. References are tracked in remembered sets,
 * which are continuously updated as references are written, with the help of
 * the post-barrier.
 *
 * To reduce the number of updates to the remembered set, the post-barrier
 * filters out updates to fields in objects located in the young generation or
 * in the same region as the referenced object, updates that write NULL, and
 * updates to cards already marked dirty by an earlier write.
 *
 * Under certain circumstances it is possible to avoid generating the
 * post-barrier completely, if it can be proven at compile time that the
 * object is newly allocated and that no safepoint exists between the
 * allocation and the store.
 *
 * In the case of a slow-path allocation the allocation code must handle the
 * barrier as part of the allocation if the allocated object is not located
 * in the nursery; this happens for humongous objects.
 *
 * Returns true if the post barrier can be removed.
 */
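// For reference, the barrier whose elision is attempted here corresponds
// roughly to the following pseudocode (a sketch of what post_barrier() below
// emits, ignoring the optional stack-allocation bounds check; the names are
// informal placeholders):
//
//   if (((adr ^ new_val) >> LogOfHRGrainBytes) != 0) {      // store crosses regions?
//     if (new_val != NULL) {
//       card_adr = card_table_base + (adr >> card_shift);
//       if (*card_adr != g1_young_card_val()) {
//         memory barrier (MemBarVolatile) against the oop store
//         if (*card_adr != dirty_card_val()) {
//           *card_adr = dirty_card_val();                    // dirty the card
//           if (index != 0) {                                // room in the dirty card queue buffer
//             index -= sizeof(intptr_t);
//             buffer[index] = card_adr;
//           } else {
//             write_ref_field_post_entry(card_adr, thread);  // buffer full -> runtime call
//           }
//         }
//       }
//     }
//   }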
bool G1BarrierSetC2::g1_can_remove_post_barrier(GraphKit* kit,
                                                PhaseTransform* phase, Node* store,
                                                Node* adr) const {
  intptr_t      offset = 0;
  Node*         base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc  = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  // Start search from Store node
  Node* mem = store->in(MemNode::Control);
  if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

    InitializeNode* st_init = mem->in(0)->as_Initialize();
    AllocateNode*  st_alloc = st_init->allocation();

    // Make sure we are looking at the same allocation
    if (alloc == st_alloc) {
      return true;
    }
  }

  return false;
}

//
// Update the card table and add card address to the queue
//
void G1BarrierSetC2::g1_mark_card(GraphKit* kit,
                                  IdealKit& ideal,
                                  Node* card_adr,
                                  Node* oop_store,
                                  uint oop_alias_idx,
                                  Node* index,
                                  Node* index_adr,
                                  Node* buffer,
                                  const TypeFunc* tf) const {
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);
  Node* no_base = __ top();
  BasicType card_bt = T_BYTE;
  // Smash zero into card. MUST BE ORDERED WRT THE OOP STORE
  __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);

  // Now do the queue work
  __ if_then(index, BoolTest::ne, zeroX); {

    Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
    Node* log_addr = __ AddP(no_base, buffer, next_index);

    // Order, see storeCM.
    __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);

  } __ else_(); {
    __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), "write_ref_field_post_entry", card_adr, __ thread());
  } __ end_if();

}

void G1BarrierSetC2::post_barrier(GraphKit* kit,
                                  Node* ctl,
                                  Node* oop_store,
                                  Node* obj,
                                  Node* adr,
                                  uint alias_idx,
                                  Node* val,
                                  BasicType bt,
                                  bool use_precise) const {
  // If we are writing a NULL then we need no post barrier

  if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
    // Must be NULL
    const Type* t = val->bottom_type();
    assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
    // No post barrier if writing NULL
    return;
  }

  if (use_ReduceInitialCardMarks() && obj == kit->just_allocated_object(kit->control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (use_ReduceInitialCardMarks()
      && g1_can_remove_post_barrier(kit, &kit->gvn(), oop_store, adr)) {
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
  assert(adr != NULL, "");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  BarrierSet* bs = BarrierSet::barrier_set();
  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();

  Node* no_base = __ top();
  float likely = PROB_LIKELY_MAG(3);
  float unlikely = PROB_UNLIKELY_MAG(3);
  Node* young_card = __ ConI((jint)G1CardTable::g1_young_card_val());
  Node* dirty_card = __ ConI((jint)G1CardTable::dirty_card_val());
  Node* zeroX = __ ConX(0);

  const TypeFunc *tf = write_ref_field_post_entry_Type();

  // Offsets into the thread
  const int index_offset  = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
  const int buffer_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());

  // Pointers into the thread

  Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr =  __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some values
  // Use ctrl to avoid hoisting these values past a safepoint, which could
  // potentially reset these fields in the JavaThread.
  Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
  Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

  // Convert the store obj pointer to an int prior to doing math on it
  // Must use ctrl to prevent "integerized oop" existing across safepoint
  Node* cast =  __ CastPX(__ ctrl(), adr);

  // Divide pointer by card size
  Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) );

  // Combine card table base and card offset
  Node* card_adr = __ AddP(no_base, byte_map_base_node(kit), card_offset );

  // If we know the value being stored, check whether the store crosses regions.

  if (val != NULL) {
    // Does the store cause us to cross regions?

    // Should be able to do an unsigned compare of region_size instead of
    // an extra shift. Do we have an unsigned compare??
    // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
    Node* xor_res =  __ URShiftX ( __ XorX( cast,  __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));

    // if (xor_res == 0) same region so skip
    __ if_then(xor_res, BoolTest::ne, zeroX, likely); {

      // if ((unsigned)(card_offset - low_map_offset) >= (high_map_offset - low_map_offset)) stack allocated object, so skip
      if (kit->C->do_stack_allocation()) {
        state()->add_enqueue_barrier(static_cast<CastP2XNode*>(cast));
        Node* low_off = kit->longcon(ct->byte_map_bottom_offset());
        Node* delta_off = kit->longcon(ct->byte_map_top_offset() - ct->byte_map_bottom_offset());
        Node* sub_off = __ SubL(cast, low_off);

        __ uif_then(sub_off, BoolTest::le, delta_off, likely); } {

          // No barrier if we are storing a NULL
          __ if_then(val, BoolTest::ne, kit->null(), likely); {

            // Ok must mark the card if not already dirty

            // load the original value of the card
            Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);

            __ if_then(card_val, BoolTest::ne, young_card, unlikely); {
              kit->sync_kit(ideal);
              kit->insert_mem_bar(Op_MemBarVolatile, oop_store);
              __ sync_kit(kit);

              Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
              __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
                g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
              } __ end_if();
            } __ end_if();
          } __ end_if();
      } if (kit->C->do_stack_allocation()) {
        __ end_if();
      }
    } __ end_if();
  } else {
    // The Object.clone() intrinsic uses this path if !ReduceInitialCardMarks.
    // We don't need a barrier here if the destination is a newly allocated object
    // in Eden. Otherwise, GC verification breaks because we assume that cards in Eden
    // are set to 'g1_young_gen' (see G1CardTable::verify_g1_young_region()).
    assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");

    // if ((unsigned)(card_offset - low_map_offset) >= (high_map_offset - low_map_offset)) stack allocated object, so skip
    if (kit->C->do_stack_allocation()) {
      state()->add_enqueue_barrier(static_cast<CastP2XNode*>(cast));
      Node* low_off = kit->longcon(ct->byte_map_bottom_offset());
      Node* delta_off = kit->longcon(ct->byte_map_top_offset() - ct->byte_map_bottom_offset());
      Node* sub_off = __ SubL(cast, low_off);

      __ uif_then(sub_off, BoolTest::le, delta_off, likely); } {

        Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
        __ if_then(card_val, BoolTest::ne, young_card); {
          g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
        } __ end_if();

      } if (kit->C->do_stack_allocation()) {
        __ end_if();
      }
  }

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

// Helper that guards and inserts a pre-barrier.
void G1BarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
                                        Node* pre_val, bool need_mem_bar) const {
  // We could be accessing the referent field of a reference object. If so, when G1
  // is enabled, we need to log the value in the referent field in an SATB buffer.
  // This routine performs some compile time filters and generates suitable
  // runtime filters that guard the pre-barrier code.
  // Also add a memory barrier for non-volatile loads from the referent field
  // to prevent commoning of loads across safepoints.

  // Some compile time checks.

  // If offset is a constant, is it java_lang_ref_Reference::referent_offset()?
  const TypeX* otype = offset->find_intptr_t_type();
  if (otype != NULL && otype->is_con() &&
      otype->get_con() != java_lang_ref_Reference::referent_offset()) {
    // Constant offset but not the referent_offset, so just return
    return;
  }

  // We only need to generate the runtime guards for instances.
  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
  if (btype != NULL) {
    if (btype->isa_aryptr()) {
      // Array type so nothing to do
      return;
    }

    const TypeInstPtr* itype = btype->isa_instptr();
    if (itype != NULL) {
      // Can the klass of base_oop be statically determined to be
      // _not_ a sub-class of Reference and _not_ Object?
      ciKlass* klass = itype->klass();
      if ( klass->is_loaded() &&
          !klass->is_subtype_of(kit->env()->Reference_klass()) &&
          !kit->env()->Object_klass()->is_subtype_of(klass)) {
        return;
      }
    }
  }

  // The compile time filters did not reject base_oop/offset so
  // we need to generate the following runtime filters
  //
  // if (offset == java_lang_ref_Reference::referent_offset()) {
  //   if (instance_of(base, java.lang.ref.Reference)) {
  //     pre_barrier(_, pre_val, ...);
  //   }
  // }

  float likely   = PROB_LIKELY(  0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  IdealKit ideal(kit);

  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset());

  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
      // Update graphKit memory and control from IdealKit.
      kit->sync_kit(ideal);

      Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
      Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);

      // Update IdealKit memory and control from graphKit.
      __ sync_kit(kit);

      Node* one = __ ConI(1);
      // is_instof == 0 if base_oop == NULL
      __ if_then(is_instof, BoolTest::eq, one, unlikely); {

        // Update graphKit from IdealKit.
        kit->sync_kit(ideal);

        // Use the pre-barrier to record the value in the referent field
        pre_barrier(kit, false /* do_load */,
                    __ ctrl(),
                    NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                    pre_val /* pre_val */,
                    T_OBJECT);
        if (need_mem_bar) {
          // Add memory barrier to prevent commoning reads from this field
          // across safepoint since GC can change its value.
          kit->insert_mem_bar(Op_MemBarCPUOrder);
        }
        // Update IdealKit from graphKit.
        __ sync_kit(kit);

      } __ end_if(); // _ref_type != ref_none
  } __ end_if(); // offset == referent_offset

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

#undef __

Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();
  Node* adr = access.addr().node();
  Node* obj = access.base();

  bool anonymous = (decorators & C2_UNSAFE_ACCESS) != 0;
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool is_unordered = (decorators & MO_UNORDERED) != 0;
  bool is_mixed = !in_heap && !in_native;
  bool need_cpu_mem_bar = !is_unordered || mismatched || is_mixed;

  Node* top = Compile::current()->top();
  Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
  Node* load = CardTableBarrierSetC2::load_at_resolved(access, val_type);

  // If we are reading the value of the referent field of a Reference
  // object (either by using Unsafe directly or through reflection)
  // then, if G1 is enabled, we need to record the referent in an
  // SATB log buffer using the pre-barrier mechanism.
  // Also we need to add memory barrier to prevent commoning reads
  // from this field across safepoint since GC can change its value.
  bool need_read_barrier = in_heap && (on_weak ||
                                       (unknown && offset != top && obj != top));

  if (!access.is_oop() || !need_read_barrier) {
    return load;
  }

  assert(access.is_parse_access(), "entry not supported at optimization time");
  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
  GraphKit* kit = parse_access.kit();

  if (on_weak) {
    // Use the pre-barrier to record the value in the referent field
    pre_barrier(kit, false /* do_load */,
                kit->control(),
                NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                load /* pre_val */, T_OBJECT);
    // Add memory barrier to prevent commoning reads from this field
    // across safepoint since GC can change its value.
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  } else if (unknown) {
    // We do not require a mem bar inside pre_barrier if need_mem_bar
    // is set: the barriers would be emitted by us.
    insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
  }

  return load;
}

bool G1BarrierSetC2::is_gc_barrier_node(Node* node) const {
  if (CardTableBarrierSetC2::is_gc_barrier_node(node)) {
    return true;
  }
  if (node->Opcode() != Op_CallLeaf) {
    return false;
  }
  CallLeafNode *call = node->as_CallLeaf();
  if (call->_name == NULL) {
    return false;
  }

  return strcmp(call->_name, "write_ref_field_pre_entry") == 0 || strcmp(call->_name, "write_ref_field_post_entry") == 0;
}

bool G1BarrierSetC2::process_barrier_node(Node* node, PhaseIterGVN& igvn) const {
  assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");

  // Must have a control node
  if (node->in(0) == NULL) {
    return false;
  }

  // Search for the CastP2X->Xor->URShift->Cmp path which
  // checks if the store was done to a region different from the value's region.
  Node* xorx = node->find_out_with(Op_XorX);
  BoolNode* bool_node = NULL;

  if (xorx != NULL) {

    Node* shift = xorx->unique_out();
    Node* cmpx = shift->unique_out();

    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
           "missing region check in G1 post barrier");

    Node* bol = cmpx->unique_out();
    assert(bol->unique_out()->is_If(), "should find if after the bool node");
    Node* if_node = bol->unique_out();
    Node* if_true = if_node->find_out_with(Op_IfTrue);
    assert(if_true != NULL, "there should be a true projection");

    Node* iff_check = if_true->find_out_with(Op_If);
    // Not a barrier with bound check
    if (iff_check == NULL) {
      return false;
    }

    Node* iff_check_in_1_node = iff_check->in(1);
    if (!iff_check_in_1_node->is_Bool()) {
      return false;
    }
    bool_node = iff_check_in_1_node->as_Bool();

  } else {
    // This "could" be the path followed when !use_ReduceInitialCardMarks() is
    // used or when the two sides of the barrier are scalar replaced.
    //assert(false, "we managed to get here!!! process_barrier_node");
    Node *addl_node = node->find_out_with(Op_AddL);
    if (addl_node == NULL) {
      return false;
    }

    Node* cmpx = addl_node->unique_out();
    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::le,
           "missing region check in G1 post barrier");

    bool_node = cmpx->unique_out()->as_Bool();
  }

  if (bool_node->_test._test != BoolTest::le) {
    return false;
  }

  // the input to the bool is the CMPX
  Node* bool_node_in_1_node = bool_node->in(1);
  if (!bool_node_in_1_node->is_Cmp()) {
    return false;
  }
  CmpNode* cmp_node = bool_node_in_1_node->as_Cmp();

  // the second input to the CMPX is the card table size constant (byte_map_top_offset - byte_map_bottom_offset)
  Node* cmp_node_in_2_node = cmp_node->in(2);
  if (!cmp_node_in_2_node->is_Con()) {
    return false;
  }

  BarrierSet* bs = BarrierSet::barrier_set();
  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();
  size_t constant = ct->byte_map_top_offset() - ct->byte_map_bottom_offset();

  // Check that the input to this CMP node is the expected constant
  const TypeX* otype = cmp_node_in_2_node->find_intptr_t_type();
  if (otype != NULL && otype->is_con() &&
      size_t(otype->get_con()) != constant) {
    // Constant offset but not the card table size constant, so just return
    return false;
  }

  // we can't change the compare or the constant, so create a new constant (0) and replace the variable input
  Node* cmp_node_in_1_node = cmp_node->in(1);
  ConNode* zeroConstant_node = igvn.makecon(TypeX_ZERO);
  if (cmp_node_in_1_node->_idx == zeroConstant_node->_idx) {
    // we can get here via different nodes - but we only want to change the input once
    return false;
  }

  igvn.rehash_node_delayed(cmp_node);
  int numReplaced = cmp_node->replace_edge(cmp_node_in_1_node, zeroConstant_node);
  assert(numReplaced == 1, "Failed to replace the card_offset with ConX(0)");
  return true;
}

void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
  assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
  assert(node->outcnt() <= 3, "expects 1, 2 or 3 users: Xor, URShift and SubL nodes");
  // There could be only one user, the URShift node, in the Object.clone() intrinsic,
  // but the new allocation is passed to the arraycopy stub and it cannot
  // be scalar replaced, so we don't check that case.

  // Certain loop optimizations may introduce a CastP2X node with
  // a ConvL2I in case of an AllocateArray op. Check for that case
  // here and do not attempt to eliminate it as a write barrier.
  if (macro->C->do_stack_allocation() && !state()->is_a_barrier(static_cast<CastP2XNode*>(node))) {
    return;
  }

  // Another case of only one user (Xor) is when the value check for NULL
  // in the G1 post barrier is folded after CCP so the code which used URShift
  // is removed.

  // Take Region node before eliminating post barrier since it also
  // eliminates CastP2X node when it has only one user.
  Node* this_region = node->in(0);
  assert(this_region != NULL, "");

  // Remove G1 post barrier.

  // Search for the CastP2X->Xor->URShift->Cmp path which
  // checks if the store was done to a region different from the value's region,
  // and replace the Cmp with #0 (false) to collapse the G1 post barrier.
  Node* xorx = node->find_out_with(Op_XorX);
  if (xorx != NULL) {
    Node* shift = xorx->unique_out();
    Node* cmpx = shift->unique_out();
    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
           "missing region check in G1 post barrier");
    macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));

    // Remove G1 pre barrier.

    // Search "if (marking != 0)" check and set it to "false".
    // There is no G1 pre barrier if previous stored value is NULL
    // (for example, after initialization).
    if (this_region->is_Region() && this_region->req() == 3) {
      int ind = 1;
      if (!this_region->in(ind)->is_IfFalse()) {
        ind = 2;
      }
      if (this_region->in(ind)->is_IfFalse() &&
          this_region->in(ind)->in(0)->Opcode() == Op_If) {
        Node* bol = this_region->in(ind)->in(0)->in(1);
        assert(bol->is_Bool(), "");
        cmpx = bol->in(1);
        if (bol->as_Bool()->_test._test == BoolTest::ne &&
            cmpx->is_Cmp() && cmpx->in(2) == macro->intcon(0) &&
            cmpx->in(1)->is_Load()) {
          Node* adr = cmpx->in(1)->as_Load()->in(MemNode::Address);
          const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
          if (adr->is_AddP() && adr->in(AddPNode::Base) == macro->top() &&
              adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
              adr->in(AddPNode::Offset) == macro->MakeConX(marking_offset)) {
            macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
          }
        }
      }
    }
  } else {
    // In a scenario where the two sides of the barrier are scalar replaced
    // or stack allocated, the XorX node will be visited more than once, because
    // both edges will be CastP2X nodes from two distinct allocates. In certain
    // instances, the removal of the CastP2X node will result in removal of the
    // XorX node, causing the assert below to be hit when eliminate_gc_barrier is
    // called for the second node.
    // assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");

    // This is a G1 post barrier emitted by the Object.clone() intrinsic.
    // Search for the CastP2X->URShiftX->AddP->LoadB->Cmp path which checks if the card
    // is marked as young_gen and replace the Cmp with 0 (false) to collapse the barrier.
    Node* shift = node->find_out_with(Op_URShiftX);
    assert(shift != NULL, "missing G1 post barrier");
    Node* addp = shift->unique_out();
    Node* load = addp->find_out_with(Op_LoadB);
    assert(load != NULL, "missing G1 post barrier");
    Node* cmpx = load->unique_out();
    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
           "missing card value check in G1 post barrier");
    macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
    // There is no G1 pre barrier in this case
  }
  // Now CastP2X can be removed since it is used only on a dead path
  // which is currently still alive until igvn optimizes it.
  // TODO: fix the following assert because of the SubL
  // assert(node->outcnt() == 0 || node->unique_out()->Opcode() == Op_URShiftX, "");
  macro->replace_node(node, macro->top());

  // Remove this node from our state
  state()->remove_enqueue_barrier(static_cast<CastP2XNode*>(node));
}

Node* G1BarrierSetC2::step_over_gc_barrier(Node* c) const {
  if (!use_ReduceInitialCardMarks() &&
      c != NULL && c->is_Region() && c->req() == 3) {
    for (uint i = 1; i < c->req(); i++) {
      if (c->in(i) != NULL && c->in(i)->is_Region() &&
          c->in(i)->req() == 3) {
        Node* r = c->in(i);
        for (uint j = 1; j < r->req(); j++) {
          if (r->in(j) != NULL && r->in(j)->is_Proj() &&
              r->in(j)->in(0) != NULL &&
              r->in(j)->in(0)->Opcode() == Op_CallLeaf &&
              r->in(j)->in(0)->as_Call()->entry_point() == CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry)) {
            Node* call = r->in(j)->in(0);
            c = c->in(i == 1 ? 2 : 1);
            if (c != NULL) {
              c = c->in(0);
              if (c != NULL) {
                c = c->in(0);
                assert(call->in(0) == NULL ||
                       call->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0)->in(0)->in(0) == NULL ||
                       c == call->in(0)->in(0)->in(0)->in(0)->in(0), "bad barrier shape");
                return c;
              }
            }
          }
        }
      }
    }
  }
  return c;
}

#ifdef ASSERT
void G1BarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
  if (phase != BarrierSetC2::BeforeCodeGen) {
    return;
  }
  // Verify G1 pre-barriers
  const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());

  ResourceArea *area = Thread::current()->resource_area();
  Unique_Node_List visited(area);
  Node_List worklist(area);
  // We're going to walk control flow backwards starting from the Root
  worklist.push(compile->root());
  while (worklist.size() > 0) {
    Node* x = worklist.pop();
    if (x == NULL || x == compile->top()) continue;
    if (visited.member(x)) {
      continue;
    } else {
      visited.push(x);
    }

    if (x->is_Region()) {
      for (uint i = 1; i < x->req(); i++) {
        worklist.push(x->in(i));
      }
    } else {
      worklist.push(x->in(0));
      // We are looking for the pattern:
      //                            /->ThreadLocal
      // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
      //              \->ConI(0)
      // We want to verify that the If and the LoadB have the same control
      // See G1BarrierSetC2::pre_barrier()
      if (x->is_If()) {
        IfNode *iff = x->as_If();
        if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
          CmpNode *cmp = iff->in(1)->in(1)->as_Cmp();
          if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
              && cmp->in(1)->is_Load()) {
            LoadNode* load = cmp->in(1)->as_Load();
            if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
                && load->in(2)->in(3)->is_Con()
                && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {

              Node* if_ctrl = iff->in(0);
              Node* load_ctrl = load->in(0);

              if (if_ctrl != load_ctrl) {
                // Skip possible CProj->NeverBranch in infinite loops
                if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
                    && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) {
                  if_ctrl = if_ctrl->in(0)->in(0);
                }
              }
              assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
            }
          }
        }
      }
    }
  }
}
#endif

bool G1BarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
  if (opcode == Op_StoreP) {
    Node* adr = n->in(MemNode::Address);
    const Type* adr_type = gvn->type(adr);
    // Pointer stores in G1 barriers look like unsafe accesses.
    // Ignore such stores so that non-escaping allocations can still be
    // scalar replaced.
    if (adr_type->isa_rawptr() && adr->is_AddP()) {
      Node* base = conn_graph->get_addp_base(adr);
      if (base->Opcode() == Op_LoadP &&
          base->in(MemNode::Address)->is_AddP()) {
        adr = base->in(MemNode::Address);
        Node* tls = conn_graph->get_addp_base(adr);
        if (tls->Opcode() == Op_ThreadLocal) {
          int offs = (int) gvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
          const int buf_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
          if (offs == buf_offset) {
            return true; // G1 pre barrier previous oop value store.
          }
          if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
            return true; // G1 post barrier card address store.
          }
        }
      }
    }
  }
  return false;
}