src/hotspot/share/opto/memnode.cpp (old)

 171         }
 172         if (is_instance) {
 173           result = proj_in->in(TypeFunc::Memory);
 174         } else if (is_boxed_value_load) {
 175           Node* klass = alloc->in(AllocateNode::KlassNode);
 176           const TypeKlassPtr* tklass = phase->type(klass)->is_klassptr();
 177           if (tklass->klass_is_exact() && !tklass->klass()->equals(t_oop->klass())) {
 178             result = proj_in->in(TypeFunc::Memory); // not related allocation
 179           }
 180         }
 181       } else if (proj_in->is_MemBar()) {
 182         ArrayCopyNode* ac = NULL;
 183         if (ArrayCopyNode::may_modify(t_oop, proj_in->as_MemBar(), phase, ac)) {
 184           break;
 185         }
 186         result = proj_in->in(TypeFunc::Memory);
 187       } else {
 188         assert(false, "unexpected projection");
 189       }
 190     } else if (result->is_ClearArray()) {
 191       if (!is_instance || !ClearArrayNode::step_through(&result, instance_id, phase)) {

 192         // Can not bypass initialization of the instance
 193         // we are looking for.
 194         break;
 195       }
 196       // Otherwise skip it (the call updated 'result' value).
 197     } else if (result->is_MergeMem()) {
 198       result = step_through_mergemem(phase, result->as_MergeMem(), t_oop, NULL, tty);
 199     }
 200   }
 201   return result;
 202 }
 203 
 204 Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase) {
 205   const TypeOopPtr* t_oop = t_adr->isa_oopptr();
 206   if (t_oop == NULL)
 207     return mchain;  // don't try to optimize non-oop types
 208   Node* result = optimize_simple_memory_chain(mchain, t_oop, load, phase);
 209   bool is_instance = t_oop->is_known_instance_field();
 210   PhaseIterGVN *igvn = phase->is_IterGVN();
 211   if (is_instance && igvn != NULL && result->is_Phi()) {

 691       }
 692       // Found an arraycopy that may affect that load
 693       return mem;
 694     } else if (addr_t != NULL && addr_t->is_known_instance_field()) {
 695       // Can't use optimize_simple_memory_chain() since it needs PhaseGVN.
 696       if (mem->is_Proj() && mem->in(0)->is_Call()) {
 697         // ArrayCopyNodes processed here as well.
 698         CallNode *call = mem->in(0)->as_Call();
 699         if (!call->may_modify(addr_t, phase)) {
 700           mem = call->in(TypeFunc::Memory);
 701           continue;         // (a) advance through independent call memory
 702         }
 703       } else if (mem->is_Proj() && mem->in(0)->is_MemBar()) {
 704         ArrayCopyNode* ac = NULL;
 705         if (ArrayCopyNode::may_modify(addr_t, mem->in(0)->as_MemBar(), phase, ac)) {
 706           break;
 707         }
 708         mem = mem->in(0)->in(TypeFunc::Memory);
 709         continue;           // (a) advance through independent MemBar memory
 710       } else if (mem->is_ClearArray()) {
 711         if (ClearArrayNode::step_through(&mem, (uint)addr_t->instance_id(), phase)) {

 712           // (the call updated 'mem' value)
 713           continue;         // (a) advance through independent allocation memory
 714         } else {
 715           // Can not bypass initialization of the instance
 716           // we are looking for.
 717           return mem;
 718         }
 719       } else if (mem->is_MergeMem()) {
 720         int alias_idx = phase->C->get_alias_index(adr_type());
 721         mem = mem->as_MergeMem()->memory_at(alias_idx);
 722         continue;           // (a) advance through independent MergeMem memory
 723       }
 724     }
 725 
 726     // Unless there is an explicit 'continue', we must bail out here,
 727     // because 'mem' is an inscrutable memory state (e.g., a call).
 728     break;
 729   }
 730 
 731   return NULL;              // bail out

src/hotspot/share/opto/memnode.cpp (new)

 171         }
 172         if (is_instance) {
 173           result = proj_in->in(TypeFunc::Memory);
 174         } else if (is_boxed_value_load) {
 175           Node* klass = alloc->in(AllocateNode::KlassNode);
 176           const TypeKlassPtr* tklass = phase->type(klass)->is_klassptr();
 177           if (tklass->klass_is_exact() && !tklass->klass()->equals(t_oop->klass())) {
 178             result = proj_in->in(TypeFunc::Memory); // not related allocation
 179           }
 180         }
 181       } else if (proj_in->is_MemBar()) {
 182         ArrayCopyNode* ac = NULL;
 183         if (ArrayCopyNode::may_modify(t_oop, proj_in->as_MemBar(), phase, ac)) {
 184           break;
 185         }
 186         result = proj_in->in(TypeFunc::Memory);
 187       } else {
 188         assert(false, "unexpected projection");
 189       }
 190     } else if (result->is_ClearArray()) {
 191       intptr_t offset;
 192       AllocateNode* alloc = AllocateNode::Ideal_allocation(result->in(3), phase, offset);
 193 
 194       if (!is_instance || (alloc == NULL) || !ClearArrayNode::step_through(&result, instance_id, phase)) {
 195         // Can not bypass initialization of the instance
 196         // we are looking for.
 197         break;
 198       }
 199       // Otherwise skip it (the call updated 'result' value).
 200     } else if (result->is_MergeMem()) {
 201       result = step_through_mergemem(phase, result->as_MergeMem(), t_oop, NULL, tty);
 202     }
 203   }
 204   return result;
 205 }
 206 
 207 Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase) {
 208   const TypeOopPtr* t_oop = t_adr->isa_oopptr();
 209   if (t_oop == NULL)
 210     return mchain;  // don't try to optimize non-oop types
 211   Node* result = optimize_simple_memory_chain(mchain, t_oop, load, phase);
 212   bool is_instance = t_oop->is_known_instance_field();
 213   PhaseIterGVN *igvn = phase->is_IterGVN();
 214   if (is_instance && igvn != NULL && result->is_Phi()) {

 694       }
 695       // Found an arraycopy that may affect that load
 696       return mem;
 697     } else if (addr_t != NULL && addr_t->is_known_instance_field()) {
 698       // Can't use optimize_simple_memory_chain() since it needs PhaseGVN.
 699       if (mem->is_Proj() && mem->in(0)->is_Call()) {
 700         // ArrayCopyNodes processed here as well.
 701         CallNode *call = mem->in(0)->as_Call();
 702         if (!call->may_modify(addr_t, phase)) {
 703           mem = call->in(TypeFunc::Memory);
 704           continue;         // (a) advance through independent call memory
 705         }
 706       } else if (mem->is_Proj() && mem->in(0)->is_MemBar()) {
 707         ArrayCopyNode* ac = NULL;
 708         if (ArrayCopyNode::may_modify(addr_t, mem->in(0)->as_MemBar(), phase, ac)) {
 709           break;
 710         }
 711         mem = mem->in(0)->in(TypeFunc::Memory);
 712         continue;           // (a) advance through independent MemBar memory
 713       } else if (mem->is_ClearArray()) {
 714         intptr_t offset;
 715         AllocateNode* alloc = AllocateNode::Ideal_allocation(mem->in(3), phase, offset);
 716 
 717         if ((alloc != NULL) && ClearArrayNode::step_through(&mem, (uint)addr_t->instance_id(), phase)) {
 718           // (the call updated 'mem' value)
 719           continue;         // (a) advance through independent allocation memory
 720         } else {
 721           // Can not bypass initialization of the instance
 722           // we are looking for.
 723           return mem;
 724         }
 725       } else if (mem->is_MergeMem()) {
 726         int alias_idx = phase->C->get_alias_index(adr_type());
 727         mem = mem->as_MergeMem()->memory_at(alias_idx);
 728         continue;           // (a) advance through independent MergeMem memory
 729       }
 730     }
 731 
 732     // Unless there is an explicit 'continue', we must bail out here,
 733     // because 'mem' is an inscrutable memory state (e.g., a call).
 734     break;
 735   }
 736 
 737   return NULL;              // bail out