changeset 52228:20636453ea76 lworld

8206141: [lworld] Improve accessing a flattened value type array passed as Object[]
Reviewed-by: thartmann
author roland
date Fri, 05 Oct 2018 10:30:21 +0200
parents 3562e8a980d4
children 0cafe4547a4f
files src/hotspot/share/gc/shared/c2/barrierSetC2.cpp src/hotspot/share/gc/shared/c2/barrierSetC2.hpp src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.hpp src/hotspot/share/opto/arraycopynode.cpp src/hotspot/share/opto/escape.cpp src/hotspot/share/opto/graphKit.cpp src/hotspot/share/opto/graphKit.hpp src/hotspot/share/opto/library_call.cpp src/hotspot/share/opto/parse2.cpp src/hotspot/share/opto/parseHelper.cpp src/hotspot/share/opto/runtime.cpp src/hotspot/share/opto/runtime.hpp src/hotspot/share/opto/valuetypenode.cpp test/hotspot/jtreg/compiler/valhalla/valuetypes/TestIntrinsics.java test/hotspot/jtreg/compiler/valhalla/valuetypes/TestLWorld.java
diffstat 16 files changed, 354 insertions(+), 135 deletions(-)
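The scenario this change targets, sketched at the Java level (MyValue1 is the value type used by the tests below; the snippet itself is illustrative and not part of the changeset): a flattened value type array is passed around as Object[], so at an aaload/aastore C2 can no longer tell statically whether the element storage is flattened and must emit a runtime layout check.

    MyValue1[] va = new MyValue1[1];  // may use a flattened layout (ValueArrayFlatten)
    Object[] oa = va;                 // element layout now statically unknown to C2
    Object o = oa[0];                 // aaload: must buffer the element if flattened
    oa[0] = o;                        // aastore: must copy the value into the flattened slot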
--- a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp	Fri Oct 05 10:12:54 2018 +0200
+++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp	Fri Oct 05 10:30:21 2018 +0200
@@ -562,33 +562,15 @@
   return atomic_add_at_resolved(access, new_val, value_type);
 }
 
-void BarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const {
-  // Exclude the header but include array length to copy by 8 bytes words.
-  // Can't use base_offset_in_bytes(bt) since basic type is unknown.
-  int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
-                            instanceOopDesc::base_offset_in_bytes();
-  // base_off:
-  // 8  - 32-bit VM
-  // 12 - 64-bit VM, compressed klass
-  // 16 - 64-bit VM, normal klass
-  if (base_off % BytesPerLong != 0) {
-    assert(UseCompressedClassPointers, "");
-    if (is_array) {
-      // Exclude length to copy by 8 bytes words.
-      base_off += sizeof(int);
-    } else {
-      // Include klass to copy by 8 bytes words.
-      base_off = instanceOopDesc::klass_offset_in_bytes();
-    }
-    assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
-  }
-  Node* src_base  = kit->basic_plus_adr(src,  base_off);
-  Node* dst_base = kit->basic_plus_adr(dst, base_off);
-
-  // Compute the length also, if needed:
-  Node* countx = size;
-  countx = kit->gvn().transform(new SubXNode(countx, kit->MakeConX(base_off)));
-  countx = kit->gvn().transform(new URShiftXNode(countx, kit->intcon(LogBytesPerLong) ));
+void BarrierSetC2::clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* countx, bool is_array) const {
+#ifdef ASSERT
+  intptr_t src_offset;
+  Node* src = AddPNode::Ideal_base_and_offset(src_base, &kit->gvn(), src_offset);
+  intptr_t dst_offset;
+  Node* dst = AddPNode::Ideal_base_and_offset(dst_base, &kit->gvn(), dst_offset);
+  assert(src == NULL || (src_offset % BytesPerLong == 0), "expect 8 bytes alignment");
+  assert(dst == NULL || (dst_offset % BytesPerLong == 0), "expect 8 bytes alignment");
+#endif
 
   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
 
--- a/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp	Fri Oct 05 10:12:54 2018 +0200
+++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp	Fri Oct 05 10:30:21 2018 +0200
@@ -188,7 +188,7 @@
   virtual Node* atomic_xchg_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const;
   virtual Node* atomic_add_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const;
 
-  virtual void clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const;
+  virtual void clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* countx, bool is_array) const;
 
   // These are general helper methods used by C2
   virtual bool array_copy_requires_gc_barriers(BasicType type) const { return false; }
--- a/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp	Fri Oct 05 10:12:54 2018 +0200
+++ b/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp	Fri Oct 05 10:30:21 2018 +0200
@@ -135,8 +135,8 @@
   kit->final_sync(ideal);
 }
 
-void CardTableBarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const {
-  BarrierSetC2::clone(kit, src, dst, size, is_array);
+void CardTableBarrierSetC2::clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* countx, bool is_array) const {
+  BarrierSetC2::clone(kit, src_base, dst_base, countx, is_array);
   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
 
   // If necessary, emit some card marks afterwards.  (Non-arrays only.)
@@ -149,6 +149,9 @@
     Node* no_particular_value = NULL;
     Node* no_particular_field = NULL;
     int raw_adr_idx = Compile::AliasIdxRaw;
+    intptr_t unused_offset;
+    Node* dst = AddPNode::Ideal_base_and_offset(dst_base, &kit->gvn(), unused_offset);
+    assert(dst != NULL, "dst_base not an Addp");
     post_barrier(kit, kit->control(),
                  kit->memory(raw_adr_type),
                  dst,
--- a/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.hpp	Fri Oct 05 10:12:54 2018 +0200
+++ b/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.hpp	Fri Oct 05 10:30:21 2018 +0200
@@ -42,7 +42,7 @@
   Node* byte_map_base_node(GraphKit* kit) const;
 
 public:
-  virtual void clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const;
+  virtual void clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* countx, bool is_array) const;
   virtual bool is_gc_barrier_node(Node* node) const;
   virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const;
   virtual bool array_copy_requires_gc_barriers(BasicType type) const;
--- a/src/hotspot/share/opto/arraycopynode.cpp	Fri Oct 05 10:12:54 2018 +0200
+++ b/src/hotspot/share/opto/arraycopynode.cpp	Fri Oct 05 10:30:21 2018 +0200
@@ -585,6 +585,17 @@
     return NULL;
   }
 
+  Node* src = in(ArrayCopyNode::Src);
+  Node* dest = in(ArrayCopyNode::Dest);
+  const Type* src_type = phase->type(src);
+  const Type* dest_type = phase->type(dest);
+
+  if (src_type->isa_aryptr() && dest_type->isa_instptr()) {
+    // clone used for load of unknown value type can't be optimized at
+    // this point
+    return NULL;
+  }
+
   Node* mem = try_clone_instance(phase, can_reshape, count);
   if (mem != NULL) {
     return (mem == NodeSentinel) ? NULL : mem;
@@ -622,8 +633,6 @@
   new_map->set_memory(MergeMemNode::make(in(TypeFunc::Memory)));
   new_map->set_i_o(in(TypeFunc::I_O));
 
-  Node* src = in(ArrayCopyNode::Src);
-  Node* dest = in(ArrayCopyNode::Dest);
   const TypeAryPtr* atp_src = get_address_type(phase, src);
   const TypeAryPtr* atp_dest = get_address_type(phase, dest);
   uint alias_idx_src = phase->C->get_alias_index(atp_src);
--- a/src/hotspot/share/opto/escape.cpp	Fri Oct 05 10:12:54 2018 +0200
+++ b/src/hotspot/share/opto/escape.cpp	Fri Oct 05 10:30:21 2018 +0200
@@ -1047,7 +1047,9 @@
                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
-                  strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0)
+                  strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
+                  strcmp(call->as_CallLeaf()->_name, "load_unknown_value") == 0 ||
+                  strcmp(call->as_CallLeaf()->_name, "store_unknown_value") == 0)
                  ))) {
             call->dump();
             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
--- a/src/hotspot/share/opto/graphKit.cpp	Fri Oct 05 10:12:54 2018 +0200
+++ b/src/hotspot/share/opto/graphKit.cpp	Fri Oct 05 10:30:21 2018 +0200
@@ -1722,9 +1722,9 @@
   }
 }
 
-void GraphKit::access_clone(Node* ctl, Node* src, Node* dst, Node* size, bool is_array) {
+void GraphKit::access_clone(Node* ctl, Node* src_base, Node* dst_base, Node* countx, bool is_array) {
   set_control(ctl);
-  return _barrier_set->clone(this, src, dst, size, is_array);
+  return _barrier_set->clone(this, src_base, dst_base, countx, is_array);
 }
 
 //-------------------------array_element_address-------------------------
@@ -3347,14 +3347,12 @@
 }
 
 // Deoptimize if 'ary' is flattened or if 'obj' is null and 'ary' is a value type array
-void GraphKit::gen_value_type_array_guard(Node* ary, Node* obj, Node* elem_klass) {
+void GraphKit::gen_value_type_array_guard(Node* ary, Node* obj, int nargs) {
   assert(EnableValhalla, "should only be used if value types are enabled");
-  if (elem_klass == NULL) {
-    // Load array element klass
-    Node* kls = load_object_klass(ary);
-    Node* k_adr = basic_plus_adr(kls, in_bytes(ArrayKlass::element_klass_offset()));
-    elem_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
-  }
+  // Load array element klass
+  Node* kls = load_object_klass(ary);
+  Node* k_adr = basic_plus_adr(kls, in_bytes(ArrayKlass::element_klass_offset()));
+  Node* elem_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
   // Check if element is a value type
   Node* flags_addr = basic_plus_adr(elem_klass, in_bytes(Klass::access_flags_offset()));
   Node* flags = make_load(NULL, flags_addr, TypeInt::INT, T_INT, MemNode::unordered);
@@ -3367,51 +3365,21 @@
     Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
     { BuildCutout unless(this, bol, PROB_MAX);
       // TODO just deoptimize for now if we store null to a value type array
+      inc_sp(nargs);
       uncommon_trap(Deoptimization::Reason_array_check,
                     Deoptimization::Action_none);
     }
   } else {
-    // Check if array is flattened or if we are storing null to a value type array
-    // TODO can we merge these checks?
-    gen_flattened_array_guard(ary);
-    if (objtype->meet(TypePtr::NULL_PTR) == objtype) {
-      // Check if (is_value_elem && obj_is_null) <=> (!is_value_elem | !obj_is_null == 0)
-      // TODO what if we later figure out that obj is never null?
-      Node* not_value = _gvn.transform(new XorINode(is_value_elem, intcon(JVM_ACC_VALUE)));
-      not_value = _gvn.transform(new ConvI2LNode(not_value));
-      Node* not_null = _gvn.transform(new CastP2XNode(NULL, obj));
-      Node* both = _gvn.transform(new OrLNode(not_null, not_value));
-      Node* cmp  = _gvn.transform(new CmpLNode(both, longcon(0)));
-      Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
-      { BuildCutout unless(this, bol, PROB_MAX);
-        // TODO just deoptimize for now if we store null to a value type array
-        uncommon_trap(Deoptimization::Reason_array_check,
-                      Deoptimization::Action_none);
-      }
-    }
-  }
-}
-
-Node* GraphKit::gen_lh_array_test(Node* kls, unsigned int lh_value) {
-  Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset()));
-  Node* layout_val = make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
-  layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
-  Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(lh_value)));
-  return cmp;
-}
-
-
-// Deoptimize if 'ary' is a flattened value type array
-void GraphKit::gen_flattened_array_guard(Node* ary, int nargs) {
-  assert(EnableValhalla, "should only be used if value types are enabled");
-  if (ValueArrayFlatten) {
-    // Cannot statically determine if array is flattened, emit runtime check
-    Node* kls = load_object_klass(ary);
-    Node* cmp = gen_lh_array_test(kls, Klass::_lh_array_tag_vt_value);
-    Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
-
+    // Check if (is_value_elem && obj_is_null) <=> (!is_value_elem | !obj_is_null == 0)
+    // TODO what if we later figure out that obj is never null?
+    Node* not_value = _gvn.transform(new XorINode(is_value_elem, intcon(JVM_ACC_VALUE)));
+    not_value = _gvn.transform(new ConvI2LNode(not_value));
+    Node* not_null = _gvn.transform(new CastP2XNode(NULL, obj));
+    Node* both = _gvn.transform(new OrLNode(not_null, not_value));
+    Node* cmp  = _gvn.transform(new CmpLNode(both, longcon(0)));
+    Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
     { BuildCutout unless(this, bol, PROB_MAX);
-      // TODO just deoptimize for now if value type array is flattened
+      // TODO just deoptimize for now if we store null to a value type array
       inc_sp(nargs);
       uncommon_trap(Deoptimization::Reason_array_check,
                     Deoptimization::Action_none);
@@ -3419,6 +3387,20 @@
   }
 }
 
+Node* GraphKit::load_lh_array_tag(Node* kls) {
+  Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset()));
+  Node* layout_val = make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
+  return _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
+}
+
+
+Node* GraphKit::gen_lh_array_test(Node* kls, unsigned int lh_value) {
+  Node* layout_val = load_lh_array_tag(kls);
+  Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(lh_value)));
+  return cmp;
+}
+
+
 //------------------------------next_monitor-----------------------------------
 // What number should be given to the next monitor?
 int GraphKit::next_monitor() {
--- a/src/hotspot/share/opto/graphKit.hpp	Fri Oct 05 10:12:54 2018 +0200
+++ b/src/hotspot/share/opto/graphKit.hpp	Fri Oct 05 10:30:21 2018 +0200
@@ -644,7 +644,7 @@
                              BasicType bt,
                              DecoratorSet decorators);
 
-  void access_clone(Node* ctl, Node* src, Node* dst, Node* size, bool is_array);
+  void access_clone(Node* ctl, Node* src_base, Node* dst_base, Node* countx, bool is_array);
 
   // Return addressing for an array element.
   Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
@@ -831,8 +831,8 @@
   Node* is_always_locked(Node* obj);
   Node* gen_value_type_test(Node* kls);
   void gen_value_type_guard(Node* obj, int nargs = 0);
-  void gen_value_type_array_guard(Node* ary, Node* obj, Node* elem_klass = NULL);
-  void gen_flattened_array_guard(Node* ary, int nargs = 0);
+  void gen_value_type_array_guard(Node* ary, Node* obj, int nargs);
+  Node* load_lh_array_tag(Node* kls);
   Node* gen_lh_array_test(Node* kls, unsigned int lh_value);
 
   Node* gen_subtype_check(Node* subklass, Node* superklass) {
--- a/src/hotspot/share/opto/library_call.cpp	Fri Oct 05 10:12:54 2018 +0200
+++ b/src/hotspot/share/opto/library_call.cpp	Fri Oct 05 10:30:21 2018 +0200
@@ -4318,7 +4318,34 @@
   // TODO: generate fields copies for small objects instead.
   Node* size = _gvn.transform(obj_size);
 
-  access_clone(control(), obj, alloc_obj, size, is_array);
+  // Exclude the header but include array length to copy by 8 bytes words.
+  // Can't use base_offset_in_bytes(bt) since basic type is unknown.
+  int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
+                            instanceOopDesc::base_offset_in_bytes();
+  // base_off:
+  // 8  - 32-bit VM
+  // 12 - 64-bit VM, compressed klass
+  // 16 - 64-bit VM, normal klass
+  if (base_off % BytesPerLong != 0) {
+    assert(UseCompressedClassPointers, "");
+    if (is_array) {
+      // Exclude length to copy by 8 bytes words.
+      base_off += sizeof(int);
+    } else {
+      // Include klass to copy by 8 bytes words.
+      base_off = instanceOopDesc::klass_offset_in_bytes();
+    }
+    assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
+  }
+  Node* src_base  = basic_plus_adr(obj,  base_off);
+  Node* dst_base = basic_plus_adr(alloc_obj, base_off);
+
+  // Compute the length also, if needed:
+  Node* countx = size;
+  countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off)));
+  countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong)));
+
+  access_clone(control(), src_base, dst_base, countx, is_array);
 
   // Do not let reads from the cloned object float above the arraycopy.
   if (alloc != NULL) {
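A worked example of the word-count arithmetic moved into library_call.cpp above, as a minimal Java sketch (it assumes a 64-bit VM with compressed class pointers and hard-codes the resulting offsets; nothing here is read from the VM and the method name is made up for illustration):

    // Returns the number of 8-byte words access_clone will copy.
    static long cloneWordCount(long sizeInBytes, boolean isArray) {
        int baseOff = 12;                    // base_offset / length_offset_in_bytes() in this configuration
        if (baseOff % 8 != 0) {              // 12 is not 8-byte aligned
            baseOff = isArray ? baseOff + 4  // array: also exclude the length field -> 16
                              : 8;           // instance: start at the (narrow) klass word instead
        }
        return (sizeInBytes - baseOff) >>> 3;
    }
    // cloneWordCount(32, false) == 3 words; cloneWordCount(32, true) == 2 words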
--- a/src/hotspot/share/opto/parse2.cpp	Fri Oct 05 10:12:54 2018 +0200
+++ b/src/hotspot/share/opto/parse2.cpp	Fri Oct 05 10:30:21 2018 +0200
@@ -63,6 +63,7 @@
 
   // Handle value type arrays
   const TypeOopPtr* elemptr = elemtype->make_oopptr();
+  const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
   if (elemtype->isa_valuetype() != NULL) {
     // Load from flattened value type array
     ciValueKlass* vk = elemtype->is_valuetype()->value_klass();
@@ -73,15 +74,79 @@
     // Load from non-flattened value type array (elements can never be null)
     bt = T_VALUETYPE;
     assert(elemptr->meet(TypePtr::NULL_PTR) != elemptr, "value type array elements should never be null");
-  } else if (ValueArrayFlatten && elemptr != NULL && elemptr->can_be_value_type()) {
+  } else if (ValueArrayFlatten && elemptr != NULL && elemptr->can_be_value_type() &&
+             !ary_t->klass_is_exact()) {
     // Cannot statically determine if array is flattened, emit runtime check
-    gen_flattened_array_guard(ary, 2);
+    IdealKit ideal(this);
+    IdealVariable res(ideal);
+    ideal.declarations_done();
+    Node* kls = load_object_klass(ary);
+    Node* tag = load_lh_array_tag(kls);
+    ideal.if_then(tag, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); {
+      // non flattened
+      sync_kit(ideal);
+      const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
+      elemtype = ary_t->elem()->make_oopptr();
+      Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt,
+                                IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
+      ideal.sync_kit(this);
+      ideal.set(res, ld);
+    } ideal.else_(); {
+      // flattened
+      sync_kit(ideal);
+      Node* k_adr = basic_plus_adr(kls, in_bytes(ArrayKlass::element_klass_offset()));
+      Node* elem_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
+      Node* obj_size  = NULL;
+      kill_dead_locals();
+      inc_sp(2);
+      Node* alloc_obj = new_instance(elem_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true);
+      dec_sp(2);
+
+      AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
+      assert(alloc->maybe_set_complete(&_gvn), "");
+      alloc->initialization()->set_complete_with_arraycopy();
+      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+      // Unknown value type so might have reference fields
+      if (!bs->array_copy_requires_gc_barriers(T_OBJECT)) {
+        int base_off = sizeof(instanceOopDesc);
+        Node* dst_base = basic_plus_adr(alloc_obj, base_off);
+        Node* countx = obj_size;
+        countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off)));
+        countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong)));
+
+        assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
+        Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset()));
+        Node* elem_shift = make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
+        uint header = arrayOopDesc::base_offset_in_bytes(T_VALUETYPE);
+        Node* base  = basic_plus_adr(ary, header);
+        idx = Compile::conv_I2X_index(&_gvn, idx, TypeInt::POS, control());
+        Node* scale = _gvn.transform(new LShiftXNode(idx, elem_shift));
+        Node* adr = basic_plus_adr(ary, base, scale);
+
+        access_clone(control(), adr, dst_base, countx, false);
+      } else {
+        ideal.sync_kit(this);
+        ideal.make_leaf_call(OptoRuntime::load_unknown_value_Type(),
+                             CAST_FROM_FN_PTR(address, OptoRuntime::load_unknown_value),
+                             "load_unknown_value",
+                             ary, idx, alloc_obj);
+        sync_kit(ideal);
+      }
+
+      insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
+
+      ideal.sync_kit(this);
+      ideal.set(res, alloc_obj);
+    } ideal.end_if();
+    sync_kit(ideal);
+    push_node(bt, ideal.value(res));
+    return;
   }
 
   if (elemtype == TypeInt::BOOL) {
     bt = T_BOOLEAN;
   } else if (bt == T_OBJECT) {
-    elemtype = _gvn.type(ary)->is_aryptr()->elem()->make_oopptr();
+    elemtype = ary_t->elem()->make_oopptr();
   }
 
   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
@@ -111,48 +176,95 @@
   Node* idx = pop();        // Index in the array
   Node* ary = pop();        // The array itself
 
+  const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
   if (bt == T_OBJECT) {
     const TypeOopPtr* elemptr = elemtype->make_oopptr();
+    const Type* val_t = _gvn.type(val);
     if (elemtype->isa_valuetype() != NULL) {
       // Store to flattened value type array
+      if (!val->is_ValueType() && val_t == TypePtr::NULL_PTR) {
+        // Can not store null into a value type array
+        inc_sp(3);
+        uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
+        return;
+      }
       cast_val->as_ValueType()->store_flattened(this, ary, adr);
       return;
     } else if (elemptr->is_valuetypeptr()) {
       // Store to non-flattened value type array
-    } else if (ValueArrayFlatten && elemptr->can_be_value_type() && val->is_ValueType()) {
-      IdealKit ideal(this);
-      Node* kls = load_object_klass(ary);
-      Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset()));
-      Node* layout_val = make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
-      layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
-      ideal.if_then(layout_val, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); {
-        // non flattened
+      if (!val->is_ValueType() && val_t == TypePtr::NULL_PTR) {
+        // Can not store null into a value type array
+        inc_sp(3);
+        uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
+        return;
+      }
+    } else if (elemptr->can_be_value_type() && !ary_t->klass_is_exact() &&
+               (val->is_ValueType() || val_t == TypePtr::NULL_PTR || val_t->is_oopptr()->can_be_value_type())) {
+      if (ValueArrayFlatten) {
+        IdealKit ideal(this);
+        Node* kls = load_object_klass(ary);
+        Node* layout_val = load_lh_array_tag(kls);
+        ideal.if_then(layout_val, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); {
+          // non flattened
+          sync_kit(ideal);
+
+          if (!val->is_ValueType() && TypePtr::NULL_PTR->higher_equal(val_t)) {
+            gen_value_type_array_guard(ary, val, 3);
+          }
+
+          const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
+          elemtype = ary_t->elem()->make_oopptr();
+          access_store_at(control(), ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
+          ideal.sync_kit(this);
+        } ideal.else_(); {
+          // flattened
+          // Object/interface array must be flattened, cast it
+          if (val->is_ValueType()) {
+            sync_kit(ideal);
+            const TypeValueType* vt = _gvn.type(val)->is_valuetype();
+            ciArrayKlass* array_klass = ciArrayKlass::make(vt->value_klass());
+            const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
+            ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
+            adr = array_element_address(ary, idx, T_OBJECT, arytype->size(), control());
+            val->as_ValueType()->store_flattened(this, ary, adr);
+            ideal.sync_kit(this);
+          } else {
+            if (TypePtr::NULL_PTR->higher_equal(val_t)) {
+              sync_kit(ideal);
+              Node* null_ctl = top();
+              val = null_check_oop(val, &null_ctl);
+              {
+                assert(null_ctl != top(), "expected to possibly be null");
+                PreserveJVMState pjvms(this);
+                set_control(null_ctl);
+                inc_sp(3);
+                uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
+              }
+              ideal.sync_kit(this);
+            }
+
+            if (!ideal.ctrl()->is_top()) {
+              ideal.make_leaf_call(OptoRuntime::store_unknown_value_Type(),
+                                   CAST_FROM_FN_PTR(address, OptoRuntime::store_unknown_value),
+                                   "store_unknown_value",
+                                   val, ary, idx);
+            }
+          }
+        } ideal.end_if();
         sync_kit(ideal);
-        const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
-        elemtype = _gvn.type(ary)->is_aryptr()->elem()->make_oopptr();
-        access_store_at(control(), ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
-        ideal.sync_kit(this);
-      } ideal.else_(); {
-        // flattened
-        sync_kit(ideal);
-        // Object/interface array must be flattened, cast it
-        const TypeValueType* vt = _gvn.type(val)->is_valuetype();
-        ciArrayKlass* array_klass = ciArrayKlass::make(vt->value_klass());
-        const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
-        ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
-        adr = array_element_address(ary, idx, T_OBJECT, arytype->size(), control());
-        val->as_ValueType()->store_flattened(this, ary, adr);
-        ideal.sync_kit(this);
-      } ideal.end_if();
-      sync_kit(ideal);
-      return;
+        return;
+      } else {
+        if (!val->is_ValueType() && TypePtr::NULL_PTR->higher_equal(val_t)) {
+          gen_value_type_array_guard(ary, val, 3);
+        }
+      }
     }
   }
 
   if (elemtype == TypeInt::BOOL) {
     bt = T_BOOLEAN;
   } else if (bt == T_OBJECT) {
-    elemtype = _gvn.type(ary)->is_aryptr()->elem()->make_oopptr();
+    elemtype = ary_t->elem()->make_oopptr();
   }
 
   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
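For orientation, the aaload/aastore paths emitted above behave roughly like the following Java-level sketch. The four abstract hooks are hypothetical stand-ins (for the layout-helper tag test, the new_instance of the element klass, and the load_unknown_value/store_unknown_value leaf calls); the real compiled code deoptimizes rather than throwing, and it guards non-flattened stores with gen_value_type_array_guard when the value may be null.

    abstract class FlattenedArrayAccessSketch {
        abstract boolean isFlattened(Object[] ary);
        abstract Object  newBufferForElementOf(Object[] ary);
        abstract void    loadUnknownValue(Object[] ary, int idx, Object buffer);
        abstract void    storeUnknownValue(Object val, Object[] ary, int idx);

        Object aaload(Object[] ary, int idx) {
            if (!isFlattened(ary)) {
                return ary[idx];                  // ordinary oop load
            }
            Object buf = newBufferForElementOf(ary);
            loadUnknownValue(ary, idx, buf);      // copy the flattened element into the buffer
            return buf;                           // published after a MemBarStoreStore
        }

        void aastore(Object[] ary, int idx, Object val) {
            if (!isFlattened(ary)) {
                ary[idx] = val;                   // ordinary oop store
            } else if (val == null) {
                throw new NullPointerException(); // compiled code deoptimizes here instead
            } else {
                storeUnknownValue(val, ary, idx); // copy val's fields into the flattened slot
            }
        }
    }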
--- a/src/hotspot/share/opto/parseHelper.cpp	Fri Oct 05 10:12:54 2018 +0200
+++ b/src/hotspot/share/opto/parseHelper.cpp	Fri Oct 05 10:30:21 2018 +0200
@@ -149,25 +149,16 @@
   Node *idx = peek(1);
   Node *ary = peek(2);
 
-  const Type* elemtype = _gvn.type(ary)->is_aryptr()->elem();
+  const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
+  const Type* elemtype = ary_t->elem();
   const TypeOopPtr* elemptr = elemtype->make_oopptr();
   bool is_value_array = elemtype->isa_valuetype() != NULL || (elemptr != NULL && elemptr->is_valuetypeptr());
-  bool can_be_value_array = is_value_array || (elemptr != NULL && (elemptr->can_be_value_type()));
 
   if (_gvn.type(obj) == TypePtr::NULL_PTR) {
     // There's never a type check on null values.
     // This cutout lets us avoid the uncommon_trap(Reason_array_check)
     // below, which turns into a performance liability if the
     // gen_checkcast folds up completely.
-    if (is_value_array) {
-      // Can not store null into a value type array
-      uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
-      return obj;
-    } else if (can_be_value_array) {
-      // Throw exception if array is a value type array
-      gen_value_type_array_guard(ary, zerocon(T_OBJECT));
-      return obj;
-    }
     return obj;
   }
 
@@ -252,10 +243,6 @@
     ciValueKlass* vk = elemtype->isa_valuetype() ? elemtype->is_valuetype()->value_klass() :
                                                    elemptr->value_klass();
     a_e_klass = makecon(TypeKlassPtr::make(vk));
-  } else if (can_be_value_array && !obj->is_ValueType() && _gvn.type(obj)->is_oopptr()->can_be_value_type()) {
-    // We cannot statically determine if the array is a value type array
-    // and we also don't know if 'obj' is a value type. Emit runtime checks.
-    gen_value_type_array_guard(ary, obj, a_e_klass);
   }
 
   // Check (the hard way) and throw if not a subklass.
--- a/src/hotspot/share/opto/runtime.cpp	Fri Oct 05 10:12:54 2018 +0200
+++ b/src/hotspot/share/opto/runtime.cpp	Fri Oct 05 10:30:21 2018 +0200
@@ -50,6 +50,7 @@
 #include "oops/oop.inline.hpp"
 #include "oops/typeArrayOop.inline.hpp"
 #include "oops/valueArrayKlass.hpp"
+#include "oops/valueArrayOop.inline.hpp"
 #include "opto/ad.hpp"
 #include "opto/addnode.hpp"
 #include "opto/callnode.hpp"
@@ -1719,3 +1720,69 @@
 
   return TypeFunc::make(domain, range);
 }
+
+JRT_LEAF(void, OptoRuntime::load_unknown_value(valueArrayOopDesc* array, int index, instanceOopDesc* buffer))
+{
+  Klass* klass = array->klass();
+  assert(klass->is_valueArray_klass(), "expected value array oop");
+
+  ValueArrayKlass* vaklass = ValueArrayKlass::cast(klass);
+  ValueKlass* vklass = vaklass->element_klass();
+  void* src = array->value_at_addr(index, vaklass->layout_helper());
+  vklass->value_store(src, vklass->data_for_oop(buffer),
+                        vaklass->element_byte_size(), true, false);
+}
+JRT_END
+
+const TypeFunc *OptoRuntime::load_unknown_value_Type() {
+  // create input type (domain)
+  const Type **fields = TypeTuple::fields(3);
+  // We don't know the number of returned values and their
+  // types. Assume all registers available to the return convention
+  // are used.
+  fields[TypeFunc::Parms] = TypeOopPtr::NOTNULL;
+  fields[TypeFunc::Parms+1] = TypeInt::POS;
+  fields[TypeFunc::Parms+2] = TypeInstPtr::NOTNULL;
+
+  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+3, fields);
+
+  // create result type (range)
+  fields = TypeTuple::fields(0);
+  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
+
+  return TypeFunc::make(domain, range);
+}
+
+JRT_LEAF(void, OptoRuntime::store_unknown_value(instanceOopDesc* buffer, valueArrayOopDesc* array, int index))
+{
+  assert(buffer != NULL, "can't store null into flat array");
+  Klass* klass = array->klass();
+  assert(klass->is_valueArray_klass(), "expected value array");
+  assert(ArrayKlass::cast(klass)->element_klass() == buffer->klass(), "Store type incorrect");
+
+  ValueArrayKlass* vaklass = ValueArrayKlass::cast(klass);
+  ValueKlass* vklass = vaklass->element_klass();
+  const int lh = vaklass->layout_helper();
+  vklass->value_store(vklass->data_for_oop(buffer), array->value_at_addr(index, lh),
+                      vaklass->element_byte_size(), true, false);
+}
+JRT_END
+
+const TypeFunc *OptoRuntime::store_unknown_value_Type() {
+  // create input type (domain)
+  const Type **fields = TypeTuple::fields(3);
+  // We don't know the number of returned values and their
+  // types. Assume all registers available to the return convention
+  // are used.
+  fields[TypeFunc::Parms] = TypeInstPtr::NOTNULL;
+  fields[TypeFunc::Parms+1] = TypeOopPtr::NOTNULL;
+  fields[TypeFunc::Parms+2] = TypeInt::POS;
+
+  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+3, fields);
+
+  // create result type (range)
+  fields = TypeTuple::fields(0);
+  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
+
+  return TypeFunc::make(domain, range);
+}
--- a/src/hotspot/share/opto/runtime.hpp	Fri Oct 05 10:12:54 2018 +0200
+++ b/src/hotspot/share/opto/runtime.hpp	Fri Oct 05 10:30:21 2018 +0200
@@ -321,6 +321,11 @@
   static const TypeFunc* store_value_type_fields_Type();
   static const TypeFunc* pack_value_type_Type();
 
+  static void load_unknown_value(valueArrayOopDesc* array, int index, instanceOopDesc* buffer);
+  static const TypeFunc *load_unknown_value_Type();
+  static void store_unknown_value(instanceOopDesc* buffer, valueArrayOopDesc* array, int index);
+  static const TypeFunc *store_unknown_value_Type();
+
  private:
  static NamedCounter * volatile _named_counters;
 
--- a/src/hotspot/share/opto/valuetypenode.cpp	Fri Oct 05 10:12:54 2018 +0200
+++ b/src/hotspot/share/opto/valuetypenode.cpp	Fri Oct 05 10:30:21 2018 +0200
@@ -231,7 +231,7 @@
   Unique_Node_List worklist;
   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
     Node* u = fast_out(i);
-    if (u->is_SafePoint() && (!u->is_Call() || u->as_Call()->has_debug_use(this))) {
+    if (u->is_SafePoint() && !u->is_CallLeaf() && (!u->is_Call() || u->as_Call()->has_debug_use(this))) {
       SafePointNode* sfpt = u->as_SafePoint();
       Node* in_oop = get_oop();
       const Type* oop_type = in_oop->bottom_type();
--- a/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestIntrinsics.java	Fri Oct 05 10:12:54 2018 +0200
+++ b/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestIntrinsics.java	Fri Oct 05 10:30:21 2018 +0200
@@ -141,7 +141,7 @@
 
     // Test default value type array creation via reflection
     @Test()
-    public Object[] test7(Class<?> componentType, int len, long hash) {
+    public Object[] test7(Class<?> componentType, int len) {
         Object[] va = (Object[])Array.newInstance(componentType, len);
         return va;
     }
@@ -150,7 +150,7 @@
     public void test7_verifier(boolean warmup) {
         int len = Math.abs(rI) % 42;
         long hash = MyValue1.createDefaultDontInline().hashPrimitive();
-        Object[] va = test7(MyValue1.class, len, hash);
+        Object[] va = test7(MyValue1.class, len);
         for (int i = 0; i < len; ++i) {
             Asserts.assertEQ(((MyValue1)va[i]).hashPrimitive(), hash);
         }
--- a/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestLWorld.java	Fri Oct 05 10:12:54 2018 +0200
+++ b/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestLWorld.java	Fri Oct 05 10:30:21 2018 +0200
@@ -2270,4 +2270,47 @@
     public void test88_verifier(boolean warmup) {
         test88();
     }
+
+    // Tests for loading/storing unknown values
+    @Test
+    public Object test89(Object[] va) {
+        return va[0];
+    }
+
+    @DontCompile
+    public void test89_verifier(boolean warmup) {
+        MyValue1 vt = (MyValue1)test89(testValue1Array);
+        Asserts.assertEquals(testValue1Array[0].hash(), vt.hash());
+    }
+
+    @Test
+    public void test90(Object[] va, Object vt) {
+        va[0] = vt;
+    }
+
+    @DontCompile
+    public void test90_verifier(boolean warmup) {
+        MyValue1[] va = new MyValue1[1];
+        test90(va, testValue1);
+        Asserts.assertEquals(va[0].hash(), testValue1.hash());
+    }
+
+    // Verify that mixing instances and arrays with the clone api
+    // doesn't break anything
+    @Test
+    public Object test91(Object o) {
+        MyValue1[] va = new MyValue1[1];
+        Object[] next = va;
+        Object[] arr = va;
+        for (int i = 0; i < 10; i++) {
+            arr = next;
+            next = new Integer[1];
+        }
+        return arr[0];
+    }
+
+    @DontCompile
+    public void test91_verifier(boolean warmup) {
+        test91(42);
+    }
 }