changeset 54103:673c89f7f22a lworld

8218612: [lworld] C1 support for array covariance for aastore
Reviewed-by: thartmann
author iklam
date Thu, 07 Feb 2019 23:38:41 -0800
parents ce420c48c4b6
children 05294909ab1d
files src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp src/hotspot/cpu/x86/c1_Runtime1_x86.cpp src/hotspot/share/c1/c1_CodeStubs.hpp src/hotspot/share/c1/c1_GraphBuilder.cpp src/hotspot/share/c1/c1_LIRGenerator.cpp src/hotspot/share/c1/c1_LIRGenerator.hpp src/hotspot/share/c1/c1_Runtime1.cpp src/hotspot/share/c1/c1_Runtime1.hpp test/hotspot/jtreg/compiler/valhalla/valuetypes/TestUnloadedValueTypeArray.java
diffstat 9 files changed, 168 insertions(+), 49 deletions(-) [+]
line wrap: on
line diff
--- a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp	Thu Feb 07 14:54:22 2019 +0100
+++ b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp	Thu Feb 07 23:38:41 2019 -0800
@@ -164,6 +164,15 @@
   _info = new CodeEmitInfo(info);
 }
 
+void LoadFlattenedArrayStub::visit(LIR_OpVisitState* visitor) {
+  visitor->do_slow_case(_info);
+  visitor->do_input(_array);
+  visitor->do_input(_index);
+  visitor->do_output(_result);
+
+  // Tell the register allocator that the runtime call will scratch rax.
+  visitor->do_output(FrameMap::rax_oop_opr);
+}
 
 void LoadFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
   assert(__ rsp_offset() == 0, "frame size should be fixed");
@@ -180,6 +189,29 @@
 }
 
 
+// Implementation of StoreFlattenedArrayStub
+
+StoreFlattenedArrayStub::StoreFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr value, CodeEmitInfo* info) {
+  _array = array;
+  _index = index;
+  _value = value;
+  _info = new CodeEmitInfo(info);
+}
+
+
+void StoreFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
+  assert(__ rsp_offset() == 0, "frame size should be fixed");
+  __ bind(_entry);
+  ce->store_parameter(_array->as_register(), 2);
+  ce->store_parameter(_index->as_register(), 1);
+  ce->store_parameter(_value->as_register(), 0);
+  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::store_flattened_array_id)));
+  ce->add_call_info_here(_info);
+  ce->verify_oop_map(_info);
+  __ jmp(_continuation);
+}
+
+
 // Implementation of NewInstanceStub
 
 NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
--- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp	Thu Feb 07 14:54:22 2019 +0100
+++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp	Thu Feb 07 23:38:41 2019 -0800
@@ -1238,6 +1238,24 @@
       }
       break;
 
+    case store_flattened_array_id:
+      {
+        StubFrame f(sasm, "store_flattened_array", dont_gc_arguments);
+        OopMap* map = save_live_registers(sasm, 4);
+
+        // Called with store_parameter and not C abi
+
+        f.load_argument(2, rax); // rax,: array
+        f.load_argument(1, rbx); // rbx,: index
+        f.load_argument(0, rcx); // rcx,: value
+        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, store_flattened_array), rax, rbx, rcx);
+
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, map);
+        restore_live_registers_except_rax(sasm);
+      }
+      break;
+
     case register_finalizer_id:
       {
         __ set_info("register_finalizer", dont_gc_arguments);
--- a/src/hotspot/share/c1/c1_CodeStubs.hpp	Thu Feb 07 14:54:22 2019 +0100
+++ b/src/hotspot/share/c1/c1_CodeStubs.hpp	Thu Feb 07 23:38:41 2019 -0800
@@ -231,6 +231,7 @@
 #endif // PRODUCT
 };
 
+
 class LoadFlattenedArrayStub: public CodeStub {
  private:
   LIR_Opr          _array;
@@ -242,18 +243,35 @@
   LoadFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info);
   virtual void emit_code(LIR_Assembler* e);
   virtual CodeEmitInfo* info() const             { return _info; }
-  virtual void visit(LIR_OpVisitState* visitor) {
-    visitor->do_slow_case(_info);
-    visitor->do_input(_array);
-    visitor->do_input(_index);
-    visitor->do_output(_result);
-  }
+  virtual void visit(LIR_OpVisitState* visitor);
 #ifndef PRODUCT
   virtual void print_name(outputStream* out) const { out->print("LoadFlattenedArrayStub"); }
 #endif // PRODUCT
 };
 
 
+class StoreFlattenedArrayStub: public CodeStub {
+ private:
+  LIR_Opr          _array;
+  LIR_Opr          _index;
+  LIR_Opr          _value;
+  CodeEmitInfo*    _info;
+
+ public:
+  StoreFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr value, CodeEmitInfo* info);
+  virtual void emit_code(LIR_Assembler* e);
+  virtual CodeEmitInfo* info() const             { return _info; }
+  virtual void visit(LIR_OpVisitState* visitor) {
+    visitor->do_slow_case(_info);
+    visitor->do_input(_array);
+    visitor->do_input(_index);
+    visitor->do_input(_value);
+  }
+#ifndef PRODUCT
+  virtual void print_name(outputStream* out) const { out->print("StoreFlattenedArrayStub"); }
+#endif // PRODUCT
+};
+
 
 class NewInstanceStub: public CodeStub {
  private:
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp	Thu Feb 07 14:54:22 2019 +0100
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp	Thu Feb 07 23:38:41 2019 -0800
@@ -1010,7 +1010,6 @@
 void GraphBuilder::store_indexed(BasicType type) {
   // In case of in block code motion in range check elimination
   ValueStack* state_before = copy_state_indexed_access();
-  ValueStack* deopt_state = copy_state_before();
   compilation()->set_has_access_indexed(true);
   Value value = pop(as_ValueType(type));
   Value index = ipop();
@@ -1034,10 +1033,6 @@
     check_boolean = true;
   }
 
-  if (array->is_flattened_array() && !array_type->is_loaded()) {
-    // Value array access may be deoptimized. Need full "before" states.
-    state_before = deopt_state;
-  }
   StoreIndexed* result = new StoreIndexed(array, index, length, type, value, state_before, check_boolean);
   append(result);
   _memory->store_value(value);
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp	Thu Feb 07 14:54:22 2019 +0100
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp	Thu Feb 07 23:38:41 2019 -0800
@@ -1648,25 +1648,24 @@
   }
 }
 
-void LIRGenerator::maybe_deopt_value_array_access(LIRItem& array, CodeEmitInfo* null_check_info, CodeEmitInfo* deopt_info) {
-  LIR_Opr klass = new_register(T_METADATA);
-  __ move(new LIR_Address(array.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), klass, null_check_info);
+void LIRGenerator::check_flattened_array(LIRItem& array, CodeStub* slow_path) {
+  LIR_Opr array_klass_reg = new_register(T_METADATA);
+
+  __ move(new LIR_Address(array.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), array_klass_reg);
   LIR_Opr layout = new_register(T_INT);
-  __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
+  __ move(new LIR_Address(array_klass_reg, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
   __ shift_right(layout, Klass::_lh_array_tag_shift, layout);
   __ cmp(lir_cond_equal, layout, LIR_OprFact::intConst(Klass::_lh_array_tag_vt_value));
-
-  CodeStub* stub = new DeoptimizeStub(deopt_info, Deoptimization::Reason_unloaded, Deoptimization::Action_make_not_entrant);
-  __ branch(lir_cond_equal, T_ILLEGAL, stub);
+  __ branch(lir_cond_equal, T_ILLEGAL, slow_path);
 }
 
 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
   assert(x->is_pinned(),"");
-  bool is_flattened = x->array()->is_flattened_array();
+  bool is_loaded_flattened_array = x->array()->is_loaded_flattened_array();
   bool needs_range_check = x->compute_needs_range_check();
   bool use_length = x->length() != NULL;
   bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
-  bool needs_store_check = obj_store && !is_flattened &&
+  bool needs_store_check = obj_store && !is_loaded_flattened_array &&
                                         (x->value()->as_Constant() == NULL ||
                                          !get_jobject_constant(x->value())->is_null_object() ||
                                          x->should_profile());
@@ -1682,10 +1681,10 @@
   if (use_length && needs_range_check) {
     length.set_instruction(x->length());
     length.load_item();
-
   }
 
-  if (needs_store_check || x->check_boolean() || is_flattened) {
+  if (needs_store_check || x->check_boolean()
+      || is_loaded_flattened_array || x->array()->maybe_flattened_array()) {
     value.load_item();
   } else {
     value.load_for_store(x->elt_type());
@@ -1718,25 +1717,30 @@
     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
   }
 
-  if (is_flattened) {
-    if (x->array()->declared_type()->is_loaded()) {
+  if (is_loaded_flattened_array) {
+    index.load_item();
+    access_flattened_array(false, array, index, value);
+  } else {
+    StoreFlattenedArrayStub* slow_path = NULL;
+
+    if (x->array()->maybe_flattened_array()) {
+      // Check if we indeed have a flattened array
       index.load_item();
-      access_flattened_array(false, array, index, value);
-      return;
-    } else {
-      // If the array is indeed flattened, deopt. Otherwise access it as a normal object array.
-      CodeEmitInfo* deopt_info = state_for(x, x->state_before());
-      maybe_deopt_value_array_access(array, null_check_info, deopt_info);
+      slow_path = new StoreFlattenedArrayStub(array.result(), index.result(), value.result(), state_for(x));
+      check_flattened_array(array, slow_path);
+    }
+
+    DecoratorSet decorators = IN_HEAP | IS_ARRAY;
+    if (x->check_boolean()) {
+      decorators |= C1_MASK_BOOLEAN;
+    }
+
+    access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
+                    NULL, null_check_info);
+    if (slow_path != NULL) {
+      __ branch_destination(slow_path->continuation());
     }
   }
-
-  DecoratorSet decorators = IN_HEAP | IS_ARRAY;
-  if (x->check_boolean()) {
-    decorators |= C1_MASK_BOOLEAN;
-  }
-
-  access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
-                  NULL, null_check_info);
 }
 
 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
@@ -2040,14 +2044,7 @@
     if (x->array()->maybe_flattened_array()) {
       // Check if we indeed have a flattened array
       slow_path = new LoadFlattenedArrayStub(array.result(), index.result(), result, state_for(x));
-      LIR_Opr array_klass_reg = new_register(T_METADATA);
-
-      __ move(new LIR_Address(array.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), array_klass_reg);
-      LIR_Opr layout = new_register(T_INT);
-      __ move(new LIR_Address(array_klass_reg, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
-      __ shift_right(layout, Klass::_lh_array_tag_shift, layout);
-      __ cmp(lir_cond_equal, layout, LIR_OprFact::intConst(Klass::_lh_array_tag_vt_value));
-      __ branch(lir_cond_equal, T_ILLEGAL, slow_path);
+      check_flattened_array(array, slow_path);
     }
 
     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
--- a/src/hotspot/share/c1/c1_LIRGenerator.hpp	Thu Feb 07 14:54:22 2019 +0100
+++ b/src/hotspot/share/c1/c1_LIRGenerator.hpp	Thu Feb 07 23:38:41 2019 -0800
@@ -267,7 +267,7 @@
   void do_vectorizedMismatch(Intrinsic* x);
 
   void access_flattened_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item);
-  void maybe_deopt_value_array_access(LIRItem& array, CodeEmitInfo* null_check_info, CodeEmitInfo* deopt_info);
+  void check_flattened_array(LIRItem& array, CodeStub* slow_path);
 
  public:
   LIR_Opr call_runtime(BasicTypeArray* signature, LIRItemList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
--- a/src/hotspot/share/c1/c1_Runtime1.cpp	Thu Feb 07 14:54:22 2019 +0100
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp	Thu Feb 07 23:38:41 2019 -0800
@@ -121,6 +121,7 @@
 int Runtime1::_new_instance_slowcase_cnt = 0;
 int Runtime1::_new_multi_array_slowcase_cnt = 0;
 int Runtime1::_load_flattened_array_slowcase_cnt = 0;
+int Runtime1::_store_flattened_array_slowcase_cnt = 0;
 int Runtime1::_monitorenter_slowcase_cnt = 0;
 int Runtime1::_monitorexit_slowcase_cnt = 0;
 int Runtime1::_patch_code_slowcase_cnt = 0;
@@ -416,7 +417,7 @@
 
 
 JRT_ENTRY(void, Runtime1::load_flattened_array(JavaThread* thread, valueArrayOopDesc* array, int index))
-  NOT_PRODUCT(_new_multi_array_slowcase_cnt++;)
+  NOT_PRODUCT(_load_flattened_array_slowcase_cnt++;)
   Klass* klass = array->klass();
   assert(klass->is_valueArray_klass(), "expected value array oop");
   assert(array->length() > 0 && index < array->length(), "already checked");
@@ -436,6 +437,24 @@
 JRT_END
 
 
+JRT_ENTRY(void, Runtime1::store_flattened_array(JavaThread* thread, valueArrayOopDesc* array, int index, oopDesc* value))
+  NOT_PRODUCT(_store_flattened_array_slowcase_cnt++;)
+  if (value == NULL) {
+    SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
+  } else {
+    Klass* klass = array->klass();
+    assert(klass->is_valueArray_klass(), "expected value array");
+    assert(ArrayKlass::cast(klass)->element_klass() == value->klass(), "Store type incorrect");
+
+    ValueArrayKlass* vaklass = ValueArrayKlass::cast(klass);
+    ValueKlass* vklass = vaklass->element_klass();
+    const int lh = vaklass->layout_helper();
+    vklass->value_store(vklass->data_for_oop(value), array->value_at_addr(index, lh),
+                        vaklass->element_byte_size(), true, false);
+  }
+JRT_END
+
+
 JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* thread, StubID id))
   tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);
 JRT_END
@@ -1524,7 +1543,8 @@
   tty->print_cr(" _new_object_array_slowcase_cnt:  %d", _new_object_array_slowcase_cnt);
   tty->print_cr(" _new_instance_slowcase_cnt:      %d", _new_instance_slowcase_cnt);
   tty->print_cr(" _new_multi_array_slowcase_cnt:   %d", _new_multi_array_slowcase_cnt);
-  tty->print_cr(" _load_flattened_array_slowcase_cnt:%d", _load_flattened_array_slowcase_cnt);
+  tty->print_cr(" _load_flattened_array_slowcase_cnt: %d", _load_flattened_array_slowcase_cnt);
+  tty->print_cr(" _store_flattened_array_slowcase_cnt:%d", _store_flattened_array_slowcase_cnt);
   tty->print_cr(" _monitorenter_slowcase_cnt:      %d", _monitorenter_slowcase_cnt);
   tty->print_cr(" _monitorexit_slowcase_cnt:       %d", _monitorexit_slowcase_cnt);
   tty->print_cr(" _patch_code_slowcase_cnt:        %d", _patch_code_slowcase_cnt);
--- a/src/hotspot/share/c1/c1_Runtime1.hpp	Thu Feb 07 14:54:22 2019 +0100
+++ b/src/hotspot/share/c1/c1_Runtime1.hpp	Thu Feb 07 23:38:41 2019 -0800
@@ -54,6 +54,7 @@
   stub(new_value_array)              \
   stub(new_multi_array)              \
   stub(load_flattened_array)         \
+  stub(store_flattened_array)        \
   stub(handle_exception_nofpu)         /* optimized version that does not preserve fpu registers */ \
   stub(handle_exception)             \
   stub(handle_exception_from_callee) \
@@ -110,6 +111,7 @@
   static int _new_instance_slowcase_cnt;
   static int _new_multi_array_slowcase_cnt;
   static int _load_flattened_array_slowcase_cnt;
+  static int _store_flattened_array_slowcase_cnt;
   static int _monitorenter_slowcase_cnt;
   static int _monitorexit_slowcase_cnt;
   static int _patch_code_slowcase_cnt;
@@ -148,6 +150,7 @@
   static void new_object_array(JavaThread* thread, Klass* klass, jint length);
   static void new_multi_array (JavaThread* thread, Klass* klass, int rank, jint* dims);
   static void load_flattened_array(JavaThread* thread, valueArrayOopDesc* array, int index);
+  static void store_flattened_array(JavaThread* thread, valueArrayOopDesc* array, int index, oopDesc* value);
 
   static address counter_overflow(JavaThread* thread, int bci, Method* method);
 
--- a/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestUnloadedValueTypeArray.java	Thu Feb 07 14:54:22 2019 +0100
+++ b/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestUnloadedValueTypeArray.java	Thu Feb 07 23:38:41 2019 -0800
@@ -31,6 +31,7 @@
  *        -XX:CompileCommand=compileonly,TestUnloadedValueTypeArray::test1
  *        -XX:CompileCommand=compileonly,TestUnloadedValueTypeArray::test2
  *        -XX:CompileCommand=compileonly,TestUnloadedValueTypeArray::test3
+ *        -XX:CompileCommand=compileonly,TestUnloadedValueTypeArray::test4
  *      TestUnloadedValueTypeArray
  */
 
@@ -66,7 +67,16 @@
     }
 }
 
+value final class MyValue4 {
+    final int foo;
 
+    private MyValue4() {
+        foo = 0x53;
+    }
+    static MyValue4 make(int n) {
+        return __WithField(MyValue4.default.foo, n);
+    }
+}
 
 public class TestUnloadedValueTypeArray {
 
@@ -124,9 +134,35 @@
         Asserts.assertEQ(arr[1].foo, 2345);
     }
 
+    static MyValue4[] test4(boolean b) {
+        // range check elimination
+        if (b) {
+            MyValue4[] arr = new MyValue4[10];
+            arr[1] = MyValue4.make(2345);
+            return arr;
+        } else {
+            return null;
+        }
+    }
+
+    static void test4_verifier() {
+        int n = 50000;
+
+        for (int i=0; i<n; i++) {
+            test4(false);
+        }
+
+        MyValue4[] arr = null;
+        for (int i=0; i<n; i++) {
+          arr = test4(true);
+        }
+        Asserts.assertEQ(arr[1].foo, 2345);
+    }
+
     static public void main(String[] args) {
         test1();
         test2_verifier();
         test3_verifier();
+        test4_verifier();
     }
 }