changeset 48835:62004f705d27

Merge
author jwilhelm
date Mon, 05 Feb 2018 23:12:03 +0100
parents eb5572d58bb1 19ef3f64bc10
children 423bcbb288ff
files make/conf/jib-profiles.js src/hotspot/os/aix/os_aix.cpp src/hotspot/share/interpreter/bytecodeInterpreter.cpp src/hotspot/share/runtime/arguments.cpp test/hotspot/jtreg/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOption.java test/hotspot/jtreg/serviceability/dcmd/jvmti/LoadAgentDcmdTest.java
diffstat 444 files changed, 128289 insertions(+), 2062 deletions(-)
--- a/make/conf/jib-profiles.js	Fri Feb 09 02:23:34 2018 +0000
+++ b/make/conf/jib-profiles.js	Mon Feb 05 23:12:03 2018 +0100
@@ -829,7 +829,7 @@
         jtreg: {
             server: "javare",
             revision: "4.2",
-            build_number: "b11",
+            build_number: "b12",
             checksum_file: "MD5_VALUES",
             file: "jtreg_bin-4.2.zip",
             environment_name: "JT_HOME",
--- a/make/test/JtregNativeHotspot.gmk	Fri Feb 09 02:23:34 2018 +0000
+++ b/make/test/JtregNativeHotspot.gmk	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -68,6 +68,7 @@
     $(TOPDIR)/test/hotspot/jtreg/compiler/runtime/criticalnatives/lookup \
     $(TOPDIR)/test/hotspot/jtreg/compiler/runtime/criticalnatives/argumentcorruption \
     $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/GetOwnedMonitorInfo \
+    $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/GetOwnedMonitorStackDepthInfo \
     $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/GetNamedModule \
     $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/IsModifiableModule \
     $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/AddModuleReads \
@@ -101,6 +102,7 @@
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_liboverflow := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libSimpleClassFileLoadHook := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libGetOwnedMonitorInfoTest := -lc
+    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libGetOwnedMonitorStackDepthInfoTest := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libGetNamedModuleTest := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libIsModifiableModuleTest := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libAddModuleReadsTest := -lc
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Mon Feb 05 23:12:03 2018 +0100
@@ -17725,7 +17725,7 @@
   ins_cost(INSN_COST);
   format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
   ins_encode %{
-    int sh = (int)$shift$$constant & 31;
+    int sh = (int)$shift$$constant;
     if (sh >= 8) {
       __ eor(as_FloatRegister($dst$$reg), __ T8B,
              as_FloatRegister($src$$reg),
@@ -17744,7 +17744,7 @@
   ins_cost(INSN_COST);
   format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
   ins_encode %{
-    int sh = (int)$shift$$constant & 31;
+    int sh = (int)$shift$$constant;
     if (sh >= 8) {
       __ eor(as_FloatRegister($dst$$reg), __ T16B,
              as_FloatRegister($src$$reg),
@@ -17764,9 +17764,8 @@
   ins_cost(INSN_COST);
   format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
   ins_encode %{
-    int sh = (int)$shift$$constant & 31;
+    int sh = (int)$shift$$constant;
     if (sh >= 8) sh = 7;
-    sh = -sh & 7;
     __ sshr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg), sh);
   %}
@@ -17779,9 +17778,8 @@
   ins_cost(INSN_COST);
   format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
   ins_encode %{
-    int sh = (int)$shift$$constant & 31;
+    int sh = (int)$shift$$constant;
     if (sh >= 8) sh = 7;
-    sh = -sh & 7;
     __ sshr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg), sh);
   %}
@@ -17795,14 +17793,14 @@
   ins_cost(INSN_COST);
   format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
   ins_encode %{
-    int sh = (int)$shift$$constant & 31;
+    int sh = (int)$shift$$constant;
     if (sh >= 8) {
       __ eor(as_FloatRegister($dst$$reg), __ T8B,
              as_FloatRegister($src$$reg),
              as_FloatRegister($src$$reg));
     } else {
       __ ushr(as_FloatRegister($dst$$reg), __ T8B,
-             as_FloatRegister($src$$reg), -sh & 7);
+             as_FloatRegister($src$$reg), sh);
     }
   %}
   ins_pipe(vshift64_imm);
@@ -17814,14 +17812,14 @@
   ins_cost(INSN_COST);
   format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
   ins_encode %{
-    int sh = (int)$shift$$constant & 31;
+    int sh = (int)$shift$$constant;
     if (sh >= 8) {
       __ eor(as_FloatRegister($dst$$reg), __ T16B,
              as_FloatRegister($src$$reg),
              as_FloatRegister($src$$reg));
     } else {
       __ ushr(as_FloatRegister($dst$$reg), __ T16B,
-             as_FloatRegister($src$$reg), -sh & 7);
+             as_FloatRegister($src$$reg), sh);
     }
   %}
   ins_pipe(vshift128_imm);
@@ -17890,7 +17888,7 @@
   ins_cost(INSN_COST);
   format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
   ins_encode %{
-    int sh = (int)$shift$$constant & 31;
+    int sh = (int)$shift$$constant;
     if (sh >= 16) {
       __ eor(as_FloatRegister($dst$$reg), __ T8B,
              as_FloatRegister($src$$reg),
@@ -17909,7 +17907,7 @@
   ins_cost(INSN_COST);
   format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
   ins_encode %{
-    int sh = (int)$shift$$constant & 31;
+    int sh = (int)$shift$$constant;
     if (sh >= 16) {
       __ eor(as_FloatRegister($dst$$reg), __ T16B,
              as_FloatRegister($src$$reg),
@@ -17929,9 +17927,8 @@
   ins_cost(INSN_COST);
   format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
   ins_encode %{
-    int sh = (int)$shift$$constant & 31;
+    int sh = (int)$shift$$constant;
     if (sh >= 16) sh = 15;
-    sh = -sh & 15;
     __ sshr(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg), sh);
   %}
@@ -17944,9 +17941,8 @@
   ins_cost(INSN_COST);
   format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
   ins_encode %{
-    int sh = (int)$shift$$constant & 31;
+    int sh = (int)$shift$$constant;
     if (sh >= 16) sh = 15;
-    sh = -sh & 15;
     __ sshr(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg), sh);
   %}
@@ -17960,14 +17956,14 @@
   ins_cost(INSN_COST);
   format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
   ins_encode %{
-    int sh = (int)$shift$$constant & 31;
+    int sh = (int)$shift$$constant;
     if (sh >= 16) {
       __ eor(as_FloatRegister($dst$$reg), __ T8B,
              as_FloatRegister($src$$reg),
              as_FloatRegister($src$$reg));
     } else {
       __ ushr(as_FloatRegister($dst$$reg), __ T4H,
-             as_FloatRegister($src$$reg), -sh & 15);
+             as_FloatRegister($src$$reg), sh);
     }
   %}
   ins_pipe(vshift64_imm);
@@ -17979,14 +17975,14 @@
   ins_cost(INSN_COST);
   format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
   ins_encode %{
-    int sh = (int)$shift$$constant & 31;
+    int sh = (int)$shift$$constant;
     if (sh >= 16) {
       __ eor(as_FloatRegister($dst$$reg), __ T16B,
              as_FloatRegister($src$$reg),
              as_FloatRegister($src$$reg));
     } else {
       __ ushr(as_FloatRegister($dst$$reg), __ T8H,
-             as_FloatRegister($src$$reg), -sh & 15);
+             as_FloatRegister($src$$reg), sh);
     }
   %}
   ins_pipe(vshift128_imm);
@@ -18054,7 +18050,7 @@
   ins_encode %{
     __ shl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
-           (int)$shift$$constant & 31);
+           (int)$shift$$constant);
   %}
   ins_pipe(vshift64_imm);
 %}
@@ -18067,7 +18063,7 @@
   ins_encode %{
     __ shl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
-           (int)$shift$$constant & 31);
+           (int)$shift$$constant);
   %}
   ins_pipe(vshift128_imm);
 %}
@@ -18080,7 +18076,7 @@
   ins_encode %{
     __ sshr(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src$$reg),
-            -(int)$shift$$constant & 31);
+            (int)$shift$$constant);
   %}
   ins_pipe(vshift64_imm);
 %}
@@ -18093,7 +18089,7 @@
   ins_encode %{
     __ sshr(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src$$reg),
-            -(int)$shift$$constant & 31);
+            (int)$shift$$constant);
   %}
   ins_pipe(vshift128_imm);
 %}
@@ -18106,7 +18102,7 @@
   ins_encode %{
     __ ushr(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src$$reg),
-            -(int)$shift$$constant & 31);
+            (int)$shift$$constant);
   %}
   ins_pipe(vshift64_imm);
 %}
@@ -18119,7 +18115,7 @@
   ins_encode %{
     __ ushr(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src$$reg),
-            -(int)$shift$$constant & 31);
+            (int)$shift$$constant);
   %}
   ins_pipe(vshift128_imm);
 %}
@@ -18159,7 +18155,7 @@
   ins_encode %{
     __ shl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
-           (int)$shift$$constant & 63);
+           (int)$shift$$constant);
   %}
   ins_pipe(vshift128_imm);
 %}
@@ -18172,7 +18168,7 @@
   ins_encode %{
     __ sshr(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg),
-            -(int)$shift$$constant & 63);
+            (int)$shift$$constant);
   %}
   ins_pipe(vshift128_imm);
 %}
@@ -18185,7 +18181,7 @@
   ins_encode %{
     __ ushr(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg),
-            -(int)$shift$$constant & 63);
+            (int)$shift$$constant);
   %}
   ins_pipe(vshift128_imm);
 %}
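
Note on the aarch64.ad change above: the immediate shift count is no longer masked with & 31 and right shifts are no longer negated; the count is passed through unchanged, with a count of lane-width or more turned into eor-to-zero for shl/ushr and clamped to lane-width-1 for sshr. A minimal standalone sketch of those per-lane byte semantics, written as ordinary C++ rather than HotSpot code:

    #include <cstdint>
    #include <cstdio>

    // Left shift of one byte lane: a count of 8 or more clears the lane,
    // mirroring the eor-to-zero path in the instruct blocks above.
    static int8_t shl_b(int8_t v, int sh) {
      return sh >= 8 ? int8_t(0) : int8_t(v << sh);
    }

    // Arithmetic right shift of one byte lane: the count saturates at 7,
    // mirroring "if (sh >= 8) sh = 7" above.
    static int8_t sshr_b(int8_t v, int sh) {
      if (sh >= 8) sh = 7;
      return int8_t(v >> sh);
    }

    int main() {
      std::printf("%d %d\n", shl_b(0x40, 9), sshr_b(int8_t(-128), 9)); // prints: 0 -1
      return 0;
    }
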
--- a/src/hotspot/cpu/ppc/assembler_ppc.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/ppc/assembler_ppc.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -518,6 +518,7 @@
     XXMRGHW_OPCODE = (60u << OPCODE_SHIFT |   18u << 3),
     XXMRGLW_OPCODE = (60u << OPCODE_SHIFT |   50u << 3),
     XXSPLTW_OPCODE = (60u << OPCODE_SHIFT |  164u << 2),
+    XXLOR_OPCODE   = (60u << OPCODE_SHIFT |  146u << 3),
     XXLXOR_OPCODE  = (60u << OPCODE_SHIFT |  154u << 3),
     XXLEQV_OPCODE  = (60u << OPCODE_SHIFT |  186u << 3),
 
@@ -2162,6 +2163,7 @@
   inline void mtvsrd(   VectorSRegister d, Register a);
   inline void mtvsrwz(  VectorSRegister d, Register a);
   inline void xxspltw(  VectorSRegister d, VectorSRegister b, int ui2);
+  inline void xxlor(    VectorSRegister d, VectorSRegister a, VectorSRegister b);
   inline void xxlxor(   VectorSRegister d, VectorSRegister a, VectorSRegister b);
   inline void xxleqv(   VectorSRegister d, VectorSRegister a, VectorSRegister b);
 
--- a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -766,6 +766,7 @@
 inline void Assembler::mtvsrd(  VectorSRegister d, Register a)               { emit_int32( MTVSRD_OPCODE  | vsrt(d)  | ra(a)); }
 inline void Assembler::mtvsrwz( VectorSRegister d, Register a)               { emit_int32( MTVSRWZ_OPCODE | vsrt(d) | ra(a)); }
 inline void Assembler::xxspltw( VectorSRegister d, VectorSRegister b, int ui2)           { emit_int32( XXSPLTW_OPCODE | vsrt(d) | vsrb(b) | xxsplt_uim(uimm(ui2,2))); }
+inline void Assembler::xxlor(   VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXLOR_OPCODE  | vsrt(d) | vsra(a) | vsrb(b)); }
 inline void Assembler::xxlxor(  VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXLXOR_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
 inline void Assembler::xxleqv(  VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXLEQV_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
 inline void Assembler::mtvrd(    VectorRegister d, Register a)               { emit_int32( MTVSRD_OPCODE  | vsrt(d->to_vsr()) | ra(a)); }
--- a/src/hotspot/cpu/ppc/ppc.ad	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/ppc/ppc.ad	Mon Feb 05 23:12:03 2018 +0100
@@ -1656,9 +1656,9 @@
 
 // =============================================================================
 
-// Figure out which register class each belongs in: rc_int, rc_float or
+// Figure out which register class each belongs in: rc_int, rc_float, rc_vs or
 // rc_stack.
-enum RC { rc_bad, rc_int, rc_float, rc_stack };
+enum RC { rc_bad, rc_int, rc_float, rc_vs, rc_stack };
 
 static enum RC rc_class(OptoReg::Name reg) {
   // Return the register class for the given register. The given register
@@ -1673,6 +1673,9 @@
   // We have 64 floating-point register halves, starting at index 64.
   if (reg < 64+64) return rc_float;
 
+  // We have 64 vector-scalar registers, starting at index 128.
+  if (reg < 64+64+64) return rc_vs;
+
   // Between float regs & stack are the flags regs.
   assert(OptoReg::is_stack(reg) || reg < 64+64+64, "blow up if spilling flags");
 
@@ -1735,6 +1738,58 @@
   if (src_lo == dst_lo && src_hi == dst_hi)
     return size;            // Self copy, no move.
 
+  if (bottom_type()->isa_vect() != NULL && ideal_reg() == Op_VecX) {
+    // Memory->Memory Spill.
+    if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
+      int src_offset = ra_->reg2offset(src_lo);
+      int dst_offset = ra_->reg2offset(dst_lo);
+      if (cbuf) {
+        MacroAssembler _masm(cbuf);
+        __ ld(R0, src_offset, R1_SP);
+        __ std(R0, dst_offset, R1_SP);
+        __ ld(R0, src_offset+8, R1_SP);
+        __ std(R0, dst_offset+8, R1_SP);
+      }
+      size += 16;
+    }
+    // VectorSRegister->Memory Spill.
+    else if (src_lo_rc == rc_vs && dst_lo_rc == rc_stack) {
+      VectorSRegister Rsrc = as_VectorSRegister(Matcher::_regEncode[src_lo]);
+      int dst_offset = ra_->reg2offset(dst_lo);
+      if (cbuf) {
+        MacroAssembler _masm(cbuf);
+        __ addi(R0, R1_SP, dst_offset);
+        __ stxvd2x(Rsrc, R0);
+      }
+      size += 8;
+    }
+    // Memory->VectorSRegister Spill.
+    else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vs) {
+      VectorSRegister Rdst = as_VectorSRegister(Matcher::_regEncode[dst_lo]);
+      int src_offset = ra_->reg2offset(src_lo);
+      if (cbuf) {
+        MacroAssembler _masm(cbuf);
+        __ addi(R0, R1_SP, src_offset);
+        __ lxvd2x(Rdst, R0);
+      }
+      size += 8;
+    }
+    // VectorSRegister->VectorSRegister.
+    else if (src_lo_rc == rc_vs && dst_lo_rc == rc_vs) {
+      VectorSRegister Rsrc = as_VectorSRegister(Matcher::_regEncode[src_lo]);
+      VectorSRegister Rdst = as_VectorSRegister(Matcher::_regEncode[dst_lo]);
+      if (cbuf) {
+        MacroAssembler _masm(cbuf);
+        __ xxlor(Rdst, Rsrc, Rsrc);
+      }
+      size += 4;
+    }
+    else {
+      ShouldNotReachHere(); // No VSR spill.
+    }
+    return size;
+  }
+
   // --------------------------------------
   // Memory->Memory Spill. Use R0 to hold the value.
   if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
@@ -3524,7 +3579,7 @@
     assert(loadConLNodes._last->bottom_type()->isa_long(), "must be long");
   %}
 
-  enc_class postalloc_expand_load_replF_constant_vsx(vecX dst, immF src, iRegLdst toc) %{
+  enc_class postalloc_expand_load_replF_constant_vsx(vecX dst, immF src, iRegLdst toc, iRegLdst tmp) %{
     // Create new nodes.
 
     // Make an operand with the bit pattern to load as float.
@@ -3533,8 +3588,8 @@
 
     loadConLReplicatedNodesTuple loadConLNodes =
       loadConLReplicatedNodesTuple_create(C, ra_, n_toc, op_repl, op_dst, op_zero,
-                                OptoReg::Name(R20_H_num), OptoReg::Name(R20_num),
-                                OptoReg::Name(VSR11_num), OptoReg::Name(VSR10_num));
+                                ra_->get_reg_second(n_tmp), ra_->get_reg_first(n_tmp),
+                                ra_->get_reg_second(this), ra_->get_reg_first(this));
 
     // Push new nodes.
     if (loadConLNodes._large_hi) { nodes->push(loadConLNodes._large_hi); }
@@ -14013,12 +14068,13 @@
   %}
 %}
 
-instruct repl4F_immF_Ex(vecX dst, immF src) %{
+instruct repl4F_immF_Ex(vecX dst, immF src, iRegLdst tmp) %{
   match(Set dst (ReplicateF src));
   predicate(n->as_Vector()->length() == 4);
+  effect(TEMP tmp);
   ins_cost(10 * DEFAULT_COST);
 
-  postalloc_expand( postalloc_expand_load_replF_constant_vsx(dst, src, constanttablebase) );
+  postalloc_expand( postalloc_expand_load_replF_constant_vsx(dst, src, constanttablebase, tmp) );
 %}
 
 instruct repl4F_immF0(vecX dst, immF_0 zero) %{
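
The new Op_VecX branch added to the spill-copy code above covers four source/destination combinations. A hedged sketch (plain C++, not HotSpot code) of the case split and the number of code bytes each path adds to the returned size:

    #include <cstdio>

    enum RC { rc_stack, rc_vs };

    static int vecx_spill_size(RC src, RC dst) {
      if (src == rc_stack && dst == rc_stack) return 16; // ld/std of both 8-byte halves via R0
      if (src == rc_vs    && dst == rc_stack) return 8;  // addi + stxvd2x
      if (src == rc_stack && dst == rc_vs)    return 8;  // addi + lxvd2x
      return 4;                                          // xxlor dst, src, src acts as a register move
    }

    int main() {
      std::printf("%d %d %d %d\n",
                  vecx_spill_size(rc_stack, rc_stack), vecx_spill_size(rc_vs, rc_stack),
                  vecx_spill_size(rc_stack, rc_vs),    vecx_spill_size(rc_vs, rc_vs)); // 16 8 8 4
      return 0;
    }
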
--- a/src/hotspot/cpu/ppc/vm_version_ppc.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/ppc/vm_version_ppc.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -109,8 +109,7 @@
 
   if (PowerArchitecturePPC64 >= 8) {
     if (FLAG_IS_DEFAULT(SuperwordUseVSX)) {
-      // TODO: Switch on when it works stable. Currently, MachSpillCopyNode::implementation code is missing.
-      //FLAG_SET_ERGO(bool, SuperwordUseVSX, true);
+      FLAG_SET_ERGO(bool, SuperwordUseVSX, true);
     }
   } else {
     if (SuperwordUseVSX) {
--- a/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -991,8 +991,8 @@
   int offset = -1;
 
   switch (c->type()) {
+    case T_FLOAT: type = T_INT; // Float constants are stored by int store instructions.
     case T_INT:
-    case T_FLOAT:
     case T_ADDRESS: {
       LIR_Opr tmp = FrameMap::O7_opr;
       int value = c->as_jint_bits();
@@ -1202,6 +1202,7 @@
       __ stw(tmp, to.base(), to.disp());
       break;
     }
+    case T_ADDRESS:
     case T_OBJECT: {
       Register tmp = O7;
       Address from = frame_map()->address_for_slot(src->single_stack_ix());
@@ -1355,7 +1356,6 @@
   }
 }
 
-
 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                             LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                             bool wide, bool unaligned) {
@@ -2265,10 +2265,10 @@
          op->obj()->as_register()   == O0 &&
          op->klass()->as_register() == G5, "must be");
   if (op->init_check()) {
+    add_debug_info_for_null_check_here(op->stub()->info());
     __ ldub(op->klass()->as_register(),
           in_bytes(InstanceKlass::init_state_offset()),
           op->tmp1()->as_register());
-    add_debug_info_for_null_check_here(op->stub()->info());
     __ cmp(op->tmp1()->as_register(), InstanceKlass::fully_initialized);
     __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
     __ delayed()->nop();
--- a/src/hotspot/cpu/sparc/c1_globals_sparc.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/sparc/c1_globals_sparc.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,32 +32,32 @@
 // (see c1_globals.hpp)
 
 #ifndef TIERED
-define_pd_global(bool, BackgroundCompilation,        true );
-define_pd_global(bool, CICompileOSR,                 true );
-define_pd_global(bool, InlineIntrinsics,             true );
-define_pd_global(bool, PreferInterpreterNativeStubs, false);
-define_pd_global(bool, ProfileTraps,                 false);
-define_pd_global(bool, UseOnStackReplacement,        true );
-define_pd_global(bool, TieredCompilation,            false);
-define_pd_global(intx, CompileThreshold,             1000 ); // Design center runs on 1.3.1
+define_pd_global(bool,  BackgroundCompilation,        true );
+define_pd_global(bool,  CICompileOSR,                 true );
+define_pd_global(bool,  InlineIntrinsics,             true );
+define_pd_global(bool,  PreferInterpreterNativeStubs, false);
+define_pd_global(bool,  ProfileTraps,                 false);
+define_pd_global(bool,  UseOnStackReplacement,        true );
+define_pd_global(bool,  TieredCompilation,            false);
+define_pd_global(intx,  CompileThreshold,             1000 ); // Design center runs on 1.3.1
 
-define_pd_global(intx, OnStackReplacePercentage,     1400 );
-define_pd_global(bool, UseTLAB,                      true );
-define_pd_global(bool, ProfileInterpreter,           false);
-define_pd_global(intx, FreqInlineSize,               325  );
-define_pd_global(bool, ResizeTLAB,                   true );
-define_pd_global(intx, ReservedCodeCacheSize,        32*M );
-define_pd_global(intx, NonProfiledCodeHeapSize,      13*M );
-define_pd_global(intx, ProfiledCodeHeapSize,         14*M );
-define_pd_global(intx, NonNMethodCodeHeapSize,       5*M  );
-define_pd_global(intx, CodeCacheExpansionSize,       32*K );
-define_pd_global(uintx, CodeCacheMinBlockLength,     1);
-define_pd_global(uintx, CodeCacheMinimumUseSpace,    400*K);
-define_pd_global(size_t, MetaspaceSize,              12*M );
-define_pd_global(bool, NeverActAsServerClassMachine, true );
-define_pd_global(size_t, NewSizeThreadIncrease,      16*K );
-define_pd_global(uint64_t, MaxRAM,                   1ULL*G);
-define_pd_global(intx, InitialCodeCacheSize,         160*K);
+define_pd_global(intx,  OnStackReplacePercentage,     1400 );
+define_pd_global(bool,  UseTLAB,                      true );
+define_pd_global(bool,  ProfileInterpreter,           false);
+define_pd_global(intx,  FreqInlineSize,               325  );
+define_pd_global(bool,  ResizeTLAB,                   true );
+define_pd_global(uintx, ReservedCodeCacheSize,        32*M );
+define_pd_global(uintx, NonProfiledCodeHeapSize,      13*M );
+define_pd_global(uintx, ProfiledCodeHeapSize,         14*M );
+define_pd_global(uintx, NonNMethodCodeHeapSize,       5*M  );
+define_pd_global(uintx, CodeCacheExpansionSize,       32*K );
+define_pd_global(uintx, CodeCacheMinBlockLength,      1);
+define_pd_global(uintx, CodeCacheMinimumUseSpace,     400*K);
+define_pd_global(size_t, MetaspaceSize,               12*M );
+define_pd_global(bool,  NeverActAsServerClassMachine, true );
+define_pd_global(size_t, NewSizeThreadIncrease,       16*K );
+define_pd_global(uint64_t, MaxRAM,                    1ULL*G);
+define_pd_global(uintx, InitialCodeCacheSize,         160*K);
 #endif // !TIERED
 
 define_pd_global(bool, UseTypeProfile,               false);
--- a/src/hotspot/cpu/sparc/c2_globals_sparc.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/sparc/c2_globals_sparc.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -71,12 +71,12 @@
 // sequence of instructions to load a 64 bit pointer.
 //
 // InitialCodeCacheSize derived from specjbb2000 run.
-define_pd_global(intx, InitialCodeCacheSize,         2048*K); // Integral multiple of CodeCacheExpansionSize
-define_pd_global(intx, ReservedCodeCacheSize,        48*M);
-define_pd_global(intx, NonProfiledCodeHeapSize,      21*M);
-define_pd_global(intx, ProfiledCodeHeapSize,         22*M);
-define_pd_global(intx, NonNMethodCodeHeapSize,       5*M );
-define_pd_global(intx, CodeCacheExpansionSize,       64*K);
+define_pd_global(uintx, InitialCodeCacheSize,        2048*K); // Integral multiple of CodeCacheExpansionSize
+define_pd_global(uintx, ReservedCodeCacheSize,       48*M);
+define_pd_global(uintx, NonProfiledCodeHeapSize,     21*M);
+define_pd_global(uintx, ProfiledCodeHeapSize,        22*M);
+define_pd_global(uintx, NonNMethodCodeHeapSize,      5*M );
+define_pd_global(uintx, CodeCacheExpansionSize,      64*K);
 
 // Ergonomics related flags
 define_pd_global(uint64_t,MaxRAM,                    128ULL*G);
--- a/src/hotspot/cpu/sparc/compiledIC_sparc.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/sparc/compiledIC_sparc.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -107,8 +107,8 @@
 
 #ifdef ASSERT
   // read the value once
-  intptr_t data = method_holder->data();
-  address destination = jump->jump_destination();
+  volatile intptr_t data = method_holder->data();
+  volatile address destination = jump->jump_destination();
   assert(data == 0 || data == (intptr_t)callee(),
          "a) MT-unsafe modification of inline cache");
   assert(destination == (address)-1 || destination == entry,
--- a/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -837,6 +837,20 @@
       case BarrierSet::G1SATBCTLogging:
         // With G1, don't generate the call if we statically know that the target in uninitialized
         if (!dest_uninitialized) {
+          Register tmp = O5;
+          assert_different_registers(addr, count, tmp);
+          Label filtered;
+          // Is marking active?
+          if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+            __ ld(G2, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), tmp);
+          } else {
+            guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
+                      "Assumption");
+            __ ldsb(G2, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), tmp);
+          }
+          // Is marking active?
+          __ cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
+
           __ save_frame(0);
           // Save the necessary global regs... will be used after.
           if (addr->is_global()) {
@@ -856,6 +870,9 @@
             __ mov(L1, count);
           }
           __ restore();
+
+          __ bind(filtered);
+          DEBUG_ONLY(__ set(0xDEADC0DE, tmp);) // we have killed tmp
         }
         break;
       case BarrierSet::CardTableForRS:
--- a/src/hotspot/cpu/x86/assembler_x86.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1510,11 +1510,11 @@
 }
 
 void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
-  assert(entry != NULL, "call most probably wrong");
   InstructionMark im(this);
   emit_int8((unsigned char)0xE8);
   intptr_t disp = entry - (pc() + sizeof(int32_t));
-  assert(is_simm32(disp), "must be 32bit offset (call2)");
+  // Entry is NULL in case of a scratch emit.
+  assert(entry == NULL || is_simm32(disp), "disp=" INTPTR_FORMAT " must be 32bit offset (call2)", disp);
   // Technically, should use call32_operand, but this format is
   // implied by the fact that we're emitting a call instruction.
 
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1543,10 +1543,10 @@
 
 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
   if (op->init_check()) {
+    add_debug_info_for_null_check_here(op->stub()->info());
     __ cmpb(Address(op->klass()->as_register(),
                     InstanceKlass::init_state_offset()),
                     InstanceKlass::fully_initialized);
-    add_debug_info_for_null_check_here(op->stub()->info());
     __ jcc(Assembler::notEqual, *op->stub()->entry());
   }
   __ allocate_object(op->obj()->as_register(),
@@ -2580,7 +2580,9 @@
     move_regs(lreg, rax);
 
     int idivl_offset = __ corrected_idivl(rreg);
-    add_debug_info_for_div0(idivl_offset, info);
+    if (ImplicitDiv0Checks) {
+      add_debug_info_for_div0(idivl_offset, info);
+    }
     if (code == lir_irem) {
       move_regs(rdx, dreg); // result is in rdx
     } else {
--- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -572,6 +572,8 @@
     if (!ImplicitDiv0Checks) {
       __ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
       __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
+      // Idiv/irem cannot trap (passing info would generate an assertion).
+      info = NULL;
     }
     LIR_Opr tmp = FrameMap::rdx_opr; // idiv and irem use rdx in their implementation
     if (x->op() == Bytecodes::_irem) {
--- a/src/hotspot/cpu/x86/c1_globals_x86.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/x86/c1_globals_x86.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,15 +45,15 @@
 define_pd_global(intx,   OnStackReplacePercentage,     933  );
 define_pd_global(intx,   FreqInlineSize,               325  );
 define_pd_global(size_t, NewSizeThreadIncrease,        4*K  );
-define_pd_global(intx, InitialCodeCacheSize,           160*K);
-define_pd_global(intx, ReservedCodeCacheSize,          32*M );
-define_pd_global(intx, NonProfiledCodeHeapSize,        13*M );
-define_pd_global(intx, ProfiledCodeHeapSize,           14*M );
-define_pd_global(intx, NonNMethodCodeHeapSize,         5*M  );
+define_pd_global(uintx,  InitialCodeCacheSize,         160*K);
+define_pd_global(uintx,  ReservedCodeCacheSize,        32*M );
+define_pd_global(uintx,  NonProfiledCodeHeapSize,      13*M );
+define_pd_global(uintx,  ProfiledCodeHeapSize,         14*M );
+define_pd_global(uintx,  NonNMethodCodeHeapSize,       5*M  );
 define_pd_global(bool,   ProfileInterpreter,           false);
-define_pd_global(intx, CodeCacheExpansionSize,         32*K );
-define_pd_global(uintx, CodeCacheMinBlockLength,       1    );
-define_pd_global(uintx, CodeCacheMinimumUseSpace,      400*K);
+define_pd_global(uintx,  CodeCacheExpansionSize,       32*K );
+define_pd_global(uintx,  CodeCacheMinBlockLength,      1    );
+define_pd_global(uintx,  CodeCacheMinimumUseSpace,     400*K);
 define_pd_global(size_t, MetaspaceSize,                12*M );
 define_pd_global(bool,   NeverActAsServerClassMachine, true );
 define_pd_global(uint64_t, MaxRAM,                    1ULL*G);
--- a/src/hotspot/cpu/x86/c2_globals_x86.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/x86/c2_globals_x86.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,26 +48,26 @@
 define_pd_global(intx, MinJumpTableSize,             10);
 define_pd_global(intx, LoopPercentProfileLimit,      30);
 #ifdef AMD64
-define_pd_global(intx, INTPRESSURE,                  13);
-define_pd_global(intx, FLOATPRESSURE,                14);
-define_pd_global(intx, InteriorEntryAlignment,       16);
-define_pd_global(size_t, NewSizeThreadIncrease,      ScaleForWordSize(4*K));
-define_pd_global(intx, LoopUnrollLimit,              60);
+define_pd_global(intx,  INTPRESSURE,                 13);
+define_pd_global(intx,  FLOATPRESSURE,               14);
+define_pd_global(intx,  InteriorEntryAlignment,      16);
+define_pd_global(size_t, NewSizeThreadIncrease,     ScaleForWordSize(4*K));
+define_pd_global(intx,  LoopUnrollLimit,             60);
 // InitialCodeCacheSize derived from specjbb2000 run.
-define_pd_global(intx, InitialCodeCacheSize,         2496*K); // Integral multiple of CodeCacheExpansionSize
-define_pd_global(intx, CodeCacheExpansionSize,       64*K);
+define_pd_global(uintx, InitialCodeCacheSize,        2496*K); // Integral multiple of CodeCacheExpansionSize
+define_pd_global(uintx, CodeCacheExpansionSize,      64*K);
 
 // Ergonomics related flags
 define_pd_global(uint64_t, MaxRAM,                   128ULL*G);
 #else
-define_pd_global(intx, INTPRESSURE,                  6);
-define_pd_global(intx, FLOATPRESSURE,                6);
-define_pd_global(intx, InteriorEntryAlignment,       4);
+define_pd_global(intx,  INTPRESSURE,                 6);
+define_pd_global(intx,  FLOATPRESSURE,               6);
+define_pd_global(intx,  InteriorEntryAlignment,      4);
 define_pd_global(size_t, NewSizeThreadIncrease,      4*K);
-define_pd_global(intx, LoopUnrollLimit,              50);     // Design center runs on 1.3.1
+define_pd_global(intx,  LoopUnrollLimit,             50);     // Design center runs on 1.3.1
 // InitialCodeCacheSize derived from specjbb2000 run.
-define_pd_global(intx, InitialCodeCacheSize,         2304*K); // Integral multiple of CodeCacheExpansionSize
-define_pd_global(intx, CodeCacheExpansionSize,       32*K);
+define_pd_global(uintx, InitialCodeCacheSize,        2304*K); // Integral multiple of CodeCacheExpansionSize
+define_pd_global(uintx, CodeCacheExpansionSize,      32*K);
 
 // Ergonomics related flags
 define_pd_global(uint64_t, MaxRAM,                   4ULL*G);
@@ -84,10 +84,10 @@
 define_pd_global(bool, SuperWordLoopUnrollAnalysis,  true);
 define_pd_global(bool, IdealizeClearArrayNode,       true);
 
-define_pd_global(intx, ReservedCodeCacheSize,        48*M);
-define_pd_global(intx, NonProfiledCodeHeapSize,      21*M);
-define_pd_global(intx, ProfiledCodeHeapSize,         22*M);
-define_pd_global(intx, NonNMethodCodeHeapSize,       5*M );
+define_pd_global(uintx, ReservedCodeCacheSize,       48*M);
+define_pd_global(uintx, NonProfiledCodeHeapSize,     21*M);
+define_pd_global(uintx, ProfiledCodeHeapSize,        22*M);
+define_pd_global(uintx, NonNMethodCodeHeapSize,      5*M );
 define_pd_global(uintx, CodeCacheMinBlockLength,     4);
 define_pd_global(uintx, CodeCacheMinimumUseSpace,    400*K);
 
--- a/src/hotspot/cpu/x86/compiledIC_x86.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/x86/compiledIC_x86.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -160,8 +160,8 @@
 
 #ifdef ASSERT
   // read the value once
-  intptr_t data = method_holder->data();
-  address destination = jump->jump_destination();
+  volatile intptr_t data = method_holder->data();
+  volatile address destination = jump->jump_destination();
   assert(data == 0 || data == (intptr_t)callee(),
          "a) MT-unsafe modification of inline cache");
   assert(destination == (address)-1 || destination == entry,
--- a/src/hotspot/cpu/x86/interp_masm_x86.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -516,6 +516,8 @@
   // Add in the index
   addptr(result, tmp);
   load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+  // The resulting oop is null if the reference is not yet resolved.
+  // It is Universe::the_null_sentinel() if the reference resolved to NULL via condy.
 }
 
 // load cpool->resolved_klass_at(index)
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -836,7 +836,8 @@
   andq(rsp, -16);     // align stack as required by push_CPU_state and call
   push_CPU_state();   // keeps alignment at 16 bytes
   lea(c_rarg0, ExternalAddress((address) msg));
-  call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
+  lea(rax, ExternalAddress(CAST_FROM_FN_PTR(address, warning)));
+  call(rax);
   pop_CPU_state();
   mov(rsp, rbp);
   pop(rbp);
--- a/src/hotspot/cpu/x86/methodHandles_x86.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/x86/methodHandles_x86.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -589,7 +589,7 @@
 
 void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
   if (!TraceMethodHandles)  return;
-  BLOCK_COMMENT("trace_method_handle {");
+  BLOCK_COMMENT(err_msg("trace_method_handle %s {", adaptername));
   __ enter();
   __ andptr(rsp, -16); // align stack if needed for FPU state
   __ pusha();
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -145,7 +145,7 @@
   // We assume caller has already has return address slot on the stack
   // We push epb twice in this sequence because we want the real rbp,
   // to be under the return like a normal enter and we want to use pusha
-  // We push by hand instead of pusing push
+  // We push by hand instead of using push.
   __ enter();
   __ pusha();
   __ pushf();
--- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -679,10 +679,28 @@
       case BarrierSet::G1SATBCTLogging:
         // With G1, don't generate the call if we statically know that the target in uninitialized
         if (!uninitialized_target) {
+          Register thread = rax;
+          Label filtered;
+          __ push(thread);
+          __ get_thread(thread);
+          Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
+                                               SATBMarkQueue::byte_offset_of_active()));
+          // Is marking active?
+          if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+            __ cmpl(in_progress, 0);
+          } else {
+            assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+            __ cmpb(in_progress, 0);
+          }
+          __ pop(thread);
+          __ jcc(Assembler::equal, filtered);
+
            __ pusha();                      // push registers
            __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre),
                            start, count);
            __ popa();
+
+           __ bind(filtered);
          }
         break;
       case BarrierSet::CardTableForRS:
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1201,6 +1201,18 @@
       case BarrierSet::G1SATBCTLogging:
         // With G1, don't generate the call if we statically know that the target in uninitialized
         if (!dest_uninitialized) {
+          Label filtered;
+          Address in_progress(r15_thread, in_bytes(JavaThread::satb_mark_queue_offset() +
+                                                   SATBMarkQueue::byte_offset_of_active()));
+          // Is marking active?
+          if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+            __ cmpl(in_progress, 0);
+          } else {
+            assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+            __ cmpb(in_progress, 0);
+          }
+          __ jcc(Assembler::equal, filtered);
+
            __ pusha();                      // push registers
            if (count == c_rarg0) {
              if (addr == c_rarg1) {
@@ -1216,6 +1228,8 @@
            }
            __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
            __ popa();
+
+           __ bind(filtered);
         }
          break;
       case BarrierSet::CardTableForRS:
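
The SPARC and x86 stub changes above all add the same fast path: read the thread's SATB marking-active flag and jump past the write_ref_array_pre runtime call when marking is off. A minimal standalone sketch of that control flow, using illustrative names rather than real HotSpot ones:

    #include <cstdio>

    struct FakeThread { bool satb_marking_active; };      // stand-in for the SATB queue's active byte

    static void write_ref_array_pre(void** addr, int count) {
      (void)addr;
      std::printf("pre-barrier over %d slots\n", count);  // stands in for the G1 runtime call
    }

    static void arraycopy_prologue(FakeThread* t, void** addr, int count) {
      if (!t->satb_marking_active) return;                // the new "filtered" branch: skip the call
      write_ref_array_pre(addr, count);
    }

    int main() {
      void* a[4] = {};
      FakeThread idle{false}, marking{true};
      arraycopy_prologue(&idle, a, 4);                    // no output: barrier skipped
      arraycopy_prologue(&marking, a, 4);                 // prints once
      return 0;
    }
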
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -419,7 +419,7 @@
 void TemplateTable::ldc(bool wide) {
   transition(vtos, vtos);
   Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
-  Label call_ldc, notFloat, notClass, Done;
+  Label call_ldc, notFloat, notClass, notInt, Done;
 
   if (wide) {
     __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
@@ -465,19 +465,18 @@
   __ jmp(Done);
 
   __ bind(notFloat);
-#ifdef ASSERT
-  {
-    Label L;
-    __ cmpl(rdx, JVM_CONSTANT_Integer);
-    __ jcc(Assembler::equal, L);
-    // String and Object are rewritten to fast_aldc
-    __ stop("unexpected tag type in ldc");
-    __ bind(L);
-  }
-#endif
-  // itos JVM_CONSTANT_Integer only
+  __ cmpl(rdx, JVM_CONSTANT_Integer);
+  __ jccb(Assembler::notEqual, notInt);
+
+  // itos
   __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
   __ push(itos);
+  __ jmp(Done);
+
+  // assume the tag is for condy; if not, the VM runtime will tell us
+  __ bind(notInt);
+  condy_helper(Done);
+
   __ bind(Done);
 }
 
@@ -487,6 +486,7 @@
 
   Register result = rax;
   Register tmp = rdx;
+  Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
   int index_size = wide ? sizeof(u2) : sizeof(u1);
 
   Label resolved;
@@ -496,17 +496,28 @@
   assert_different_registers(result, tmp);
   __ get_cache_index_at_bcp(tmp, 1, index_size);
   __ load_resolved_reference_at_index(result, tmp);
-  __ testl(result, result);
+  __ testptr(result, result);
   __ jcc(Assembler::notZero, resolved);
 
   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 
   // first time invocation - must resolve first
-  __ movl(tmp, (int)bytecode());
-  __ call_VM(result, entry, tmp);
-
+  __ movl(rarg, (int)bytecode());
+  __ call_VM(result, entry, rarg);
   __ bind(resolved);
 
+  { // Check for the null sentinel.
+    // If we just called the VM, that already did the mapping for us,
+    // but it's harmless to retry.
+    Label notNull;
+    ExternalAddress null_sentinel((address)Universe::the_null_sentinel_addr());
+    __ movptr(tmp, null_sentinel);
+    __ cmpptr(tmp, result);
+    __ jccb(Assembler::notEqual, notNull);
+    __ xorptr(result, result);  // NULL object reference
+    __ bind(notNull);
+  }
+
   if (VerifyOops) {
     __ verify_oop(result);
   }
@@ -514,7 +525,7 @@
 
 void TemplateTable::ldc2_w() {
   transition(vtos, vtos);
-  Label Long, Done;
+  Label notDouble, notLong, Done;
   __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
 
   __ get_cpool_and_tags(rcx, rax);
@@ -522,25 +533,143 @@
   const int tags_offset = Array<u1>::base_offset_in_bytes();
 
   // get type
-  __ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
-          JVM_CONSTANT_Double);
-  __ jccb(Assembler::notEqual, Long);
+  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
+  __ cmpl(rdx, JVM_CONSTANT_Double);
+  __ jccb(Assembler::notEqual, notDouble);
 
   // dtos
   __ load_double(Address(rcx, rbx, Address::times_ptr, base_offset));
   __ push(dtos);
 
-  __ jmpb(Done);
-  __ bind(Long);
+  __ jmp(Done);
+  __ bind(notDouble);
+  __ cmpl(rdx, JVM_CONSTANT_Long);
+  __ jccb(Assembler::notEqual, notLong);
 
   // ltos
   __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
   NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
   __ push(ltos);
+  __ jmp(Done);
+
+  __ bind(notLong);
+  condy_helper(Done);
 
   __ bind(Done);
 }
 
+void TemplateTable::condy_helper(Label& Done) {
+  const Register obj = rax;
+  const Register off = rbx;
+  const Register flags = rcx;
+  const Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
+  __ movl(rarg, (int)bytecode());
+  call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
+#ifndef _LP64
+  // borrow rdi from locals
+  __ get_thread(rdi);
+  __ get_vm_result_2(flags, rdi);
+  __ restore_locals();
+#else
+  __ get_vm_result_2(flags, r15_thread);
+#endif
+  // VMr = obj = base address to find primitive value to push
+  // VMr2 = flags = (tos, off) using format of CPCE::_flags
+  __ movl(off, flags);
+  __ andl(off, ConstantPoolCacheEntry::field_index_mask);
+  const Address field(obj, off, Address::times_1, 0*wordSize);
+
+  // What sort of thing are we loading?
+  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
+  __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
+
+  switch (bytecode()) {
+  case Bytecodes::_ldc:
+  case Bytecodes::_ldc_w:
+    {
+      // tos in (itos, ftos, stos, btos, ctos, ztos)
+      Label notInt, notFloat, notShort, notByte, notChar, notBool;
+      __ cmpl(flags, itos);
+      __ jcc(Assembler::notEqual, notInt);
+      // itos
+      __ movl(rax, field);
+      __ push(itos);
+      __ jmp(Done);
+
+      __ bind(notInt);
+      __ cmpl(flags, ftos);
+      __ jcc(Assembler::notEqual, notFloat);
+      // ftos
+      __ load_float(field);
+      __ push(ftos);
+      __ jmp(Done);
+
+      __ bind(notFloat);
+      __ cmpl(flags, stos);
+      __ jcc(Assembler::notEqual, notShort);
+      // stos
+      __ load_signed_short(rax, field);
+      __ push(stos);
+      __ jmp(Done);
+
+      __ bind(notShort);
+      __ cmpl(flags, btos);
+      __ jcc(Assembler::notEqual, notByte);
+      // btos
+      __ load_signed_byte(rax, field);
+      __ push(btos);
+      __ jmp(Done);
+
+      __ bind(notByte);
+      __ cmpl(flags, ctos);
+      __ jcc(Assembler::notEqual, notChar);
+      // ctos
+      __ load_unsigned_short(rax, field);
+      __ push(ctos);
+      __ jmp(Done);
+
+      __ bind(notChar);
+      __ cmpl(flags, ztos);
+      __ jcc(Assembler::notEqual, notBool);
+      // ztos
+      __ load_signed_byte(rax, field);
+      __ push(ztos);
+      __ jmp(Done);
+
+      __ bind(notBool);
+      break;
+    }
+
+  case Bytecodes::_ldc2_w:
+    {
+      Label notLong, notDouble;
+      __ cmpl(flags, ltos);
+      __ jcc(Assembler::notEqual, notLong);
+      // ltos
+      __ movptr(rax, field);
+      NOT_LP64(__ movptr(rdx, field.plus_disp(4)));
+      __ push(ltos);
+      __ jmp(Done);
+
+      __ bind(notLong);
+      __ cmpl(flags, dtos);
+      __ jcc(Assembler::notEqual, notDouble);
+      // dtos
+      __ load_double(field);
+      __ push(dtos);
+      __ jmp(Done);
+
+      __ bind(notDouble);
+      break;
+    }
+
+  default:
+    ShouldNotReachHere();
+  }
+
+  __ stop("bad ldc/condy");
+}
+
 void TemplateTable::locals_index(Register reg, int offset) {
   __ load_unsigned_byte(reg, at_bcp(offset));
   __ negptr(reg);
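
condy_helper above asks the VM to resolve the constant and then decodes the returned CPCE::_flags word into a field offset plus a tos state that selects which primitive load to emit. A rough standalone sketch of that decode; the shift and mask values here are assumptions for illustration, not taken from this diff:

    #include <cstdio>

    int main() {
      const unsigned tos_state_shift  = 28;               // assumed layout: tos state in the top 4 bits
      const unsigned tos_state_mask   = 0xF;
      const unsigned field_index_mask = 0xFFFF;           // assumed: field offset in the low 16 bits

      unsigned flags = (4u << tos_state_shift) | 24u;     // e.g. an int (itos) constant at offset 24
      unsigned off = flags & field_index_mask;
      unsigned tos = (flags >> tos_state_shift) & tos_state_mask;
      std::printf("off=%u tos=%u\n", off, tos);           // off=24 tos=4
      return 0;
    }
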
--- a/src/hotspot/cpu/x86/x86_32.ad	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/cpu/x86/x86_32.ad	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
 //----------REGISTER DEFINITION BLOCK------------------------------------------
 // This information is used by the matcher and the register allocator to
 // describe individual registers and classes of registers within the target
-// archtecture.
+// architecture.
 
 register %{
 //----------Architecture Description Register Definitions----------------------
--- a/src/hotspot/os/aix/libperfstat_aix.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/os/aix/libperfstat_aix.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 #include "misc_aix.hpp"
 
 #include <dlfcn.h>
-#include <sys/systemcfg.h>
 
 // Handle to the libperfstat.
 static void* g_libhandle = NULL;
@@ -158,17 +157,6 @@
 
 //////////////////// convenience functions, release-independent /////////////////////////////
 
-// Excerpts from systemcfg.h definitions newer than AIX 5.3 (our oldest build platform)
-
-#define PV_6 0x100000          /* Power PC 6 */
-#define PV_6_1 0x100001        /* Power PC 6 DD1.x */
-#define PV_7 0x200000          /* Power PC 7 */
-#define PV_5_Compat 0x0F8000   /* Power PC 5 */
-#define PV_6_Compat 0x108000   /* Power PC 6 */
-#define PV_7_Compat 0x208000   /* Power PC 7 */
-#define PV_8 0x300000          /* Power PC 8 */
-#define PV_8_Compat 0x308000   /* Power PC 8 */
-
 
 // Retrieve global cpu information.
 bool libperfstat::get_cpuinfo(cpuinfo_t* pci) {
@@ -191,7 +179,7 @@
   }
 
   // Global cpu information.
-  strcpy (pci->description, psct.description);
+  strcpy(pci->description, psct.description);
   pci->processorHZ = psct.processorHZ;
   pci->ncpus = psct.ncpus;
   for (int i = 0; i < 3; i++) {
@@ -203,45 +191,6 @@
   pci->idle_clock_ticks = psct.idle;
   pci->wait_clock_ticks = psct.wait;
 
-  // Get the processor version from _system_configuration.
-  switch (_system_configuration.version) {
-  case PV_8:
-    strcpy(pci->version, "Power PC 8");
-    break;
-  case PV_7:
-    strcpy(pci->version, "Power PC 7");
-    break;
-  case PV_6_1:
-    strcpy(pci->version, "Power PC 6 DD1.x");
-    break;
-  case PV_6:
-    strcpy(pci->version, "Power PC 6");
-    break;
-  case PV_5:
-    strcpy(pci->version, "Power PC 5");
-    break;
-  case PV_5_2:
-    strcpy(pci->version, "Power PC 5_2");
-    break;
-  case PV_5_3:
-    strcpy(pci->version, "Power PC 5_3");
-    break;
-  case PV_5_Compat:
-    strcpy(pci->version, "PV_5_Compat");
-    break;
-  case PV_6_Compat:
-    strcpy(pci->version, "PV_6_Compat");
-    break;
-  case PV_7_Compat:
-    strcpy(pci->version, "PV_7_Compat");
-    break;
-  case PV_8_Compat:
-    strcpy(pci->version, "PV_8_Compat");
-    break;
-  default:
-    strcpy(pci->version, "unknown");
-  }
-
   return true;
 }
 
--- a/src/hotspot/os/aix/libperfstat_aix.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/os/aix/libperfstat_aix.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -942,7 +942,6 @@
     int ncpus;                            // number of active logical processors
     double loadavg[3];                    // (1<<SBITS) times the average number of runnables processes during the last 1, 5 and 15 minutes.
                                           // To calculate the load average, divide the numbers by (1<<SBITS). SBITS is defined in <sys/proc.h>.
-    char version[20];                     // processor version from _system_configuration (sys/systemcfg.h)
     unsigned long long user_clock_ticks;  // raw total number of clock ticks spent in user mode
     unsigned long long sys_clock_ticks;   // raw total number of clock ticks spent in system mode
     unsigned long long idle_clock_ticks;  // raw total number of clock ticks spent idle
@@ -965,7 +964,6 @@
   static bool get_partitioninfo(partitioninfo_t* ppi);
   static bool get_cpuinfo(cpuinfo_t* pci);
   static bool get_wparinfo(wparinfo_t* pwi);
-
 };
 
 #endif // OS_AIX_VM_LIBPERFSTAT_AIX_HPP
--- a/src/hotspot/os/aix/os_aix.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/os/aix/os_aix.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -117,7 +117,7 @@
 #if !defined(_AIXVERSION_610)
 extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
 extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
-extern "C" int getargs   (procsinfo*, int, char*, int);
+extern "C" int getargs(procsinfo*, int, char*, int);
 #endif
 
 #define MAX_PATH (2 * K)
@@ -130,6 +130,32 @@
 #define ERROR_MP_VMGETINFO_FAILED                    102
 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 
+// excerpts from systemcfg.h that might be missing on older os levels
+#ifndef PV_5_Compat
+  #define PV_5_Compat 0x0F8000   /* Power PC 5 */
+#endif
+#ifndef PV_6
+  #define PV_6 0x100000          /* Power PC 6 */
+#endif
+#ifndef PV_6_1
+  #define PV_6_1 0x100001        /* Power PC 6 DD1.x */
+#endif
+#ifndef PV_6_Compat
+  #define PV_6_Compat 0x108000   /* Power PC 6 */
+#endif
+#ifndef PV_7
+  #define PV_7 0x200000          /* Power PC 7 */
+#endif
+#ifndef PV_7_Compat
+  #define PV_7_Compat 0x208000   /* Power PC 7 */
+#endif
+#ifndef PV_8
+  #define PV_8 0x300000          /* Power PC 8 */
+#endif
+#ifndef PV_8_Compat
+  #define PV_8_Compat 0x308000   /* Power PC 8 */
+#endif
+
 static address resolve_function_descriptor_to_code_pointer(address p);
 
 static void vmembk_print_on(outputStream* os);
@@ -1443,17 +1469,48 @@
 
 // Get a string for the cpuinfo that is a summary of the cpu type
 void os::get_summary_cpu_info(char* buf, size_t buflen) {
-  // This looks good
-  libperfstat::cpuinfo_t ci;
-  if (libperfstat::get_cpuinfo(&ci)) {
-    strncpy(buf, ci.version, buflen);
-  } else {
-    strncpy(buf, "AIX", buflen);
+  // read _system_configuration.version
+  switch (_system_configuration.version) {
+  case PV_8:
+    strncpy(buf, "Power PC 8", buflen);
+    break;
+  case PV_7:
+    strncpy(buf, "Power PC 7", buflen);
+    break;
+  case PV_6_1:
+    strncpy(buf, "Power PC 6 DD1.x", buflen);
+    break;
+  case PV_6:
+    strncpy(buf, "Power PC 6", buflen);
+    break;
+  case PV_5:
+    strncpy(buf, "Power PC 5", buflen);
+    break;
+  case PV_5_2:
+    strncpy(buf, "Power PC 5_2", buflen);
+    break;
+  case PV_5_3:
+    strncpy(buf, "Power PC 5_3", buflen);
+    break;
+  case PV_5_Compat:
+    strncpy(buf, "PV_5_Compat", buflen);
+    break;
+  case PV_6_Compat:
+    strncpy(buf, "PV_6_Compat", buflen);
+    break;
+  case PV_7_Compat:
+    strncpy(buf, "PV_7_Compat", buflen);
+    break;
+  case PV_8_Compat:
+    strncpy(buf, "PV_8_Compat", buflen);
+    break;
+  default:
+    strncpy(buf, "unknown", buflen);
   }
 }
 
 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
-  // Nothing to do beyond what os::print_cpu_info() does.
+  // Nothing to do beyond what os::print_cpu_info() does.
 }
 
 static void print_signal_handler(outputStream* st, int sig,
@@ -4242,48 +4299,6 @@
   return -1;
 }
 
-// is_headless_jre()
-//
-// Test for the existence of xawt/libmawt.so or libawt_xawt.so
-// in order to report if we are running in a headless jre.
-//
-// Since JDK8 xawt/libmawt.so is moved into the same directory
-// as libawt.so, and renamed libawt_xawt.so
-bool os::is_headless_jre() {
-  struct stat statbuf;
-  char buf[MAXPATHLEN];
-  char libmawtpath[MAXPATHLEN];
-  const char *xawtstr = "/xawt/libmawt.so";
-  const char *new_xawtstr = "/libawt_xawt.so";
-
-  char *p;
-
-  // Get path to libjvm.so
-  os::jvm_path(buf, sizeof(buf));
-
-  // Get rid of libjvm.so
-  p = strrchr(buf, '/');
-  if (p == NULL) return false;
-  else *p = '\0';
-
-  // Get rid of client or server
-  p = strrchr(buf, '/');
-  if (p == NULL) return false;
-  else *p = '\0';
-
-  // check xawt/libmawt.so
-  strcpy(libmawtpath, buf);
-  strcat(libmawtpath, xawtstr);
-  if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-  // check libawt_xawt.so
-  strcpy(libmawtpath, buf);
-  strcat(libmawtpath, new_xawtstr);
-  if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-  return true;
-}
-
 // Get the default path to the core file
 // Returns the length of the string
 int os::get_core_path(char* buffer, size_t bufferSize) {
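
The rewritten os::get_summary_cpu_info above keys the summary string off _system_configuration.version using the PV_* codes defined near the top of the file. Below is a minimal standalone sketch of the same mapping; the lookup table, function names and main() are illustrative only (they are not HotSpot code), and only a subset of the PV_* values from the excerpt is shown.

#include <cstdio>
#include <cstring>

// PV_* values as in the systemcfg.h excerpt above (subset).
struct PvName { unsigned code; const char* name; };
static const PvName pv_names[] = {
  { 0x100000, "Power PC 6" },
  { 0x100001, "Power PC 6 DD1.x" },
  { 0x200000, "Power PC 7" },
  { 0x300000, "Power PC 8" },
  { 0x308000, "PV_8_Compat" },
};

// Copy a human-readable name for the reported processor version into buf.
static void summarize_cpu(unsigned version, char* buf, size_t buflen) {
  const char* name = "unknown";
  for (const PvName& e : pv_names) {
    if (e.code == version) { name = e.name; break; }
  }
  strncpy(buf, name, buflen);
  buf[buflen - 1] = '\0';   // strncpy does not terminate on truncation
}

int main() {
  char buf[32];
  summarize_cpu(0x200000, buf, sizeof(buf));
  printf("%s\n", buf);      // prints: Power PC 7
  return 0;
}
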
--- a/src/hotspot/os/bsd/os_bsd.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/os/bsd/os_bsd.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -3894,59 +3894,6 @@
   }
 }
 
-// is_headless_jre()
-//
-// Test for the existence of xawt/libmawt.so or libawt_xawt.so
-// in order to report if we are running in a headless jre
-//
-// Since JDK8 xawt/libmawt.so was moved into the same directory
-// as libawt.so, and renamed libawt_xawt.so
-//
-bool os::is_headless_jre() {
-#ifdef __APPLE__
-  // We no longer build headless-only on Mac OS X
-  return false;
-#else
-  struct stat statbuf;
-  char buf[MAXPATHLEN];
-  char libmawtpath[MAXPATHLEN];
-  const char *xawtstr  = "/xawt/libmawt" JNI_LIB_SUFFIX;
-  const char *new_xawtstr = "/libawt_xawt" JNI_LIB_SUFFIX;
-  char *p;
-
-  // Get path to libjvm.so
-  os::jvm_path(buf, sizeof(buf));
-
-  // Get rid of libjvm.so
-  p = strrchr(buf, '/');
-  if (p == NULL) {
-    return false;
-  } else {
-    *p = '\0';
-  }
-
-  // Get rid of client or server
-  p = strrchr(buf, '/');
-  if (p == NULL) {
-    return false;
-  } else {
-    *p = '\0';
-  }
-
-  // check xawt/libmawt.so
-  strcpy(libmawtpath, buf);
-  strcat(libmawtpath, xawtstr);
-  if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-  // check libawt_xawt.so
-  strcpy(libmawtpath, buf);
-  strcat(libmawtpath, new_xawtstr);
-  if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-  return true;
-#endif
-}
-
 // Get the default path to the core file
 // Returns the length of the string
 int os::get_core_path(char* buffer, size_t bufferSize) {
--- a/src/hotspot/os/linux/os_linux.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/os/linux/os_linux.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -5690,54 +5690,6 @@
   }
 }
 
-// is_headless_jre()
-//
-// Test for the existence of xawt/libmawt.so or libawt_xawt.so
-// in order to report if we are running in a headless jre
-//
-// Since JDK8 xawt/libmawt.so was moved into the same directory
-// as libawt.so, and renamed libawt_xawt.so
-//
-bool os::is_headless_jre() {
-  struct stat statbuf;
-  char buf[MAXPATHLEN];
-  char libmawtpath[MAXPATHLEN];
-  const char *xawtstr  = "/xawt/libmawt.so";
-  const char *new_xawtstr = "/libawt_xawt.so";
-  char *p;
-
-  // Get path to libjvm.so
-  os::jvm_path(buf, sizeof(buf));
-
-  // Get rid of libjvm.so
-  p = strrchr(buf, '/');
-  if (p == NULL) {
-    return false;
-  } else {
-    *p = '\0';
-  }
-
-  // Get rid of client or server
-  p = strrchr(buf, '/');
-  if (p == NULL) {
-    return false;
-  } else {
-    *p = '\0';
-  }
-
-  // check xawt/libmawt.so
-  strcpy(libmawtpath, buf);
-  strcat(libmawtpath, xawtstr);
-  if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-  // check libawt_xawt.so
-  strcpy(libmawtpath, buf);
-  strcat(libmawtpath, new_xawtstr);
-  if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-  return true;
-}
-
 // Get the default path to the core file
 // Returns the length of the string
 int os::get_core_path(char* buffer, size_t bufferSize) {
--- a/src/hotspot/os/linux/perfMemory_linux.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/os/linux/perfMemory_linux.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -147,13 +147,26 @@
 // which is always a local file system and is sometimes a RAM based file
 // system.
 
+
 // return the user specific temporary directory name.
 //
+// If containerized process, get dirname of
+// /proc/{vmid}/root/tmp/{PERFDATA_NAME_user}
+// otherwise /tmp/{PERFDATA_NAME_user}
+//
 // the caller is expected to free the allocated memory.
 //
-static char* get_user_tmp_dir(const char* user) {
+#define TMP_BUFFER_LEN (4+22)  // strlen("/tmp") + room for "/proc/<10-digit pid>/root", incl. NUL
+static char* get_user_tmp_dir(const char* user, int vmid, int nspid) {
+  char buffer[TMP_BUFFER_LEN];
+  char* tmpdir = (char *)os::get_temp_directory();
+  assert(strlen(tmpdir) == 4, "No longer using /tmp - update buffer size");
 
-  const char* tmpdir = os::get_temp_directory();
+  if (nspid != -1) {
+    jio_snprintf(buffer, TMP_BUFFER_LEN, "/proc/%d/root%s", vmid, tmpdir);
+    tmpdir = buffer;
+  }
+
   const char* perfdir = PERFDATA_NAME;
   size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
   char* dirname = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
@@ -502,7 +515,10 @@
 //
 // the caller is expected to free the allocated memory.
 //
-static char* get_user_name_slow(int vmid, TRAPS) {
+// If nspid != -1, look in /proc/{vmid}/root/tmp for directories
+// containing nspid, otherwise just look for vmid in /tmp
+//
+static char* get_user_name_slow(int vmid, int nspid, TRAPS) {
 
   // short circuit the directory search if the process doesn't even exist.
   if (kill(vmid, 0) == OS_ERR) {
@@ -518,8 +534,19 @@
   // directory search
   char* oldest_user = NULL;
   time_t oldest_ctime = 0;
+  char buffer[TMP_BUFFER_LEN];
+  int searchpid;
+  char* tmpdirname = (char *)os::get_temp_directory();
+  assert(strlen(tmpdirname) == 4, "No longer using /tmp - update buffer size");
 
-  const char* tmpdirname = os::get_temp_directory();
+  if (nspid == -1) {
+    searchpid = vmid;
+  }
+  else {
+    jio_snprintf(buffer, TMP_BUFFER_LEN, "/proc/%d/root%s", vmid, tmpdirname);
+    tmpdirname = buffer;
+    searchpid = nspid;
+  }
 
   // open the temp directory
   DIR* tmpdirp = os::opendir(tmpdirname);
@@ -530,7 +557,7 @@
   }
 
   // for each entry in the directory that matches the pattern hsperfdata_*,
-  // open the directory and check if the file for the given vmid exists.
+  // open the directory and check if the file for the given vmid or nspid exists.
   // The file with the expected name and the latest creation date is used
   // to determine the user name for the process id.
   //
@@ -575,7 +602,7 @@
     errno = 0;
     while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) {
 
-      if (filename_to_pid(udentry->d_name) == vmid) {
+      if (filename_to_pid(udentry->d_name) == searchpid) {
         struct stat statbuf;
         int result;
 
@@ -626,10 +653,51 @@
   return(oldest_user);
 }
 
+// Determine if the vmid is the parent pid
+// for a child in a PID namespace.
+// return the namespace pid if so, otherwise -1
+static int get_namespace_pid(int vmid) {
+  char fname[24];
+  int retpid = -1;
+
+  snprintf(fname, sizeof(fname), "/proc/%d/status", vmid);
+  FILE *fp = fopen(fname, "r");
+
+  if (fp) {
+    int pid, nspid;
+    int ret;
+    while (!feof(fp)) {
+      ret = fscanf(fp, "NSpid: %d %d", &pid, &nspid);
+      if (ret == 1) {
+        break;
+      }
+      if (ret == 2) {
+        retpid = nspid;
+        break;
+      }
+      for (;;) {
+        int ch = fgetc(fp);
+        if (ch == EOF || ch == (int)'\n') break;
+      }
+    }
+    fclose(fp);
+  }
+  return retpid;
+}
+
 // return the name of the user that owns the JVM indicated by the given vmid.
 //
-static char* get_user_name(int vmid, TRAPS) {
-  return get_user_name_slow(vmid, THREAD);
+static char* get_user_name(int vmid, int *nspid, TRAPS) {
+  char *result = get_user_name_slow(vmid, *nspid, THREAD);
+
+  // If we are examining a container process without PID namespaces enabled
+  // we need to use /proc/{pid}/root/tmp to find hsperfdata files.
+  if (result == NULL) {
+    result = get_user_name_slow(vmid, vmid, THREAD);
+    // Enable nspid logic going forward
+    if (result != NULL) *nspid = vmid;
+  }
+  return result;
 }
 
 // return the file name of the backing store file for the named
@@ -637,13 +705,15 @@
 //
 // the caller is expected to free the allocated memory.
 //
-static char* get_sharedmem_filename(const char* dirname, int vmid) {
+static char* get_sharedmem_filename(const char* dirname, int vmid, int nspid) {
+
+  int pid = (nspid == -1) ? vmid : nspid;
 
   // add 2 for the file separator and a null terminator.
   size_t nbytes = strlen(dirname) + UINT_CHARS + 2;
 
   char* name = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
-  snprintf(name, nbytes, "%s/%d", dirname, vmid);
+  snprintf(name, nbytes, "%s/%d", dirname, pid);
 
   return name;
 }
@@ -940,8 +1010,8 @@
   if (user_name == NULL)
     return NULL;
 
-  char* dirname = get_user_tmp_dir(user_name);
-  char* filename = get_sharedmem_filename(dirname, vmid);
+  char* dirname = get_user_tmp_dir(user_name, vmid, -1);
+  char* filename = get_sharedmem_filename(dirname, vmid, -1);
 
   // get the short filename
   char* short_filename = strrchr(filename, '/');
@@ -1088,8 +1158,11 @@
               "Illegal access mode");
   }
 
+  // determine if vmid is for a containerized process
+  int nspid = get_namespace_pid(vmid);
+
   if (user == NULL || strlen(user) == 0) {
-    luser = get_user_name(vmid, CHECK);
+    luser = get_user_name(vmid, &nspid, CHECK);
   }
   else {
     luser = user;
@@ -1100,7 +1173,7 @@
               "Could not map vmid to user Name");
   }
 
-  char* dirname = get_user_tmp_dir(luser);
+  char* dirname = get_user_tmp_dir(luser, vmid, nspid);
 
   // since we don't follow symbolic links when creating the backing
   // store file, we don't follow them when attaching either.
@@ -1114,7 +1187,7 @@
               "Process not found");
   }
 
-  char* filename = get_sharedmem_filename(dirname, vmid);
+  char* filename = get_sharedmem_filename(dirname, vmid, nspid);
 
   // copy heap memory to resource memory. the open_sharedmem_file
   // method below need to use the filename, but could throw an
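
The perfMemory changes above hinge on two observations: a target JVM running in a PID namespace shows an "NSpid:" line in /proc/<pid>/status (on kernels that report it), and its /tmp is reachable from the host as /proc/<pid>/root/tmp. The following self-contained sketch shows that probe in isolation, assuming a Linux /proc layout; the function name and main() are illustrative, not part of the HotSpot change.

#include <cstdio>
#include <unistd.h>

// Return the pid of 'pid' as seen inside its own PID namespace, or -1 if the
// kernel does not report an NSpid line or the process is not namespaced.
static int namespace_pid_of(int pid) {
  char fname[64];
  snprintf(fname, sizeof(fname), "/proc/%d/status", pid);
  FILE* fp = fopen(fname, "r");
  if (fp == nullptr) return -1;

  int result = -1;
  char line[256];
  while (fgets(line, sizeof(line), fp) != nullptr) {
    int host_pid, ns_pid;
    // "NSpid:\t<host pid>\t<ns pid>"; a second field only exists when namespaced.
    if (sscanf(line, "NSpid: %d %d", &host_pid, &ns_pid) == 2) {
      result = ns_pid;
      break;
    }
  }
  fclose(fp);
  return result;
}

int main() {
  int self = (int)getpid();
  int nspid = namespace_pid_of(self);
  if (nspid == -1) {
    printf("pid %d: not in a separate PID namespace (or no NSpid support)\n", self);
  } else {
    printf("pid %d is %d in its namespace; its /tmp is /proc/%d/root/tmp\n",
           self, nspid, self);
  }
  return 0;
}
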
--- a/src/hotspot/os/posix/vmError_posix.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/os/posix/vmError_posix.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "memory/filemap.hpp"
+#include "memory/metaspaceShared.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/os.hpp"
 #include "runtime/thread.hpp"
@@ -153,8 +153,7 @@
     if (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) {
       const void* const fault_addr = si->si_addr;
       if (fault_addr != NULL) {
-        FileMapInfo* const mapinfo = FileMapInfo::current_info();
-        if (mapinfo->is_in_shared_space(fault_addr)) {
+        if (MetaspaceShared::is_in_shared_metaspace(fault_addr)) {
           st->print("Error accessing class data sharing archive. "
             "Mapped file inaccessible during execution, possible disk/network problem.");
         }
--- a/src/hotspot/os/solaris/os_solaris.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/os/solaris/os_solaris.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -5366,54 +5366,6 @@
   }
 }
 
-// is_headless_jre()
-//
-// Test for the existence of xawt/libmawt.so or libawt_xawt.so
-// in order to report if we are running in a headless jre
-//
-// Since JDK8 xawt/libmawt.so was moved into the same directory
-// as libawt.so, and renamed libawt_xawt.so
-//
-bool os::is_headless_jre() {
-  struct stat statbuf;
-  char buf[MAXPATHLEN];
-  char libmawtpath[MAXPATHLEN];
-  const char *xawtstr  = "/xawt/libmawt.so";
-  const char *new_xawtstr = "/libawt_xawt.so";
-  char *p;
-
-  // Get path to libjvm.so
-  os::jvm_path(buf, sizeof(buf));
-
-  // Get rid of libjvm.so
-  p = strrchr(buf, '/');
-  if (p == NULL) {
-    return false;
-  } else {
-    *p = '\0';
-  }
-
-  // Get rid of client or server
-  p = strrchr(buf, '/');
-  if (p == NULL) {
-    return false;
-  } else {
-    *p = '\0';
-  }
-
-  // check xawt/libmawt.so
-  strcpy(libmawtpath, buf);
-  strcat(libmawtpath, xawtstr);
-  if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-  // check libawt_xawt.so
-  strcpy(libmawtpath, buf);
-  strcat(libmawtpath, new_xawtstr);
-  if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-  return true;
-}
-
 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
   size_t res;
   RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
--- a/src/hotspot/os/windows/os_windows.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/os/windows/os_windows.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -5262,9 +5262,6 @@
   return EXCEPTION_CONTINUE_SEARCH;
 }
 
-// We don't build a headless jre for Windows
-bool os::is_headless_jre() { return false; }
-
 static jint initSock() {
   WSADATA wsadata;
 
--- a/src/hotspot/os/windows/vmError_windows.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/os/windows/vmError_windows.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "memory/filemap.hpp"
+#include "memory/metaspaceShared.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/os.hpp"
 #include "runtime/thread.hpp"
@@ -58,8 +58,7 @@
         er->NumberParameters >= 2) {
       const void* const fault_addr = (const void*) er->ExceptionInformation[1];
       if (fault_addr != NULL) {
-        FileMapInfo* const mapinfo = FileMapInfo::current_info();
-        if (mapinfo->is_in_shared_space(fault_addr)) {
+        if (MetaspaceShared::is_in_shared_metaspace(fault_addr)) {
           st->print("Error accessing class data sharing archive. "
             "Mapped file inaccessible during execution, possible disk/network problem.");
         }
--- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -111,6 +111,10 @@
   uc->uc_mcontext.jmp_context.iar = (uint64_t) new_pc;
 }
 
+static address ucontext_get_lr(const ucontext_t * uc) {
+  return (address)uc->uc_mcontext.jmp_context.lr;
+}
+
 ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
                                         intptr_t** ret_sp, intptr_t** ret_fp) {
 
@@ -167,7 +171,8 @@
       return false;
     } else {
       intptr_t* sp = os::Aix::ucontext_get_sp(uc);
-      *fr = frame(sp, (address)*sp);
+      address lr = ucontext_get_lr(uc);
+      *fr = frame(sp, lr);
       if (!fr->is_java_frame()) {
         assert(fr->safe_for_sender(thread), "Safety check");
         assert(!fr->is_first_frame(), "Safety check");
--- a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -279,11 +279,11 @@
 address os::current_stack_pointer() {
 #if defined(__clang__) || defined(__llvm__)
   register void *esp;
-  __asm__("mov %%"SPELL_REG_SP", %0":"=r"(esp));
+  __asm__("mov %%" SPELL_REG_SP ", %0":"=r"(esp));
   return (address) esp;
 #elif defined(SPARC_WORKS)
   register void *esp;
-  __asm__("mov %%"SPELL_REG_SP", %0":"=r"(esp));
+  __asm__("mov %%" SPELL_REG_SP ", %0":"=r"(esp));
   return (address) ((char*)esp + sizeof(long)*2);
 #else
   register void *esp __asm__ (SPELL_REG_SP);
@@ -415,7 +415,7 @@
 intptr_t* _get_previous_fp() {
 #if defined(SPARC_WORKS) || defined(__clang__) || defined(__llvm__)
   register intptr_t **ebp;
-  __asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp));
+  __asm__("mov %%" SPELL_REG_FP ", %0":"=r"(ebp));
 #else
   register intptr_t **ebp __asm__ (SPELL_REG_FP);
 #endif
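
The only change in the inline-assembly strings above is whitespace around SPELL_REG_SP / SPELL_REG_FP. Since C++11, a string literal immediately followed by an identifier is lexed as a user-defined-literal suffix, so the macro is never expanded; separating the tokens restores ordinary literal concatenation. A tiny compilable illustration follows; the macro value is made up for the example (the real one is per-architecture).

#include <cstdio>

#define SPELL_REG_SP "rsp"   // placeholder value for this sketch

int main() {
  // const char* bad = "mov %%"SPELL_REG_SP", %0";
  //   C++11 lexes "mov %%"SPELL_REG_SP as a literal with suffix SPELL_REG_SP,
  //   so the macro is not expanded and the line does not compile.
  const char* good = "mov %%" SPELL_REG_SP ", %0";  // adjacent literals concatenate
  puts(good);  // prints: mov %%rsp, %0
  return 0;
}
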
--- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -121,6 +121,10 @@
   uc->uc_mcontext.regs->nip = (unsigned long)pc;
 }
 
+static address ucontext_get_lr(const ucontext_t * uc) {
+  return (address)uc->uc_mcontext.regs->link;
+}
+
 intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) {
   return (intptr_t*)uc->uc_mcontext.regs->gpr[1/*REG_SP*/];
 }
@@ -178,9 +182,9 @@
       // the frame is complete.
       return false;
     } else {
-      intptr_t* fp = os::Linux::ucontext_get_fp(uc);
       intptr_t* sp = os::Linux::ucontext_get_sp(uc);
-      *fr = frame(sp, (address)*sp);
+      address lr = ucontext_get_lr(uc);
+      *fr = frame(sp, lr);
       if (!fr->is_java_frame()) {
         assert(fr->safe_for_sender(thread), "Safety check");
         assert(!fr->is_first_frame(), "Safety check");
--- a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -108,6 +108,10 @@
   uc->uc_mcontext.psw.addr = (unsigned long)pc;
 }
 
+static address ucontext_get_lr(const ucontext_t * uc) {
+  return (address)uc->uc_mcontext.gregs[14/*LINK*/];
+}
+
 intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) {
   return (intptr_t*)uc->uc_mcontext.gregs[15/*REG_SP*/];
 }
@@ -165,9 +169,9 @@
       // the frame is complete.
       return false;
     } else {
-      intptr_t* fp = os::Linux::ucontext_get_fp(uc);
       intptr_t* sp = os::Linux::ucontext_get_sp(uc);
-      *fr = frame(sp, (address)*sp);
+      address lr = ucontext_get_lr(uc);
+      *fr = frame(sp, lr);
       if (!fr->is_java_frame()) {
         assert(fr->safe_for_sender(thread), "Safety check");
         assert(!fr->is_first_frame(), "Safety check");
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -874,6 +874,8 @@
 void GraphBuilder::load_constant() {
   ciConstant con = stream()->get_constant();
   if (con.basic_type() == T_ILLEGAL) {
+    // FIXME: an unresolved Dynamic constant can get here,
+    // and that should not terminate the whole compilation.
     BAILOUT("could not resolve a constant");
   } else {
     ValueType* t = illegalType;
@@ -893,11 +895,19 @@
         ciObject* obj = con.as_object();
         if (!obj->is_loaded()
             || (PatchALot && obj->klass() != ciEnv::current()->String_klass())) {
+          // A Class, MethodType, MethodHandle, or String.
+          // Unloaded condy nodes show up as T_ILLEGAL, above.
           patch_state = copy_state_before();
           t = new ObjectConstant(obj);
         } else {
-          assert(obj->is_instance(), "must be java_mirror of klass");
-          t = new InstanceConstant(obj->as_instance());
+          // Might be a Class, MethodType, MethodHandle, or Dynamic constant
+          // result, which might turn out to be an array.
+          if (obj->is_null_object())
+            t = objectNull;
+          else if (obj->is_array())
+            t = new ArrayConstant(obj->as_array());
+          else
+            t = new InstanceConstant(obj->as_instance());
         }
         break;
        }
--- a/src/hotspot/share/ci/ciEnv.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/ci/ciEnv.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -584,8 +584,34 @@
   int index = pool_index;
   if (cache_index >= 0) {
     assert(index < 0, "only one kind of index at a time");
+    index = cpool->object_to_cp_index(cache_index);
     oop obj = cpool->resolved_references()->obj_at(cache_index);
     if (obj != NULL) {
+      if (obj == Universe::the_null_sentinel()) {
+        return ciConstant(T_OBJECT, get_object(NULL));
+      }
+      BasicType bt = T_OBJECT;
+      if (cpool->tag_at(index).is_dynamic_constant())
+        bt = FieldType::basic_type(cpool->uncached_signature_ref_at(index));
+      if (is_reference_type(bt)) {
+      } else {
+        // we have to unbox the primitive value
+        if (!is_java_primitive(bt))  return ciConstant();
+        jvalue value;
+        BasicType bt2 = java_lang_boxing_object::get_value(obj, &value);
+        assert(bt2 == bt, "");
+        switch (bt2) {
+        case T_DOUBLE:  return ciConstant(value.d);
+        case T_FLOAT:   return ciConstant(value.f);
+        case T_LONG:    return ciConstant(value.j);
+        case T_INT:     return ciConstant(bt2, value.i);
+        case T_SHORT:   return ciConstant(bt2, value.s);
+        case T_BYTE:    return ciConstant(bt2, value.b);
+        case T_CHAR:    return ciConstant(bt2, value.c);
+        case T_BOOLEAN: return ciConstant(bt2, value.z);
+        default:  return ciConstant();
+        }
+      }
       ciObject* ciobj = get_object(obj);
       if (ciobj->is_array()) {
         return ciConstant(T_ARRAY, ciobj);
@@ -594,7 +620,6 @@
         return ciConstant(T_OBJECT, ciobj);
       }
     }
-    index = cpool->object_to_cp_index(cache_index);
   }
   constantTag tag = cpool->tag_at(index);
   if (tag.is_int()) {
@@ -650,6 +675,8 @@
     ciSymbol* signature = get_symbol(cpool->method_handle_signature_ref_at(index));
     ciObject* ciobj     = get_unloaded_method_handle_constant(callee, name, signature, ref_kind);
     return ciConstant(T_OBJECT, ciobj);
+  } else if (tag.is_dynamic_constant()) {
+    return ciConstant();
   } else {
     ShouldNotReachHere();
     return ciConstant();
--- a/src/hotspot/share/ci/ciReplay.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/ci/ciReplay.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -721,6 +721,7 @@
         case JVM_CONSTANT_Float:
         case JVM_CONSTANT_MethodHandle:
         case JVM_CONSTANT_MethodType:
+        case JVM_CONSTANT_Dynamic:
         case JVM_CONSTANT_InvokeDynamic:
           if (tag != cp->tag_at(i).value()) {
             report_error("tag mismatch: wrong class files?");
--- a/src/hotspot/share/ci/ciStreams.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/ci/ciStreams.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -254,7 +254,8 @@
 // constant.
 constantTag ciBytecodeStream::get_constant_pool_tag(int index) const {
   VM_ENTRY_MARK;
-  return _method->get_Method()->constants()->tag_at(index);
+  BasicType bt = _method->get_Method()->constants()->basic_type_for_constant_at(index);
+  return constantTag::ofBasicType(bt);
 }
 
 // ------------------------------------------------------------------
--- a/src/hotspot/share/classfile/classFileParser.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/classfile/classFileParser.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -204,6 +204,21 @@
         }
         break;
       }
+      case JVM_CONSTANT_Dynamic : {
+        if (_major_version < Verifier::DYNAMICCONSTANT_MAJOR_VERSION) {
+          classfile_parse_error(
+              "Class file version does not support constant tag %u in class file %s",
+              tag, CHECK);
+        }
+        cfs->guarantee_more(5, CHECK);  // bsm_index, nt, tag/access_flags
+        const u2 bootstrap_specifier_index = cfs->get_u2_fast();
+        const u2 name_and_type_index = cfs->get_u2_fast();
+        if (_max_bootstrap_specifier_index < (int) bootstrap_specifier_index) {
+          _max_bootstrap_specifier_index = (int) bootstrap_specifier_index;  // collect for later
+        }
+        cp->dynamic_constant_at_put(index, bootstrap_specifier_index, name_and_type_index);
+        break;
+      }
       case JVM_CONSTANT_InvokeDynamic : {
         if (_major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
           classfile_parse_error(
@@ -536,6 +551,21 @@
           ref_index, CHECK);
         break;
       }
+      case JVM_CONSTANT_Dynamic: {
+        const int name_and_type_ref_index =
+          cp->invoke_dynamic_name_and_type_ref_index_at(index);
+
+        check_property(valid_cp_range(name_and_type_ref_index, length) &&
+          cp->tag_at(name_and_type_ref_index).is_name_and_type(),
+          "Invalid constant pool index %u in class file %s",
+          name_and_type_ref_index, CHECK);
+        // bootstrap specifier index must be checked later,
+        // when BootstrapMethods attr is available
+
+        // Mark the constant pool as having a CONSTANT_Dynamic_info structure
+        cp->set_has_dynamic_constant();
+        break;
+      }
       case JVM_CONSTANT_InvokeDynamic: {
         const int name_and_type_ref_index =
           cp->invoke_dynamic_name_and_type_ref_index_at(index);
@@ -628,6 +658,27 @@
         }
         break;
       }
+      case JVM_CONSTANT_Dynamic: {
+        const int name_and_type_ref_index =
+          cp->name_and_type_ref_index_at(index);
+        // already verified to be utf8
+        const int name_ref_index =
+          cp->name_ref_index_at(name_and_type_ref_index);
+        // already verified to be utf8
+        const int signature_ref_index =
+          cp->signature_ref_index_at(name_and_type_ref_index);
+        const Symbol* const name = cp->symbol_at(name_ref_index);
+        const Symbol* const signature = cp->symbol_at(signature_ref_index);
+        if (_need_verify) {
+          // CONSTANT_Dynamic's name and signature are verified above, when iterating NameAndType_info.
+          // Need only to be sure signature is non-zero length and the right type.
+          if (signature->utf8_length() == 0 ||
+              signature->byte_at(0) == JVM_SIGNATURE_FUNC) {
+            throwIllegalSignature("CONSTANT_Dynamic", name, signature, CHECK);
+          }
+        }
+        break;
+      }
       case JVM_CONSTANT_InvokeDynamic:
       case JVM_CONSTANT_Fieldref:
       case JVM_CONSTANT_Methodref:
--- a/src/hotspot/share/classfile/classLoaderData.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/classfile/classLoaderData.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1292,7 +1292,7 @@
       // Remove entries in the dictionary of live class loader that have
       // initiated loading classes in a dead class loader.
       if (data->dictionary() != NULL) {
-        data->dictionary()->do_unloading();
+        data->dictionary()->do_unloading(is_alive_closure);
       }
       // Walk a ModuleEntry's reads, and a PackageEntry's exports
       // lists to determine if there are modules on those lists that are now
--- a/src/hotspot/share/classfile/dictionary.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/classfile/dictionary.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -167,7 +167,7 @@
     for (ProtectionDomainEntry* current = pd_set_acquire();
                                 current != NULL;
                                 current = current->next()) {
-      if (current->protection_domain() == protection_domain) {
+      if (current->object_no_keepalive() == protection_domain) {
         in_pd_set = true;
         break;
       }
@@ -187,7 +187,7 @@
   for (ProtectionDomainEntry* current = pd_set_acquire();
                               current != NULL;
                               current = current->next()) {
-    if (current->protection_domain() == protection_domain) return true;
+    if (current->object_no_keepalive() == protection_domain) return true;
   }
   return false;
 }
@@ -212,8 +212,44 @@
   }
 }
 
+// During class loading we may have cached a protection domain that has
+// since been unreferenced, so this entry should be cleared.
+void Dictionary::clean_cached_protection_domains(BoolObjectClosure* is_alive, DictionaryEntry* probe) {
+  assert_locked_or_safepoint(SystemDictionary_lock);
 
-void Dictionary::do_unloading() {
+  ProtectionDomainEntry* current = probe->pd_set();
+  ProtectionDomainEntry* prev = NULL;
+  while (current != NULL) {
+    if (!is_alive->do_object_b(current->object_no_keepalive())) {
+      LogTarget(Debug, protectiondomain) lt;
+      if (lt.is_enabled()) {
+        ResourceMark rm;
+        // Print out trace information
+        LogStream ls(lt);
+        ls.print_cr("PD in set is not alive:");
+        ls.print("class loader: "); loader_data()->class_loader()->print_value_on(&ls);
+        ls.print(" protection domain: "); current->object_no_keepalive()->print_value_on(&ls);
+        ls.print(" loading: "); probe->instance_klass()->print_value_on(&ls);
+        ls.cr();
+      }
+      if (probe->pd_set() == current) {
+        probe->set_pd_set(current->next());
+      } else {
+        assert(prev != NULL, "should be set by alive entry");
+        prev->set_next(current->next());
+      }
+      ProtectionDomainEntry* to_delete = current;
+      current = current->next();
+      delete to_delete;
+    } else {
+      prev = current;
+      current = current->next();
+    }
+  }
+}
+
+
+void Dictionary::do_unloading(BoolObjectClosure* is_alive) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
 
   // The NULL class loader doesn't initiate loading classes from other class loaders
@@ -239,6 +275,8 @@
         free_entry(probe);
         continue;
       }
+      // Clean pd_set
+      clean_cached_protection_domains(is_alive, probe);
       p = probe->next_addr();
     }
   }
@@ -412,6 +450,10 @@
 
   entry->add_protection_domain(this, protection_domain);
 
+#ifdef ASSERT
+  assert(loader_data() != ClassLoaderData::the_null_class_loader_data(), "doesn't make sense");
+#endif
+
   assert(entry->contains_protection_domain(protection_domain()),
          "now protection domain should be present");
 }
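
clean_cached_protection_domains above is the classic unlink-while-walking pattern on a singly linked list: a prev pointer distinguishes removing the head (resetting probe->pd_set) from splicing out an interior entry. The same pattern is shown in isolation below, with a plain Node type standing in for ProtectionDomainEntry and a bool standing in for the is_alive closure.

#include <cstdio>

struct Node {
  int   value;
  bool  alive;
  Node* next;
};

// Delete every dead node in the list rooted at *head, relinking around it.
static void prune_dead(Node** head) {
  Node* current = *head;
  Node* prev = nullptr;
  while (current != nullptr) {
    if (!current->alive) {
      if (prev == nullptr) {
        *head = current->next;        // dead node was the head
      } else {
        prev->next = current->next;   // dead node was interior
      }
      Node* to_delete = current;
      current = current->next;
      delete to_delete;
    } else {
      prev = current;
      current = current->next;
    }
  }
}

int main() {
  Node* c = new Node{3, true,  nullptr};
  Node* b = new Node{2, false, c};
  Node* a = new Node{1, true,  b};
  prune_dead(&a);
  for (Node* n = a; n != nullptr; n = n->next) printf("%d ", n->value);  // 1 3
  printf("\n");
  // remaining nodes are intentionally leaked; fine for a throwaway example
  return 0;
}
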
--- a/src/hotspot/share/classfile/dictionary.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/classfile/dictionary.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,8 @@
 
   DictionaryEntry* get_entry(int index, unsigned int hash, Symbol* name);
 
+  void clean_cached_protection_domains(BoolObjectClosure* is_alive, DictionaryEntry* probe);
+
 protected:
   static size_t entry_size();
 public:
@@ -84,7 +86,7 @@
   void remove_classes_in_error_state();
 
   // Unload classes whose defining loaders are unloaded
-  void do_unloading();
+  void do_unloading(BoolObjectClosure* is_alive);
 
   // Protection domains
   InstanceKlass* find(unsigned int hash, Symbol* name, Handle protection_domain);
@@ -189,7 +191,7 @@
     for (ProtectionDomainEntry* current = pd_set(); // accessed at a safepoint
                                 current != NULL;
                                 current = current->_next) {
-      current->_pd_cache->protection_domain()->verify();
+      current->_pd_cache->object_no_keepalive()->verify();
     }
   }
 
--- a/src/hotspot/share/classfile/javaClasses.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/classfile/javaClasses.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1702,7 +1702,7 @@
       method = mhandle();
     }
 
-    _methods->short_at_put(_index, method->orig_method_idnum());
+    _methods->ushort_at_put(_index, method->orig_method_idnum());
     _bcis->int_at_put(_index, Backtrace::merge_bci_and_version(bci, method->constants()->version()));
 
     // Note:this doesn't leak symbols because the mirror in the backtrace keeps the
@@ -1756,7 +1756,7 @@
 
   BacktraceElement next(Thread* thread) {
     BacktraceElement e (Handle(thread, _mirrors->obj_at(_index)),
-                        _methods->short_at(_index),
+                        _methods->ushort_at(_index),
                         Backtrace::version_at(_bcis->int_at(_index)),
                         Backtrace::bci_at(_bcis->int_at(_index)),
                         _names->symbol_at(_index));
@@ -1968,7 +1968,7 @@
   bool skip_throwableInit_check = false;
   bool skip_hidden = !ShowHiddenFrames;
 
-  for (frame fr = thread->last_frame(); max_depth != total_count;) {
+  for (frame fr = thread->last_frame(); max_depth == 0 || max_depth != total_count;) {
     Method* method = NULL;
     int bci = 0;
 
--- a/src/hotspot/share/classfile/protectionDomainCache.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/classfile/protectionDomainCache.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,14 +52,14 @@
     ProtectionDomainCacheEntry** p = bucket_addr(i);
     ProtectionDomainCacheEntry* entry = bucket(i);
     while (entry != NULL) {
-      if (is_alive->do_object_b(entry->literal())) {
+      if (is_alive->do_object_b(entry->object_no_keepalive())) {
         p = entry->next_addr();
       } else {
         LogTarget(Debug, protectiondomain) lt;
         if (lt.is_enabled()) {
           LogStream ls(lt);
           ls.print("protection domain unlinked: ");
-          entry->literal()->print_value_on(&ls);
+          entry->object_no_keepalive()->print_value_on(&ls);
           ls.cr();
         }
         *p = entry->next();
@@ -87,7 +87,7 @@
     for (ProtectionDomainCacheEntry* probe = bucket(index);
                                      probe != NULL;
                                      probe = probe->next()) {
-      st->print_cr("%4d: protection_domain: " PTR_FORMAT, index, p2i(probe->literal()));
+      st->print_cr("%4d: protection_domain: " PTR_FORMAT, index, p2i(probe->object_no_keepalive()));
     }
   }
 }
@@ -96,8 +96,27 @@
   verify_table<ProtectionDomainCacheEntry>("Protection Domain Table");
 }
 
+oop ProtectionDomainCacheEntry::object() {
+  return RootAccess<ON_PHANTOM_OOP_REF>::oop_load(literal_addr());
+}
+
+oop ProtectionDomainEntry::object() {
+  return _pd_cache->object();
+}
+
+// The object_no_keepalive() call peeks at the phantomly reachable oop without
+// keeping it alive. This is okay to do in the VM thread state if it is not
+// leaked out to become strongly reachable.
+oop ProtectionDomainCacheEntry::object_no_keepalive() {
+  return RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(literal_addr());
+}
+
+oop ProtectionDomainEntry::object_no_keepalive() {
+  return _pd_cache->object_no_keepalive();
+}
+
 void ProtectionDomainCacheEntry::verify() {
-  guarantee(oopDesc::is_oop(literal()), "must be an oop");
+  guarantee(oopDesc::is_oop(object_no_keepalive()), "must be an oop");
 }
 
 ProtectionDomainCacheEntry* ProtectionDomainCacheTable::get(Handle protection_domain) {
@@ -113,7 +132,7 @@
 
 ProtectionDomainCacheEntry* ProtectionDomainCacheTable::find_entry(int index, Handle protection_domain) {
   for (ProtectionDomainCacheEntry* e = bucket(index); e != NULL; e = e->next()) {
-    if (e->protection_domain() == protection_domain()) {
+    if (e->object_no_keepalive() == protection_domain()) {
       return e;
     }
   }
--- a/src/hotspot/share/classfile/protectionDomainCache.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/classfile/protectionDomainCache.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,8 @@
 class ProtectionDomainCacheEntry : public HashtableEntry<oop, mtClass> {
   friend class VMStructs;
  public:
-  oop protection_domain() { return literal(); }
+  oop object();
+  oop object_no_keepalive();
 
   ProtectionDomainCacheEntry* next() {
     return (ProtectionDomainCacheEntry*)HashtableEntry<oop, mtClass>::next();
@@ -112,6 +113,8 @@
   }
 
   ProtectionDomainEntry* next() { return _next; }
-  oop protection_domain() { return _pd_cache->protection_domain(); }
+  void set_next(ProtectionDomainEntry* entry) { _next = entry; }
+  oop object();
+  oop object_no_keepalive();
 };
 #endif // SHARE_VM_CLASSFILE_PROTECTIONDOMAINCACHE_HPP
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -2641,6 +2641,81 @@
           InstanceKlass::cast(klass)->is_same_class_package(SystemDictionary::MethodHandle_klass()));  // java.lang.invoke
 }
 
+
+// Return the Java mirror (java.lang.Class instance) for a single-character
+// descriptor.  This result, when available, is the same as produced by the
+// heavier API point of the same name that takes a Symbol.
+oop SystemDictionary::find_java_mirror_for_type(char signature_char) {
+  return java_lang_Class::primitive_mirror(char2type(signature_char));
+}
+
+// Find or construct the Java mirror (java.lang.Class instance) for
+// the given field type signature, as interpreted relative to the
+// given class loader.  Handles primitives, void, references, arrays,
+// and all other reflectable types, except method types.
+// N.B.  Code in reflection should use this entry point.
+Handle SystemDictionary::find_java_mirror_for_type(Symbol* signature,
+                                                   Klass* accessing_klass,
+                                                   Handle class_loader,
+                                                   Handle protection_domain,
+                                                   SignatureStream::FailureMode failure_mode,
+                                                   TRAPS) {
+  Handle empty;
+
+  assert(accessing_klass == NULL || (class_loader.is_null() && protection_domain.is_null()),
+         "one or the other, or perhaps neither");
+
+  Symbol* type = signature;
+
+  // What we have here must be a valid field descriptor,
+  // and all valid field descriptors are supported.
+  // Produce the same java.lang.Class that reflection reports.
+  if (type->utf8_length() == 1) {
+
+    // It's a primitive.  (Void has a primitive mirror too.)
+    char ch = (char) type->byte_at(0);
+    assert(is_java_primitive(char2type(ch)) || ch == 'V', "");
+    return Handle(THREAD, find_java_mirror_for_type(ch));
+
+  } else if (FieldType::is_obj(type) || FieldType::is_array(type)) {
+
+    // It's a reference type.
+    if (accessing_klass != NULL) {
+      class_loader      = Handle(THREAD, accessing_klass->class_loader());
+      protection_domain = Handle(THREAD, accessing_klass->protection_domain());
+    }
+    Klass* constant_type_klass;
+    if (failure_mode == SignatureStream::ReturnNull) {
+      constant_type_klass = resolve_or_null(type, class_loader, protection_domain,
+                                            CHECK_(empty));
+    } else {
+      bool throw_error = (failure_mode == SignatureStream::NCDFError);
+      constant_type_klass = resolve_or_fail(type, class_loader, protection_domain,
+                                            throw_error, CHECK_(empty));
+    }
+    if (constant_type_klass == NULL) {
+      return Handle();  // report failure this way
+    }
+    Handle mirror(THREAD, constant_type_klass->java_mirror());
+
+    // Check accessibility, emulating ConstantPool::verify_constant_pool_resolve.
+    if (accessing_klass != NULL) {
+      Klass* sel_klass = constant_type_klass;
+      bool fold_type_to_class = true;
+      LinkResolver::check_klass_accessability(accessing_klass, sel_klass,
+                                              fold_type_to_class, CHECK_(empty));
+    }
+
+    return mirror;
+
+  }
+
+  // Fall through to an error.
+  assert(false, "unsupported mirror syntax");
+  THROW_MSG_(vmSymbols::java_lang_InternalError(), "unsupported mirror syntax", empty);
+}
+
+
 // Ask Java code to find or construct a java.lang.invoke.MethodType for the given
 // signature, as interpreted relative to the given class loader.
 // Because of class loader constraints, all method handle usage must be
@@ -2695,15 +2770,13 @@
       pts->obj_at_put(arg++, mirror);
 
     // Check accessibility.
-    if (ss.is_object() && accessing_klass != NULL) {
+    if (!java_lang_Class::is_primitive(mirror) && accessing_klass != NULL) {
       Klass* sel_klass = java_lang_Class::as_Klass(mirror);
       mirror = NULL;  // safety
       // Emulate ConstantPool::verify_constant_pool_resolve.
-      if (sel_klass->is_objArray_klass())
-        sel_klass = ObjArrayKlass::cast(sel_klass)->bottom_klass();
-      if (sel_klass->is_instance_klass()) {
-        LinkResolver::check_klass_accessability(accessing_klass, sel_klass, CHECK_(empty));
-      }
+      bool fold_type_to_class = true;
+      LinkResolver::check_klass_accessability(accessing_klass, sel_klass,
+                                              fold_type_to_class, CHECK_(empty));
     }
   }
   assert(arg == npts, "");
@@ -2806,9 +2879,60 @@
   return Handle(THREAD, (oop) result.get_jobject());
 }
 
+// Ask Java to compute a constant by invoking a BSM given a Dynamic_info CP entry
+Handle SystemDictionary::link_dynamic_constant(Klass* caller,
+                                               int condy_index,
+                                               Handle bootstrap_specifier,
+                                               Symbol* name,
+                                               Symbol* type,
+                                               TRAPS) {
+  Handle empty;
+  Handle bsm, info;
+  if (java_lang_invoke_MethodHandle::is_instance(bootstrap_specifier())) {
+    bsm = bootstrap_specifier;
+  } else {
+    assert(bootstrap_specifier->is_objArray(), "");
+    objArrayOop args = (objArrayOop) bootstrap_specifier();
+    assert(args->length() == 2, "");
+    bsm  = Handle(THREAD, args->obj_at(0));
+    info = Handle(THREAD, args->obj_at(1));
+  }
+  guarantee(java_lang_invoke_MethodHandle::is_instance(bsm()),
+            "caller must supply a valid BSM");
+
+  // This should not happen.  JDK code should take care of that.
+  if (caller == NULL) {
+    THROW_MSG_(vmSymbols::java_lang_InternalError(), "bad dynamic constant", empty);
+  }
+
+  Handle constant_name = java_lang_String::create_from_symbol(name, CHECK_(empty));
+
+  // Resolve the constant type in the context of the caller class
+  Handle type_mirror = find_java_mirror_for_type(type, caller, SignatureStream::NCDFError,
+                                                 CHECK_(empty));
+
+  // call java.lang.invoke.MethodHandleNatives::linkDynamicConstant(caller, condy_index, bsm, name, type, info)
+  JavaCallArguments args;
+  args.push_oop(Handle(THREAD, caller->java_mirror()));
+  args.push_int(condy_index);
+  args.push_oop(bsm);
+  args.push_oop(constant_name);
+  args.push_oop(type_mirror);
+  args.push_oop(info);
+  JavaValue result(T_OBJECT);
+  JavaCalls::call_static(&result,
+                         SystemDictionary::MethodHandleNatives_klass(),
+                         vmSymbols::linkDynamicConstant_name(),
+                         vmSymbols::linkDynamicConstant_signature(),
+                         &args, CHECK_(empty));
+
+  return Handle(THREAD, (oop) result.get_jobject());
+}
+
 // Ask Java code to find or construct a java.lang.invoke.CallSite for the given
 // name and signature, as interpreted relative to the given class loader.
 methodHandle SystemDictionary::find_dynamic_call_site_invoker(Klass* caller,
+                                                              int indy_index,
                                                               Handle bootstrap_specifier,
                                                               Symbol* name,
                                                               Symbol* type,
@@ -2820,17 +2944,10 @@
   if (java_lang_invoke_MethodHandle::is_instance(bootstrap_specifier())) {
     bsm = bootstrap_specifier;
   } else {
-    assert(bootstrap_specifier->is_objArray(), "");
-    objArrayHandle args(THREAD, (objArrayOop) bootstrap_specifier());
-    int len = args->length();
-    assert(len >= 1, "");
-    bsm = Handle(THREAD, args->obj_at(0));
-    if (len > 1) {
-      objArrayOop args1 = oopFactory::new_objArray(SystemDictionary::Object_klass(), len-1, CHECK_(empty));
-      for (int i = 1; i < len; i++)
-        args1->obj_at_put(i-1, args->obj_at(i));
-      info = Handle(THREAD, args1);
-    }
+    objArrayOop args = (objArrayOop) bootstrap_specifier();
+    assert(args->length() == 2, "");
+    bsm  = Handle(THREAD, args->obj_at(0));
+    info = Handle(THREAD, args->obj_at(1));
   }
   guarantee(java_lang_invoke_MethodHandle::is_instance(bsm()),
             "caller must supply a valid BSM");
@@ -2846,9 +2963,10 @@
   objArrayHandle appendix_box = oopFactory::new_objArray_handle(SystemDictionary::Object_klass(), 1, CHECK_(empty));
   assert(appendix_box->obj_at(0) == NULL, "");
 
-  // call java.lang.invoke.MethodHandleNatives::linkCallSite(caller, bsm, name, mtype, info, &appendix)
+  // call java.lang.invoke.MethodHandleNatives::linkCallSite(caller, indy_index, bsm, name, mtype, info, &appendix)
   JavaCallArguments args;
   args.push_oop(Handle(THREAD, caller->java_mirror()));
+  args.push_int(indy_index);
   args.push_oop(bsm);
   args.push_oop(method_name);
   args.push_oop(method_type);
--- a/src/hotspot/share/classfile/systemDictionary.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/classfile/systemDictionary.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -32,6 +32,7 @@
 #include "oops/symbol.hpp"
 #include "runtime/java.hpp"
 #include "runtime/reflectionUtils.hpp"
+#include "runtime/signature.hpp"
 #include "utilities/hashtable.hpp"
 #include "utilities/hashtable.inline.hpp"
 
@@ -527,6 +528,28 @@
   static methodHandle find_method_handle_intrinsic(vmIntrinsics::ID iid,
                                                    Symbol* signature,
                                                    TRAPS);
+
+  // compute java_mirror (java.lang.Class instance) for a type ("I", "[[B", "LFoo;", etc.)
+  // Either the accessing_klass or the CL/PD can be non-null, but not both.
+  static Handle    find_java_mirror_for_type(Symbol* signature,
+                                             Klass* accessing_klass,
+                                             Handle class_loader,
+                                             Handle protection_domain,
+                                             SignatureStream::FailureMode failure_mode,
+                                             TRAPS);
+  static Handle    find_java_mirror_for_type(Symbol* signature,
+                                             Klass* accessing_klass,
+                                             SignatureStream::FailureMode failure_mode,
+                                             TRAPS) {
+    // callee will fill in CL/PD from AK, if they are needed
+    return find_java_mirror_for_type(signature, accessing_klass, Handle(), Handle(),
+                                     failure_mode, THREAD);
+  }
+
+
+  // fast short-cut for the one-character case:
+  static oop       find_java_mirror_for_type(char signature_char);
+
   // find a java.lang.invoke.MethodType object for a given signature
   // (asks Java to compute it if necessary, except in a compiler thread)
   static Handle    find_method_handle_type(Symbol* signature,
@@ -546,8 +569,17 @@
                                                Symbol* signature,
                                                TRAPS);
 
+  // ask Java to compute a constant by invoking a BSM given a Dynamic_info CP entry
+  static Handle    link_dynamic_constant(Klass* caller,
+                                         int condy_index,
+                                         Handle bootstrap_specifier,
+                                         Symbol* name,
+                                         Symbol* type,
+                                         TRAPS);
+
   // ask Java to create a dynamic call site, while linking an invokedynamic op
   static methodHandle find_dynamic_call_site_invoker(Klass* caller,
+                                                     int indy_index,
                                                      Handle bootstrap_method,
                                                      Symbol* name,
                                                      Symbol* type,
--- a/src/hotspot/share/classfile/verifier.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/classfile/verifier.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -2054,19 +2054,21 @@
     const constantPoolHandle& cp, u2 bci, TRAPS) {
   verify_cp_index(bci, cp, index, CHECK_VERIFY(this));
   constantTag tag = cp->tag_at(index);
-  unsigned int types;
+  unsigned int types = 0;
   if (opcode == Bytecodes::_ldc || opcode == Bytecodes::_ldc_w) {
     if (!tag.is_unresolved_klass()) {
       types = (1 << JVM_CONSTANT_Integer) | (1 << JVM_CONSTANT_Float)
             | (1 << JVM_CONSTANT_String)  | (1 << JVM_CONSTANT_Class)
-            | (1 << JVM_CONSTANT_MethodHandle) | (1 << JVM_CONSTANT_MethodType);
+            | (1 << JVM_CONSTANT_MethodHandle) | (1 << JVM_CONSTANT_MethodType)
+            | (1 << JVM_CONSTANT_Dynamic);
       // Note:  The class file parser already verified the legality of
       // MethodHandle and MethodType constants.
       verify_cp_type(bci, index, cp, types, CHECK_VERIFY(this));
     }
   } else {
     assert(opcode == Bytecodes::_ldc2_w, "must be ldc2_w");
-    types = (1 << JVM_CONSTANT_Double) | (1 << JVM_CONSTANT_Long);
+    types = (1 << JVM_CONSTANT_Double) | (1 << JVM_CONSTANT_Long)
+          | (1 << JVM_CONSTANT_Dynamic);
     verify_cp_type(bci, index, cp, types, CHECK_VERIFY(this));
   }
   if (tag.is_string() && cp->is_pseudo_string_at(index)) {
@@ -2101,6 +2103,30 @@
     current_frame->push_stack(
       VerificationType::reference_type(
         vmSymbols::java_lang_invoke_MethodType()), CHECK_VERIFY(this));
+  } else if (tag.is_dynamic_constant()) {
+    Symbol* constant_type = cp->uncached_signature_ref_at(index);
+    if (!SignatureVerifier::is_valid_type_signature(constant_type)) {
+      class_format_error(
+        "Invalid type for dynamic constant in class %s referenced "
+        "from constant pool index %d", _klass->external_name(), index);
+      return;
+    }
+    assert(sizeof(VerificationType) == sizeof(uintptr_t),
+          "buffer type must match VerificationType size");
+    uintptr_t constant_type_buffer[2];
+    VerificationType* v_constant_type = (VerificationType*)constant_type_buffer;
+    SignatureStream sig_stream(constant_type, false);
+    int n = change_sig_to_verificationType(
+      &sig_stream, v_constant_type, CHECK_VERIFY(this));
+    int opcode_n = (opcode == Bytecodes::_ldc2_w ? 2 : 1);
+    if (n != opcode_n) {
+      // wrong kind of ldc; reverify against updated type mask
+      types &= ~(1 << JVM_CONSTANT_Dynamic);
+      verify_cp_type(bci, index, cp, types, CHECK_VERIFY(this));
+    }
+    for (int i = 0; i < n; i++) {
+      current_frame->push_stack(v_constant_type[i], CHECK_VERIFY(this));
+    }
   } else {
     /* Unreachable? verify_cp_type has already validated the cp type. */
     verify_error(
@@ -2665,7 +2691,7 @@
   // Make sure the constant pool item is the right type
   u2 index = bcs->get_index_u2();
   Bytecodes::Code opcode = bcs->raw_code();
-  unsigned int types;
+  unsigned int types = 0;
   switch (opcode) {
     case Bytecodes::_invokeinterface:
       types = 1 << JVM_CONSTANT_InterfaceMethodref;
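
The `types` variable in the hunks above is a bitmask with one bit per constant-pool tag value (the tags used here are all well below 32, so each fits in an unsigned int), and the change simply adds the CONSTANT_Dynamic bit for ldc/ldc_w and ldc2_w. verify_cp_type's body is not shown in this diff; the sketch below is the obvious reading of the mask, with tag numbers taken from the class-file format.

#include <cstdio>

// Constant-pool tag values from the class-file format (subset).
enum {
  JVM_CONSTANT_Integer = 3,
  JVM_CONSTANT_Float   = 4,
  JVM_CONSTANT_Long    = 5,
  JVM_CONSTANT_Double  = 6,
  JVM_CONSTANT_String  = 8,
  JVM_CONSTANT_Dynamic = 17
};

// Membership test implied by the 'types' mask: one bit per tag value.
static bool tag_allowed(unsigned int types, int tag) {
  return (types & (1u << tag)) != 0;
}

int main() {
  unsigned int ldc_types  = (1u << JVM_CONSTANT_Integer) | (1u << JVM_CONSTANT_Float)
                          | (1u << JVM_CONSTANT_String)  | (1u << JVM_CONSTANT_Dynamic);
  unsigned int ldc2_types = (1u << JVM_CONSTANT_Long) | (1u << JVM_CONSTANT_Double)
                          | (1u << JVM_CONSTANT_Dynamic);
  printf("ldc   accepts Dynamic: %d\n", tag_allowed(ldc_types,  JVM_CONSTANT_Dynamic)); // 1
  printf("ldc2_w accepts Float:  %d\n", tag_allowed(ldc2_types, JVM_CONSTANT_Float));   // 0
  return 0;
}
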
--- a/src/hotspot/share/classfile/verifier.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/classfile/verifier.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -40,7 +40,8 @@
     STRICTER_ACCESS_CTRL_CHECK_VERSION  = 49,
     STACKMAP_ATTRIBUTE_MAJOR_VERSION    = 50,
     INVOKEDYNAMIC_MAJOR_VERSION         = 51,
-    NO_RELAX_ACCESS_CTRL_CHECK_VERSION  = 52
+    NO_RELAX_ACCESS_CTRL_CHECK_VERSION  = 52,
+    DYNAMICCONSTANT_MAJOR_VERSION       = 55
   };
   typedef enum { ThrowException, NoException } Mode;
 
--- a/src/hotspot/share/classfile/vmSymbols.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/classfile/vmSymbols.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -98,6 +98,14 @@
     _type_signatures[T_BOOLEAN] = bool_signature();
     _type_signatures[T_VOID]    = void_signature();
     // no single signatures for T_OBJECT or T_ARRAY
+#ifdef ASSERT
+    for (int i = (int)T_BOOLEAN; i < (int)T_VOID+1; i++) {
+      Symbol* s = _type_signatures[i];
+      if (s == NULL)  continue;
+      BasicType st = signature_type(s);
+      assert(st == i, "");
+    }
+#endif
   }
 
 #ifdef ASSERT
@@ -202,9 +210,11 @@
 
 BasicType vmSymbols::signature_type(const Symbol* s) {
   assert(s != NULL, "checking");
-  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
-    if (s == _type_signatures[i]) {
-      return (BasicType)i;
+  if (s->utf8_length() == 1) {
+    BasicType result = char2type(s->byte_at(0));
+    if (is_java_primitive(result) || result == T_VOID) {
+      assert(s == _type_signatures[result], "");
+      return result;
     }
   }
   return T_OBJECT;
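
The new fast path in vmSymbols::signature_type above only has to inspect single-character descriptors, because every primitive (and void) descriptor is exactly one character and anything longer is a reference. A standalone sketch of that mapping follows, using the descriptor letters from the JVM field-descriptor grammar; the enum values and helper names are stand-ins for HotSpot's BasicType and char2type, not the real definitions.

#include <cstdio>
#include <cstring>

enum BasicType { T_BOOLEAN, T_CHAR, T_FLOAT, T_DOUBLE, T_BYTE, T_SHORT,
                 T_INT, T_LONG, T_VOID, T_OBJECT, T_ILLEGAL };

// One-character field descriptors; anything unrecognized is not a primitive.
static BasicType char2type(char c) {
  switch (c) {
    case 'Z': return T_BOOLEAN;  case 'C': return T_CHAR;
    case 'F': return T_FLOAT;    case 'D': return T_DOUBLE;
    case 'B': return T_BYTE;     case 'S': return T_SHORT;
    case 'I': return T_INT;      case 'J': return T_LONG;
    case 'V': return T_VOID;
    default:  return T_ILLEGAL;
  }
}

// Fast path: a one-character descriptor is a primitive or void; all other
// descriptors ("Ljava/lang/String;", "[I", ...) are references.
static BasicType signature_type(const char* sig) {
  if (strlen(sig) == 1) {
    BasicType bt = char2type(sig[0]);
    if (bt != T_ILLEGAL) return bt;
  }
  return T_OBJECT;
}

int main() {
  printf("J  -> %d\n", (int)signature_type("J"));                   // T_LONG
  printf("[I -> %d\n", (int)signature_type("[I"));                  // T_OBJECT
  printf("L; -> %d\n", (int)signature_type("Ljava/lang/String;"));  // T_OBJECT
  return 0;
}
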
--- a/src/hotspot/share/classfile/vmSymbols.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/classfile/vmSymbols.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -307,8 +307,10 @@
   template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/lang/invoke/MethodHandle;") \
   template(linkMethod_name,                           "linkMethod")                               \
   template(linkMethod_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/invoke/MemberName;") \
+  template(linkDynamicConstant_name,                  "linkDynamicConstant")                      \
+  template(linkDynamicConstant_signature, "(Ljava/lang/Object;ILjava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;") \
   template(linkCallSite_name,                         "linkCallSite")                             \
-  template(linkCallSite_signature, "(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/invoke/MemberName;") \
+  template(linkCallSite_signature, "(Ljava/lang/Object;ILjava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/invoke/MemberName;") \
   template(setTargetNormal_name,                      "setTargetNormal")                          \
   template(setTargetVolatile_name,                    "setTargetVolatile")                        \
   template(setTarget_signature,                       "(Ljava/lang/invoke/MethodHandle;)V")       \
--- a/src/hotspot/share/code/compiledIC.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/code/compiledIC.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -224,7 +224,7 @@
     assert(bytecode == Bytecodes::_invokeinterface, "");
     int itable_index = call_info->itable_index();
     entry = VtableStubs::find_itable_stub(itable_index);
-    if (entry == false) {
+    if (entry == NULL) {
       return false;
     }
 #ifdef ASSERT
--- a/src/hotspot/share/code/oopRecorder.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/code/oopRecorder.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -172,9 +172,8 @@
 }
 
 int ObjectLookup::sort_by_address(oop a, oop b) {
-  if (b > a) return 1;
-  if (a > b) return -1;
-  return 0;
+  // oopDesc::compare returns the opposite ordering of what this function returns
+  return -(oopDesc::compare(a, b));
 }
 
 int ObjectLookup::sort_by_address(ObjectEntry* a, ObjectEntry* b) {
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -246,11 +246,11 @@
 
   // Compute the least valued stack element.
   oop least_value(HeapWord* low) {
-     oop least = (oop)low;
-     for (size_t i = 0; i < _index; i++) {
-       least = MIN2(least, _base[i]);
-     }
-     return least;
+    HeapWord* least = low;
+    for (size_t i = 0; i < _index; i++) {
+      least = MIN2(least, (HeapWord*)_base[i]);
+    }
+    return (oop)least;
   }
 
   // Exposed here to allow stack expansion in || case.
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -54,7 +54,7 @@
   // pre-marking object graph.
   static void enqueue(oop pre_val);
 
-  static void enqueue_if_weak(DecoratorSet decorators, oop value);
+  static void enqueue_if_weak_or_archive(DecoratorSet decorators, oop value);
 
   template <class T> void write_ref_array_pre_work(T* dst, int count);
   virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.inline.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.inline.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -60,12 +60,17 @@
   _byte_map[card_index] = val;
 }
 
-inline void G1SATBCardTableModRefBS::enqueue_if_weak(DecoratorSet decorators, oop value) {
+inline void G1SATBCardTableModRefBS::enqueue_if_weak_or_archive(DecoratorSet decorators, oop value) {
   assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
+  // Archive roots need to be enqueued since they add subgraphs to the
+  // Java heap that were not there at the snapshot when marking started.
+  // Weak and phantom references also need enqueueing for similar reasons.
+  const bool in_archive_root   = (decorators & IN_ARCHIVE_ROOT) != 0;
   const bool on_strong_oop_ref = (decorators & ON_STRONG_OOP_REF) != 0;
   const bool peek              = (decorators & AS_NO_KEEPALIVE) != 0;
+  const bool needs_enqueue     = in_archive_root || (!peek && !on_strong_oop_ref);
 
-  if (!peek && !on_strong_oop_ref && value != NULL) {
+  if (needs_enqueue && value != NULL) {
     enqueue(value);
   }
 }
@@ -75,7 +80,7 @@
 inline oop G1SATBCardTableLoggingModRefBS::AccessBarrier<decorators, BarrierSetT>::
 oop_load_not_in_heap(T* addr) {
   oop value = ModRef::oop_load_not_in_heap(addr);
-  enqueue_if_weak(decorators, value);
+  enqueue_if_weak_or_archive(decorators, value);
   return value;
 }
 
@@ -84,7 +89,7 @@
 inline oop G1SATBCardTableLoggingModRefBS::AccessBarrier<decorators, BarrierSetT>::
 oop_load_in_heap(T* addr) {
   oop value = ModRef::oop_load_in_heap(addr);
-  enqueue_if_weak(decorators, value);
+  enqueue_if_weak_or_archive(decorators, value);
   return value;
 }
 
@@ -92,7 +97,7 @@
 inline oop G1SATBCardTableLoggingModRefBS::AccessBarrier<decorators, BarrierSetT>::
 oop_load_in_heap_at(oop base, ptrdiff_t offset) {
   oop value = ModRef::oop_load_in_heap_at(base, offset);
-  enqueue_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), value);
+  enqueue_if_weak_or_archive(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), value);
   return value;
 }
 
--- a/src/hotspot/share/gc/parallel/parMarkBitMap.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/gc/parallel/parMarkBitMap.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -131,26 +131,28 @@
 }
 
 size_t
-ParMarkBitMap::live_words_in_range_use_cache(ParCompactionManager* cm, HeapWord* beg_addr, oop end_obj) const
+ParMarkBitMap::live_words_in_range_use_cache(ParCompactionManager* cm, HeapWord* beg_addr, oop end_oop) const
 {
   HeapWord* last_beg = cm->last_query_begin();
-  oop last_obj = cm->last_query_object();
+  HeapWord* last_obj = (HeapWord*)cm->last_query_object();
+  HeapWord* end_obj  = (HeapWord*)end_oop;
+
   size_t last_ret = cm->last_query_return();
   if (end_obj > last_obj) {
-    last_ret = last_ret + live_words_in_range_helper((HeapWord*)last_obj, end_obj);
+    last_ret = last_ret + live_words_in_range_helper(last_obj, end_oop);
     last_obj = end_obj;
   } else if (end_obj < last_obj) {
     // The cached value is for an object that is to the left (lower address) of the current
     // end_obj. Calculate back from that cached value.
-    if (pointer_delta((HeapWord*)end_obj, (HeapWord*)beg_addr) > pointer_delta((HeapWord*)last_obj, (HeapWord*)end_obj)) {
-      last_ret = last_ret - live_words_in_range_helper((HeapWord*)end_obj, last_obj);
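+    // Scan whichever range is shorter: either adjust the cached count down by
+    // [end_obj, last_obj), or recompute [beg_addr, end_obj) from scratch.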
+    if (pointer_delta(end_obj, beg_addr) > pointer_delta(last_obj, end_obj)) {
+      last_ret = last_ret - live_words_in_range_helper(end_obj, (oop)last_obj);
     } else {
-      last_ret = live_words_in_range_helper(beg_addr, end_obj);
+      last_ret = live_words_in_range_helper(beg_addr, end_oop);
     }
     last_obj = end_obj;
   }
 
-  update_live_words_in_range_cache(cm, last_beg, last_obj, last_ret);
+  update_live_words_in_range_cache(cm, last_beg, (oop)last_obj, last_ret);
   return last_ret;
 }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -0,0 +1,709 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/oopStorage.inline.hpp"
+#include "gc/shared/oopStorageParState.inline.hpp"
+#include "logging/log.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "utilities/align.hpp"
+#include "utilities/count_trailing_zeros.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+#include "utilities/ostream.hpp"
+
+OopStorage::BlockEntry::BlockEntry() : _prev(NULL), _next(NULL) {}
+
+OopStorage::BlockEntry::~BlockEntry() {
+  assert(_prev == NULL, "deleting attached block");
+  assert(_next == NULL, "deleting attached block");
+}
+
+OopStorage::BlockList::BlockList(const BlockEntry& (*get_entry)(const Block& block)) :
+  _head(NULL), _tail(NULL), _get_entry(get_entry)
+{}
+
+OopStorage::BlockList::~BlockList() {
+  // ~OopStorage() empties its lists before destroying them.
+  assert(_head == NULL, "deleting non-empty block list");
+  assert(_tail == NULL, "deleting non-empty block list");
+}
+
+void OopStorage::BlockList::push_front(const Block& block) {
+  const Block* old = _head;
+  if (old == NULL) {
+    assert(_tail == NULL, "invariant");
+    _head = _tail = &block;
+  } else {
+    _get_entry(block)._next = old;
+    _get_entry(*old)._prev = &block;
+    _head = &block;
+  }
+}
+
+void OopStorage::BlockList::push_back(const Block& block) {
+  const Block* old = _tail;
+  if (old == NULL) {
+    assert(_head == NULL, "invariant");
+    _head = _tail = &block;
+  } else {
+    _get_entry(*old)._next = &block;
+    _get_entry(block)._prev = old;
+    _tail = &block;
+  }
+}
+
+void OopStorage::BlockList::unlink(const Block& block) {
+  const BlockEntry& block_entry = _get_entry(block);
+  const Block* prev_blk = block_entry._prev;
+  const Block* next_blk = block_entry._next;
+  block_entry._prev = NULL;
+  block_entry._next = NULL;
+  if ((prev_blk == NULL) && (next_blk == NULL)) {
+    assert(_head == &block, "invariant");
+    assert(_tail == &block, "invariant");
+    _head = _tail = NULL;
+  } else if (prev_blk == NULL) {
+    assert(_head == &block, "invariant");
+    _get_entry(*next_blk)._prev = NULL;
+    _head = next_blk;
+  } else if (next_blk == NULL) {
+    assert(_tail == &block, "invariant");
+    _get_entry(*prev_blk)._next = NULL;
+    _tail = prev_blk;
+  } else {
+    _get_entry(*next_blk)._prev = prev_blk;
+    _get_entry(*prev_blk)._next = next_blk;
+  }
+}
+
+// Blocks start with an array of BitsPerWord oop entries.  That array
+// is divided into conceptual BytesPerWord sections of BitsPerByte
+// entries.  Blocks are allocated aligned on section boundaries, for
+// the convenience of mapping from an entry to the containing block;
+// see block_for_ptr().  Aligning on section boundary rather than on
+// the full _data wastes a lot less space, but makes for a bit more
+// work in block_for_ptr().
+
+const unsigned section_size = BitsPerByte;
+const unsigned section_count = BytesPerWord;
+const unsigned block_alignment = sizeof(oop) * section_size;
+
+// VS2013 warns (C4351) that elements of _data will be *correctly* default
+// initialized, unlike earlier versions that *incorrectly* did not do so.
+#ifdef _WINDOWS
+#pragma warning(push)
+#pragma warning(disable: 4351)
+#endif // _WINDOWS
+OopStorage::Block::Block(const OopStorage* owner, void* memory) :
+  _data(),
+  _allocated_bitmask(0),
+  _owner(owner),
+  _memory(memory),
+  _active_entry(),
+  _allocate_entry()
+{
+  STATIC_ASSERT(_data_pos == 0);
+  STATIC_ASSERT(section_size * section_count == ARRAY_SIZE(_data));
+  assert(offset_of(Block, _data) == _data_pos, "invariant");
+  assert(owner != NULL, "NULL owner");
+  assert(is_aligned(this, block_alignment), "misaligned block");
+}
+#ifdef _WINDOWS
+#pragma warning(pop)
+#endif
+
+OopStorage::Block::~Block() {
+  // Clear fields used by block_for_ptr and entry validation, which
+  // might help catch bugs.  Volatile to prevent dead-store elimination.
+  const_cast<uintx volatile&>(_allocated_bitmask) = 0;
+  const_cast<OopStorage* volatile&>(_owner) = NULL;
+}
+
+const OopStorage::BlockEntry& OopStorage::Block::get_active_entry(const Block& block) {
+  return block._active_entry;
+}
+
+const OopStorage::BlockEntry& OopStorage::Block::get_allocate_entry(const Block& block) {
+  return block._allocate_entry;
+}
+
+size_t OopStorage::Block::allocation_size() {
+  // _data must be first member, so aligning Block aligns _data.
+  STATIC_ASSERT(_data_pos == 0);
+  return sizeof(Block) + block_alignment - sizeof(void*);
+}
+
+size_t OopStorage::Block::allocation_alignment_shift() {
+  return exact_log2(block_alignment);
+}
+
+inline bool is_full_bitmask(uintx bitmask) { return ~bitmask == 0; }
+inline bool is_empty_bitmask(uintx bitmask) { return bitmask == 0; }
+
+bool OopStorage::Block::is_full() const {
+  return is_full_bitmask(allocated_bitmask());
+}
+
+bool OopStorage::Block::is_empty() const {
+  return is_empty_bitmask(allocated_bitmask());
+}
+
+uintx OopStorage::Block::bitmask_for_entry(const oop* ptr) const {
+  return bitmask_for_index(get_index(ptr));
+}
+
+uintx OopStorage::Block::cmpxchg_allocated_bitmask(uintx new_value, uintx compare_value) {
+  return Atomic::cmpxchg(new_value, &_allocated_bitmask, compare_value);
+}
+
+bool OopStorage::Block::contains(const oop* ptr) const {
+  const oop* base = get_pointer(0);
+  return (base <= ptr) && (ptr < (base + ARRAY_SIZE(_data)));
+}
+
+unsigned OopStorage::Block::get_index(const oop* ptr) const {
+  assert(contains(ptr), PTR_FORMAT " not in block " PTR_FORMAT, p2i(ptr), p2i(this));
+  return static_cast<unsigned>(ptr - get_pointer(0));
+}
+
+oop* OopStorage::Block::allocate() {
+  // Use CAS loop because release may change bitmask outside of lock.
+  uintx allocated = allocated_bitmask();
+  while (true) {
+    assert(!is_full_bitmask(allocated), "attempt to allocate from full block");
+    unsigned index = count_trailing_zeros(~allocated);
+    uintx new_value = allocated | bitmask_for_index(index);
+    uintx fetched = cmpxchg_allocated_bitmask(new_value, allocated);
+    if (fetched == allocated) {
+      return get_pointer(index); // CAS succeeded; return entry for index.
+    }
+    allocated = fetched;       // CAS failed; retry with latest value.
+  }
+}
+
+OopStorage::Block* OopStorage::Block::new_block(const OopStorage* owner) {
+  // _data must be first member: aligning block => aligning _data.
+  STATIC_ASSERT(_data_pos == 0);
+  size_t size_needed = allocation_size();
+  void* memory = NEW_C_HEAP_ARRAY_RETURN_NULL(char, size_needed, mtGC);
+  if (memory == NULL) {
+    return NULL;
+  }
+  void* block_mem = align_up(memory, block_alignment);
+  assert(sizeof(Block) + pointer_delta(block_mem, memory, 1) <= size_needed,
+         "allocated insufficient space for aligned block");
+  return ::new (block_mem) Block(owner, memory);
+}
+
+void OopStorage::Block::delete_block(const Block& block) {
+  void* memory = block._memory;
+  block.Block::~Block();
+  FREE_C_HEAP_ARRAY(char, memory);
+}
+
+// This can return a false positive if ptr is not contained by some
+// block.  For some uses, it is a precondition that ptr is valid,
+// e.g. contained in some block in owner's _active_list.  Other uses
+// require additional validation of the result.
+OopStorage::Block*
+OopStorage::Block::block_for_ptr(const OopStorage* owner, const oop* ptr) {
+  assert(CanUseSafeFetchN(), "precondition");
+  STATIC_ASSERT(_data_pos == 0);
+  // Const-ness of ptr is not related to const-ness of containing block.
+  // Blocks are allocated section-aligned, so get the containing section.
+  oop* section_start = align_down(const_cast<oop*>(ptr), block_alignment);
+  // Start with a guess that the containing section is the last section,
+  // so the block starts section_count-1 sections earlier.
+  oop* section = section_start - (section_size * (section_count - 1));
+  // Walk up through the potential block start positions, looking for
+  // the owner in the expected location.  If we're below the actual block
+  // start position, the value at the owner position will be some oop
+  // (possibly NULL), which can never match the owner.
+  intptr_t owner_addr = reinterpret_cast<intptr_t>(owner);
+  for (unsigned i = 0; i < section_count; ++i, section += section_size) {
+    Block* candidate = reinterpret_cast<Block*>(section);
+    intptr_t* candidate_owner_addr
+      = reinterpret_cast<intptr_t*>(&candidate->_owner);
+    if (SafeFetchN(candidate_owner_addr, 0) == owner_addr) {
+      return candidate;
+    }
+  }
+  return NULL;
+}
+
+bool OopStorage::is_valid_block_locked_or_safepoint(const Block* check_block) const {
+  assert_locked_or_safepoint(_allocate_mutex);
+  // For now, simple linear search.  Do something more clever if this
+  // is a performance bottleneck, particularly for allocation_status.
+  for (const Block* block = _active_list.chead();
+       block != NULL;
+       block = _active_list.next(*block)) {
+    if (check_block == block) {
+      return true;
+    }
+  }
+  return false;
+}
+
+#ifdef ASSERT
+void OopStorage::assert_at_safepoint() {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+}
+#endif // ASSERT
+
+//////////////////////////////////////////////////////////////////////////////
+// Allocation
+//
+// Allocation involves the _allocate_list, which contains a subset of the
+// blocks owned by a storage object.  This is a doubly-linked list, linked
+// through dedicated fields in the blocks.  Full blocks are removed from this
+// list, though they are still present in the _active_list.  Empty blocks are
+// kept at the end of the _allocate_list, to make it easy for empty block
+// deletion to find them.
+//
+// allocate(), release(), and delete_empty_blocks_concurrent() all lock the
+// _allocate_mutex while performing any list modifications.
+//
+// allocate() and release() update a block's _allocated_bitmask using CAS
+// loops.  This prevents loss of updates even though release() may perform
+// some updates without any locking.
+//
+// allocate() obtains the entry from the first block in the _allocate_list,
+// and updates that block's _allocated_bitmask to indicate the entry is in
+// use.  If this makes the block full (all entries in use), the block is
+// removed from the _allocate_list so it won't be considered by future
+// allocations until some entries in it are released.
+//
+// release() looks up the block for the entry without locking.  Once the block
+// has been determined, its _allocated_bitmask needs to be updated, and its
+// position in the _allocate_list may need to be updated.  There are two
+// cases:
+//
+// (a) If the block is neither full nor would become empty with the release of
+// the entry, only its _allocated_bitmask needs to be updated.  But if the CAS
+// update fails, the applicable case may change for the retry.
+//
+// (b) Otherwise, the _allocate_list will also need to be modified.  This
+// requires locking the _allocate_mutex, and then attempting to CAS the
+// _allocated_bitmask.  If the CAS fails, the applicable case may change for
+// the retry.  If the CAS succeeds, then update the _allocate_list according
+// to the state changes.  If the block changed from full to not full, then
+// it needs to be added to the _allocate_list, for use in future allocations.
+// If the block changed from not empty to empty, then it is moved to the end
+// of the _allocate_list, for ease of empty block deletion processing.
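+//
+// A small worked illustration of the bitmask arithmetic (hypothetical 8-bit
+// mask for brevity; the real mask is BitsPerWord bits wide):
+//
+//   allocated = 0b00101101
+//   allocate():        index     = count_trailing_zeros(~allocated) = 1
+//                      new_value = allocated | bitmask_for_index(1) = 0b00101111
+//   release of bit 3:  releasing = 0b00001000
+//                      new_value = allocated ^ releasing            = 0b00100101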
+
+oop* OopStorage::allocate() {
+  MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
+  Block* block = _allocate_list.head();
+  if (block == NULL) {
+    // No available blocks; make a new one, and add to storage.
+    {
+      MutexUnlockerEx mul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
+      block = Block::new_block(this);
+    }
+    if (block != NULL) {
+      // Add new block to storage.
+      log_info(oopstorage, blocks)("%s: new block " PTR_FORMAT, name(), p2i(block));
+
+      // Add to end of _allocate_list.  The mutex release allowed
+      // other threads to add blocks to the _allocate_list.  We prefer
+      // to allocate from non-empty blocks, to allow empty blocks to
+      // be deleted.
+      _allocate_list.push_back(*block);
+      ++_empty_block_count;
+      // Add to front of _active_list, and then record as the head
+      // block, for concurrent iteration protocol.
+      _active_list.push_front(*block);
+      ++_block_count;
+      // Ensure all setup of block is complete before making it visible.
+      OrderAccess::release_store(&_active_head, block);
+    } else {
+      log_info(oopstorage, blocks)("%s: failed new block allocation", name());
+    }
+    block = _allocate_list.head();
+    if (block == NULL) {
+      // Failed to make new block, and no other thread made a block
+      // available while the mutex was released, so return failure.
+      return NULL;
+    }
+  }
+  // Allocate from first block.
+  assert(block != NULL, "invariant");
+  assert(!block->is_full(), "invariant");
+  if (block->is_empty()) {
+    // Transitioning from empty to not empty.
+    log_debug(oopstorage, blocks)("%s: block not empty " PTR_FORMAT, name(), p2i(block));
+    --_empty_block_count;
+  }
+  oop* result = block->allocate();
+  assert(result != NULL, "allocation failed");
+  assert(!block->is_empty(), "postcondition");
+  Atomic::inc(&_allocation_count); // release updates outside lock.
+  if (block->is_full()) {
+    // Transitioning from not full to full.
+    // Remove full blocks from consideration by future allocates.
+    log_debug(oopstorage, blocks)("%s: block full " PTR_FORMAT, name(), p2i(block));
+    _allocate_list.unlink(*block);
+  }
+  log_info(oopstorage, ref)("%s: allocated " PTR_FORMAT, name(), p2i(result));
+  return result;
+}
+
+OopStorage::Block* OopStorage::find_block_or_null(const oop* ptr) const {
+  assert(ptr != NULL, "precondition");
+  return Block::block_for_ptr(this, ptr);
+}
+
+void OopStorage::release_from_block(Block& block, uintx releasing) {
+  assert(releasing != 0, "invariant");
+  uintx allocated = block.allocated_bitmask();
+  while (true) {
+    assert(releasing == (allocated & releasing), "invariant");
+    uintx new_value = allocated ^ releasing;
+    // CAS new_value into block's allocated bitmask, retrying with
+    // updated allocated bitmask until the CAS succeeds.
+    uintx fetched;
+    if (!is_full_bitmask(allocated) && !is_empty_bitmask(new_value)) {
+      fetched = block.cmpxchg_allocated_bitmask(new_value, allocated);
+      if (fetched == allocated) return;
+    } else {
+      // Need special handling if transitioning from full to not full,
+      // or from not empty to empty.  For those cases, must hold the
+      // _allocate_mutex when updating the allocated bitmask, to
+      // ensure the associated list manipulations will be consistent
+      // with the allocation bitmask that is visible to other threads
+      // in allocate() or deleting empty blocks.
+      MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
+      fetched = block.cmpxchg_allocated_bitmask(new_value, allocated);
+      if (fetched == allocated) {
+        // CAS succeeded; handle special cases, which might no longer apply.
+        if (is_full_bitmask(allocated)) {
+          // Transitioning from full to not-full; add to _allocate_list.
+          log_debug(oopstorage, blocks)("%s: block not full " PTR_FORMAT, name(), p2i(&block));
+          _allocate_list.push_front(block);
+          assert(!block.is_full(), "invariant"); // Still not full.
+        }
+        if (is_empty_bitmask(new_value)) {
+          // Transitioning from not-empty to empty; move to end of
+          // _allocate_list, to make it a deletion candidate.
+          log_debug(oopstorage, blocks)("%s: block empty " PTR_FORMAT, name(), p2i(&block));
+          _allocate_list.unlink(block);
+          _allocate_list.push_back(block);
+          ++_empty_block_count;
+          assert(block.is_empty(), "invariant"); // Still empty.
+        }
+        return;                 // Successful CAS and transitions handled.
+      }
+    }
+    // CAS failed; retry with latest value.
+    allocated = fetched;
+  }
+}
+
+#ifdef ASSERT
+void OopStorage::check_release(const Block* block, const oop* ptr) const {
+  switch (allocation_status_validating_block(block, ptr)) {
+  case INVALID_ENTRY:
+    fatal("Releasing invalid entry: " PTR_FORMAT, p2i(ptr));
+    break;
+
+  case UNALLOCATED_ENTRY:
+    fatal("Releasing unallocated entry: " PTR_FORMAT, p2i(ptr));
+    break;
+
+  case ALLOCATED_ENTRY:
+    assert(block->contains(ptr), "invariant");
+    break;
+
+  default:
+    ShouldNotReachHere();
+  }
+}
+#endif // ASSERT
+
+inline void check_release_entry(const oop* entry) {
+  assert(entry != NULL, "Releasing NULL");
+  assert(*entry == NULL, "Releasing uncleared entry: " PTR_FORMAT, p2i(entry));
+}
+
+void OopStorage::release(const oop* ptr) {
+  check_release_entry(ptr);
+  Block* block = find_block_or_null(ptr);
+  check_release(block, ptr);
+  log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptr));
+  release_from_block(*block, block->bitmask_for_entry(ptr));
+  Atomic::dec(&_allocation_count);
+}
+
+void OopStorage::release(const oop* const* ptrs, size_t size) {
+  size_t i = 0;
+  while (i < size) {
+    check_release_entry(ptrs[i]);
+    Block* block = find_block_or_null(ptrs[i]);
+    check_release(block, ptrs[i]);
+    log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptrs[i]));
+    size_t count = 0;
+    uintx releasing = 0;
+    for ( ; i < size; ++i) {
+      const oop* entry = ptrs[i];
+      // If entry not in block, finish block and resume outer loop with entry.
+      if (!block->contains(entry)) break;
+      check_release_entry(entry);
+      // Add entry to releasing bitmap.
+      log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(entry));
+      uintx entry_bitmask = block->bitmask_for_entry(entry);
+      assert((releasing & entry_bitmask) == 0,
+             "Duplicate entry: " PTR_FORMAT, p2i(entry));
+      releasing |= entry_bitmask;
+      ++count;
+    }
+    // Release the contiguous entries that are in block.
+    release_from_block(*block, releasing);
+    Atomic::sub(count, &_allocation_count);
+  }
+}
+
+const char* dup_name(const char* name) {
+  char* dup = NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtGC);
+  strcpy(dup, name);
+  return dup;
+}
+
+OopStorage::OopStorage(const char* name,
+                       Mutex* allocate_mutex,
+                       Mutex* active_mutex) :
+  _name(dup_name(name)),
+  _active_list(&Block::get_active_entry),
+  _allocate_list(&Block::get_allocate_entry),
+  _active_head(NULL),
+  _allocate_mutex(allocate_mutex),
+  _active_mutex(active_mutex),
+  _allocation_count(0),
+  _block_count(0),
+  _empty_block_count(0),
+  _concurrent_iteration_active(false)
+{
+  assert(_active_mutex->rank() < _allocate_mutex->rank(),
+         "%s: active_mutex must have lower rank than allocate_mutex", _name);
+  assert(_active_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
+         "%s: active mutex requires safepoint check", _name);
+  assert(_allocate_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
+         "%s: allocate mutex requires safepoint check", _name);
+}
+
+void OopStorage::delete_empty_block(const Block& block) {
+  assert(block.is_empty(), "discarding non-empty block");
+  log_info(oopstorage, blocks)("%s: delete empty block " PTR_FORMAT, name(), p2i(&block));
+  Block::delete_block(block);
+}
+
+OopStorage::~OopStorage() {
+  Block* block;
+  while ((block = _allocate_list.head()) != NULL) {
+    _allocate_list.unlink(*block);
+  }
+  while ((block = _active_list.head()) != NULL) {
+    _active_list.unlink(*block);
+    Block::delete_block(*block);
+  }
+  FREE_C_HEAP_ARRAY(char, _name);
+}
+
+void OopStorage::delete_empty_blocks_safepoint(size_t retain) {
+  assert_at_safepoint();
+  // Don't interfere with a concurrent iteration.
+  if (_concurrent_iteration_active) return;
+  // Compute the number of blocks to remove, to minimize volatile accesses.
+  size_t empty_blocks = _empty_block_count;
+  if (retain < empty_blocks) {
+    size_t remove_count = empty_blocks - retain;
+    // Update volatile counters once.
+    _block_count -= remove_count;
+    _empty_block_count -= remove_count;
+    do {
+      const Block* block = _allocate_list.ctail();
+      assert(block != NULL, "invariant");
+      assert(block->is_empty(), "invariant");
+      // Remove block from lists, and delete it.
+      _active_list.unlink(*block);
+      _allocate_list.unlink(*block);
+      delete_empty_block(*block);
+    } while (--remove_count > 0);
+    // Update _active_head, in case current value was in deleted set.
+    _active_head = _active_list.head();
+  }
+}
+
+void OopStorage::delete_empty_blocks_concurrent(size_t retain) {
+  MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
+  // Other threads could be adding to the empty block count while we
+  // release the mutex across the block deletions.  Set an upper bound
+  // on how many blocks we'll try to release, so other threads can't
+  // cause an unbounded stay in this function.
+  if (_empty_block_count <= retain) return;
+  size_t limit = _empty_block_count - retain;
+  for (size_t i = 0; (i < limit) && (retain < _empty_block_count); ++i) {
+    const Block* block = _allocate_list.ctail();
+    assert(block != NULL, "invariant");
+    assert(block->is_empty(), "invariant");
+    {
+      MutexLockerEx aml(_active_mutex, Mutex::_no_safepoint_check_flag);
+      // Don't interfere with a concurrent iteration.
+      if (_concurrent_iteration_active) return;
+      // Remove block from _active_list, updating head if needed.
+      _active_list.unlink(*block);
+      --_block_count;
+      if (block == _active_head) {
+        _active_head = _active_list.head();
+      }
+    }
+    // Remove block from _allocate_list and delete it.
+    _allocate_list.unlink(*block);
+    --_empty_block_count;
+    // Release mutex while deleting block.
+    MutexUnlockerEx ul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
+    delete_empty_block(*block);
+  }
+}
+
+OopStorage::EntryStatus
+OopStorage::allocation_status_validating_block(const Block* block,
+                                               const oop* ptr) const {
+  MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
+  if ((block == NULL) || !is_valid_block_locked_or_safepoint(block)) {
+    return INVALID_ENTRY;
+  } else if ((block->allocated_bitmask() & block->bitmask_for_entry(ptr)) != 0) {
+    return ALLOCATED_ENTRY;
+  } else {
+    return UNALLOCATED_ENTRY;
+  }
+}
+
+OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
+  return allocation_status_validating_block(find_block_or_null(ptr), ptr);
+}
+
+size_t OopStorage::allocation_count() const {
+  return _allocation_count;
+}
+
+size_t OopStorage::block_count() const {
+  return _block_count;
+}
+
+size_t OopStorage::empty_block_count() const {
+  return _empty_block_count;
+}
+
+size_t OopStorage::total_memory_usage() const {
+  size_t total_size = sizeof(OopStorage);
+  total_size += strlen(name()) + 1;
+  total_size += block_count() * Block::allocation_size();
+  return total_size;
+}
+
+// Parallel iteration support
+#if INCLUDE_ALL_GCS
+
+static char* not_started_marker_dummy = NULL;
+static void* const not_started_marker = &not_started_marker_dummy;
+
+OopStorage::BasicParState::BasicParState(OopStorage* storage, bool concurrent) :
+  _storage(storage),
+  _next_block(not_started_marker),
+  _concurrent(concurrent)
+{
+  update_iteration_state(true);
+}
+
+OopStorage::BasicParState::~BasicParState() {
+  update_iteration_state(false);
+}
+
+void OopStorage::BasicParState::update_iteration_state(bool value) {
+  if (_concurrent) {
+    MutexLockerEx ml(_storage->_active_mutex, Mutex::_no_safepoint_check_flag);
+    assert(_storage->_concurrent_iteration_active != value, "precondition");
+    _storage->_concurrent_iteration_active = value;
+  }
+}
+
+void OopStorage::BasicParState::ensure_iteration_started() {
+  if (!_concurrent) assert_at_safepoint();
+  assert(!_concurrent || _storage->_concurrent_iteration_active, "invariant");
+  // Ensure _next_block is not the not_started_marker, setting it to
+  // the _active_head to start the iteration if necessary.
+  if (OrderAccess::load_acquire(&_next_block) == not_started_marker) {
+    Atomic::cmpxchg(_storage->_active_head, &_next_block, not_started_marker);
+  }
+  assert(_next_block != not_started_marker, "postcondition");
+}
+
+OopStorage::Block* OopStorage::BasicParState::claim_next_block() {
+  assert(_next_block != not_started_marker, "Iteration not started");
+  void* next = _next_block;
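+  // Try to advance _next_block past the candidate; if the CAS fails, another
+  // thread claimed that block first, so retry with the value it installed.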
+  while (next != NULL) {
+    void* new_next = _storage->_active_list.next(*static_cast<Block*>(next));
+    void* fetched = Atomic::cmpxchg(new_next, &_next_block, next);
+    if (fetched == next) break; // Claimed.
+    next = fetched;
+  }
+  return static_cast<Block*>(next);
+}
+
+#endif // INCLUDE_ALL_GCS
+
+const char* OopStorage::name() const { return _name; }
+
+#ifndef PRODUCT
+
+void OopStorage::print_on(outputStream* st) const {
+  size_t allocations = _allocation_count;
+  size_t blocks = _block_count;
+  size_t empties = _empty_block_count;
+  // Comparison is being careful about racy accesses.
+  size_t used = (blocks < empties) ? 0 : (blocks - empties);
+
+  double data_size = section_size * section_count;
+  double alloc_percentage = percent_of((double)allocations, used * data_size);
+
+  st->print("%s: " SIZE_FORMAT " entries in " SIZE_FORMAT " blocks (%.F%%), "
+            SIZE_FORMAT " empties, " SIZE_FORMAT " bytes",
+            name(), allocations, used, alloc_percentage,
+            empties, total_memory_usage());
+  if (_concurrent_iteration_active) {
+    st->print(", concurrent iteration active");
+  }
+}
+
+#endif // !PRODUCT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/oopStorage.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -0,0 +1,279 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_OOPSTORAGE_HPP
+#define SHARE_GC_SHARED_OOPSTORAGE_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/oop.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+class Mutex;
+class outputStream;
+
+// OopStorage supports management of off-heap references to objects allocated
+// in the Java heap.  An OopStorage object provides a set of Java object
+// references (oop values), which clients refer to via oop* handles to the
+// associated OopStorage entries.  Clients allocate entries to create a
+// (possibly weak) reference to a Java object, use that reference, and release
+// the reference when no longer needed.
+//
+// The garbage collector must know about all OopStorage objects and their
+// reference strength.  OopStorage provides the garbage collector with support
+// for iteration over all the allocated entries.
+//
+// There are several categories of interaction with an OopStorage object.
+//
+// (1) allocation and release of entries, by the mutator or the VM.
+// (2) iteration by the garbage collector, possibly concurrent with mutator.
+// (3) iteration by other, non-GC, tools (only at safepoints).
+// (4) cleanup of unused internal storage, possibly concurrent with mutator.
+//
+// A goal of OopStorage is to make these interactions thread-safe, while
+// minimizing potential lock contention issues within and between these
+// categories.  In particular, support for concurrent iteration by the garbage
+// collector, under certain restrictions, is required.  Further, it must not
+// block nor be blocked by other operations for long periods.
+//
+// Internally, OopStorage is a set of Block objects, from which entries are
+// allocated and released.  A block contains an oop[] and a bitmask indicating
+// which entries are in use (have been allocated and not yet released).  New
+// blocks are constructed and added to the storage object when an entry
+// allocation request is made and there are no blocks with unused entries.
+// Blocks may be removed and deleted when empty.
+//
+// There are two important (and somewhat intertwined) protocols governing
+// concurrent access to a storage object.  These are the Concurrent Iteration
+// Protocol and the Allocation Protocol.  See the ParState class for a
+// discussion of concurrent iteration and the management of thread
+// interactions for this protocol.  Similarly, see the allocate() function for
+// a discussion of allocation.
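+//
+// A minimal client-side sketch (hypothetical code; a real client must also
+// provide suitably ranked mutexes when constructing the storage object):
+//
+//   OopStorage* storage = new OopStorage("my refs", alloc_mutex, active_mutex);
+//   oop* ref = storage->allocate();    // NULL if memory allocation failed
+//   if (ref != NULL) {
+//     *ref = obj;                      // entries are NULL when handed out
+//     ...
+//     *ref = NULL;                     // entries must be cleared before release
+//     storage->release(ref);
+//   }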
+
+class OopStorage : public CHeapObj<mtGC> {
+public:
+  OopStorage(const char* name, Mutex* allocate_mutex, Mutex* active_mutex);
+  ~OopStorage();
+
+  // These count and usage accessors are racy unless at a safepoint.
+
+  // The number of allocated and not yet released entries.
+  size_t allocation_count() const;
+
+  // The number of blocks of entries.  Useful for sizing parallel iteration.
+  size_t block_count() const;
+
+  // The number of blocks with no allocated entries.  Useful for sizing
+  // parallel iteration and scheduling block deletion.
+  size_t empty_block_count() const;
+
+  // Total number of blocks * memory allocation per block, plus
+  // bookkeeping overhead, including this storage object.
+  size_t total_memory_usage() const;
+
+  enum EntryStatus {
+    INVALID_ENTRY,
+    UNALLOCATED_ENTRY,
+    ALLOCATED_ENTRY
+  };
+
+  // Locks _allocate_mutex.
+  // precondition: ptr != NULL.
+  EntryStatus allocation_status(const oop* ptr) const;
+
+  // Allocates and returns a new entry.  Returns NULL if memory allocation
+  // failed.  Locks _allocate_mutex.
+  // postcondition: *result == NULL.
+  oop* allocate();
+
+  // Deallocates ptr, after setting its value to NULL. Locks _allocate_mutex.
+  // precondition: ptr is a valid allocated entry.
+  // precondition: *ptr == NULL.
+  void release(const oop* ptr);
+
+  // Releases all the ptrs.  Possibly faster than individual calls to
+  // release(oop*).  Best if ptrs is sorted by address.  Locks
+  // _allocate_mutex.
+  // precondition: All elements of ptrs are valid allocated entries.
+  // precondition: *ptrs[i] == NULL, for i in [0,size).
+  void release(const oop* const* ptrs, size_t size);
+
+  // Applies f to each allocated entry's location.  f must be a function or
+  // function object.  Assume p is either a const oop* or an oop*, depending
+  // on whether the associated storage is const or non-const, respectively.
+  // Then f(p) must be a valid expression.  The result of invoking f(p) must
+  // be implicitly convertible to bool.  Iteration terminates and returns
+  // false if any invocation of f returns false.  Otherwise, the result of
+  // iteration is true.
+  // precondition: at safepoint.
+  template<typename F> inline bool iterate_safepoint(F f);
+  template<typename F> inline bool iterate_safepoint(F f) const;
+
+  // oops_do and weak_oops_do are wrappers around iterate_safepoint, providing
+  // an adaptation layer allowing the use of existing is-alive closures and
+  // OopClosures.  Assume p is either const oop* or oop*, depending on whether
+  // the associated storage is const or non-const, respectively.  Then
+  //
+  // - closure->do_oop(p) must be a valid expression whose value is ignored.
+  //
+  // - is_alive->do_object_b(*p) must be a valid expression whose value is
+  // convertible to bool.
+  //
+  // For weak_oops_do, if *p == NULL then neither is_alive nor closure will be
+  // invoked for p.  If is_alive->do_object_b(*p) is false, then closure will
+  // not be invoked on p, and *p will be set to NULL.
+
+  template<typename Closure> inline void oops_do(Closure* closure);
+  template<typename Closure> inline void oops_do(Closure* closure) const;
+  template<typename Closure> inline void weak_oops_do(Closure* closure);
+
+  template<typename IsAliveClosure, typename Closure>
+  inline void weak_oops_do(IsAliveClosure* is_alive, Closure* closure);
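+
+  // For example (hypothetical closures), existing OopClosure and is-alive
+  // closures can be applied to every allocated entry at a safepoint:
+  //
+  //   MyRootsClosure cl;
+  //   storage->oops_do(&cl);                 // cl.do_oop(p) for each entry
+  //
+  //   MyIsAliveClosure is_alive;
+  //   storage->weak_oops_do(&is_alive, &cl); // dead entries are set to NULL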
+
+#if INCLUDE_ALL_GCS
+  // Parallel iteration is for the exclusive use of the GC.
+  // Other clients must use serial iteration.
+  template<bool concurrent, bool is_const> class ParState;
+#endif // INCLUDE_ALL_GCS
+
+  // Block cleanup functions are for the exclusive use of the GC.
+  // Both stop deleting if there is an in-progress concurrent iteration.
+  // Concurrent deletion locks both the allocate_mutex and the active_mutex.
+  void delete_empty_blocks_safepoint(size_t retain = 1);
+  void delete_empty_blocks_concurrent(size_t retain = 1);
+
+  // Debugging and logging support.
+  const char* name() const;
+  void print_on(outputStream* st) const PRODUCT_RETURN;
+
+  // Provides access to storage internals, for unit testing.
+  // Declare, but not define, the public class OopStorage::TestAccess.
+  // That class is defined as part of the unit-test. It "exports" the needed
+  // private types by providing public typedefs for them.
+  class TestAccess;
+
+  // xlC on AIX can't compile test_oopStorage.cpp with the following private
+  // classes. C++03 introduced access for nested classes with DR45, but xlC
+  // version 12 rejects it.
+NOT_AIX( private: )
+  class Block;                  // Forward decl; defined in .inline.hpp file.
+  class BlockList;              // Forward decl for BlockEntry friend decl.
+
+  class BlockEntry VALUE_OBJ_CLASS_SPEC {
+    friend class BlockList;
+
+    // Members are mutable, and we deal exclusively with pointers to
+    // const, to make const blocks easier to use; a block being const
+    // doesn't prevent modifying its list state.
+    mutable const Block* _prev;
+    mutable const Block* _next;
+
+    // Noncopyable.
+    BlockEntry(const BlockEntry&);
+    BlockEntry& operator=(const BlockEntry&);
+
+  public:
+    BlockEntry();
+    ~BlockEntry();
+  };
+
+  class BlockList VALUE_OBJ_CLASS_SPEC {
+    const Block* _head;
+    const Block* _tail;
+    const BlockEntry& (*_get_entry)(const Block& block);
+
+    // Noncopyable.
+    BlockList(const BlockList&);
+    BlockList& operator=(const BlockList&);
+
+  public:
+    BlockList(const BlockEntry& (*get_entry)(const Block& block));
+    ~BlockList();
+
+    Block* head();
+    const Block* chead() const;
+    const Block* ctail() const;
+
+    Block* prev(Block& block);
+    Block* next(Block& block);
+
+    const Block* prev(const Block& block) const;
+    const Block* next(const Block& block) const;
+
+    void push_front(const Block& block);
+    void push_back(const Block& block);
+    void unlink(const Block& block);
+  };
+
+private:
+  const char* _name;
+  BlockList _active_list;
+  BlockList _allocate_list;
+  Block* volatile _active_head;
+
+  Mutex* _allocate_mutex;
+  Mutex* _active_mutex;
+
+  // Counts are volatile for racy unlocked accesses.
+  volatile size_t _allocation_count;
+  volatile size_t _block_count;
+  volatile size_t _empty_block_count;
+  // mutable because this gets set even for const iteration.
+  mutable bool _concurrent_iteration_active;
+
+  Block* find_block_or_null(const oop* ptr) const;
+  bool is_valid_block_locked_or_safepoint(const Block* block) const;
+  EntryStatus allocation_status_validating_block(const Block* block, const oop* ptr) const;
+  void check_release(const Block* block, const oop* ptr) const NOT_DEBUG_RETURN;
+  void release_from_block(Block& block, uintx release_bitmask);
+  void delete_empty_block(const Block& block);
+
+  static void assert_at_safepoint() NOT_DEBUG_RETURN;
+
+  template<typename F, typename Storage>
+  static bool iterate_impl(F f, Storage* storage);
+
+#if INCLUDE_ALL_GCS
+  // Implementation support for parallel iteration
+  class BasicParState;
+#endif // INCLUDE_ALL_GCS
+
+  // Wrapper for OopClosure-style function, so it can be used with
+  // iterate.  Assume p is of type oop*.  Then cl->do_oop(p) must be a
+  // valid expression whose value may be ignored.
+  template<typename Closure> class OopFn;
+  template<typename Closure> static OopFn<Closure> oop_fn(Closure* cl);
+
+  // Wrapper for BoolObjectClosure + iteration handler pair, so they
+  // can be used with iterate.
+  template<typename IsAlive, typename F> class IfAliveFn;
+  template<typename IsAlive, typename F>
+  static IfAliveFn<IsAlive, F> if_alive_fn(IsAlive* is_alive, F f);
+
+  // Wrapper for iteration handler, automatically skipping NULL entries.
+  template<typename F> class SkipNullFn;
+  template<typename F> static SkipNullFn<F> skip_null_fn(F f);
+};
+
+#endif // include guard
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/oopStorage.inline.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_OOPSTORAGE_INLINE_HPP
+#define SHARE_GC_SHARED_OOPSTORAGE_INLINE_HPP
+
+#include "gc/shared/oopStorage.hpp"
+#include "memory/allocation.hpp"
+#include "metaprogramming/conditional.hpp"
+#include "metaprogramming/isConst.hpp"
+#include "oops/oop.hpp"
+#include "utilities/count_trailing_zeros.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
+  // _data must be the first non-static data member, for alignment.
+  oop _data[BitsPerWord];
+  static const unsigned _data_pos = 0; // Position of _data.
+
+  volatile uintx _allocated_bitmask; // One bit per _data element.
+  const OopStorage* _owner;
+  void* _memory;              // Unaligned storage containing block.
+  BlockEntry _active_entry;
+  BlockEntry _allocate_entry;
+
+  Block(const OopStorage* owner, void* memory);
+  ~Block();
+
+  void check_index(unsigned index) const;
+  unsigned get_index(const oop* ptr) const;
+
+  template<typename F, typename BlockPtr>
+  static bool iterate_impl(F f, BlockPtr b);
+
+  // Noncopyable.
+  Block(const Block&);
+  Block& operator=(const Block&);
+
+public:
+  static const BlockEntry& get_active_entry(const Block& block);
+  static const BlockEntry& get_allocate_entry(const Block& block);
+
+  static size_t allocation_size();
+  static size_t allocation_alignment_shift();
+
+  oop* get_pointer(unsigned index);
+  const oop* get_pointer(unsigned index) const;
+
+  uintx bitmask_for_index(unsigned index) const;
+  uintx bitmask_for_entry(const oop* ptr) const;
+
+  // Allocation bitmask accessors are racy.
+  bool is_full() const;
+  bool is_empty() const;
+  uintx allocated_bitmask() const;
+  uintx cmpxchg_allocated_bitmask(uintx new_value, uintx compare_value);
+
+  bool contains(const oop* ptr) const;
+
+  // Returns NULL if ptr is not in a block or not allocated in that block.
+  static Block* block_for_ptr(const OopStorage* owner, const oop* ptr);
+
+  oop* allocate();
+  static Block* new_block(const OopStorage* owner);
+  static void delete_block(const Block& block);
+
+  template<typename F> bool iterate(F f);
+  template<typename F> bool iterate(F f) const;
+}; // class Block
+
+inline OopStorage::Block* OopStorage::BlockList::head() {
+  return const_cast<Block*>(_head);
+}
+
+inline const OopStorage::Block* OopStorage::BlockList::chead() const {
+  return _head;
+}
+
+inline const OopStorage::Block* OopStorage::BlockList::ctail() const {
+  return _tail;
+}
+
+inline OopStorage::Block* OopStorage::BlockList::prev(Block& block) {
+  return const_cast<Block*>(_get_entry(block)._prev);
+}
+
+inline OopStorage::Block* OopStorage::BlockList::next(Block& block) {
+  return const_cast<Block*>(_get_entry(block)._next);
+}
+
+inline const OopStorage::Block* OopStorage::BlockList::prev(const Block& block) const {
+  return _get_entry(block)._prev;
+}
+
+inline const OopStorage::Block* OopStorage::BlockList::next(const Block& block) const {
+  return _get_entry(block)._next;
+}
+
+template<typename Closure>
+class OopStorage::OopFn VALUE_OBJ_CLASS_SPEC {
+public:
+  explicit OopFn(Closure* cl) : _cl(cl) {}
+
+  template<typename OopPtr>     // [const] oop*
+  bool operator()(OopPtr ptr) const {
+    _cl->do_oop(ptr);
+    return true;
+  }
+
+private:
+  Closure* _cl;
+};
+
+template<typename Closure>
+inline OopStorage::OopFn<Closure> OopStorage::oop_fn(Closure* cl) {
+  return OopFn<Closure>(cl);
+}
+
+template<typename IsAlive, typename F>
+class OopStorage::IfAliveFn VALUE_OBJ_CLASS_SPEC {
+public:
+  IfAliveFn(IsAlive* is_alive, F f) : _is_alive(is_alive), _f(f) {}
+
+  bool operator()(oop* ptr) const {
+    bool result = true;
+    oop v = *ptr;
+    if (v != NULL) {
+      if (_is_alive->do_object_b(v)) {
+        result = _f(ptr);
+      } else {
+        *ptr = NULL;            // Clear dead value.
+      }
+    }
+    return result;
+  }
+
+private:
+  IsAlive* _is_alive;
+  F _f;
+};
+
+template<typename IsAlive, typename F>
+inline OopStorage::IfAliveFn<IsAlive, F> OopStorage::if_alive_fn(IsAlive* is_alive, F f) {
+  return IfAliveFn<IsAlive, F>(is_alive, f);
+}
+
+template<typename F>
+class OopStorage::SkipNullFn VALUE_OBJ_CLASS_SPEC {
+public:
+  SkipNullFn(F f) : _f(f) {}
+
+  template<typename OopPtr>     // [const] oop*
+  bool operator()(OopPtr ptr) const {
+    return (*ptr != NULL) ? _f(ptr) : true;
+  }
+
+private:
+  F _f;
+};
+
+template<typename F>
+inline OopStorage::SkipNullFn<F> OopStorage::skip_null_fn(F f) {
+  return SkipNullFn<F>(f);
+}
+
+// Inline Block accesses for use in iteration inner loop.
+
+inline void OopStorage::Block::check_index(unsigned index) const {
+  assert(index < ARRAY_SIZE(_data), "Index out of bounds: %u", index);
+}
+
+inline oop* OopStorage::Block::get_pointer(unsigned index) {
+  check_index(index);
+  return &_data[index];
+}
+
+inline const oop* OopStorage::Block::get_pointer(unsigned index) const {
+  check_index(index);
+  return &_data[index];
+}
+
+inline uintx OopStorage::Block::allocated_bitmask() const {
+  return _allocated_bitmask;
+}
+
+inline uintx OopStorage::Block::bitmask_for_index(unsigned index) const {
+  check_index(index);
+  return uintx(1) << index;
+}
+
+// Provide const or non-const iteration, depending on whether BlockPtr
+// is const Block* or Block*, respectively.
+template<typename F, typename BlockPtr> // BlockPtr := [const] Block*
+inline bool OopStorage::Block::iterate_impl(F f, BlockPtr block) {
+  uintx bitmask = block->allocated_bitmask();
+  while (bitmask != 0) {
+    unsigned index = count_trailing_zeros(bitmask);
+    bitmask ^= block->bitmask_for_index(index);
+    if (!f(block->get_pointer(index))) {
+      return false;
+    }
+  }
+  return true;
+}
+
+template<typename F>
+inline bool OopStorage::Block::iterate(F f) {
+  return iterate_impl(f, this);
+}
+
+template<typename F>
+inline bool OopStorage::Block::iterate(F f) const {
+  return iterate_impl(f, this);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// Support for serial iteration, always at a safepoint.
+
+// Provide const or non-const iteration, depending on whether Storage is
+// const OopStorage* or OopStorage*, respectively.
+template<typename F, typename Storage> // Storage := [const] OopStorage
+inline bool OopStorage::iterate_impl(F f, Storage* storage) {
+  assert_at_safepoint();
+  // Propagate const/non-const iteration to the block layer, by using
+  // const or non-const blocks as corresponding to Storage.
+  typedef typename Conditional<IsConst<Storage>::value, const Block*, Block*>::type BlockPtr;
+  for (BlockPtr block = storage->_active_head;
+       block != NULL;
+       block = storage->_active_list.next(*block)) {
+    if (!block->iterate(f)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+template<typename F>
+inline bool OopStorage::iterate_safepoint(F f) {
+  return iterate_impl(f, this);
+}
+
+template<typename F>
+inline bool OopStorage::iterate_safepoint(F f) const {
+  return iterate_impl(f, this);
+}
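+
+// A minimal sketch of a handler usable with iterate_safepoint (hypothetical
+// functor; external state is referenced through a pointer because the
+// handler is passed by value):
+//
+//   struct CountNonNull {
+//     size_t* _count;
+//     CountNonNull(size_t* count) : _count(count) {}
+//     bool operator()(const oop* p) const {
+//       if (*p != NULL) ++(*_count);
+//       return true;                   // false would terminate the iteration
+//     }
+//   };
+//
+//   size_t count = 0;
+//   storage->iterate_safepoint(CountNonNull(&count));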
+
+template<typename Closure>
+inline void OopStorage::oops_do(Closure* cl) {
+  iterate_safepoint(oop_fn(cl));
+}
+
+template<typename Closure>
+inline void OopStorage::oops_do(Closure* cl) const {
+  iterate_safepoint(oop_fn(cl));
+}
+
+template<typename Closure>
+inline void OopStorage::weak_oops_do(Closure* cl) {
+  iterate_safepoint(skip_null_fn(oop_fn(cl)));
+}
+
+template<typename IsAliveClosure, typename Closure>
+inline void OopStorage::weak_oops_do(IsAliveClosure* is_alive, Closure* cl) {
+  iterate_safepoint(if_alive_fn(is_alive, oop_fn(cl)));
+}
+
+#endif // include guard
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/oopStorageParState.inline.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_OOPSTORAGEPARSTATE_INLINE_HPP
+#define SHARE_GC_SHARED_OOPSTORAGEPARSTATE_INLINE_HPP
+
+#include "gc/shared/oopStorage.inline.hpp"
+#include "memory/allocation.hpp"
+#include "metaprogramming/conditional.hpp"
+#include "utilities/macros.hpp"
+
+#if INCLUDE_ALL_GCS
+
+//////////////////////////////////////////////////////////////////////////////
+// Support for parallel and optionally concurrent state iteration.
+//
+// Parallel iteration is for the exclusive use of the GC.  Other iteration
+// clients must use serial iteration.
+//
+// Concurrent Iteration
+//
+// Iteration involves the _active_list, which contains all of the blocks owned
+// by a storage object.  This is a doubly-linked list, linked through
+// dedicated fields in the blocks.
+//
+// At most one concurrent ParState can exist at a time for a given storage
+// object.
+//
+// A concurrent ParState sets the associated storage's
+// _concurrent_iteration_active flag true when the state is constructed, and
+// sets it false when the state is destroyed.  These assignments are made with
+// _active_mutex locked.  Meanwhile, empty block deletion is not done while
+// _concurrent_iteration_active is true.  The flag check and the dependent
+// removal of a block from the _active_list are performed with _active_mutex
+// locked.  This prevents concurrent iteration and empty block deletion from
+// interfering with each other.
+//
+// Both allocate() and delete_empty_blocks_concurrent() lock the
+// _allocate_mutex while performing their respective list manipulations,
+// preventing them from interfering with each other.
+//
+// When allocate() creates a new block, it is added to the front of the
+// _active_list.  Then _active_head is set to the new block.  When concurrent
+// iteration is started (by a parallel worker thread calling the state's
+// iterate() function), the current _active_head is used as the initial block
+// for the iteration, with iteration proceeding down the list headed by that
+// block.
+//
+// As a result, the list over which concurrent iteration operates is stable.
+// However, once the iteration is started, later allocations may add blocks to
+// the front of the list that won't be examined by the iteration.  And while
+// the list is stable, concurrent allocate() and release() operations may
+// change the set of allocated entries in a block at any time during the
+// iteration.
+//
+// As a result, a concurrent iteration handler must accept that some
+// allocations and releases that occur after the iteration started will not be
+// seen by the iteration.  Further, some may overlap examination by the
+// iteration.  To help with this, allocate() and release() have an invariant
+// that an entry's value must be NULL when it is not in use.
+//
+// An in-progress delete_empty_blocks_concurrent() operation can contend with
+// the start of a concurrent iteration over the _active_mutex.  Since both are
+// under GC control, that potential contention can be eliminated by never
+// scheduling both operations to run at the same time.
+//
+// ParState<concurrent, is_const>
+//   concurrent must be true if iteration is concurrent with the
+//   mutator, false if iteration is at a safepoint.
+//
+//   is_const must be true if the iteration is over a constant storage
+//   object, false if the iteration may modify the storage object.
+//
+// ParState([const] OopStorage* storage)
+//   Construct an object for managing an iteration over storage.  For a
+//   concurrent ParState, empty block deletion for the associated storage
+//   is inhibited for the life of the ParState.  There can be no more
+//   than one live concurrent ParState at a time for a given storage object.
+//
+// template<typename F> void iterate(F f)
+//   Repeatedly claims a block from the associated storage that has
+//   not been processed by this iteration (possibly by other threads),
+//   and applies f to each entry in the claimed block. Assume p is of
+//   type const oop* or oop*, according to is_const. Then f(p) must be
+//   a valid expression whose value is ignored.  Concurrent uses must
+//   be prepared for an entry's value to change at any time, due to
+//   mutator activity.
+//
+// template<typename Closure> void oops_do(Closure* cl)
+//   Wrapper around iterate, providing an adaptation layer allowing
+//   the use of OopClosures and similar objects for iteration.  Assume
+//   p is of type const oop* or oop*, according to is_const.  Then
+//   cl->do_oop(p) must be a valid expression whose value is ignored.
+//   Concurrent uses must be prepared for the entry's value to change
+//   at any time, due to mutator activity.
+//
+// Optional operations, provided only if !concurrent && !is_const.
+// These are not provided when is_const, because the storage object
+// may be modified by the iteration infrastructure, even if the
+// provided closure doesn't modify the storage object.  These are not
+// provided when concurrent because any pre-filtering behavior by the
+// iteration infrastructure is inappropriate for concurrent iteration;
+// modifications of the storage by the mutator could result in the
+// pre-filtering being applied (successfully or not) to objects that
+// are unrelated to what the closure finds in the entry.
+//
+// template<typename Closure> void weak_oops_do(Closure* cl)
+// template<typename IsAliveClosure, typename Closure>
+// void weak_oops_do(IsAliveClosure* is_alive, Closure* cl)
+//   Wrappers around iterate, providing an adaptation layer allowing
+//   the use of is-alive closures and OopClosures for iteration.
+//   Assume p is of type oop*.  Then
+//
+//   - cl->do_oop(p) must be a valid expression whose value is ignored.
+//
+//   - is_alive->do_object_b(*p) must be a valid expression whose value
+//   is convertible to bool.
+//
+//   If *p == NULL then neither is_alive nor cl will be invoked for p.
+//   If is_alive->do_object_b(*p) is false, then cl will not be
+//   invoked on p.
+
+class OopStorage::BasicParState VALUE_OBJ_CLASS_SPEC {
+  OopStorage* _storage;
+  void* volatile _next_block;
+  bool _concurrent;
+
+  // Noncopyable.
+  BasicParState(const BasicParState&);
+  BasicParState& operator=(const BasicParState&);
+
+  void update_iteration_state(bool value);
+  void ensure_iteration_started();
+  Block* claim_next_block();
+
+  // Wrapper for iteration handler; ignore handler result and return true.
+  template<typename F> class AlwaysTrueFn;
+
+public:
+  BasicParState(OopStorage* storage, bool concurrent);
+  ~BasicParState();
+
+  template<bool is_const, typename F> void iterate(F f) {
+    // Wrap f in ATF so we can use Block::iterate.
+    AlwaysTrueFn<F> atf_f(f);
+    ensure_iteration_started();
+    typename Conditional<is_const, const Block*, Block*>::type block;
+    while ((block = claim_next_block()) != NULL) {
+      block->iterate(atf_f);
+    }
+  }
+};
+
+template<typename F>
+class OopStorage::BasicParState::AlwaysTrueFn VALUE_OBJ_CLASS_SPEC {
+  F _f;
+
+public:
+  AlwaysTrueFn(F f) : _f(f) {}
+
+  template<typename OopPtr>     // [const] oop*
+  bool operator()(OopPtr ptr) const { _f(ptr); return true; }
+};
+
+template<bool concurrent, bool is_const>
+class OopStorage::ParState VALUE_OBJ_CLASS_SPEC {
+  BasicParState _basic_state;
+
+public:
+  ParState(const OopStorage* storage) :
+    // For simplicity, always recorded as non-const.
+    _basic_state(const_cast<OopStorage*>(storage), concurrent)
+  {}
+
+  template<typename F>
+  void iterate(F f) {
+    _basic_state.template iterate<is_const>(f);
+  }
+
+  template<typename Closure>
+  void oops_do(Closure* cl) {
+    this->iterate(oop_fn(cl));
+  }
+};
+
+template<>
+class OopStorage::ParState<false, false> VALUE_OBJ_CLASS_SPEC {
+  BasicParState _basic_state;
+
+public:
+  ParState(OopStorage* storage) :
+    _basic_state(storage, false)
+  {}
+
+  template<typename F>
+  void iterate(F f) {
+    _basic_state.template iterate<false>(f);
+  }
+
+  template<typename Closure>
+  void oops_do(Closure* cl) {
+    this->iterate(oop_fn(cl));
+  }
+
+  template<typename Closure>
+  void weak_oops_do(Closure* cl) {
+    this->iterate(skip_null_fn(oop_fn(cl)));
+  }
+
+  template<typename IsAliveClosure, typename Closure>
+  void weak_oops_do(IsAliveClosure* is_alive, Closure* cl) {
+    this->iterate(if_alive_fn(is_alive, oop_fn(cl)));
+  }
+};
+
+#endif // INCLUDE_ALL_GCS
+
+#endif // include guard
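
The ParState machinery documented above parcels blocks out to GC worker threads so that each block is processed by exactly one worker per iteration. The real BasicParState claims blocks from the storage's active list under its own synchronization; the standalone sketch below (assumed names, an atomic index over a fixed block count standing in for claim_next_block) shows only the claim/process shape of iterate():

#include <atomic>
#include <cstddef>

// Each worker repeatedly claims the next unprocessed block and processes all
// of its entries; when no blocks remain, the worker returns.
struct ParallelBlockClaimer {
  std::atomic<std::size_t> _next{0};
  std::size_t _block_count;

  explicit ParallelBlockClaimer(std::size_t block_count) : _block_count(block_count) {}

  template<typename ProcessBlock>
  void work(ProcessBlock process) {
    for (;;) {
      std::size_t index = _next.fetch_add(1, std::memory_order_relaxed);
      if (index >= _block_count) return;  // every block has been claimed
      process(index);                     // exactly one worker sees each block
    }
  }
};

Several threads may call work() on the same claimer concurrently; the fetch_add guarantees that no block index is handed out twice.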
--- a/src/hotspot/share/interpreter/bytecode.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/interpreter/bytecode.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -207,8 +207,7 @@
 
 BasicType Bytecode_loadconstant::result_type() const {
   int index = pool_index();
-  constantTag tag = _method->constants()->tag_at(index);
-  return tag.basic_type();
+  return _method->constants()->basic_type_for_constant_at(index);
 }
 
 oop Bytecode_loadconstant::resolve_constant(TRAPS) const {
@@ -217,6 +216,8 @@
   ConstantPool* constants = _method->constants();
   if (has_cache_index()) {
     return constants->resolve_cached_constant_at(index, THREAD);
+  } else if (_method->constants()->tag_at(index).is_dynamic_constant()) {
+    return constants->resolve_possibly_cached_constant_at(index, THREAD);
   } else {
     return constants->resolve_constant_at(index, THREAD);
   }
--- a/src/hotspot/share/interpreter/bytecodeInterpreter.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/interpreter/bytecodeInterpreter.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -2368,6 +2368,30 @@
             THREAD->set_vm_result(NULL);
             break;
 
+          case JVM_CONSTANT_Dynamic:
+            {
+              oop result = constants->resolved_references()->obj_at(index);
+              if (result == NULL) {
+                CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
+                result = THREAD->vm_result();
+              }
+              VERIFY_OOP(result);
+
+              jvalue value;
+              BasicType type = java_lang_boxing_object::get_value(result, &value);
+              switch (type) {
+              case T_FLOAT:   SET_STACK_FLOAT(value.f, 0); break;
+              case T_INT:     SET_STACK_INT(value.i, 0); break;
+              case T_SHORT:   SET_STACK_INT(value.s, 0); break;
+              case T_BYTE:    SET_STACK_INT(value.b, 0); break;
+              case T_CHAR:    SET_STACK_INT(value.c, 0); break;
+              case T_BOOLEAN: SET_STACK_INT(value.z, 0); break;
+              default:  ShouldNotReachHere();
+              }
+
+              break;
+            }
+
           default:  ShouldNotReachHere();
           }
           UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
@@ -2387,6 +2411,27 @@
           case JVM_CONSTANT_Double:
              SET_STACK_DOUBLE(constants->double_at(index), 1);
             break;
+
+          case JVM_CONSTANT_Dynamic:
+            {
+              oop result = constants->resolved_references()->obj_at(index);
+              if (result == NULL) {
+                CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
+                result = THREAD->vm_result();
+              }
+              VERIFY_OOP(result);
+
+              jvalue value;
+              BasicType type = java_lang_boxing_object::get_value(result, &value);
+              switch (type) {
+              case T_DOUBLE: SET_STACK_DOUBLE(value.d, 1); break;
+              case T_LONG:   SET_STACK_LONG(value.j, 1); break;
+              default:  ShouldNotReachHere();
+              }
+
+              break;
+            }
+
           default:  ShouldNotReachHere();
           }
           UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2);
@@ -2404,7 +2449,7 @@
           incr = 3;
         }
 
-        // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
+        // We are resolved if the resolved_references array contains a non-null object (CallSite, etc.)
         // This kind of CP cache entry does not need to match the flags byte, because
         // there is a 1-1 relation between bytecode type and CP entry type.
         ConstantPool* constants = METHOD->constants();
@@ -2414,6 +2459,8 @@
                   handle_exception);
           result = THREAD->vm_result();
         }
+        if (result == Universe::the_null_sentinel())
+          result = NULL;
 
         VERIFY_OOP(result);
         SET_STACK_OBJECT(result, 0);
@@ -2425,7 +2472,7 @@
         u4 index = Bytes::get_native_u4(pc+1);
         ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
 
-        // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.)
+        // We are resolved if the resolved_references array contains a non-null object (CallSite, etc.)
         // This kind of CP cache entry does not need to match the flags byte, because
         // there is a 1-1 relation between bytecode type and CP entry type.
         if (! cache->is_resolved((Bytecodes::Code) opcode)) {
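
The new JVM_CONSTANT_Dynamic cases above resolve the constant (via resolve_ldc if it is not cached yet), extract the primitive from the boxing object with java_lang_boxing_object::get_value, and push it with the SET_STACK_* macro matching its BasicType, using one stack slot for category-1 values and two for long/double. A standalone analogue of that unbox-and-push dispatch (hypothetical Boxed/Slot types, not the interpreter's real macros):

#include <cstdint>
#include <cstring>
#include <vector>

using Slot = std::uint64_t;           // stand-in for one interpreter stack slot

enum class Tag { Int, Float, Long, Double };

struct Boxed {                        // stand-in for a java.lang boxing object
  Tag tag;
  union { std::int32_t i; float f; std::int64_t j; double d; } v;
};

// Push the unboxed primitive: one slot for int/float, two for long/double,
// mirroring the interpreter's per-BasicType switch for condy.
void push_unboxed(std::vector<Slot>& stack, const Boxed& b) {
  Slot s = 0;
  switch (b.tag) {
  case Tag::Int:    s = static_cast<std::uint32_t>(b.v.i); stack.push_back(s); break;
  case Tag::Float:  std::memcpy(&s, &b.v.f, sizeof b.v.f); stack.push_back(s); break;
  case Tag::Long:   std::memcpy(&s, &b.v.j, sizeof b.v.j); stack.push_back(s); stack.push_back(0); break;
  case Tag::Double: std::memcpy(&s, &b.v.d, sizeof b.v.d); stack.push_back(s); stack.push_back(0); break;
  }
}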
--- a/src/hotspot/share/interpreter/bytecodeTracer.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/interpreter/bytecodeTracer.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -367,6 +367,7 @@
   case JVM_CONSTANT_Fieldref:
     break;
   case JVM_CONSTANT_NameAndType:
+  case JVM_CONSTANT_Dynamic:
   case JVM_CONSTANT_InvokeDynamic:
     has_klass = false;
     break;
@@ -382,7 +383,7 @@
     Symbol* klass = constants->klass_name_at(constants->uncached_klass_ref_index_at(i));
     st->print_cr(" %d <%s.%s%s%s> ", i, klass->as_C_string(), name->as_C_string(), sep, signature->as_C_string());
   } else {
-    if (tag.is_invoke_dynamic()) {
+    if (tag.is_dynamic_constant() || tag.is_invoke_dynamic()) {
       int bsm = constants->invoke_dynamic_bootstrap_method_ref_index_at(i);
       st->print(" bsm=%d", bsm);
     }
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -118,22 +118,54 @@
 IRT_END
 
 IRT_ENTRY(void, InterpreterRuntime::resolve_ldc(JavaThread* thread, Bytecodes::Code bytecode)) {
-  assert(bytecode == Bytecodes::_fast_aldc ||
+  assert(bytecode == Bytecodes::_ldc ||
+         bytecode == Bytecodes::_ldc_w ||
+         bytecode == Bytecodes::_ldc2_w ||
+         bytecode == Bytecodes::_fast_aldc ||
          bytecode == Bytecodes::_fast_aldc_w, "wrong bc");
   ResourceMark rm(thread);
+  const bool is_fast_aldc = (bytecode == Bytecodes::_fast_aldc ||
+                             bytecode == Bytecodes::_fast_aldc_w);
   LastFrameAccessor last_frame(thread);
   methodHandle m (thread, last_frame.method());
   Bytecode_loadconstant ldc(m, last_frame.bci());
+
+  // Double-check the size.  (Condy can have any type.)
+  BasicType type = ldc.result_type();
+  switch (type2size[type]) {
+  case 2: guarantee(bytecode == Bytecodes::_ldc2_w, ""); break;
+  case 1: guarantee(bytecode != Bytecodes::_ldc2_w, ""); break;
+  default: ShouldNotReachHere();
+  }
+
+  // Resolve the constant.  This does not do unboxing.
+  // But it does replace Universe::the_null_sentinel by null.
   oop result = ldc.resolve_constant(CHECK);
+  assert(result != NULL || is_fast_aldc, "null result only valid for fast_aldc");
+
 #ifdef ASSERT
   {
     // The bytecode wrappers aren't GC-safe so construct a new one
     Bytecode_loadconstant ldc2(m, last_frame.bci());
-    oop coop = m->constants()->resolved_references()->obj_at(ldc2.cache_index());
-    assert(result == coop, "expected result for assembly code");
+    int rindex = ldc2.cache_index();
+    if (rindex < 0)
+      rindex = m->constants()->cp_to_object_index(ldc2.pool_index());
+    if (rindex >= 0) {
+      oop coop = m->constants()->resolved_references()->obj_at(rindex);
+      oop roop = (result == NULL ? Universe::the_null_sentinel() : result);
+      assert(roop == coop, "expected result for assembly code");
+    }
   }
 #endif
   thread->set_vm_result(result);
+  if (!is_fast_aldc) {
+    // Tell the interpreter how to unbox the primitive.
+    guarantee(java_lang_boxing_object::is_instance(result, type), "");
+    int offset = java_lang_boxing_object::value_offset_in_bytes(type);
+    intptr_t flags = ((as_TosState(type) << ConstantPoolCacheEntry::tos_state_shift)
+                      | (offset & ConstantPoolCacheEntry::field_index_mask));
+    thread->set_vm_result_2((Metadata*)flags);
+  }
 }
 IRT_END
 
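For a non-fast ldc of a dynamic constant, resolve_ldc above also tells the interpreter how to unbox the result: it packs the top-of-stack state and the boxing object's value-field offset into a single flags word passed back through vm_result_2. A standalone sketch of that bit-packing (the shift and mask constants here are illustrative; the real ones live in ConstantPoolCacheEntry):

#include <cassert>
#include <cstdint>

const unsigned      tos_state_shift  = 28;              // illustrative
const std::uint32_t field_index_mask = (1u << 16) - 1;  // illustrative

// Pack the to-be-pushed type (tos state) and the boxed value's field offset
// into one word, and unpack them again on the interpreter side.
std::uint32_t pack_unbox_hint(std::uint32_t tos_state, std::uint32_t value_offset) {
  return (tos_state << tos_state_shift) | (value_offset & field_index_mask);
}
std::uint32_t unpack_tos_state(std::uint32_t flags) { return flags >> tos_state_shift; }
std::uint32_t unpack_offset(std::uint32_t flags)    { return flags & field_index_mask; }

int main() {
  std::uint32_t flags = pack_unbox_hint(/*tos_state=*/7, /*value_offset=*/12);
  assert(unpack_tos_state(flags) == 7);
  assert(unpack_offset(flags) == 12);
  return 0;
}
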
--- a/src/hotspot/share/interpreter/linkResolver.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/interpreter/linkResolver.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -41,6 +41,7 @@
 #include "memory/universe.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/method.hpp"
+#include "oops/objArrayKlass.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/methodHandles.hpp"
@@ -54,7 +55,6 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
 
-
 //------------------------------------------------------------------------------------------------------------------------
 // Implementation of CallInfo
 
@@ -284,20 +284,32 @@
 //------------------------------------------------------------------------------------------------------------------------
 // Klass resolution
 
-void LinkResolver::check_klass_accessability(Klass* ref_klass, Klass* sel_klass, TRAPS) {
+void LinkResolver::check_klass_accessability(Klass* ref_klass, Klass* sel_klass,
+                                             bool fold_type_to_class, TRAPS) {
+  Klass* base_klass = sel_klass;
+  if (fold_type_to_class) {
+    if (sel_klass->is_objArray_klass()) {
+      base_klass = ObjArrayKlass::cast(sel_klass)->bottom_klass();
+    }
+    // The element type could be a typeArray - we only need the access
+    // check if it is an reference to another class.
+    if (!base_klass->is_instance_klass()) {
+      return;  // no relevant check to do
+    }
+  }
   Reflection::VerifyClassAccessResults vca_result =
-    Reflection::verify_class_access(ref_klass, InstanceKlass::cast(sel_klass), true);
+    Reflection::verify_class_access(ref_klass, InstanceKlass::cast(base_klass), true);
   if (vca_result != Reflection::ACCESS_OK) {
     ResourceMark rm(THREAD);
     char* msg = Reflection::verify_class_access_msg(ref_klass,
-                                                    InstanceKlass::cast(sel_klass),
+                                                    InstanceKlass::cast(base_klass),
                                                     vca_result);
     if (msg == NULL) {
       Exceptions::fthrow(
         THREAD_AND_LOCATION,
         vmSymbols::java_lang_IllegalAccessError(),
         "failed to access class %s from class %s",
-        sel_klass->external_name(),
+        base_klass->external_name(),
         ref_klass->external_name());
     } else {
       // Use module specific message returned by verify_class_access_msg().
@@ -1663,31 +1675,6 @@
   result.set_handle(resolved_klass, resolved_method, resolved_appendix, resolved_method_type, CHECK);
 }
 
-static void wrap_invokedynamic_exception(TRAPS) {
-  if (HAS_PENDING_EXCEPTION) {
-    // See the "Linking Exceptions" section for the invokedynamic instruction
-    // in JVMS 6.5.
-    if (PENDING_EXCEPTION->is_a(SystemDictionary::Error_klass())) {
-      // Pass through an Error, including BootstrapMethodError, any other form
-      // of linkage error, or say ThreadDeath/OutOfMemoryError
-      if (TraceMethodHandles) {
-        tty->print_cr("invokedynamic passes through an Error for " INTPTR_FORMAT, p2i((void *)PENDING_EXCEPTION));
-        PENDING_EXCEPTION->print();
-      }
-      return;
-    }
-
-    // Otherwise wrap the exception in a BootstrapMethodError
-    if (TraceMethodHandles) {
-      tty->print_cr("invokedynamic throws BSME for " INTPTR_FORMAT, p2i((void *)PENDING_EXCEPTION));
-      PENDING_EXCEPTION->print();
-    }
-    Handle nested_exception(THREAD, PENDING_EXCEPTION);
-    CLEAR_PENDING_EXCEPTION;
-    THROW_CAUSE(vmSymbols::java_lang_BootstrapMethodError(), nested_exception)
-  }
-}
-
 void LinkResolver::resolve_invokedynamic(CallInfo& result, const constantPoolHandle& pool, int index, TRAPS) {
   Symbol* method_name       = pool->name_ref_at(index);
   Symbol* method_signature  = pool->signature_ref_at(index);
@@ -1714,7 +1701,7 @@
     // set the indy_rf flag since any subsequent invokedynamic instruction which shares
     // this bootstrap method will encounter the resolution of MethodHandleInError.
     oop bsm_info = pool->resolve_bootstrap_specifier_at(pool_index, THREAD);
-    wrap_invokedynamic_exception(CHECK);
+    Exceptions::wrap_dynamic_exception(CHECK);
     assert(bsm_info != NULL, "");
     // FIXME: Cache this once per BootstrapMethods entry, not once per CONSTANT_InvokeDynamic.
     bootstrap_specifier = Handle(THREAD, bsm_info);
@@ -1724,7 +1711,7 @@
     Handle       appendix(   THREAD, cpce->appendix_if_resolved(pool));
     Handle       method_type(THREAD, cpce->method_type_if_resolved(pool));
     result.set_handle(method, appendix, method_type, THREAD);
-    wrap_invokedynamic_exception(CHECK);
+    Exceptions::wrap_dynamic_exception(CHECK);
     return;
   }
 
@@ -1737,7 +1724,7 @@
     tty->print("  BSM info: "); bootstrap_specifier->print();
   }
 
-  resolve_dynamic_call(result, bootstrap_specifier, method_name,
+  resolve_dynamic_call(result, pool_index, bootstrap_specifier, method_name,
                        method_signature, current_klass, THREAD);
   if (HAS_PENDING_EXCEPTION && PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass())) {
     int encoded_index = ResolutionErrorTable::encode_cpcache_index(index);
@@ -1753,7 +1740,7 @@
         Handle       appendix(   THREAD, cpce->appendix_if_resolved(pool));
         Handle       method_type(THREAD, cpce->method_type_if_resolved(pool));
         result.set_handle(method, appendix, method_type, THREAD);
-        wrap_invokedynamic_exception(CHECK);
+        Exceptions::wrap_dynamic_exception(CHECK);
       } else {
         assert(cpce->indy_resolution_failed(), "Resolution failure flag not set");
         ConstantPool::throw_resolution_error(pool, encoded_index, CHECK);
@@ -1765,6 +1752,7 @@
 }
 
 void LinkResolver::resolve_dynamic_call(CallInfo& result,
+                                        int pool_index,
                                         Handle bootstrap_specifier,
                                         Symbol* method_name, Symbol* method_signature,
                                         Klass* current_klass,
@@ -1775,12 +1763,13 @@
   Handle       resolved_method_type;
   methodHandle resolved_method =
     SystemDictionary::find_dynamic_call_site_invoker(current_klass,
+                                                     pool_index,
                                                      bootstrap_specifier,
                                                      method_name, method_signature,
                                                      &resolved_appendix,
                                                      &resolved_method_type,
                                                      THREAD);
-  wrap_invokedynamic_exception(CHECK);
+  Exceptions::wrap_dynamic_exception(CHECK);
   result.set_handle(resolved_method, resolved_appendix, resolved_method_type, THREAD);
-  wrap_invokedynamic_exception(CHECK);
+  Exceptions::wrap_dynamic_exception(CHECK);
 }
--- a/src/hotspot/share/interpreter/linkResolver.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/interpreter/linkResolver.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -274,7 +274,16 @@
                                       const constantPoolHandle& pool, int index, TRAPS);
  public:
   // constant pool resolving
-  static void check_klass_accessability(Klass* ref_klass, Klass* sel_klass, TRAPS);
+  static void check_klass_accessability(Klass* ref_klass, Klass* sel_klass,
+                                        bool fold_type_to_class, TRAPS);
+  // The optional 'fold_type_to_class' means that a derived type (array)
+  // is first converted to the class it is derived from (element type).
+  // If this element type is not a class, then the check passes quietly.
+  // This is usually what is needed, but a few existing uses might break
+  // if this flag were always turned on.  FIXME: See if it can always be turned on.
+  static void check_klass_accessability(Klass* ref_klass, Klass* sel_klass, TRAPS) {
+    return check_klass_accessability(ref_klass, sel_klass, false, THREAD);
+  }
 
   // static resolving calls (will not run any Java code);
   // used only from Bytecode_invoke::static_target
@@ -306,7 +315,7 @@
                                      bool check_null_and_abstract, TRAPS);
   static void resolve_handle_call   (CallInfo& result,
                                      const LinkInfo& link_info, TRAPS);
-  static void resolve_dynamic_call  (CallInfo& result, Handle bootstrap_specifier,
+  static void resolve_dynamic_call  (CallInfo& result, int pool_index, Handle bootstrap_specifier,
                                      Symbol* method_name, Symbol* method_signature,
                                      Klass* current_klass, TRAPS);
 
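The fold_type_to_class flag documented above converts an array type to its element class before the accessibility check, and skips the check entirely for primitives and primitive arrays. The real code works on Klass pointers; the sketch below is a descriptor-level analogue (hypothetical helper, illustrative only):

#include <cstddef>
#include <string>

// Return the element class name that needs an access check, or an empty
// string when no check is needed (primitives and primitive arrays).
std::string element_class_to_check(const std::string& descriptor) {
  std::size_t i = 0;
  while (i < descriptor.size() && descriptor[i] == '[') {
    ++i;                                  // fold away array dimensions
  }
  if (i < descriptor.size() && descriptor[i] == 'L') {
    // "Ljava/lang/String;" -> "java/lang/String"
    return descriptor.substr(i + 1, descriptor.size() - i - 2);
  }
  return std::string();                   // e.g. "[I": nothing to check
}
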
--- a/src/hotspot/share/interpreter/rewriter.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/interpreter/rewriter.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,6 @@
 #include "interpreter/interpreter.hpp"
 #include "interpreter/rewriter.hpp"
 #include "memory/metadataFactory.hpp"
-#include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/generateOopMap.hpp"
 #include "prims/methodHandles.hpp"
@@ -49,7 +48,11 @@
       case JVM_CONSTANT_Methodref         : // fall through
         add_cp_cache_entry(i);
         break;
-      case JVM_CONSTANT_String:
+      case JVM_CONSTANT_Dynamic:
+        assert(_pool->has_dynamic_constant(), "constant pool's _has_dynamic_constant flag not set");
+        add_resolved_references_entry(i);
+        break;
+      case JVM_CONSTANT_String            : // fall through
       case JVM_CONSTANT_MethodHandle      : // fall through
       case JVM_CONSTANT_MethodType        : // fall through
         add_resolved_references_entry(i);
@@ -322,7 +325,14 @@
     address p = bcp + offset;
     int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
     constantTag tag = _pool->tag_at(cp_index).value();
-    if (tag.is_method_handle() || tag.is_method_type() || tag.is_string()) {
+
+    if (tag.is_method_handle() ||
+        tag.is_method_type() ||
+        tag.is_string() ||
+        (tag.is_dynamic_constant() &&
+         // keep regular ldc interpreter logic for condy primitives
+         is_reference_type(FieldType::basic_type(_pool->uncached_signature_ref_at(cp_index))))
+        ) {
       int ref_index = cp_entry_to_resolved_references(cp_index);
       if (is_wide) {
         (*bcp) = Bytecodes::_fast_aldc_w;
@@ -556,7 +566,7 @@
 
 void Rewriter::rewrite(InstanceKlass* klass, TRAPS) {
   if (!DumpSharedSpaces) {
-    assert(!MetaspaceShared::is_in_shared_space(klass), "archive methods must not be rewritten at run time");
+    assert(!klass->is_shared(), "archive methods must not be rewritten at run time");
   }
   ResourceMark rm(THREAD);
   Rewriter     rw(klass, klass->constants(), klass->methods(), CHECK);
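
The rewriter change above sends a dynamic constant through the _fast_aldc path only when its type is a reference; primitive condys keep the regular ldc logic so the interpreter can unbox them. A one-line descriptor-level analogue of that test (hypothetical helper; the real code uses FieldType::basic_type and is_reference_type):

// A condy whose descriptor names a class or an array is a reference type and
// can be cached in resolved_references, i.e. rewritten to _fast_aldc.
inline bool condy_uses_fast_aldc(char first_descriptor_char) {
  return first_descriptor_char == 'L' || first_descriptor_char == '[';
}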
--- a/src/hotspot/share/interpreter/templateTable.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/interpreter/templateTable.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -278,7 +278,7 @@
   def(Bytecodes::_sipush              , ubcp|____|____|____, vtos, itos, sipush              ,  _           );
   def(Bytecodes::_ldc                 , ubcp|____|clvm|____, vtos, vtos, ldc                 ,  false       );
   def(Bytecodes::_ldc_w               , ubcp|____|clvm|____, vtos, vtos, ldc                 ,  true        );
-  def(Bytecodes::_ldc2_w              , ubcp|____|____|____, vtos, vtos, ldc2_w              ,  _           );
+  def(Bytecodes::_ldc2_w              , ubcp|____|clvm|____, vtos, vtos, ldc2_w              ,  _           );
   def(Bytecodes::_iload               , ubcp|____|clvm|____, vtos, itos, iload               ,  _           );
   def(Bytecodes::_lload               , ubcp|____|____|____, vtos, ltos, lload               ,  _           );
   def(Bytecodes::_fload               , ubcp|____|____|____, vtos, ftos, fload               ,  _           );
--- a/src/hotspot/share/interpreter/templateTable.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/interpreter/templateTable.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -295,6 +295,7 @@
   static void getstatic(int byte_no);
   static void putstatic(int byte_no);
   static void pop_and_check_object(Register obj);
+  static void condy_helper(Label& Done);  // shared by ldc instances
 
   static void _new();
   static void newarray();
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -766,11 +766,10 @@
 
 C2V_VMENTRY(jboolean, isCompilable,(JNIEnv *, jobject, jobject jvmci_method))
   methodHandle method = CompilerToVM::asMethod(jvmci_method);
-  // Skip redefined methods
-  if (method->is_old()) {
-    return false;
-  }
-  return !method->is_not_compilable(CompLevel_full_optimization);
+  constantPoolHandle cp = method->constMethod()->constants();
+  assert(!cp.is_null(), "npe");
+  // don't treat the method as compilable when its constant pool contains a CONSTANT_Dynamic
+  return !method->is_not_compilable(CompLevel_full_optimization) && !cp->has_dynamic_constant();
 C2V_END
 
 C2V_VMENTRY(jboolean, hasNeverInlineDirective,(JNIEnv *, jobject, jobject jvmci_method))
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -119,6 +119,7 @@
   nonstatic_field(ConstantPool,                _tags,                                  Array<u1>*)                                   \
   nonstatic_field(ConstantPool,                _pool_holder,                           InstanceKlass*)                               \
   nonstatic_field(ConstantPool,                _length,                                int)                                          \
+  nonstatic_field(ConstantPool,                _flags,                                 int)                                          \
                                                                                                                                      \
   nonstatic_field(ConstMethod,                 _constants,                             ConstantPool*)                                \
   nonstatic_field(ConstMethod,                 _flags,                                 u2)                                           \
@@ -415,6 +416,7 @@
   declare_constant(JVM_CONSTANT_UnresolvedClassInError)                   \
   declare_constant(JVM_CONSTANT_MethodHandleInError)                      \
   declare_constant(JVM_CONSTANT_MethodTypeInError)                        \
+  declare_constant(JVM_CONSTANT_DynamicInError)                           \
   declare_constant(JVM_CONSTANT_InternalMax)                              \
                                                                           \
   declare_constant(ArrayData::array_len_off_set)                          \
@@ -452,6 +454,7 @@
   declare_constant(CodeInstaller::INVOKE_INVALID)                         \
                                                                           \
   declare_constant(ConstantPool::CPCACHE_INDEX_TAG)                       \
+  declare_constant(ConstantPool::_has_dynamic_constant)                   \
                                                                           \
   declare_constant(ConstMethod::_has_linenumber_table)                    \
   declare_constant(ConstMethod::_has_localvariable_table)                 \
--- a/src/hotspot/share/logging/logConfiguration.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/logging/logConfiguration.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -296,7 +296,7 @@
   notify_update_listeners();
 }
 
-void LogConfiguration::configure_stdout(LogLevelType level, bool exact_match, ...) {
+void LogConfiguration::configure_stdout(LogLevelType level, int exact_match, ...) {
   size_t i;
   va_list ap;
   LogTagLevelExpression expr;
--- a/src/hotspot/share/logging/logConfiguration.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/logging/logConfiguration.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -102,7 +102,7 @@
   // (exact_match=false is the same as "-Xlog:<tags>*=<level>", and exact_match=true is "-Xlog:<tags>=<level>").
   // Tags should be specified using the LOG_TAGS macro, e.g.
   // LogConfiguration::configure_stdout(LogLevel::<level>, <true/false>, LOG_TAGS(<tags>));
-  static void configure_stdout(LogLevelType level, bool exact_match, ...);
+  static void configure_stdout(LogLevelType level, int exact_match, ...);
 
   // Parse command line configuration. Parameter 'opts' is the string immediately following the -Xlog: argument ("gc" for -Xlog:gc).
   static bool parse_command_line_arguments(const char* opts = "all");
--- a/src/hotspot/share/logging/logTag.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/logging/logTag.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -101,6 +101,7 @@
   LOG_TAG(objecttagging) \
   LOG_TAG(obsolete) \
   LOG_TAG(oopmap) \
+  LOG_TAG(oopstorage) \
   LOG_TAG(os) \
   LOG_TAG(pagesize) \
   LOG_TAG(patch) \
--- a/src/hotspot/share/logging/logTagLevelExpression.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/logging/logTagLevelExpression.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -40,7 +40,7 @@
   static const size_t MaxCombinations = 256;
 
  private:
-  friend void LogConfiguration::configure_stdout(LogLevelType, bool, ...);
+  friend void LogConfiguration::configure_stdout(LogLevelType, int, ...);
 
   static const char* DefaultExpressionString;
 
--- a/src/hotspot/share/memory/allocation.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/memory/allocation.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,9 @@
 #include "services/memTracker.hpp"
 #include "utilities/ostream.hpp"
 
+void* MetaspaceObj::_shared_metaspace_base = NULL;
+void* MetaspaceObj::_shared_metaspace_top  = NULL;
+
 void* StackObj::operator new(size_t size)     throw() { ShouldNotCallThis(); return 0; }
 void  StackObj::operator delete(void* p)              { ShouldNotCallThis(); }
 void* StackObj::operator new [](size_t size)  throw() { ShouldNotCallThis(); return 0; }
@@ -54,10 +57,6 @@
   return Metaspace::allocate(loader_data, word_size, type, THREAD);
 }
 
-bool MetaspaceObj::is_shared() const {
-  return MetaspaceShared::is_in_shared_space(this);
-}
-
 bool MetaspaceObj::is_metaspace_object() const {
   return Metaspace::contains((void*)this);
 }
--- a/src/hotspot/share/memory/allocation.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/memory/allocation.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -227,9 +227,23 @@
 class MetaspaceClosure;
 
 class MetaspaceObj {
+  friend class MetaspaceShared;
+  // When CDS is enabled, all shared metaspace objects are mapped
+  // into a single contiguous memory block, so we can use these
+  // two pointers to quickly determine if something is in the
+  // shared metaspace.
+  //
+  // When CDS is not enabled, both pointers are set to NULL.
+  static void* _shared_metaspace_base; // (inclusive) low address
+  static void* _shared_metaspace_top;  // (exclusive) high address
+
  public:
   bool is_metaspace_object() const;
-  bool is_shared() const;
+  bool is_shared() const {
+    // If no shared metaspace regions are mapped, _shared_metaspace_{base,top} will
+    // both be NULL and any address will be rejected quickly.
+    return (((void*)this) < _shared_metaspace_top && ((void*)this) >= _shared_metaspace_base);
+  }
   void print_address_on(outputStream* st) const;  // nonvirtual address printing
 
 #define METASPACE_OBJ_TYPES_DO(f) \
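
MetaspaceObj::is_shared() above is a single half-open range test against the mapped archive: anything in [_shared_metaspace_base, _shared_metaspace_top) is shared, and when no archive is mapped both bounds are NULL so every address fails the test. A standalone sketch of the same idiom (illustrative names, integer addresses to keep the comparison well defined):

#include <cstdint>

static std::uintptr_t shared_base = 0;  // set once when the archive is mapped
static std::uintptr_t shared_top  = 0;  // exclusive upper bound

inline bool in_shared_metaspace(const void* p) {
  std::uintptr_t a = reinterpret_cast<std::uintptr_t>(p);
  // With no archive mapped, base == top == 0 and nothing passes.
  return a >= shared_base && a < shared_top;
}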
--- a/src/hotspot/share/memory/filemap.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/memory/filemap.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -659,7 +659,7 @@
 static const char* shared_region_name[] = { "MiscData", "ReadWrite", "ReadOnly", "MiscCode", "OptionalData",
                                             "String1", "String2", "OpenArchive1", "OpenArchive2" };
 
-char* FileMapInfo::map_region(int i) {
+char* FileMapInfo::map_region(int i, char** top_ret) {
   assert(!MetaspaceShared::is_heap_region(i), "sanity");
   struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
   size_t used = si->_used;
@@ -686,6 +686,12 @@
   MemTracker::record_virtual_memory_type((address)base, mtClassShared);
 #endif
 
+
+  if (!verify_region_checksum(i)) {
+    return NULL;
+  }
+
+  *top_ret = base + size;
   return base;
 }
 
@@ -1040,27 +1046,6 @@
   return status;
 }
 
-// The following method is provided to see whether a given pointer
-// falls in the mapped shared metadata space.
-// Param:
-// p, The given pointer
-// Return:
-// True if the p is within the mapped shared space, otherwise, false.
-bool FileMapInfo::is_in_shared_space(const void* p) {
-  for (int i = 0; i < MetaspaceShared::num_non_heap_spaces; i++) {
-    char *base;
-    if (_header->_space[i]._used == 0) {
-      continue;
-    }
-    base = _header->region_addr(i);
-    if (p >= base && p < base + _header->_space[i]._used) {
-      return true;
-    }
-  }
-
-  return false;
-}
-
 // Check if a given address is within one of the shared regions
 bool FileMapInfo::is_in_shared_region(const void* p, int idx) {
   assert(idx == MetaspaceShared::ro ||
--- a/src/hotspot/share/memory/filemap.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/memory/filemap.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -248,7 +248,7 @@
                                     int first_region_id, int max_num_regions);
   void  write_bytes(const void* buffer, int count);
   void  write_bytes_aligned(const void* buffer, int count);
-  char* map_region(int i);
+  char* map_region(int i, char** top_ret);
   void  map_heap_regions() NOT_CDS_JAVA_HEAP_RETURN;
   void  fixup_mapped_heap_regions() NOT_CDS_JAVA_HEAP_RETURN;
   void  unmap_region(int i);
@@ -265,8 +265,6 @@
   static void fail_stop(const char *msg, ...) ATTRIBUTE_PRINTF(1, 2);
   static void fail_continue(const char *msg, ...) ATTRIBUTE_PRINTF(1, 2);
 
-  // Return true if given address is in the mapped shared space.
-  bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
   bool is_in_shared_region(const void* p, int idx) NOT_CDS_RETURN_(false);
   void print_shared_spaces() NOT_CDS_RETURN;
 
--- a/src/hotspot/share/memory/metaspace.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/memory/metaspace.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -4070,7 +4070,7 @@
 }
 
 bool Metaspace::contains(const void* ptr) {
-  if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
+  if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
     return true;
   }
   return contains_non_shared(ptr);
--- a/src/hotspot/share/memory/metaspaceShared.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/memory/metaspaceShared.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -746,7 +746,7 @@
 }
 
 bool MetaspaceShared::is_valid_shared_method(const Method* m) {
-  assert(is_in_shared_space(m), "must be");
+  assert(is_in_shared_metaspace(m), "must be");
   return CppVtableCloner<Method>::is_valid_shared_object(m);
 }
 
@@ -1819,11 +1819,6 @@
   bool reading() const { return true; }
 };
 
-// Return true if given address is in the mapped shared space.
-bool MetaspaceShared::is_in_shared_space(const void* p) {
-  return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_space(p);
-}
-
 // Return true if given address is in the misc data region
 bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
   return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
@@ -1857,35 +1852,46 @@
 
   assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
 
-  char* _ro_base = NULL;
-  char* _rw_base = NULL;
-  char* _mc_base = NULL;
-  char* _md_base = NULL;
-  char* _od_base = NULL;
+  char* ro_base = NULL; char* ro_top;
+  char* rw_base = NULL; char* rw_top;
+  char* mc_base = NULL; char* mc_top;
+  char* md_base = NULL; char* md_top;
+  char* od_base = NULL; char* od_top;
 
   // Map each shared region
-  if ((_mc_base = mapinfo->map_region(mc)) != NULL &&
-      mapinfo->verify_region_checksum(mc) &&
-      (_rw_base = mapinfo->map_region(rw)) != NULL &&
-      mapinfo->verify_region_checksum(rw) &&
-      (_ro_base = mapinfo->map_region(ro)) != NULL &&
-      mapinfo->verify_region_checksum(ro) &&
-      (_md_base = mapinfo->map_region(md)) != NULL &&
-      mapinfo->verify_region_checksum(md) &&
-      (_od_base = mapinfo->map_region(od)) != NULL &&
-      mapinfo->verify_region_checksum(od) &&
+  if ((mc_base = mapinfo->map_region(mc, &mc_top)) != NULL &&
+      (rw_base = mapinfo->map_region(rw, &rw_top)) != NULL &&
+      (ro_base = mapinfo->map_region(ro, &ro_top)) != NULL &&
+      (md_base = mapinfo->map_region(md, &md_top)) != NULL &&
+      (od_base = mapinfo->map_region(od, &od_top)) != NULL &&
       (image_alignment == (size_t)os::vm_allocation_granularity()) &&
       mapinfo->validate_classpath_entry_table()) {
-    // Success (no need to do anything)
+    // Success -- set up MetaspaceObj::_shared_metaspace_{base,top} for
+    // fast checking in MetaspaceShared::is_in_shared_metaspace() and
+    // MetaspaceObj::is_shared().
+    //
+    // We require mc->rw->ro->md->od to be laid out consecutively, with no
+    // gaps between them. That way, we can ensure that the OS won't be able to
+    // allocate any new memory spaces inside _shared_metaspace_{base,top}, which
+    // would mess up the simple comparison in MetaspaceShared::is_in_shared_metaspace().
+    assert(mc_base < ro_base && mc_base < rw_base && mc_base < md_base && mc_base < od_base, "must be");
+    assert(od_top  > ro_top  && od_top  > rw_top  && od_top  > md_top  && od_top  > mc_top , "must be");
+    assert(mc_top == rw_base, "must be");
+    assert(rw_top == ro_base, "must be");
+    assert(ro_top == md_base, "must be");
+    assert(md_top == od_base, "must be");
+
+    MetaspaceObj::_shared_metaspace_base = (void*)mc_base;
+    MetaspaceObj::_shared_metaspace_top  = (void*)od_top;
     return true;
   } else {
     // If there was a failure in mapping any of the spaces, unmap the ones
     // that succeeded
-    if (_ro_base != NULL) mapinfo->unmap_region(ro);
-    if (_rw_base != NULL) mapinfo->unmap_region(rw);
-    if (_mc_base != NULL) mapinfo->unmap_region(mc);
-    if (_md_base != NULL) mapinfo->unmap_region(md);
-    if (_od_base != NULL) mapinfo->unmap_region(od);
+    if (ro_base != NULL) mapinfo->unmap_region(ro);
+    if (rw_base != NULL) mapinfo->unmap_region(rw);
+    if (mc_base != NULL) mapinfo->unmap_region(mc);
+    if (md_base != NULL) mapinfo->unmap_region(md);
+    if (od_base != NULL) mapinfo->unmap_region(od);
 #ifndef _WINDOWS
     // Release the entire mapped region
     shared_rs.release();
--- a/src/hotspot/share/memory/metaspaceShared.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/memory/metaspaceShared.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -164,8 +164,13 @@
   static bool map_shared_spaces(FileMapInfo* mapinfo) NOT_CDS_RETURN_(false);
   static void initialize_shared_spaces() NOT_CDS_RETURN;
 
-  // Return true if given address is in the mapped shared space.
-  static bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
+  // Return true if given address is in the shared metaspace regions (i.e., excluding any
+  // mapped shared heap regions.)
+  static bool is_in_shared_metaspace(const void* p) {
+    // If no shared metaspace regions are mapped, MetaspaceObj::_shared_metaspace_{base,top} will
+    // both be NULL and all values of p will be rejected quickly.
+    return (p < MetaspaceObj::_shared_metaspace_top && p >= MetaspaceObj::_shared_metaspace_base);
+  }
 
   // Return true if given address is in the shared region corresponding to the idx
   static bool is_in_shared_region(const void* p, int idx) NOT_CDS_RETURN_(false);
--- a/src/hotspot/share/memory/universe.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/memory/universe.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -111,6 +111,7 @@
 oop Universe::_system_thread_group                    = NULL;
 objArrayOop Universe::_the_empty_class_klass_array    = NULL;
 Array<Klass*>* Universe::_the_array_interfaces_array = NULL;
+oop Universe::_the_null_sentinel                      = NULL;
 oop Universe::_the_null_string                        = NULL;
 oop Universe::_the_min_jint_string                   = NULL;
 LatestMethodCache* Universe::_finalizer_register_cache = NULL;
@@ -195,6 +196,7 @@
   assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");
 
   f->do_oop((oop*)&_the_empty_class_klass_array);
+  f->do_oop((oop*)&_the_null_sentinel);
   f->do_oop((oop*)&_the_null_string);
   f->do_oop((oop*)&_the_min_jint_string);
   f->do_oop((oop*)&_out_of_memory_error_java_heap);
@@ -381,6 +383,11 @@
     initialize_basic_type_klass(longArrayKlassObj(), CHECK);
   } // end of core bootstrapping
 
+  {
+    Handle tns = java_lang_String::create_from_str("<null_sentinel>", CHECK);
+    _the_null_sentinel = tns();
+  }
+
   // Maybe this could be lifted up now that object array can be initialized
   // during the bootstrapping.
 
--- a/src/hotspot/share/memory/universe.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/memory/universe.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -141,6 +141,7 @@
   static oop          _system_thread_group;           // Reference to the system thread group object
 
   static objArrayOop  _the_empty_class_klass_array;   // Canonicalized obj array of type java.lang.Class
+  static oop          _the_null_sentinel;             // A unique object pointer unused except as a sentinel for null.
   static oop          _the_null_string;               // A cache of "null" as a Java string
   static oop          _the_min_jint_string;          // A cache of "-2147483648" as a Java string
   static LatestMethodCache* _finalizer_register_cache; // static method for registering finalizable objects
@@ -322,6 +323,9 @@
 
   static Method*      do_stack_walk_method()          { return _do_stack_walk_cache->get_method(); }
 
+  static oop          the_null_sentinel()             { return _the_null_sentinel;             }
+  static address      the_null_sentinel_addr()        { return (address) &_the_null_sentinel;  }
+
   // Function to initialize these
   static void initialize_known_methods(TRAPS);
 
--- a/src/hotspot/share/oops/access.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/oops/access.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -200,12 +200,14 @@
 const DecoratorSet IN_HEAP_ARRAY      = UCONST64(1) << 19;
 const DecoratorSet IN_ROOT            = UCONST64(1) << 20;
 const DecoratorSet IN_CONCURRENT_ROOT = UCONST64(1) << 21;
+const DecoratorSet IN_ARCHIVE_ROOT    = UCONST64(1) << 22;
 const DecoratorSet IN_DECORATOR_MASK  = IN_HEAP | IN_HEAP_ARRAY |
-                                        IN_ROOT | IN_CONCURRENT_ROOT;
+                                        IN_ROOT | IN_CONCURRENT_ROOT |
+                                        IN_ARCHIVE_ROOT;
 
 // == Value Decorators ==
 // * OOP_NOT_NULL: This property can make certain barriers faster such as compressing oops.
-const DecoratorSet OOP_NOT_NULL       = UCONST64(1) << 22;
+const DecoratorSet OOP_NOT_NULL       = UCONST64(1) << 23;
 const DecoratorSet OOP_DECORATOR_MASK = OOP_NOT_NULL;
 
 // == Arraycopy Decorators ==
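
The new IN_ARCHIVE_ROOT decorator above marks accesses to archived heap roots; in the access.inline.hpp change that follows, the decorator-reduction step folds it into IN_ROOT so such accesses are treated as root accesses. A compile-time sketch of that fix-up (bit values taken from the definitions above; the function name is illustrative):

#include <cstdint>

typedef std::uint64_t DecoratorSet;

const DecoratorSet IN_ROOT         = DecoratorSet(1) << 20;
const DecoratorSet IN_ARCHIVE_ROOT = DecoratorSet(1) << 22;

// Any access decorated with IN_ARCHIVE_ROOT is also a root access.
constexpr DecoratorSet fixup_archive_root(DecoratorSet ds) {
  return (ds & IN_ARCHIVE_ROOT) != 0 ? (ds | IN_ROOT) : ds;
}

static_assert(fixup_archive_root(IN_ARCHIVE_ROOT) == (IN_ARCHIVE_ROOT | IN_ROOT),
              "archive roots are also roots");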
--- a/src/hotspot/share/oops/access.inline.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/oops/access.inline.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -788,7 +788,9 @@
       ((IN_HEAP_ARRAY & barrier_strength_default) != 0 ? IN_HEAP : INTERNAL_EMPTY);
     static const DecoratorSet conc_root_is_root = heap_array_is_in_heap |
       ((IN_CONCURRENT_ROOT & heap_array_is_in_heap) != 0 ? IN_ROOT : INTERNAL_EMPTY);
-    static const DecoratorSet value = conc_root_is_root | BT_BUILDTIME_DECORATORS;
+    static const DecoratorSet archive_root_is_root = conc_root_is_root |
+      ((IN_ARCHIVE_ROOT & conc_root_is_root) != 0 ? IN_ROOT : INTERNAL_EMPTY);
+    static const DecoratorSet value = archive_root_is_root | BT_BUILDTIME_DECORATORS;
   };
 
   // Step 2: Reduce types.
@@ -1082,7 +1084,8 @@
     (location_decorators ^ IN_ROOT) == 0 ||
     (location_decorators ^ IN_HEAP) == 0 ||
     (location_decorators ^ (IN_HEAP | IN_HEAP_ARRAY)) == 0 ||
-    (location_decorators ^ (IN_ROOT | IN_CONCURRENT_ROOT)) == 0
+    (location_decorators ^ (IN_ROOT | IN_CONCURRENT_ROOT)) == 0 ||
+    (location_decorators ^ (IN_ROOT | IN_ARCHIVE_ROOT)) == 0
   ));
 }
 
--- a/src/hotspot/share/oops/accessBackend.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/oops/accessBackend.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -172,18 +172,3 @@
     Copy::conjoint_jlongs_atomic(src, dst, length);
   }
 }
-
-template void AccessInternal::arraycopy_conjoint<jbyte>(jbyte* src, jbyte* dst, size_t length);
-template void AccessInternal::arraycopy_conjoint<jshort>(jshort* src, jshort* dst, size_t length);
-template void AccessInternal::arraycopy_conjoint<jint>(jint* src, jint* dst, size_t length);
-template void AccessInternal::arraycopy_conjoint<jlong>(jlong* src, jlong* dst, size_t length);
-
-template void AccessInternal::arraycopy_arrayof_conjoint<jbyte>(jbyte* src, jbyte* dst, size_t length);
-template void AccessInternal::arraycopy_arrayof_conjoint<jshort>(jshort* src, jshort* dst, size_t length);
-template void AccessInternal::arraycopy_arrayof_conjoint<jint>(jint* src, jint* dst, size_t length);
-template void AccessInternal::arraycopy_arrayof_conjoint<jlong>(jlong* src, jlong* dst, size_t length);
-
-template void AccessInternal::arraycopy_conjoint_atomic<jbyte>(jbyte* src, jbyte* dst, size_t length);
-template void AccessInternal::arraycopy_conjoint_atomic<jshort>(jshort* src, jshort* dst, size_t length);
-template void AccessInternal::arraycopy_conjoint_atomic<jint>(jint* src, jint* dst, size_t length);
-template void AccessInternal::arraycopy_conjoint_atomic<jlong>(jlong* src, jlong* dst, size_t length);
--- a/src/hotspot/share/oops/constantPool.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/oops/constantPool.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,9 +49,6 @@
 #include "runtime/signature.hpp"
 #include "runtime/vframe.hpp"
 #include "utilities/copy.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
-#endif // INCLUDE_ALL_GCS
 
 ConstantPool* ConstantPool::allocate(ClassLoaderData* loader_data, int length, TRAPS) {
   Array<u1>* tags = MetadataFactory::new_array<u1>(loader_data, length, 0, CHECK_NULL);
@@ -333,13 +330,8 @@
     if (MetaspaceShared::open_archive_heap_region_mapped() &&
         _cache->archived_references() != NULL) {
       oop archived = _cache->archived_references();
-      // Make sure GC knows the cached object is now live. This is necessary after
-      // initial GC marking and during concurrent marking as strong roots are only
-      // scanned during initial marking (at the start of the GC marking).
-      assert(UseG1GC, "Requires G1 GC");
-      G1SATBCardTableModRefBS::enqueue(archived);
       // Create handle for the archived resolved reference array object
-      Handle refs_handle(THREAD, (oop)archived);
+      Handle refs_handle(THREAD, archived);
       set_resolved_references(loader_data->add_handle(refs_handle));
     } else
 #endif
@@ -615,7 +607,6 @@
   return symbol_at(signature_index);
 }
 
-
 int ConstantPool::impl_name_and_type_ref_index_at(int which, bool uncached) {
   int i = which;
   if (!uncached && cache() != NULL) {
@@ -629,14 +620,18 @@
     // change byte-ordering and go via cache
     i = remap_instruction_operand_from_cache(which);
   } else {
-    if (tag_at(which).is_invoke_dynamic()) {
+    if (tag_at(which).is_invoke_dynamic() ||
+        tag_at(which).is_dynamic_constant() ||
+        tag_at(which).is_dynamic_constant_in_error()) {
       int pool_index = invoke_dynamic_name_and_type_ref_index_at(which);
       assert(tag_at(pool_index).is_name_and_type(), "");
       return pool_index;
     }
   }
   assert(tag_at(i).is_field_or_method(), "Corrupted constant pool");
-  assert(!tag_at(i).is_invoke_dynamic(), "Must be handled above");
+  assert(!tag_at(i).is_invoke_dynamic() &&
+         !tag_at(i).is_dynamic_constant() &&
+         !tag_at(i).is_dynamic_constant_in_error(), "Must be handled above");
   jint ref_index = *int_at_addr(i);
   return extract_high_short_from_int(ref_index);
 }
@@ -680,16 +675,12 @@
 
 
 void ConstantPool::verify_constant_pool_resolve(const constantPoolHandle& this_cp, Klass* k, TRAPS) {
- if (k->is_instance_klass() || k->is_objArray_klass()) {
-    InstanceKlass* holder = this_cp->pool_holder();
-    Klass* elem = k->is_instance_klass() ? k : ObjArrayKlass::cast(k)->bottom_klass();
-
-    // The element type could be a typeArray - we only need the access check if it is
-    // an reference to another class
-    if (elem->is_instance_klass()) {
-      LinkResolver::check_klass_accessability(holder, elem, CHECK);
-    }
+  if (!(k->is_instance_klass() || k->is_objArray_klass())) {
+    return;  // short cut, typeArray klass is always accessible
   }
+  Klass* holder = this_cp->pool_holder();
+  bool fold_type_to_class = true;
+  LinkResolver::check_klass_accessability(holder, k, fold_type_to_class, CHECK);
 }
 
 
@@ -777,8 +768,8 @@
   THROW_MSG(error, message->as_C_string());
 }
 
-// If resolution for Class, MethodHandle or MethodType fails, save the exception
-// in the resolution error table, so that the same exception is thrown again.
+// If resolution for Class, Dynamic constant, MethodHandle or MethodType fails, save the
+// exception in the resolution error table, so that the same exception is thrown again.
 void ConstantPool::save_and_throw_exception(const constantPoolHandle& this_cp, int which,
                                             constantTag tag, TRAPS) {
   Symbol* error = PENDING_EXCEPTION->klass()->name();
@@ -814,16 +805,31 @@
   }
 }
 
+BasicType ConstantPool::basic_type_for_constant_at(int which) {
+  constantTag tag = tag_at(which);
+  if (tag.is_dynamic_constant() ||
+      tag.is_dynamic_constant_in_error()) {
+    // have to look at the signature for this one
+    Symbol* constant_type = uncached_signature_ref_at(which);
+    return FieldType::basic_type(constant_type);
+  }
+  return tag.basic_type();
+}
+
 // Called to resolve constants in the constant pool and return an oop.
 // Some constant pool entries cache their resolved oop. This is also
 // called to create oops from constants to use in arguments for invokedynamic
-oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp, int index, int cache_index, TRAPS) {
+oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp,
+                                           int index, int cache_index,
+                                           bool* status_return, TRAPS) {
   oop result_oop = NULL;
   Handle throw_exception;
 
   if (cache_index == _possible_index_sentinel) {
     // It is possible that this constant is one which is cached in the objects.
     // We'll do a linear search.  This should be OK because this usage is rare.
+    // FIXME: If bootstrap specifiers stress this code, consider putting in
+    // a reverse index.  Binary search over a short array should do it.
     assert(index > 0, "valid index");
     cache_index = this_cp->cp_to_object_index(index);
   }
@@ -833,6 +839,12 @@
   if (cache_index >= 0) {
     result_oop = this_cp->resolved_references()->obj_at(cache_index);
     if (result_oop != NULL) {
+      if (result_oop == Universe::the_null_sentinel()) {
+        DEBUG_ONLY(int temp_index = (index >= 0 ? index : this_cp->object_to_cp_index(cache_index)));
+        assert(this_cp->tag_at(temp_index).is_dynamic_constant(), "only condy uses the null sentinel");
+        result_oop = NULL;
+      }
+      if (status_return != NULL)  (*status_return) = true;
       return result_oop;
       // That was easy...
     }
@@ -843,6 +855,35 @@
 
   constantTag tag = this_cp->tag_at(index);
 
+  if (status_return != NULL) {
+    // don't trigger resolution if the constant might need it
+    switch (tag.value()) {
+    case JVM_CONSTANT_Class:
+    {
+      CPKlassSlot kslot = this_cp->klass_slot_at(index);
+      int resolved_klass_index = kslot.resolved_klass_index();
+      if (this_cp->resolved_klasses()->at(resolved_klass_index) == NULL) {
+        (*status_return) = false;
+        return NULL;
+      }
+      // the klass is waiting in the CP; go get it
+      break;
+    }
+    case JVM_CONSTANT_String:
+    case JVM_CONSTANT_Integer:
+    case JVM_CONSTANT_Float:
+    case JVM_CONSTANT_Long:
+    case JVM_CONSTANT_Double:
+      // these guys trigger OOM at worst
+      break;
+    default:
+      (*status_return) = false;
+      return NULL;
+    }
+    // from now on there is either success or an OOME
+    (*status_return) = true;
+  }
+
   switch (tag.value()) {
 
   case JVM_CONSTANT_UnresolvedClass:
@@ -856,6 +897,63 @@
       break;
     }
 
+  case JVM_CONSTANT_Dynamic:
+    {
+      Klass* current_klass  = this_cp->pool_holder();
+      Symbol* constant_name = this_cp->uncached_name_ref_at(index);
+      Symbol* constant_type = this_cp->uncached_signature_ref_at(index);
+
+      // The initial step in resolving an unresolved symbolic reference to a
+      // dynamically-computed constant is to resolve the symbolic reference to a
+      // method handle which will be the bootstrap method for the dynamically-computed
+      // constant. If resolution of the java.lang.invoke.MethodHandle for the bootstrap
+      // method fails, then a MethodHandleInError is stored at the corresponding
+      // bootstrap method's CP index for the CONSTANT_MethodHandle_info. No need to
+      // set a DynamicConstantInError here since any subsequent use of this
+      // bootstrap method will encounter the resolution of MethodHandleInError.
+      oop bsm_info = this_cp->resolve_bootstrap_specifier_at(index, THREAD);
+      Exceptions::wrap_dynamic_exception(CHECK_NULL);
+      assert(bsm_info != NULL, "");
+      // FIXME: Cache this once per BootstrapMethods entry, not once per CONSTANT_Dynamic.
+      Handle bootstrap_specifier = Handle(THREAD, bsm_info);
+
+      // Resolve the Dynamically-Computed constant to invoke the BSM in order to obtain the resulting oop.
+      Handle value = SystemDictionary::link_dynamic_constant(current_klass,
+                                                             index,
+                                                             bootstrap_specifier,
+                                                             constant_name,
+                                                             constant_type,
+                                                             THREAD);
+      result_oop = value();
+      Exceptions::wrap_dynamic_exception(THREAD);
+      if (HAS_PENDING_EXCEPTION) {
+        // Resolution failure of the dynamically-computed constant, save_and_throw_exception
+        // will check for a LinkageError and store a DynamicConstantInError.
+        save_and_throw_exception(this_cp, index, tag, CHECK_NULL);
+      }
+      BasicType type = FieldType::basic_type(constant_type);
+      if (!is_reference_type(type)) {
+        // Make sure the primitive value is properly boxed.
+        // This is a JDK responsibility.
+        const char* fail = NULL;
+        if (result_oop == NULL) {
+          fail = "null result instead of box";
+        } else if (!is_java_primitive(type)) {
+          // FIXME: support value types via unboxing
+          fail = "can only handle references and primitives";
+        } else if (!java_lang_boxing_object::is_instance(result_oop, type)) {
+          fail = "primitive is not properly boxed";
+        }
+        if (fail != NULL) {
+          // Since this exception is not a LinkageError, throw exception
+          // but do not save a DynamicInError resolution result.
+          // See section 5.4.3 of the VM spec.
+          THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), fail);
+        }
+      }
+      break;
+    }
+
   case JVM_CONSTANT_String:
     assert(cache_index != _no_index_sentinel, "should have been set");
     if (this_cp->is_pseudo_string_at(index)) {
@@ -865,6 +963,7 @@
     result_oop = string_at_impl(this_cp, index, cache_index, CHECK_NULL);
     break;
 
+  case JVM_CONSTANT_DynamicInError:
   case JVM_CONSTANT_MethodHandleInError:
   case JVM_CONSTANT_MethodTypeInError:
     {
@@ -965,15 +1064,20 @@
     // The important thing here is that all threads pick up the same result.
     // It doesn't matter which racing thread wins, as long as only one
     // result is used by all threads, and all future queries.
-    oop old_result = this_cp->resolved_references()->atomic_compare_exchange_oop(cache_index, result_oop, NULL);
+    oop new_result = (result_oop == NULL ? Universe::the_null_sentinel() : result_oop);
+    oop old_result = this_cp->resolved_references()
+      ->atomic_compare_exchange_oop(cache_index, new_result, NULL);
     if (old_result == NULL) {
       return result_oop;  // was installed
     } else {
       // Return the winning thread's result.  This can be different than
       // the result here for MethodHandles.
+      if (old_result == Universe::the_null_sentinel())
+        old_result = NULL;
       return old_result;
     }
   } else {
+    assert(result_oop != Universe::the_null_sentinel(), "");
     return result_oop;
   }
 }
@@ -987,13 +1091,14 @@
 
 
 oop ConstantPool::resolve_bootstrap_specifier_at_impl(const constantPoolHandle& this_cp, int index, TRAPS) {
-  assert(this_cp->tag_at(index).is_invoke_dynamic(), "Corrupted constant pool");
-
+  assert((this_cp->tag_at(index).is_invoke_dynamic() ||
+          this_cp->tag_at(index).is_dynamic_constant()), "Corrupted constant pool");
   Handle bsm;
   int argc;
   {
-    // JVM_CONSTANT_InvokeDynamic is an ordered pair of [bootm, name&type], plus optional arguments
-    // The bootm, being a JVM_CONSTANT_MethodHandle, has its own cache entry.
+    // JVM_CONSTANT_InvokeDynamic is an ordered pair of [bootm, name&mtype], plus optional arguments
+    // JVM_CONSTANT_Dynamic is an ordered pair of [bootm, name&ftype], plus optional arguments
+    // In both cases, the bootm, being a JVM_CONSTANT_MethodHandle, has its own cache entry.
     // It is accompanied by the optional arguments.
     int bsm_index = this_cp->invoke_dynamic_bootstrap_method_ref_index_at(index);
     oop bsm_oop = this_cp->resolve_possibly_cached_constant_at(bsm_index, CHECK_NULL);
@@ -1003,30 +1108,142 @@
 
     // Extract the optional static arguments.
     argc = this_cp->invoke_dynamic_argument_count_at(index);
-    if (argc == 0)  return bsm_oop;
+
+    // if there are no static arguments, return the bsm by itself:
+    if (argc == 0 && UseBootstrapCallInfo < 2)  return bsm_oop;
 
     bsm = Handle(THREAD, bsm_oop);
   }
 
+  // We are going to return an ordered pair of {bsm, info}, using a 2-array.
   objArrayHandle info;
   {
-    objArrayOop info_oop = oopFactory::new_objArray(SystemDictionary::Object_klass(), 1+argc, CHECK_NULL);
+    objArrayOop info_oop = oopFactory::new_objArray(SystemDictionary::Object_klass(), 2, CHECK_NULL);
     info = objArrayHandle(THREAD, info_oop);
   }
 
   info->obj_at_put(0, bsm());
-  for (int i = 0; i < argc; i++) {
-    int arg_index = this_cp->invoke_dynamic_argument_index_at(index, i);
-    oop arg_oop = this_cp->resolve_possibly_cached_constant_at(arg_index, CHECK_NULL);
-    info->obj_at_put(1+i, arg_oop);
+
+  bool use_BSCI;
+  switch (UseBootstrapCallInfo) {
+  default: use_BSCI = true;  break;  // stress mode
+  case 0:  use_BSCI = false; break;  // stress mode
+  case 1:                            // normal mode
+    // If we were to support an alternative mode of BSM invocation,
+    // we'd convert to pull mode here if the BSM could be a candidate
+    // for that alternative mode.  We can't easily test for things
+    // like varargs here, but we can get away with approximate testing,
+    // since the JDK runtime will make up the difference either way.
+    // For now, exercise the pull-mode path if the BSM is of arity 2,
+    // or if there is a potential condy loop (see below).
+    oop mt_oop = java_lang_invoke_MethodHandle::type(bsm());
+    use_BSCI = (java_lang_invoke_MethodType::ptype_count(mt_oop) == 2);
+    break;
   }
 
+  // Here's a reason to use BSCI even if it wasn't requested:
+  // If a condy uses a condy argument, we want to avoid infinite
+  // recursion (condy loops) in the C code.  It's OK in Java,
+  // because Java has stack overflow checking, so we punt
+  // potentially cyclic cases from C to Java.
+  if (!use_BSCI && this_cp->tag_at(index).is_dynamic_constant()) {
+    bool found_unresolved_condy = false;
+    for (int i = 0; i < argc; i++) {
+      int arg_index = this_cp->invoke_dynamic_argument_index_at(index, i);
+      if (this_cp->tag_at(arg_index).is_dynamic_constant()) {
+        // potential recursion point condy -> condy
+        bool found_it = false;
+        this_cp->find_cached_constant_at(arg_index, found_it, CHECK_NULL);
+        if (!found_it) { found_unresolved_condy = true; break; }
+      }
+    }
+    if (found_unresolved_condy)
+      use_BSCI = true;
+  }
+
+  const int SMALL_ARITY = 5;
+  if (use_BSCI && argc <= SMALL_ARITY && UseBootstrapCallInfo <= 2) {
+    // If there are only a few arguments, and none of them need linking,
+    // push them, instead of asking the JDK runtime to turn around and
+    // pull them, saving a JVM/JDK transition in some simple cases.
+    bool all_resolved = true;
+    for (int i = 0; i < argc; i++) {
+      bool found_it = false;
+      int arg_index = this_cp->invoke_dynamic_argument_index_at(index, i);
+      this_cp->find_cached_constant_at(arg_index, found_it, CHECK_NULL);
+      if (!found_it) { all_resolved = false; break; }
+    }
+    if (all_resolved)
+      use_BSCI = false;
+  }
+
+  if (!use_BSCI) {
+    // return {bsm, {arg...}}; resolution of arguments is done immediately, before JDK code is called
+    objArrayOop args_oop = oopFactory::new_objArray(SystemDictionary::Object_klass(), argc, CHECK_NULL);
+    info->obj_at_put(1, args_oop);   // may overwrite with args[0] below
+    objArrayHandle args(THREAD, args_oop);
+    copy_bootstrap_arguments_at_impl(this_cp, index, 0, argc, args, 0, true, Handle(), CHECK_NULL);
+    if (argc == 1) {
+      // try to discard the singleton array
+      oop arg_oop = args->obj_at(0);
+      if (arg_oop != NULL && !arg_oop->is_array()) {
+        // JVM treats arrays and nulls specially in this position,
+        // but other things are just single arguments
+        info->obj_at_put(1, arg_oop);
+      }
+    }
+  } else {
+    // return {bsm, {arg_count, pool_index}}; JDK code must pull the arguments as needed
+    typeArrayOop ints_oop = oopFactory::new_typeArray(T_INT, 2, CHECK_NULL);
+    ints_oop->int_at_put(0, argc);
+    ints_oop->int_at_put(1, index);
+    info->obj_at_put(1, ints_oop);
+  }
   return info();
 }
 
+void ConstantPool::copy_bootstrap_arguments_at_impl(const constantPoolHandle& this_cp, int index,
+                                                    int start_arg, int end_arg,
+                                                    objArrayHandle info, int pos,
+                                                    bool must_resolve, Handle if_not_available,
+                                                    TRAPS) {
+  int argc;
+  int limit = pos + end_arg - start_arg;
+  // checks: index in range [0..this_cp->length),
+  // tag at index, start..end in range [0..argc],
+  // info array non-null, pos..limit in [0..info.length]
+  if ((0 >= index    || index >= this_cp->length())  ||
+      !(this_cp->tag_at(index).is_invoke_dynamic()    ||
+        this_cp->tag_at(index).is_dynamic_constant()) ||
+      (0 > start_arg || start_arg > end_arg) ||
+      (end_arg > (argc = this_cp->invoke_dynamic_argument_count_at(index))) ||
+      (0 > pos       || pos > limit)         ||
+      (info.is_null() || limit > info->length())) {
+    // An index or something else went wrong; throw an error.
+    // Since this is an internal API, we don't expect this,
+    // so we don't bother to craft a nice message.
+    THROW_MSG(vmSymbols::java_lang_LinkageError(), "bad BSM argument access");
+  }
+  // now we can loop safely
+  int info_i = pos;
+  for (int i = start_arg; i < end_arg; i++) {
+    int arg_index = this_cp->invoke_dynamic_argument_index_at(index, i);
+    oop arg_oop;
+    if (must_resolve) {
+      arg_oop = this_cp->resolve_possibly_cached_constant_at(arg_index, CHECK);
+    } else {
+      bool found_it = false;
+      arg_oop = this_cp->find_cached_constant_at(arg_index, found_it, CHECK);
+      if (!found_it)  arg_oop = if_not_available();
+    }
+    info->obj_at_put(info_i++, arg_oop);
+  }
+}
+
 oop ConstantPool::string_at_impl(const constantPoolHandle& this_cp, int which, int obj_index, TRAPS) {
   // If the string has already been interned, this entry will be non-null
   oop str = this_cp->resolved_references()->obj_at(obj_index);
+  assert(str != Universe::the_null_sentinel(), "");
   if (str != NULL) return str;
   Symbol* sym = this_cp->unresolved_string_at(which);
   str = StringTable::intern(sym, CHECK_(NULL));
@@ -1207,6 +1424,18 @@
     }
   } break;
 
+  case JVM_CONSTANT_Dynamic:
+  {
+    int k1 = invoke_dynamic_name_and_type_ref_index_at(index1);
+    int k2 = cp2->invoke_dynamic_name_and_type_ref_index_at(index2);
+    int i1 = invoke_dynamic_bootstrap_specifier_index(index1);
+    int i2 = cp2->invoke_dynamic_bootstrap_specifier_index(index2);
+    // separate statements and variables because CHECK_false is used
+    bool match_entry = compare_entry_to(k1, cp2, k2, CHECK_false);
+    bool match_operand = compare_operand_to(i1, cp2, i2, CHECK_false);
+    return (match_entry && match_operand);
+  } break;
+
   case JVM_CONSTANT_InvokeDynamic:
   {
     int k1 = invoke_dynamic_name_and_type_ref_index_at(index1);
@@ -1533,6 +1762,15 @@
     to_cp->method_handle_index_at_put(to_i, k1, k2);
   } break;
 
+  case JVM_CONSTANT_Dynamic:
+  case JVM_CONSTANT_DynamicInError:
+  {
+    int k1 = from_cp->invoke_dynamic_bootstrap_specifier_index(from_i);
+    int k2 = from_cp->invoke_dynamic_name_and_type_ref_index_at(from_i);
+    k1 += operand_array_length(to_cp->operands());  // to_cp might already have operands
+    to_cp->dynamic_constant_at_put(to_i, k1, k2);
+  } break;
+
   case JVM_CONSTANT_InvokeDynamic:
   {
     int k1 = from_cp->invoke_dynamic_bootstrap_specifier_index(from_i);
@@ -1794,6 +2032,8 @@
     case JVM_CONSTANT_NameAndType:
       return 5;
 
+    case JVM_CONSTANT_Dynamic:
+    case JVM_CONSTANT_DynamicInError:
     case JVM_CONSTANT_InvokeDynamic:
       // u1 tag, u2 bsm, u2 nt
       return 5;
@@ -1979,6 +2219,17 @@
         DBG(printf("JVM_CONSTANT_MethodType: %hd", idx1));
         break;
       }
+      case JVM_CONSTANT_Dynamic:
+      case JVM_CONSTANT_DynamicInError: {
+        *bytes = tag;
+        idx1 = extract_low_short_from_int(*int_at_addr(idx));
+        idx2 = extract_high_short_from_int(*int_at_addr(idx));
+        assert(idx2 == invoke_dynamic_name_and_type_ref_index_at(idx), "correct half of u4");
+        Bytes::put_Java_u2((address) (bytes+1), idx1);
+        Bytes::put_Java_u2((address) (bytes+3), idx2);
+        DBG(printf("JVM_CONSTANT_Dynamic: %hd %hd", idx1, idx2));
+        break;
+      }
       case JVM_CONSTANT_InvokeDynamic: {
         *bytes = tag;
         idx1 = extract_low_short_from_int(*int_at_addr(idx));
@@ -2184,6 +2435,21 @@
     case JVM_CONSTANT_MethodTypeInError :
       st->print("signature_index=%d", method_type_index_at(index));
       break;
+    case JVM_CONSTANT_Dynamic :
+    case JVM_CONSTANT_DynamicInError :
+      {
+        st->print("bootstrap_method_index=%d", invoke_dynamic_bootstrap_method_ref_index_at(index));
+        st->print(" type_index=%d", invoke_dynamic_name_and_type_ref_index_at(index));
+        int argc = invoke_dynamic_argument_count_at(index);
+        if (argc > 0) {
+          for (int arg_i = 0; arg_i < argc; arg_i++) {
+            int arg = invoke_dynamic_argument_index_at(index, arg_i);
+            st->print((arg_i == 0 ? " arguments={%d" : ", %d"), arg);
+          }
+          st->print("}");
+        }
+      }
+      break;
     case JVM_CONSTANT_InvokeDynamic :
       {
         st->print("bootstrap_method_index=%d", invoke_dynamic_bootstrap_method_ref_index_at(index));
--- a/src/hotspot/share/oops/constantPool.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/oops/constantPool.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -113,9 +113,10 @@
   Array<Klass*>*       _resolved_klasses;
 
   enum {
-    _has_preresolution = 1,           // Flags
-    _on_stack          = 2,
-    _is_shared         = 4
+    _has_preresolution    = 1,       // Flags
+    _on_stack             = 2,
+    _is_shared            = 4,
+    _has_dynamic_constant = 8
   };
 
   int                  _flags;  // old fashioned bit twiddling
@@ -207,6 +208,9 @@
   // Faster than MetaspaceObj::is_shared() - used by set_on_stack()
   bool is_shared() const                     { return (_flags & _is_shared) != 0; }
 
+  bool has_dynamic_constant() const       { return (_flags & _has_dynamic_constant) != 0; }
+  void set_has_dynamic_constant()         { _flags |= _has_dynamic_constant; }
+
   // Klass holding pool
   InstanceKlass* pool_holder() const      { return _pool_holder; }
   void set_pool_holder(InstanceKlass* k)  { _pool_holder = k; }
@@ -297,6 +301,11 @@
     *int_at_addr(which) = ref_index;
   }
 
+  void dynamic_constant_at_put(int which, int bootstrap_specifier_index, int name_and_type_index) {
+    tag_at_put(which, JVM_CONSTANT_Dynamic);
+    *int_at_addr(which) = ((jint) name_and_type_index<<16) | bootstrap_specifier_index;
+  }
+
   void invoke_dynamic_at_put(int which, int bootstrap_specifier_index, int name_and_type_index) {
     tag_at_put(which, JVM_CONSTANT_InvokeDynamic);
     *int_at_addr(which) = ((jint) name_and_type_index<<16) | bootstrap_specifier_index;
@@ -554,11 +563,15 @@
   }
 
   int invoke_dynamic_name_and_type_ref_index_at(int which) {
-    assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
+    assert(tag_at(which).is_invoke_dynamic() ||
+           tag_at(which).is_dynamic_constant() ||
+           tag_at(which).is_dynamic_constant_in_error(), "Corrupted constant pool");
     return extract_high_short_from_int(*int_at_addr(which));
   }
   int invoke_dynamic_bootstrap_specifier_index(int which) {
-    assert(tag_at(which).value() == JVM_CONSTANT_InvokeDynamic, "Corrupted constant pool");
+    assert(tag_at(which).is_invoke_dynamic() ||
+           tag_at(which).is_dynamic_constant() ||
+           tag_at(which).is_dynamic_constant_in_error(), "Corrupted constant pool");
     return extract_low_short_from_int(*int_at_addr(which));
   }
   int invoke_dynamic_operand_base(int which) {
@@ -608,7 +621,7 @@
   }
 #endif //ASSERT
 
-  // layout of InvokeDynamic bootstrap method specifier (in second part of operands array):
+  // layout of InvokeDynamic and Dynamic bootstrap method specifier (in second part of operands array):
   enum {
          _indy_bsm_offset  = 0,  // CONSTANT_MethodHandle bsm
          _indy_argc_offset = 1,  // u2 argc
@@ -654,14 +667,17 @@
   // Shrink the operands array to a smaller array with new_len length
   void shrink_operands(int new_len, TRAPS);
 
-
   int invoke_dynamic_bootstrap_method_ref_index_at(int which) {
-    assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
+    assert(tag_at(which).is_invoke_dynamic() ||
+           tag_at(which).is_dynamic_constant() ||
+           tag_at(which).is_dynamic_constant_in_error(), "Corrupted constant pool");
     int op_base = invoke_dynamic_operand_base(which);
     return operands()->at(op_base + _indy_bsm_offset);
   }
   int invoke_dynamic_argument_count_at(int which) {
-    assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
+    assert(tag_at(which).is_invoke_dynamic() ||
+           tag_at(which).is_dynamic_constant() ||
+           tag_at(which).is_dynamic_constant_in_error(), "Corrupted constant pool");
     int op_base = invoke_dynamic_operand_base(which);
     int argc = operands()->at(op_base + _indy_argc_offset);
     DEBUG_ONLY(int end_offset = op_base + _indy_argv_offset + argc;
@@ -731,20 +747,27 @@
   enum { _no_index_sentinel = -1, _possible_index_sentinel = -2 };
  public:
 
+  BasicType basic_type_for_constant_at(int which);
+
   // Resolve late bound constants.
   oop resolve_constant_at(int index, TRAPS) {
     constantPoolHandle h_this(THREAD, this);
-    return resolve_constant_at_impl(h_this, index, _no_index_sentinel, THREAD);
+    return resolve_constant_at_impl(h_this, index, _no_index_sentinel, NULL, THREAD);
   }
 
   oop resolve_cached_constant_at(int cache_index, TRAPS) {
     constantPoolHandle h_this(THREAD, this);
-    return resolve_constant_at_impl(h_this, _no_index_sentinel, cache_index, THREAD);
+    return resolve_constant_at_impl(h_this, _no_index_sentinel, cache_index, NULL, THREAD);
   }
 
   oop resolve_possibly_cached_constant_at(int pool_index, TRAPS) {
     constantPoolHandle h_this(THREAD, this);
-    return resolve_constant_at_impl(h_this, pool_index, _possible_index_sentinel, THREAD);
+    return resolve_constant_at_impl(h_this, pool_index, _possible_index_sentinel, NULL, THREAD);
+  }
+
+  oop find_cached_constant_at(int pool_index, bool& found_it, TRAPS) {
+    constantPoolHandle h_this(THREAD, this);
+    return resolve_constant_at_impl(h_this, pool_index, _possible_index_sentinel, &found_it, THREAD);
   }
 
   oop resolve_bootstrap_specifier_at(int index, TRAPS) {
@@ -752,6 +775,15 @@
     return resolve_bootstrap_specifier_at_impl(h_this, index, THREAD);
   }
 
+  void copy_bootstrap_arguments_at(int index,
+                                   int start_arg, int end_arg,
+                                   objArrayHandle info, int pos,
+                                   bool must_resolve, Handle if_not_available, TRAPS) {
+    constantPoolHandle h_this(THREAD, this);
+    copy_bootstrap_arguments_at_impl(h_this, index, start_arg, end_arg,
+                                     info, pos, must_resolve, if_not_available, THREAD);
+  }
+
   // Klass name matches name at offset
   bool klass_name_at_matches(const InstanceKlass* k, int which);
 
@@ -833,6 +865,7 @@
 
   Symbol* impl_name_ref_at(int which, bool uncached);
   Symbol* impl_signature_ref_at(int which, bool uncached);
+
   int       impl_klass_ref_index_at(int which, bool uncached);
   int       impl_name_and_type_ref_index_at(int which, bool uncached);
   constantTag impl_tag_ref_at(int which, bool uncached);
@@ -862,8 +895,13 @@
   // Resolve string constants (to prevent allocation during compilation)
   static void resolve_string_constants_impl(const constantPoolHandle& this_cp, TRAPS);
 
-  static oop resolve_constant_at_impl(const constantPoolHandle& this_cp, int index, int cache_index, TRAPS);
+  static oop resolve_constant_at_impl(const constantPoolHandle& this_cp, int index, int cache_index,
+                                      bool* status_return, TRAPS);
   static oop resolve_bootstrap_specifier_at_impl(const constantPoolHandle& this_cp, int index, TRAPS);
+  static void copy_bootstrap_arguments_at_impl(const constantPoolHandle& this_cp, int index,
+                                               int start_arg, int end_arg,
+                                               objArrayHandle info, int pos,
+                                               bool must_resolve, Handle if_not_available, TRAPS);
 
   // Exception handling
   static Symbol* exception_message(const constantPoolHandle& this_cp, int which, constantTag tag, oop pending_exception);
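
Aside on the new dynamic_constant_at_put() above: like invoke_dynamic_at_put(), it packs the two u2 operands of the entry into a single 32-bit pool slot, with the name-and-type index in the high half and the bootstrap-specifier index in the low half, which the extract_high_short_from_int()/extract_low_short_from_int() style accessors later pull apart. A minimal sketch of that packing (standalone C++, helper names are illustrative):

    #include <cassert>
    #include <cstdint>

    // Pack the two u2 operands of a CONSTANT_Dynamic/CONSTANT_InvokeDynamic entry
    // into one 32-bit constant pool slot, mirroring dynamic_constant_at_put().
    static int32_t pack_entry(int name_and_type_index, int bootstrap_specifier_index) {
      return ((int32_t)name_and_type_index << 16) | bootstrap_specifier_index;
    }

    // Counterparts of the high/low short extraction used by the *_index_at accessors.
    static int high_short(int32_t v) { return (v >> 16) & 0xFFFF; }
    static int low_short (int32_t v) { return v & 0xFFFF; }

    int main() {
      int32_t entry = pack_entry(/*name_and_type_index*/ 7, /*bootstrap_specifier_index*/ 3);
      assert(high_short(entry) == 7);   // invoke_dynamic_name_and_type_ref_index_at()
      assert(low_short(entry)  == 3);   // invoke_dynamic_bootstrap_specifier_index()
      return 0;
    }
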
--- a/src/hotspot/share/oops/cpCache.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/oops/cpCache.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -33,6 +33,7 @@
 #include "memory/metaspaceClosure.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.inline.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/cpCache.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -741,13 +742,16 @@
 
 #if INCLUDE_CDS_JAVA_HEAP
 oop ConstantPoolCache::archived_references() {
-  assert(UseSharedSpaces, "UseSharedSpaces expected.");
-  return oopDesc::decode_heap_oop(_archived_references);
+  // Loading an archive root forces the oop to become strongly reachable.
+  // For example, if it is loaded during concurrent marking in a SATB
+  // collector, it will be enqueued to the SATB queue, effectively
+  // shading the previously white object gray.
+  return RootAccess<IN_ARCHIVE_ROOT>::oop_load(&_archived_references);
 }
 
 void ConstantPoolCache::set_archived_references(oop o) {
   assert(DumpSharedSpaces, "called only during runtime");
-  _archived_references = oopDesc::encode_heap_oop(o);
+  RootAccess<IN_ARCHIVE_ROOT>::oop_store(&_archived_references, o);
 }
 #endif
 
--- a/src/hotspot/share/oops/generateOopMap.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/oops/generateOopMap.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1878,13 +1878,15 @@
   ConstantPool* cp  = method()->constants();
   constantTag tag = cp->tag_at(ldc.pool_index()); // idx is index in resolved_references
   BasicType       bt  = ldc.result_type();
+#ifdef ASSERT
+  BasicType   tag_bt = tag.is_dynamic_constant() ? bt : tag.basic_type();
+  assert(bt == tag_bt, "same result");
+#endif
   CellTypeState   cts;
-  if (tag.basic_type() == T_OBJECT) {
+  if (is_reference_type(bt)) {  // could be T_ARRAY with condy
     assert(!tag.is_string_index() && !tag.is_klass_index(), "Unexpected index tag");
-    assert(bt == T_OBJECT, "Guard is incorrect");
     cts = CellTypeState::make_line_ref(bci);
   } else {
-    assert(bt != T_OBJECT, "Guard is incorrect");
     cts = valCTS;
   }
   ppush1(cts);
--- a/src/hotspot/share/oops/instanceKlass.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/oops/instanceKlass.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -2229,7 +2229,7 @@
   }
 
   // deallocate the cached class file
-  if (_cached_class_file != NULL && !MetaspaceShared::is_in_shared_space(_cached_class_file)) {
+  if (_cached_class_file != NULL && !MetaspaceShared::is_in_shared_metaspace(_cached_class_file)) {
     os::free(_cached_class_file);
     _cached_class_file = NULL;
   }
@@ -3732,7 +3732,7 @@
 
 #if INCLUDE_JVMTI
 JvmtiCachedClassFileData* InstanceKlass::get_cached_class_file() {
-  if (MetaspaceShared::is_in_shared_space(_cached_class_file)) {
+  if (MetaspaceShared::is_in_shared_metaspace(_cached_class_file)) {
     // Ignore the archived class stream data
     return NULL;
   } else {
@@ -3754,7 +3754,7 @@
     return _cached_class_file;
   } else {
     assert(this->is_shared(), "class should be shared");
-    if (MetaspaceShared::is_in_shared_space(_cached_class_file)) {
+    if (MetaspaceShared::is_in_shared_metaspace(_cached_class_file)) {
       return _cached_class_file;
     } else {
       return NULL;
--- a/src/hotspot/share/oops/klassVtable.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/oops/klassVtable.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1040,7 +1040,7 @@
   if (m == NULL) return;
 
 #ifdef ASSERT
-  if (MetaspaceShared::is_in_shared_space((void*)&_method) &&
+  if (MetaspaceShared::is_in_shared_metaspace((void*)&_method) &&
      !MetaspaceShared::remapped_readwrite()) {
     // At runtime initialize_itable is rerun as part of link_class_impl()
     // for a shared class loaded by the non-boot loader.
--- a/src/hotspot/share/oops/method.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/oops/method.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2180,7 +2180,7 @@
   } else if ((intptr_t(this) & (wordSize-1)) != 0) {
     // Quick sanity check on pointer.
     return false;
-  } else if (MetaspaceShared::is_in_shared_space(this)) {
+  } else if (is_shared()) {
     return MetaspaceShared::is_valid_shared_method(this);
   } else if (Metaspace::contains_non_shared(this)) {
     return has_method_vptr((const void*)this);
--- a/src/hotspot/share/oops/oop.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/oops/oop.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -143,6 +143,21 @@
   inline static bool is_null(oop obj)       { return obj == NULL; }
   inline static bool is_null(narrowOop obj) { return obj == 0; }
 
+  // Standard compare function returns negative value if o1 < o2
+  //                                   0              if o1 == o2
+  //                                   positive value if o1 > o2
+  inline static int  compare(oop o1, oop o2) {
+    void* o1_addr = (void*)o1;
+    void* o2_addr = (void*)o2;
+    if (o1_addr < o2_addr) {
+      return -1;
+    } else if (o1_addr > o2_addr) {
+      return 1;
+    } else {
+      return 0;
+    }
+  }
+
   // Decode an oop pointer from a narrowOop if compressed.
   // These are overloaded for oop and narrowOop as are the other functions
   // below so that they can be called in template functions.
--- a/src/hotspot/share/oops/oopsHierarchy.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/oops/oopsHierarchy.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -106,12 +106,6 @@
   bool operator!=(const volatile oop o) const  { return obj() != o.obj(); }
   bool operator!=(void *p) const      { return obj() != p; }
 
-  bool operator<(oop o) const         { return obj() < o.obj(); }
-  bool operator>(oop o) const         { return obj() > o.obj(); }
-  bool operator<=(oop o) const        { return obj() <= o.obj(); }
-  bool operator>=(oop o) const        { return obj() >= o.obj(); }
-  bool operator!() const              { return !obj(); }
-
   // Assignment
   oop& operator=(const oop& o)                            { _o = o.obj(); return *this; }
   volatile oop& operator=(const oop& o) volatile          { _o = o.obj(); return *this; }
--- a/src/hotspot/share/opto/parse2.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/opto/parse2.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1483,8 +1483,9 @@
     // If the constant is unresolved, run this BC once in the interpreter.
     {
       ciConstant constant = iter().get_constant();
-      if (constant.basic_type() == T_OBJECT &&
-          !constant.as_object()->is_loaded()) {
+      if (!constant.is_valid() ||
+          (constant.basic_type() == T_OBJECT &&
+           !constant.as_object()->is_loaded())) {
         int index = iter().get_constant_pool_index();
         constantTag tag = iter().get_constant_pool_tag(index);
         uncommon_trap(Deoptimization::make_trap_request
--- a/src/hotspot/share/prims/forte.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/prims/forte.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -133,7 +133,7 @@
     // By the time we get here we should never see unsafe but better
    // safe than segv'd
 
-    if (loop_count > loop_max || !_frame.safe_for_sender(_thread)) {
+    if ((loop_max != 0 && loop_count > loop_max) || !_frame.safe_for_sender(_thread)) {
       _mode = at_end_mode;
       return;
     }
@@ -324,7 +324,7 @@
     int loop_max = MaxJavaStackTraceDepth * 2;
     RegisterMap map(thread, false);
 
-    for (loop_count = 0; loop_count < loop_max; loop_count++) {
+    for (loop_count = 0; loop_max == 0 || loop_count < loop_max; loop_count++) {
       if (!candidate.safe_for_sender(thread)) return false;
       candidate = candidate.sender(&map);
       if (candidate.cb() != NULL) break;
@@ -338,7 +338,7 @@
   int loop_max = MaxJavaStackTraceDepth * 2;
   RegisterMap map(thread, false);
 
-  for (loop_count = 0; loop_count < loop_max; loop_count++) {
+  for (loop_count = 0; loop_max == 0 || loop_count < loop_max; loop_count++) {
 
     if (candidate.is_entry_frame()) {
       // jcw is NULL if the java call wrapper couldn't be found
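
Aside on the forte.cpp hunks above: MaxJavaStackTraceDepth can be set to 0, in which case loop_max (depth * 2) is also 0; the rewritten guards treat that as "no limit" rather than "zero iterations". A minimal sketch of the guard, with illustrative names (not the AsyncGetCallTrace code itself):

    #include <cstdio>

    // A max of 0 means "unbounded": the upper bound is enforced only when loop_max != 0.
    static int walk_frames(int frames_available, int loop_max) {
      int loop_count = 0;
      for (loop_count = 0; loop_max == 0 || loop_count < loop_max; loop_count++) {
        if (loop_count >= frames_available) break;   // stand-in for "no more frames to walk"
      }
      return loop_count;
    }

    int main() {
      std::printf("bounded walk: %d frames, unbounded walk: %d frames\n",
                  walk_frames(100, 10), walk_frames(100, 0));
      return 0;
    }
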
--- a/src/hotspot/share/prims/jni.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/prims/jni.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -862,16 +862,10 @@
 
   HOTSPOT_JNI_GETOBJECTREFTYPE_ENTRY(env, obj);
 
-  jobjectRefType ret;
-  if (JNIHandles::is_local_handle(thread, obj) ||
-      JNIHandles::is_frame_handle(thread, obj))
-    ret = JNILocalRefType;
-  else if (JNIHandles::is_global_handle(obj))
-    ret = JNIGlobalRefType;
-  else if (JNIHandles::is_weak_global_handle(obj))
-    ret = JNIWeakGlobalRefType;
-  else
-    ret = JNIInvalidRefType;
+  jobjectRefType ret = JNIInvalidRefType;
+  if (obj != NULL) {
+    ret = JNIHandles::handle_type(thread, obj);
+  }
 
   HOTSPOT_JNI_GETOBJECTREFTYPE_RETURN((void *) ret);
   return ret;
--- a/src/hotspot/share/prims/jniCheck.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/prims/jniCheck.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -288,7 +288,7 @@
   /* validate the object being passed and then get its class */
   ASSERT_OOPS_ALLOWED;
   oop oopObj = jniCheck::validate_object(thr, obj);
-  if (!oopObj) {
+  if (oopObj == NULL) {
     ReportJNIFatalError(thr, fatal_null_object);
   }
   Klass* k_oop = oopObj->klass();
@@ -318,7 +318,7 @@
 {
   ASSERT_OOPS_ALLOWED;
   oop s = jniCheck::validate_object(thr, js);
-  if (!s || !java_lang_String::is_instance(s))
+  if ((s == NULL) || !java_lang_String::is_instance(s))
     ReportJNIFatalError(thr, fatal_non_string);
 }
 
@@ -435,10 +435,7 @@
 }
 
 oop jniCheck::validate_handle(JavaThread* thr, jobject obj) {
-  if (JNIHandles::is_frame_handle(thr, obj) ||
-      JNIHandles::is_local_handle(thr, obj) ||
-      JNIHandles::is_global_handle(obj) ||
-      JNIHandles::is_weak_global_handle(obj)) {
+  if ((obj != NULL) && (JNIHandles::handle_type(thr, obj) != JNIInvalidRefType)) {
     ASSERT_OOPS_ALLOWED;
     return JNIHandles::resolve_external_guard(obj);
   }
@@ -464,14 +461,13 @@
 
 
 oop jniCheck::validate_object(JavaThread* thr, jobject obj) {
-    if (!obj)
-        return NULL;
-    ASSERT_OOPS_ALLOWED;
-    oop oopObj = jniCheck::validate_handle(thr, obj);
-    if (!oopObj) {
-      ReportJNIFatalError(thr, fatal_bad_ref_to_jni);
-    }
-    return oopObj;
+  if (obj == NULL) return NULL;
+  ASSERT_OOPS_ALLOWED;
+  oop oopObj = jniCheck::validate_handle(thr, obj);
+  if (oopObj == NULL) {
+    ReportJNIFatalError(thr, fatal_bad_ref_to_jni);
+  }
+  return oopObj;
 }
 
 // Warn if a class descriptor is in decorated form; class descriptors
@@ -495,7 +491,7 @@
 Klass* jniCheck::validate_class(JavaThread* thr, jclass clazz, bool allow_primitive) {
   ASSERT_OOPS_ALLOWED;
   oop mirror = jniCheck::validate_handle(thr, clazz);
-  if (!mirror) {
+  if (mirror == NULL) {
     ReportJNIFatalError(thr, fatal_received_null_class);
   }
 
--- a/src/hotspot/share/prims/jvm.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/prims/jvm.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -2212,6 +2212,8 @@
       result = JVM_CONSTANT_MethodType;
   } else if (tag.is_method_handle_in_error()) {
       result = JVM_CONSTANT_MethodHandle;
+  } else if (tag.is_dynamic_constant_in_error()) {
+      result = JVM_CONSTANT_Dynamic;
   }
   return result;
 }
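
Aside on the jvm.cpp hunk above: the surrounding switch reports the in-error variants of resolvable tags as their base tags, and the added branch extends that to the new dynamic-constant error state. A minimal sketch of the mapping (standalone C++; the enum values are illustrative, not the JVM_CONSTANT_* numbering):

    #include <cassert>

    // Report *_InError tags as their original constant tags, as the hunk above does
    // for dynamic constants alongside method handles and method types.
    enum Tag { Dynamic, DynamicInError, MethodHandle, MethodHandleInError,
               MethodType, MethodTypeInError };

    static Tag reported_tag(Tag t) {
      switch (t) {
        case DynamicInError:      return Dynamic;
        case MethodHandleInError: return MethodHandle;
        case MethodTypeInError:   return MethodType;
        default:                  return t;
      }
    }

    int main() {
      assert(reported_tag(DynamicInError) == Dynamic);
      assert(reported_tag(MethodType)     == MethodType);
      return 0;
    }
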
--- a/src/hotspot/share/prims/jvmti.xml	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/prims/jvmti.xml	Mon Feb 05 23:12:03 2018 +0100
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="ISO-8859-1"?>
 <?xml-stylesheet type="text/xsl" href="jvmti.xsl"?>
 <!--
- Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
@@ -333,6 +333,8 @@
 
    <!ELEMENT p EMPTY>
 
+   <!ELEMENT blockquote ANY>
+
    <!ELEMENT dl  (dt|dd)+>
 
    <!ELEMENT dd  ANY>
@@ -344,10 +346,10 @@
    <!ELEMENT tr  (td|th)*>
 
    <!ELEMENT td  ANY>
-   <!ATTLIST td align (left|right|center) "center">
+   <!ATTLIST td class CDATA #IMPLIED>
 
    <!ELEMENT th  ANY>
-   <!ATTLIST th align (left|right|center) "center">
+   <!ATTLIST th class CDATA #IMPLIED>
 
    <!ELEMENT ul  (li)+>
    <!ATTLIST ul type (disc|circle|square) "disc">
@@ -3527,15 +3529,15 @@
             returned in the order: q, r.
             An instance of class <code>C1</code> will have the
             following field indices:
-            <dl><dd><table>
+            <blockquote><table>
               <tr>
-                <td>
+                <td class="centered">
                   a
                 </td>
-                <td>
+                <td class="centered">
                   2
                 </td>
-                <td align="left">
+                <td>
                   The count of the fields in the interfaces
                   implemented by <code>C1</code> is two (<i>n</i>=2):
                   <code>p</code> of <code>I0</code>
@@ -3543,30 +3545,30 @@
                 </td>
               </tr>
               <tr>
-                <td>
+                <td class="centered">
                   b
                 </td>
-                <td>
+                <td class="centered">
                   3
                 </td>
-                <td align="left">
+                <td>
                   the subsequent index.
                 </td>
               </tr>
-            </table></dd></dl>
+            </table></blockquote>
             The class <code>C1</code> will have the same field indices.
             <p/>
             An instance of class <code>C2</code> will have the
             following field indices:
-            <dl><dd><table>
+            <blockquote><table>
               <tr>
-                <td>
+                <td class="centered">
                   a
                 </td>
-                <td>
+                <td class="centered">
                   3
                 </td>
-                <td align="left">
+                <td>
                   The count of the fields in the interfaces
                   implemented by <code>C2</code> is three (<i>n</i>=3):
                   <code>p</code> of <code>I0</code>,
@@ -3576,39 +3578,39 @@
                 </td>
               </tr>
               <tr>
-                <td>
+                <td class="centered">
                   b
                 </td>
-                <td>
+                <td class="centered">
                   4
                 </td>
-                <td align="left">
+                <td>
                   the subsequent index to "a".
                 </td>
               </tr>
               <tr>
-                <td>
+                <td class="centered">
                   q
                 </td>
-                <td>
+                <td class="centered">
                   5
                 </td>
-                <td align="left">
+                <td>
                   the subsequent index to "b".
                 </td>
               </tr>
               <tr>
-                <td>
+                <td class="centered">
                   r
                 </td>
-                <td>
+                <td class="centered">
                   6
                 </td>
-                <td align="left">
+                <td>
                   the subsequent index to "q".
                 </td>
               </tr>
-            </table></dd></dl>
+            </table></blockquote>
             The class <code>C2</code> will have the same field indices.
             Note that a field may have a different index depending on the
             object that is viewing it -- for example field "a" above.
@@ -3617,21 +3619,21 @@
             <p/>
             The interface <code>I1</code> will have the
             following field indices:
-            <dl><dd><table>
+            <blockquote><table>
               <tr>
-                <td>
+                <td class="centered">
                   x
                 </td>
-                <td>
+                <td class="centered">
                   1
                 </td>
-                <td align="left">
+                <td>
                   The count of the fields in the superinterfaces
                   of <code>I1</code> is one (<i>n</i>=1):
                   <code>p</code> of <code>I0</code>.
                 </td>
               </tr>
-            </table></dd></dl>
+            </table></blockquote>
 	  </description>
 	</field>
       </typedef>
@@ -4486,61 +4488,61 @@
             </th>
           </tr>
           <tr>
-            <th align="left">
+            <th class="leftAligned">
               the
               <datalink id="jvmtiHeapVisitControl">Heap Visit Control Flags</datalink>
               returned by <functionlink id="jvmtiHeapReferenceCallback"/>
             </th>
-            <td>
+            <td class="centered">
               <b>Yes</b>
             </td>
-            <td>
+            <td class="centered">
               <b>Yes</b>, since visits are controlled
             </td>
-            <td>
+            <td class="centered">
               <b>Yes</b>, since visits are controlled
             </td>
           </tr>
           <tr>
-            <th align="left">
+            <th class="leftAligned">
               <fieldlink id="array_primitive_value_callback" struct="jvmtiHeapCallbacks"/>
               in <paramlink id="callbacks"/> set
             </th>
-            <td>
+            <td class="centered">
               No
             </td>
-            <td>
+            <td class="centered">
               <b>Yes</b>
             </td>
-            <td>
+            <td class="centered">
               No
             </td>
           </tr>
           <tr>
-            <th align="left">
+            <th class="leftAligned">
               <paramlink id="heap_filter"/>
             </th>
-            <td>
+            <td class="centered">
               No
             </td>
-            <td>
+            <td class="centered">
               <b>Yes</b>
             </td>
-            <td>
+            <td class="centered">
               <b>Yes</b>
             </td>
           </tr>
           <tr>
-            <th align="left">
+            <th class="leftAligned">
               <paramlink id="klass"/>
             </th>
-            <td>
+            <td class="centered">
               No
             </td>
-            <td>
+            <td class="centered">
               <b>Yes</b>
             </td>
-            <td>
+            <td class="centered">
               <b>Yes</b>
             </td>
           </tr>
@@ -4678,61 +4680,61 @@
             </th>
           </tr>
           <tr>
-            <th align="left">
+            <th class="leftAligned">
               the
               <datalink id="jvmtiHeapVisitControl">Heap Visit Control Flags</datalink>
               returned by <functionlink id="jvmtiHeapIterationCallback"/>
             </th>
-            <td>
+            <td class="centered">
               No<br/>(unless they abort the iteration)
             </td>
-            <td>
+            <td class="centered">
               No<br/>(unless they abort the iteration)
             </td>
-            <td>
+            <td class="centered">
               No<br/>(unless they abort the iteration)
             </td>
           </tr>
           <tr>
-            <th align="left">
+            <th class="leftAligned">
               <fieldlink id="array_primitive_value_callback" struct="jvmtiHeapCallbacks"/>
               in <paramlink id="callbacks"/> set
             </th>
-            <td>
+            <td class="centered">
               No
             </td>
-            <td>
+            <td class="centered">
               <b>Yes</b>
             </td>
-            <td>
+            <td class="centered">
               No
             </td>
           </tr>
           <tr>
-            <th align="left">
+            <th class="leftAligned">
               <paramlink id="heap_filter"/>
             </th>
-            <td>
+            <td class="centered">
               No
             </td>
-            <td>
+            <td class="centered">
               <b>Yes</b>
             </td>
-            <td>
+            <td class="centered">
               <b>Yes</b>
             </td>
           </tr>
           <tr>
-            <th align="left">
+            <th class="leftAligned">
               <paramlink id="klass"/>
             </th>
-            <td>
+            <td class="centered">
               No
             </td>
-            <td>
+            <td class="centered">
               <b>Yes</b>
             </td>
-            <td>
+            <td class="centered">
               <b>Yes</b>
             </td>
           </tr>
--- a/src/hotspot/share/prims/jvmti.xsl	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/prims/jvmti.xsl	Mon Feb 05 23:12:03 2018 +0100
@@ -1,6 +1,6 @@
 <?xml version="1.0"?> 
 <!--
- Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
@@ -28,31 +28,40 @@
 <xsl:import href="jvmtiLib.xsl"/>
 
 <xsl:output method="html" indent="yes" 
-  doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN" 
-  doctype-system="http://www.w3.org/TR/html4/loose.dtd"/>
+  doctype-system="about:legacy-compat"/>
 
 <xsl:param name="development"></xsl:param>
 
 <xsl:template match="specification">
-  <html>
+  <html lang="en">
   <head>
         <title>
           <xsl:value-of select="@label"/>
           <xsl:text> </xsl:text>
           <xsl:call-template name="showversion"/>
         </title>
-        <style type="text/css">
-          td.tableHeader {font-size: larger}
+        <style>
+          .centered { text-align: center; }
+          .leftAligned { text-align: left; }
+          .rightAligned { text-align: right; }
+          .bgLight { background-color: #EEEEFF; }
+          .bgDark { background-color: #CCCCFF}
+          th { background-color: #EEEEFF; }
+          td.tableHeader {font-size: larger; text-align:center; }
+          div.sep { height: 10px; }
+          div.callbackCtnr { margin: 0 5%; }
+          hr { border-width:0; color:gray; background-color:gray; }
+          hr.thick { height:3px; }
+          hr.thin { height:1px; }
+          table.bordered { border: 1px solid gray; border-spacing: 0; border-collapse: separate; }
+          table.bordered td, table.bordered th { padding: 3px; border: 1px solid black; }
+          table.wide { width: 100%; }
         </style>
   </head>
   <body>
-    <table border="0" width="100%">
-      <tr>
-        <td align="center">
-          <xsl:apply-templates select="title"/>
-        </td>
-      </tr>
-    </table>
+    <div class="centered">
+      <xsl:apply-templates select="title"/>
+    </div>
     <ul>
       <li>
         <a href="#SpecificationIntro"><b>Introduction</b></a>
@@ -158,9 +167,9 @@
       </li>
     </ul>
     <!-- end table of contents, begin body -->
-    <p/>
-    <hr noshade="noshade" size="3"/>
-    <p/>
+    <div class="sep"/>
+    <hr class="thick"/>
+    <div class="sep"/>
     <p id="SpecificationIntro"/>
       <xsl:apply-templates select="intro"/>
     <p id="FunctionSection"/>
@@ -172,8 +181,8 @@
     <p id="EventSection"/>
       <xsl:apply-templates select="eventsection"/>
     <p id="ConstantIndex"/>
-      <p/>
-      <hr noshade="noshade" size="3"/>
+      <div class="sep"/>
+      <hr class="thick"/>
       <h2>
         Constant Index
       </h2>
@@ -184,8 +193,8 @@
       </blockquote>
     <xsl:if test="$development = 'Show'">
       <p id="SpecificationIssues"/>
-      <p/>
-      <hr noshade="noshade" size="3"/>
+      <div class="sep"/>
+      <hr class="thick"/>
       <h2>
         <xsl:value-of select="issuessection/@label"/>
       </h2>
@@ -209,8 +218,8 @@
 </xsl:template>
 
 <xsl:template match="functionsection">
-  <p/>
-  <hr noshade="noshade" size="3"/>
+  <div class="sep"/>
+  <hr class="thick"/>
   <h2>
     <xsl:value-of select="@label"/>
   </h2>
@@ -255,8 +264,8 @@
       <xsl:value-of select="@id"/>
     </xsl:attribute>
   </p>
-  <hr noshade="noshade" size="3"/>
-  <h2 align="center"><xsl:value-of select="@label"/></h2>
+  <hr class="thick"/>
+  <h2 class="centered"><xsl:value-of select="@label"/></h2>
   <xsl:value-of select="@label"/> functions:
   <ul>
     <xsl:apply-templates select="function[count(@hide)=0]" mode="index"/>
@@ -307,12 +316,12 @@
     </ul>    
   </xsl:if>
   <xsl:apply-templates select="intro|typedef|uniontypedef|capabilitiestypedef"/>
-  <p/>
+  <div class="sep"/>
   <xsl:apply-templates select="function[count(@hide)=0]|callback" mode="body"/>
 </xsl:template>
 
 <xsl:template match="function" mode="body">
-  <hr noshade="noshade" width="100%" size="1">
+  <hr class="thin">
     <xsl:attribute name="id">
       <xsl:value-of select="@id"/>
     </xsl:attribute>
@@ -336,8 +345,8 @@
 </xsl:template>
 
 <xsl:template match="function" mode="generalinfo">
-  <table border="1" cellpadding="3" cellspacing="0" width="100%">
-    <tr bgcolor="#EEEEFF">
+  <table class="bordered wide">
+     <tr class="bgLight">
       <td >
         <a href="#jvmtiPhase">Phase</a>
       </td>
@@ -369,8 +378,8 @@
 </xsl:template>
 
 <xsl:template match="event" mode="generalinfo">
-  <table border="1" cellpadding="3" cellspacing="0" width="100%">
-    <tr bgcolor="#EEEEFF">
+  <table class="bordered wide">
+    <tr class="bgLight">
       <td >
         <a href="#jvmtiPhase">Phase</a>
       </td>
@@ -500,13 +509,13 @@
 
 
 <xsl:template match="callback" mode="body">
-  <hr noshade="noshade" width="100%" size="1">
+  <hr class="thin">
     <xsl:attribute name="id">
       <xsl:value-of select="@id"/>
     </xsl:attribute>
   </hr>
   <xsl:apply-templates select="synopsis" mode="body"/>
-  <table cellpadding="0" cellspacing="0" border="0" width="90%" align="center"><tr><td>
+  <div class="callbackCtnr">
   <blockquote>
     <pre>
       <xsl:text>typedef </xsl:text>
@@ -528,7 +537,7 @@
   </blockquote>
   <xsl:apply-templates select="description"/>
   <xsl:apply-templates select="parameters" mode="body"/>
-</td></tr></table>
+  </div>
 </xsl:template>
 
 <xsl:template match="synopsis" mode="body">
@@ -536,8 +545,8 @@
 </xsl:template>
 
 <xsl:template match="eventsection">
-  <p/>
-  <hr noshade="noshade" size="3"/>
+  <div class="sep"/>
+  <hr class="thick"/>
   <h2>
     <xsl:value-of select="@label"/>
   </h2>
@@ -557,8 +566,8 @@
 </xsl:text>
   </pre>
   </blockquote>
-  <p/>
-  <hr noshade="noshade" width="100%" size="1"/>
+  <div class="sep"/>
+  <hr class="thin"/>
   <h3 id="EventIndex">Event Index</h3>
   <ul>
     <xsl:apply-templates select="event" mode="index">
@@ -585,9 +594,9 @@
       <xsl:value-of select="@id"/>
     </xsl:attribute>
   </p>
-  <hr noshade="noshade" size="3"/>
+  <hr class="thick"/>
   <h2><xsl:value-of select="@label"/></h2>
-  <p/>
+  <div class="sep"/>
   <blockquote>
     <xsl:apply-templates select="typedef" mode="code"/>
     <pre>
@@ -660,13 +669,13 @@
 </xsl:template>
 
 <xsl:template match="typedef|uniontypedef" mode="justbody">
-    <table border="1" cellpadding="3" cellspacing="0" width="100%">
-      <tr bgcolor="#CCCCFF">
-        <td colspan="3" align="center" class="tableHeader">
+    <table class="bordered wide">
+      <tr class="bgDark">
+        <td colspan="3" class="tableHeader">
           <code><xsl:value-of select="@id"/></code> - <xsl:value-of select="@label"/>
         </td>
       </tr>
-      <tr bgcolor="#EEEEFF">
+      <tr class="bgLight">
         <td>
           Field
         </td>
@@ -691,18 +700,18 @@
 </xsl:template>
 
 <xsl:template match="capabilitiestypedef" mode="justbody">
-    <table border="1" cellpadding="3" cellspacing="0" width="100%">
-      <tr bgcolor="#CCCCFF">
-        <td colspan="3" align="center" class="tableHeader">
+    <table class="bordered wide">
+      <tr class="bgDark">
+        <td colspan="3" class="tableHeader">
           <code><xsl:value-of select="@id"/></code> - <xsl:value-of select="@label"/>
         </td>
       </tr>
-      <tr bgcolor="#EEEEFF">
+      <tr class="bgLight">
         <td colspan="3">
           All types are <code>unsigned int : 1</code>
         </td>
       </tr>
-      <tr bgcolor="#EEEEFF">
+      <tr class="bgLight">
         <td>
           Field
         </td>
@@ -772,11 +781,9 @@
       </code>
     </td>
     <td>
-      <a>
-        <xsl:attribute name="name">
-          <xsl:value-of select="../@id"/>.<xsl:value-of select="@id"/>
-        </xsl:attribute>
-      </a>
+      <xsl:attribute name="id">
+        <xsl:value-of select="../@id"/>.<xsl:value-of select="@id"/>
+      </xsl:attribute>
       <xsl:apply-templates select="description" mode="brief"/>
     </td>
     <td>
@@ -806,14 +813,12 @@
 
 <xsl:template match="constants">
   <blockquote>
-  <a>
-    <xsl:attribute name="name">
-      <xsl:value-of select="@id"/>
-    </xsl:attribute>
-  </a>
-    <table border="1" cellpadding="3" cellspacing="0">
-      <tr bgcolor="#CCCCFF">
-        <td colspan="3" align="center" class="tableHeader">
+    <table class="bordered">
+      <xsl:attribute name="id">
+        <xsl:value-of select="@id"/>
+      </xsl:attribute>
+      <tr class="bgDark">
+        <td colspan="3" class="tableHeader">
             <xsl:value-of select="@label"/>
             <xsl:if test="@kind='enum'">
               <xsl:text> (</xsl:text>
@@ -824,7 +829,7 @@
             </xsl:if>
         </td>
       </tr>
-      <tr bgcolor="#EEEEFF">
+      <tr class="bgLight">
         <td>
           Constant
         </td>
@@ -861,7 +866,7 @@
         <xsl:value-of select="@id"/>
       </code>
     </td>
-    <td align="right">
+    <td class="rightAligned">
       <xsl:value-of select="@num"/>
     </td>
     <td>
@@ -876,13 +881,13 @@
       <xsl:value-of select="@id"/>
     </xsl:attribute>
   </p>
-    <table border="1" cellpadding="3" cellspacing="0" width="100%">
-      <tr bgcolor="#CCCCFF">
-        <td colspan="2" align="center" class="tableHeader">
+    <table class="bordered wide">
+      <tr class="bgDark">
+        <td colspan="2" class="tableHeader">
           <xsl:value-of select="@label"/>
         </td>
       </tr>
-      <tr bgcolor="#EEEEFF">
+      <tr class="bgLight">
         <td>
           Type
         </td>
@@ -949,7 +954,7 @@
 
 <xsl:template match="description">
   <xsl:apply-templates/>
-  <p/>
+  <div class="sep"/>
 </xsl:template>
 
 <xsl:template match="description" mode="brief">
@@ -1064,14 +1069,14 @@
 </xsl:template>
 
 <xsl:template match="parameters" mode="body">
-  <p/>
-  <table border="1" cellpadding="3" cellspacing="0" width="100%">
-    <tr bgcolor="#CCCCFF">
-      <td colspan="3" align="center" class="tableHeader">
+  <div class="sep"/>
+  <table class="bordered wide">
+    <tr class="bgDark">
+      <td colspan="3" class="tableHeader">
         Parameters
       </td>
     </tr>
-    <tr bgcolor="#EEEEFF">
+    <tr class="bgLight">
       <td>
         Name
       </td>
@@ -1111,17 +1116,28 @@
 </xsl:template>
 
 <xsl:template match="capabilities">
-  <p/>
-  <table border="1" cellpadding="3" cellspacing="0" width="100%">
-    <tr bgcolor="#CCCCFF">
-      <td colspan="2" align="center" class="tableHeader">
+  <div class="sep"/>
+  <!--
+  The W3C Validator reports an error if all cells have colspan==2.
+  The workaround is to detect that case and set colspan = 1 for all cells
+  that fill the entire row.
+  -->
+  <xsl:variable name="fullRowColspan">
+    <xsl:choose>
+      <xsl:when test="count(required)!=0 or count(capability)!=0">2</xsl:when>
+      <xsl:otherwise>1</xsl:otherwise>
+    </xsl:choose>
+  </xsl:variable>
+  <table class="bordered wide">
+    <tr class="bgDark">
+      <td colspan="{$fullRowColspan}" class="tableHeader">
         Capabilities
       </td>
     </tr>
     <xsl:choose>
       <xsl:when test="count(required)=0">
         <tr>
-          <td colspan="2">
+          <td colspan="{$fullRowColspan}">
             <b>Required Functionality</b>
           </td>
         </tr>
@@ -1152,7 +1168,7 @@
             </xsl:choose>
           </td>
         </tr>
-        <tr bgcolor="#EEEEFF">
+        <tr class="bgLight">
           <td >
             Capability
           </td>
@@ -1164,13 +1180,13 @@
       </xsl:otherwise>
     </xsl:choose>
     <xsl:if test="count(capability)!=0">
-      <tr bgcolor="#CCCCFF">
-        <td colspan="2" align="center">
+      <tr class="bgDark">
+        <td colspan="{$fullRowColspan}" class="centered">
           Optional Features
         </td>
       </tr>
       <xsl:if test="count(required)=0">
-        <tr bgcolor="#EEEEFF">
+        <tr class="bgLight">
           <td >
             Capability
           </td>
@@ -1185,10 +1201,10 @@
 </xsl:template>
 
 <xsl:template match="eventcapabilities">
-  <p/>
-  <table border="1" cellpadding="3" cellspacing="0" width="100%">
-    <tr bgcolor="#CCCCFF">
-      <td colspan="2" align="center" class="tableHeader">
+  <div class="sep"/>
+  <table class="bordered wide">
+    <tr class="bgDark">
+      <td colspan="2" class="tableHeader">
         Capabilities
       </td>
     </tr>
@@ -1197,12 +1213,12 @@
         <b>Required Functionality</b>
       </td>
     </tr>
-    <tr bgcolor="#CCCCFF">
-      <td colspan="2" align="center">
+    <tr class="bgDark">
+      <td colspan="2" class="centered">
         Event Enabling Capabilities
       </td>
     </tr>
-    <tr bgcolor="#EEEEFF">
+    <tr class="bgLight">
       <td >
         Capability
       </td>
@@ -1275,10 +1291,21 @@
     <xsl:apply-templates select="errors/error" mode="haserrors"/>
     <xsl:apply-templates select="parameters/param" mode="haserrors"/>
   </xsl:variable>
-  <p/>
-  <table border="1" cellpadding="3" cellspacing="0" width="100%">
-    <tr bgcolor="#CCCCFF">
-      <td colspan="2" align="center" class="tableHeader">
+  <div class="sep"/>
+  <!--
+  The W3C Validator reports an error if all cells have colspan==2.
+  The workaround is to detect that case and set colspan = 1 for all cells
+  that fill the entire row.
+  -->
+  <xsl:variable name="fullRowColspan">
+    <xsl:choose>
+      <xsl:when test="contains($haserrors,'yes')">2</xsl:when>
+      <xsl:otherwise>1</xsl:otherwise>
+    </xsl:choose>
+  </xsl:variable>
+  <table class="bordered wide">
+    <tr class="bgDark">
+      <td colspan="{$fullRowColspan}" class="tableHeader">
         Errors
       </td>
     </tr>
@@ -1291,7 +1318,7 @@
             or one of the following errors
           </td>
         </tr>
-        <tr bgcolor="#EEEEFF">
+        <tr class="bgLight">
           <td>
             Error
           </td>
@@ -1305,7 +1332,7 @@
       </xsl:when>
       <xsl:otherwise>
         <tr>
-          <td colspan="2">
+          <td colspan="{$fullRowColspan}">
             This function returns a 
             <a href="#universal-error">universal error</a>
           </td>
@@ -1673,36 +1700,35 @@
 </xsl:template>
 
 <xsl:template match="errorsection">
-  <p/>
-  <hr noshade="noshade" size="3"/>
+  <div class="sep"/>
+  <hr class="thick"/>
   <h2>
     Errors
   </h2>
-  <p/>
+  <div class="sep"/>
   <xsl:apply-templates select="intro"/>
-  <p/>
+  <div class="sep"/>
   <xsl:apply-templates select="errorcategory"/>
-  <p/>
+  <div class="sep"/>
 </xsl:template>
 
 <xsl:template match="datasection">
-  <p/>
-  <hr noshade="noshade" size="3"/>
+  <div class="sep"/>
+  <hr class="thick"/>
   <h2>
     Data Types
   </h2>
-  <p/>
+  <div class="sep"/>
   <xsl:apply-templates select="intro"/>
   <xsl:apply-templates select="basetypes"/>
-  <p/>
-  <a name="StructureTypeDefinitions"></a>
-  <table border="1" cellpadding="3" cellspacing="0" width="100%">
-    <tr bgcolor="#CCCCFF">
-      <td colspan="2" align="center" class="tableHeader">
+  <div class="sep"/>
+  <table id="StructureTypeDefinitions" class="bordered wide">
+    <tr class="bgDark">
+      <td colspan="2" class="tableHeader">
         Structure Type Definitions
       </td>
     </tr>
-    <tr bgcolor="#EEEEFF">
+    <tr class="bgLight">
       <td>
         Type
       </td>
@@ -1714,15 +1740,14 @@
       <xsl:sort select="@id"/>
     </xsl:apply-templates>
   </table>
-  <p/>
-  <a name="FunctionTypeDefinitions"></a>
-  <table border="1" cellpadding="3" cellspacing="0" width="100%">
-    <tr bgcolor="#CCCCFF">
-      <td colspan="2" align="center" class="tableHeader">
+  <div class="sep"/>
+  <table id="FunctionTypeDefinitions" class="bordered wide">
+    <tr class="bgDark">
+      <td colspan="2" class="tableHeader">
         Function Type Definitions
       </td>
     </tr>
-    <tr bgcolor="#EEEEFF">
+    <tr class="bgLight">
       <td>
         Type
       </td>
@@ -1734,15 +1759,14 @@
       <xsl:sort select="@id"/>
     </xsl:apply-templates>
   </table>
-  <p/>
-  <a name="EnumerationDefinitions"></a>
-  <table border="1" cellpadding="3" cellspacing="0" width="100%">
-    <tr bgcolor="#CCCCFF">
-      <td colspan="2" align="center" class="tableHeader">
+  <div class="sep"/>
+  <table id="EnumerationDefinitions" class="bordered wide">
+    <tr class="bgDark">
+      <td colspan="2" class="tableHeader">
         Enumeration Definitions
       </td>
     </tr>
-    <tr bgcolor="#EEEEFF">
+    <tr class="bgLight">
       <td>
         Type
       </td>
@@ -1754,15 +1778,14 @@
       <xsl:sort select="@id"/>
     </xsl:apply-templates>
   </table>
-  <p/>
-  <a name="FunctionTable"></a>
-  <table border="1" cellpadding="3" cellspacing="0" width="100%">
-    <tr bgcolor="#CCCCFF">
-      <td colspan="3" align="center" class="tableHeader">
+  <div class="sep"/>
+  <table id="FunctionTable" class="bordered wide">
+    <tr class="bgDark">
+      <td colspan="3" class="tableHeader">
         Function Table Layout
       </td>
     </tr>
-    <tr bgcolor="#EEEEFF">
+    <tr class="bgLight">
       <td>
         Position
       </td>
@@ -1778,7 +1801,7 @@
       <xsl:with-param name="index" select="1"/>
     </xsl:call-template>
   </table>
-  <p/>
+  <div class="sep"/>
 </xsl:template>
 
 
@@ -1787,7 +1810,7 @@
   <xsl:param name="index"/>
   <xsl:variable name="thisFunction" select="$funcs[@num=$index]"/>
   <tr>
-    <td align="right">
+    <td class="rightAligned">
       <xsl:number value="$index" format="  1"/>
     </td>
     <xsl:choose>
@@ -1852,11 +1875,11 @@
     <xsl:value-of select="@label"/>
   </h3>
   <xsl:apply-templates select="intro"/>
-  <p/>
+  <div class="sep"/>
   <dl>
     <xsl:apply-templates select="errorid"/>
   </dl>
-  <p/>
+  <div class="sep"/>
 </xsl:template>
 
 <xsl:template match="errorid">
@@ -1870,20 +1893,21 @@
   </dt>
   <dd>
     <xsl:apply-templates/>
-    <p/>
+    <div class="sep"/>
   </dd>
 </xsl:template>
 
 <xsl:template match="changehistory">
-    <p/><hr noshade="noshade" size="3"/>
+    <div class="sep"/>
+    <hr class="thick"/>
     <h2>Change History</h2>
     Last update: <xsl:value-of select="@update"/><br/>
     Version: <xsl:call-template name="showversion"/>
-    <p/>
+    <div class="sep"/>
     <xsl:apply-templates select="intro"/>
-    <p/>
-    <table border="1" cellpadding="3" cellspacing="0" width="100%">
-      <tr bgcolor="#EEEEFF">
+    <div class="sep"/>
+    <table class="bordered wide">
+      <tr class="bgLight">
         <td>
           <b>Version</b><br/>
           <b>Date</b>
@@ -1980,7 +2004,7 @@
 
 
 <xsl:template match="table">
-  <table border="1" cellpadding="3" cellspacing="0" width="100%">
+  <table class="bordered wide">
     <xsl:apply-templates/>
   </table>
 </xsl:template>
@@ -1993,18 +2017,22 @@
 
 <xsl:template match="td">
   <td>
-    <xsl:attribute name="align">
-      <xsl:value-of select="@align"/>
-    </xsl:attribute>
+    <xsl:if test="@class">
+      <xsl:attribute name="class">
+        <xsl:value-of select="@class"/>
+      </xsl:attribute>
+    </xsl:if>
     <xsl:apply-templates/>
   </td>
 </xsl:template>
 
 <xsl:template match="th">
-  <th bgcolor="#EEEEFF">
-    <xsl:attribute name="align">
-      <xsl:value-of select="@align"/>
-    </xsl:attribute>
+  <th>
+    <xsl:if test="@class">
+      <xsl:attribute name="class">
+        <xsl:value-of select="@class"/>
+      </xsl:attribute>
+    </xsl:if>
     <xsl:apply-templates/>
   </th>
 </xsl:template>
@@ -2027,10 +2055,14 @@
   </dd>
 </xsl:template>
 
+<xsl:template match="blockquote">
+  <blockquote>
+    <xsl:apply-templates/>
+  </blockquote>
+</xsl:template>
+
 <xsl:template match="p">
-  <p>
-    <xsl:apply-templates/>
-  </p>
+  <div class="sep"/>
 </xsl:template>
 
 <xsl:template match="br">
@@ -2041,7 +2073,7 @@
 
 <xsl:template match="ul">
   <ul>
-    <xsl:attribute name="type"><xsl:value-of select="@type"/></xsl:attribute>
+    <xsl:attribute name="style">list-style-type:<xsl:value-of select="@type"/></xsl:attribute>
     <xsl:apply-templates/>
   </ul>
 </xsl:template>
--- a/src/hotspot/share/prims/jvmtiEnvBase.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -677,7 +677,7 @@
     int depth = 0;
     for (javaVFrame *jvf = java_thread->last_java_vframe(&reg_map); jvf != NULL;
          jvf = jvf->java_sender()) {
-      if (depth++ < MaxJavaStackTraceDepth) {  // check for stack too deep
+      if (MaxJavaStackTraceDepth == 0 || depth++ < MaxJavaStackTraceDepth) {  // check for stack too deep
         // add locked objects for this frame into list
         err = get_locked_objects_in_frame(calling_thread, java_thread, jvf, owned_monitors_list, depth-1);
         if (err != JVMTI_ERROR_NONE) {
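For readers of the jvmtiEnvBase.cpp hunk above: the revised condition makes MaxJavaStackTraceDepth == 0 mean "no limit" when walking a thread's frames for owned-monitor queries, instead of collecting nothing. A minimal standalone sketch of just that predicate (hypothetical helper name, not part of the patch):

#include <cassert>

// Hypothetical helper mirroring the new check above: a limit of 0 disables
// the cap entirely, any positive value caps the number of frames visited.
static bool within_stack_trace_limit(int depth, int max_depth) {
  return max_depth == 0 || depth < max_depth;
}

int main() {
  assert(within_stack_trace_limit(5, 0));    // 0 now means unlimited
  assert(within_stack_trace_limit(5, 10));   // below the cap
  assert(!within_stack_trace_limit(10, 10)); // at the cap: stop walking
  return 0;
}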
--- a/src/hotspot/share/prims/jvmtiLib.xsl	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/prims/jvmtiLib.xsl	Mon Feb 05 23:12:03 2018 +0100
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="utf-8"?>
 <!--
 
- Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
@@ -184,12 +184,12 @@
 </xsl:template>
 
 <xsl:template match="ptrtype" mode="funcdescription">
-  <p/>
+  <div class="sep"/>
   <xsl:apply-templates select="nullok" mode="funcdescription"/>
 </xsl:template>
 
 <xsl:template match="inptr" mode="funcdescription">
-  <p/>
+  <div class="sep"/>
   <xsl:variable name="child" select="child::*[position()=1]"/>
   <xsl:text>Agent passes in a pointer</xsl:text>
   <xsl:if test="name($child)!='void'">
@@ -203,7 +203,7 @@
 </xsl:template>
 
 <xsl:template match="inbuf" mode="funcdescription">
-  <p/>
+  <div class="sep"/>
   <xsl:variable name="child" select="child::*[position()=1]"/>
   <xsl:text>Agent passes in </xsl:text>
   <xsl:choose>
@@ -228,7 +228,7 @@
 </xsl:template>
 
 <xsl:template match="outptr" mode="funcdescription">
-  <p/>
+  <div class="sep"/>
   <xsl:text>Agent passes a pointer to a </xsl:text>
   <code>
     <xsl:apply-templates select="child::*[position()=1]" mode="signature"/> 
@@ -244,7 +244,7 @@
 </xsl:template>
 
 <xsl:template match="allocbuf" mode="funcdescription">
-  <p/>
+  <div class="sep"/>
   <xsl:text>Agent passes a pointer to a </xsl:text>
   <code>
     <xsl:apply-templates select="child::*[position()=1]" mode="signature"/> 
@@ -284,7 +284,7 @@
 </xsl:template>
 
 <xsl:template match="allocallocbuf" mode="funcdescription">
-  <p/>
+  <div class="sep"/>
   <xsl:text>Agent passes a pointer to a </xsl:text>
   <code>
     <xsl:apply-templates select="child::*[position()=1]" mode="signature"/> 
@@ -328,7 +328,7 @@
 </xsl:template>
 
 <xsl:template match="outbuf" mode="funcdescription">
-  <p/>
+  <div class="sep"/>
   <xsl:text>Agent passes an array </xsl:text>
   <xsl:if test="count(@incount)=1 and @incount!=''">
     <xsl:text>large enough to hold </xsl:text>
@@ -358,7 +358,7 @@
 </xsl:template>
 
 <xsl:template match="agentbuf" mode="funcdescription">
-  <p/>
+  <div class="sep"/>
   <xsl:apply-templates select="nullok" mode="funcdescription"/>
   <xsl:apply-templates select="child::*[position()=1]" mode="returndescription">
     <xsl:with-param name="plural" select="'plural'"/>
--- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -497,6 +497,7 @@
     } break;
 
     // this is an indirect CP entry so it needs special handling
+    case JVM_CONSTANT_Dynamic:  // fall through
     case JVM_CONSTANT_InvokeDynamic:
     {
       // Index of the bootstrap specifier in the operands array
@@ -509,15 +510,18 @@
                                                     merge_cp_length_p, THREAD);
       if (new_bs_i != old_bs_i) {
         log_trace(redefine, class, constantpool)
-          ("InvokeDynamic entry@%d bootstrap_method_attr_index change: %d to %d",
+          ("Dynamic entry@%d bootstrap_method_attr_index change: %d to %d",
            *merge_cp_length_p, old_bs_i, new_bs_i);
       }
       if (new_ref_i != old_ref_i) {
         log_trace(redefine, class, constantpool)
-          ("InvokeDynamic entry@%d name_and_type_index change: %d to %d", *merge_cp_length_p, old_ref_i, new_ref_i);
+          ("Dynamic entry@%d name_and_type_index change: %d to %d", *merge_cp_length_p, old_ref_i, new_ref_i);
       }
 
-      (*merge_cp_p)->invoke_dynamic_at_put(*merge_cp_length_p, new_bs_i, new_ref_i);
+      if (scratch_cp->tag_at(scratch_i).is_dynamic_constant())
+        (*merge_cp_p)->dynamic_constant_at_put(*merge_cp_length_p, new_bs_i, new_ref_i);
+      else
+        (*merge_cp_p)->invoke_dynamic_at_put(*merge_cp_length_p, new_bs_i, new_ref_i);
       if (scratch_i != *merge_cp_length_p) {
         // The new entry in *merge_cp_p is at a different index than
         // the new entry in scratch_cp so we need to map the index values.
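The jvmtiRedefineClasses.cpp hunk above lets CONSTANT_Dynamic entries fall through the same constant-pool merge path as CONSTANT_InvokeDynamic, re-inserting them with the matching tag. A minimal sketch of that dispatch, with invented names and assuming only what the hunk shows:

#include <cstdio>

// Invented stand-ins for the two constant-pool tags handled above. Both entry
// kinds carry a bootstrap-specifier index and a name-and-type index; the only
// difference on re-insertion is which *_at_put method is called.
enum CpTag { TAG_DYNAMIC, TAG_INVOKE_DYNAMIC };

static const char* put_for(CpTag tag) {
  return (tag == TAG_DYNAMIC) ? "dynamic_constant_at_put"
                              : "invoke_dynamic_at_put";
}

int main() {
  std::printf("CONSTANT_Dynamic       -> %s\n", put_for(TAG_DYNAMIC));
  std::printf("CONSTANT_InvokeDynamic -> %s\n", put_for(TAG_INVOKE_DYNAMIC));
  return 0;
}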
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2582,9 +2582,9 @@
       return;
     }
 
-    // ignore null or deleted handles
     oop o = *obj_p;
-    if (o == NULL || o == JNIHandles::deleted_handle()) {
+    // ignore null
+    if (o == NULL) {
       return;
     }
 
@@ -2641,9 +2641,9 @@
       return;
     }
 
-    // ignore null or deleted handles
     oop o = *obj_p;
-    if (o == NULL || o == JNIHandles::deleted_handle()) {
+    // ignore null
+    if (o == NULL) {
       return;
     }
 
--- a/src/hotspot/share/prims/methodHandles.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/prims/methodHandles.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1359,6 +1359,87 @@
 }
 JVM_END
 
+JVM_ENTRY(void, MHN_copyOutBootstrapArguments(JNIEnv* env, jobject igcls,
+                                              jobject caller_jh, jintArray index_info_jh,
+                                              jint start, jint end,
+                                              jobjectArray buf_jh, jint pos,
+                                              jboolean resolve, jobject ifna_jh)) {
+  Klass* caller_k = java_lang_Class::as_Klass(JNIHandles::resolve(caller_jh));
+  if (caller_k == NULL || !caller_k->is_instance_klass()) {
+      THROW_MSG(vmSymbols::java_lang_InternalError(), "bad caller");
+  }
+  InstanceKlass* caller = InstanceKlass::cast(caller_k);
+  typeArrayOop index_info_oop = (typeArrayOop) JNIHandles::resolve(index_info_jh);
+  if (index_info_oop == NULL ||
+      index_info_oop->klass() != Universe::intArrayKlassObj() ||
+      typeArrayOop(index_info_oop)->length() < 2) {
+      THROW_MSG(vmSymbols::java_lang_InternalError(), "bad index info (0)");
+  }
+  typeArrayHandle index_info(THREAD, index_info_oop);
+  int bss_index_in_pool = index_info->int_at(1);
+  // While we are here, take a quick look at the index info:
+  if (bss_index_in_pool <= 0 ||
+      bss_index_in_pool >= caller->constants()->length() ||
+      index_info->int_at(0)
+      != caller->constants()->invoke_dynamic_argument_count_at(bss_index_in_pool)) {
+      THROW_MSG(vmSymbols::java_lang_InternalError(), "bad index info (1)");
+  }
+  objArrayHandle buf(THREAD, (objArrayOop) JNIHandles::resolve(buf_jh));
+  if (start < 0) {
+    for (int pseudo_index = -4; pseudo_index < 0; pseudo_index++) {
+      if (start == pseudo_index) {
+        if (start >= end || 0 > pos || pos >= buf->length())  break;
+        oop pseudo_arg = NULL;
+        switch (pseudo_index) {
+        case -4:  // bootstrap method
+          {
+            int bsm_index = caller->constants()->invoke_dynamic_bootstrap_method_ref_index_at(bss_index_in_pool);
+            pseudo_arg = caller->constants()->resolve_possibly_cached_constant_at(bsm_index, CHECK);
+            break;
+          }
+        case -3:  // name
+          {
+            Symbol* name = caller->constants()->name_ref_at(bss_index_in_pool);
+            Handle str = java_lang_String::create_from_symbol(name, CHECK);
+            pseudo_arg = str();
+            break;
+          }
+        case -2:  // type
+          {
+            Symbol* type = caller->constants()->signature_ref_at(bss_index_in_pool);
+            Handle th;
+            if (type->byte_at(0) == '(') {
+              th = SystemDictionary::find_method_handle_type(type, caller, CHECK);
+            } else {
+              th = SystemDictionary::find_java_mirror_for_type(type, caller, SignatureStream::NCDFError, CHECK);
+            }
+            pseudo_arg = th();
+            break;
+          }
+        case -1:  // argument count
+          {
+            int argc = caller->constants()->invoke_dynamic_argument_count_at(bss_index_in_pool);
+            jvalue argc_value; argc_value.i = (jint)argc;
+            pseudo_arg = java_lang_boxing_object::create(T_INT, &argc_value, CHECK);
+            break;
+          }
+        }
+
+        // Store the pseudo-argument, and advance the pointers.
+        buf->obj_at_put(pos++, pseudo_arg);
+        ++start;
+      }
+    }
+    // When we are done with this there may be regular arguments to process too.
+  }
+  Handle ifna(THREAD, JNIHandles::resolve(ifna_jh));
+  caller->constants()->
+    copy_bootstrap_arguments_at(bss_index_in_pool,
+                                start, end, buf, pos,
+                                (resolve == JNI_TRUE), ifna, CHECK);
+}
+JVM_END
+
 // It is called by a Cleaner object which ensures that dropped CallSites properly
 // deallocate their dependency information.
 JVM_ENTRY(void, MHN_clearCallSiteContext(JNIEnv* env, jobject igcls, jobject context_jh)) {
@@ -1438,6 +1519,7 @@
   {CC "objectFieldOffset",         CC "(" MEM ")J",                          FN_PTR(MHN_objectFieldOffset)},
   {CC "setCallSiteTargetNormal",   CC "(" CS "" MH ")V",                     FN_PTR(MHN_setCallSiteTargetNormal)},
   {CC "setCallSiteTargetVolatile", CC "(" CS "" MH ")V",                     FN_PTR(MHN_setCallSiteTargetVolatile)},
+  {CC "copyOutBootstrapArguments", CC "(" CLS "[III[" OBJ "IZ" OBJ ")V",     FN_PTR(MHN_copyOutBootstrapArguments)},
   {CC "clearCallSiteContext",      CC "(" CTX ")V",                          FN_PTR(MHN_clearCallSiteContext)},
   {CC "staticFieldOffset",         CC "(" MEM ")J",                          FN_PTR(MHN_staticFieldOffset)},
   {CC "staticFieldBase",           CC "(" MEM ")" OBJ,                        FN_PTR(MHN_staticFieldBase)},
--- a/src/hotspot/share/prims/whitebox.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/prims/whitebox.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1702,7 +1702,7 @@
 WB_END
 
 WB_ENTRY(jboolean, WB_IsSharedClass(JNIEnv* env, jobject wb, jclass clazz))
-  return (jboolean)MetaspaceShared::is_in_shared_space(java_lang_Class::as_Klass(JNIHandles::resolve_non_null(clazz)));
+  return (jboolean)MetaspaceShared::is_in_shared_metaspace(java_lang_Class::as_Klass(JNIHandles::resolve_non_null(clazz)));
 WB_END
 
 WB_ENTRY(jboolean, WB_AreSharedStringsIgnored(JNIEnv* env))
--- a/src/hotspot/share/runtime/arguments.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/runtime/arguments.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -513,7 +513,6 @@
   { "SafepointSpinBeforeYield",     JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "DeferThrSuspendLoopCount",     JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "DeferPollingPageLoopCount",    JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
-  { "UseCGroupMemoryLimitForHeap",  JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::jdk(11) },
   { "IgnoreUnverifiableClassesDuringDump", JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
   { "CheckEndorsedAndExtDirs",      JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
 
@@ -1862,32 +1861,6 @@
     FLAG_IS_DEFAULT(MaxRAM) ? MIN2(os::physical_memory(), (julong)MaxRAM)
                             : (julong)MaxRAM;
 
-  // Experimental support for CGroup memory limits
-  if (UseCGroupMemoryLimitForHeap) {
-    // This is a rough indicator that a CGroup limit may be in force
-    // for this process
-    const char* lim_file = "/sys/fs/cgroup/memory/memory.limit_in_bytes";
-    FILE *fp = fopen(lim_file, "r");
-    if (fp != NULL) {
-      julong cgroup_max = 0;
-      int ret = fscanf(fp, JULONG_FORMAT, &cgroup_max);
-      if (ret == 1 && cgroup_max > 0) {
-        // If unlimited, cgroup_max will be a very large, but unspecified
-        // value, so use initial phys_mem as a limit
-        log_info(gc, heap)("Setting phys_mem to the min of cgroup limit ("
-                           JULONG_FORMAT "MB) and initial phys_mem ("
-                           JULONG_FORMAT "MB)", cgroup_max/M, phys_mem/M);
-        phys_mem = MIN2(cgroup_max, phys_mem);
-      } else {
-        warning("Unable to read/parse cgroup memory limit from %s: %s",
-                lim_file, errno != 0 ? strerror(errno) : "unknown error");
-      }
-      fclose(fp);
-    } else {
-      warning("Unable to open cgroup memory limit file %s (%s)", lim_file, strerror(errno));
-    }
-  }
-
   // Convert deprecated flags
   if (FLAG_IS_DEFAULT(MaxRAMPercentage) &&
       !FLAG_IS_DEFAULT(MaxRAMFraction))
@@ -3491,27 +3464,6 @@
   }
 #endif
 
-  // If we are running in a headless jre, force java.awt.headless property
-  // to be true unless the property has already been set.
-  // Also allow the OS environment variable JAVA_AWT_HEADLESS to set headless state.
-  if (os::is_headless_jre()) {
-    const char* headless = Arguments::get_property("java.awt.headless");
-    if (headless == NULL) {
-      const char *headless_env = ::getenv("JAVA_AWT_HEADLESS");
-      if (headless_env == NULL) {
-        if (!add_property("java.awt.headless=true")) {
-          return JNI_ENOMEM;
-        }
-      } else {
-        char buffer[256];
-        jio_snprintf(buffer, sizeof(buffer), "java.awt.headless=%s", headless_env);
-        if (!add_property(buffer)) {
-          return JNI_ENOMEM;
-        }
-      }
-    }
-  }
-
   if (!check_vm_args_consistency()) {
     return JNI_ERR;
   }
--- a/src/hotspot/share/runtime/arguments.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/runtime/arguments.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -373,13 +373,7 @@
 
   // Tiered
   static void set_tiered_flags();
-  // CMS/ParNew garbage collectors
-  static void set_parnew_gc_flags();
-  static void set_cms_and_parnew_gc_flags();
-  // UseParallel[Old]GC
-  static void set_parallel_gc_flags();
-  // Garbage-First (UseG1GC)
-  static void set_g1_gc_flags();
+
   // GC ergonomics
   static void set_conservative_max_heap_alignment();
   static void set_use_compressed_oops();
--- a/src/hotspot/share/runtime/globals.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/runtime/globals.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -78,21 +78,21 @@
 
 define_pd_global(intx, CompileThreshold,             0);
 
-define_pd_global(intx, OnStackReplacePercentage,     0);
-define_pd_global(bool, ResizeTLAB,                   false);
-define_pd_global(intx, FreqInlineSize,               0);
+define_pd_global(intx,   OnStackReplacePercentage,   0);
+define_pd_global(bool,   ResizeTLAB,                 false);
+define_pd_global(intx,   FreqInlineSize,             0);
 define_pd_global(size_t, NewSizeThreadIncrease,      4*K);
-define_pd_global(intx, InlineClassNatives,           true);
-define_pd_global(intx, InlineUnsafeOps,              true);
-define_pd_global(intx, InitialCodeCacheSize,         160*K);
-define_pd_global(intx, ReservedCodeCacheSize,        32*M);
-define_pd_global(intx, NonProfiledCodeHeapSize,      0);
-define_pd_global(intx, ProfiledCodeHeapSize,         0);
-define_pd_global(intx, NonNMethodCodeHeapSize,       32*M);
+define_pd_global(bool,   InlineClassNatives,         true);
+define_pd_global(bool,   InlineUnsafeOps,            true);
+define_pd_global(uintx,  InitialCodeCacheSize,       160*K);
+define_pd_global(uintx,  ReservedCodeCacheSize,      32*M);
+define_pd_global(uintx,  NonProfiledCodeHeapSize,    0);
+define_pd_global(uintx,  ProfiledCodeHeapSize,       0);
+define_pd_global(uintx,  NonNMethodCodeHeapSize,     32*M);
 
-define_pd_global(intx, CodeCacheExpansionSize,       32*K);
-define_pd_global(intx, CodeCacheMinBlockLength,      1);
-define_pd_global(intx, CodeCacheMinimumUseSpace,     200*K);
+define_pd_global(uintx,  CodeCacheExpansionSize,     32*K);
+define_pd_global(uintx,  CodeCacheMinBlockLength,    1);
+define_pd_global(uintx,  CodeCacheMinimumUseSpace,   200*K);
 define_pd_global(size_t, MetaspaceSize,              ScaleForWordSize(4*M));
 define_pd_global(bool, NeverActAsServerClassMachine, true);
 define_pd_global(uint64_t,MaxRAM,                    1ULL*G);
@@ -2042,11 +2042,6 @@
           "MaxRAM * MaxRAMPercentage / 100")                                \
           range(0, max_uintx)                                               \
                                                                             \
-  experimental(bool, UseCGroupMemoryLimitForHeap, false,                    \
-          "Use CGroup memory limit as physical memory limit for heap "      \
-          "sizing"                                                          \
-          "Deprecated, replaced by container support")                      \
-                                                                            \
   product(uintx, MaxRAMFraction, 4,                                         \
           "Maximum fraction (1/n) of real memory used for maximum heap "    \
           "size. "                                                          \
@@ -3966,6 +3961,14 @@
   develop(bool, TraceInvokeDynamic, false,                                  \
           "trace internal invoke dynamic operations")                       \
                                                                             \
+  diagnostic(int, UseBootstrapCallInfo, 1,                                  \
+          "0: when resolving InDy or ConDy, force all BSM arguments to be " \
+          "resolved before the bootstrap method is called; 1: when a BSM "  \
+          "that may accept a BootstrapCallInfo is detected, use that API "  \
+          "to pass BSM arguments, which allows the BSM to delay their "     \
+          "resolution; 2+: stress test the BCI API by calling more BSMs "   \
+          "via that API, instead of with the eagerly-resolved array.")      \
+                                                                            \
   diagnostic(bool, PauseAtStartup,      false,                              \
           "Causes the VM to pause at startup time and wait for the pause "  \
           "file to be removed (default: ./vm.paused.<pid>)")                \
--- a/src/hotspot/share/runtime/jniHandles.cpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/runtime/jniHandles.cpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "classfile/systemDictionary.hpp"
+#include "gc/shared/oopStorage.inline.hpp"
 #include "logging/log.hpp"
 #include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
@@ -32,13 +32,13 @@
 #include "runtime/thread.inline.hpp"
 #include "trace/traceMacros.hpp"
 #include "utilities/align.hpp"
+#include "utilities/debug.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #endif
 
-JNIHandleBlock* JNIHandles::_global_handles       = NULL;
-JNIHandleBlock* JNIHandles::_weak_global_handles  = NULL;
-oop             JNIHandles::_deleted_handle       = NULL;
+OopStorage* JNIHandles::_global_handles = NULL;
+OopStorage* JNIHandles::_weak_global_handles = NULL;
 
 
 jobject JNIHandles::make_local(oop obj) {
@@ -46,7 +46,7 @@
     return NULL;                // ignore null handles
   } else {
     Thread* thread = Thread::current();
-    assert(Universe::heap()->is_in_reserved(obj), "sanity check");
+    assert(oopDesc::is_oop(obj), "not an oop");
     assert(!current_thread_in_native(), "must not be in native");
     return thread->active_handles()->allocate_handle(obj);
   }
@@ -59,7 +59,7 @@
   if (obj == NULL) {
     return NULL;                // ignore null handles
   } else {
-    assert(Universe::heap()->is_in_reserved(obj), "sanity check");
+    assert(oopDesc::is_oop(obj), "not an oop");
     assert(thread->is_Java_thread(), "not a Java thread");
     assert(!current_thread_in_native(), "must not be in native");
     return thread->active_handles()->allocate_handle(obj);
@@ -72,22 +72,39 @@
     return NULL;                // ignore null handles
   } else {
     JavaThread* thread = JavaThread::thread_from_jni_environment(env);
-    assert(Universe::heap()->is_in_reserved(obj), "sanity check");
+    assert(oopDesc::is_oop(obj), "not an oop");
     assert(!current_thread_in_native(), "must not be in native");
     return thread->active_handles()->allocate_handle(obj);
   }
 }
 
 
-jobject JNIHandles::make_global(Handle obj) {
+static void report_handle_allocation_failure(AllocFailType alloc_failmode,
+                                             const char* handle_kind) {
+  if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
+    // Fake size value, since we don't know the min allocation size here.
+    vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR,
+                          "Cannot create %s JNI handle", handle_kind);
+  } else {
+    assert(alloc_failmode == AllocFailStrategy::RETURN_NULL, "invariant");
+  }
+}
+
+jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) {
   assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
   assert(!current_thread_in_native(), "must not be in native");
   jobject res = NULL;
   if (!obj.is_null()) {
     // ignore null handles
-    MutexLocker ml(JNIGlobalHandle_lock);
-    assert(Universe::heap()->is_in_reserved(obj()), "sanity check");
-    res = _global_handles->allocate_handle(obj());
+    assert(oopDesc::is_oop(obj()), "not an oop");
+    oop* ptr = _global_handles->allocate();
+    // Return NULL on allocation failure.
+    if (ptr != NULL) {
+      *ptr = obj();
+      res = reinterpret_cast<jobject>(ptr);
+    } else {
+      report_handle_allocation_failure(alloc_failmode, "global");
+    }
   } else {
     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
   }
@@ -96,32 +113,32 @@
 }
 
 
-jobject JNIHandles::make_weak_global(Handle obj) {
+jobject JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) {
   assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
   assert(!current_thread_in_native(), "must not be in native");
   jobject res = NULL;
   if (!obj.is_null()) {
     // ignore null handles
-    {
-      MutexLocker ml(JNIGlobalHandle_lock);
-      assert(Universe::heap()->is_in_reserved(obj()), "sanity check");
-      res = _weak_global_handles->allocate_handle(obj());
+    assert(oopDesc::is_oop(obj()), "not an oop");
+    oop* ptr = _weak_global_handles->allocate();
+    // Return NULL on allocation failure.
+    if (ptr != NULL) {
+      *ptr = obj();
+      char* tptr = reinterpret_cast<char*>(ptr) + weak_tag_value;
+      res = reinterpret_cast<jobject>(tptr);
+    } else {
+      report_handle_allocation_failure(alloc_failmode, "weak global");
     }
-    // Add weak tag.
-    assert(is_aligned(res, weak_tag_alignment), "invariant");
-    char* tptr = reinterpret_cast<char*>(res) + weak_tag_value;
-    res = reinterpret_cast<jobject>(tptr);
   } else {
     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
   }
   return res;
 }
 
-template<bool external_guard>
 oop JNIHandles::resolve_jweak(jweak handle) {
+  assert(handle != NULL, "precondition");
   assert(is_jweak(handle), "precondition");
   oop result = jweak_ref(handle);
-  result = guard_value<external_guard>(result);
 #if INCLUDE_ALL_GCS
   if (result != NULL && UseG1GC) {
     G1SATBCardTableModRefBS::enqueue(result);
@@ -130,31 +147,31 @@
   return result;
 }
 
-template oop JNIHandles::resolve_jweak<true>(jweak);
-template oop JNIHandles::resolve_jweak<false>(jweak);
-
 bool JNIHandles::is_global_weak_cleared(jweak handle) {
+  assert(handle != NULL, "precondition");
   assert(is_jweak(handle), "not a weak handle");
-  return guard_value<false>(jweak_ref(handle)) == NULL;
+  return jweak_ref(handle) == NULL;
 }
 
 void JNIHandles::destroy_global(jobject handle) {
   if (handle != NULL) {
-    assert(is_global_handle(handle), "Invalid delete of global JNI handle");
-    jobject_ref(handle) = deleted_handle();
+    assert(!is_jweak(handle), "wrong method for destroying jweak");
+    jobject_ref(handle) = NULL;
+    _global_handles->release(&jobject_ref(handle));
   }
 }
 
 
 void JNIHandles::destroy_weak_global(jobject handle) {
   if (handle != NULL) {
-    jweak_ref(handle) = deleted_handle();
+    assert(is_jweak(handle), "JNI handle not jweak");
+    jweak_ref(handle) = NULL;
+    _weak_global_handles->release(&jweak_ref(handle));
   }
 }
 
 
 void JNIHandles::oops_do(OopClosure* f) {
-  f->do_oop(&_deleted_handle);
   _global_handles->oops_do(f);
 }
 
@@ -165,23 +182,60 @@
 
 
 void JNIHandles::weak_oops_do(OopClosure* f) {
-  AlwaysTrueClosure always_true;
-  weak_oops_do(&always_true, f);
+  _weak_global_handles->weak_oops_do(f);
 }
 
 
 void JNIHandles::initialize() {
-  _global_handles      = JNIHandleBlock::allocate_block();
-  _weak_global_handles = JNIHandleBlock::allocate_block();
-  EXCEPTION_MARK;
-  // We will never reach the CATCH below since Exceptions::_throw will cause
-  // the VM to exit if an exception is thrown during initialization
-  Klass* k      = SystemDictionary::Object_klass();
-  _deleted_handle = InstanceKlass::cast(k)->allocate_instance(CATCH);
+  _global_handles = new OopStorage("JNI Global",
+                                   JNIGlobalAlloc_lock,
+                                   JNIGlobalActive_lock);
+  _weak_global_handles = new OopStorage("JNI Weak",
+                                        JNIWeakAlloc_lock,
+                                        JNIWeakActive_lock);
 }
 
 
+inline bool is_storage_handle(const OopStorage* storage, const oop* ptr) {
+  return storage->allocation_status(ptr) == OopStorage::ALLOCATED_ENTRY;
+}
+
+
+jobjectRefType JNIHandles::handle_type(Thread* thread, jobject handle) {
+  assert(handle != NULL, "precondition");
+  jobjectRefType result = JNIInvalidRefType;
+  if (is_jweak(handle)) {
+    if (is_storage_handle(_weak_global_handles, &jweak_ref(handle))) {
+      result = JNIWeakGlobalRefType;
+    }
+  } else {
+    switch (_global_handles->allocation_status(&jobject_ref(handle))) {
+    case OopStorage::ALLOCATED_ENTRY:
+      result = JNIGlobalRefType;
+      break;
+
+    case OopStorage::UNALLOCATED_ENTRY:
+      break;                    // Invalid global handle
+
+    case OopStorage::INVALID_ENTRY:
+      // Not in global storage.  Might be a local handle.
+      if (is_local_handle(thread, handle) ||
+          (thread->is_Java_thread() &&
+           is_frame_handle((JavaThread*)thread, handle))) {
+        result = JNILocalRefType;
+      }
+      break;
+
+    default:
+      ShouldNotReachHere();
+    }
+  }
+  return result;
+}
+
+
 bool JNIHandles::is_local_handle(Thread* thread, jobject handle) {
+  assert(handle != NULL, "precondition");
   JNIHandleBlock* block = thread->active_handles();
 
   // Look back past possible native calls to jni_PushLocalFrame.
@@ -199,64 +253,51 @@
 // We easily can't isolate any particular stack frame the handle might
 // come from, so we'll check the whole stack.
 
-bool JNIHandles::is_frame_handle(JavaThread* thr, jobject obj) {
+bool JNIHandles::is_frame_handle(JavaThread* thr, jobject handle) {
+  assert(handle != NULL, "precondition");
   // If there is no java frame, then this must be top level code, such
   // as the java command executable, in which case, this type of handle
   // is not permitted.
   return (thr->has_last_Java_frame() &&
-         (void*)obj < (void*)thr->stack_base() &&
-         (void*)obj >= (void*)thr->last_Java_sp());
+         (void*)handle < (void*)thr->stack_base() &&
+         (void*)handle >= (void*)thr->last_Java_sp());
 }
 
 
 bool JNIHandles::is_global_handle(jobject handle) {
-  return _global_handles->chain_contains(handle);
+  assert(handle != NULL, "precondition");
+  return !is_jweak(handle) && is_storage_handle(_global_handles, &jobject_ref(handle));
 }
 
 
 bool JNIHandles::is_weak_global_handle(jobject handle) {
-  return _weak_global_handles->chain_contains(handle);
+  assert(handle != NULL, "precondition");
+  return is_jweak(handle) && is_storage_handle(_weak_global_handles, &jweak_ref(handle));
 }
 
-long JNIHandles::global_handle_memory_usage() {
-  return _global_handles->memory_usage();
+size_t JNIHandles::global_handle_memory_usage() {
+  return _global_handles->total_memory_usage();
 }
 
-long JNIHandles::weak_global_handle_memory_usage() {
-  return _weak_global_handles->memory_usage();
+size_t JNIHandles::weak_global_handle_memory_usage() {
+  return _weak_global_handles->total_memory_usage();
 }
 
 
-class CountHandleClosure: public OopClosure {
-private:
-  int _count;
-public:
-  CountHandleClosure(): _count(0) {}
-  virtual void do_oop(oop* ooph) {
-    if (*ooph != JNIHandles::deleted_handle()) {
-      _count++;
-    }
-  }
-  virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
-  int count() { return _count; }
-};
-
 // We assume this is called at a safepoint: no lock is needed.
 void JNIHandles::print_on(outputStream* st) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   assert(_global_handles != NULL && _weak_global_handles != NULL,
          "JNIHandles not initialized");
 
-  CountHandleClosure global_handle_count;
-  oops_do(&global_handle_count);
-  weak_oops_do(&global_handle_count);
-
-  st->print_cr("JNI global references: %d", global_handle_count.count());
+  st->print_cr("JNI global refs: " SIZE_FORMAT ", weak refs: " SIZE_FORMAT,
+               _global_handles->allocation_count(),
+               _weak_global_handles->allocation_count());
   st->cr();
   st->flush();
 }
 
-class VerifyHandleClosure: public OopClosure {
+class VerifyJNIHandles: public OopClosure {
 public:
   virtual void do_oop(oop* root) {
     (*root)->verify();
@@ -265,7 +306,7 @@
 };
 
 void JNIHandles::verify() {
-  VerifyHandleClosure verify_handle;
+  VerifyJNIHandles verify_handle;
 
   oops_do(&verify_handle);
   weak_oops_do(&verify_handle);
@@ -419,34 +460,6 @@
 }
 
 
-void JNIHandleBlock::weak_oops_do(BoolObjectClosure* is_alive,
-                                  OopClosure* f) {
-  for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
-    assert(current->pop_frame_link() == NULL,
-      "blocks holding weak global JNI handles should not have pop frame link set");
-    for (int index = 0; index < current->_top; index++) {
-      oop* root = &(current->_handles)[index];
-      oop value = *root;
-      // traverse heap pointers only, not deleted handles or free list pointers
-      if (value != NULL && Universe::heap()->is_in_reserved(value)) {
-        if (is_alive->do_object_b(value)) {
-          // The weakly referenced object is alive, update pointer
-          f->do_oop(root);
-        } else {
-          // The weakly referenced object is not alive, clear the reference by storing NULL
-          log_develop_trace(gc, ref)("Clearing JNI weak reference (" INTPTR_FORMAT ")", p2i(root));
-          *root = NULL;
-        }
-      }
-    }
-    // the next handle block is valid only if current block is full
-    if (current->_top < block_size_in_oops) {
-      break;
-    }
-  }
-}
-
-
 jobject JNIHandleBlock::allocate_handle(oop obj) {
   assert(Universe::heap()->is_in_reserved(obj), "sanity check");
   if (_top == 0) {
@@ -514,15 +527,6 @@
   return allocate_handle(obj);  // retry
 }
 
-void JNIHandleBlock::release_handle(jobject h) {
-  if (h != NULL) {
-    assert(chain_contains(h), "does not contain the JNI handle");
-    // Mark the handle as deleted, allocate will reuse it
-    *((oop*)h) = JNIHandles::deleted_handle();
-  }
-}
-
-
 void JNIHandleBlock::rebuild_free_list() {
   assert(_allocate_before_rebuild == 0 && _free_list == NULL, "just checking");
   int free = 0;
@@ -530,7 +534,7 @@
   for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
     for (int index = 0; index < current->_top; index++) {
       oop* handle = &(current->_handles)[index];
-      if (*handle ==  JNIHandles::deleted_handle()) {
+      if (*handle == NULL) {
         // this handle was cleared out by a delete call, reuse it
         *handle = (oop) _free_list;
         _free_list = handle;
@@ -568,30 +572,45 @@
 }
 
 
-int JNIHandleBlock::length() const {
-  int result = 1;
+size_t JNIHandleBlock::length() const {
+  size_t result = 1;
   for (JNIHandleBlock* current = _next; current != NULL; current = current->_next) {
     result++;
   }
   return result;
 }
 
+class CountJNIHandleClosure: public OopClosure {
+private:
+  int _count;
+public:
+  CountJNIHandleClosure(): _count(0) {}
+  virtual void do_oop(oop* ooph) { _count++; }
+  virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
+  int count() { return _count; }
+};
+
 const size_t JNIHandleBlock::get_number_of_live_handles() {
-  CountHandleClosure counter;
+  CountJNIHandleClosure counter;
   oops_do(&counter);
   return counter.count();
 }
 
 // This method is not thread-safe, i.e., must be called while holding a lock on the
 // structure.
-long JNIHandleBlock::memory_usage() const {
+size_t JNIHandleBlock::memory_usage() const {
   return length() * sizeof(JNIHandleBlock);
 }
 
 
 #ifndef PRODUCT
 
+bool JNIHandles::is_local_handle(jobject handle) {
+  return JNIHandleBlock::any_contains(handle);
+}
+
 bool JNIHandleBlock::any_contains(jobject handle) {
+  assert(handle != NULL, "precondition");
   for (JNIHandleBlock* current = _block_list; current != NULL; current = current->_block_list_link) {
     if (current->contains(handle)) {
       return true;
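The jniHandles.cpp rewrite above moves global and weak-global handles onto OopStorage while keeping the low-bit tagging of jweaks (see weak_tag_value in make_weak_global). A minimal, non-HotSpot sketch of that tagging scheme:

#include <cassert>
#include <cstdint>

// Standalone illustration only: a weak handle is the storage slot address
// plus a 1-bit tag, so is_jweak() can distinguish weak from strong handles
// without consulting the storage.
static const uintptr_t weak_tag_value = 1;

static void* tag_weak(void* slot)     { return (void*)((uintptr_t)slot | weak_tag_value); }
static bool  is_jweak(void* handle)   { return ((uintptr_t)handle & weak_tag_value) != 0; }
static void* untag_weak(void* handle) { return (void*)((uintptr_t)handle & ~weak_tag_value); }

int main() {
  alignas(8) static int slot = 0;   // stands in for an OopStorage entry
  void* weak = tag_weak(&slot);
  assert(is_jweak(weak));
  assert(untag_weak(weak) == &slot);
  assert(!is_jweak(&slot));         // untagged pointers read as strong handles
  return 0;
}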
--- a/src/hotspot/share/runtime/jniHandles.hpp	Fri Feb 09 02:23:34 2018 +0000
+++ b/src/hotspot/share/runtime/jniHandles.hpp	Mon Feb 05 23:12:03 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "runtime/handles.hpp"
 
 class JNIHandleBlock;
+class OopStorage;
 
 
 // Interface for creating and resolving local/global JNI handles
@@ -36,17 +37,15 @@
 class JNIHandles : AllStatic {
   friend class VMStructs;
  private:
-  static JNIHandleBlock* _global_handles;             // First global handle block
-  static JNIHandleBlock* _weak_global_handles;        // First weak global handle block
-  static oop _deleted_handle;                         // Sentinel marking deleted handles
+  static OopStorage* _global_handles;
+  static OopStorage* _weak_global_handles;
 
   inline static bool is_jweak(jobject handle);
   inline static oop& jobject_ref(jobject handle); // NOT jweak!
   inline static oop& jweak_ref(jobject handle);
 
-  template<bool external_guard> inline static oop guard_value(oop value);
   template<bool external_guard> inline static oop resolve_impl(jobject handle);
-  template<bool external_guard> static oop resolve_jweak(jweak handle);
+  static oop resolve_jweak(jweak handle);
 
   // This method is not inlined in order to avoid circular includes between
   // this header file and thread.hpp.
@@ -80,19 +79,14 @@
   inline static void destroy_local(jobject handle);
 
   // Global handles
-  static jobject make_global(Handle  obj);
+  static jobject make_global(Handle  obj, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
   static void destroy_global(jobject handle);
 
   // Weak global handles
-  static jobject make_weak_global(Handle obj);
+  static jobject make_weak_global(Handle obj, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
   static void destroy_weak_global(jobject handle);
   static bool is_global_weak_cleared(jweak handle); // Test jweak without resolution
 
-  // Sentinel marking deleted handles in block. Note that we cannot store NULL as
-  // the sentinel, since clearing weak global JNI refs are done by storing NULL in
-  // the handle. The handle may not be reused before destroy_weak_global is called.
-  static oop deleted_handle()   { return _deleted_handle; }
-
   // Initialization
   static void initialize();
 
@@ -100,12 +94,21 @@
   static void print_on(outputStream* st);
   static void print()           { print_on(tty); }
   static void verify();
+  // The category predicates all require handle != NULL.
   static bool is_local_handle(Thread* thread, jobject handle);
-  static bool is_frame_handle(JavaThread* thr, jobject obj);
+  static bool is_frame_handle(JavaThread* thread, jobject handle);
   static bool is_global_handle(jobject handle);
   static bool is_weak_global_handle(jobject handle);
-  static long global_handle_memory_usage();
-  static long weak_global_handle_memory_usage();
+  static size_t global_handle_memory_usage();
+  static size_t weak_global_handle_memory_usage();
+
+#ifndef PRODUCT
+  // Is handle from any local block of any thread?
+  static bool is_local_handle(jobject handle);
+#endif
+