changeset 8959:3ed0df2c553a jdk9-b83

Merge
author lana
date Fri, 18 Sep 2015 14:21:46 -0700
parents 779012e87268 ce9c22f23999
children 90b308169cb2 0093079406dd
files src/share/vm/classfile/imageDecompressor.cpp src/share/vm/classfile/imageDecompressor.hpp src/share/vm/classfile/imageFile.cpp src/share/vm/classfile/imageFile.hpp src/share/vm/utilities/endian.cpp src/share/vm/utilities/endian.hpp test/runtime/modules/ImageFile/ImageAttributeOffsetsTest.java test/runtime/modules/ImageFile/ImageCloseTest.java test/runtime/modules/ImageFile/ImageFileHeaderTest.java test/runtime/modules/ImageFile/ImageFindAttributesTest.java test/runtime/modules/ImageFile/ImageGetAttributesTest.java test/runtime/modules/ImageFile/ImageGetDataAddressTest.java test/runtime/modules/ImageFile/ImageGetIndexAddressTest.java test/runtime/modules/ImageFile/ImageGetStringBytesTest.java test/runtime/modules/ImageFile/ImageOpenTest.java test/runtime/modules/ImageFile/ImageReadTest.java test/runtime/modules/ImageFile/LocationConstants.java
diffstat 250 files changed, 6332 insertions(+), 5452 deletions(-)
--- a/make/Makefile	Fri Sep 18 10:46:35 2015 -0700
+++ b/make/Makefile	Fri Sep 18 14:21:46 2015 -0700
@@ -633,9 +633,9 @@
 
 update_jdk: export_product_jdk export_fastdebug_jdk test_jdk
 
-copy_jdk: $(JDK_IMAGE_DIR)/jre/lib/rt.jar
+copy_jdk: $(JDK_IMAGE_DIR)/bin/java
 
-$(JDK_IMAGE_DIR)/jre/lib/rt.jar:
+$(JDK_IMAGE_DIR)/bin/java:
 	$(RM) -r $(JDK_IMAGE_DIR)
 	$(MKDIR) -p $(JDK_IMAGE_DIR)
 	($(CD) $(JDK_IMPORT_PATH) && \
--- a/make/aix/makefiles/mapfile-vers-debug	Fri Sep 18 10:46:35 2015 -0700
+++ b/make/aix/makefiles/mapfile-vers-debug	Fri Sep 18 14:21:46 2015 -0700
@@ -141,18 +141,6 @@
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
--- a/make/aix/makefiles/mapfile-vers-product	Fri Sep 18 10:46:35 2015 -0700
+++ b/make/aix/makefiles/mapfile-vers-product	Fri Sep 18 14:21:46 2015 -0700
@@ -139,18 +139,6 @@
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
--- a/make/bsd/makefiles/mapfile-vers-darwin-debug	Fri Sep 18 10:46:35 2015 -0700
+++ b/make/bsd/makefiles/mapfile-vers-darwin-debug	Fri Sep 18 14:21:46 2015 -0700
@@ -139,18 +139,6 @@
                 _JVM_Halt
                 _JVM_HoldsLock
                 _JVM_IHashCode
-                _JVM_ImageAttributeOffsets
-                _JVM_ImageAttributeOffsetsLength
-                _JVM_ImageClose
-                _JVM_ImageFindAttributes
-                _JVM_ImageGetAttributes
-                _JVM_ImageGetAttributesCount
-                _JVM_ImageGetDataAddress
-                _JVM_ImageGetIndexAddress
-                _JVM_ImageGetStringBytes
-                _JVM_ImageOpen
-                _JVM_ImageRead
-                _JVM_ImageReadCompressed
                 _JVM_InitAgentProperties
                 _JVM_InitProperties
                 _JVM_InternString
--- a/make/bsd/makefiles/mapfile-vers-darwin-product	Fri Sep 18 10:46:35 2015 -0700
+++ b/make/bsd/makefiles/mapfile-vers-darwin-product	Fri Sep 18 14:21:46 2015 -0700
@@ -139,18 +139,6 @@
                 _JVM_Halt
                 _JVM_HoldsLock
                 _JVM_IHashCode
-                _JVM_ImageAttributeOffsets
-                _JVM_ImageAttributeOffsetsLength
-                _JVM_ImageClose
-                _JVM_ImageFindAttributes
-                _JVM_ImageGetAttributes
-                _JVM_ImageGetAttributesCount
-                _JVM_ImageGetDataAddress
-                _JVM_ImageGetIndexAddress
-                _JVM_ImageGetStringBytes
-                _JVM_ImageOpen
-                _JVM_ImageRead
-                _JVM_ImageReadCompressed
                 _JVM_InitAgentProperties
                 _JVM_InitProperties
                 _JVM_InternString
--- a/make/bsd/makefiles/mapfile-vers-debug	Fri Sep 18 10:46:35 2015 -0700
+++ b/make/bsd/makefiles/mapfile-vers-debug	Fri Sep 18 14:21:46 2015 -0700
@@ -141,18 +141,6 @@
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
--- a/make/bsd/makefiles/mapfile-vers-product	Fri Sep 18 10:46:35 2015 -0700
+++ b/make/bsd/makefiles/mapfile-vers-product	Fri Sep 18 14:21:46 2015 -0700
@@ -141,18 +141,6 @@
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
--- a/make/bsd/makefiles/vm.make	Fri Sep 18 10:46:35 2015 -0700
+++ b/make/bsd/makefiles/vm.make	Fri Sep 18 14:21:46 2015 -0700
@@ -131,7 +131,7 @@
 # By default, link the *.o into the library, not the executable.
 LINK_INTO$(LINK_INTO) = LIBJVM
 
-JDK_LIBDIR = $(JAVA_HOME)/jre/lib/$(LIBARCH)
+JDK_LIBDIR = $(JAVA_HOME)/lib/$(LIBARCH)
 
 #----------------------------------------------------------------------
 # jvm_db & dtrace
--- a/make/build.sh	Fri Sep 18 10:46:35 2015 -0700
+++ b/make/build.sh	Fri Sep 18 14:21:46 2015 -0700
@@ -49,7 +49,7 @@
 # Just in case:
 JAVA_HOME=`( cd $JAVA_HOME; pwd )`
 
-if [ "${ALT_BOOTDIR-}" = ""  -o  ! -d "${ALT_BOOTDIR-}" -o ! -d ${ALT_BOOTDIR-}/jre/lib/ ]; then
+if [ "${ALT_BOOTDIR-}" = ""  -o  ! -d "${ALT_BOOTDIR-}" -o ! -d ${ALT_BOOTDIR-}/lib/ ]; then
     ALT_BOOTDIR=${JAVA_HOME}
 fi
 
--- a/make/hotspot.script	Fri Sep 18 10:46:35 2015 -0700
+++ b/make/hotspot.script	Fri Sep 18 14:21:46 2015 -0700
@@ -127,7 +127,7 @@
 #     o		$JRE/lib/$ARCH
 # followed by the user's previous effective LD_LIBRARY_PATH, if
 # any.
-JRE=$JDK/jre
+JRE=$JDK
 JAVA_HOME=$JDK
 export JAVA_HOME
 
--- a/make/linux/makefiles/mapfile-vers-debug	Fri Sep 18 10:46:35 2015 -0700
+++ b/make/linux/makefiles/mapfile-vers-debug	Fri Sep 18 14:21:46 2015 -0700
@@ -141,18 +141,6 @@
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
--- a/make/linux/makefiles/mapfile-vers-product	Fri Sep 18 10:46:35 2015 -0700
+++ b/make/linux/makefiles/mapfile-vers-product	Fri Sep 18 14:21:46 2015 -0700
@@ -141,18 +141,6 @@
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
--- a/make/solaris/makefiles/adlc.make	Fri Sep 18 10:46:35 2015 -0700
+++ b/make/solaris/makefiles/adlc.make	Fri Sep 18 14:21:46 2015 -0700
@@ -76,6 +76,11 @@
 ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
   CFLAGS_WARN = +w -errwarn
 endif
+# When using compiler version 5.13 (Solaris Studio 12.4), calls to explicitly 
+# instantiated template functions trigger this warning when +w is active.
+ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 513), 1)
+  CFLAGS_WARN += -erroff=notemsource
+endif
 CFLAGS += $(CFLAGS_WARN)
 
 ifeq ("${Platform_compiler}", "sparcWorks")
--- a/make/solaris/makefiles/buildtree.make	Fri Sep 18 10:46:35 2015 -0700
+++ b/make/solaris/makefiles/buildtree.make	Fri Sep 18 14:21:46 2015 -0700
@@ -270,6 +270,7 @@
 	echo "CP ?= cp"; \
 	echo "MV ?= mv"; \
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(VARIANT).make"; \
+	echo "include \$$(GAMMADIR)/make/excludeSrc.make"; \
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(COMPILER).make"; \
 	) > $@
 
--- a/make/solaris/makefiles/mapfile-vers	Fri Sep 18 10:46:35 2015 -0700
+++ b/make/solaris/makefiles/mapfile-vers	Fri Sep 18 14:21:46 2015 -0700
@@ -141,18 +141,6 @@
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
--- a/make/solaris/makefiles/vm.make	Fri Sep 18 10:46:35 2015 -0700
+++ b/make/solaris/makefiles/vm.make	Fri Sep 18 14:21:46 2015 -0700
@@ -197,7 +197,7 @@
 Src_Dirs/COMPILER2 := $(CORE_PATHS) $(COMPILER2_PATHS)
 Src_Dirs/TIERED    := $(CORE_PATHS) $(COMPILER1_PATHS) $(COMPILER2_PATHS)
 Src_Dirs/ZERO      := $(CORE_PATHS)
-Src_Dirs/SHARK     := $(CORE_PATHS)
+Src_Dirs/SHARK     := $(CORE_PATHS) $(SHARK_PATHS)
 Src_Dirs := $(Src_Dirs/$(TYPE))
 
 COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\*
@@ -206,7 +206,7 @@
 ZERO_SPECIFIC_FILES      := zero
 
 # Always exclude these.
-Src_Files_EXCLUDE := dtrace jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
+Src_Files_EXCLUDE += dtrace jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
 
 # Exclude per type.
 Src_Files_EXCLUDE/CORE      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
--- a/src/cpu/aarch64/vm/aarch64.ad	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/aarch64/vm/aarch64.ad	Fri Sep 18 14:21:46 2015 -0700
@@ -4373,12 +4373,12 @@
       return;
     }
 
-    if (UseBiasedLocking) {
-      __ biased_locking_enter(disp_hdr, oop, box, tmp, true, cont);
+    if (UseBiasedLocking && !UseOptoBiasInlining) {
+      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
     }
 
     // Handle existing monitor
-    if (EmitSync & 0x02) {
+    if ((EmitSync & 0x02) == 0) {
       // we can use AArch64's bit test and branch here but
       // markoopDesc does not define a bit index just the bit value
       // so assert in case the bit pos changes
@@ -4518,7 +4518,7 @@
       return;
     }
 
-    if (UseBiasedLocking) {
+    if (UseBiasedLocking && !UseOptoBiasInlining) {
       __ biased_locking_exit(oop, tmp, cont);
     }
 
--- a/src/cpu/aarch64/vm/assembler_aarch64.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/aarch64/vm/assembler_aarch64.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -1210,7 +1210,7 @@
 
   INSN(ldrs, 0b00, 1);
   INSN(ldrd, 0b01, 1);
-  INSN(ldrq, 0x10, 1);
+  INSN(ldrq, 0b10, 1);
 
 #undef INSN
 
@@ -2285,13 +2285,13 @@
 #undef INSN
 
   // Table vector lookup
-#define INSN(NAME, op)                                                                                       \
-  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, unsigned registers, FloatRegister Vm) {  \
-    starti;                                                                                                  \
-    assert(T == T8B || T == T16B, "invalid arrangement");                                                    \
-    assert(0 < registers && registers <= 4, "invalid number of registers");                                  \
-    f(0, 31), f((int)T & 1, 30), f(0b001110000, 29, 21), rf(Vm, 16), f(0, 15);                               \
-    f(registers - 1, 14, 13), f(op, 12),f(0b00, 11, 10), rf(Vn, 5), rf(Vd, 0);                               \
+#define INSN(NAME, op)                                                  \
+  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, unsigned registers, FloatRegister Vm) { \
+    starti;                                                             \
+    assert(T == T8B || T == T16B, "invalid arrangement");               \
+    assert(0 < registers && registers <= 4, "invalid number of registers"); \
+    f(0, 31), f((int)T & 1, 30), f(0b001110000, 29, 21), rf(Vm, 16), f(0, 15); \
+    f(registers - 1, 14, 13), f(op, 12),f(0b00, 11, 10), rf(Vn, 5), rf(Vd, 0); \
   }
 
   INSN(tbl, 0);
@@ -2299,6 +2299,7 @@
 
 #undef INSN
 
+  // AdvSIMD two-reg misc
 #define INSN(NAME, U, opcode)                                                       \
   void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {               \
        starti;                                                                      \
@@ -2316,10 +2317,19 @@
 
 #define ASSERTION (T == T8B || T == T16B || T == T4H || T == T8H)
   INSN(rev32, 1, 0b00000);
+private:
+  INSN(_rbit, 1, 0b00101);
+public:
+
 #undef ASSERTION
 
 #define ASSERTION (T == T8B || T == T16B)
   INSN(rev16, 0, 0b00001);
+  // RBIT only allows T8B and T16B but encodes them oddly.  Argh...
+  void rbit(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {
+    assert((ASSERTION), MSG);
+    _rbit(Vd, SIMD_Arrangement(T & 1 | 0b010), Vn);
+  }
 #undef ASSERTION
 
 #undef MSG
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -3043,7 +3043,9 @@
   // register obj is destroyed afterwards.
 
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
+  assert(bs->kind() == BarrierSet::CardTableForRS ||
+         bs->kind() == BarrierSet::CardTableExtension,
+         "Wrong barrier set kind");
 
   CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
--- a/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -691,7 +691,7 @@
         __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
         __ pop(RegSet::range(r0, r29), sp);         // integer registers except lr & sp        }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
       case BarrierSet::ModRef:
         break;
@@ -731,7 +731,7 @@
           __ pop(RegSet::range(r0, r29), sp);         // integer registers except lr & sp        }
         }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
         {
           CardTableModRefBS* ct = (CardTableModRefBS*)bs;
@@ -2364,7 +2364,7 @@
    *   c_rarg3   - int* table
    *
   * Output:
-   *       rax   - int crc result
+   *       r0   - int crc result
    */
   address generate_updateBytesCRC32C() {
     assert(UseCRC32CIntrinsics, "what are we doing here?");
@@ -2435,6 +2435,69 @@
     return start;
   }
 
+  void ghash_multiply(FloatRegister result_lo, FloatRegister result_hi,
+                      FloatRegister a, FloatRegister b, FloatRegister a1_xor_a0,
+                      FloatRegister tmp1, FloatRegister tmp2, FloatRegister tmp3, FloatRegister tmp4) {
+    // Karatsuba multiplication performs a 128*128 -> 256-bit
+    // multiplication in three 128-bit multiplications and a few
+    // additions.
+    //
+    // (C1:C0) = A1*B1, (D1:D0) = A0*B0, (E1:E0) = (A0+A1)(B0+B1)
+    // (A1:A0)(B1:B0) = C1:(C0+C1+D1+E1):(D1+C0+D0+E0):D0
+    //
+    // Inputs:
+    //
+    // A0 in a.d[0]     (subkey)
+    // A1 in a.d[1]
+    // (A1+A0) in a1_xor_a0.d[0]
+    //
+    // B0 in b.d[0]     (state)
+    // B1 in b.d[1]
+
+    __ ext(tmp1, __ T16B, b, b, 0x08);
+    __ pmull2(result_hi, __ T1Q, b, a, __ T2D);  // A1*B1
+    __ eor(tmp1, __ T16B, tmp1, b);            // (B1+B0)
+    __ pmull(result_lo,  __ T1Q, b, a, __ T1D);  // A0*B0
+    __ pmull(tmp2, __ T1Q, tmp1, a1_xor_a0, __ T1D); // (A1+A0)(B1+B0)
+
+    __ ext(tmp4, __ T16B, result_lo, result_hi, 0x08);
+    __ eor(tmp3, __ T16B, result_hi, result_lo); // A1*B1+A0*B0
+    __ eor(tmp2, __ T16B, tmp2, tmp4);
+    __ eor(tmp2, __ T16B, tmp2, tmp3);
+
+    // Register pair <result_hi:result_lo> holds the result of carry-less multiplication
+    __ ins(result_hi, __ D, tmp2, 0, 1);
+    __ ins(result_lo, __ D, tmp2, 1, 0);
+  }
+
+  void ghash_reduce(FloatRegister result, FloatRegister lo, FloatRegister hi,
+                    FloatRegister p, FloatRegister z, FloatRegister t1) {
+    const FloatRegister t0 = result;
+
+    // The GCM field polynomial f is z^128 + p(z), where p =
+    // z^7+z^2+z+1.
+    //
+    //    z^128 === -p(z)  (mod (z^128 + p(z)))
+    //
+    // so, given that the product we're reducing is
+    //    a == lo + hi * z^128
+    // substituting,
+    //      === lo - hi * p(z)  (mod (z^128 + p(z)))
+    //
+    // we reduce by multiplying hi by p(z) and subtracting the result
+    // from (i.e. XORing it with) lo.  Because p has no nonzero high
+    // bits we can do this with two 64-bit multiplications, lo*p and
+    // hi*p.
+
+    __ pmull2(t0, __ T1Q, hi, p, __ T2D);
+    __ ext(t1, __ T16B, t0, z, 8);
+    __ eor(hi, __ T16B, hi, t1);
+    __ ext(t1, __ T16B, z, t0, 8);
+    __ eor(lo, __ T16B, lo, t1);
+    __ pmull(t0, __ T1Q, hi, p, __ T1D);
+    __ eor(result, __ T16B, lo, t0);
+  }
+
   /**
    *  Arguments:
    *
@@ -2448,10 +2511,27 @@
    *  Updated state at c_rarg0
    */
   address generate_ghash_processBlocks() {
+    // Bafflingly, GCM uses little-endian for the byte order, but
+    // big-endian for the bit order.  For example, the polynomial 1 is
+    // represented as the 16-byte string 80 00 00 00 | 12 bytes of 00.
+    //
+    // So, we must either reverse the bytes in each word and do
+    // everything big-endian or reverse the bits in each byte and do
+    // it little-endian.  On AArch64 it's more idiomatic to reverse
+    // the bits in each byte (we have an instruction, RBIT, to do
+  // that) and keep the data in little-endian bit order throughout the
+    // calculation, bit-reversing the inputs and outputs.
+
+    StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
+    __ align(wordSize * 2);
+    address p = __ pc();
+    __ emit_int64(0x87);  // The low-order bits of the field
+                          // polynomial (i.e. p = z^7+z^2+z+1)
+                          // repeated in the low and high parts of a
+                          // 128-bit vector
+    __ emit_int64(0x87);
+
     __ align(CodeEntryAlignment);
-    Label L_ghash_loop, L_exit;
-
-    StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
     address start = __ pc();
 
     Register state   = c_rarg0;
@@ -2462,104 +2542,43 @@
     FloatRegister vzr = v30;
     __ eor(vzr, __ T16B, vzr, vzr); // zero register
 
-    __ mov(v26, __ T16B, 1);
-    __ mov(v27, __ T16B, 63);
-    __ mov(v28, __ T16B, 62);
-    __ mov(v29, __ T16B, 57);
-
-    __ ldrq(v6, Address(state));
-    __ ldrq(v16, Address(subkeyH));
-
-    __ ext(v0, __ T16B, v6, v6, 0x08);
-    __ ext(v1, __ T16B, v16, v16, 0x08);
-    __ eor(v16, __ T16B, v16, v1);
-
-    __ bind(L_ghash_loop);
-
-    __ ldrq(v2, Address(__ post(data, 0x10)));
-    __ rev64(v2, __ T16B, v2); // swap data
-
-    __ ext(v6, __ T16B, v0, v0, 0x08);
-    __ eor(v6, __ T16B, v6, v2);
-    __ ext(v2, __ T16B, v6, v6, 0x08);
-
-    __ pmull2(v7, __ T1Q, v2, v1, __ T2D);  // A1*B1
-    __ eor(v6, __ T16B, v6, v2);
-    __ pmull(v5,  __ T1Q, v2, v1, __ T1D);  // A0*B0
-    __ pmull(v20, __ T1Q, v6, v16, __ T1D);  // (A1 + A0)(B1 + B0)
-
-    __ ext(v21, __ T16B, v5, v7, 0x08);
-    __ eor(v18, __ T16B, v7, v5); // A1*B1 xor A0*B0
-    __ eor(v20, __ T16B, v20, v21);
-    __ eor(v20, __ T16B, v20, v18);
-
-    // Registers pair <v7:v5> holds the result of carry-less multiplication
-    __ ins(v7, __ D, v20, 0, 1);
-    __ ins(v5, __ D, v20, 1, 0);
-
-    // Result of the multiplication is shifted by one bit position
-    // [X3:X2:X1:X0] = [X3:X2:X1:X0] << 1
-    __ ushr(v18, __ T2D, v5, -63 & 63);
-    __ ins(v25, __ D, v18, 1, 0);
-    __ ins(v25, __ D, vzr, 0, 0);
-    __ ushl(v5, __ T2D, v5, v26);
-    __ orr(v5, __ T16B, v5, v25);
-
-    __ ushr(v19, __ T2D, v7, -63 & 63);
-    __ ins(v19, __ D, v19, 1, 0);
-    __ ins(v19, __ D, v18, 0, 1);
-    __ ushl(v7, __ T2D, v7, v26);
-    __ orr(v6, __ T16B, v7, v19);
-
-    __ ins(v24, __ D, v5, 0, 1);
-
-    // A = X0 << 63
-    __ ushl(v21, __ T2D, v5, v27);
-
-    // A = X0 << 62
-    __ ushl(v22, __ T2D, v5, v28);
-
-    // A = X0 << 57
-    __ ushl(v23, __ T2D, v5, v29);
-
-    // D = X1^A^B^C
-    __ eor(v21, __ T16B, v21, v22);
-    __ eor(v21, __ T16B, v21, v23);
-    __ eor(v21, __ T16B, v21, v24);
-    __ ins(v5, __ D, v21, 1, 0);
-
-    // [E1:E0] = [D:X0] >> 1
-    __ ushr(v20, __ T2D, v5, -1 & 63);
-    __ ushl(v18, __ T2D, v5, v27);
-    __ ext(v25, __ T16B, v18, vzr, 0x08);
-    __ orr(v19, __ T16B, v20, v25);
-
-    __ eor(v7, __ T16B, v5, v19);
-
-    // [F1:F0] = [D:X0] >> 2
-    __ ushr(v20, __ T2D, v5, -2 & 63);
-    __ ushl(v18, __ T2D, v5, v28);
-    __ ins(v25, __ D, v18, 0, 1);
-    __ orr(v19, __ T16B, v20, v25);
-
-    __ eor(v7, __ T16B, v7, v19);
-
-    // [G1:G0] = [D:X0] >> 7
-    __ ushr(v20, __ T2D, v5, -7 & 63);
-    __ ushl(v18, __ T2D, v5, v29);
-    __ ins(v25, __ D, v18, 0, 1);
-    __ orr(v19, __ T16B, v20, v25);
-
-    // [H1:H0] = [D^E1^F1^G1:X0^E0^F0^G0]
-    __ eor(v7, __ T16B, v7, v19);
-
-    // Result = [H1:H0]^[X3:X2]
-    __ eor(v0, __ T16B, v7, v6);
-
-    __ subs(blocks, blocks, 1);
-    __ cbnz(blocks, L_ghash_loop);
-
-    __ ext(v1, __ T16B, v0, v0, 0x08);
+    __ ldrq(v0, Address(state));
+    __ ldrq(v1, Address(subkeyH));
+
+    __ rev64(v0, __ T16B, v0);          // Bit-reverse words in state and subkeyH
+    __ rbit(v0, __ T16B, v0);
+    __ rev64(v1, __ T16B, v1);
+    __ rbit(v1, __ T16B, v1);
+
+    __ ldrq(v26, p);
+
+    __ ext(v16, __ T16B, v1, v1, 0x08); // long-swap subkeyH into v1
+    __ eor(v16, __ T16B, v16, v1);      // xor subkeyH into subkeyL (Karatsuba: (A1+A0))
+
+    {
+      Label L_ghash_loop;
+      __ bind(L_ghash_loop);
+
+      __ ldrq(v2, Address(__ post(data, 0x10))); // Load the data, bit
+                                                 // reversing each byte
+      __ rbit(v2, __ T16B, v2);
+      __ eor(v2, __ T16B, v0, v2);   // bit-swapped data ^ bit-swapped state
+
+      // Multiply state in v2 by subkey in v1
+      ghash_multiply(/*result_lo*/v5, /*result_hi*/v7,
+                     /*a*/v1, /*b*/v2, /*a1_xor_a0*/v16,
+                     /*temps*/v6, v20, v18, v21);
+      // Reduce v7:v5 by the field polynomial
+      ghash_reduce(v0, v5, v7, v26, vzr, v20);
+
+      __ sub(blocks, blocks, 1);
+      __ cbnz(blocks, L_ghash_loop);
+    }
+
+    // The bit-reversed result is at this point in v0
+    __ rev64(v1, __ T16B, v0);
+    __ rbit(v1, __ T16B, v1);
+
     __ st1(v1, __ T16B, state);
     __ ret(lr);
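
A cross-check of the Karatsuba identity used by ghash_multiply() may help readers follow the register shuffling above. The standalone C++ sketch below (illustrative only, not part of this changeset; file and variable names are invented for the example) verifies the three-multiplication formula against the four-multiplication schoolbook product, with XOR playing the role of carry-less addition. ghash_reduce() then folds the 256-bit product back to 128 bits using z^128 === z^7+z^2+z+1 (mod the field polynomial), which is why the constant 0x87 is emitted just before the stub entry.

    // clmul_karatsuba_check.cpp -- standalone sketch, not HotSpot source.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    struct U128 { uint64_t lo, hi; };

    // 64x64 -> 128-bit carry-less multiply, bit by bit (no PMULL needed).
    static U128 clmul64(uint64_t a, uint64_t b) {
      U128 r = { 0, 0 };
      for (int i = 0; i < 64; i++) {
        if ((b >> i) & 1) {
          r.lo ^= a << i;
          if (i > 0) r.hi ^= a >> (64 - i);
        }
      }
      return r;
    }

    int main() {
      uint64_t a0 = 0x0123456789abcdefULL, a1 = 0xfedcba9876543210ULL;
      uint64_t b0 = 0x0f1e2d3c4b5a6978ULL, b1 = 0x8796a5b4c3d2e1f0ULL;

      // Karatsuba: three multiplications.
      U128 c = clmul64(a1, b1);            // (C1:C0) = A1*B1
      U128 d = clmul64(a0, b0);            // (D1:D0) = A0*B0
      U128 e = clmul64(a0 ^ a1, b0 ^ b1);  // (E1:E0) = (A0+A1)(B0+B1)

      // (A1:A0)(B1:B0) = C1:(C0+C1+D1+E1):(D1+C0+D0+E0):D0
      uint64_t x0 = d.lo;
      uint64_t x1 = d.hi ^ c.lo ^ d.lo ^ e.lo;
      uint64_t x2 = c.lo ^ c.hi ^ d.hi ^ e.hi;
      uint64_t x3 = c.hi;

      // Schoolbook cross terms, four multiplications, for comparison.
      U128 m1 = clmul64(a0, b1);
      U128 m2 = clmul64(a1, b0);
      assert(x0 == d.lo && x3 == c.hi);     // outer words agree by construction
      assert(x1 == (d.hi ^ m1.lo ^ m2.lo)); // middle-low word
      assert(x2 == (c.lo ^ m1.hi ^ m2.hi)); // middle-high word
      printf("Karatsuba identity verified\n");
      return 0;
    }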
 
--- a/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -186,7 +186,7 @@
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         if (val == noreg) {
--- a/src/cpu/aarch64/vm/vm_version_aarch64.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/aarch64/vm/vm_version_aarch64.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -177,6 +177,12 @@
   if (UseCRC32 && (auxv & HWCAP_CRC32) == 0) {
     warning("UseCRC32 specified, but not supported on this CPU");
   }
+
+  if (UseAdler32Intrinsics) {
+    warning("Adler32Intrinsics not available on this CPU.");
+    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
+  }
+
   if (auxv & HWCAP_AES) {
     UseAES = UseAES || FLAG_IS_DEFAULT(UseAES);
     UseAESIntrinsics =
--- a/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -2614,7 +2614,7 @@
 void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
   CardTableModRefBS* bs =
     barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-  assert(bs->kind() == BarrierSet::CardTableModRef ||
+  assert(bs->kind() == BarrierSet::CardTableForRS ||
          bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
 #ifdef ASSERT
   cmpdi(CCR0, Rnew_val, 0);
--- a/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -656,7 +656,7 @@
           __ bind(filtered);
         }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
       case BarrierSet::ModRef:
         break;
@@ -697,7 +697,7 @@
           }
         }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
         {
           Label Lskip_loop, Lstore_loop;
--- a/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -105,7 +105,7 @@
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         Label Lnull, Ldone;
--- a/src/cpu/ppc/vm/vm_version_ppc.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/ppc/vm/vm_version_ppc.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -200,6 +200,11 @@
     FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
   }
 
+  if (UseAdler32Intrinsics) {
+    warning("Adler32Intrinsics not available on this CPU.");
+    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
+  }
+
   if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
     UseMultiplyToLenIntrinsic = true;
   }
--- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -3958,7 +3958,7 @@
   if (new_val == G0) return;
   CardTableModRefBS* bs =
     barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-  assert(bs->kind() == BarrierSet::CardTableModRef ||
+  assert(bs->kind() == BarrierSet::CardTableForRS ||
          bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
   card_table_write(bs->byte_map_base, tmp, store_addr);
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/sparc/vm/memset_with_concurrent_readers_sparc.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shared/memset_with_concurrent_readers.hpp"
+#include "runtime/prefetch.inline.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+#if INCLUDE_ALL_GCS
+
+// An implementation of memset, for use when there may be concurrent
+// readers of the region being stored into.
+//
+// We can't use the standard library memset if it is implemented using
+// block initializing stores.  Doing so can result in concurrent readers
+// seeing spurious zeros.
+//
+// We can't use the obvious C/C++ for-loop, because the compiler may
+// recognize the idiomatic loop and optimize it into a call to the
+// standard library memset; we've seen exactly this happen with, for
+// example, Solaris Studio 12.3.  Hence the use of inline assembly
+// code, hiding loops from the compiler's optimizer.
+//
+// We don't attempt to use the standard library memset when it is safe
+// to do so.  We could conservatively do so by detecting the presence
+// of block initializing stores (VM_Version::has_blk_init()), but the
+// implementation provided here should be sufficient.
+
+inline void fill_subword(void* start, void* end, int value) {
+  STATIC_ASSERT(BytesPerWord == 8);
+  assert(pointer_delta(end, start, 1) < BytesPerWord, "precondition");
+  // Dispatch on (end - start).
+  void* pc;
+  __asm__ volatile(
+    // offset := (7 - (end - start)) + 3
+    //   3 instructions from rdpc to DISPATCH
+    " sub %[offset], %[end], %[offset]\n\t" // offset := start - end
+    " sllx %[offset], 2, %[offset]\n\t" // scale offset for instruction size of 4
+    " add %[offset], 40, %[offset]\n\t" // offset += 10 * instruction size
+    " rd %pc, %[pc]\n\t"                // dispatch on scaled offset
+    " jmpl %[pc]+%[offset], %g0\n\t"
+    "  nop\n\t"
+    // DISPATCH: no direct reference, but without it the store block may be elided.
+    "1:\n\t"
+    " stb %[value], [%[end]-7]\n\t" // end[-7] = value
+    " stb %[value], [%[end]-6]\n\t"
+    " stb %[value], [%[end]-5]\n\t"
+    " stb %[value], [%[end]-4]\n\t"
+    " stb %[value], [%[end]-3]\n\t"
+    " stb %[value], [%[end]-2]\n\t"
+    " stb %[value], [%[end]-1]\n\t" // end[-1] = value
+    : /* no outputs */
+      [pc] "&=r" (pc)               // temp
+    : [offset] "&+r" (start),
+      [end] "r" (end),
+      [value] "r" (value)
+    : "memory");
+}
+
+void memset_with_concurrent_readers(void* to, int value, size_t size) {
+  Prefetch::write(to, 0);
+  void* end = static_cast<char*>(to) + size;
+  if (size >= BytesPerWord) {
+    // Fill any partial word prefix.
+    uintx* aligned_to = static_cast<uintx*>(align_ptr_up(to, BytesPerWord));
+    fill_subword(to, aligned_to, value);
+
+    // Compute fill word.
+    STATIC_ASSERT(BitsPerByte == 8);
+    STATIC_ASSERT(BitsPerWord == 64);
+    uintx xvalue = value & 0xff;
+    xvalue |= (xvalue << 8);
+    xvalue |= (xvalue << 16);
+    xvalue |= (xvalue << 32);
+
+    uintx* aligned_end = static_cast<uintx*>(align_ptr_down(end, BytesPerWord));
+    assert(aligned_to <= aligned_end, "invariant");
+
+    // for ( ; aligned_to < aligned_end; ++aligned_to) {
+    //   *aligned_to = xvalue;
+    // }
+    uintptr_t temp;
+    __asm__ volatile(
+      // Unroll loop x8.
+      " sub %[aend], %[ato], %[temp]\n\t"
+      " cmp %[temp], 56\n\t"           // cc := (aligned_end - aligned_to) > 7 words
+      " ba %xcc, 2f\n\t"               // goto TEST always
+      "  sub %[aend], 56, %[temp]\n\t" // limit := aligned_end - 7 words
+      // LOOP:
+      "1:\n\t"                         // unrolled x8 store loop top
+      " cmp %[temp], %[ato]\n\t"       // cc := limit > (next) aligned_to
+      " stx %[xvalue], [%[ato]-64]\n\t" // store 8 words, aligned_to pre-incremented
+      " stx %[xvalue], [%[ato]-56]\n\t"
+      " stx %[xvalue], [%[ato]-48]\n\t"
+      " stx %[xvalue], [%[ato]-40]\n\t"
+      " stx %[xvalue], [%[ato]-32]\n\t"
+      " stx %[xvalue], [%[ato]-24]\n\t"
+      " stx %[xvalue], [%[ato]-16]\n\t"
+      " stx %[xvalue], [%[ato]-8]\n\t"
+      // TEST:
+      "2:\n\t"
+      " bgu,a %xcc, 1b\n\t"            // goto LOOP if more than 7 words remaining
+      "  add %[ato], 64, %[ato]\n\t"   // aligned_to += 8, for next iteration
+      // Fill remaining < 8 full words.
+      // Dispatch on (aligned_end - aligned_to).
+      // offset := (7 - (aligned_end - aligned_to)) + 3
+      //   3 instructions from rdpc to DISPATCH
+      " sub %[ato], %[aend], %[ato]\n\t" // offset := aligned_to - aligned_end
+      " srax %[ato], 1, %[ato]\n\t"      // scale offset for instruction size of 4
+      " add %[ato], 40, %[ato]\n\t"      // offset += 10 * instruction size
+      " rd %pc, %[temp]\n\t"             // dispatch on scaled offset
+      " jmpl %[temp]+%[ato], %g0\n\t"
+      "  nop\n\t"
+      // DISPATCH: no direct reference, but without it the store block may be elided.
+      "3:\n\t"
+      " stx %[xvalue], [%[aend]-56]\n\t" // aligned_end[-7] = xvalue
+      " stx %[xvalue], [%[aend]-48]\n\t"
+      " stx %[xvalue], [%[aend]-40]\n\t"
+      " stx %[xvalue], [%[aend]-32]\n\t"
+      " stx %[xvalue], [%[aend]-24]\n\t"
+      " stx %[xvalue], [%[aend]-16]\n\t"
+      " stx %[xvalue], [%[aend]-8]\n\t"  // aligned_end[-1] = xvalue
+      : /* no outputs */
+        [temp] "&=r" (temp)
+      : [ato] "&+r" (aligned_to),
+        [aend] "r" (aligned_end),
+        [xvalue] "r" (xvalue)
+      : "cc", "memory");
+    to = aligned_end;           // setup for suffix
+  }
+  // Fill any partial word suffix.  Also the prefix if size < BytesPerWord.
+  fill_subword(to, end, value);
+}
+
+#endif // INCLUDE_ALL_GCS
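
A reading aid for the new file above: the scaled-pc dispatch in fill_subword() is a computed jump into a run of byte stores, Duff's-device style, so that exactly (end - start) of the trailing stb instructions execute. The plain C++ rendering below (a sketch for exposition only; the committed version must stay in inline assembly precisely so the optimizer cannot recognize the loop and turn it back into a memset call) shows the equivalent control flow:

    // Reference rendering of fill_subword's dispatch; not HotSpot source.
    #include <cassert>
    #include <cstddef>

    void fill_subword_ref(void* start, void* end, int value) {
      char* e = static_cast<char*>(end);
      size_t n = e - static_cast<char*>(start); // 0..7 trailing bytes
      assert(n < 8);
      switch (n) {                   // the asm's scaled-pc jump lands here
      case 7: e[-7] = (char)value;   // fall through
      case 6: e[-6] = (char)value;   // fall through
      case 5: e[-5] = (char)value;   // fall through
      case 4: e[-4] = (char)value;   // fall through
      case 3: e[-3] = (char)value;   // fall through
      case 2: e[-2] = (char)value;   // fall through
      case 1: e[-1] = (char)value;   // fall through
      case 0: break;
      }
    }

The unrolled main loop in memset_with_concurrent_readers() ends with the same dispatch trick to fill the final 0..7 full words.
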
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -981,7 +981,7 @@
           __ restore();
         }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
       case BarrierSet::ModRef:
         break;
@@ -1014,7 +1014,7 @@
           __ restore();
         }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
         {
           CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
@@ -5110,6 +5110,188 @@
     return start;
   }
 
+#define ADLER32_NUM_TEMPS 16
+
+  /**
+   *  Arguments:
+   *
+   * Inputs:
+   *   O0   - int   adler
+   *   O1   - byte* buff
+   *   O2   - int   len
+   *
+   * Output:
+   *   O0   - int adler result
+   */
+  address generate_updateBytesAdler32() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "updateBytesAdler32");
+    address start = __ pc();
+
+    Label L_cleanup_loop, L_cleanup_loop_check;
+    Label L_main_loop_check, L_main_loop, L_inner_loop, L_inner_loop_check;
+    Label L_nmax_check_done;
+
+    // Aliases
+    Register s1     = O0;
+    Register s2     = O3;
+    Register buff   = O1;
+    Register len    = O2;
+    Register temp[ADLER32_NUM_TEMPS] = {L0, L1, L2, L3, L4, L5, L6, L7, I0, I1, I2, I3, I4, I5, G3, I7};
+
+    // Max number of bytes we can process before having to take the mod
+    // 0x15B0 is 5552 in decimal, the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
+    unsigned long NMAX = 0x15B0;
+
+    // Zero-out the upper bits of len
+    __ clruwu(len);
+
+    // Create the mask 0xFFFF
+    __ set64(0x00FFFF, O4, O5); // O5 is the temp register
+
+    // s1 is initialized to the lower 16 bits of adler
+    // s2 is initialized to the upper 16 bits of adler
+    __ srlx(O0, 16, O5); // adler >> 16
+    __ and3(O0, O4, s1); // s1  = (adler & 0xFFFF)
+    __ and3(O5, O4, s2); // s2  = ((adler >> 16) & 0xFFFF)
+
+    // The pipelined loop needs at least 16 elements for 1 iteration
+    // It does check this, but it is more effective to skip to the cleanup loop
+    // Setup the constant for cutoff checking
+    __ mov(15, O4);
+
+    // Check if we are above the cutoff, if not go to the cleanup loop immediately
+    __ cmp_and_br_short(len, O4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_loop_check);
+
+    // Free up some registers for our use
+    for (int i = 0; i < ADLER32_NUM_TEMPS; i++) {
+      __ movxtod(temp[i], as_FloatRegister(2*i));
+    }
+
+    // Loop maintenance stuff is done at the end of the loop, so skip to there
+    __ ba_short(L_main_loop_check);
+
+    __ BIND(L_main_loop);
+
+    // Prologue for inner loop
+    __ ldub(buff, 0, L0);
+    __ dec(O5);
+
+    for (int i = 1; i < 8; i++) {
+      __ ldub(buff, i, temp[i]);
+    }
+
+    __ inc(buff, 8);
+
+    // Inner loop processes 16 elements at a time, might never execute if only 16 elements
+    // are to be processed by the outer loop
+    __ ba_short(L_inner_loop_check);
+
+    __ BIND(L_inner_loop);
+
+    for (int i = 0; i < 8; i++) {
+      __ ldub(buff, (2*i), temp[(8+(2*i)) % ADLER32_NUM_TEMPS]);
+      __ add(s1, temp[i], s1);
+      __ ldub(buff, (2*i)+1, temp[(8+(2*i)+1) % ADLER32_NUM_TEMPS]);
+      __ add(s2, s1, s2);
+    }
+
+    // Original temp 0-7 used and new loads to temp 0-7 issued
+    // temp 8-15 ready to be consumed
+    __ add(s1, I0, s1);
+    __ dec(O5);
+    __ add(s2, s1, s2);
+    __ add(s1, I1, s1);
+    __ inc(buff, 16);
+    __ add(s2, s1, s2);
+
+    for (int i = 0; i < 6; i++) {
+      __ add(s1, temp[10+i], s1);
+      __ add(s2, s1, s2);
+    }
+
+    __ BIND(L_inner_loop_check);
+    __ nop();
+    __ cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_inner_loop);
+
+    // Epilogue
+    for (int i = 0; i < 4; i++) {
+      __ ldub(buff, (2*i), temp[8+(2*i)]);
+      __ add(s1, temp[i], s1);
+      __ ldub(buff, (2*i)+1, temp[8+(2*i)+1]);
+      __ add(s2, s1, s2);
+    }
+
+    __ add(s1, temp[4], s1);
+    __ inc(buff, 8);
+
+    for (int i = 0; i < 11; i++) {
+      __ add(s2, s1, s2);
+      __ add(s1, temp[5+i], s1);
+    }
+
+    __ add(s2, s1, s2);
+
+    // Take the mod for s1 and s2
+    __ set64(0xFFF1, L0, L1);
+    __ udivx(s1, L0, L1);
+    __ udivx(s2, L0, L2);
+    __ mulx(L0, L1, L1);
+    __ mulx(L0, L2, L2);
+    __ sub(s1, L1, s1);
+    __ sub(s2, L2, s2);
+
+    // Make sure there is something left to process
+    __ BIND(L_main_loop_check);
+    __ set64(NMAX, L0, L1);
+    // k = len < NMAX ? len : NMAX
+    __ cmp_and_br_short(len, L0, Assembler::greaterEqualUnsigned, Assembler::pt, L_nmax_check_done);
+    __ andn(len, 0x0F, L0); // only loop a multiple of 16 times
+    __ BIND(L_nmax_check_done);
+    __ mov(L0, O5);
+    __ sub(len, L0, len); // len -= k
+
+    __ srlx(O5, 4, O5); // multiplies of 16
+    __ cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_main_loop);
+
+    // Restore anything we used, take the mod one last time, combine and return
+    // Restore any registers we saved
+    for (int i = 0; i < ADLER32_NUM_TEMPS; i++) {
+      __ movdtox(as_FloatRegister(2*i), temp[i]);
+    }
+
+    // There might be nothing left to process
+    __ ba_short(L_cleanup_loop_check);
+
+    __ BIND(L_cleanup_loop);
+    __ ldub(buff, 0, O4); // load single byte from buffer
+    __ inc(buff); // buff++
+    __ add(s1, O4, s1); // s1 += *buff++;
+    __ dec(len); // len--
+    __ add(s1, s2, s2); // s2 += s1;
+    __ BIND(L_cleanup_loop_check);
+    __ nop();
+    __ cmp_and_br_short(len, 0, Assembler::notEqual, Assembler::pt, L_cleanup_loop);
+
+    // Take the mod one last time
+    __ set64(0xFFF1, O1, O2);
+    __ udivx(s1, O1, O2);
+    __ udivx(s2, O1, O5);
+    __ mulx(O1, O2, O2);
+    __ mulx(O1, O5, O5);
+    __ sub(s1, O2, s1);
+    __ sub(s2, O5, s2);
+
+    // Combine lower bits and higher bits
+    __ sllx(s2, 16, s2); // s2 = s2 << 16
+    __ or3(s1, s2, s1);  // adler = s2 | s1
+    // Final return value is in O0
+    __ retl();
+    __ delayed()->nop();
+
+    return start;
+  }
+
   void generate_initial() {
     // Generates all stubs and initializes the entry points
 
@@ -5206,6 +5388,11 @@
     if (UseCRC32CIntrinsics) {
       StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
     }
+
+    // generate Adler32 intrinsics code
+    if (UseAdler32Intrinsics) {
+      StubRoutines::_updateBytesAdler32 = generate_updateBytesAdler32();
+    }
   }
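
For orientation: generate_updateBytesAdler32() is an unrolled, software-pipelined version of the standard zlib recurrence, with the expensive mod 65521 (0xFFF1) deferred for up to NMAX = 5552 bytes, the largest count for which the 32-bit sums cannot overflow. A plain C++ reference of the same computation (a sketch for comparison only, not part of the change):

    // adler32_ref.cpp -- scalar reference; names are local to this example.
    #include <cstddef>
    #include <cstdint>

    static const uint32_t BASE = 65521; // largest prime < 2^16, i.e. 0xFFF1
    static const size_t   NMAX = 5552;  // largest n: 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1

    uint32_t adler32_ref(uint32_t adler, const uint8_t* buf, size_t len) {
      uint32_t s1 = adler & 0xFFFF;         // low 16 bits
      uint32_t s2 = (adler >> 16) & 0xFFFF; // high 16 bits
      while (len > 0) {
        size_t k = len < NMAX ? len : NMAX; // mod can be deferred for k bytes
        len -= k;
        while (k-- > 0) {
          s1 += *buf++; // s1 = 1 + sum of bytes
          s2 += s1;     // s2 = sum of running s1 values
        }
        s1 %= BASE;     // the stub does this with udivx/mulx/sub
        s2 %= BASE;
      }
      return (s2 << 16) | s1; // combine, as the stub's sllx/or3 epilogue does
    }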
 
 
--- a/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -41,7 +41,7 @@
 enum /* platform_dependent_constants */ {
   // %%%%%%%% May be able to shrink this a lot
   code_size1 = 20000,           // simply increase if too small (assembler will crash if too small)
-  code_size2 = 24000            // simply increase if too small (assembler will crash if too small)
+  code_size2 = 27000            // simply increase if too small (assembler will crash if too small)
 };
 
 class Sparc {
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -91,7 +91,7 @@
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         if (index == noreg ) {
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/sparc/vm/vm_version_sparc.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -85,27 +85,6 @@
   _supports_cx8 = has_v9();
   _supports_atomic_getset4 = true; // swap instruction
 
-  // There are Fujitsu Sparc64 CPUs which support blk_init as well so
-  // we have to take this check out of the 'is_niagara()' block below.
-  if (has_blk_init()) {
-    // When using CMS or G1, we cannot use memset() in BOT updates
-    // because the sun4v/CMT version in libc_psr uses BIS which
-    // exposes "phantom zeros" to concurrent readers. See 6948537.
-    if (FLAG_IS_DEFAULT(UseMemSetInBOT) && (UseConcMarkSweepGC || UseG1GC)) {
-      FLAG_SET_DEFAULT(UseMemSetInBOT, false);
-    }
-    // Issue a stern warning if the user has explicitly set
-    // UseMemSetInBOT (it is known to cause issues), but allow
-    // use for experimentation and debugging.
-    if (UseConcMarkSweepGC || UseG1GC) {
-      if (UseMemSetInBOT) {
-        assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
-        warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
-                " on sun4v; please understand that you are using at your own risk!");
-      }
-    }
-  }
-
   if (is_niagara()) {
     // Indirect branch is the same cost as direct
     if (FLAG_IS_DEFAULT(UseInlineCaches)) {
@@ -377,6 +356,15 @@
     FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
   }
 
+  if (UseVIS > 2) {
+    if (FLAG_IS_DEFAULT(UseAdler32Intrinsics)) {
+      FLAG_SET_DEFAULT(UseAdler32Intrinsics, true);
+    }
+  } else if (UseAdler32Intrinsics) {
+    warning("SPARC Adler32 intrinsics require VIS3 instruction support. Intrinsics will be disabled.");
+    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
+  }
+
   if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
     (cache_line_size > ContendedPaddingWidth))
     ContendedPaddingWidth = cache_line_size;
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -4320,7 +4320,9 @@
   // register obj is destroyed afterwards.
 
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
+  assert(bs->kind() == BarrierSet::CardTableForRS ||
+         bs->kind() == BarrierSet::CardTableExtension,
+         "Wrong barrier set kind");
 
   CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -722,7 +722,7 @@
            __ popa();
          }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
       case BarrierSet::ModRef:
         break;
@@ -754,7 +754,7 @@
         }
         break;
 
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
         {
           CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -367,16 +367,20 @@
 #ifdef ASSERT
     // verify that threads correspond
     {
-      Label L, S;
+      Label L1, L2, L3;
       __ cmpptr(r15_thread, thread);
-      __ jcc(Assembler::notEqual, S);
+      __ jcc(Assembler::equal, L1);
+      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
+      __ bind(L1);
       __ get_thread(rbx);
+      __ cmpptr(r15_thread, thread);
+      __ jcc(Assembler::equal, L2);
+      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
+      __ bind(L2);
       __ cmpptr(r15_thread, rbx);
-      __ jcc(Assembler::equal, L);
-      __ bind(S);
-      __ jcc(Assembler::equal, L);
+      __ jcc(Assembler::equal, L3);
       __ stop("StubRoutines::call_stub: threads must correspond");
-      __ bind(L);
+      __ bind(L3);
     }
 #endif
 
@@ -450,15 +454,20 @@
 #ifdef ASSERT
     // verify that threads correspond
     {
-      Label L, S;
+      Label L1, L2, L3;
       __ cmpptr(r15_thread, thread);
-      __ jcc(Assembler::notEqual, S);
+      __ jcc(Assembler::equal, L1);
+      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
+      __ bind(L1);
       __ get_thread(rbx);
+      __ cmpptr(r15_thread, thread);
+      __ jcc(Assembler::equal, L2);
+      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
+      __ bind(L2);
       __ cmpptr(r15_thread, rbx);
-      __ jcc(Assembler::equal, L);
-      __ bind(S);
+      __ jcc(Assembler::equal, L3);
       __ stop("StubRoutines::catch_exception: threads must correspond");
-      __ bind(L);
+      __ bind(L3);
     }
 #endif
 
@@ -1244,7 +1253,7 @@
            __ popa();
         }
          break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
       case BarrierSet::ModRef:
         break;
@@ -1284,7 +1293,7 @@
           __ popa();
         }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
         {
           CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
--- a/src/cpu/x86/vm/templateTable_x86.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/x86/vm/templateTable_x86.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -200,7 +200,7 @@
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         if (val == noreg) {
--- a/src/cpu/x86/vm/vm_version_x86.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/cpu/x86/vm/vm_version_x86.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -714,6 +714,11 @@
     FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
   }
 
+  if (UseAdler32Intrinsics) {
+    warning("Adler32Intrinsics not available on this CPU.");
+    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
+  }
+
   // Adjust RTM (Restricted Transactional Memory) flags
   if (!supports_rtm() && UseRTMLocking) {
     // Can't continue because UseRTMLocking affects UseBiasedLocking flag
--- a/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,10 @@
   return cpuinfo_field_contains("cpu", "Niagara");
 }
 
+static bool detect_M_family() {
+  return cpuinfo_field_contains("cpu", "SPARC-M");
+}
+
 static bool detect_blkinit() {
   return cpuinfo_field_contains("cpucaps", "blkinit");
 }
@@ -66,6 +70,11 @@
     features = niagara1_m | T_family_m;
   }
 
+  if (detect_M_family()) {
+    NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Detected Linux on M family");)
+    features = sun4v_m | generic_v9_m | M_family_m | T_family_m;
+  }
+
   if (detect_blkinit()) {
     features |= blk_init_instructions_m;
   }
--- a/src/share/vm/adlc/Doc/Syntax.doc	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/adlc/Doc/Syntax.doc	Fri Sep 18 14:21:46 2015 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1997, 1998, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,7 @@
 the architecture of a processor, and is the input to the ADL Compiler.  The
 ADL Compiler compiles an ADL file into code which is incorporated into the
 Optimizing Just In Time Compiler (OJIT) to generate efficient and correct code
-for the target architecture.  The ADL describes three bassic different types
+for the target architecture.  The ADL describes three basic different types
 of architectural features.  It describes the instruction set (and associated
 operands) of the target architecture.  It describes the register set of the
 target architecture along with relevant information for the register allocator.
--- a/src/share/vm/c1/c1_Compiler.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/c1/c1_Compiler.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -32,7 +32,6 @@
 #include "c1/c1_Runtime1.hpp"
 #include "c1/c1_ValueType.hpp"
 #include "compiler/compileBroker.hpp"
-#include "compiler/compilerOracle.hpp"
 #include "interpreter/linkResolver.hpp"
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -4212,7 +4212,7 @@
   if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) {
     return;
   }
-  CompileTask::print_inlining(callee, scope()->level(), bci(), msg);
+  CompileTask::print_inlining_tty(callee, scope()->level(), bci(), msg);
   if (success && CIPrintMethodCodes) {
     callee->print_codes();
   }
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -1425,7 +1425,7 @@
       G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       // No pre barriers
       break;
@@ -1445,7 +1445,7 @@
       G1SATBCardTableModRef_post_barrier(addr,  new_val);
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       CardTableModRef_post_barrier(addr,  new_val);
       break;
--- a/src/share/vm/ci/bcEscapeAnalyzer.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -1447,7 +1447,6 @@
 
     if (methodData() == NULL)
       return;
-    bool printit = _method->should_print_assembly();
     if (methodData()->has_escape_info()) {
       TRACE_BCEA(2, tty->print_cr("[EA] Reading previous results for %s.%s",
                                   method->holder()->name()->as_utf8(),
--- a/src/share/vm/classfile/classLoader.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/classfile/classLoader.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -28,8 +28,8 @@
 #include "classfile/classLoader.hpp"
 #include "classfile/classLoaderData.inline.hpp"
 #include "classfile/classLoaderExt.hpp"
-#include "classfile/imageFile.hpp"
 #include "classfile/javaClasses.hpp"
+#include "classfile/jimage.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
@@ -58,6 +58,7 @@
 #include "runtime/os.hpp"
 #include "runtime/threadCritical.hpp"
 #include "runtime/timer.hpp"
+#include "runtime/vm_version.hpp"
 #include "services/management.hpp"
 #include "services/threadService.hpp"
 #include "utilities/events.hpp"
@@ -68,7 +69,7 @@
 #include "classfile/sharedPathsMiscInfo.hpp"
 #endif
 
-// Entry points in zip.dll for loading zip/jar file entries and image file entries
+// Entry points in zip.dll for loading zip/jar file entries
 
 typedef void * * (JNICALL *ZipOpen_t)(const char *name, char **pmsg);
 typedef void (JNICALL *ZipClose_t)(jzfile *zip);
@@ -89,6 +90,15 @@
 static ZipInflateFully_t ZipInflateFully    = NULL;
 static Crc32_t           Crc32              = NULL;
 
+// Entry points for jimage.dll for loading jimage file entries
+
+static JImageOpen_t                    JImageOpen                    = NULL;
+static JImageClose_t                   JImageClose                   = NULL;
+static JImagePackageToModule_t         JImagePackageToModule         = NULL;
+static JImageFindResource_t            JImageFindResource            = NULL;
+static JImageGetResource_t             JImageGetResource             = NULL;
+static JImageResourceIterator_t        JImageResourceIterator        = NULL;
+
 // Globals
 
 PerfCounter*    ClassLoader::_perf_accumulated_time = NULL;
@@ -141,6 +151,15 @@
   return (strncmp(str, str_to_find, str_to_find_len) == 0);
 }
 
+static const char* get_jimage_version_string() {
+  static char version_string[10] = "";
+  if (version_string[0] == '\0') {
+    jio_snprintf(version_string, sizeof(version_string), "%d.%d",
+                 Abstract_VM_Version::vm_minor_version(), Abstract_VM_Version::vm_micro_version());
+  }
+  return (const char*)version_string;
+}
+
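The helper above lazily formats the version string that jimage lookups expect from the VM's minor and micro version numbers; on a JDK 9 VM that would come out as "9.0". A minimal standalone sketch of the same cache-on-first-use pattern, with placeholder digits standing in for Abstract_VM_Version:

#include <cstdio>

// Sketch only: 9 and 0 are placeholder values standing in for
// Abstract_VM_Version::vm_minor_version() / vm_micro_version().
static const char* jimage_version_string() {
  static char buf[10] = "";
  if (buf[0] == '\0') {        // format once, reuse on later calls
    snprintf(buf, sizeof(buf), "%d.%d", 9, 0);
  }
  return buf;
}

int main() {
  printf("%s\n", jimage_version_string());   // prints "9.0"
  return 0;
}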
 bool string_ends_with(const char* str, const char* str_to_find) {
   size_t str_len = strlen(str);
   size_t str_to_find_len = strlen(str_to_find);
@@ -272,98 +291,114 @@
   }
 }
 
-ClassPathImageEntry::ClassPathImageEntry(ImageFileReader* image) :
+ClassPathImageEntry::ClassPathImageEntry(JImageFile* jimage, const char* name) :
   ClassPathEntry(),
-  _image(image),
-  _module_data(NULL) {
-  guarantee(image != NULL, "image file is null");
-
-  char module_data_name[JVM_MAXPATHLEN];
-  ImageModuleData::module_data_name(module_data_name, _image->name());
-  _module_data = new ImageModuleData(_image, module_data_name);
+  _jimage(jimage) {
+  guarantee(jimage != NULL, "jimage file is null");
+  guarantee(name != NULL, "jimage file name is null");
+  size_t len = strlen(name) + 1;
+  _name = NEW_C_HEAP_ARRAY(const char, len, mtClass);
+  strncpy((char *)_name, name, len);
 }
 
 ClassPathImageEntry::~ClassPathImageEntry() {
-  if (_module_data != NULL) {
-    delete _module_data;
-    _module_data = NULL;
+  if (_name != NULL) {
+    FREE_C_HEAP_ARRAY(const char, _name);
+    _name = NULL;
   }
-
-  if (_image != NULL) {
-    ImageFileReader::close(_image);
-    _image = NULL;
+  if (_jimage != NULL) {
+    (*JImageClose)(_jimage);
+    _jimage = NULL;
   }
 }
 
-const char* ClassPathImageEntry::name() {
-  return _image ? _image->name() : "";
+void ClassPathImageEntry::name_to_package(const char* name, char* buffer, int length) {
+  const char *pslash = strrchr(name, '/');
+  if (pslash == NULL) {
+    buffer[0] = '\0';
+    return;
+  }
+  int len = pslash - name;
+#if INCLUDE_CDS
+  if (len <= 0 && DumpSharedSpaces) {
+    buffer[0] = '\0';
+    return;
+  }
+#endif
+  assert(len > 0, "Bad length for package name");
+  if (len >= length) {
+    buffer[0] = '\0';
+    return;
+  }
+  // drop name after last slash (including slash)
+  // Ex., "java/lang/String.class" => "java/lang"
+  strncpy(buffer, name, len);
+  // ensure string termination (strncpy does not guarantee)
+  buffer[len] = '\0';
 }
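For illustration, a self-contained sketch of the same truncation as name_to_package above, with the VM asserts and CDS special case dropped (package_of is a hypothetical name):

#include <cstdio>
#include <cstring>

// Copy everything before the last '/' into buffer, e.g.
// "java/lang/String.class" => "java/lang".
static void package_of(const char* name, char* buffer, int length) {
  const char* pslash = strrchr(name, '/');
  int len = (pslash == NULL) ? 0 : (int)(pslash - name);
  if (len <= 0 || len >= length) {  // no package, or buffer too small
    buffer[0] = '\0';
    return;
  }
  strncpy(buffer, name, len);
  buffer[len] = '\0';               // strncpy does not terminate here
}

int main() {
  char pkg[64];
  package_of("java/lang/String.class", pkg, (int)sizeof(pkg));
  printf("%s\n", pkg);              // prints "java/lang"
  return 0;
}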
 
+// For a class in a named module, look it up in the jimage file using this syntax:
+//    /<module-name>/<package-name>/<base-class>
+//
+// Assumptions:
+//     1. There are no unnamed modules in the jimage file.
+//     2. A package is in at most one module in the jimage file.
+//
 ClassFileStream* ClassPathImageEntry::open_stream(const char* name, TRAPS) {
-  ImageLocation location;
-  bool found = _image->find_location(name, location);
+  jlong size;
+  JImageLocationRef location = (*JImageFindResource)(_jimage, "", get_jimage_version_string(), name, &size);
 
-  if (!found) {
-    const char *pslash = strrchr(name, '/');
-    int len = pslash - name;
-
-    // NOTE: IMAGE_MAX_PATH is used here since this path is internal to the jimage
-    // (effectively unlimited.)  There are several JCK tests that use paths over
-    // 1024 characters long, the limit on Windows systems.
-    if (pslash && 0 < len && len < IMAGE_MAX_PATH) {
-
-      char path[IMAGE_MAX_PATH];
-      strncpy(path, name, len);
-      path[len] = '\0';
-      const char* moduleName = _module_data->package_to_module(path);
-
-      if (moduleName != NULL && (len + strlen(moduleName) + 2) < IMAGE_MAX_PATH) {
-        jio_snprintf(path, IMAGE_MAX_PATH - 1, "/%s/%s", moduleName, name);
-        location.clear_data();
-        found = _image->find_location(path, location);
-      }
+  if (location == 0) {
+    char package[JIMAGE_MAX_PATH];
+    name_to_package(name, package, JIMAGE_MAX_PATH);
+    if (package[0] != '\0') {
+      const char* module = (*JImagePackageToModule)(_jimage, package);
+      if (module == NULL) {
+        module = "java.base";
+      }
+      location = (*JImageFindResource)(_jimage, module, get_jimage_version_string(), name, &size);
     }
   }
 
-  if (found) {
-    u8 size = location.get_attribute(ImageLocation::ATTRIBUTE_UNCOMPRESSED);
+  if (location != 0) {
     if (UsePerfData) {
       ClassLoader::perf_sys_classfile_bytes_read()->inc(size);
     }
-    u1* data = NEW_RESOURCE_ARRAY(u1, size);
-    _image->get_resource(location, data);
-    return new ClassFileStream(data, (int)size, _image->name());  // Resource allocated
+    char* data = NEW_RESOURCE_ARRAY(char, size);
+    (*JImageGetResource)(_jimage, location, data, size);
+    return new ClassFileStream((u1*)data, (int)size, _name);  // Resource allocated
   }
 
   return NULL;
 }
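The lookup above is two-step: probe the unqualified path first, then map the package to its owning module and retry, defaulting to "java.base" when no mapping exists. A toy stand-in with a made-up index table, just to show the control flow:

#include <cstdio>
#include <map>
#include <string>

// Invented table standing in for the jimage index; only the fully
// qualified "/module/path" form is present, as in a real image.
static const std::map<std::string, long> index_table = {
  { "/java.base/java/lang/String.class", 42 }
};

static long find_resource(const std::string& module, const std::string& name) {
  std::map<std::string, long>::const_iterator it =
      index_table.find("/" + module + "/" + name);
  return it == index_table.end() ? 0 : it->second;
}

int main() {
  std::string name = "java/lang/String.class";
  long location = find_resource("", name);  // step 1: unqualified probe misses
  if (location == 0) {
    const char* module = "java.base";       // step 2: package -> module (stubbed)
    location = find_resource(module, name);
  }
  printf("location = %ld\n", location);     // prints "location = 42"
  return 0;
}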
 
 #ifndef PRODUCT
+bool ctw_visitor(JImageFile* jimage,
+        const char* module_name, const char* version, const char* package,
+        const char* name, const char* extension, void* arg) {
+  if (strcmp(extension, "class") == 0) {
+    Thread* THREAD = Thread::current();
+    char path[JIMAGE_MAX_PATH];
+    jio_snprintf(path, JIMAGE_MAX_PATH - 1, "%s/%s.class", package, name);
+    ClassLoader::compile_the_world_in(path, *(Handle*)arg, THREAD);
+    return !HAS_PENDING_EXCEPTION;
+  }
+  return true;
+}
+
 void ClassPathImageEntry::compile_the_world(Handle loader, TRAPS) {
   tty->print_cr("CompileTheWorld : Compiling all classes in %s", name());
   tty->cr();
-  const ImageStrings strings = _image->get_strings();
-  // Retrieve each path component string.
-  u4 length = _image->table_length();
-  for (u4 i = 0; i < length; i++) {
-    u1* location_data = _image->get_location_data(i);
-
-    if (location_data != NULL) {
-       ImageLocation location(location_data);
-       char path[IMAGE_MAX_PATH];
-       _image->location_path(location, path, IMAGE_MAX_PATH);
-       ClassLoader::compile_the_world_in(path, loader, CHECK);
+  (*JImageResourceIterator)(_jimage, (JImageResourceVisitor_t)ctw_visitor, (void *)&loader);
+  if (HAS_PENDING_EXCEPTION) {
+    if (PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())) {
+      CLEAR_PENDING_EXCEPTION;
+      tty->print_cr("\nCompileTheWorld : Ran out of memory\n");
+      tty->print_cr("Increase class metadata storage if a limit was set");
+    } else {
+      tty->print_cr("\nCompileTheWorld : Unexpected exception occurred\n");
     }
   }
-  if (HAS_PENDING_EXCEPTION) {
-  if (PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())) {
-    CLEAR_PENDING_EXCEPTION;
-    tty->print_cr("\nCompileTheWorld : Ran out of memory\n");
-    tty->print_cr("Increase class metadata storage if a limit was set");
-  } else {
-    tty->print_cr("\nCompileTheWorld : Unexpected exception occurred\n");
-  }
-  }
 }
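Note the visitor contract used by ctw_visitor above: returning true continues the iteration, returning false aborts it (here, as soon as an exception is pending). A hypothetical visitor that merely counts class resources, with JImageFile kept opaque:

#include <cstdio>
#include <cstring>

struct JImageFile;   // opaque; the callback never dereferences it

// Hypothetical visitor: count resources whose extension is "class".
static bool count_classes(JImageFile*, const char* /*module*/,
                          const char* /*version*/, const char* /*package*/,
                          const char* /*name*/, const char* extension,
                          void* arg) {
  if (strcmp(extension, "class") == 0) {
    ++*(int*)arg;
  }
  return true;   // keep iterating
}

int main() {
  int count = 0;
  // In the VM this would be driven by (*JImageResourceIterator)(jimage, ...).
  count_classes(NULL, "java.base", "9.0", "java/lang", "String", "class", &count);
  printf("%d\n", count);   // prints "1"
  return 0;
}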
 
 bool ClassPathImageEntry::is_jrt() {
@@ -490,7 +525,7 @@
   JavaThread* thread = JavaThread::current();
   ClassPathEntry* new_entry = NULL;
   if ((st->st_mode & S_IFREG) == S_IFREG) {
-    // Regular file, should be a zip or image file
+    // Regular file, should be a zip or jimage file
     // Canonicalized filename
     char canonical_path[JVM_MAXPATHLEN];
     if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
@@ -501,9 +536,10 @@
         return NULL;
       }
     }
-    ImageFileReader* image = ImageFileReader::open(canonical_path);
-    if (image != NULL) {
-      new_entry = new ClassPathImageEntry(image);
+    jint error;
+    JImageFile* jimage = (*JImageOpen)(canonical_path, &error);
+    if (jimage != NULL) {
+      new_entry = new ClassPathImageEntry(jimage, canonical_path);
     } else {
       char* error_msg = NULL;
       jzfile* zip;
@@ -682,6 +718,35 @@
   // This lookup only works on 1.3. Do not check for non-null here
 }
 
+void ClassLoader::load_jimage_library() {
+  // First make sure native library is loaded
+  os::native_java_library();
+  // Load jimage library
+  char path[JVM_MAXPATHLEN];
+  char ebuf[1024];
+  void* handle = NULL;
+  if (os::dll_build_name(path, sizeof(path), Arguments::get_dll_dir(), "jimage")) {
+    handle = os::dll_load(path, ebuf, sizeof ebuf);
+  }
+  if (handle == NULL) {
+    vm_exit_during_initialization("Unable to load jimage library", path);
+  }
+
+  // Lookup jimage entry points
+  JImageOpen = CAST_TO_FN_PTR(JImageOpen_t, os::dll_lookup(handle, "JIMAGE_Open"));
+  guarantee(JImageOpen != NULL, "function JIMAGE_Open not found");
+  JImageClose = CAST_TO_FN_PTR(JImageClose_t, os::dll_lookup(handle, "JIMAGE_Close"));
+  guarantee(JImageClose != NULL, "function JIMAGE_Close not found");
+  JImagePackageToModule = CAST_TO_FN_PTR(JImagePackageToModule_t, os::dll_lookup(handle, "JIMAGE_PackageToModule"));
+  guarantee(JImagePackageToModule != NULL, "function JIMAGE_PackageToModule not found");
+  JImageFindResource = CAST_TO_FN_PTR(JImageFindResource_t, os::dll_lookup(handle, "JIMAGE_FindResource"));
+  guarantee(JImageFindResource != NULL, "function JIMAGE_FindResource not found");
+  JImageGetResource = CAST_TO_FN_PTR(JImageGetResource_t, os::dll_lookup(handle, "JIMAGE_GetResource"));
+  guarantee(JImageGetResource != NULL, "function JIMAGE_GetResource not found");
+  JImageResourceIterator = CAST_TO_FN_PTR(JImageResourceIterator_t, os::dll_lookup(handle, "JIMAGE_ResourceIterator"));
+  guarantee(JImageResourceIterator != NULL, "function JIMAGE_ResourceIterator not found");
+}
+
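load_jimage_library follows the usual load-then-resolve pattern and fails hard when any entry point is missing. A POSIX sketch of the same pattern using dlopen/dlsym (library name simplified, signature abbreviated; the VM builds the real path with os::dll_build_name and resolves through os::dll_lookup):

#include <cstdio>
#include <dlfcn.h>

typedef void* (*JImageOpen_t)(const char* name, int* error);

int main() {
  // Illustrative library name; the VM derives the platform path at runtime.
  void* handle = dlopen("libjimage.so", RTLD_NOW);
  if (handle == NULL) {
    fprintf(stderr, "unable to load jimage library: %s\n", dlerror());
    return 1;
  }
  JImageOpen_t jimage_open = (JImageOpen_t)dlsym(handle, "JIMAGE_Open");
  if (jimage_open == NULL) {
    fprintf(stderr, "function JIMAGE_Open not found: %s\n", dlerror());
    return 1;
  }
  printf("resolved JIMAGE_Open\n");
  dlclose(handle);
  return 0;
}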
 jboolean ClassLoader::decompress(void *in, u8 inSize, void *out, u8 outSize, char **pmsg) {
   return (*ZipInflateFully)(in, inSize, out, outSize, pmsg);
 }
@@ -1086,6 +1151,8 @@
 
   // lookup zip library entry points
   load_zip_library();
+  // lookup jimage library entry points
+  load_jimage_library();
 #if INCLUDE_CDS
   // initialize search path
   if (DumpSharedSpaces) {
--- a/src/share/vm/classfile/classLoader.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/classfile/classLoader.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -37,8 +37,7 @@
 
 // Class path entry (directory or zip file)
 
-class ImageFileReader;
-class ImageModuleData;
+class JImageFile;
 
 class ClassPathEntry: public CHeapObj<mtClass> {
  private:
@@ -52,7 +51,7 @@
   }
   virtual bool is_jar_file() = 0;
   virtual const char* name() = 0;
-  virtual ImageFileReader* image() = 0;
+  virtual JImageFile* jimage() = 0;
   // Constructor
   ClassPathEntry();
   // Attempt to locate file_name through this class path entry.
@@ -70,7 +69,7 @@
  public:
   bool is_jar_file()       { return false;  }
   const char* name()       { return _dir; }
-  ImageFileReader* image() { return NULL; }
+  JImageFile* jimage()     { return NULL; }
   ClassPathDirEntry(const char* dir);
   ClassFileStream* open_stream(const char* name, TRAPS);
   // Debugging
@@ -100,7 +99,7 @@
  public:
   bool is_jar_file()       { return true;  }
   const char* name()       { return _zip_name; }
-  ImageFileReader* image() { return NULL; }
+  JImageFile* jimage()     { return NULL; }
   ClassPathZipEntry(jzfile* zip, const char* zip_name);
   ~ClassPathZipEntry();
   u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
@@ -115,16 +114,16 @@
 // For java image files
 class ClassPathImageEntry: public ClassPathEntry {
 private:
-  ImageFileReader* _image;
-  ImageModuleData* _module_data;
+  JImageFile* _jimage;
+  const char* _name;
 public:
   bool is_jar_file()  { return false;  }
-  bool is_open()  { return _image != NULL; }
-  const char* name();
-  ImageFileReader* image() { return _image; }
-  ImageModuleData* module_data() { return _module_data; }
-  ClassPathImageEntry(ImageFileReader* image);
+  bool is_open()  { return _jimage != NULL; }
+  const char* name() { return _name == NULL ? "" : _name; }
+  JImageFile* jimage() { return _jimage; }
+  ClassPathImageEntry(JImageFile* jimage, const char* name);
   ~ClassPathImageEntry();
+  static void name_to_package(const char* name, char* buffer, int length);
   ClassFileStream* open_stream(const char* name, TRAPS);
 
   // Debugging
@@ -206,6 +205,7 @@
   static void setup_search_path(const char *class_path);
 
   static void load_zip_library();
+  static void load_jimage_library();
   static ClassPathEntry* create_class_path_entry(const char *path, const struct stat* st,
                                                  bool throw_exception, TRAPS);
 
--- a/src/share/vm/classfile/imageDecompressor.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.inline.hpp"
-#include "classfile/imageDecompressor.hpp"
-#include "runtime/thread.hpp"
-#include "utilities/bytes.hpp"
-
-/*
- * Allocate in the C heap, not in the resource area; otherwise the JVM crashes.
- * This array's lifetime is the VM's lifetime. The array is never freed and
- * is not expected to contain more than a few references.
- */
-GrowableArray<ImageDecompressor*>* ImageDecompressor::_decompressors =
-  new(ResourceObj::C_HEAP, mtInternal) GrowableArray<ImageDecompressor*>(2, true);
-
-static Symbol* createSymbol(const char* str) {
-  Thread* THREAD = Thread::current();
-  Symbol* sym = SymbolTable::lookup(str, (int) strlen(str), THREAD);
-  if (HAS_PENDING_EXCEPTION) {
-    warning("can't create symbol\n");
-    CLEAR_PENDING_EXCEPTION;
-    return NULL;
-  }
-  return sym;
-}
-
-/*
- * Initialize the array of decompressors.
- */
-bool image_decompressor_init() {
-  Symbol* zipSymbol = createSymbol("zip");
-  if (zipSymbol == NULL) {
-    return false;
-  }
-  ImageDecompressor::add_decompressor(new ZipDecompressor(zipSymbol));
-
-  return true;
-}
-
-/*
- * Decompression entry point. Called from ImageFileReader::get_resource.
- */
-void ImageDecompressor::decompress_resource(u1* compressed, u1* uncompressed,
-        u4 uncompressed_size, const ImageStrings* strings, bool is_C_heap) {
-  bool has_header = false;
-  u1* decompressed_resource = compressed;
-  u1* compressed_resource = compressed;
-
-  // Resource could have been transformed by a stack of decompressors.
-  // Iterate and decompress resources until there is no more header.
-  do {
-    ResourceHeader _header;
-    memcpy(&_header, compressed_resource, sizeof (ResourceHeader));
-    has_header = _header._magic == ResourceHeader::resource_header_magic;
-    if (has_header) {
-      // The decompressed_resource array contains the result of decompression.
-      // When a resource is terminal it is an actual resource, not an
-      // intermediate, partially decompressed content. In that case the
-      // buffer is allocated as mtClass, otherwise as mtOther.
-      decompressed_resource = is_C_heap && _header._is_terminal ?
-              NEW_C_HEAP_ARRAY(u1, _header._uncompressed_size, mtClass) :
-              NEW_C_HEAP_ARRAY(u1, _header._uncompressed_size, mtOther);
-      // Retrieve the decompressor name
-      const char* decompressor_name = strings->get(_header._decompressor_name_offset);
-      if (decompressor_name == NULL) warning("image decompressor not found\n");
-      guarantee(decompressor_name, "image decompressor not found");
-      // Retrieve the decompressor instance
-      ImageDecompressor* decompressor = get_decompressor(decompressor_name);
-      if (decompressor == NULL) {
-        warning("image decompressor %s not found\n", decompressor_name);
-      }
-      guarantee(decompressor, "image decompressor not found");
-      u1* compressed_resource_base = compressed_resource;
-      compressed_resource += ResourceHeader::resource_header_length;
-      // Ask the decompressor to decompress the compressed content
-      decompressor->decompress_resource(compressed_resource, decompressed_resource,
-        &_header, strings);
-      if (compressed_resource_base != compressed) {
-        FREE_C_HEAP_ARRAY(char, compressed_resource_base);
-      }
-      compressed_resource = decompressed_resource;
-    }
-  } while (has_header);
-  memcpy(uncompressed, decompressed_resource, uncompressed_size);
-}
-
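The loop above keeps peeling headers off the resource until the magic word no longer matches, freeing each intermediate buffer as it goes. A tiny sketch of just the termination test, assuming the native-endian magic layout of the ResourceHeader struct declared in imageDecompressor.hpp below:

#include <cstdint>
#include <cstdio>
#include <cstring>

static const uint32_t RESOURCE_HEADER_MAGIC = 0xCAFEFAFA;

// Another decompression pass is needed while the first four bytes
// still carry the header magic (read in native endian, exactly as
// the memcpy of the header struct does above).
static bool has_resource_header(const uint8_t* bytes) {
  uint32_t magic;
  memcpy(&magic, bytes, sizeof(magic));
  return magic == RESOURCE_HEADER_MAGIC;
}

int main() {
  uint8_t classfile[4] = { 0xCA, 0xFE, 0xBA, 0xBE };  // a classfile, not a header
  printf("%s\n", has_resource_header(classfile) ? "compressed" : "terminal");
  return 0;
}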
-// Zip decompressor
-
-void ZipDecompressor::decompress_resource(u1* data, u1* uncompressed,
-        ResourceHeader* header, const ImageStrings* strings) {
-  char* msg = NULL;
-  jboolean res = ClassLoader::decompress(data, header->_size, uncompressed,
-          header->_uncompressed_size, &msg);
-  if (!res) warning("decompression failed due to %s\n", msg);
-  guarantee(res, "decompression failed");
-}
-
-// END Zip Decompressor
--- a/src/share/vm/classfile/imageDecompressor.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_CLASSFILE_IMAGEDECOMPRESSOR_HPP
-#define SHARE_VM_CLASSFILE_IMAGEDECOMPRESSOR_HPP
-
-#include "runtime/thread.inline.hpp"
-#include "classfile/classLoader.hpp"
-#include "classfile/imageFile.hpp"
-#include "classfile/symbolTable.hpp"
-#include "oops/symbol.hpp"
-#include "utilities/growableArray.hpp"
-
-/*
- * Compressed resources located in an image have a header.
- * This header contains:
- * - _magic: A magic u4, required to retrieve the header in the compressed content
- * - _size: The size of the compressed resource.
- * - _uncompressed_size: The uncompressed size of the compressed resource.
- * - _decompressor_name_offset: The ImageDecompressor instance name StringsTable offset.
- * - _decompressor_config_offset: StringsTable offset of configuration that could be needed by
- *   the decompressor in order to decompress.
- * - _is_terminal: 1: the compressed content is terminal; uncompressing it yields
- *   the actual resource. 0: the compressed content is not terminal; uncompressing it
- *   yields content that must itself be decompressed (this occurs when a stack of
- *   compressors has been used to compress the resource).
- */
-struct ResourceHeader {
-  /* Length of header, needed to retrieve content offset */
-  static const u1 resource_header_length = 21;
-  /* Magic bytes that identify a compressed resource header */
-  static const u4 resource_header_magic = 0xCAFEFAFA;
-  u4 _magic; // Resource header
-  u4 _size;  // Resource size
-  u4 _uncompressed_size;  // Expected uncompressed size
-  u4 _decompressor_name_offset;  // Strings table decompressor offset
-  u4 _decompressor_config_offset; // Strings table config offset
-  u1 _is_terminal; // Last decompressor 1, otherwise 0.
-};
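One subtlety worth noting: resource_header_length (21) is the packed on-disk size of the five u4 fields plus the u1 flag, while sizeof(ResourceHeader) is typically padded to 24 by the compiler; decompress_resource advances through the stream by the former, which is why the explicit constant exists. A quick check of the arithmetic:

#include <cstdio>

// Mirror of the field layout above, with plain C++ types.
struct ResourceHeader {
  unsigned int  _magic;
  unsigned int  _size;
  unsigned int  _uncompressed_size;
  unsigned int  _decompressor_name_offset;
  unsigned int  _decompressor_config_offset;
  unsigned char _is_terminal;
};

int main() {
  // 5 u4 fields + 1 u1 flag = 21 bytes in the stream; the in-memory
  // struct is usually rounded up to 24 by alignment padding.
  printf("on disk: %d bytes, in memory: %zu bytes\n",
         5 * 4 + 1, sizeof(ResourceHeader));
  return 0;
}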
-
-/*
- * Resources located in a jimage file can be compressed. Compression occurs at
- * jimage file creation time. When a resource is compressed, a header containing
- * the name of the compressor that compressed it is prepended to the resource.
- * Various compression strategies can be applied to compress a resource.
- * The same resource can even be compressed multiple times by a stack of compressors.
- * At runtime, a resource is decompressed in a loop until no header remains,
- * meaning that the resource has been restored to its uncompressed form.
- * In each iteration, the name of the compressor located in the current header
- * is used to retrieve the associated instance of ImageDecompressor.
- * For example "zip" is the name of the compressor that compresses resources
- * using the zip algorithm. The ZipDecompressor class name is also "zip".
- * ImageDecompressor instances are retrieved from a static array in which
- * they are registered.
- */
-class ImageDecompressor: public CHeapObj<mtClass> {
-
-private:
-  const Symbol* _name;
-
-  /*
-   * Array of concrete decompressors. This array is used to retrieve the decompressor
-   * that can handle resource decompression.
-   */
-  static GrowableArray<ImageDecompressor*>* _decompressors;
-
-  /*
-   * Identifier of a decompressor. This name is the identification key used to
-   * retrieve a decompressor from a resource header.
-   */
-  inline const Symbol* get_name() const { return _name; }
-
-protected:
-  ImageDecompressor(const Symbol* name) : _name(name) {
-  }
-  virtual void decompress_resource(u1* data, u1* uncompressed,
-    ResourceHeader* header, const ImageStrings* strings) = 0;
-
-public:
-  inline static void add_decompressor(ImageDecompressor* decompressor) {
-    _decompressors->append(decompressor);
-  }
-  inline static ImageDecompressor* get_decompressor(const char * decompressor_name) {
-    Thread* THREAD = Thread::current();
-    TempNewSymbol sym = SymbolTable::new_symbol(decompressor_name,
-            (int) strlen(decompressor_name), CHECK_NULL);
-    if (HAS_PENDING_EXCEPTION) {
-      warning("can't create symbol\n");
-      CLEAR_PENDING_EXCEPTION;
-      return NULL;
-    }
-    for (int i = 0; i < _decompressors->length(); i++) {
-      ImageDecompressor* decompressor = _decompressors->at(i);
-      if (decompressor->get_name()->fast_compare(sym) == 0) {
-        return decompressor;
-      }
-    }
-    guarantee(false, "No decompressor found.");
-    return NULL;
-  }
-  static void decompress_resource(u1* compressed, u1* uncompressed,
-    u4 uncompressed_size, const ImageStrings* strings, bool is_C_heap);
-};
-
-/**
- * Zip decompressor.
- */
-class ZipDecompressor : public ImageDecompressor {
-public:
-  ZipDecompressor(const Symbol* sym) : ImageDecompressor(sym) { }
-  void decompress_resource(u1* data, u1* uncompressed, ResourceHeader* header,
-    const ImageStrings* strings);
-};
-
-#endif // SHARE_VM_CLASSFILE_IMAGEDECOMPRESSOR_HPP
--- a/src/share/vm/classfile/imageFile.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,546 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/imageDecompressor.hpp"
-#include "classfile/imageFile.hpp"
-#include "memory/resourceArea.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/os.inline.hpp"
-#include "utilities/endian.hpp"
-#include "utilities/growableArray.hpp"
-
-// Image files are an alternate file format for storing classes and resources. The
-// goal is to supply file access which is faster and smaller than the jar format.
-//
-// (More detailed notes in the header.)
-//
-
-// Compute the Perfect Hashing hash code for the supplied UTF-8 string.
-s4 ImageStrings::hash_code(const char* string, s4 seed) {
-  // Access bytes as unsigned.
-  u1* bytes = (u1*)string;
-  // Compute hash code.
-  for (u1 byte = *bytes++; byte; byte = *bytes++) {
-    seed = (seed * HASH_MULTIPLIER) ^ byte;
-  }
-  // Ensure the result is not signed.
-  return seed & 0x7FFFFFFF;
-}
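The hash is FNV-style: multiply the running value by the prime 0x01000193 (HASH_MULTIPLIER in imageFile.hpp) and XOR in each byte, then mask off the sign bit. A standalone sketch, using unsigned arithmetic so the wraparound is well defined:

#include <cstdio>

static int image_hash(const char* s, unsigned int seed) {
  for (const unsigned char* p = (const unsigned char*)s; *p != 0; p++) {
    seed = (seed * 0x01000193u) ^ *p;   // multiply by prime, fold in byte
  }
  return (int)(seed & 0x7FFFFFFF);      // ensure the result is not signed
}

int main() {
  // The default seed is the multiplier itself, per ImageStrings::hash_code.
  printf("%d\n", image_hash("/java.base/java/lang/Object.class", 0x01000193u));
  return 0;
}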
-
-// Match up a string in a perfect hash table.  Result still needs validation
-// for precise match (false positive.)
-s4 ImageStrings::find(Endian* endian, const char* name, s4* redirect, u4 length) {
-  // If the table is empty, then short cut.
-  if (redirect == NULL || length == 0) {
-    return NOT_FOUND;
-  }
-  // Compute the basic perfect hash for name.
-  s4 hash_code = ImageStrings::hash_code(name);
-  // Modulo table size.
-  s4 index = hash_code % length;
-  // Get redirect entry.
-  //   value == 0 then not found
-  //   value < 0 then -1 - value is true index
-  //   value > 0 then value is seed for recomputing hash.
-  s4 value = endian->get(redirect[index]);
-  // if recompute is required.
-  if (value > 0) {
-    // Entry collision value, need to recompute hash.
-    hash_code = ImageStrings::hash_code(name, value);
-    // Modulo table size.
-    return hash_code % length;
-  } else if (value < 0) {
-    // Compute direct index.
-    return -1 - value;
-  }
-  // No entry found.
-  return NOT_FOUND;
-}
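The redirect convention packs three outcomes into one signed value: a stored -6, for example, decodes to direct index -1 - (-6) = 5, while a positive 17 means "rehash with seed 17". A sketch of just that decode step, with invented values:

#include <cstdio>

// 0 means absent, negative encodes -1 - index, positive is a new seed.
static int decode_redirect(int value, int rehashed_index) {
  if (value == 0) return -1;            // NOT_FOUND
  if (value < 0)  return -1 - value;    // collision-free direct index
  return rehashed_index;                // index from hash(name, value) % length
}

int main() {
  printf("%d\n", decode_redirect(-6, 0));   // prints "5"
  return 0;
}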
-
-// Test to see if UTF-8 string begins with the start UTF-8 string.  If so,
-// return non-NULL address of remaining portion of string.  Otherwise, return
-// NULL.  Used to test sections of a path without copying from image string
-// table.
-const char* ImageStrings::starts_with(const char* string, const char* start) {
-  char ch1, ch2;
-  // Match up the strings the best we can.
-  while ((ch1 = *string) && (ch2 = *start)) {
-    if (ch1 != ch2) {
-      // Mismatch, return NULL.
-      return NULL;
-    }
-    // Next characters.
-    string++, start++;
-  }
-  // Return remainder of string.
-  return string;
-}
-
-// Inflates the attribute stream into individual values stored in the long
-// array _attributes. This allows an attribute value to be quickly accessed by
-// direct indexing.  Unspecified values default to zero (from constructor.)
-void ImageLocation::set_data(u1* data) {
-  // Deflate the attribute stream into an array of attributes.
-  u1 byte;
-  // Repeat until end header is found.
-  while ((byte = *data)) {
-    // Extract kind from header byte.
-    u1 kind = attribute_kind(byte);
-    guarantee(kind < ATTRIBUTE_COUNT, "invalid image location attribute");
-    // Extract length of data (in bytes).
-    u1 n = attribute_length(byte);
-    // Read value (most significant first.)
-    _attributes[kind] = attribute_value(data + 1, n);
-    // Position to next attribute by skipping attribute header and data bytes.
-    data += n + 1;
-  }
-}
-
-// Zero all attribute values.
-void ImageLocation::clear_data() {
-  // Set defaults to zero.
-  memset(_attributes, 0, sizeof(_attributes));
-}
-
-// ImageModuleData constructor maps out sub-tables for faster access.
-ImageModuleData::ImageModuleData(const ImageFileReader* image_file,
-        const char* module_data_name) :
-    _image_file(image_file),
-    _endian(image_file->endian()),
-    _strings(image_file->get_strings()) {
-  // Retrieve the resource containing the module data for the image file.
-  ImageLocation location;
-  bool found = image_file->find_location(module_data_name, location);
-  guarantee(found, "missing module data");
-  u8 data_size = location.get_attribute(ImageLocation::ATTRIBUTE_UNCOMPRESSED);
-  _data = (u1*)NEW_C_HEAP_ARRAY(char, data_size, mtClass);
-  _image_file->get_resource(location, _data);
-  // Map out the header.
-  _header = (Header*)_data;
-  // Get the package to module entry count.
-  u4 ptm_count = _header->ptm_count(_endian);
-  // Get the module to package entry count.
-  u4 mtp_count = _header->mtp_count(_endian);
-  // Compute the offset of the package to module perfect hash redirect.
-  u4 ptm_redirect_offset = sizeof(Header);
-  // Compute the offset of the package to module data.
-  u4 ptm_data_offset = ptm_redirect_offset + ptm_count * sizeof(s4);
-  // Compute the offset of the module to package perfect hash redirect.
-  u4 mtp_redirect_offset = ptm_data_offset + ptm_count * sizeof(PTMData);
-  // Compute the offset of the module to package data.
-  u4 mtp_data_offset = mtp_redirect_offset + mtp_count * sizeof(s4);
-  // Compute the offset of the module to package tables.
-  u4 mtp_packages_offset = mtp_data_offset + mtp_count * sizeof(MTPData);
-  // Compute the address of the package to module perfect hash redirect.
-  _ptm_redirect = (s4*)(_data + ptm_redirect_offset);
-  // Compute the address of the package to module data.
-  _ptm_data = (PTMData*)(_data + ptm_data_offset);
-  // Compute the address of the module to package perfect hash redirect.
-  _mtp_redirect = (s4*)(_data + mtp_redirect_offset);
-  // Compute the address of the module to package data.
-  _mtp_data = (MTPData*)(_data + mtp_data_offset);
-  // Compute the address of the module to package tables.
-  _mtp_packages = (s4*)(_data + mtp_packages_offset);
-}
-
-// Release module data resource.
-ImageModuleData::~ImageModuleData() {
-  if (_data != NULL) {
-    FREE_C_HEAP_ARRAY(u1, _data);
-  }
-}
-
-// Return the name of the module data resource.  Ex. "./lib/modules/file.jimage"
-// yields "file.jdata"
-void ImageModuleData::module_data_name(char* buffer, const char* image_file_name) {
-  // Locate the last slash in the file name path.
-  const char* slash = strrchr(image_file_name, os::file_separator()[0]);
-  // Trim the path to name and extension.
-  const char* name = slash != NULL ? slash + 1 : (char *)image_file_name;
-  // Locate the extension period.
-  const char* dot = strrchr(name, '.');
-  guarantee(dot, "missing extension on jimage name");
-  // Trim to only base name.
-  int length = dot - name;
-  strncpy(buffer, name, length);
-  buffer[length] = '\0';
-  // Append extension.
-  strcat(buffer, ".jdata");
-}
-
-// Return the module in which a package resides.  Returns NULL if not found.
-const char* ImageModuleData::package_to_module(const char* package_name) {
-  // Search the package to module table.
-  s4 index = ImageStrings::find(_endian, package_name, _ptm_redirect,
-                                  _header->ptm_count(_endian));
-  // If entry is found.
-  if (index != ImageStrings::NOT_FOUND) {
-    // Retrieve the package to module entry.
-    PTMData* data = _ptm_data + index;
-    // Verify that it is the correct data.
-    if (strcmp(package_name, get_string(data->name_offset(_endian))) != 0) {
-      return NULL;
-    }
-    // Return the module name.
-    return get_string(data->module_name_offset(_endian));
-  }
-  return NULL;
-}
-
-// Returns all the package names in a module.  Returns NULL if module not found.
-GrowableArray<const char*>* ImageModuleData::module_to_packages(const char* module_name) {
-  // Search the module to package table.
-  s4 index = ImageStrings::find(_endian, module_name, _mtp_redirect,
-                                  _header->mtp_count(_endian));
-  // If entry is found.
-  if (index != ImageStrings::NOT_FOUND) {
-    // Retrieve the module to package entry.
-    MTPData* data = _mtp_data + index;
-    // Verify that it is the correct data.
-    if (strcmp(module_name, get_string(data->name_offset(_endian))) != 0) {
-      return NULL;
-    }
-    // Construct an array of all the package entries.
-    GrowableArray<const char*>* packages = new GrowableArray<const char*>();
-    s4 package_offset = data->package_offset(_endian);
-    for (u4 i = 0; i < data->package_count(_endian); i++) {
-      u4 package_name_offset = mtp_package(package_offset + i);
-      const char* package_name = get_string(package_name_offset);
-      packages->append(package_name);
-    }
-    return packages;
-  }
-  return NULL;
-}
-
-// Table to manage multiple opens of an image file.
-GrowableArray<ImageFileReader*>* ImageFileReader::_reader_table =
-  new(ResourceObj::C_HEAP, mtInternal) GrowableArray<ImageFileReader*>(2, true);
-
-// Open an image file, reuse structure if file already open.
-ImageFileReader* ImageFileReader::open(const char* name, bool big_endian) {
-  // Lock out _reader_table.
-  MutexLocker ml(ImageFileReaderTable_lock);
-  ImageFileReader* reader;
-  // Search for an existing image file.
-  for (int i = 0; i < _reader_table->length(); i++) {
-    // Retrieve table entry.
-    reader = _reader_table->at(i);
-    // If name matches, then reuse (bump up use count.)
-    if (strcmp(reader->name(), name) == 0) {
-      reader->inc_use();
-      return reader;
-    }
-  }
-  // Need a new image reader.
-  reader = new ImageFileReader(name, big_endian);
-  bool opened = reader->open();
-  // If failed to open.
-  if (!opened) {
-    delete reader;
-    return NULL;
-  }
-  // Bump use count and add to table.
-  reader->inc_use();
-  _reader_table->append(reader);
-  return reader;
-}
-
-// Close an image file if the file is not in use elsewhere.
-void ImageFileReader::close(ImageFileReader *reader) {
-  // Lock out _reader_table.
-  MutexLocker ml(ImageFileReaderTable_lock);
-  // If last use then remove from table and then close.
-  if (reader->dec_use()) {
-    _reader_table->remove(reader);
-    delete reader;
-  }
-}
-
-// Return an id for the specified ImageFileReader.
-u8 ImageFileReader::readerToID(ImageFileReader *reader) {
-  // ID is just the cloaked reader address.
-  return (u8)reader;
-}
-
-// Validate the image id.
-bool ImageFileReader::idCheck(u8 id) {
-  // Make sure the ID is a managed (_reader_table) reader.
-  MutexLocker ml(ImageFileReaderTable_lock);
-  return _reader_table->contains((ImageFileReader*)id);
-}
-
-// Return the ImageFileReader for the specified id.
-ImageFileReader* ImageFileReader::idToReader(u8 id) {
-#ifdef PRODUCT
-  // Fast convert.
-  return (ImageFileReader*)id;
-#else
-  // Do a slow check before fast convert.
-  return idCheck(id) ? (ImageFileReader*)id : NULL;
-#endif
-}
-
-// Constructor initializes to a closed state.
-ImageFileReader::ImageFileReader(const char* name, bool big_endian) {
-  // Copy the image file name.
-  _name = NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtClass);
-  strcpy(_name, name);
-  // Initialize for a closed file.
-  _fd = -1;
-  _endian = Endian::get_handler(big_endian);
-  _index_data = NULL;
-}
-
-// Close image and free up data structures.
-ImageFileReader::~ImageFileReader() {
-  // Ensure file is closed.
-  close();
-  // Free up name.
-  if (_name != NULL) {
-    FREE_C_HEAP_ARRAY(char, _name);
-    _name = NULL;
-  }
-}
-
-// Open image file for read access.
-bool ImageFileReader::open() {
-  // If file exists open for reading.
-  struct stat st;
-  if (os::stat(_name, &st) != 0 ||
-    (st.st_mode & S_IFREG) != S_IFREG ||
-    (_fd = os::open(_name, 0, O_RDONLY)) == -1) {
-    return false;
-  }
-  // Retrieve the file size.
-  _file_size = (u8)st.st_size;
-  // Read image file header and verify it has a valid header.
-  size_t header_size = sizeof(ImageHeader);
-  if (_file_size < header_size ||
-    !read_at((u1*)&_header, header_size, 0) ||
-    _header.magic(_endian) != IMAGE_MAGIC ||
-    _header.major_version(_endian) != MAJOR_VERSION ||
-    _header.minor_version(_endian) != MINOR_VERSION) {
-    close();
-    return false;
-  }
-  // Size of image index.
-  _index_size = index_size();
-  // Make sure file is large enough to contain the index.
-  if (_file_size < _index_size) {
-    return false;
-  }
-  // Determine how much of the image is memory mapped.
-  off_t map_size = (off_t)(MemoryMapImage ? _file_size : _index_size);
-  // Memory map image (minimally the index.)
-  _index_data = (u1*)os::map_memory(_fd, _name, 0, NULL, map_size, true, false);
-  guarantee(_index_data, "image file not memory mapped");
-  // Retrieve length of index perfect hash table.
-  u4 length = table_length();
-  // Compute offset of the perfect hash table redirect table.
-  u4 redirect_table_offset = (u4)header_size;
-  // Compute offset of index attribute offsets.
-  u4 offsets_table_offset = redirect_table_offset + length * sizeof(s4);
-  // Compute offset of index location attribute data.
-  u4 location_bytes_offset = offsets_table_offset + length * sizeof(u4);
-  // Compute offset of index string table.
-  u4 string_bytes_offset = location_bytes_offset + locations_size();
-  // Compute address of the perfect hash table redirect table.
-  _redirect_table = (s4*)(_index_data + redirect_table_offset);
-  // Compute address of index attribute offsets.
-  _offsets_table = (u4*)(_index_data + offsets_table_offset);
-  // Compute address of index location attribute data.
-  _location_bytes = _index_data + location_bytes_offset;
-  // Compute address of index string table.
-  _string_bytes = _index_data + string_bytes_offset;
-  // Successful open.
-  return true;
-}
-
-// Close image file.
-void ImageFileReader::close() {
-  // Deallocate the index.
-  if (_index_data != NULL) {
-    os::unmap_memory((char*)_index_data, _index_size);
-    _index_data = NULL;
-  }
-  // Close file.
-  if (_fd != -1) {
-    os::close(_fd);
-    _fd = -1;
-  }
-}
-
-// Read directly from the file.
-bool ImageFileReader::read_at(u1* data, u8 size, u8 offset) const {
-  return os::read_at(_fd, data, size, offset) == size;
-}
-
-// Find the location attributes associated with the path.  Returns true if
-// the location is found, false otherwise.
-bool ImageFileReader::find_location(const char* path, ImageLocation& location) const {
-  // Locate the entry in the index perfect hash table.
-  s4 index = ImageStrings::find(_endian, path, _redirect_table, table_length());
-  // If found.
-  if (index != ImageStrings::NOT_FOUND) {
-    // Get address of first byte of location attribute stream.
-    u1* data = get_location_data(index);
-    // Expand location attributes.
-    location.set_data(data);
-    // Make sure result is not a false positive.
-    return verify_location(location, path);
-  }
-  return false;
-}
-
-// Assemble the location path from the string fragments indicated in the location attributes.
-void ImageFileReader::location_path(ImageLocation& location, char* path, size_t max) const {
-  // Manage the image string table.
-  ImageStrings strings(_string_bytes, _header.strings_size(_endian));
-  // Position to first character of the path buffer.
-  char* next = path;
-  // Temp for string length.
-  size_t length;
-  // Get module string.
-  const char* module = location.get_attribute(ImageLocation::ATTRIBUTE_MODULE, strings);
-  // If the module string is not empty.
-  if (*module != '\0') {
-    // Get length of module name.
-    length = strlen(module);
-    // Make sure there is no buffer overflow.
-    guarantee(next - path + length + 2 < max, "buffer overflow");
-    // Append '/module/'.
-    *next++ = '/';
-    strcpy(next, module); next += length;
-    *next++ = '/';
-  }
-  // Get parent (package) string.
-  const char* parent = location.get_attribute(ImageLocation::ATTRIBUTE_PARENT, strings);
-  // If the parent string is not empty.
-  if (*parent != '\0') {
-    // Get length of parent string.
-    length = strlen(parent);
-    // Make sure there is no buffer overflow.
-    guarantee(next - path + length + 1 < max, "buffer overflow");
-    // Append 'parent/'.
-    strcpy(next, parent); next += length;
-    *next++ = '/';
-  }
-  // Get base name string.
-  const char* base = location.get_attribute(ImageLocation::ATTRIBUTE_BASE, strings);
-  // Get length of base name.
-  length = strlen(base);
-  // Make sure there is no buffer overflow.
-  guarantee(next - path + length < max, "buffer overflow");
-  // Append base name.
-  strcpy(next, base); next += length;
-  // Get extension string.
-  const char* extension = location.get_attribute(ImageLocation::ATTRIBUTE_EXTENSION, strings);
-  // If the extension string is not empty.
-  if (*extension != '\0') {
-    // Get length of extension string.
-    length = strlen(extension);
-    // Make sure there is no buffer overflow.
-    guarantee(next - path + length + 1 < max, "buffer overflow");
-    // Append '.extension' .
-    *next++ = '.';
-    strcpy(next, extension); next += length;
-  }
-  // Make sure there is no buffer overflow.
-  guarantee((size_t)(next - path) < max, "buffer overflow");
-  // Terminate string.
-  *next = '\0';
-}
-
-// Verify that a found location matches the supplied path (without copying.)
-bool ImageFileReader::verify_location(ImageLocation& location, const char* path) const {
-  // Manage the image string table.
-  ImageStrings strings(_string_bytes, _header.strings_size(_endian));
-  // Position to first character of the path string.
-  const char* next = path;
-  // Get module name string.
-  const char* module = location.get_attribute(ImageLocation::ATTRIBUTE_MODULE, strings);
-  // If the module string is not empty.
-  if (*module != '\0') {
-    // Compare '/module/' .
-    if (*next++ != '/') return false;
-    if (!(next = ImageStrings::starts_with(next, module))) return false;
-    if (*next++ != '/') return false;
-  }
-  // Get parent (package) string
-  const char* parent = location.get_attribute(ImageLocation::ATTRIBUTE_PARENT, strings);
-  // If the parent string is not empty.
-  if (*parent != '\0') {
-    // Compare 'parent/' .
-    if (!(next = ImageStrings::starts_with(next, parent))) return false;
-    if (*next++ != '/') return false;
-  }
-  // Get base name string.
-  const char* base = location.get_attribute(ImageLocation::ATTRIBUTE_BASE, strings);
-  // Compare with base name.
-  if (!(next = ImageStrings::starts_with(next, base))) return false;
-  // Get extension string.
-  const char* extension = location.get_attribute(ImageLocation::ATTRIBUTE_EXTENSION, strings);
-  // If extension is not empty.
-  if (*extension != '\0') {
-    // Compare '.extension' .
-    if (*next++ != '.') return false;
-    if (!(next = ImageStrings::starts_with(next, extension))) return false;
-  }
-  // True only if complete match and no more characters.
-  return *next == '\0';
-}
-
-// Return the resource data for the supplied location.
-void ImageFileReader::get_resource(ImageLocation& location, u1* uncompressed_data) const {
-  // Retrieve the byte offset and size of the resource.
-  u8 offset = location.get_attribute(ImageLocation::ATTRIBUTE_OFFSET);
-  u8 uncompressed_size = location.get_attribute(ImageLocation::ATTRIBUTE_UNCOMPRESSED);
-  u8 compressed_size = location.get_attribute(ImageLocation::ATTRIBUTE_COMPRESSED);
-  if (compressed_size != 0) {
-    ResourceMark rm;
-    u1* compressed_data;
-    // If not memory mapped read in bytes.
-    if (!MemoryMapImage) {
-      // Allocate buffer for compression.
-      compressed_data = NEW_RESOURCE_ARRAY(u1, compressed_size);
-      // Read bytes from offset beyond the image index.
-      bool is_read = read_at(compressed_data, compressed_size, _index_size + offset);
-      guarantee(is_read, "error reading from image or short read");
-    } else {
-      compressed_data = get_data_address() + offset;
-    }
-    // Get image string table.
-    const ImageStrings strings = get_strings();
-    // Decompress resource.
-    ImageDecompressor::decompress_resource(compressed_data, uncompressed_data, uncompressed_size,
-            &strings, false);
-  } else {
-    // Read bytes from offset beyond the image index.
-    bool is_read = read_at(uncompressed_data, uncompressed_size, _index_size + offset);
-    guarantee(is_read, "error reading from image or short read");
-  }
-}
--- a/src/share/vm/classfile/imageFile.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,602 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_CLASSFILE_IMAGEFILE_HPP
-#define SHARE_VM_CLASSFILE_IMAGEFILE_HPP
-
-#include "classfile/classLoader.hpp"
-#include "memory/allocation.hpp"
-#include "memory/allocation.inline.hpp"
-#include "utilities/endian.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/growableArray.hpp"
-
-// Image files are an alternate file format for storing classes and resources. The
-// goal is to supply file access which is faster and smaller than the jar format.
-// It should be noted that unlike jars, information stored in an image is in native
-// endian format. This allows the image to be mapped into memory without endian
-// translation.  This also means that images are platform dependent.
-//
-// Image files are structured as three sections;
-//
-//         +-----------+
-//         |  Header   |
-//         +-----------+
-//         |           |
-//         |   Index   |
-//         |           |
-//         +-----------+
-//         |           |
-//         |           |
-//         | Resources |
-//         |           |
-//         |           |
-//         +-----------+
-//
-// The header contains information related to identification and description of
-// contents.
-//
-//         +-------------------------+
-//         |   Magic (0xCAFEDADA)    |
-//         +------------+------------+
-//         | Major Vers | Minor Vers |
-//         +------------+------------+
-//         |          Flags          |
-//         +-------------------------+
-//         |      Resource Count     |
-//         +-------------------------+
-//         |       Table Length      |
-//         +-------------------------+
-//         |      Attributes Size    |
-//         +-------------------------+
-//         |       Strings Size      |
-//         +-------------------------+
-//
-// Magic - means of identifying validity of the file.  This avoids requiring a
-//         special file extension.
-// Major vers, minor vers - differences in version numbers indicate structural
-//                          changes in the image.
-// Flags - various image wide flags (future).
-// Resource count - number of resources in the file.
-// Table length - the length of lookup tables used in the index.
-// Attributes size - number of bytes in the region used to store location attribute
-//                   streams.
-// Strings size - the size of the region used to store strings used by the
-//                index and meta data.
-//
-// The index contains information related to resource lookup. The algorithm
-// used for lookup is "A Practical Minimal Perfect Hashing Method"
-// (http://homepages.dcc.ufmg.br/~nivio/papers/wea05.pdf). Given a path string
-// in the form /<module>/<package>/<base>.<extension>  return the resource location
-// information;
-//
-//     redirectIndex = hash(path, DEFAULT_SEED) % table_length;
-//     redirect = redirectTable[redirectIndex];
-//     if (redirect == 0) return not found;
-//     locationIndex = redirect < 0 ? -1 - redirect : hash(path, redirect) % table_length;
-//     location = locationTable[locationIndex];
-//     if (!verify(location, path)) return not found;
-//     return location;
-//
-// Note: The hash function takes an initial seed value.  A different seed value
-// usually returns a different result for strings that would otherwise collide with
-// other seeds. The verify function guarantees the found resource location is
-// indeed the resource we are looking for.
-//
-// The following is the format of the index;
-//
-//         +-------------------+
-//         |   Redirect Table  |
-//         +-------------------+
-//         | Attribute Offsets |
-//         +-------------------+
-//         |   Attribute Data  |
-//         +-------------------+
-//         |      Strings      |
-//         +-------------------+
-//
-// Redirect Table - Array of 32-bit signed values representing actions that
-//                  should take place for hashed strings that map to that
-//                  value.  Negative values indicate no hash collision and can be
-//                  quickly converted to indices into attribute offsets.  Positive
-//                  values represent a new seed for hashing an index into attribute
-//                  offsets.  Zero indicates not found.
-// Attribute Offsets - Array of 32-bit unsigned values representing offsets into
-//                     attribute data.  Attribute offsets can be iterated to do a
-//                     full survey of resources in the image.  Offset of zero
-//                     indicates no attributes.
-// Attribute Data - Bytes representing compact attribute data for locations. (See
-//                  comments in ImageLocation.)
-// Strings - Collection of zero terminated UTF-8 strings used by the index and
-//           image meta data.  Each string is accessed by offset.  Each string is
-//           unique.  Offset zero is reserved for the empty string.
-//
-// Note that the memory mapped index assumes 32 bit alignment of each component
-// in the index.
-//
-// Endianness of an image.
-// An image booted by hotspot is always in native endian.  However, it is possible
-// for the JDK to read an image in the alternate endian format, primarily in
-// cross-platform scenarios.  Ex., javac reading an embedded image to access
-// classes for cross compilation.
-//
-
-class ImageFileReader; // forward declaration
-
-// Manage image file string table.
-class ImageStrings VALUE_OBJ_CLASS_SPEC {
-private:
-  u1* _data; // Data bytes for strings.
-  u4 _size; // Number of bytes in the string table.
-public:
-  enum {
-    // Not found result from find routine.
-    NOT_FOUND = -1,
-    // Prime used to generate hash for Perfect Hashing.
-    HASH_MULTIPLIER = 0x01000193
-  };
-
-  ImageStrings(u1* data, u4 size) : _data(data), _size(size) {}
-
-  // Return the UTF-8 string beginning at offset.
-  inline const char* get(u4 offset) const {
-    guarantee(offset < _size, "offset exceeds string table size");
-    return (const char*)(_data + offset);
-  }
-
-  // Compute the Perfect Hashing hash code for the supplied UTF-8 string.
-  inline static u4 hash_code(const char* string) {
-    return hash_code(string, HASH_MULTIPLIER);
-  }
-
-  // Compute the Perfect Hashing hash code for the supplied string, starting at seed.
-  static s4 hash_code(const char* string, s4 seed);
-
-  // Match up a string in a perfect hash table.  Result still needs validation
-  // for precise match.
-  static s4 find(Endian* endian, const char* name, s4* redirect, u4 length);
-
-  // Test to see if UTF-8 string begins with the start UTF-8 string.  If so,
-  // return non-NULL address of remaining portion of string.  Otherwise, return
-  // NULL.  Used to test sections of a path without copying from image string
-  // table.
-  static const char* starts_with(const char* string, const char* start);
-
-  // Test to see if UTF-8 string begins with start char.  If so, return non-NULL
-  // address of remaining portion of string.  Otherwise, return NULL.  Used
-  // to test a character of a path without copying.
-  inline static const char* starts_with(const char* string, const char ch) {
-    return *string == ch ? string + 1 : NULL;
-  }
-};
-
-// Manage image file location attribute data.  Within an image, a location's
-// attributes are compressed into a stream of bytes.  An attribute stream is
-// composed of individual attribute sequences.  Each attribute sequence begins with
-// a header byte containing the attribute 'kind' (upper 5 bits of header) and the
-// 'length' less 1 (lower 3 bits of header) of bytes that follow containing the
-// attribute value.  Attribute values present as most significant byte first.
-//
-// Ex. Container offset (ATTRIBUTE_OFFSET) 0x33562 would be represented as 0x2A
-// (kind = 5, length = 3), 0x03, 0x35, 0x62.
-//
-// An attribute stream is terminated with a header kind of ATTRIBUTE_END (header
-// byte of zero.)
-//
-// ImageLocation inflates the stream into individual values stored in the long
-// array _attributes. This allows an attribute value to be quickly accessed by
-// direct indexing. Unspecified values default to zero.
-//
-// Notes:
-//  - Even though ATTRIBUTE_END is used to mark the end of the attribute stream,
-//    streams will contain zero byte values to represent lesser significant bits.
-//    Thus, detecting a zero byte is not sufficient to detect the end of an attribute
-//    stream.
-//  - ATTRIBUTE_OFFSET represents the number of bytes from the beginning of the region
-//    storing the resources.  Thus, in an image this represents the number of bytes
-//    after the index.
-//  - Currently, compressed resources are represented by having a non-zero
-//    ATTRIBUTE_COMPRESSED value.  This represents the number of bytes stored in the
-//    image, and the value of ATTRIBUTE_UNCOMPRESSED represents number of bytes of the
-//    inflated resource in memory. If the ATTRIBUTE_COMPRESSED is zero then the value
-//    of ATTRIBUTE_UNCOMPRESSED represents both the number of bytes in the image and
-//    in memory.  In the future, additional compression techniques will be used and
-//    represented differently.
-//  - Package strings include trailing slash and extensions include prefix period.
-//
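To make the encoding concrete, a standalone decoder for the sample stream above: header byte 0x2A carries kind 5 in the top five bits and length minus one in the low three, followed by the value bytes most significant first and the ATTRIBUTE_END terminator:

#include <cstdint>
#include <cstdio>

int main() {
  // ATTRIBUTE_OFFSET (kind 5), 3 value bytes, value 0x33562, then END.
  const uint8_t stream[] = { 0x2A, 0x03, 0x35, 0x62, 0x00 };
  for (const uint8_t* p = stream; *p != 0; ) {
    uint8_t kind = *p >> 3;             // upper 5 bits
    uint8_t n    = (*p & 0x7) + 1;      // lower 3 bits hold length - 1
    uint64_t value = 0;
    for (uint8_t i = 0; i < n; i++) {   // most significant byte first
      value = (value << 8) | p[1 + i];
    }
    printf("kind=%u value=0x%llx\n", kind, (unsigned long long)value);
    p += n + 1;                         // skip header byte plus value bytes
  }
  return 0;
}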
-class ImageLocation VALUE_OBJ_CLASS_SPEC {
-public:
-  enum {
-    ATTRIBUTE_END,          // End of attribute stream marker
-    ATTRIBUTE_MODULE,       // String table offset of module name
-    ATTRIBUTE_PARENT,       // String table offset of resource path parent
-    ATTRIBUTE_BASE,         // String table offset of resource path base
-    ATTRIBUTE_EXTENSION,    // String table offset of resource path extension
-    ATTRIBUTE_OFFSET,       // Container byte offset of resource
-    ATTRIBUTE_COMPRESSED,   // In image byte size of the compressed resource
-    ATTRIBUTE_UNCOMPRESSED, // In memory byte size of the uncompressed resource
-    ATTRIBUTE_COUNT         // Number of attribute kinds
-  };
-
-private:
-  // Values of inflated attributes.
-  u8 _attributes[ATTRIBUTE_COUNT];
-
-  // Return the number of bytes in the attribute value.
-  inline static u1 attribute_length(u1 data) {
-    return (data & 0x7) + 1;
-  }
-
-  // Return the attribute kind.
-  inline static u1 attribute_kind(u1 data) {
-    u1 kind = data >> 3;
-    guarantee(kind < ATTRIBUTE_COUNT, "invalid attribute kind");
-    return kind;
-  }
-
-  // Return the attribute value.
-  inline static u8 attribute_value(u1* data, u1 n) {
-    guarantee(0 < n && n <= 8, "invalid attribute value length");
-    u8 value = 0;
-    // Most significant bytes first.
-    for (u1 i = 0; i < n; i++) {
-      value <<= 8;
-      value |= data[i];
-    }
-    return value;
-  }
-
-public:
-  ImageLocation() {
-    clear_data();
-  }
-
-  ImageLocation(u1* data) {
-    clear_data();
-    set_data(data);
-  }
-
-  // Inflates the attribute stream into individual values stored in the long
-  // array _attributes. This allows an attribute value to be quickly accessed by
-  // direct indexing. Unspecified values default to zero.
-  void set_data(u1* data);
-
-  // Zero all attribute values.
-  void clear_data();
-
-  // Retrieve an attribute value from the inflated array.
-  inline u8 get_attribute(u1 kind) const {
-    guarantee(ATTRIBUTE_END < kind && kind < ATTRIBUTE_COUNT, "invalid attribute kind");
-    return _attributes[kind];
-  }
-
-  // Retrieve an attribute string value from the inflated array.
-  inline const char* get_attribute(u4 kind, const ImageStrings& strings) const {
-    return strings.get((u4)get_attribute(kind));
-  }
-};
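
A minimal standalone sketch of the attribute-stream decode described above (illustrative only, not the HotSpot implementation): the header byte packs (kind << 3) | (length - 1), the value bytes follow most significant first, and a lone zero header byte is ATTRIBUTE_END.

    #include <cstdint>
    #include <cstdio>

    int main() {
      // ATTRIBUTE_OFFSET (kind 5) with value 0x33562, then ATTRIBUTE_END.
      const uint8_t stream[] = { 0x2A, 0x03, 0x35, 0x62, 0x00 };
      const uint8_t* p = stream;
      for (uint8_t header; (header = *p++) != 0; ) {  // zero header = ATTRIBUTE_END
        uint8_t kind   = header >> 3;                 // upper 5 bits
        uint8_t length = (header & 0x7) + 1;          // lower 3 bits hold length - 1
        uint64_t value = 0;
        for (uint8_t i = 0; i < length; i++) {
          value = (value << 8) | *p++;                // most significant byte first
        }
        printf("kind=%u length=%u value=0x%llx\n",
               (unsigned) kind, (unsigned) length, (unsigned long long) value);
      }
      return 0;  // prints: kind=5 length=3 value=0x33562
    }
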
-
-//
-// NOTE: needs revision.
-// Each loader requires a set of module meta data to identify which modules and
-// packages are managed by that loader.  Currently, there is one image file per
-// builtin loader, so there is only one module meta data resource per file.
-//
-// Each element in the module meta data is a native endian 4 byte integer.  Note
-// that entries with zero offsets for string table entries should be ignored
-// (padding for hash table lookup.)
-//
-// Format:
-//    Count of package to module entries
-//    Count of module to package entries
-//    Perfect Hash redirect table[Count of package to module entries]
-//    Package to module entries[Count of package to module entries]
-//        Offset to package name in string table
-//        Offset to module name in string table
-//    Perfect Hash redirect table[Count of module to package entries]
-//    Module to package entries[Count of module to package entries]
-//        Offset to module name in string table
-//        Count of packages in module
-//        Offset to first package in packages table
-//    Packages[]
-//        Offset to package name in string table
-//
-// Manage the image module meta data.
-class ImageModuleData : public CHeapObj<mtClass> {
-  class Header VALUE_OBJ_CLASS_SPEC {
-  private:
-    u4 _ptm_count;      // Count of package to module entries
-    u4 _mtp_count;      // Count of module to package entries
-  public:
-    inline u4 ptm_count(Endian* endian) const { return endian->get(_ptm_count); }
-    inline u4 mtp_count(Endian* endian) const { return endian->get(_mtp_count); }
-  };
-
-  // Hashtable entry
-  class HashData VALUE_OBJ_CLASS_SPEC {
-  private:
-    u4 _name_offset;    // Name offset in string table
-  public:
-    inline s4 name_offset(Endian* endian) const { return endian->get(_name_offset); }
-  };
-
-  // Package to module hashtable entry
-  class PTMData : public HashData {
-  private:
-    u4 _module_name_offset; // Module name offset in string table
-  public:
-    inline s4 module_name_offset(Endian* endian) const { return endian->get(_module_name_offset); }
-  };
-
-  // Module to package hashtable entry
-  class MTPData : public HashData {
-  private:
-    u4 _package_count;     // Number of packages in module
-    u4 _package_offset;    // Offset in package list
-  public:
-    inline u4 package_count(Endian* endian)  const { return endian->get(_package_count); }
-    inline u4 package_offset(Endian* endian) const { return endian->get(_package_offset); }
-  };
-
-  const ImageFileReader* _image_file; // Source image file
-  Endian* _endian;                    // Endian handler
-  ImageStrings _strings;              // Image file strings
-  u1* _data;                          // Module data resource data
-  u8 _data_size;                      // Size of resource data
-  Header* _header;                    // Module data header
-  s4* _ptm_redirect;                  // Package to module hashtable redirect
-  PTMData* _ptm_data;                 // Package to module data
-  s4* _mtp_redirect;                  // Module to packages hashtable redirect
-  MTPData* _mtp_data;                 // Module to packages data
-  s4* _mtp_packages;                  // Package data (name offsets)
-
-  // Return a string from the string table.
-  inline const char* get_string(u4 offset) {
-    return _strings.get(offset);
-  }
-
-  inline u4 mtp_package(u4 index) {
-    return _endian->get(_mtp_packages[index]);
-  }
-
-public:
-  ImageModuleData(const ImageFileReader* image_file, const char* module_data_name);
-  ~ImageModuleData();
-
-  // Return the name of the module data resource.
-  static void module_data_name(char* buffer, const char* image_file_name);
-
-  // Return the module in which a package resides.  Returns NULL if not found.
-  const char* package_to_module(const char* package_name);
-
-  // Returns all the package names in a module.  Returns NULL if module not found.
-  GrowableArray<const char*>* module_to_packages(const char* module_name);
-};
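
The package-to-module lookup documented above can be restated compactly. A hedged sketch under the documented layout (native endian u4 fields, zero offsets as hash-table padding): it replaces the perfect-hash redirect with a plain linear scan, and find_module is a hypothetical helper, not part of ImageModuleData.

    #include <cstdint>
    #include <cstring>

    struct MetaHeader { uint32_t ptm_count; uint32_t mtp_count; };
    struct PTMEntry   { uint32_t package_name_offset; uint32_t module_name_offset; };

    // Linear scan of the package-to-module table; the real code uses the
    // perfect-hash redirect table purely as a lookup accelerator.
    const char* find_module(const uint8_t* data, const char* strings,
                            const char* package) {
      const MetaHeader* header = (const MetaHeader*) data;
      const int32_t* redirect  = (const int32_t*) (header + 1);
      const PTMEntry* entries  = (const PTMEntry*) (redirect + header->ptm_count);
      for (uint32_t i = 0; i < header->ptm_count; i++) {
        if (entries[i].package_name_offset == 0) continue;  // hash-table padding
        const char* name = strings + entries[i].package_name_offset;
        if (strcmp(name, package) == 0) {
          return strings + entries[i].module_name_offset;
        }
      }
      return NULL;
    }
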
-
-// Image file header, starting at offset 0.
-class ImageHeader VALUE_OBJ_CLASS_SPEC {
-private:
-  u4 _magic;           // Image file marker
-  u4 _version;         // Image file major version number
-  u4 _flags;           // Image file flags
-  u4 _resource_count;  // Number of resources in file
-  u4 _table_length;    // Number of slots in index tables
-  u4 _locations_size;  // Number of bytes in attribute table
-  u4 _strings_size;    // Number of bytes in string table
-
-public:
-  u4 magic() const { return _magic; }
-  u4 magic(Endian* endian) const { return endian->get(_magic); }
-  void set_magic(Endian* endian, u4 magic) { return endian->set(_magic, magic); }
-
-  u4 major_version(Endian* endian) const { return endian->get(_version) >> 16; }
-  u4 minor_version(Endian* endian) const { return endian->get(_version) & 0xFFFF; }
-  void set_version(Endian* endian, u4 major_version, u4 minor_version) {
-    return endian->set(_version, major_version << 16 | minor_version);
-  }
-
-  u4 flags(Endian* endian) const { return endian->get(_flags); }
-  void set_flags(Endian* endian, u4 value) { return endian->set(_flags, value); }
-
-  u4 resource_count(Endian* endian) const { return endian->get(_resource_count); }
-  void set_resource_count(Endian* endian, u4 count) { return endian->set(_resource_count, count); }
-
-  u4 table_length(Endian* endian) const { return endian->get(_table_length); }
-  void set_table_length(Endian* endian, u4 count) { return endian->set(_table_length, count); }
-
-  u4 locations_size(Endian* endian) const { return endian->get(_locations_size); }
-  void set_locations_size(Endian* endian, u4 size) { return endian->set(_locations_size, size); }
-
-  u4 strings_size(Endian* endian) const { return endian->get(_strings_size); }
-  void set_strings_size(Endian* endian, u4 size) { return endian->set(_strings_size, size); }
-};
-
-// Max path length limit independent of platform.  Windows max path is 1024,
-// other platforms use 4096.  The JCK fails several tests when 1024 is used.
-#define IMAGE_MAX_PATH 4096
-
-// Manage the image file.
-// ImageFileReader manages the content of an image file.
-// Initially, the header of the image file is read for validation.  If valid,
-// values in the header are used to calculate the size of the image index.  The
-// index is then memory mapped to allow load on demand and sharing.  The
-// -XX:+MemoryMapImage flag determines if the entire file is loaded (server use.)
-// An image can be used by Hotspot and multiple reference points in the JDK, thus
-// it is desirable to share a reader.  To accommodate sharing, a share table is
-// defined (see ImageFileReaderTable in imageFile.cpp.)  To track the number of
-// uses, ImageFileReader keeps a use count (_use).  The count is incremented when
-// the image is 'opened' by a reference point and decremented when 'closed'.  A
-// count of zero leads the ImageFileReader to be actually closed and discarded.
-class ImageFileReader : public CHeapObj<mtClass> {
-private:
-  // Manage a number of image files such that an image can be shared across
-  // multiple uses (ex. loader.)
-  static GrowableArray<ImageFileReader*>* _reader_table;
-
-  char* _name;         // Name of image
-  s4 _use;             // Use count
-  int _fd;             // File descriptor
-  Endian* _endian;     // Endian handler
-  u8 _file_size;       // File size in bytes
-  ImageHeader _header; // Image header
-  size_t _index_size;  // Total size of index
-  u1* _index_data;     // Raw index data
-  s4* _redirect_table; // Perfect hash redirect table
-  u4* _offsets_table;  // Location offset table
-  u1* _location_bytes; // Location attributes
-  u1* _string_bytes;   // String table
-
-  ImageFileReader(const char* name, bool big_endian);
-  ~ImageFileReader();
-
-  // Compute number of bytes in image file index.
-  inline u8 index_size() {
-    return sizeof(ImageHeader) +
-      table_length() * sizeof(u4) * 2 + locations_size() + strings_size();
-  }
-
-public:
-  enum {
-    // Image file marker.
-    IMAGE_MAGIC = 0xCAFEDADA,
-    // Endian inverted Image file marker.
-    IMAGE_MAGIC_INVERT = 0xDADAFECA,
-    // Image file major version number.
-    MAJOR_VERSION = 1,
-    // Image file minor version number.
-    MINOR_VERSION = 0
-  };
-
-  // Open an image file, reuse structure if file already open.
-  static ImageFileReader* open(const char* name, bool big_endian = Endian::is_big_endian());
-
-  // Close an image file if the file is not in use elsewhere.
-  static void close(ImageFileReader *reader);
-
-  // Return an id for the specified ImageFileReader.
-  static u8 readerToID(ImageFileReader *reader);
-
-  // Validate the image id.
-  static bool idCheck(u8 id);
-
-  // Return the ImageFileReader for the specified id.
-  static ImageFileReader* idToReader(u8 id);
-
-  // Open image file for read access.
-  bool open();
-
-  // Close image file.
-  void close();
-
-  // Read directly from the file.
-  bool read_at(u1* data, u8 size, u8 offset) const;
-
-  inline Endian* endian() const { return _endian; }
-
-  // Retrieve name of image file.
-  inline const char* name() const {
-    return _name;
-  }
-
-  // Retrieve size of image file.
-  inline u8 file_size() const {
-    return _file_size;
-  }
-
-  // Return first address of index data.
-  inline u1* get_index_address() const {
-    return _index_data;
-  }
-
-  // Return first address of resource data.
-  inline u1* get_data_address() const {
-    return _index_data + _index_size;
-  }
-
-  // Get the size of the index data.
-  size_t get_index_size() const {
-    return _index_size;
-  }
-
-  inline u4 table_length() const {
-    return _header.table_length(_endian);
-  }
-
-  inline u4 locations_size() const {
-    return _header.locations_size(_endian);
-  }
-
-  inline u4 strings_size() const {
-    return _header.strings_size(_endian);
-  }
-
-  inline u4* offsets_table() const {
-    return _offsets_table;
-  }
-
-  // Increment use count.
-  inline void inc_use() {
-    _use++;
-  }
-
-  // Decrement use count.
-  inline bool dec_use() {
-    return --_use == 0;
-  }
-
-  // Return a string table accessor.
-  inline const ImageStrings get_strings() const {
-    return ImageStrings(_string_bytes, _header.strings_size(_endian));
-  }
-
-  // Return location attribute stream at offset.
-  inline u1* get_location_offset_data(u4 offset) const {
-    guarantee((u4)offset < _header.locations_size(_endian),
-              "offset exceeds location attributes size");
-    return offset != 0 ? _location_bytes + offset : NULL;
-  }
-
-  // Return location attribute stream for the location at the given index.
-  inline u1* get_location_data(u4 index) const {
-    guarantee((u4)index < _header.table_length(_endian),
-              "index exceeds location count");
-    u4 offset = _endian->get(_offsets_table[index]);
-
-    return get_location_offset_data(offset);
-  }
-
-  // Find the location attributes associated with the path.  Returns true if
-  // the location is found, false otherwise.
-  bool find_location(const char* path, ImageLocation& location) const;
-
-  // Assemble the location path.
-  void location_path(ImageLocation& location, char* path, size_t max) const;
-
-  // Verify that a found location matches the supplied path.
-  bool verify_location(ImageLocation& location, const char* path) const;
-
-  // Return the resource for the supplied path.
-  void get_resource(ImageLocation& location, u1* uncompressed_data) const;
-};
-#endif // SHARE_VM_CLASSFILE_IMAGEFILE_HPP
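
The magic/endianness check and index sizing that ImageFileReader performs on open can be restated as code. A hedged sketch, not the HotSpot implementation, assuming GCC/Clang's __builtin_bswap32 for the byte-order inverted case:

    #include <cstdint>

    const uint32_t IMAGE_MAGIC        = 0xCAFEDADA;  // from the enum above
    const uint32_t IMAGE_MAGIC_INVERT = 0xDADAFECA;

    // Raw header fields, in file order (see ImageHeader above).
    struct RawHeader {
      uint32_t magic, version, flags, resource_count,
               table_length, locations_size, strings_size;
    };

    // Accept the header if the magic matches either byte order, then size the
    // index the same way as ImageFileReader::index_size(): header + redirect
    // table + offsets table + location attributes + string table.
    bool validate(const RawHeader& h, bool& needs_swap, uint64_t& index_size) {
      if (h.magic == IMAGE_MAGIC)             needs_swap = false;
      else if (h.magic == IMAGE_MAGIC_INVERT) needs_swap = true;
      else return false;
      uint32_t table     = needs_swap ? __builtin_bswap32(h.table_length)   : h.table_length;
      uint32_t locations = needs_swap ? __builtin_bswap32(h.locations_size) : h.locations_size;
      uint32_t strings   = needs_swap ? __builtin_bswap32(h.strings_size)   : h.strings_size;
      index_size = sizeof(RawHeader)
                 + (uint64_t) table * sizeof(uint32_t) * 2   // redirect + offsets tables
                 + locations + strings;
      return true;
    }
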
--- a/src/share/vm/classfile/javaClasses.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/classfile/javaClasses.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -29,7 +29,6 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/debugInfo.hpp"
 #include "code/pcDesc.hpp"
-#include "compiler/compilerOracle.hpp"
 #include "interpreter/interpreter.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/jimage.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "jni.h"
+
+// Opaque reference to a JImage file.
+class JImageFile;
+// Opaque reference to an image file resource location.
+typedef jlong JImageLocationRef;
+
+// Max path length limit independent of platform.  Windows max path is 1024,
+// other platforms use 4096.  The JCK fails several tests when 1024 is used.
+#define JIMAGE_MAX_PATH 4096
+
+// JImage Error Codes
+
+// The image file is not prefixed with 0xCAFEDADA
+#define JIMAGE_BAD_MAGIC (-1)
+// The image file does not have a compatible (translatable) version
+#define JIMAGE_BAD_VERSION (-2)
+// The image file content is malformed
+#define JIMAGE_CORRUPTED (-3)
+
+/*
+ * JImageOpen - Given the supplied full path file name, open an image file. This
+ * function will also initialize tables and retrieve meta-data necessary to
+ * satisfy other functions in the API. If the image file has been previously
+ * opened, a new open request will share memory and resources used by the previous
+ * open. A call to JImageOpen should be balanced by a call to JImageClose, to
+ * release memory and resources used. If the image file is not found or cannot
+ * be opened, then NULL is returned and error will contain a reason for the
+ * failure; a positive value for a system error number, negative for a jimage
+ * specific error (see JImage Error Codes.)
+ *
+ *  Ex.
+ *   jint error;
+ *   JImageFile* jimage = (*JImageOpen)(JAVA_HOME "lib/modules/bootmodules.jimage", &error);
+ *   if (jimage == NULL) {
+ *     tty->print_cr("JImage failed to open: %d", error);
+ *     ...
+ *   }
+ *   ...
+ */
+
+extern "C" JImageFile* JIMAGE_Open(const char *name, jint* error);
+
+typedef JImageFile* (*JImageOpen_t)(const char *name, jint* error);
+
+/*
+ * JImageClose - Given the supplied open image file (see JImageOpen), release
+ * memory and resources used by the open file and close the file. If the image
+ * file is shared by other uses, release and close are deferred until the last use
+ * is also closed.
+ *
+ * Ex.
+ *  (*JImageClose)(image);
+ */
+
+extern "C" void JIMAGE_Close(JImageFile* jimage);
+
+typedef void (*JImageClose_t)(JImageFile* jimage);
+
+
+/*
+ * JImagePackageToModule - Given an open image file (see JImageOpen) and the name
+ * of a package, return the name of the module where the package resides. If the
+ * package does not exist in the image file, the function returns NULL.
+ * The resulting string does not need to be released.
+ * All strings are utf-8, zero byte terminated.
+ *
+ * Ex.
+ *  const char* package = (*JImagePackageToModule)(image, "java/lang");
+ *  tty->print_cr("%s", package);
+ *  -> java.base
+ */
+
+extern "C" const char * JIMAGE_PackageToModule(JImageFile* jimage, const char* package_name);
+
+typedef const char* (*JImagePackageToModule_t)(JImageFile* jimage, const char* package_name);
+
+
+/*
+ * JImageFindResource - Given an open image file (see JImageOpen), a module
+ * name, a version string and the name of a class/resource, return location
+ * information describing the resource and its size. If no resource is found, the
+ * function returns JIMAGE_NOT_FOUND and the value of size is undefined.
+ * The version number should be "9.0" and is not used in locating the resource.
+ * The resulting location does not need to be released.
+ * All strings are utf-8, zero byte terminated.
+ *
+ *  Ex.
+ *   jlong size;
+ *   JImageLocationRef location = (*JImageFindResource)(image, "java.base", "9.0", "java/lang/String.class", &size);
+ */
+extern "C" JImageLocationRef JIMAGE_FindResource(JImageFile* jimage,
+        const char* module_name, const char* version, const char* name,
+        jlong* size);
+
+typedef JImageLocationRef(*JImageFindResource_t)(JImageFile* jimage,
+        const char* module_name, const char* version, const char* name,
+        jlong* size);
+
+
+/*
+ * JImageGetResource - Given an open image file (see JImageOpen), a resource's
+ * location information (see JImageFindResource), a buffer and the buffer's
+ * size, retrieve the bytes associated with the
+ * resource. If the size is less than the resource size then the read is truncated.
+ * If the size is greater than the resource size then the remainder of the buffer
+ * is zero filled.  The function will return the actual size of the resource.
+ *
+ * Ex.
+ *  jlong size;
+ *  JImageLocationRef location = (*JImageFindResource)(image, "java.base", "9.0", "java/lang/String.class", &size);
+ *  char* buffer = new char[size];
+ *  (*JImageGetResource)(image, location, buffer, size);
+ */
+extern "C" jlong JIMAGE_GetResource(JImageFile* jimage, JImageLocationRef location,
+        char* buffer, jlong size);
+
+typedef jlong(*JImageGetResource_t)(JImageFile* jimage, JImageLocationRef location,
+        char* buffer, jlong size);
+
+
+/*
+ * JImageResourceIterator - Given an open image file (see JImageOpen), a visitor
+ * function and a visitor argument, iterate through each of the image's resources.
+ * The visitor function is called with the image file, the module name, the
+ * package name, the base name, the extension and the visitor argument. The return
+ * value of the visitor function should be true, unless an early iteration exit is
+ * required. All strings are utf-8, zero byte terminated.
+ *
+ * Ex.
+ *   bool ctw_visitor(JImageFile* jimage, const char* module_name, const char* version, const char* package, const char* name, const char* extension, void* arg) {
+ *     if (strcmp(extension, "class") == 0) {
+ *       char path[JIMAGE_MAX_PATH];
+ *       Thread* THREAD = Thread::current();
+ *       jio_snprintf(path, JIMAGE_MAX_PATH - 1, "/%s/%s", package, name);
+ *       ClassLoader::compile_the_world_in(path, (Handle)arg, THREAD);
+ *       return !HAS_PENDING_EXCEPTION;
+ *     }
+ *     return true;
+ *   }
+ *   (*JImageResourceIterator)(image, ctw_visitor, loader);
+ */
+
+typedef bool (*JImageResourceVisitor_t)(JImageFile* jimage,
+        const char* module_name, const char* version, const char* package,
+        const char* name, const char* extension, void* arg);
+
+extern "C" void JIMAGE_ResourceIterator(JImageFile* jimage,
+        JImageResourceVisitor_t visitor, void *arg);
+
+typedef void (*JImageResourceIterator_t)(JImageFile* jimage,
+        JImageResourceVisitor_t visitor, void* arg);
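
Taken together, the entry points above support a simple open / find / read / close sequence. A hedged usage sketch: the image path and buffer handling are illustrative, and a zero location is assumed to mean not-found (the comments reference JIMAGE_NOT_FOUND, which is not defined in this header).

    void dump_resource() {
      jint error;
      JImageFile* jimage = JIMAGE_Open("lib/modules/bootmodules.jimage", &error);
      if (jimage == NULL) {
        // error: positive = system error number, negative = JIMAGE_* code
        return;
      }
      jlong size;
      JImageLocationRef location = JIMAGE_FindResource(jimage, "java.base", "9.0",
                                                       "java/lang/Object.class", &size);
      if (location != 0) {  // assumption: zero denotes not-found
        char* buffer = new char[size];
        jlong actual = JIMAGE_GetResource(jimage, location, buffer, size);
        // ... use buffer[0 .. actual) ...
        delete[] buffer;
      }
      JIMAGE_Close(jimage);
    }
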
--- a/src/share/vm/classfile/vmSymbols.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/classfile/vmSymbols.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -625,6 +625,10 @@
   case vmIntrinsics::_updateDirectByteBufferCRC32C:
     if (!UseCRC32CIntrinsics) return true;
     break;
+  case vmIntrinsics::_updateBytesAdler32:
+  case vmIntrinsics::_updateByteBufferAdler32:
+    if (!UseAdler32Intrinsics) return true;
+    break;
   case vmIntrinsics::_copyMemory:
     if (!InlineArrayCopy || !InlineUnsafeOps) return true;
     break;
--- a/src/share/vm/classfile/vmSymbols.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/classfile/vmSymbols.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -927,6 +927,12 @@
   do_intrinsic(_updateDirectByteBufferCRC32C, java_util_zip_CRC32C, updateDirectByteBuffer_C_name, updateByteBuffer_signature, F_S) \
    do_name(    updateDirectByteBuffer_C_name,                     "updateDirectByteBuffer")                             \
                                                                                                                         \
+   /* support for java.util.zip.Adler32 */                                                                              \
+  do_class(java_util_zip_Adler32,        "java/util/zip/Adler32")                                                       \
+  do_intrinsic(_updateBytesAdler32,       java_util_zip_Adler32,  updateBytes_C_name,  updateBytes_signature,  F_SN)    \
+  do_intrinsic(_updateByteBufferAdler32,  java_util_zip_Adler32,  updateByteBuffer_A_name,  updateByteBuffer_signature,  F_SN) \
+   do_name(     updateByteBuffer_A_name,                          "updateByteBuffer")                                   \
+                                                                                                                        \
   /* support for sun.misc.Unsafe */                                                                                     \
   do_class(sun_misc_Unsafe,               "sun/misc/Unsafe")                                                            \
                                                                                                                         \
--- a/src/share/vm/code/nmethod.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/code/nmethod.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -848,10 +848,10 @@
   if (st != NULL) {
     ttyLocker ttyl;
     if (WizardMode) {
-      CompileTask::print_compilation(st, this, msg, /*short_form:*/ true);
+      CompileTask::print(st, this, msg, /*short_form:*/ true);
       st->print_cr(" (" INTPTR_FORMAT ")", this);
     } else {
-      CompileTask::print_compilation(st, this, msg, /*short_form:*/ false);
+      CompileTask::print(st, this, msg, /*short_form:*/ false);
     }
   }
 }
--- a/src/share/vm/compiler/compileBroker.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/compiler/compileBroker.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -157,7 +157,6 @@
 CompileQueue* CompileBroker::_c2_compile_queue   = NULL;
 CompileQueue* CompileBroker::_c1_compile_queue   = NULL;
 
-
 class CompilationLog : public StringEventLog {
  public:
   CompilationLog() : StringEventLog("Compilation events") {
@@ -167,7 +166,7 @@
     StringLogMessage lm;
     stringStream sstr = lm.stream();
     // msg.time_stamp().update_to(tty->time_stamp().ticks());
-    task->print_compilation(&sstr, NULL, true, false);
+    task->print(&sstr, NULL, true, false);
     log(thread, "%s", (const char*)lm);
   }
 
@@ -233,371 +232,6 @@
   }
 }
 
-
-CompileTask*  CompileTask::_task_free_list = NULL;
-#ifdef ASSERT
-int CompileTask::_num_allocated_tasks = 0;
-#endif
-/**
- * Allocate a CompileTask, from the free list if possible.
- */
-CompileTask* CompileTask::allocate() {
-  MutexLocker locker(CompileTaskAlloc_lock);
-  CompileTask* task = NULL;
-
-  if (_task_free_list != NULL) {
-    task = _task_free_list;
-    _task_free_list = task->next();
-    task->set_next(NULL);
-  } else {
-    task = new CompileTask();
-    DEBUG_ONLY(_num_allocated_tasks++;)
-    assert (WhiteBoxAPI || _num_allocated_tasks < 10000, "Leaking compilation tasks?");
-    task->set_next(NULL);
-    task->set_is_free(true);
-  }
-  assert(task->is_free(), "Task must be free.");
-  task->set_is_free(false);
-  return task;
-}
-
-
-/**
- * Add a task to the free list.
- */
-void CompileTask::free(CompileTask* task) {
-  MutexLocker locker(CompileTaskAlloc_lock);
-  if (!task->is_free()) {
-    task->set_code(NULL);
-    assert(!task->lock()->is_locked(), "Should not be locked when freed");
-    JNIHandles::destroy_global(task->_method_holder);
-    JNIHandles::destroy_global(task->_hot_method_holder);
-
-    task->set_is_free(true);
-    task->set_next(_task_free_list);
-    _task_free_list = task;
-  }
-}
-
-void CompileTask::initialize(int compile_id,
-                             methodHandle method,
-                             int osr_bci,
-                             int comp_level,
-                             methodHandle hot_method,
-                             int hot_count,
-                             const char* comment,
-                             bool is_blocking) {
-  assert(!_lock->is_locked(), "bad locking");
-
-  _compile_id = compile_id;
-  _method = method();
-  _method_holder = JNIHandles::make_global(method->method_holder()->klass_holder());
-  _osr_bci = osr_bci;
-  _is_blocking = is_blocking;
-  _comp_level = comp_level;
-  _num_inlined_bytecodes = 0;
-
-  _is_complete = false;
-  _is_success = false;
-  _code_handle = NULL;
-
-  _hot_method = NULL;
-  _hot_method_holder = NULL;
-  _hot_count = hot_count;
-  _time_queued = 0;  // tidy
-  _comment = comment;
-  _failure_reason = NULL;
-
-  if (LogCompilation) {
-    _time_queued = os::elapsed_counter();
-    if (hot_method.not_null()) {
-      if (hot_method == method) {
-        _hot_method = _method;
-      } else {
-        _hot_method = hot_method();
-        // only add loader or mirror if different from _method_holder
-        _hot_method_holder = JNIHandles::make_global(hot_method->method_holder()->klass_holder());
-      }
-    }
-  }
-
-  _next = NULL;
-}
-
-// ------------------------------------------------------------------
-// CompileTask::code/set_code
-nmethod* CompileTask::code() const {
-  if (_code_handle == NULL)  return NULL;
-  return _code_handle->code();
-}
-void CompileTask::set_code(nmethod* nm) {
-  if (_code_handle == NULL && nm == NULL)  return;
-  guarantee(_code_handle != NULL, "");
-  _code_handle->set_code(nm);
-  if (nm == NULL)  _code_handle = NULL;  // drop the handle also
-}
-
-void CompileTask::mark_on_stack() {
-  // Mark these methods as something redefine classes cannot remove.
-  _method->set_on_stack(true);
-  if (_hot_method != NULL) {
-    _hot_method->set_on_stack(true);
-  }
-}
-
-// RedefineClasses support
-void CompileTask::metadata_do(void f(Metadata*)) {
-  f(method());
-  if (hot_method() != NULL && hot_method() != method()) {
-    f(hot_method());
-  }
-}
-
-// ------------------------------------------------------------------
-// CompileTask::print_line_on_error
-//
-// This function is called by fatal error handler when the thread
-// causing troubles is a compiler thread.
-//
-// Do not grab any lock, do not allocate memory.
-//
-// Otherwise it's the same as CompileTask::print_line()
-//
-void CompileTask::print_line_on_error(outputStream* st, char* buf, int buflen) {
-  // print compiler name
-  st->print("%s:", CompileBroker::compiler_name(comp_level()));
-  print_compilation(st);
-}
-
-// ------------------------------------------------------------------
-// CompileTask::print_line
-void CompileTask::print_tty() {
-  ttyLocker ttyl;  // keep the following output all in one block
-  // print compiler name if requested
-  if (CIPrintCompilerName) tty->print("%s:", CompileBroker::compiler_name(comp_level()));
-    print_compilation(tty);
-}
-
-// ------------------------------------------------------------------
-// CompileTask::print_compilation_impl
-void CompileTask::print_compilation_impl(outputStream* st, Method* method, int compile_id, int comp_level,
-                                         bool is_osr_method, int osr_bci, bool is_blocking,
-                                         const char* msg, bool short_form, bool cr) {
-  if (!short_form) {
-    st->print("%7d ", (int) st->time_stamp().milliseconds());  // print timestamp
-  }
-  st->print("%4d ", compile_id);    // print compilation number
-
-  // For unloaded methods the transition to zombie occurs after the
-  // method is cleared so it's impossible to report accurate
-  // information for that case.
-  bool is_synchronized = false;
-  bool has_exception_handler = false;
-  bool is_native = false;
-  if (method != NULL) {
-    is_synchronized       = method->is_synchronized();
-    has_exception_handler = method->has_exception_handler();
-    is_native             = method->is_native();
-  }
-  // method attributes
-  const char compile_type   = is_osr_method                   ? '%' : ' ';
-  const char sync_char      = is_synchronized                 ? 's' : ' ';
-  const char exception_char = has_exception_handler           ? '!' : ' ';
-  const char blocking_char  = is_blocking                     ? 'b' : ' ';
-  const char native_char    = is_native                       ? 'n' : ' ';
-
-  // print method attributes
-  st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, native_char);
-
-  if (TieredCompilation) {
-    if (comp_level != -1)  st->print("%d ", comp_level);
-    else                   st->print("- ");
-  }
-  st->print("     ");  // more indent
-
-  if (method == NULL) {
-    st->print("(method)");
-  } else {
-    method->print_short_name(st);
-    if (is_osr_method) {
-      st->print(" @ %d", osr_bci);
-    }
-    if (method->is_native())
-      st->print(" (native)");
-    else
-      st->print(" (%d bytes)", method->code_size());
-  }
-
-  if (msg != NULL) {
-    st->print("   %s", msg);
-  }
-  if (cr) {
-    st->cr();
-  }
-}
-
-// ------------------------------------------------------------------
-// CompileTask::print_inlining
-void CompileTask::print_inlining(outputStream* st, ciMethod* method, int inline_level, int bci, const char* msg) {
-  //         1234567
-  st->print("        ");     // print timestamp
-  //         1234
-  st->print("     ");        // print compilation number
-
-  // method attributes
-  if (method->is_loaded()) {
-    const char sync_char      = method->is_synchronized()        ? 's' : ' ';
-    const char exception_char = method->has_exception_handlers() ? '!' : ' ';
-    const char monitors_char  = method->has_monitor_bytecodes()  ? 'm' : ' ';
-
-    // print method attributes
-    st->print(" %c%c%c  ", sync_char, exception_char, monitors_char);
-  } else {
-    //         %s!bn
-    st->print("      ");     // print method attributes
-  }
-
-  if (TieredCompilation) {
-    st->print("  ");
-  }
-  st->print("     ");        // more indent
-  st->print("    ");         // initial inlining indent
-
-  for (int i = 0; i < inline_level; i++)  st->print("  ");
-
-  st->print("@ %d  ", bci);  // print bci
-  method->print_short_name(st);
-  if (method->is_loaded())
-    st->print(" (%d bytes)", method->code_size());
-  else
-    st->print(" (not loaded)");
-
-  if (msg != NULL) {
-    st->print("   %s", msg);
-  }
-  st->cr();
-}
-
-// ------------------------------------------------------------------
-// CompileTask::print_inline_indent
-void CompileTask::print_inline_indent(int inline_level, outputStream* st) {
-  //         1234567
-  st->print("        ");     // print timestamp
-  //         1234
-  st->print("     ");        // print compilation number
-  //         %s!bn
-  st->print("      ");       // print method attributes
-  if (TieredCompilation) {
-    st->print("  ");
-  }
-  st->print("     ");        // more indent
-  st->print("    ");         // initial inlining indent
-  for (int i = 0; i < inline_level; i++)  st->print("  ");
-}
-
-// ------------------------------------------------------------------
-// CompileTask::print_compilation
-void CompileTask::print_compilation(outputStream* st, const char* msg, bool short_form, bool cr) {
-  bool is_osr_method = osr_bci() != InvocationEntryBci;
-  print_compilation_impl(st, method(), compile_id(), comp_level(), is_osr_method, osr_bci(), is_blocking(), msg, short_form, cr);
-}
-
-// ------------------------------------------------------------------
-// CompileTask::log_task
-void CompileTask::log_task(xmlStream* log) {
-  Thread* thread = Thread::current();
-  methodHandle method(thread, this->method());
-  ResourceMark rm(thread);
-
-  // <task compiler='Cx' id='9' method='M' osr_bci='X' level='1' blocking='1' stamp='1.234'>
-  log->print(" compiler='%s' compile_id='%d'", _comp_level <= CompLevel_full_profile ? "C1" : "C2", _compile_id);
-  if (_osr_bci != CompileBroker::standard_entry_bci) {
-    log->print(" compile_kind='osr'");  // same as nmethod::compile_kind
-  } // else compile_kind='c2c'
-  if (!method.is_null())  log->method(method);
-  if (_osr_bci != CompileBroker::standard_entry_bci) {
-    log->print(" osr_bci='%d'", _osr_bci);
-  }
-  if (_comp_level != CompLevel_highest_tier) {
-    log->print(" level='%d'", _comp_level);
-  }
-  if (_is_blocking) {
-    log->print(" blocking='1'");
-  }
-  log->stamp();
-}
-
-
-// ------------------------------------------------------------------
-// CompileTask::log_task_queued
-void CompileTask::log_task_queued() {
-  Thread* thread = Thread::current();
-  ttyLocker ttyl;
-  ResourceMark rm(thread);
-
-  xtty->begin_elem("task_queued");
-  log_task(xtty);
-  if (_comment != NULL) {
-    xtty->print(" comment='%s'", _comment);
-  }
-  if (_hot_method != NULL) {
-    methodHandle hot(thread, _hot_method);
-    methodHandle method(thread, _method);
-    if (hot() != method()) {
-      xtty->method(hot);
-    }
-  }
-  if (_hot_count != 0) {
-    xtty->print(" hot_count='%d'", _hot_count);
-  }
-  xtty->end_elem();
-}
-
-
-// ------------------------------------------------------------------
-// CompileTask::log_task_start
-void CompileTask::log_task_start(CompileLog* log)   {
-  log->begin_head("task");
-  log_task(log);
-  log->end_head();
-}
-
-
-// ------------------------------------------------------------------
-// CompileTask::log_task_done
-void CompileTask::log_task_done(CompileLog* log) {
-  Thread* thread = Thread::current();
-  methodHandle method(thread, this->method());
-  ResourceMark rm(thread);
-
-  if (!_is_success) {
-    const char* reason = _failure_reason != NULL ? _failure_reason : "unknown";
-    log->elem("failure reason='%s'", reason);
-  }
-
-  // <task_done ... stamp='1.234'>  </task>
-  nmethod* nm = code();
-  log->begin_elem("task_done success='%d' nmsize='%d' count='%d'",
-                  _is_success, nm == NULL ? 0 : nm->content_size(),
-                  method->invocation_count());
-  int bec = method->backedge_count();
-  if (bec != 0)  log->print(" backedge_count='%d'", bec);
-  // Note:  "_is_complete" is about to be set, but is not.
-  if (_num_inlined_bytecodes != 0) {
-    log->print(" inlined_bytes='%d'", _num_inlined_bytecodes);
-  }
-  log->stamp();
-  log->end_elem();
-  log->tail("task");
-  log->clear_identities();   // next task will have different CI
-  if (log->unflushed_count() > 2000) {
-    log->flush();
-  }
-  log->mark_file_end();
-}
-
-
-
 /**
  * Add a CompileTask to a CompileQueue.
  */
@@ -807,7 +441,7 @@
     st->print_cr("Empty");
   } else {
     while (task != NULL) {
-      task->print_compilation(st, NULL, true, true);
+      task->print(st, NULL, true, true);
       task = task->next();
     }
   }
@@ -1349,7 +983,7 @@
 #ifndef TIERED
     // seems like an assert of dubious value
     assert(comp_level == CompLevel_highest_tier,
-           "all OSR compiles are assumed to be at a single compilation lavel");
+           "all OSR compiles are assumed to be at a single compilation level");
 #endif // TIERED
     // We accept a higher level osr method
     nmethod* nm = method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
@@ -2037,7 +1671,7 @@
         FormatBufferResource msg = retry_message != NULL ?
             err_msg_res("COMPILE SKIPPED: %s (%s)", ci_env.failure_reason(), retry_message) :
             err_msg_res("COMPILE SKIPPED: %s",      ci_env.failure_reason());
-        task->print_compilation(tty, msg);
+        task->print(tty, msg);
       }
     } else {
       task->mark_success();
--- a/src/share/vm/compiler/compileBroker.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/compiler/compileBroker.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -27,127 +27,12 @@
 
 #include "ci/compilerInterface.hpp"
 #include "compiler/abstractCompiler.hpp"
+#include "compiler/compileTask.hpp"
 #include "runtime/perfData.hpp"
 
 class nmethod;
 class nmethodLocker;
 
-// CompileTask
-//
-// An entry in the compile queue.  It represents a pending or current
-// compilation.
-class CompileTask : public CHeapObj<mtCompiler> {
-  friend class VMStructs;
-
- private:
-  static CompileTask* _task_free_list;
-#ifdef ASSERT
-  static int          _num_allocated_tasks;
-#endif
-
-  Monitor*     _lock;
-  uint         _compile_id;
-  Method*      _method;
-  jobject      _method_holder;
-  int          _osr_bci;
-  bool         _is_complete;
-  bool         _is_success;
-  bool         _is_blocking;
-  int          _comp_level;
-  int          _num_inlined_bytecodes;
-  nmethodLocker* _code_handle;  // holder of eventual result
-  CompileTask* _next, *_prev;
-  bool         _is_free;
-  // Fields used for logging why the compilation was initiated:
-  jlong        _time_queued;  // in units of os::elapsed_counter()
-  Method*      _hot_method;   // which method actually triggered this task
-  jobject      _hot_method_holder;
-  int          _hot_count;    // information about its invocation counter
-  const char*  _comment;      // more info about the task
-  const char*  _failure_reason;
-
- public:
-  CompileTask() {
-    _lock = new Monitor(Mutex::nonleaf+2, "CompileTaskLock");
-  }
-
-  void initialize(int compile_id, methodHandle method, int osr_bci, int comp_level,
-                  methodHandle hot_method, int hot_count, const char* comment,
-                  bool is_blocking);
-
-  static CompileTask* allocate();
-  static void         free(CompileTask* task);
-
-  int          compile_id() const                { return _compile_id; }
-  Method*      method() const                    { return _method; }
-  Method*      hot_method() const                { return _hot_method; }
-  int          osr_bci() const                   { return _osr_bci; }
-  bool         is_complete() const               { return _is_complete; }
-  bool         is_blocking() const               { return _is_blocking; }
-  bool         is_success() const                { return _is_success; }
-
-  nmethodLocker* code_handle() const             { return _code_handle; }
-  void         set_code_handle(nmethodLocker* l) { _code_handle = l; }
-  nmethod*     code() const;                     // _code_handle->code()
-  void         set_code(nmethod* nm);            // _code_handle->set_code(nm)
-
-  Monitor*     lock() const                      { return _lock; }
-
-  void         mark_complete()                   { _is_complete = true; }
-  void         mark_success()                    { _is_success = true; }
-
-  int          comp_level()                      { return _comp_level;}
-  void         set_comp_level(int comp_level)    { _comp_level = comp_level;}
-
-  int          num_inlined_bytecodes() const     { return _num_inlined_bytecodes; }
-  void         set_num_inlined_bytecodes(int n)  { _num_inlined_bytecodes = n; }
-
-  CompileTask* next() const                      { return _next; }
-  void         set_next(CompileTask* next)       { _next = next; }
-  CompileTask* prev() const                      { return _prev; }
-  void         set_prev(CompileTask* prev)       { _prev = prev; }
-  bool         is_free() const                   { return _is_free; }
-  void         set_is_free(bool val)             { _is_free = val; }
-
-  // RedefineClasses support
-  void         metadata_do(void f(Metadata*));
-
-private:
-  static void  print_compilation_impl(outputStream* st, Method* method, int compile_id, int comp_level,
-                                      bool is_osr_method = false, int osr_bci = -1, bool is_blocking = false,
-                                      const char* msg = NULL, bool short_form = false, bool cr = true);
-
-public:
-  void         print_compilation(outputStream* st = tty, const char* msg = NULL, bool short_form = false, bool cr = true);
-  static void  print_compilation(outputStream* st, const nmethod* nm, const char* msg = NULL, bool short_form = false, bool cr = true) {
-    print_compilation_impl(st, nm->method(), nm->compile_id(), nm->comp_level(),
-                           nm->is_osr_method(), nm->is_osr_method() ? nm->osr_entry_bci() : -1, /*is_blocking*/ false,
-                           msg, short_form, cr);
-  }
-
-  static void  print_inlining(outputStream* st, ciMethod* method, int inline_level, int bci, const char* msg = NULL);
-  static void  print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
-    print_inlining(tty, method, inline_level, bci, msg);
-  }
-
-  // Redefine Classes support
-  void mark_on_stack();
-
-  static void  print_inline_indent(int inline_level, outputStream* st = tty);
-
-  void         print_tty();
-  void         print_line_on_error(outputStream* st, char* buf, int buflen);
-
-  void         log_task(xmlStream* log);
-  void         log_task_queued();
-  void         log_task_start(CompileLog* log);
-  void         log_task_done(CompileLog* log);
-
-  void         set_failure_reason(const char* reason) {
-    _failure_reason = reason;
-  }
-};
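
The class moves verbatim to compiler/compileTask.hpp (added below). For orientation, a hedged sketch of a task's lifecycle as the broker drives it; the argument names are placeholders for values the broker supplies:

    CompileTask* task = CompileTask::allocate();   // recycled from the free list if possible
    task->initialize(compile_id, method, osr_bci, comp_level,
                     hot_method, hot_count, comment, is_blocking);
    // ... queued, then executed by a compiler thread ...
    task->mark_success();                          // or set_failure_reason(...)
    task->mark_complete();
    CompileTask::free(task);                       // back onto the free list
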
-
 // CompilerCounters
 //
 // Per Compiler Performance Counters.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/compiler/compileTask.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -0,0 +1,391 @@
+/*
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "compiler/compileTask.hpp"
+#include "compiler/compileLog.hpp"
+#include "compiler/compileBroker.hpp"
+
+CompileTask*  CompileTask::_task_free_list = NULL;
+#ifdef ASSERT
+int CompileTask::_num_allocated_tasks = 0;
+#endif
+
+/**
+ * Allocate a CompileTask, from the free list if possible.
+ */
+CompileTask* CompileTask::allocate() {
+  MutexLocker locker(CompileTaskAlloc_lock);
+  CompileTask* task = NULL;
+
+  if (_task_free_list != NULL) {
+    task = _task_free_list;
+    _task_free_list = task->next();
+    task->set_next(NULL);
+  } else {
+    task = new CompileTask();
+    DEBUG_ONLY(_num_allocated_tasks++;)
+    assert (WhiteBoxAPI || _num_allocated_tasks < 10000, "Leaking compilation tasks?");
+    task->set_next(NULL);
+    task->set_is_free(true);
+  }
+  assert(task->is_free(), "Task must be free.");
+  task->set_is_free(false);
+  return task;
+}
+
+/**
+ * Add a task to the free list.
+ */
+void CompileTask::free(CompileTask* task) {
+  MutexLocker locker(CompileTaskAlloc_lock);
+  if (!task->is_free()) {
+    task->set_code(NULL);
+    assert(!task->lock()->is_locked(), "Should not be locked when freed");
+    JNIHandles::destroy_global(task->_method_holder);
+    JNIHandles::destroy_global(task->_hot_method_holder);
+
+    task->set_is_free(true);
+    task->set_next(_task_free_list);
+    _task_free_list = task;
+  }
+}
+
+
+void CompileTask::initialize(int compile_id,
+                             methodHandle method,
+                             int osr_bci,
+                             int comp_level,
+                             methodHandle hot_method,
+                             int hot_count,
+                             const char* comment,
+                             bool is_blocking) {
+  assert(!_lock->is_locked(), "bad locking");
+
+  _compile_id = compile_id;
+  _method = method();
+  _method_holder = JNIHandles::make_global(method->method_holder()->klass_holder());
+  _osr_bci = osr_bci;
+  _is_blocking = is_blocking;
+  _comp_level = comp_level;
+  _num_inlined_bytecodes = 0;
+
+  _is_complete = false;
+  _is_success = false;
+  _code_handle = NULL;
+
+  _hot_method = NULL;
+  _hot_method_holder = NULL;
+  _hot_count = hot_count;
+  _time_queued = 0;  // tidy
+  _comment = comment;
+  _failure_reason = NULL;
+
+  if (LogCompilation) {
+    _time_queued = os::elapsed_counter();
+    if (hot_method.not_null()) {
+      if (hot_method == method) {
+        _hot_method = _method;
+      } else {
+        _hot_method = hot_method();
+        // only add loader or mirror if different from _method_holder
+        _hot_method_holder = JNIHandles::make_global(hot_method->method_holder()->klass_holder());
+      }
+    }
+  }
+
+  _next = NULL;
+}
+
+// ------------------------------------------------------------------
+// CompileTask::code/set_code
+//
+nmethod* CompileTask::code() const {
+  if (_code_handle == NULL)  return NULL;
+  return _code_handle->code();
+}
+
+void CompileTask::set_code(nmethod* nm) {
+  if (_code_handle == NULL && nm == NULL)  return;
+  guarantee(_code_handle != NULL, "");
+  _code_handle->set_code(nm);
+  if (nm == NULL)  _code_handle = NULL;  // drop the handle also
+}
+
+void CompileTask::mark_on_stack() {
+  // Mark these methods as something redefine classes cannot remove.
+  _method->set_on_stack(true);
+  if (_hot_method != NULL) {
+    _hot_method->set_on_stack(true);
+  }
+}
+
+// RedefineClasses support
+void CompileTask::metadata_do(void f(Metadata*)) {
+  f(method());
+  if (hot_method() != NULL && hot_method() != method()) {
+    f(hot_method());
+  }
+}
+
+// ------------------------------------------------------------------
+// CompileTask::print_line_on_error
+//
+// This function is called by fatal error handler when the thread
+// causing troubles is a compiler thread.
+//
+// Do not grab any lock, do not allocate memory.
+//
+// Otherwise it's the same as CompileTask::print()
+//
+void CompileTask::print_line_on_error(outputStream* st, char* buf, int buflen) {
+  // print compiler name
+  st->print("%s:", CompileBroker::compiler_name(comp_level()));
+  print(st);
+}
+
+// ------------------------------------------------------------------
+// CompileTask::print_tty
+void CompileTask::print_tty() {
+  ttyLocker ttyl;  // keep the following output all in one block
+  // print compiler name if requested
+  if (CIPrintCompilerName) tty->print("%s:", CompileBroker::compiler_name(comp_level()));
+  print(tty);
+}
+
+// ------------------------------------------------------------------
+// CompileTask::print_impl
+void CompileTask::print_impl(outputStream* st, Method* method, int compile_id, int comp_level,
+                             bool is_osr_method, int osr_bci, bool is_blocking,
+                             const char* msg, bool short_form, bool cr) {
+  if (!short_form) {
+    st->print("%7d ", (int) st->time_stamp().milliseconds());  // print timestamp
+  }
+  st->print("%4d ", compile_id);    // print compilation number
+
+  // For unloaded methods the transition to zombie occurs after the
+  // method is cleared so it's impossible to report accurate
+  // information for that case.
+  bool is_synchronized = false;
+  bool has_exception_handler = false;
+  bool is_native = false;
+  if (method != NULL) {
+    is_synchronized       = method->is_synchronized();
+    has_exception_handler = method->has_exception_handler();
+    is_native             = method->is_native();
+  }
+  // method attributes
+  const char compile_type   = is_osr_method                   ? '%' : ' ';
+  const char sync_char      = is_synchronized                 ? 's' : ' ';
+  const char exception_char = has_exception_handler           ? '!' : ' ';
+  const char blocking_char  = is_blocking                     ? 'b' : ' ';
+  const char native_char    = is_native                       ? 'n' : ' ';
+
+  // print method attributes
+  st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, native_char);
+
+  if (TieredCompilation) {
+    if (comp_level != -1)  st->print("%d ", comp_level);
+    else                   st->print("- ");
+  }
+  st->print("     ");  // more indent
+
+  if (method == NULL) {
+    st->print("(method)");
+  } else {
+    method->print_short_name(st);
+    if (is_osr_method) {
+      st->print(" @ %d", osr_bci);
+    }
+    if (method->is_native())
+      st->print(" (native)");
+    else
+      st->print(" (%d bytes)", method->code_size());
+  }
+
+  if (msg != NULL) {
+    st->print("   %s", msg);
+  }
+  if (cr) {
+    st->cr();
+  }
+}
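
For reference, the columns emitted by print_impl() line up roughly as below (illustrative values; the timestamp appears only in long form, the tier digit only under TieredCompilation). The attribute characters are '%' OSR, 's' synchronized, '!' has exception handlers, 'b' blocking, 'n' native:

      12345  407 %s!b  3      java.lang.String::indexOf @ 8 (70 bytes)
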
+
+void CompileTask::print_inline_indent(int inline_level, outputStream* st) {
+  //         1234567
+  st->print("        ");     // print timestamp
+  //         1234
+  st->print("     ");        // print compilation number
+  //         %s!bn
+  st->print("      ");       // print method attributes
+  if (TieredCompilation) {
+    st->print("  ");
+  }
+  st->print("     ");        // more indent
+  st->print("    ");         // initial inlining indent
+  for (int i = 0; i < inline_level; i++)  st->print("  ");
+}
+
+// ------------------------------------------------------------------
+// CompileTask::print_compilation
+void CompileTask::print(outputStream* st, const char* msg, bool short_form, bool cr) {
+  bool is_osr_method = osr_bci() != InvocationEntryBci;
+  print_impl(st, method(), compile_id(), comp_level(), is_osr_method, osr_bci(), is_blocking(), msg, short_form, cr);
+}
+
+// ------------------------------------------------------------------
+// CompileTask::log_task
+void CompileTask::log_task(xmlStream* log) {
+  Thread* thread = Thread::current();
+  methodHandle method(thread, this->method());
+  ResourceMark rm(thread);
+
+  // <task id='9' method='M' osr_bci='X' level='1' blocking='1' stamp='1.234'>
+  log->print(" compile_id='%d'", _compile_id);
+  if (_osr_bci != CompileBroker::standard_entry_bci) {
+    log->print(" compile_kind='osr'");  // same as nmethod::compile_kind
+  } // else compile_kind='c2c'
+  if (!method.is_null())  log->method(method);
+  if (_osr_bci != CompileBroker::standard_entry_bci) {
+    log->print(" osr_bci='%d'", _osr_bci);
+  }
+  if (_comp_level != CompLevel_highest_tier) {
+    log->print(" level='%d'", _comp_level);
+  }
+  if (_is_blocking) {
+    log->print(" blocking='1'");
+  }
+  log->stamp();
+}
+
+// ------------------------------------------------------------------
+// CompileTask::log_task_queued
+void CompileTask::log_task_queued() {
+  Thread* thread = Thread::current();
+  ttyLocker ttyl;
+  ResourceMark rm(thread);
+
+  xtty->begin_elem("task_queued");
+  log_task(xtty);
+  if (_comment != NULL) {
+    xtty->print(" comment='%s'", _comment);
+  }
+  if (_hot_method != NULL) {
+    methodHandle hot(thread, _hot_method);
+    methodHandle method(thread, _method);
+    if (hot() != method()) {
+      xtty->method(hot);
+    }
+  }
+  if (_hot_count != 0) {
+    xtty->print(" hot_count='%d'", _hot_count);
+  }
+  xtty->end_elem();
+}
+
+
+// ------------------------------------------------------------------
+// CompileTask::log_task_start
+void CompileTask::log_task_start(CompileLog* log)   {
+  log->begin_head("task");
+  log_task(log);
+  log->end_head();
+}
+
+
+// ------------------------------------------------------------------
+// CompileTask::log_task_done
+void CompileTask::log_task_done(CompileLog* log) {
+  Thread* thread = Thread::current();
+  methodHandle method(thread, this->method());
+  ResourceMark rm(thread);
+
+  if (!_is_success) {
+    const char* reason = _failure_reason != NULL ? _failure_reason : "unknown";
+    log->elem("failure reason='%s'", reason);
+  }
+
+  // <task_done ... stamp='1.234'>  </task>
+  nmethod* nm = code();
+  log->begin_elem("task_done success='%d' nmsize='%d' count='%d'",
+                  _is_success, nm == NULL ? 0 : nm->content_size(),
+                  method->invocation_count());
+  int bec = method->backedge_count();
+  if (bec != 0)  log->print(" backedge_count='%d'", bec);
+  // Note:  "_is_complete" is about to be set, but is not.
+  if (_num_inlined_bytecodes != 0) {
+    log->print(" inlined_bytes='%d'", _num_inlined_bytecodes);
+  }
+  log->stamp();
+  log->end_elem();
+  log->clear_identities();   // next task will have different CI
+  log->tail("task");
+  if (log->unflushed_count() > 2000) {
+    log->flush();
+  }
+  log->mark_file_end();
+}
+
+// ------------------------------------------------------------------
+// CompileTask::print_inlining_inner
+void CompileTask::print_inlining_inner(outputStream* st, ciMethod* method, int inline_level, int bci, const char* msg) {
+  //         1234567
+  st->print("        ");     // print timestamp
+  //         1234
+  st->print("     ");        // print compilation number
+
+  // method attributes
+  if (method->is_loaded()) {
+    const char sync_char      = method->is_synchronized()        ? 's' : ' ';
+    const char exception_char = method->has_exception_handlers() ? '!' : ' ';
+    const char monitors_char  = method->has_monitor_bytecodes()  ? 'm' : ' ';
+
+    // print method attributes
+    st->print(" %c%c%c  ", sync_char, exception_char, monitors_char);
+  } else {
+    //         %s!bn
+    st->print("      ");     // print method attributes
+  }
+
+  if (TieredCompilation) {
+    st->print("  ");
+  }
+  st->print("     ");        // more indent
+  st->print("    ");         // initial inlining indent
+
+  for (int i = 0; i < inline_level; i++)  st->print("  ");
+
+  st->print("@ %d  ", bci);  // print bci
+  method->print_short_name(st);
+  if (method->is_loaded())
+    st->print(" (%d bytes)", method->code_size());
+  else
+    st->print(" (not loaded)");
+
+  if (msg != NULL) {
+    st->print("   %s", msg);
+  }
+  st->cr();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/compiler/compileTask.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_COMPILER_COMPILETASK_HPP
+#define SHARE_VM_COMPILER_COMPILETASK_HPP
+
+#include "code/nmethod.hpp"
+#include "ci/ciMethod.hpp"
+#include "compiler/compileLog.hpp"
+#include "memory/allocation.inline.hpp"
+#include "utilities/xmlstream.hpp"
+
+// CompileTask
+//
+// An entry in the compile queue.  It represents a pending or current
+// compilation.
+
+class CompileTask : public CHeapObj<mtCompiler> {
+  friend class VMStructs;
+
+ private:
+  static CompileTask* _task_free_list;
+#ifdef ASSERT
+  static int          _num_allocated_tasks;
+#endif
+
+  Monitor*     _lock;
+  uint         _compile_id;
+  Method*      _method;
+  jobject      _method_holder;
+  int          _osr_bci;
+  bool         _is_complete;
+  bool         _is_success;
+  bool         _is_blocking;
+  int          _comp_level;
+  int          _num_inlined_bytecodes;
+  nmethodLocker* _code_handle;  // holder of eventual result
+  CompileTask* _next, *_prev;
+  bool         _is_free;
+  // Fields used for logging why the compilation was initiated:
+  jlong        _time_queued;  // in units of os::elapsed_counter()
+  Method*      _hot_method;   // which method actually triggered this task
+  jobject      _hot_method_holder;
+  int          _hot_count;    // information about its invocation counter
+  const char*  _comment;      // more info about the task
+  const char*  _failure_reason;
+
+ public:
+  CompileTask() {
+    _lock = new Monitor(Mutex::nonleaf+2, "CompileTaskLock");
+  }
+
+  void initialize(int compile_id, methodHandle method, int osr_bci, int comp_level,
+                  methodHandle hot_method, int hot_count, const char* comment,
+                  bool is_blocking);
+
+  static CompileTask* allocate();
+  static void         free(CompileTask* task);
+
+  int          compile_id() const                { return _compile_id; }
+  Method*      method() const                    { return _method; }
+  Method*      hot_method() const                { return _hot_method; }
+  int          osr_bci() const                   { return _osr_bci; }
+  bool         is_complete() const               { return _is_complete; }
+  bool         is_blocking() const               { return _is_blocking; }
+  bool         is_success() const                { return _is_success; }
+
+  nmethodLocker* code_handle() const             { return _code_handle; }
+  void         set_code_handle(nmethodLocker* l) { _code_handle = l; }
+  nmethod*     code() const;                     // _code_handle->code()
+  void         set_code(nmethod* nm);            // _code_handle->set_code(nm)
+
+  Monitor*     lock() const                      { return _lock; }
+
+  void         mark_complete()                   { _is_complete = true; }
+  void         mark_success()                    { _is_success = true; }
+
+  int          comp_level()                      { return _comp_level;}
+  void         set_comp_level(int comp_level)    { _comp_level = comp_level;}
+
+  int          num_inlined_bytecodes() const     { return _num_inlined_bytecodes; }
+  void         set_num_inlined_bytecodes(int n)  { _num_inlined_bytecodes = n; }
+
+  CompileTask* next() const                      { return _next; }
+  void         set_next(CompileTask* next)       { _next = next; }
+  CompileTask* prev() const                      { return _prev; }
+  void         set_prev(CompileTask* prev)       { _prev = prev; }
+  bool         is_free() const                   { return _is_free; }
+  void         set_is_free(bool val)             { _is_free = val; }
+
+  // RedefineClasses support
+  void         metadata_do(void f(Metadata*));
+  void         mark_on_stack();
+
+private:
+  static void  print_impl(outputStream* st, Method* method, int compile_id, int comp_level,
+                                      bool is_osr_method = false, int osr_bci = -1, bool is_blocking = false,
+                                      const char* msg = NULL, bool short_form = false, bool cr = true);
+
+public:
+  void         print(outputStream* st = tty, const char* msg = NULL, bool short_form = false, bool cr = true);
+  static void  print(outputStream* st, const nmethod* nm, const char* msg = NULL, bool short_form = false, bool cr = true) {
+    print_impl(st, nm->method(), nm->compile_id(), nm->comp_level(),
+                           nm->is_osr_method(), nm->is_osr_method() ? nm->osr_entry_bci() : -1, /*is_blocking*/ false,
+                           msg, short_form, cr);
+  }
+
+  static void  print_inline_indent(int inline_level, outputStream* st = tty);
+
+  void         print_tty();
+  void         print_line_on_error(outputStream* st, char* buf, int buflen);
+
+  void         log_task(xmlStream* log);
+  void         log_task_queued();
+  void         log_task_start(CompileLog* log);
+  void         log_task_done(CompileLog* log);
+
+  void         set_failure_reason(const char* reason) {
+    _failure_reason = reason;
+  }
+
+  bool         check_break_at_flags();
+
+  static void print_inlining_inner(outputStream* st, ciMethod* method, int inline_level, int bci, const char* msg = NULL);
+  static void print_inlining_tty(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
+    print_inlining_inner(tty, method, inline_level, bci, msg);
+  }
+};
+
+#endif // SHARE_VM_COMPILER_COMPILETASK_HPP
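
The allocate()/free() pair declared above recycles tasks through the static _task_free_list, threading spare entries on the task's own _next link. A minimal sketch of that intrusive free-list idiom, assuming a single-threaded caller (the HotSpot code performs the same pops and pushes under a dedicated lock and, under ASSERT, tracks _num_allocated_tasks):

    #include <cstddef>

    // Sketch only: Task stands in for CompileTask.
    struct Task {
      Task* _next;
      Task() : _next(nullptr) {}
      Task* next() const      { return _next; }
      void  set_next(Task* t) { _next = t; }
    };

    struct TaskFreeList {
      Task* _head;
      TaskFreeList() : _head(nullptr) {}

      Task* allocate() {
        if (_head != nullptr) {      // reuse a recycled entry if available
          Task* t = _head;
          _head = t->next();
          t->set_next(nullptr);
          return t;
        }
        return new Task();           // free list empty: allocate fresh
      }
      void free_task(Task* t) {      // push back for later reuse
        t->set_next(_head);
        _head = t;
      }
    };
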
--- a/src/share/vm/gc/cms/cmsOopClosures.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/cms/cmsOopClosures.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -66,7 +66,8 @@
   virtual void do_klass(Klass* k);
   void do_klass_nv(Klass* k);
 
-  virtual void do_class_loader_data(ClassLoaderData* cld);
+  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
+  void do_cld_nv(ClassLoaderData* cld);
 };
 
 class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
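
The do_cld/do_cld_nv split above follows HotSpot's `_nv` (non-virtual) closure convention: the virtual method is a thin forwarder to a non-virtual worker, so templated call sites that know the concrete closure type can bind the hot call statically. A self-contained sketch of the idiom (names hypothetical, not the HotSpot types):

    struct IntClosure {
      virtual void do_it(int* p) = 0;              // generic, virtually dispatched
    };

    struct AddOneClosure : IntClosure {
      void do_it_nv(int* p) { *p += 1; }           // non-virtual worker
      virtual void do_it(int* p) { do_it_nv(p); }  // virtual entry forwards
    };

    // A caller templated on the concrete closure type lets the compiler
    // resolve (and inline) the per-element call with no vtable lookup:
    template <typename ClosureType>
    void iterate(ClosureType* cl, int* begin, int* end) {
      for (int* p = begin; p < end; p++) {
        cl->do_it_nv(p);
      }
    }
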
--- a/src/share/vm/gc/cms/cmsOopClosures.inline.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/cms/cmsOopClosures.inline.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -50,11 +50,11 @@
 
 inline void MetadataAwareOopsInGenClosure::do_klass_nv(Klass* k) {
   ClassLoaderData* cld = k->class_loader_data();
-  do_class_loader_data(cld);
+  do_cld_nv(cld);
 }
 inline void MetadataAwareOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); }
 
-inline void MetadataAwareOopsInGenClosure::do_class_loader_data(ClassLoaderData* cld) {
+inline void MetadataAwareOopsInGenClosure::do_cld_nv(ClassLoaderData* cld) {
   assert(_klass_closure._oop_closure == this, "Must be");
 
   bool claim = true;  // Must claim the class loader data before processing.
--- a/src/share/vm/gc/cms/compactibleFreeListSpace.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/cms/compactibleFreeListSpace.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -702,7 +702,7 @@
         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                     oop(bottom)) &&                                             \
         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
-      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
+      size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
       bottom += _cfls->adjustObjectSize(word_sz);                               \
     } else {                                                                    \
       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
@@ -729,7 +729,7 @@
         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                     oop(bottom)) &&                                             \
         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
-      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
+      size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
       bottom += _cfls->adjustObjectSize(word_sz);                               \
     } else {                                                                    \
       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
@@ -2989,7 +2989,7 @@
   assert(task_size > CardTableModRefBS::card_size_in_words &&
          (task_size %  CardTableModRefBS::card_size_in_words == 0),
          "Otherwise arithmetic below would be incorrect");
-  MemRegion span = _gen->reserved();
+  MemRegion span = _old_gen->reserved();
   if (low != NULL) {
     if (span.contains(low)) {
       // Align low down to  a card boundary so that
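
The oop_iterate → oop_iterate_size renamings in this file (and in concurrentMarkSweepGeneration.cpp and concurrentMark.cpp below) track a JDK 9 split of the oop iterators into a void-returning variant and a size-returning variant, so only callers that walk the heap linearly pay for computing the object size. A simplified sketch of the shape of that split, with a stand-in type rather than oopDesc (whose iterators are macro-generated):

    #include <cstddef>

    struct OopClosure { virtual void do_oop(void** p) = 0; };

    struct Obj {
      void*  fields[4];
      size_t words;                        // object size in HeapWords

      void iterate_fields(OopClosure* cl) {
        for (int i = 0; i < 4; i++) cl->do_oop(&fields[i]);
      }
      // void variant: callers that do not need the size skip computing it.
      void oop_iterate(OopClosure* cl) { iterate_fields(cl); }
      // size-returning variant: linear walkers advance their cursor by the
      // result; the size is read up front because the closure may overwrite
      // the object (e.g. install a forwarding pointer).
      size_t oop_iterate_size(OopClosure* cl) {
        size_t sz = words;
        iterate_fields(cl);
        return sz;
      }
    };
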
--- a/src/share/vm/gc/cms/compactibleFreeListSpace.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/cms/compactibleFreeListSpace.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -99,7 +99,7 @@
   BlockOffsetArrayNonContigSpace _bt;
 
   CMSCollector* _collector;
-  ConcurrentMarkSweepGeneration* _gen;
+  ConcurrentMarkSweepGeneration* _old_gen;
 
   // Data structures for free blocks (used during allocation/sweeping)
 
--- a/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -212,7 +212,7 @@
                                            use_adaptive_freelists,
                                            dictionaryChoice);
   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
-  _cmsSpace->_gen = this;
+  _cmsSpace->_old_gen = this;
 
   _gc_stats = new CMSGCStats();
 
@@ -359,13 +359,13 @@
                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
   if (cms_free > expected_promotion) {
     // Start a cms collection if there isn't enough space to promote
-    // for the next minor collection.  Use the padded average as
+    // for the next young collection.  Use the padded average as
     // a safety factor.
     cms_free -= expected_promotion;
 
     // Adjust by the safety factor.
     double cms_free_dbl = (double)cms_free;
-    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
+    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
     // Apply a further correction factor which tries to adjust
     // for recent occurrence of concurrent mode failures.
     cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
@@ -531,7 +531,7 @@
   if (CMSConcurrentMTEnabled) {
     if (FLAG_IS_DEFAULT(ConcGCThreads)) {
       // just for now
-      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
+      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
     }
     if (ConcGCThreads > 1) {
       _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
@@ -592,7 +592,7 @@
   _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
 
   // Clip CMSBootstrapOccupancy between 0 and 100.
-  _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
+  _bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;
 
   // Now tell CMS generations the identity of their collector
   ConcurrentMarkSweepGeneration::set_collector(this);
@@ -613,7 +613,7 @@
     _end_addr = gch->end_addr();
     assert(_young_gen != NULL, "no _young_gen");
     _eden_chunk_index = 0;
-    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
+    _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
     _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
   }
 
@@ -795,29 +795,22 @@
       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
       gclog_or_tty->print_cr("\nFrom compute_new_size: ");
       gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
-      gclog_or_tty->print_cr("  Desired free fraction %f",
-              desired_free_percentage);
-      gclog_or_tty->print_cr("  Maximum free fraction %f",
-              maximum_free_percentage);
-      gclog_or_tty->print_cr("  Capacity " SIZE_FORMAT, capacity()/1000);
-      gclog_or_tty->print_cr("  Desired capacity " SIZE_FORMAT,
-              desired_capacity/1000);
+      gclog_or_tty->print_cr("  Desired free fraction %f", desired_free_percentage);
+      gclog_or_tty->print_cr("  Maximum free fraction %f", maximum_free_percentage);
+      gclog_or_tty->print_cr("  Capacity " SIZE_FORMAT, capacity() / 1000);
+      gclog_or_tty->print_cr("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
       GenCollectedHeap* gch = GenCollectedHeap::heap();
       assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
       size_t young_size = gch->young_gen()->capacity();
       gclog_or_tty->print_cr("  Young gen size " SIZE_FORMAT, young_size / 1000);
-      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc " SIZE_FORMAT,
-              unsafe_max_alloc_nogc()/1000);
-      gclog_or_tty->print_cr("  contiguous available " SIZE_FORMAT,
-              contiguous_available()/1000);
-      gclog_or_tty->print_cr("  Expand by " SIZE_FORMAT " (bytes)",
-              expand_bytes);
+      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
+      gclog_or_tty->print_cr("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
+      gclog_or_tty->print_cr("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
     }
     // safe if expansion fails
     expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
     if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("  Expanded free fraction %f",
-        ((double) free()) / capacity());
+      gclog_or_tty->print_cr("  Expanded free fraction %f", ((double) free()) / capacity());
     }
   } else {
     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
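
As a concrete check of the sizing arithmetic in the hunk above: with used() = 600 MB and desired_free_percentage = 0.25, desired_capacity = 600 MB / (1 - 0.25) = 800 MB; the generation is sized so the current live data would occupy 75% of it, and expand_bytes (or the shrink path in the else branch) covers the difference from the current capacity.
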
@@ -834,16 +827,14 @@
   return cmsSpace()->freelistLock();
 }
 
-HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
-                                                  bool   tlab) {
+HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
   CMSSynchronousYieldRequest yr;
-  MutexLockerEx x(freelistLock(),
-                  Mutex::_no_safepoint_check_flag);
+  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
   return have_lock_and_allocate(size, tlab);
 }
 
 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
-                                                  bool   tlab /* ignored */) {
+                                                                bool   tlab /* ignored */) {
   assert_lock_strong(freelistLock());
   size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
   HeapWord* res = cmsSpace()->allocate(adjustedSize);
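
MutexLockerEx in allocate() above is HotSpot's RAII lock guard: its constructor acquires freelistLock() (here skipping the safepoint check) and its destructor releases the lock on every return path out of have_lock_and_allocate(). The standard-library equivalent of the idiom, for illustration only:

    #include <cstddef>
    #include <mutex>

    std::mutex freelist_mutex;   // stand-in for freelistLock()

    void* locked_allocate(size_t size) {
      std::lock_guard<std::mutex> x(freelist_mutex);  // plays MutexLockerEx's role
      // ... allocation work done while the lock is held ...
      return ::operator new(size);                    // placeholder for the sketch
    }
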
@@ -2426,7 +2417,7 @@
 
     gch->gen_process_roots(&srs,
                            GenCollectedHeap::OldGen,
-                           true,   // younger gens are roots
+                           true,   // young gen as roots
                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
                            should_unload_classes(),
                            &notOlder,
@@ -2498,7 +2489,7 @@
 
     gch->gen_process_roots(&srs,
                            GenCollectedHeap::OldGen,
-                           true,   // younger gens are roots
+                           true,   // young gen as roots
                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
                            should_unload_classes(),
                            &notOlder,
@@ -2952,12 +2943,7 @@
   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
   assert(_collectorState == InitialMarking, "just checking");
 
-  // If there has not been a GC[n-1] since last GC[n] cycle completed,
-  // precede our marking with a collection of all
-  // younger generations to keep floating garbage to a minimum.
-  // XXX: we won't do this for now -- it's an optimization to be done later.
-
-  // already have locks
+  // Already have locks.
   assert_lock_strong(bitMapLock());
   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
 
@@ -3027,7 +3013,7 @@
 
       gch->gen_process_roots(&srs,
                              GenCollectedHeap::OldGen,
-                             true,   // younger gens are roots
+                             true,   // young gen as roots
                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
                              should_unload_classes(),
                              &notOlder,
@@ -3037,7 +3023,7 @@
   }
 
   // Clear mod-union table; it will be dirtied in the prologue of
-  // CMS generation per each younger generation collection.
+  // CMS generation per each young generation collection.
 
   assert(_modUnionTable.isAllClear(),
        "Was cleared in most recent final checkpoint phase"
@@ -3057,7 +3043,7 @@
   // assert(!SafepointSynchronize::is_at_safepoint(),
   //        "inconsistent argument?");
   // However that wouldn't be right, because it's possible that
-  // a safepoint is indeed in progress as a younger generation
+  // a safepoint is indeed in progress as a young generation
   // stop-the-world GC happens even as we mark in this generation.
   assert(_collectorState == Marking, "inconsistent state?");
   check_correct_thread_executing();
@@ -3065,7 +3051,7 @@
 
   // Weak ref discovery note: We may be discovering weak
   // refs in this generation concurrent (but interleaved) with
-  // weak ref discovery by a younger generation collector.
+  // weak ref discovery by the young generation collector.
 
   CMSTokenSyncWithLocks ts(true, bitMapLock());
   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
@@ -3095,7 +3081,7 @@
 
   // Note that when we do a marking step we need to hold the
   // bit map lock -- recall that direct allocation (by mutators)
-  // and promotion (by younger generation collectors) is also
+  // and promotion (by the young generation collector) is also
   // marking the bit map. [the so-called allocate live policy.]
   // Because the implementation of bit map marking is not
   // robust wrt simultaneous marking of bits in the same word,
@@ -4049,7 +4035,7 @@
 // one of these methods, please check the other method too.
 
 size_t CMSCollector::preclean_mod_union_table(
-  ConcurrentMarkSweepGeneration* gen,
+  ConcurrentMarkSweepGeneration* old_gen,
   ScanMarkedObjectsAgainCarefullyClosure* cl) {
   verify_work_stacks_empty();
   verify_overflow_empty();
@@ -4064,10 +4050,10 @@
   // generation, but we might potentially miss cards when the
   // generation is rapidly expanding while we are in the midst
   // of precleaning.
-  HeapWord* startAddr = gen->reserved().start();
-  HeapWord* endAddr   = gen->reserved().end();
-
-  cl->setFreelistLock(gen->freelistLock());   // needed for yielding
+  HeapWord* startAddr = old_gen->reserved().start();
+  HeapWord* endAddr   = old_gen->reserved().end();
+
+  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
 
   size_t numDirtyCards, cumNumDirtyCards;
   HeapWord *nextAddr, *lastAddr;
@@ -4109,7 +4095,7 @@
       HeapWord* stop_point = NULL;
       stopTimer();
       // Potential yield point
-      CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
+      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(),
                                bitMapLock());
       startTimer();
       {
@@ -4117,7 +4103,7 @@
         verify_overflow_empty();
         sample_eden();
         stop_point =
-          gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
+          old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
       }
       if (stop_point != NULL) {
         // The careful iteration stopped early either because it found an
@@ -4152,15 +4138,15 @@
 // below are largely identical; if you need to modify
 // one of these methods, please check the other method too.
 
-size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
+size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
   ScanMarkedObjectsAgainCarefullyClosure* cl) {
   // strategy: it's similar to preclean_mod_union_table above, in that
   // we accumulate contiguous ranges of dirty cards, mark these cards
   // precleaned, then scan the region covered by these cards.
-  HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
-  HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
-
-  cl->setFreelistLock(gen->freelistLock());   // needed for yielding
+  HeapWord* endAddr   = (HeapWord*)(old_gen->_virtual_space.high());
+  HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low());
+
+  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
 
   size_t numDirtyCards, cumNumDirtyCards;
   HeapWord *lastAddr, *nextAddr;
@@ -4197,13 +4183,13 @@
 
     if (!dirtyRegion.is_empty()) {
       stopTimer();
-      CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
+      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock());
       startTimer();
       sample_eden();
       verify_work_stacks_empty();
       verify_overflow_empty();
       HeapWord* stop_point =
-        gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
+        old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
       if (stop_point != NULL) {
         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
                "Should only be AbortablePreclean.");
@@ -4623,7 +4609,7 @@
     ResourceMark rm;
     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
     for (int i = 0; i < array->length(); i++) {
-      par_mrias_cl.do_class_loader_data(array->at(i));
+      par_mrias_cl.do_cld_nv(array->at(i));
     }
 
     // We don't need to keep track of new CLDs anymore.
@@ -5086,7 +5072,7 @@
   // preclean phase did of eden, plus the [two] tasks of
   // scanning the [two] survivor spaces. Further fine-grain
   // parallelization of the scanning of the survivor spaces
-  // themselves, and of precleaning of the younger gen itself
+  // themselves, and of precleaning of the young gen itself
   // is deferred to the future.
   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
 
@@ -5177,7 +5163,7 @@
 
     gch->gen_process_roots(&srs,
                            GenCollectedHeap::OldGen,
-                           true,  // younger gens as roots
+                           true,  // young gen as roots
                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
                            should_unload_classes(),
                            &mrias_cl,
@@ -5199,7 +5185,7 @@
     ResourceMark rm;
     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
     for (int i = 0; i < array->length(); i++) {
-      mrias_cl.do_class_loader_data(array->at(i));
+      mrias_cl.do_cld_nv(array->at(i));
     }
 
     // We don't need to keep track of new CLDs anymore.
@@ -5661,7 +5647,7 @@
   }
 }
 
-void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
+void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
   // We iterate over the space(s) underlying this generation,
   // checking the mark bit map to see if the bits corresponding
   // to specific blocks are marked or not. Blocks that are
@@ -5690,26 +5676,26 @@
   // check that we hold the requisite locks
   assert(have_cms_token(), "Should hold cms token");
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
-  assert_lock_strong(gen->freelistLock());
+  assert_lock_strong(old_gen->freelistLock());
   assert_lock_strong(bitMapLock());
 
   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
   assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
-  gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
-                                      _inter_sweep_estimate.padded_average(),
-                                      _intra_sweep_estimate.padded_average());
-  gen->setNearLargestChunk();
+  old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
+                                          _inter_sweep_estimate.padded_average(),
+                                          _intra_sweep_estimate.padded_average());
+  old_gen->setNearLargestChunk();
 
   {
-    SweepClosure sweepClosure(this, gen, &_markBitMap, CMSYield);
-    gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
+    SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
+    old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
     // We need to free-up/coalesce garbage/blocks from a
     // co-terminal free run. This is done in the SweepClosure
     // destructor; so, do not remove this scope, else the
     // end-of-sweep-census below will be off by a little bit.
   }
-  gen->cmsSpace()->sweep_completed();
-  gen->cmsSpace()->endSweepFLCensus(sweep_count());
+  old_gen->cmsSpace()->sweep_completed();
+  old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
   if (should_unload_classes()) {                // unloaded classes this cycle,
     _concurrent_cycles_since_last_unload = 0;   // ... reset count
   } else {                                      // did not unload classes,
@@ -6324,12 +6310,12 @@
           // objArrays are precisely marked; restrict scanning
           // to dirty cards only.
           size = CompactibleFreeListSpace::adjustObjectSize(
-                   p->oop_iterate(_scanningClosure, mr));
+                   p->oop_iterate_size(_scanningClosure, mr));
         } else {
           // A non-array may have been imprecisely marked; we need
           // to scan object in its entirety.
           size = CompactibleFreeListSpace::adjustObjectSize(
-                   p->oop_iterate(_scanningClosure));
+                   p->oop_iterate_size(_scanningClosure));
         }
         #ifdef ASSERT
           size_t direct_size =
@@ -6417,7 +6403,7 @@
   // Note that we do not yield while we iterate over
   // the interior oops of p, pushing the relevant ones
   // on our marking stack.
-  size_t size = p->oop_iterate(_scanning_closure);
+  size_t size = p->oop_iterate_size(_scanning_closure);
   do_yield_check();
   // Observe that below, we do not abandon the preclean
   // phase as soon as we should; rather we empty the
--- a/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -723,7 +723,7 @@
 
  private:
   // Support for parallelizing young gen rescan in CMS remark phase
-  ParNewGeneration* _young_gen;  // the younger gen
+  ParNewGeneration* _young_gen;
 
   HeapWord** _top_addr;    // ... Top of Eden
   HeapWord** _end_addr;    // ... End of Eden
@@ -772,9 +772,9 @@
  private:
 
   // Concurrent precleaning work
-  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
+  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* old_gen,
                                   ScanMarkedObjectsAgainCarefullyClosure* cl);
-  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
+  size_t preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
                              ScanMarkedObjectsAgainCarefullyClosure* cl);
   // Does precleaning work, returning a quantity indicative of
   // the amount of "useful work" done.
@@ -797,7 +797,7 @@
   void refProcessingWork();
 
   // Concurrent sweeping work
-  void sweepWork(ConcurrentMarkSweepGeneration* gen);
+  void sweepWork(ConcurrentMarkSweepGeneration* old_gen);
 
   // (Concurrent) resetting of support data structures
   void reset(bool concurrent);
@@ -1120,10 +1120,8 @@
   MemRegion used_region_at_save_marks() const;
 
   // Does a "full" (forced) collection invoked on this generation collect
-  // all younger generations as well? Note that the second conjunct is a
-  // hack to allow the collection of the younger gen first if the flag is
-  // set.
-  virtual bool full_collects_younger_generations() const {
+  // the young generation as well?
+  virtual bool full_collects_young_generation() const {
     return !ScavengeBeforeFullGC;
   }
 
@@ -1153,9 +1151,8 @@
 
   virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
 
-  // Inform this (non-young) generation that a promotion failure was
-  // encountered during a collection of a younger generation that
-  // promotes into this generation.
+  // Inform this (old) generation that a promotion failure was
+  // encountered during a collection of the young generation.
   virtual void promotion_failure_occurred();
 
   bool should_collect(bool full, size_t size, bool tlab);
--- a/src/share/vm/gc/cms/concurrentMarkSweepGeneration.inline.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/cms/concurrentMarkSweepGeneration.inline.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -295,7 +295,7 @@
     promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
   }
 
-  // If the younger gen collections were skipped, then the
+  // If the young gen collection was skipped, then the
   // number of promoted bytes will be 0 and adding it to the
   // average will incorrectly lessen the average.  It is, however,
   // also possible that no promotion was needed.
--- a/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -39,23 +39,17 @@
 
 // ======= Concurrent Mark Sweep Thread ========
 
-// The CMS thread is created when Concurrent Mark Sweep is used in the
-// older of two generations in a generational memory system.
+ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::_cmst = NULL;
+CMSCollector* ConcurrentMarkSweepThread::_collector         = NULL;
+bool ConcurrentMarkSweepThread::_should_terminate           = false;
+int  ConcurrentMarkSweepThread::_CMS_flag                   = CMS_nil;
 
-ConcurrentMarkSweepThread*
-     ConcurrentMarkSweepThread::_cmst     = NULL;
-CMSCollector* ConcurrentMarkSweepThread::_collector = NULL;
-bool ConcurrentMarkSweepThread::_should_terminate = false;
-int  ConcurrentMarkSweepThread::_CMS_flag         = CMS_nil;
+volatile jint ConcurrentMarkSweepThread::_pending_yields    = 0;
 
-volatile jint ConcurrentMarkSweepThread::_pending_yields      = 0;
-
-SurrogateLockerThread*
-     ConcurrentMarkSweepThread::_slt = NULL;
+SurrogateLockerThread* ConcurrentMarkSweepThread::_slt      = NULL;
 SurrogateLockerThread::SLT_msg_type
      ConcurrentMarkSweepThread::_sltBuffer = SurrogateLockerThread::empty;
-Monitor*
-     ConcurrentMarkSweepThread::_sltMonitor = NULL;
+Monitor* ConcurrentMarkSweepThread::_sltMonitor             = NULL;
 
 ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
   : ConcurrentGCThread() {
--- a/src/share/vm/gc/cms/parNewGeneration.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/cms/parNewGeneration.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -69,20 +69,28 @@
                                        Stack<oop, mtGC>* overflow_stacks_,
                                        size_t desired_plab_sz_,
                                        ParallelTaskTerminator& term_) :
-  _to_space(to_space_), _old_gen(old_gen_), _young_gen(young_gen_), _thread_num(thread_num_),
-  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
+  _to_space(to_space_),
+  _old_gen(old_gen_),
+  _young_gen(young_gen_),
+  _thread_num(thread_num_),
+  _work_queue(work_queue_set_->queue(thread_num_)),
+  _to_space_full(false),
   _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
   _ageTable(false), // false ==> not the global age table, no perf data.
   _to_space_alloc_buffer(desired_plab_sz_),
-  _to_space_closure(young_gen_, this), _old_gen_closure(young_gen_, this),
-  _to_space_root_closure(young_gen_, this), _old_gen_root_closure(young_gen_, this),
+  _to_space_closure(young_gen_, this),
+  _old_gen_closure(young_gen_, this),
+  _to_space_root_closure(young_gen_, this),
+  _old_gen_root_closure(young_gen_, this),
   _older_gen_closure(young_gen_, this),
   _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                       &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
                       work_queue_set_, &term_),
-  _is_alive_closure(young_gen_), _scan_weak_ref_closure(young_gen_, this),
+  _is_alive_closure(young_gen_),
+  _scan_weak_ref_closure(young_gen_, this),
   _keep_alive_closure(&_scan_weak_ref_closure),
-  _strong_roots_time(0.0), _term_time(0.0)
+  _strong_roots_time(0.0),
+  _term_time(0.0)
 {
   #if TASKQUEUE_STATS
   _term_attempts = 0;
@@ -90,8 +98,7 @@
   _overflow_refill_objs = 0;
   #endif // TASKQUEUE_STATS
 
-  _survivor_chunk_array =
-    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
+  _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num());
   _hash_seed = 17;  // Might want to take time-based random value.
   _start = os::elapsedTime();
   _old_gen_closure.set_generation(old_gen_);
@@ -154,7 +161,6 @@
   }
 }
 
-
 void ParScanThreadState::trim_queues(int max_size) {
   ObjToScanQueue* queue = work_queue();
   do {
@@ -222,15 +228,12 @@
 }
 
 HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
-
-  // Otherwise, if the object is small enough, try to reallocate the
-  // buffer.
+  // If the object is small enough, try to reallocate the buffer.
   HeapWord* obj = NULL;
   if (!_to_space_full) {
     PLAB* const plab = to_space_alloc_buffer();
-    Space*            const sp   = to_space();
-    if (word_sz * 100 <
-        ParallelGCBufferWastePct * plab->word_sz()) {
+    Space* const sp  = to_space();
+    if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) {
       // Is small enough; abandon this buffer and start a new one.
       plab->retire();
       size_t buf_size = plab->word_sz();
@@ -241,8 +244,7 @@
         size_t free_bytes = sp->free();
         while(buf_space == NULL && free_bytes >= min_bytes) {
           buf_size = free_bytes >> LogHeapWordSize;
-          assert(buf_size == (size_t)align_object_size(buf_size),
-                 "Invariant");
+          assert(buf_size == (size_t)align_object_size(buf_size), "Invariant");
           buf_space  = sp->par_allocate(buf_size);
           free_bytes = sp->free();
         }
@@ -262,7 +264,6 @@
         // We're used up.
         _to_space_full = true;
       }
-
     } else {
       // Too large; allocate the object individually.
       obj = sp->par_allocate(word_sz);
@@ -271,7 +272,6 @@
   return obj;
 }
 
-
 void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
   to_space_alloc_buffer()->undo_allocation(obj, word_sz);
 }
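
The test word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz() above is an integer-arithmetic percentage comparison, and its logic rests on the fact that the PLAB allocation just failed: the buffer's unused tail must be smaller than word_sz, so if word_sz is under ParallelGCBufferWastePct percent of a full buffer, the space abandoned by retiring it is an acceptable waste. A self-contained sketch of the decision, with the flag passed as a parameter:

    #include <cstddef>

    // Sketch only: in HotSpot, waste_pct is the global flag
    // ParallelGCBufferWastePct.
    bool should_retire_plab(size_t word_sz,       // size of the failed allocation
                            size_t plab_word_sz,  // capacity of a full PLAB
                            size_t waste_pct) {
      // Integer form of: word_sz / plab_word_sz < waste_pct / 100.
      // True  => retire the buffer and start a fresh one;
      // false => the object is large, allocate it directly in to-space.
      return word_sz * 100 < waste_pct * plab_word_sz;
    }
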
@@ -288,7 +288,7 @@
   // Initializes states for the specified number of threads.
   ParScanThreadStateSet(int                     num_threads,
                         Space&                  to_space,
-                        ParNewGeneration&       gen,
+                        ParNewGeneration&       young_gen,
                         Generation&             old_gen,
                         ObjToScanQueueSet&      queue_set,
                         Stack<oop, mtGC>*       overflow_stacks_,
@@ -315,21 +315,25 @@
 
 private:
   ParallelTaskTerminator& _term;
-  ParNewGeneration&       _gen;
+  ParNewGeneration&       _young_gen;
   Generation&             _old_gen;
  public:
   bool is_valid(int id) const { return id < length(); }
   ParallelTaskTerminator* terminator() { return &_term; }
 };
 
-
-ParScanThreadStateSet::ParScanThreadStateSet(
-  int num_threads, Space& to_space, ParNewGeneration& gen,
-  Generation& old_gen, ObjToScanQueueSet& queue_set,
-  Stack<oop, mtGC>* overflow_stacks,
-  size_t desired_plab_sz, ParallelTaskTerminator& term)
+ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
+                                             Space& to_space,
+                                             ParNewGeneration& young_gen,
+                                             Generation& old_gen,
+                                             ObjToScanQueueSet& queue_set,
+                                             Stack<oop, mtGC>* overflow_stacks,
+                                             size_t desired_plab_sz,
+                                             ParallelTaskTerminator& term)
   : ResourceArray(sizeof(ParScanThreadState), num_threads),
-    _gen(gen), _old_gen(old_gen), _term(term)
+    _young_gen(young_gen),
+    _old_gen(old_gen),
+    _term(term)
 {
   assert(num_threads > 0, "sanity check!");
   assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
@@ -337,13 +341,12 @@
   // Initialize states.
   for (int i = 0; i < num_threads; ++i) {
     new ((ParScanThreadState*)_data + i)
-        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
+        ParScanThreadState(&to_space, &young_gen, &old_gen, i, &queue_set,
                            overflow_stacks, desired_plab_sz, term);
   }
 }
 
-inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
-{
+inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) {
   assert(i >= 0 && i < length(), "sanity check!");
   return ((ParScanThreadState*)_data)[i];
 }
@@ -357,8 +360,7 @@
   }
 }
 
-void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed)
-{
+void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) {
   _term.reset_for_reuse(active_threads);
   if (promotion_failed) {
     for (int i = 0; i < length(); ++i) {
@@ -368,36 +370,27 @@
 }
 
 #if TASKQUEUE_STATS
-void
-ParScanThreadState::reset_stats()
-{
+void ParScanThreadState::reset_stats() {
   taskqueue_stats().reset();
   _term_attempts = 0;
   _overflow_refills = 0;
   _overflow_refill_objs = 0;
 }
 
-void ParScanThreadStateSet::reset_stats()
-{
+void ParScanThreadStateSet::reset_stats() {
   for (int i = 0; i < length(); ++i) {
     thread_state(i).reset_stats();
   }
 }
 
-void
-ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
-{
+void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) {
   st->print_raw_cr("GC Termination Stats");
-  st->print_raw_cr("     elapsed  --strong roots-- "
-                   "-------termination-------");
-  st->print_raw_cr("thr     ms        ms       %   "
-                   "    ms       %   attempts");
-  st->print_raw_cr("--- --------- --------- ------ "
-                   "--------- ------ --------");
+  st->print_raw_cr("     elapsed  --strong roots-- -------termination-------");
+  st->print_raw_cr("thr     ms        ms       %       ms       %   attempts");
+  st->print_raw_cr("--- --------- --------- ------ --------- ------ --------");
 }
 
-void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
-{
+void ParScanThreadStateSet::print_termination_stats(outputStream* const st) {
   print_termination_stats_hdr(st);
 
   for (int i = 0; i < length(); ++i) {
@@ -405,23 +398,20 @@
     const double elapsed_ms = pss.elapsed_time() * 1000.0;
     const double s_roots_ms = pss.strong_roots_time() * 1000.0;
     const double term_ms = pss.term_time() * 1000.0;
-    st->print_cr("%3d %9.2f %9.2f %6.2f "
-                 "%9.2f %6.2f " SIZE_FORMAT_W(8),
+    st->print_cr("%3d %9.2f %9.2f %6.2f %9.2f %6.2f " SIZE_FORMAT_W(8),
                  i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                  term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
   }
 }
 
 // Print stats related to work queue activity.
-void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
-{
+void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
   st->print_raw_cr("GC Task Stats");
   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
 }
 
-void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
-{
+void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st) {
   print_taskqueue_stats_hdr(st);
 
   TaskQueueStats totals;
@@ -443,8 +433,7 @@
 }
 #endif // TASKQUEUE_STATS
 
-void ParScanThreadStateSet::flush()
-{
+void ParScanThreadStateSet::flush() {
   // Work in this loop should be kept as lightweight as
   // possible since this might otherwise become a bottleneck
   // to scaling. Should we add heavy-weight work into this
@@ -454,12 +443,12 @@
 
     // Flush stats related to To-space PLAB activity and
     // retire the last buffer.
-    par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_gen.plab_stats());
+    par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_young_gen.plab_stats());
 
     // Every thread has its own age table.  We need to merge
     // them all into one.
     ageTable *local_table = par_scan_state.age_table();
-    _gen.age_table()->merge(local_table);
+    _young_gen.age_table()->merge(local_table);
 
     // Inform old gen that we're done.
     _old_gen.par_promote_alloc_done(i);
@@ -478,8 +467,7 @@
 
 ParScanClosure::ParScanClosure(ParNewGeneration* g,
                                ParScanThreadState* par_scan_state) :
-  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
-{
+  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
   _boundary = _g->reserved().end();
 }
 
@@ -531,24 +519,23 @@
   ObjToScanQueue* work_q = par_scan_state()->work_queue();
 
   while (true) {
-
     // Scan to-space and old-gen objs until we run out of both.
     oop obj_to_scan;
     par_scan_state()->trim_queues(0);
 
     // We have no local work, attempt to steal from other threads.
 
-    // attempt to steal work from promoted.
+    // Attempt to steal work from promoted.
     if (task_queues()->steal(par_scan_state()->thread_num(),
                              par_scan_state()->hash_seed(),
                              obj_to_scan)) {
       bool res = work_q->push(obj_to_scan);
       assert(res, "Empty queue should have room for a push.");
 
-      //   if successful, goto Start.
+      // If successful, goto Start.
       continue;
 
-      // try global overflow list.
+      // Try global overflow list.
     } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
       continue;
     }
@@ -564,15 +551,17 @@
   par_scan_state()->end_term_time();
 }
 
-ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen, Generation* old_gen,
-                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set,
+ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen,
+                             Generation* old_gen,
+                             HeapWord* young_old_boundary,
+                             ParScanThreadStateSet* state_set,
                              StrongRootsScope* strong_roots_scope) :
     AbstractGangTask("ParNewGeneration collection"),
     _young_gen(young_gen), _old_gen(old_gen),
     _young_old_boundary(young_old_boundary),
     _state_set(state_set),
     _strong_roots_scope(strong_roots_scope)
-  {}
+{}
 
 void ParNewGenTask::work(uint worker_id) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
@@ -595,8 +584,7 @@
   par_scan_state.start_strong_roots();
   gch->gen_process_roots(_strong_roots_scope,
                          GenCollectedHeap::YoungGen,
-                         true,  // Process younger gens, if any,
-                                // as strong roots.
+                         true,  // Process younger gens, if any, as strong roots.
                          GenCollectedHeap::SO_ScavengeCodeCache,
                          GenCollectedHeap::StrongAndWeakRoots,
                          &par_scan_state.to_space_root_closure(),
@@ -613,8 +601,7 @@
 #pragma warning( push )
 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 #endif
-ParNewGeneration::
-ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
+ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
   : DefNewGeneration(rs, initial_byte_size, "PCopy"),
   _overflow_list(NULL),
   _is_alive_closure(this),
@@ -625,20 +612,19 @@
   _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
   guarantee(_task_queues != NULL, "task_queues allocation failure.");
 
-  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
+  for (uint i = 0; i < ParallelGCThreads; i++) {
     ObjToScanQueue *q = new ObjToScanQueue();
     guarantee(q != NULL, "work_queue Allocation failure.");
-    _task_queues->register_queue(i1, q);
+    _task_queues->register_queue(i, q);
   }
 
-  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
-    _task_queues->queue(i2)->initialize();
+  for (uint i = 0; i < ParallelGCThreads; i++) {
+    _task_queues->queue(i)->initialize();
+  }
 
   _overflow_stacks = NULL;
   if (ParGCUseLocalOverflow) {
-
-    // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
-    // with ','
+    // typedef to work around NEW_C_HEAP_ARRAY macro, which cannot deal with ','
     typedef Stack<oop, mtGC> GCOopStack;
 
     _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
@@ -742,7 +728,7 @@
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
 public:
   ParNewRefProcTaskProxy(ProcessTask& task,
-                         ParNewGeneration& gen,
+                         ParNewGeneration& young_gen,
                          Generation& old_gen,
                          HeapWord* young_old_boundary,
                          ParScanThreadStateSet& state_set);
@@ -768,11 +754,9 @@
     _old_gen(old_gen),
     _young_old_boundary(young_old_boundary),
     _state_set(state_set)
-{
-}
+{ }
 
-void ParNewRefProcTaskProxy::work(uint worker_id)
-{
+void ParNewRefProcTaskProxy::work(uint worker_id) {
   ResourceMark rm;
   HandleMark hm;
   ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
@@ -792,15 +776,12 @@
       _task(task)
   { }
 
-  virtual void work(uint worker_id)
-  {
+  virtual void work(uint worker_id) {
     _task.work(worker_id);
   }
 };
 
-
-void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
-{
+void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   WorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
@@ -812,8 +793,7 @@
                    _young_gen.promotion_failed());
 }
 
-void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
-{
+void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   WorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
@@ -821,8 +801,7 @@
   workers->run_task(&enq_task);
 }
 
-void ParNewRefProcTaskExecutor::set_single_threaded_mode()
-{
+void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
   _state_set.flush();
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   gch->save_marks();
@@ -830,7 +809,8 @@
 
 ScanClosureWithParBarrier::
 ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
-  ScanClosure(g, gc_barrier) {}
+  ScanClosure(g, gc_barrier)
+{ }
 
 EvacuateFollowersClosureGeneral::
 EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
@@ -838,7 +818,7 @@
                                 OopsInGenClosure* older) :
   _gch(gch),
   _scan_cur_or_nonheap(cur), _scan_older(older)
-{}
+{ }
 
 void EvacuateFollowersClosureGeneral::do_void() {
   do {
@@ -850,7 +830,6 @@
   } while (!_gch->no_allocs_since_save_marks());
 }
 
-
 // A Generation that does parallel young-gen collection.
 
 void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
@@ -996,9 +975,9 @@
     if (ZapUnusedHeapArea) {
       // This is now done here because of the piece-meal mangling which
       // can check for valid mangling at intermediate points in the
-      // collection(s).  When a minor collection fails to collect
+      // collection(s).  When a young collection fails to collect
       // sufficient space resizing of the young generation can occur
-      // an redistribute the spaces in the young generation.  Mangle
+      // and redistribute the spaces in the young generation.  Mangle
       // here so that unzapped regions don't get distributed to
       // other spaces.
       to()->mangle_unused_area();
@@ -1113,8 +1092,10 @@
 // thus avoiding the need to undo the copy as in
 // copy_to_survivor_space_avoiding_with_undo.
 
-oop ParNewGeneration::copy_to_survivor_space(
-        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
+oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
+                                             oop old,
+                                             size_t sz,
+                                             markOop m) {
   // In the sequential version, this assert also says that the object is
   // not forwarded.  That might not be the case here.  It is the case that
   // the caller observed it to be not forwarded at some time in the past.
@@ -1141,8 +1122,7 @@
   }
 
   if (new_obj == NULL) {
-    // Either to-space is full or we decided to promote
-    // try allocating obj tenured
+    // Either to-space is full or we decided to promote: try allocating obj tenured.
 
     // Attempt to install a null forwarding pointer (atomically),
     // to claim the right to install the real forwarding pointer.
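
The "claim" in the last two comment lines is the standard parallel-copy race protocol: each racing worker CASes a claim into the object's header, only the winner installs the real forwarding pointer, and losers discard their speculative copy and use the winner's. A simplified one-step sketch with std::atomic (ParNew itself first claims with a placeholder and uses HotSpot's Atomic::cmpxchg on the mark word):

    #include <atomic>

    struct Obj {
      std::atomic<Obj*> forwardee;       // stands in for the mark word
      Obj() : forwardee(nullptr) {}
    };

    // Many GC workers may race to copy 'old_obj'; exactly one wins.
    Obj* forward_or_use_winner(Obj* old_obj, Obj* my_copy) {
      Obj* expected = nullptr;
      if (old_obj->forwardee.compare_exchange_strong(expected, my_copy)) {
        return my_copy;                  // we won: my_copy is canonical
      }
      // We lost: 'expected' now holds the winner's copy; the real code
      // also undoes the speculative to-space allocation here.
      return expected;
    }
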
--- a/src/share/vm/gc/cms/parNewGeneration.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/cms/parNewGeneration.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -71,11 +71,7 @@
   ParScanWithoutBarrierClosure         _to_space_closure; // scan_without_gc_barrier
   ParScanWithBarrierClosure            _old_gen_closure; // scan_with_gc_barrier
   ParRootScanWithoutBarrierClosure     _to_space_root_closure; // scan_root_without_gc_barrier
-  // One of these two will be passed to process_roots, which will
-  // set its generation.  The first is for two-gen configs where the
-  // old gen collects the perm gen; the second is for arbitrary configs.
-  // The second isn't used right now (it used to be used for the train, an
-  // incremental collector) but the declaration has been left as a reminder.
+  // Will be passed to process_roots to set its generation.
   ParRootScanWithBarrierTwoGensClosure _older_gen_closure;
   // This closure will always be bound to the old gen; it will be used
   // in evacuate_followers.
@@ -85,7 +81,6 @@
   ParScanWeakRefClosure                _scan_weak_ref_closure;
   ParKeepAliveClosure                  _keep_alive_closure;
 
-
   Space* _to_space;
   Space* to_space() { return _to_space; }
 
--- a/src/share/vm/gc/g1/concurrentMark.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/concurrentMark.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -1143,7 +1143,7 @@
   while (curr < end) {
     Prefetch::read(curr, interval);
     oop obj = oop(curr);
-    int size = obj->oop_iterate(&cl);
+    int size = obj->oop_iterate_size(&cl);
     assert(size == obj->size(), "sanity");
     curr += size;
   }
--- a/src/share/vm/gc/g1/g1Allocator.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/g1Allocator.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -367,7 +367,7 @@
   _max = _bottom + HeapRegion::min_region_size_in_words();
 
   // Tell mark-sweep that objects in this region are not to be marked.
-  G1MarkSweep::mark_range_archive(MemRegion(_bottom, HeapRegion::GrainWords));
+  G1MarkSweep::set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), true);
 
   // Since we've modified the old set, call update_sizes.
   _g1h->g1mm()->update_sizes();
--- a/src/share/vm/gc/g1/g1BlockOffsetTable.inline.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/g1BlockOffsetTable.inline.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -27,6 +27,7 @@
 
 #include "gc/g1/g1BlockOffsetTable.hpp"
 #include "gc/g1/heapRegion.hpp"
+#include "gc/shared/memset_with_concurrent_readers.hpp"
 #include "gc/shared/space.hpp"
 
 inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
@@ -68,15 +69,7 @@
   check_index(right, "right index out of range");
   assert(left <= right, "indexes out of order");
   size_t num_cards = right - left + 1;
-  if (UseMemSetInBOT) {
-    memset(&_offset_array[left], offset, num_cards);
-  } else {
-    size_t i = left;
-    const size_t end = i + num_cards;
-    for (; i < end; i++) {
-      _offset_array[i] = offset;
-    }
-  }
+  memset_with_concurrent_readers(&_offset_array[left], offset, num_cards);
 }
 
 // Variant of index_for that does not check the index for validity.
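
The memset_with_concurrent_readers() call above replaces the UseMemSetInBOT conditional: the concern is that a library memset may use wide or block-initializing stores that are unsafe while another thread concurrently reads the array, so the choice is now encapsulated in one shared helper. A plausible generic fallback, assuming plain byte stores are safe on the platform (the JDK additionally supplies a hand-written variant for hardware where that assumption fails):

    #include <cstddef>
    #include <cstring>

    // Generic shape of the helper: where memset is already safe for
    // concurrent byte-granularity readers, simply delegate to it.
    inline void memset_with_concurrent_readers(void* to, int value, size_t size) {
      ::memset(to, value, size);
    }
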
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/g1/g1CodeBlobClosure.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "code/nmethod.hpp"
+#include "gc/g1/g1CodeBlobClosure.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/heapRegion.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
+#include "oops/oop.inline.hpp"
+
+template <typename T>
+void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop_work(T* p) {
+  _work->do_oop(p);
+  T oop_or_narrowoop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(oop_or_narrowoop)) {
+    oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
+    HeapRegion* hr = _g1h->heap_region_containing_raw(o);
+    assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in collection set then evacuation failed and nm must already be in the remset");
+    hr->add_strong_code_root(_nm);
+  }
+}
+
+void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop(oop* o) {
+  do_oop_work(o);
+}
+
+void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop(narrowOop* o) {
+  do_oop_work(o);
+}
+
+void G1CodeBlobClosure::do_code_blob(CodeBlob* cb) {
+  nmethod* nm = cb->as_nmethod_or_null();
+  if (nm != NULL) {
+    if (!nm->test_set_oops_do_mark()) {
+      _oc.set_nm(nm);
+      nm->oops_do(&_oc);
+      nm->fix_oop_relocations();
+    }
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/g1/g1CodeBlobClosure.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "memory/iterator.hpp"
+
+class nmethod;
+
+class G1CodeBlobClosure : public CodeBlobClosure {
+  class HeapRegionGatheringOopClosure : public OopClosure {
+    G1CollectedHeap* _g1h;
+    OopClosure* _work;
+    nmethod* _nm;
+
+    template <typename T>
+    void do_oop_work(T* p);
+
+  public:
+    HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}
+
+    void do_oop(oop* o);
+    void do_oop(narrowOop* o);
+
+    void set_nm(nmethod* nm) {
+      _nm = nm;
+    }
+  };
+
+  HeapRegionGatheringOopClosure _oc;
+public:
+  G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
+
+  void do_code_blob(CodeBlob* cb);
+};
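
Taken together, the two new files implement a decorator: G1CodeBlobClosure filters code blobs down to not-yet-visited nmethods, and the inner HeapRegionGatheringOopClosure forwards each oop to the wrapped closure while registering the current nmethod as a strong code root of the region the oop points into. A stripped-down sketch of that wrap-and-record shape (simplified types, not the G1 API):

    struct OopClosure { virtual void do_oop(void** p) = 0; };

    struct RecordingClosure : OopClosure {
      OopClosure* _work;   // the real scanning closure
      void*       _ctx;    // context to record, e.g. the current nmethod

      RecordingClosure(OopClosure* work) : _work(work), _ctx(nullptr) {}
      void set_ctx(void* ctx) { _ctx = ctx; }

      virtual void do_oop(void** p) {
        _work->do_oop(p);  // scan first (the closure may update *p)
        if (*p != nullptr) {
          // G1 maps *p to its HeapRegion here and records _ctx (the
          // nmethod) in that region's strong-code-roots list.
        }
      }
    };
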
--- a/src/share/vm/gc/g1/g1CollectedHeap.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/g1CollectedHeap.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -65,6 +65,7 @@
 #include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.inline.hpp"
+#include "runtime/init.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -949,6 +950,7 @@
 }
 
 bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
+  assert(!is_init_completed(), "Expect to be called at JVM init time");
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
   MutexLockerEx x(Heap_lock);
@@ -1037,12 +1039,13 @@
     }
 
     // Notify mark-sweep of the archive range.
-    G1MarkSweep::mark_range_archive(curr_range);
+    G1MarkSweep::set_range_archive(curr_range, true);
   }
   return true;
 }
 
 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
+  assert(!is_init_completed(), "Expect to be called at JVM init time");
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
   MemRegion reserved = _hrm.reserved();
@@ -1125,6 +1128,81 @@
   return result;
 }
 
+void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
+  assert(!is_init_completed(), "Expect to be called at JVM init time");
+  assert(ranges != NULL, "MemRegion array NULL");
+  assert(count != 0, "No MemRegions provided");
+  MemRegion reserved = _hrm.reserved();
+  HeapWord* prev_last_addr = NULL;
+  HeapRegion* prev_last_region = NULL;
+  size_t size_used = 0;
+  size_t uncommitted_regions = 0;
+
+  // For each MemRegion, free the G1 regions that constitute it, and
+  // notify mark-sweep that the range is no longer to be considered 'archive.'
+  MutexLockerEx x(Heap_lock);
+  for (size_t i = 0; i < count; i++) {
+    HeapWord* start_address = ranges[i].start();
+    HeapWord* last_address = ranges[i].last();
+
+    assert(reserved.contains(start_address) && reserved.contains(last_address),
+           err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
+                   p2i(start_address), p2i(last_address)));
+    assert(start_address > prev_last_addr,
+           err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
+                   p2i(start_address), p2i(prev_last_addr)));
+    size_used += ranges[i].byte_size();
+    prev_last_addr = last_address;
+
+    HeapRegion* start_region = _hrm.addr_to_region(start_address);
+    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+
+    // Check for ranges that start in the same G1 region in which the previous
+    // range ended, and adjust the start address so we don't try to free
+    // the same region again. If the current range is entirely within that
+    // region, skip it.
+    if (start_region == prev_last_region) {
+      start_address = start_region->end();
+      if (start_address > last_address) {
+        continue;
+      }
+      start_region = _hrm.addr_to_region(start_address);
+    }
+    prev_last_region = last_region;
+
+    // After verifying that each region was marked as an archive region by
+    // alloc_archive_regions, set it free and empty, and uncommit it.
+    HeapRegion* curr_region = start_region;
+    while (curr_region != NULL) {
+      guarantee(curr_region->is_archive(),
+                err_msg("Expected archive region at index %u", curr_region->hrm_index()));
+      uint curr_index = curr_region->hrm_index();
+      _old_set.remove(curr_region);
+      curr_region->set_free();
+      curr_region->set_top(curr_region->bottom());
+      if (curr_region != last_region) {
+        curr_region = _hrm.next_region_in_heap(curr_region);
+      } else {
+        curr_region = NULL;
+      }
+      _hrm.shrink_at(curr_index, 1);
+      uncommitted_regions++;
+    }
+
+    // Notify mark-sweep that this is no longer an archive range.
+    G1MarkSweep::set_range_archive(ranges[i], false);
+  }
+
+  if (uncommitted_regions != 0) {
+    ergo_verbose1(ErgoHeapSizing,
+                  "attempt heap shrinking",
+                  ergo_format_reason("uncommitted archive regions")
+                  ergo_format_byte("total size"),
+                  HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
+  }
+  decrease_used(size_used);
+}
+
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                         uint* gc_count_before_ret,
                                                         uint* gclocker_retry_count_ret) {
@@ -2845,9 +2923,9 @@
 }
 
 // For G1, TLABs should not contain humongous objects, so the maximum TLAB size
-// must be smaller than the humongous object limit.
+// must be equal to the humongous object limit.
 size_t G1CollectedHeap::max_tlab_size() const {
-  return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment);
+  return align_size_down(_humongous_object_threshold_in_words, MinObjAlignment);
 }
 
 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
@@ -4051,7 +4129,9 @@
         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-        g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
+        g1_policy()->finalize_cset(target_pause_time_ms);
+
+        evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());
 
         register_humongous_regions_with_cset();
 
@@ -4175,7 +4255,10 @@
         // investigate this in CR 7178365.
         double sample_end_time_sec = os::elapsedTime();
         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
-        g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info);
+        g1_policy()->record_collection_pause_end(pause_time_ms);
+
+        evacuation_info.set_collectionset_used_before(g1_policy()->collection_set_bytes_used_before());
+        evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
 
         MemoryService::track_memory_usage();
 
@@ -4501,8 +4584,7 @@
                  bool only_young, bool claim)
         : _oop_closure(oop_closure),
           _oop_in_klass_closure(oop_closure->g1(),
-                                oop_closure->pss(),
-                                oop_closure->rp()),
+                                oop_closure->pss()),
           _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
           _claim(claim) {
 
@@ -4531,18 +4613,18 @@
       bool only_young = _g1h->collector_state()->gcs_are_young();
 
       // Non-IM young GC.
-      G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, pss, rp);
+      G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, pss);
       G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
                                                                                only_young, // Only process dirty klasses.
                                                                                false);     // No need to claim CLDs.
       // IM young GC.
       //    Strong roots closures.
-      G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, pss, rp);
+      G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, pss);
       G1CLDClosure<G1MarkFromRoot>                            scan_mark_cld_cl(&scan_mark_root_cl,
                                                                                false, // Process all klasses.
                                                                                true); // Need to claim CLDs.
       //    Weak roots closures.
-      G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, pss, rp);
+      G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, pss);
       G1CLDClosure<G1MarkPromotedFromRoot>                    scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
                                                                                     false, // Process all klasses.
                                                                                     true); // Need to claim CLDs.
@@ -4582,9 +4664,9 @@
                                       worker_id);
 
       G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, pss);
-      _root_processor->scan_remembered_sets(&push_heap_rs_cl,
-                                            weak_root_cl,
-                                            worker_id);
+      _g1h->g1_rem_set()->oops_into_collection_set_do(&push_heap_rs_cl,
+                                                      weak_root_cl,
+                                                      worker_id);
       double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
 
       double term_sec = 0.0;
@@ -5241,9 +5323,9 @@
     G1ParScanThreadState*           pss = _pss[worker_id];
     pss->set_ref_processor(NULL);
 
-    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss, NULL);
-
-    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL);
+    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss);
+
+    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);
 
     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
 
@@ -5341,9 +5423,9 @@
     pss->set_ref_processor(NULL);
     assert(pss->queue_is_empty(), "both queue and overflow should be empty");
 
-    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss, NULL);
-
-    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL);
+    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss);
+
+    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);
 
     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
 
@@ -5451,9 +5533,9 @@
   // closures while we're actually processing the discovered
   // reference objects.
 
-  G1ParScanExtRootClosure        only_copy_non_heap_cl(this, pss, NULL);
-
-  G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss, NULL);
+  G1ParScanExtRootClosure        only_copy_non_heap_cl(this, pss);
+
+  G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss);
 
   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
 
--- a/src/share/vm/gc/g1/g1CollectedHeap.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/g1CollectedHeap.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -757,6 +757,12 @@
   // alloc_archive_regions, and after class loading has occurred.
   void fill_archive_regions(MemRegion* range, size_t count);
 
+  // For each of the specified MemRegions, uncommit the containing G1 regions
+  // which had been allocated by alloc_archive_regions. This should be called
+  // at JVM init time, instead of fill_archive_regions, if mapping the archive
+  // file failed, passing the same sorted, non-overlapping MemRegion array.
+  void dealloc_archive_regions(MemRegion* range, size_t count);
+
 protected:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
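
Taken together, alloc_archive_regions, fill_archive_regions and the new
dealloc_archive_regions give archive (CDS) setup a transactional shape at JVM
init time. A hedged sketch of the intended call sequence, not compilable
outside HotSpot; map_archive_file is a hypothetical stand-in for the caller's
mapping step, and ARCHIVE_RANGE_COUNT is illustrative:

    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    // Sorted, non-overlapping ranges describing the archived heap data.
    MemRegion ranges[ARCHIVE_RANGE_COUNT];
    size_t count = ARCHIVE_RANGE_COUNT;

    if (g1h->alloc_archive_regions(ranges, count)) {   // commit + mark 'archive'
      if (map_archive_file(ranges, count)) {           // hypothetical mapping step
        g1h->fill_archive_regions(ranges, count);      // pad gaps in the regions
      } else {
        g1h->dealloc_archive_regions(ranges, count);   // undo: free and uncommit
      }
    }
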
--- a/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -181,15 +181,6 @@
     G1ErgoVerbose::set_enabled(false);
   }
 
-  // Verify PLAB sizes
-  const size_t region_size = HeapRegion::GrainWords;
-  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
-    char buffer[128];
-    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most " SIZE_FORMAT,
-                 OldPLABSize > region_size ? "Old" : "Young", region_size);
-    vm_exit_during_initialization(buffer);
-  }
-
   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
 
@@ -932,7 +923,7 @@
 // Anything below that is considered to be zero
 #define MIN_TIMER_GRANULARITY 0.0000001
 
-void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
+void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
   double end_time_sec = os::elapsedTime();
   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
          "otherwise, the subtraction below does not make sense");
@@ -964,9 +955,6 @@
   _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
                           end_time_sec, _g1->gc_tracer_stw()->gc_id());
 
-  evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
-  evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
-
   if (update_stats) {
     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
     // this is where we update the allocation rate of the application
@@ -1883,7 +1871,7 @@
 }
 
 
-void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {
+void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
   double young_start_time_sec = os::elapsedTime();
 
   YoungList* young_list = _g1->young_list();
@@ -2093,7 +2081,6 @@
 
   double non_young_end_time_sec = os::elapsedTime();
   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
-  evacuation_info.set_collectionset_regions(cset_region_length());
 }
 
 void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {
--- a/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -604,10 +604,6 @@
 
   virtual G1CollectorPolicy* as_g1_policy() { return this; }
 
-  virtual CollectorPolicy::Name kind() {
-    return CollectorPolicy::G1CollectorPolicyKind;
-  }
-
   G1CollectorState* collector_state();
 
   G1GCPhaseTimes* phase_times() const { return _phase_times; }
@@ -634,13 +630,11 @@
   virtual HeapWord* satisfy_failed_allocation(size_t size,
                                               bool is_tlab);
 
-  BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
-
   bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
 
   // Record the start and end of an evacuation pause.
   void record_collection_pause_start(double start_time_sec);
-  void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info);
+  void record_collection_pause_end(double pause_time_ms);
 
   // Record the start and end of a full collection.
   void record_full_collection_start();
@@ -682,6 +676,10 @@
     return _bytes_copied_during_gc;
   }
 
+  size_t collection_set_bytes_used_before() const {
+    return _collection_set_bytes_used_before;
+  }
+
   // Determine whether there are candidate regions so that the
   // next GC should be mixed. The two action strings are used
   // in the ergo output when the method returns true or false.
@@ -691,7 +689,7 @@
   // Choose a new collection set.  Marks the chosen regions as being
   // "in_collection_set", and links them together.  The head and number of
   // the collection set are available via access methods.
-  void finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info);
+  void finalize_cset(double target_pause_time_ms);
 
   // The head of the list (via "next_in_collection_set()") representing the
   // current collection set.
--- a/src/share/vm/gc/g1/g1EvacStats.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/g1EvacStats.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -54,17 +54,46 @@
                      _allocated, _wasted, _region_end_waste, _unused, used()));
       _allocated = 1;
     }
-    // We account region end waste fully to PLAB allocation. This is not completely fair,
-    // but is a conservative assumption because PLABs may be sized flexibly while we
-    // cannot adjust direct allocations.
-    // In some cases, wasted_frac may become > 1 but that just reflects the problem
-    // with region_end_waste.
-    double wasted_frac    = (double)(_unused + _wasted + _region_end_waste) / (double)_allocated;
-    size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);
-    if (target_refills == 0) {
-      target_refills = 1;
-    }
-    size_t cur_plab_sz = used() / target_refills;
+    // The size of the PLAB caps the amount of space that can be wasted at the
+    // end of the collection. In the worst case the last PLAB could be completely
+    // empty.
+    // This allows us to calculate the new PLAB size to achieve the
+    // TargetPLABWastePct given the latest memory usage and that the last buffer
+    // will be G1LastPLABAverageOccupancy full.
+    //
+    // E.g. assume that in the current GC 100 words were allocated and a
+    // TargetPLABWastePct of 10 had been set.
+    //
+    // So we could waste up to 10 words to meet that percentage. Given that we
+    // also assume that the last buffer is typically half-full, the new desired
+    // PLAB size is set to 20 words.
+    //
+    // The amount of allocation performed should be independent of the number of
+    // threads, and so should the maximum total waste we can tolerate. So if
+    // n threads were used to allocate, each of them may waste at most waste/n
+    // words as a first rough approximation. The number of threads only comes
+    // into play later, when the actual desired PLAB size is retrieved.
+    //
+    // After calculating this optimal PLAB size the algorithm applies the usual
+    // exponential decaying average over this value to guess the next PLAB size.
+    //
+    // We account region end waste fully to PLAB allocation (in the calculation of
+    // what we consider as "used_for_waste_calculation" below). This is not
+    // completely fair, but is a conservative assumption because PLABs may be sized
+    // flexibly while we cannot adjust inline allocations.
+    // Allocation during GC will try to minimize region end waste so this impact
+    // should be minimal.
+    //
+    // We need to cover overflow when calculating the amount of space actually used
+    // by objects in PLABs when subtracting the region end waste.
+    // Region end waste may be higher than actual allocation. This may occur if many
+    // threads do not allocate anything but a few rather large objects. In this
+    // degenerate case the PLAB size would simply quickly tend to minimum PLAB size,
+    // which is an okay reaction.
+    size_t const used_for_waste_calculation = used() > _region_end_waste ? used() - _region_end_waste : 0;
+
+    size_t const total_waste_allowed = used_for_waste_calculation * TargetPLABWastePct;
+    size_t const cur_plab_sz = (size_t)((double)total_waste_allowed / G1LastPLABAverageOccupancy);
     // Take historical weighted average
     _filter.sample(cur_plab_sz);
     // Clip from above and below, and align to object boundary
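
The worked example in the comment above can be reproduced outside the VM; the
flag values are the defaults named in this change, and the two percent factors
cancel out:

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t const used_for_waste_calculation = 100;  // words allocated this GC
      double const TargetPLABWastePct         = 10.0; // percent
      double const G1LastPLABAverageOccupancy = 50.0; // percent, new flag default

      size_t const total_waste_allowed =
          (size_t)(used_for_waste_calculation * TargetPLABWastePct);
      size_t const cur_plab_sz =
          (size_t)((double)total_waste_allowed / G1LastPLABAverageOccupancy);

      printf("desired PLAB size: %zu words\n", cur_plab_sz);  // prints 20
      return 0;
    }
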
--- a/src/share/vm/gc/g1/g1MarkSweep.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/g1MarkSweep.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -74,7 +74,7 @@
   assert(rp != NULL, "should be non-NULL");
   assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");
 
-  GenMarkSweep::_ref_processor = rp;
+  GenMarkSweep::set_ref_processor(rp);
   rp->setup_policy(clear_all_softrefs);
 
   // When collecting the permanent generation Method*s may be moving,
@@ -108,7 +108,7 @@
   JvmtiExport::gc_epilogue();
 
   // refs processing: clean slate
-  GenMarkSweep::_ref_processor = NULL;
+  GenMarkSweep::set_ref_processor(NULL);
 }
 
 
@@ -310,9 +310,9 @@
                                  HeapRegion::GrainBytes);
 }
 
-void G1MarkSweep::mark_range_archive(MemRegion range) {
+void G1MarkSweep::set_range_archive(MemRegion range, bool is_archive) {
   assert(_archive_check_enabled, "archive range check not enabled");
-  _archive_region_map.set_by_address(range, true);
+  _archive_region_map.set_by_address(range, is_archive);
 }
 
 bool G1MarkSweep::in_archive_range(oop object) {
--- a/src/share/vm/gc/g1/g1MarkSweep.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/g1MarkSweep.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -58,8 +58,8 @@
   // Create the _archive_region_map which is used to identify archive objects.
   static void enable_archive_object_check();
 
-  // Mark the regions containing the specified address range as archive regions.
-  static void mark_range_archive(MemRegion range);
+  // Set the regions containing the specified address range as archive/non-archive.
+  static void set_range_archive(MemRegion range, bool is_archive);
 
   // Check if an object is in an archive region using the _archive_region_map.
   static bool in_archive_range(oop object);
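
The extra boolean turns the old mark-only entry point into a toggle; the two
call sites elsewhere in this changeset pair up as:

    G1MarkSweep::set_range_archive(curr_range, true);   // alloc_archive_regions
    G1MarkSweep::set_range_archive(ranges[i], false);   // dealloc_archive_regions
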
--- a/src/share/vm/gc/g1/g1OopClosures.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/g1OopClosures.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -125,8 +125,7 @@
   template <class T> void do_oop_work(T* p);
 
 public:
-  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
-                   ReferenceProcessor* rp) :
+  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
       G1ParCopyHelper(g1, par_scan_state) {
     assert(_ref_processor == NULL, "sanity");
   }
@@ -141,7 +140,6 @@
 
   G1CollectedHeap*      g1()  { return _g1; };
   G1ParScanThreadState* pss() { return _par_scan_state; }
-  ReferenceProcessor*   rp()  { return _ref_processor; };
 };
 
 typedef G1ParCopyClosure<G1BarrierNone,  G1MarkNone>             G1ParScanExtRootClosure;
--- a/src/share/vm/gc/g1/g1ParScanThreadState.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/g1ParScanThreadState.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -186,6 +186,21 @@
   return dest(state);
 }
 
+void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
+                                                  oop const old, size_t word_sz, uint age,
+                                                  HeapWord * const obj_ptr,
+                                                  const AllocationContext_t context) const {
+  G1PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state, context);
+  if (alloc_buf->contains(obj_ptr)) {
+    _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz, age,
+                                                             dest_state.value() == InCSetState::Old,
+                                                             alloc_buf->word_sz());
+  } else {
+    _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz, age,
+                                                              dest_state.value() == InCSetState::Old);
+  }
+}
+
 oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
                                                  oop const old,
                                                  markOop const old_mark) {
@@ -219,6 +234,10 @@
         return handle_evacuation_failure_par(old, old_mark);
       }
     }
+    if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
+      // The events are checked individually as part of the actual commit
+      report_promotion_event(dest_state, old, word_sz, age, obj_ptr, context);
+    }
   }
 
   assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
--- a/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -173,6 +173,10 @@
                                   bool previous_plab_refill_failed);
 
   inline InCSetState next_state(InCSetState const state, markOop const m, uint& age);
+
+  void report_promotion_event(InCSetState const dest_state,
+                              oop const old, size_t word_sz, uint age,
+                              HeapWord * const obj_ptr, const AllocationContext_t context) const;
  public:
 
   oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);
--- a/src/share/vm/gc/g1/g1RemSet.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/g1RemSet.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -26,6 +26,7 @@
 #include "gc/g1/concurrentG1Refine.hpp"
 #include "gc/g1/concurrentG1RefineThread.hpp"
 #include "gc/g1/g1BlockOffsetTable.inline.hpp"
+#include "gc/g1/g1CodeBlobClosure.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
@@ -228,12 +229,15 @@
 };
 
 void G1RemSet::scanRS(G1ParPushHeapRSClosure* oc,
-                      CodeBlobClosure* code_root_cl,
+                      OopClosure* non_heap_roots,
                       uint worker_i) {
   double rs_time_start = os::elapsedTime();
+
+  G1CodeBlobClosure code_root_cl(non_heap_roots);
+
   HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
 
-  ScanRSClosure scanRScl(oc, code_root_cl, worker_i);
+  ScanRSClosure scanRScl(oc, &code_root_cl, worker_i);
 
   _g1->collection_set_iterate_from(startRegion, &scanRScl);
   scanRScl.set_try_claimed();
@@ -295,7 +299,7 @@
 }
 
 void G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
-                                           CodeBlobClosure* code_root_cl,
+                                           OopClosure* non_heap_roots,
                                            uint worker_i) {
 #if CARD_REPEAT_HISTO
   ct_freq_update_histo_and_reset();
@@ -318,7 +322,7 @@
   DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
 
   updateRS(&into_cset_dcq, worker_i);
-  scanRS(oc, code_root_cl, worker_i);
+  scanRS(oc, non_heap_roots, worker_i);
 
   // We now clear the cached values of _cset_rs_update_cl for this worker
   _cset_rs_update_cl[worker_i] = NULL;
--- a/src/share/vm/gc/g1/g1RemSet.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/g1RemSet.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -85,7 +85,7 @@
   // invoked "blk->set_region" to set the "from" region correctly
   // beforehand.)
   //
-  // Invoke code_root_cl->do_code_blob on the unmarked nmethods
+  // Apply non_heap_roots to the oops of the unmarked nmethods
   // on the strong code roots list for each region in the
   // collection set.
   //
@@ -95,7 +95,7 @@
   // the "i" passed to the calling thread's work(i) function.
   // In the sequential case this param will be ignored.
   void oops_into_collection_set_do(G1ParPushHeapRSClosure* blk,
-                                   CodeBlobClosure* code_root_cl,
+                                   OopClosure* non_heap_roots,
                                    uint worker_i);
 
   // Prepare for and cleanup after an oops_into_collection_set_do
@@ -107,7 +107,7 @@
   void cleanup_after_oops_into_collection_set_do();
 
   void scanRS(G1ParPushHeapRSClosure* oc,
-              CodeBlobClosure* code_root_cl,
+              OopClosure* non_heap_roots,
               uint worker_i);
 
   void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i);
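
With this signature change, G1RemSet builds the G1CodeBlobClosure internally
in scanRS() instead of requiring every caller to construct one. A sketch of
the before/after wiring at a call site, using the names from this changeset:

    // Before: callers built the CodeBlobClosure themselves via
    //   _root_processor->scan_remembered_sets(&push_heap_rs_cl, weak_root_cl, worker_id);
    // After: pass the raw OopClosure and let scanRS() wrap it:
    _g1h->g1_rem_set()->oops_into_collection_set_do(&push_heap_rs_cl,
                                                    weak_root_cl,   // OopClosure*
                                                    worker_id);
    // ... inside scanRS():
    //   G1CodeBlobClosure code_root_cl(non_heap_roots);
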
--- a/src/share/vm/gc/g1/g1RootProcessor.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/g1RootProcessor.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -28,6 +28,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "gc/g1/bufferingOopClosure.hpp"
+#include "gc/g1/g1CodeBlobClosure.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
@@ -40,57 +41,6 @@
 #include "runtime/mutex.hpp"
 #include "services/management.hpp"
 
-class G1CodeBlobClosure : public CodeBlobClosure {
-  class HeapRegionGatheringOopClosure : public OopClosure {
-    G1CollectedHeap* _g1h;
-    OopClosure* _work;
-    nmethod* _nm;
-
-    template <typename T>
-    void do_oop_work(T* p) {
-      _work->do_oop(p);
-      T oop_or_narrowoop = oopDesc::load_heap_oop(p);
-      if (!oopDesc::is_null(oop_or_narrowoop)) {
-        oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
-        HeapRegion* hr = _g1h->heap_region_containing_raw(o);
-        assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in CS then evacuation failed and nm must already be in the remset");
-        hr->add_strong_code_root(_nm);
-      }
-    }
-
-  public:
-    HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}
-
-    void do_oop(oop* o) {
-      do_oop_work(o);
-    }
-
-    void do_oop(narrowOop* o) {
-      do_oop_work(o);
-    }
-
-    void set_nm(nmethod* nm) {
-      _nm = nm;
-    }
-  };
-
-  HeapRegionGatheringOopClosure _oc;
-public:
-  G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
-
-  void do_code_blob(CodeBlob* cb) {
-    nmethod* nm = cb->as_nmethod_or_null();
-    if (nm != NULL) {
-      if (!nm->test_set_oops_do_mark()) {
-        _oc.set_nm(nm);
-        nm->oops_do(&_oc);
-        nm->fix_oop_relocations();
-      }
-    }
-  }
-};
-
-
 void G1RootProcessor::worker_has_discovered_all_strong_classes() {
   assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
 
@@ -321,14 +271,6 @@
   }
 }
 
-void G1RootProcessor::scan_remembered_sets(G1ParPushHeapRSClosure* scan_rs,
-                                           OopClosure* scan_non_heap_weak_roots,
-                                           uint worker_i) {
-  G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
-
-  _g1h->g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
-}
-
 uint G1RootProcessor::n_workers() const {
   return _srs.n_threads();
 }
--- a/src/share/vm/gc/g1/g1RootProcessor.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/g1RootProcessor.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -107,13 +107,6 @@
                          CLDClosure* clds,
                          CodeBlobClosure* blobs);
 
-  // Apply scan_rs to all locations in the union of the remembered sets for all
-  // regions in the collection set
-  // (having done "set_region" to indicate the region in which the root resides),
-  void scan_remembered_sets(G1ParPushHeapRSClosure* scan_rs,
-                            OopClosure* scan_non_heap_weak_roots,
-                            uint worker_i);
-
   // Number of worker threads used by the root processor.
   uint n_workers() const;
 };
--- a/src/share/vm/gc/g1/g1SATBCardTableModRefBS.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/g1SATBCardTableModRefBS.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -27,6 +27,7 @@
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/satbQueue.hpp"
+#include "gc/shared/memset_with_concurrent_readers.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/mutexLocker.hpp"
@@ -108,15 +109,7 @@
   jbyte *const first = byte_for(mr.start());
   jbyte *const last = byte_after(mr.last());
 
-  // Below we may use an explicit loop instead of memset() because on
-  // certain platforms memset() can give concurrent readers phantom zeros.
-  if (UseMemSetInBOT) {
-    memset(first, g1_young_gen, last - first);
-  } else {
-    for (jbyte* i = first; i < last; i++) {
-      *i = g1_young_gen;
-    }
-  }
+  memset_with_concurrent_readers(first, g1_young_gen, last - first);
 }
 
 #ifndef PRODUCT
@@ -207,7 +200,7 @@
   // Otherwise, log it.
   G1SATBCardTableLoggingModRefBS* g1_bs =
     barrier_set_cast<G1SATBCardTableLoggingModRefBS>(G1CollectedHeap::heap()->barrier_set());
-  g1_bs->write_ref_field_work(field, new_val);
+  g1_bs->write_ref_field_work(field, new_val, false);
 }
 
 void
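
The removed UseMemSetInBOT branch now lives behind the shared helper included
above. A hedged sketch of the pattern it factors out; the real implementation
in gc/shared/memset_with_concurrent_readers.hpp is platform-dependent and may
differ, and the preprocessor guard here is purely illustrative:

    #include <cstddef>
    #include <cstring>

    // On platforms where memset() can expose transient ("phantom") zeros to
    // concurrent readers, fill byte-by-byte; elsewhere plain memset is fine.
    inline void memset_with_concurrent_readers_sketch(void* to, int value, size_t size) {
    #ifdef MEMSET_UNSAFE_FOR_CONCURRENT_READERS   // illustrative guard
      char* p = static_cast<char*>(to);
      for (size_t i = 0; i < size; i++) {
        p[i] = (char)value;
      }
    #else
      memset(to, value, size);
    #endif
    }
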
--- a/src/share/vm/gc/g1/g1SATBCardTableModRefBS.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/g1SATBCardTableModRefBS.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -147,6 +147,10 @@
  private:
   G1SATBCardTableLoggingModRefBSChangedListener _listener;
   DirtyCardQueueSet& _dcqs;
+
+ protected:
+  virtual void write_ref_field_work(void* field, oop new_val, bool release);
+
  public:
   static size_t compute_size(size_t mem_region_size_in_words) {
     size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
@@ -165,8 +169,6 @@
 
   virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
 
-  void write_ref_field_work(void* field, oop new_val, bool release = false);
-
   // Can be called from static contexts.
   static void write_ref_field_static(void* field, oop new_val);
 
--- a/src/share/vm/gc/g1/g1_globals.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/g1_globals.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -82,6 +82,11 @@
           "If true, enable reference discovery during concurrent "          \
           "marking and reference processing at the end of remark.")         \
                                                                             \
+  experimental(double, G1LastPLABAverageOccupancy, 50.0,                    \
+               "The expected average occupancy of the last PLAB in "        \
+               "percent.")                                                  \
+               range(0.001, 100.0)                                          \
+                                                                            \
   product(size_t, G1SATBBufferSize, 1*K,                                    \
           "Number of entries in an SATB log buffer.")                       \
                                                                             \
--- a/src/share/vm/gc/g1/heapRegion.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/heapRegion.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -68,7 +68,7 @@
   // or it was allocated after marking finished, then we add it. Otherwise
   // we can safely ignore the object.
   if (!g1h->is_obj_dead(oop(cur), _hr)) {
-    oop_size = oop(cur)->oop_iterate(_rs_scan, mr);
+    oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
   } else {
     oop_size = _hr->block_size(cur);
   }
--- a/src/share/vm/gc/g1/heapRegionManager.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/heapRegionManager.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -426,7 +426,7 @@
       (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
     uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);
 
-    uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);
+    shrink_at(idx_last_found + num_last_found - to_remove, to_remove);
 
     cur = idx_last_found;
     removed += to_remove;
@@ -437,6 +437,17 @@
   return removed;
 }
 
+void HeapRegionManager::shrink_at(uint index, size_t num_regions) {
+#ifdef ASSERT
+  for (uint i = index; i < (index + num_regions); i++) {
+    assert(is_available(i), err_msg("Expected available region at index %u", i));
+    assert(at(i)->is_empty(), err_msg("Expected empty region at index %u", i));
+    assert(at(i)->is_free(), err_msg("Expected free region at index %u", i));
+  }
+#endif
+  uncommit_regions(index, num_regions);
+}
+
 uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
   guarantee(start_idx < _allocated_heapregions_length, "checking");
   guarantee(res_idx != NULL, "checking");
--- a/src/share/vm/gc/g1/heapRegionManager.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/heapRegionManager.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -241,6 +241,10 @@
   // Return the actual number of uncommitted regions.
   uint shrink_by(uint num_regions_to_remove);
 
+  // Uncommit a number of regions starting at the specified index; the regions
+  // must be available, empty, and free.
+  void shrink_at(uint index, size_t num_regions);
+
   void verify();
 
   // Do some sanity checking.
--- a/src/share/vm/gc/g1/heapRegionType.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/g1/heapRegionType.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -35,7 +35,7 @@
   // We encode the value of the heap region type so the generation can be
   // determined quickly. The tag is split into two parts:
   //
-  //   major type (young, humongous)                         : top N-1 bits
+  //   major type (young, old, humongous, archive)           : top N-1 bits
   //   minor type (eden / survivor, starts / cont hum, etc.) : bottom 1 bit
   //
   // If there's need to increase the number of minor types in the
--- a/src/share/vm/gc/parallel/cardTableExtension.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/parallel/cardTableExtension.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -89,7 +89,7 @@
     CheckForUnmarkedOops object_check(_young_gen, _card_table);
     obj->oop_iterate_no_header(&object_check);
     if (object_check.has_unmarked_oop()) {
-      assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
+      guarantee(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
     }
   }
 };
--- a/src/share/vm/gc/parallel/cardTableExtension.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/parallel/cardTableExtension.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -56,13 +56,7 @@
   CardTableExtension(MemRegion whole_heap) :
     CardTableModRefBS(
       whole_heap,
-      // Concrete tag should be BarrierSet::CardTableExtension.
-      // That will presently break things in a bunch of places though.
-      // The concrete tag is used as a dispatch key in many places, and
-      // CardTableExtension does not correctly dispatch in some of those
-      // uses. This will be addressed as part of a reorganization of the
-      // BarrierSet hierarchy.
-      BarrierSet::FakeRtti(BarrierSet::CardTableModRef, 0).add_tag(BarrierSet::CardTableExtension))
+      BarrierSet::FakeRtti(BarrierSet::CardTableExtension))
     { }
 
   // Scavenge support
--- a/src/share/vm/gc/parallel/immutableSpace.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/parallel/immutableSpace.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -44,7 +44,7 @@
   HeapWord* t = end();
   // Could call objects iterate, but this is easier.
   while (obj_addr < t) {
-    obj_addr += oop(obj_addr)->oop_iterate(cl);
+    obj_addr += oop(obj_addr)->oop_iterate_size(cl);
   }
 }
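
The rename here (and in heapRegion.cpp above) makes the size-returning variant
explicit: oop_iterate_size(cl) applies the closure to the object's fields and
returns the object's size in words, which is what lets walking loops of this
shape advance their cursor:

    HeapWord* obj_addr = bottom();
    HeapWord* t = end();
    while (obj_addr < t) {
      obj_addr += oop(obj_addr)->oop_iterate_size(cl);  // visit fields, step by size
    }
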
 
--- a/src/share/vm/gc/parallel/mutableSpace.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/parallel/mutableSpace.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -213,15 +213,6 @@
   return (HeapWord*)Atomic::cmpxchg_ptr(obj, top_addr(), expected_top) == expected_top;
 }
 
-void MutableSpace::oop_iterate(ExtendedOopClosure* cl) {
-  HeapWord* obj_addr = bottom();
-  HeapWord* t = top();
-  // Could call objects iterate, but this is easier.
-  while (obj_addr < t) {
-    obj_addr += oop(obj_addr)->oop_iterate(cl);
-  }
-}
-
 void MutableSpace::oop_iterate_no_header(OopClosure* cl) {
   HeapWord* obj_addr = bottom();
   HeapWord* t = top();
--- a/src/share/vm/gc/parallel/mutableSpace.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/parallel/mutableSpace.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -134,7 +134,6 @@
   bool cas_deallocate(HeapWord *obj, size_t size);
 
   // Iteration.
-  void oop_iterate(ExtendedOopClosure* cl);
   void oop_iterate_no_header(OopClosure* cl);
   void object_iterate(ObjectClosure* cl);
 
--- a/src/share/vm/gc/parallel/parallelScavengeHeap.inline.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/parallel/parallelScavengeHeap.inline.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -30,26 +30,22 @@
 #include "gc/parallel/psParallelCompact.hpp"
 #include "gc/parallel/psScavenge.hpp"
 
-inline size_t ParallelScavengeHeap::total_invocations()
-{
+inline size_t ParallelScavengeHeap::total_invocations() {
   return UseParallelOldGC ? PSParallelCompact::total_invocations() :
     PSMarkSweep::total_invocations();
 }
 
-inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const
-{
+inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const {
   const size_t eden_size = young_gen()->eden_space()->capacity_in_words();
   return size < eden_size / 2;
 }
 
-inline void ParallelScavengeHeap::invoke_scavenge()
-{
+inline void ParallelScavengeHeap::invoke_scavenge() {
   PSScavenge::invoke();
 }
 
 inline bool ParallelScavengeHeap::is_in_young(oop p) {
   // Assumes the old gen address range is lower than that of the young gen.
-  const void* loc = (void*) p;
   bool result = ((HeapWord*)p) >= young_gen()->reserved().start();
   assert(result == young_gen()->is_in_reserved(p),
         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, p2i((void*)p)));
--- a/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -299,7 +299,7 @@
   // subtracted out.
   size_t eden_limit = max_eden_size;
 
-  const double gc_cost_limit = GCTimeLimit/100.0;
+  const double gc_cost_limit = GCTimeLimit / 100.0;
 
   // Which way should we go?
   // if pause requirement is not met
--- a/src/share/vm/gc/parallel/psMarkSweep.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/parallel/psMarkSweep.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -60,7 +60,7 @@
 
 void PSMarkSweep::initialize() {
   MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
-  _ref_processor = new ReferenceProcessor(mr);     // a vanilla ref proc
+  set_ref_processor(new ReferenceProcessor(mr));     // a vanilla ref proc
   _counters = new CollectorCounters("PSMarkSweep", 1);
 }
 
--- a/src/share/vm/gc/parallel/psOldGen.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/parallel/psOldGen.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -486,12 +486,12 @@
   object_space()->verify();
 }
 class VerifyObjectStartArrayClosure : public ObjectClosure {
-  PSOldGen* _gen;
+  PSOldGen* _old_gen;
   ObjectStartArray* _start_array;
 
  public:
-  VerifyObjectStartArrayClosure(PSOldGen* gen, ObjectStartArray* start_array) :
-    _gen(gen), _start_array(start_array) { }
+  VerifyObjectStartArrayClosure(PSOldGen* old_gen, ObjectStartArray* start_array) :
+    _old_gen(old_gen), _start_array(start_array) { }
 
   virtual void do_object(oop obj) {
     HeapWord* test_addr = (HeapWord*)obj + 1;
--- a/src/share/vm/gc/parallel/psParallelCompact.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/parallel/psParallelCompact.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -958,7 +958,7 @@
 {
   // Update the from & to space pointers in space_info, since they are swapped
   // at each young gen gc.  Do the update unconditionally (even though a
-  // promotion failure does not swap spaces) because an unknown number of minor
+  // promotion failure does not swap spaces) because an unknown number of young
   // collections will have swapped the spaces an unknown number of times.
   GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
--- a/src/share/vm/gc/parallel/psParallelCompact.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/parallel/psParallelCompact.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -303,7 +303,7 @@
     // completed(), which is desirable since a region must be claimed before it
     // can be completed.
     bool available() const { return _dc_and_los < dc_one; }
-    bool claimed() const   { return _dc_and_los >= dc_claimed; }
+    bool claimed()   const { return _dc_and_los >= dc_claimed; }
     bool completed() const { return _dc_and_los >= dc_completed; }
 
     // These are not atomic.
@@ -979,7 +979,6 @@
   static bool   _dwl_initialized;
 #endif  // #ifdef ASSERT
 
-
  public:
   static ParallelOldTracer* gc_tracer() { return &_gc_tracer; }
 
--- a/src/share/vm/gc/parallel/psScavenge.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/parallel/psScavenge.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -597,9 +597,9 @@
         // to allow resizes that may have been inhibited by the
         // relative location of the "to" and "from" spaces.
 
-        // Resizing the old gen at minor collects can cause increases
+        // Resizing the old gen at young collections can cause increases
         // that don't feed back to the generation sizing policy until
-        // a major collection.  Don't resize the old gen here.
+        // a full collection.  Don't resize the old gen here.
 
         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                         size_policy->calculated_survivor_size_in_bytes());
--- a/src/share/vm/gc/parallel/psTasks.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/parallel/psTasks.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -172,10 +172,10 @@
 
 void OldToYoungRootsTask::do_it(GCTaskManager* manager, uint which) {
   // There are no old-to-young pointers if the old gen is empty.
-  assert(!_gen->object_space()->is_empty(),
+  assert(!_old_gen->object_space()->is_empty(),
     "Should not be called is there is no work");
-  assert(_gen != NULL, "Sanity");
-  assert(_gen->object_space()->contains(_gen_top) || _gen_top == _gen->object_space()->top(), "Sanity");
+  assert(_old_gen != NULL, "Sanity");
+  assert(_old_gen->object_space()->contains(_gen_top) || _gen_top == _old_gen->object_space()->top(), "Sanity");
   assert(_stripe_number < ParallelGCThreads, "Sanity");
 
   {
@@ -183,8 +183,8 @@
     CardTableExtension* card_table =
       barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
 
-    card_table->scavenge_contents_parallel(_gen->start_array(),
-                                           _gen->object_space(),
+    card_table->scavenge_contents_parallel(_old_gen->start_array(),
+                                           _old_gen->object_space(),
                                            _gen_top,
                                            pm,
                                            _stripe_number,
--- a/src/share/vm/gc/parallel/psTasks.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/parallel/psTasks.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -160,17 +160,17 @@
 
 class OldToYoungRootsTask : public GCTask {
  private:
-  PSOldGen* _gen;
+  PSOldGen* _old_gen;
   HeapWord* _gen_top;
   uint _stripe_number;
   uint _stripe_total;
 
  public:
-  OldToYoungRootsTask(PSOldGen *gen,
+  OldToYoungRootsTask(PSOldGen *old_gen,
                       HeapWord* gen_top,
                       uint stripe_number,
                       uint stripe_total) :
-    _gen(gen),
+    _old_gen(old_gen),
     _gen_top(gen_top),
     _stripe_number(stripe_number),
     _stripe_total(stripe_total) { }
--- a/src/share/vm/gc/serial/defNewGeneration.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/serial/defNewGeneration.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -106,14 +106,14 @@
   _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
 {
   assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
-  _gen = (DefNewGeneration*)_gch->young_gen();
+  _young_gen = (DefNewGeneration*)_gch->young_gen();
 }
 
 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
   do {
     _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
   } while (!_gch->no_allocs_since_save_marks());
-  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
+  guarantee(_young_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
 }
 
 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
@@ -200,8 +200,9 @@
   _from_space = new ContiguousSpace();
   _to_space   = new ContiguousSpace();
 
-  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
+  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
     vm_exit_during_initialization("Could not allocate a new gen space");
+  }
 
   // Compute the maximum eden and survivor space sizes. These sizes
   // are computed assuming the entire reserved space is committed.
@@ -655,7 +656,7 @@
     if (ZapUnusedHeapArea) {
       // This is now done here because of the piece-meal mangling which
       // can check for valid mangling at intermediate points in the
-      // collection(s).  When a minor collection fails to collect
+      // collection(s).  When a young collection fails to collect
       // sufficient space resizing of the young generation can occur
       // and redistribute the spaces in the young generation.  Mangle
       // here so that unzapped regions don't get distributed to
--- a/src/share/vm/gc/serial/defNewGeneration.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/serial/defNewGeneration.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -193,7 +193,7 @@
 
   class FastEvacuateFollowersClosure: public VoidClosure {
     GenCollectedHeap* _gch;
-    DefNewGeneration* _gen;
+    DefNewGeneration* _young_gen;
     FastScanClosure* _scan_cur_or_nonheap;
     FastScanClosure* _scan_older;
   public:
--- a/src/share/vm/gc/serial/defNewGeneration.inline.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/serial/defNewGeneration.inline.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -57,8 +57,8 @@
   // each generation, allowing them in turn to examine the modified
   // field.
   //
-  // We could check that p is also in an older generation, but
-  // dirty cards in the youngest gen are never scanned, so the
+  // We could check that p is also in the old generation, but
+  // dirty cards in the young gen are never scanned, so the
   // extra check probably isn't worthwhile.
   if (GenCollectedHeap::heap()->is_in_reserved(p)) {
     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
--- a/src/share/vm/gc/serial/genMarkSweep.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/serial/genMarkSweep.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -67,7 +67,7 @@
   // hook up weak ref data so it can be used during Mark-Sweep
   assert(ref_processor() == NULL, "no stomping");
   assert(rp != NULL, "should be non-NULL");
-  _ref_processor = rp;
+  set_ref_processor(rp);
   rp->setup_policy(clear_all_softrefs);
 
   GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer->gc_id());
@@ -136,7 +136,7 @@
   }
 
   // refs processing: clean slate
-  _ref_processor = NULL;
+  set_ref_processor(NULL);
 
   // Update heap occupancy information which is used as
   // input to soft ref clearing policy at the next gc.
--- a/src/share/vm/gc/serial/markSweep.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/serial/markSweep.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -28,11 +28,20 @@
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
+#include "gc/shared/specialized_oop_closures.hpp"
+#include "memory/iterator.inline.hpp"
+#include "oops/instanceClassLoaderKlass.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
+#include "oops/instanceRefKlass.inline.hpp"
 #include "oops/methodData.hpp"
 #include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
+#include "utilities/macros.hpp"
+#include "utilities/stack.inline.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc/g1/g1StringDedup.hpp"
+#endif // INCLUDE_ALL_GCS
 
 uint                    MarkSweep::_total_invocations = 0;
 
@@ -50,176 +59,101 @@
 
 MarkSweep::FollowRootClosure  MarkSweep::follow_root_closure;
 
-void MarkSweep::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
-void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
-
-MarkSweep::MarkAndPushClosure MarkSweep::mark_and_push_closure;
+MarkAndPushClosure            MarkSweep::mark_and_push_closure;
 CLDToOopClosure               MarkSweep::follow_cld_closure(&mark_and_push_closure);
 CLDToOopClosure               MarkSweep::adjust_cld_closure(&adjust_pointer_closure);
 
-template <typename T>
-void MarkSweep::MarkAndPushClosure::do_oop_nv(T* p)       { mark_and_push(p); }
-void MarkSweep::MarkAndPushClosure::do_oop(oop* p)        { do_oop_nv(p); }
-void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p)  { do_oop_nv(p); }
+inline void MarkSweep::mark_object(oop obj) {
+#if INCLUDE_ALL_GCS
+  if (G1StringDedup::is_enabled()) {
+    // We must enqueue the object before it is marked
+    // as we otherwise can't read the object's age.
+    G1StringDedup::enqueue_from_mark(obj);
+  }
+#endif
+  // Some marks may contain information we need to preserve, so we store them
+  // away and overwrite the mark.  We'll restore the preserved marks at the end
+  // of mark-sweep.
+  markOop mark = obj->mark();
+  obj->set_mark(markOopDesc::prototype()->set_marked());
 
-void MarkSweep::follow_class_loader(ClassLoaderData* cld) {
+  if (mark->must_be_preserved(obj)) {
+    preserve_mark(obj, mark);
+  }
+}
+
+template <class T> inline void MarkSweep::mark_and_push(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!obj->mark()->is_marked() &&
+        !is_archive_object(obj)) {
+      mark_object(obj);
+      _marking_stack.push(obj);
+    }
+  }
+}
+
+inline void MarkSweep::follow_klass(Klass* klass) {
+  oop op = klass->klass_holder();
+  MarkSweep::mark_and_push(&op);
+}
+
+inline void MarkSweep::follow_cld(ClassLoaderData* cld) {
   MarkSweep::follow_cld_closure.do_cld(cld);
 }
 
-void InstanceKlass::oop_ms_follow_contents(oop obj) {
-  assert(obj != NULL, "can't follow the content of NULL object");
-  MarkSweep::follow_klass(this);
+template <typename T>
+inline void MarkAndPushClosure::do_oop_nv(T* p)                 { MarkSweep::mark_and_push(p); }
+void MarkAndPushClosure::do_oop(oop* p)                         { do_oop_nv(p); }
+void MarkAndPushClosure::do_oop(narrowOop* p)                   { do_oop_nv(p); }
+inline bool MarkAndPushClosure::do_metadata_nv()                { return true; }
+bool MarkAndPushClosure::do_metadata()                          { return do_metadata_nv(); }
+inline void MarkAndPushClosure::do_klass_nv(Klass* k)           { MarkSweep::follow_klass(k); }
+void MarkAndPushClosure::do_klass(Klass* k)                     { do_klass_nv(k); }
+inline void MarkAndPushClosure::do_cld_nv(ClassLoaderData* cld) { MarkSweep::follow_cld(cld); }
+void MarkAndPushClosure::do_cld(ClassLoaderData* cld)           { do_cld_nv(cld); }
 
-  oop_oop_iterate_oop_maps<true>(obj, &MarkSweep::mark_and_push_closure);
+template <class T> inline void MarkSweep::KeepAliveClosure::do_oop_work(T* p) {
+  mark_and_push(p);
 }
 
-void InstanceMirrorKlass::oop_ms_follow_contents(oop obj) {
-  InstanceKlass::oop_ms_follow_contents(obj);
-
-  // Follow the klass field in the mirror
-  Klass* klass = java_lang_Class::as_Klass(obj);
-  if (klass != NULL) {
-    // An anonymous class doesn't have its own class loader, so the call
-    // to follow_klass will mark and push its java mirror instead of the
-    // class loader. When handling the java mirror for an anonymous class
-    // we need to make sure its class loader data is claimed, this is done
-    // by calling follow_class_loader explicitly. For non-anonymous classes
-    // the call to follow_class_loader is made when the class loader itself
-    // is handled.
-    if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
-      MarkSweep::follow_class_loader(klass->class_loader_data());
-    } else {
-      MarkSweep::follow_klass(klass);
-    }
-  } else {
-    // If klass is NULL then this a mirror for a primitive type.
-    // We don't have to follow them, since they are handled as strong
-    // roots in Universe::oops_do.
-    assert(java_lang_Class::is_primitive(obj), "Sanity check");
-  }
-
-  oop_oop_iterate_statics<true>(obj, &MarkSweep::mark_and_push_closure);
+void MarkSweep::push_objarray(oop obj, size_t index) {
+  ObjArrayTask task(obj, index);
+  assert(task.is_valid(), "bad ObjArrayTask");
+  _objarray_stack.push(task);
 }
 
-void InstanceClassLoaderKlass::oop_ms_follow_contents(oop obj) {
-  InstanceKlass::oop_ms_follow_contents(obj);
-
-  ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj);
-
-  // We must NULL check here, since the class loader
-  // can be found before the loader data has been set up.
-  if(loader_data != NULL) {
-    MarkSweep::follow_class_loader(loader_data);
+inline void MarkSweep::follow_array(objArrayOop array) {
+  MarkSweep::follow_klass(array->klass());
+  // Don't push empty arrays to avoid unnecessary work.
+  if (array->length() > 0) {
+    MarkSweep::push_objarray(array, 0);
   }
 }
 
-template <class T>
-static void oop_ms_follow_contents_specialized(InstanceRefKlass* klass, oop obj) {
-  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
-  T heap_oop = oopDesc::load_heap_oop(referent_addr);
-  debug_only(
-    if(TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print_cr("InstanceRefKlass::oop_ms_follow_contents_specialized " PTR_FORMAT, p2i(obj));
-    }
-  )
-  if (!oopDesc::is_null(heap_oop)) {
-    oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (!referent->is_gc_marked() &&
-        MarkSweep::ref_processor()->discover_reference(obj, klass->reference_type())) {
-      // reference was discovered, referent will be traversed later
-      klass->InstanceKlass::oop_ms_follow_contents(obj);
-      debug_only(
-        if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("       Non NULL enqueued " PTR_FORMAT, p2i(obj));
-        }
-      )
-      return;
-    } else {
-      // treat referent as normal oop
-      debug_only(
-        if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("       Non NULL normal " PTR_FORMAT, p2i(obj));
-        }
-      )
-      MarkSweep::mark_and_push(referent_addr);
-    }
-  }
-  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
-  // Treat discovered as normal oop, if ref is not "active",
-  // i.e. if next is non-NULL.
-  T  next_oop = oopDesc::load_heap_oop(next_addr);
-  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
-    T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
-    debug_only(
-      if(TraceReferenceGC && PrintGCDetails) {
-        gclog_or_tty->print_cr("   Process discovered as normal "
-                               PTR_FORMAT, p2i(discovered_addr));
-      }
-    )
-    MarkSweep::mark_and_push(discovered_addr);
-  }
-  // treat next as normal oop.  next is a link in the reference queue.
-  debug_only(
-    if(TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print_cr("   Process next as normal " PTR_FORMAT, p2i(next_addr));
-    }
-  )
-  MarkSweep::mark_and_push(next_addr);
-  klass->InstanceKlass::oop_ms_follow_contents(obj);
-}
-
-void InstanceRefKlass::oop_ms_follow_contents(oop obj) {
-  if (UseCompressedOops) {
-    oop_ms_follow_contents_specialized<narrowOop>(this, obj);
+inline void MarkSweep::follow_object(oop obj) {
+  assert(obj->is_gc_marked(), "should be marked");
+  if (obj->is_objArray()) {
+    // Handle object arrays explicitly to allow them to
+    // be split into chunks if needed.
+    MarkSweep::follow_array((objArrayOop)obj);
   } else {
-    oop_ms_follow_contents_specialized<oop>(this, obj);
+    obj->oop_iterate(&mark_and_push_closure);
   }
 }
 
-template <class T>
-static void oop_ms_follow_contents_specialized(oop obj, int index) {
-  objArrayOop a = objArrayOop(obj);
-  const size_t len = size_t(a->length());
-  const size_t beg_index = size_t(index);
+void MarkSweep::follow_array_chunk(objArrayOop array, int index) {
+  const int len = array->length();
+  const int beg_index = index;
   assert(beg_index < len || len == 0, "index too large");
 
-  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
-  const size_t end_index = beg_index + stride;
-  T* const base = (T*)a->base();
-  T* const beg = base + beg_index;
-  T* const end = base + end_index;
+  const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
+  const int end_index = beg_index + stride;
 
-  // Push the non-NULL elements of the next stride on the marking stack.
-  for (T* e = beg; e < end; e++) {
-    MarkSweep::mark_and_push<T>(e);
-  }
+  array->oop_iterate_range(&mark_and_push_closure, beg_index, end_index);
 
   if (end_index < len) {
-    MarkSweep::push_objarray(a, end_index); // Push the continuation.
-  }
-}
-
-void ObjArrayKlass::oop_ms_follow_contents(oop obj) {
-  assert (obj->is_array(), "obj must be array");
-  MarkSweep::follow_klass(this);
-  if (UseCompressedOops) {
-    oop_ms_follow_contents_specialized<narrowOop>(obj, 0);
-  } else {
-    oop_ms_follow_contents_specialized<oop>(obj, 0);
-  }
-}
-
-void TypeArrayKlass::oop_ms_follow_contents(oop obj) {
-  assert(obj->is_typeArray(),"must be a type array");
-  // Performance tweak: We skip iterating over the klass pointer since we
-  // know that Universe::TypeArrayKlass never moves.
-}
-
-void MarkSweep::follow_array(objArrayOop array, int index) {
-  if (UseCompressedOops) {
-    oop_ms_follow_contents_specialized<narrowOop>(array, index);
-  } else {
-    oop_ms_follow_contents_specialized<oop>(array, index);
+    MarkSweep::push_objarray(array, end_index); // Push the continuation.
   }
 }
 
@@ -233,7 +167,7 @@
     // Process ObjArrays one at a time to avoid marking stack bloat.
     if (!_objarray_stack.is_empty()) {
       ObjArrayTask task = _objarray_stack.pop();
-      follow_array(objArrayOop(task.obj()), task.index());
+      follow_array_chunk(objArrayOop(task.obj()), task.index());
     }
   } while (!_marking_stack.is_empty() || !_objarray_stack.is_empty());
 }
@@ -242,6 +176,24 @@
 
 void MarkSweep::FollowStackClosure::do_void() { follow_stack(); }
 
+template <class T> inline void MarkSweep::follow_root(T* p) {
+  assert(!Universe::heap()->is_in_reserved(p),
+         "roots shouldn't be things within the heap");
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!obj->mark()->is_marked() &&
+        !is_archive_object(obj)) {
+      mark_object(obj);
+      follow_object(obj);
+    }
+  }
+  follow_stack();
+}
+
+void MarkSweep::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
+void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
+
 void PreservedMark::adjust_pointer() {
   MarkSweep::adjust_pointer(&_obj);
 }
@@ -266,6 +218,11 @@
   }
 }
 
+void MarkSweep::set_ref_processor(ReferenceProcessor* rp) {
+  _ref_processor = rp;
+  mark_and_push_closure.set_ref_processor(_ref_processor);
+}
+
 MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure;
 
 template <typename T>
@@ -405,3 +362,6 @@
   // know that Universe::TypeArrayKlass never moves.
   return t->object_size();
 }
+
+// Generate MS specialized oop_oop_iterate functions.
+SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
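
Note on the markSweep.cpp rewrite above: the per-Klass oop_ms_follow_contents specializations are replaced by closure-based iteration, and the chunked treatment of object arrays is kept — follow_array_chunk scans one ObjArrayMarkingStride-sized stride per pop and pushes a continuation task, so a huge array never floods the marking stack. A minimal standalone sketch of that chunking idea (kStride, ArrayChunkTask and the mark_and_push parameter are illustrative names, not HotSpot's):

    #include <algorithm>
    #include <cstddef>
    #include <stack>
    #include <vector>

    // A continuation task: which array to resume, and at what index.
    struct ArrayChunkTask {
      const std::vector<void*>* array;
      std::size_t index;
    };

    static const std::size_t kStride = 512;  // stand-in for ObjArrayMarkingStride

    // Scan one stride of elements, then push a continuation for the rest.
    void follow_array_chunk(std::stack<ArrayChunkTask>& worklist,
                            const ArrayChunkTask& task,
                            void (*mark_and_push)(void*)) {
      const std::size_t len = task.array->size();
      const std::size_t end = std::min(len, task.index + kStride);
      for (std::size_t i = task.index; i < end; ++i) {
        mark_and_push((*task.array)[i]);          // visit one element
      }
      if (end < len) {
        ArrayChunkTask cont = { task.array, end };
        worklist.push(cont);                      // continuation
      }
    }

The drain loop in follow_stack then alternates between ordinary objects and one array chunk at a time. This is also why ObjArrayTask::is_valid (taskqueue.cpp, below) must now accept index 0: follow_array pushes whole arrays as a task starting at element 0.
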
--- a/src/share/vm/gc/serial/markSweep.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/serial/markSweep.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -49,6 +49,7 @@
 
 // declared at end
 class PreservedMark;
+class MarkAndPushClosure;
 
 class MarkSweep : AllStatic {
   //
@@ -60,13 +61,6 @@
     virtual void do_oop(narrowOop* p);
   };
 
-  class MarkAndPushClosure: public ExtendedOopClosure {
-   public:
-    template <typename T> void do_oop_nv(T* p);
-    virtual void do_oop(oop* p);
-    virtual void do_oop(narrowOop* p);
-  };
-
   class FollowStackClosure: public VoidClosure {
    public:
     virtual void do_void();
@@ -146,6 +140,7 @@
 
   // Reference Processing
   static ReferenceProcessor* const ref_processor() { return _ref_processor; }
+  static void set_ref_processor(ReferenceProcessor* rp);
 
   // Archive Object handling
   static inline bool is_archive_object(oop object);
@@ -153,34 +148,55 @@
   static STWGCTimer* gc_timer() { return _gc_timer; }
   static SerialOldTracer* gc_tracer() { return _gc_tracer; }
 
+  static void preserve_mark(oop p, markOop mark);
+                                // Save the mark word so it can be restored later
+  static void adjust_marks();   // Adjust the pointers in the preserved marks table
+  static void restore_marks();  // Restore the marks that we saved in preserve_mark
+
+  static int adjust_pointers(oop obj);
+
+  static void follow_stack();   // Empty marking stack.
+
+  static void follow_klass(Klass* klass);
+
+  static void follow_cld(ClassLoaderData* cld);
+
+  template <class T> static inline void adjust_pointer(T* p);
+
+  // Check mark and maybe push on marking stack
+  template <class T> static void mark_and_push(T* p);
+
+ private:
   // Call backs for marking
   static void mark_object(oop obj);
   // Mark pointer and follow contents.  Empty marking stack afterwards.
   template <class T> static inline void follow_root(T* p);
 
-  // Check mark and maybe push on marking stack
-  template <class T> static void mark_and_push(T* p);
-
   static inline void push_objarray(oop obj, size_t index);
 
-  static void follow_stack();   // Empty marking stack.
-
   static void follow_object(oop obj);
 
-  static void follow_array(objArrayOop array, int index);
+  static void follow_array(objArrayOop array);
 
-  static void follow_klass(Klass* klass);
+  static void follow_array_chunk(objArrayOop array, int index);
+};
 
-  static void follow_class_loader(ClassLoaderData* cld);
+class MarkAndPushClosure: public ExtendedOopClosure {
+public:
+  template <typename T> void do_oop_nv(T* p);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
 
-  static int adjust_pointers(oop obj);
+  virtual bool do_metadata();
+  bool do_metadata_nv();
 
-  static void preserve_mark(oop p, markOop mark);
-                                // Save the mark word so it can be restored later
-  static void adjust_marks();   // Adjust the pointers in the preserved marks table
-  static void restore_marks();  // Restore the marks that we saved in preserve_mark
+  virtual void do_klass(Klass* k);
+  void do_klass_nv(Klass* k);
 
-  template <class T> static inline void adjust_pointer(T* p);
+  virtual void do_cld(ClassLoaderData* cld);
+  void do_cld_nv(ClassLoaderData* cld);
+
+  void set_ref_processor(ReferenceProcessor* rp) { _ref_processor = rp; }
 };
 
 class PreservedMark VALUE_OBJ_CLASS_SPEC {
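
Moving MarkAndPushClosure out of MarkSweep and giving it the full do_oop_nv / do_metadata_nv / do_klass_nv / do_cld_nv surface is what lets the new SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS macro generate statically bound iterators for it. A hedged sketch of the pairing in isolation, with illustrative names:

    // Generic entry point: virtual, usable by any iterator.
    struct OopClosureBase {
      virtual void do_oop(void** p) = 0;
      virtual ~OopClosureBase() {}
    };

    struct MarkClosure : public OopClosureBase {
      // Non-virtual worker; specialized iterators call this directly.
      void do_oop_nv(void** p) { /* mark *p and push it on the stack */ }
      // The virtual version must stay in sync and simply forwards.
      virtual void do_oop(void** p) { do_oop_nv(p); }
    };

    // An iterator instantiated per closure type: the call binds
    // statically, so the compiler can inline do_oop_nv into the loop.
    template <typename Closure>
    void iterate_specialized(void** begin, void** end, Closure* cl) {
      for (void** p = begin; p < end; ++p) {
        cl->do_oop_nv(p);  // no vtable dispatch
      }
    }

This mirrors the comment in memory/iterator.hpp further below: the virtual and _nv versions must be updated together, or the devirtualized path silently diverges from the generic one.
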
--- a/src/share/vm/gc/serial/markSweep.inline.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/serial/markSweep.inline.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -26,38 +26,13 @@
 #define SHARE_VM_GC_SERIAL_MARKSWEEP_INLINE_HPP
 
 #include "gc/serial/markSweep.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "oops/instanceClassLoaderKlass.inline.hpp"
-#include "oops/instanceKlass.inline.hpp"
-#include "oops/instanceMirrorKlass.inline.hpp"
-#include "oops/instanceRefKlass.inline.hpp"
+#include "memory/universe.hpp"
 #include "oops/markOop.inline.hpp"
-#include "oops/objArrayKlass.inline.hpp"
-#include "utilities/macros.hpp"
-#include "utilities/stack.inline.hpp"
+#include "oops/oop.inline.hpp"
 #if INCLUDE_ALL_GCS
-#include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1MarkSweep.hpp"
 #endif // INCLUDE_ALL_GCS
 
-inline void MarkSweep::mark_object(oop obj) {
-#if INCLUDE_ALL_GCS
-  if (G1StringDedup::is_enabled()) {
-    // We must enqueue the object before it is marked
-    // as we otherwise can't read the object's age.
-    G1StringDedup::enqueue_from_mark(obj);
-  }
-#endif
-  // some marks may contain information we need to preserve so we store them away
-  // and overwrite the mark.  We'll restore it at the end of markSweep.
-  markOop mark = obj->mark();
-  obj->set_mark(markOopDesc::prototype()->set_marked());
-
-  if (mark->must_be_preserved(obj)) {
-    preserve_mark(obj, mark);
-  }
-}
-
 inline bool MarkSweep::is_archive_object(oop object) {
 #if INCLUDE_ALL_GCS
   return (G1MarkSweep::archive_check_enabled() &&
@@ -67,51 +42,6 @@
 #endif
 }
 
-inline void MarkSweep::follow_klass(Klass* klass) {
-  oop op = klass->klass_holder();
-  MarkSweep::mark_and_push(&op);
-}
-
-inline void MarkSweep::follow_object(oop obj) {
-  assert(obj->is_gc_marked(), "should be marked");
-
-  obj->ms_follow_contents();
-}
-
-template <class T> inline void MarkSweep::follow_root(T* p) {
-  assert(!Universe::heap()->is_in_reserved(p),
-         "roots shouldn't be things within the heap");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (!obj->mark()->is_marked() &&
-        !is_archive_object(obj)) {
-      mark_object(obj);
-      follow_object(obj);
-    }
-  }
-  follow_stack();
-}
-
-template <class T> inline void MarkSweep::mark_and_push(T* p) {
-//  assert(Universe::heap()->is_in_reserved(p), "should be in object space");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (!obj->mark()->is_marked() &&
-        !is_archive_object(obj)) {
-      mark_object(obj);
-      _marking_stack.push(obj);
-    }
-  }
-}
-
-void MarkSweep::push_objarray(oop obj, size_t index) {
-  ObjArrayTask task(obj, index);
-  assert(task.is_valid(), "bad ObjArrayTask");
-  _objarray_stack.push(task);
-}
-
 inline int MarkSweep::adjust_pointers(oop obj) {
   return obj->ms_adjust_pointers();
 }
@@ -139,8 +69,4 @@
   }
 }
 
-template <class T> inline void MarkSweep::KeepAliveClosure::do_oop_work(T* p) {
-  mark_and_push(p);
-}
-
 #endif // SHARE_VM_GC_SERIAL_MARKSWEEP_INLINE_HPP
--- a/src/share/vm/gc/serial/tenuredGeneration.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/serial/tenuredGeneration.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -108,7 +108,7 @@
                     free());
     }
   }
-  // If we had to expand to accommodate promotions from younger generations
+  // If we had to expand to accommodate promotions from the young generation
   if (!result && _capacity_at_prologue < capacity()) {
     result = true;
     if (PrintGC && Verbose) {
@@ -140,11 +140,11 @@
   // that are of interest at this point.
   bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
   if (!full && current_is_young) {
-    // Calculate size of data promoted from the younger generations
+    // Calculate size of data promoted from the young generation
     // before doing the collection.
     size_t used_before_gc = used();
 
-    // If the younger gen collections were skipped, then the
+    // If the young gen collection was skipped, then the
     // number of promoted bytes will be 0 and adding it to the
     // average will incorrectly lessen the average.  It is, however,
     // also possible that no promotion was needed.
--- a/src/share/vm/gc/serial/tenuredGeneration.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/serial/tenuredGeneration.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -42,10 +42,10 @@
   friend class VM_PopulateDumpSharedSpace;
 
  protected:
-  ContiguousSpace*  _the_space;       // Actual space holding objects
+  ContiguousSpace*    _the_space;       // Actual space holding objects
 
-  GenerationCounters*   _gen_counters;
-  CSpaceCounters*       _space_counters;
+  GenerationCounters* _gen_counters;
+  CSpaceCounters*     _space_counters;
 
   // Allocation failure
   virtual bool expand(size_t bytes, size_t expand_bytes);
@@ -54,6 +54,7 @@
   ContiguousSpace* space() const { return _the_space; }
 
   void assert_correct_size_change_locking();
+
  public:
   TenuredGeneration(ReservedSpace rs,
                     size_t initial_byte_size,
@@ -66,10 +67,9 @@
   const char* short_name() const { return "Tenured"; }
 
   // Does a "full" (forced) collection invoked on this generation collect
-  // all younger generations as well? Note that this is a
-  // hack to allow the collection of the younger gen first if the flag is
-  // set.
-  virtual bool full_collects_younger_generations() const {
+  // the young generation as well? Note that this is a hack to allow the
+  // collection of the young gen first if the flag is set.
+  virtual bool full_collects_young_generation() const {
     return !ScavengeBeforeFullGC;
   }
 
@@ -99,15 +99,16 @@
                        bool clear_all_soft_refs,
                        size_t size,
                        bool is_tlab);
+
   HeapWord* expand_and_allocate(size_t size,
                                 bool is_tlab,
                                 bool parallel = false);
 
   virtual void prepare_for_verify();
 
-
   virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);
+
   bool should_collect(bool   full,
                       size_t word_size,
                       bool   is_tlab);
--- a/src/share/vm/gc/shared/adaptiveSizePolicy.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/adaptiveSizePolicy.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -266,22 +266,22 @@
     }
 
     // The policy does not have enough data until at least some
-    // minor collections have been done.
+    // young collections have been done.
     _young_gen_policy_is_ready =
       (_avg_minor_gc_cost->count() >= AdaptiveSizePolicyReadyThreshold);
 
     // Calculate variables used to estimate pause time vs. gen sizes
-    double eden_size_in_mbytes = ((double)_eden_size)/((double)M);
+    double eden_size_in_mbytes = ((double)_eden_size) / ((double)M);
     update_minor_pause_young_estimator(minor_pause_in_ms);
     update_minor_pause_old_estimator(minor_pause_in_ms);
 
     if (PrintAdaptiveSizePolicy && Verbose) {
       gclog_or_tty->print("AdaptiveSizePolicy::minor_collection_end: "
-        "minor gc cost: %f  average: %f", collection_cost,
-        _avg_minor_gc_cost->average());
+                          "minor gc cost: %f  average: %f", collection_cost,
+                          _avg_minor_gc_cost->average());
       gclog_or_tty->print_cr("  minor pause: %f minor period %f",
-        minor_pause_in_ms,
-        _latest_minor_mutator_interval_seconds * MILLIUNITS);
+                             minor_pause_in_ms,
+                             _latest_minor_mutator_interval_seconds * MILLIUNITS);
     }
 
     // Calculate variable used to estimate collection cost vs. gen sizes
@@ -295,8 +295,7 @@
   _minor_timer.start();
 }
 
-size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden,
-                                            uint percent_change) {
+size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden, uint percent_change) {
   size_t eden_heap_delta;
   eden_heap_delta = cur_eden / 100 * percent_change;
   return eden_heap_delta;
@@ -312,8 +311,7 @@
   return eden_heap_delta;
 }
 
-size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo,
-                                             uint percent_change) {
+size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo, uint percent_change) {
   size_t promo_heap_delta;
   promo_heap_delta = cur_promo / 100 * percent_change;
   return promo_heap_delta;
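
Worth noting for the increment helpers above: the delta is computed as cur / 100 * percent_change, with the integer division performed first. For example, eden_increment with cur_eden = 200*M and percent_change = 20 yields (200*M / 100) * 20 = 40*M, while any size under 100 bytes produces a delta of 0.
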
--- a/src/share/vm/gc/shared/barrierSet.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/barrierSet.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -132,6 +132,9 @@
   // First the pre-write versions...
   template <class T> inline void write_ref_field_pre(T* field, oop new_val);
 private:
+  // Helper for write_ref_field_pre and friends, testing for specialized cases.
+  bool devirtualize_reference_writes() const;
+
   // Keep this private so as to catch violations at build time.
   virtual void write_ref_field_pre_work(     void* field, oop new_val) { guarantee(false, "Not needed"); };
 protected:
@@ -142,7 +145,7 @@
   // ...then the post-write version.
   inline void write_ref_field(void* field, oop new_val, bool release = false);
 protected:
-  virtual void write_ref_field_work(void* field, oop new_val, bool release = false) = 0;
+  virtual void write_ref_field_work(void* field, oop new_val, bool release) = 0;
 public:
 
   // Invoke the barrier, if any, necessary when writing the "bytes"-byte
--- a/src/share/vm/gc/shared/barrierSet.inline.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/barrierSet.inline.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -32,8 +32,18 @@
 // performance-critical calls when the barrier is the most common
 // card-table kind.
 
+inline bool BarrierSet::devirtualize_reference_writes() const {
+  switch (kind()) {
+  case CardTableForRS:
+  case CardTableExtension:
+    return true;
+  default:
+    return false;
+  }
+}
+
 template <class T> void BarrierSet::write_ref_field_pre(T* field, oop new_val) {
-  if (kind() == CardTableModRef) {
+  if (devirtualize_reference_writes()) {
     barrier_set_cast<CardTableModRefBS>(this)->inline_write_ref_field_pre(field, new_val);
   } else {
     write_ref_field_pre_work(field, new_val);
@@ -41,7 +51,7 @@
 }
 
 void BarrierSet::write_ref_field(void* field, oop new_val, bool release) {
-  if (kind() == CardTableModRef) {
+  if (devirtualize_reference_writes()) {
     barrier_set_cast<CardTableModRefBS>(this)->inline_write_ref_field(field, new_val, release);
   } else {
     write_ref_field_work(field, new_val, release);
@@ -77,7 +87,7 @@
 
 
 inline void BarrierSet::write_region(MemRegion mr) {
-  if (kind() == CardTableModRef) {
+  if (devirtualize_reference_writes()) {
     barrier_set_cast<CardTableModRefBS>(this)->inline_write_region(mr);
   } else {
     write_region_work(mr);
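
The devirtualize_reference_writes helper above replaces exact-tag tests (kind() == CardTableModRef) scattered through the inline barrier paths with one predicate over the family of plain card-table kinds, which matters now that CardTableModRefBSForCTRS carries the concrete CardTableForRS tag (cardTableModRefBSForCTRS.cpp, below). A condensed sketch of the dispatch shape, with illustrative names:

    enum BarrierKind { CardTableForRS, CardTableExtension, Other };

    struct Barrier {
      BarrierKind _kind;
      BarrierKind kind() const { return _kind; }

      // True for kinds whose post-write barrier is a plain card mark,
      // letting callers bypass the virtual slow path.
      bool devirtualize_reference_writes() const {
        switch (kind()) {
          case CardTableForRS:
          case CardTableExtension:
            return true;
          default:
            return false;
        }
      }

      void write_ref_field(void** field) {
        if (devirtualize_reference_writes()) {
          inline_card_mark(field);       // statically bound fast path
        } else {
          write_ref_field_work(field);   // virtual, collector-specific
        }
      }

      void inline_card_mark(void** field) { /* dirty the card for field */ }
      virtual void write_ref_field_work(void** field) {}
      virtual ~Barrier() {}
    };

Note also that write_ref_field_work loses its default argument in this change: default arguments on virtual functions are bound by the static type at the call site, so repeating `release = false` on every override invites silent mismatches.
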
--- a/src/share/vm/gc/shared/blockOffsetTable.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/blockOffsetTable.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -25,9 +25,12 @@
 #ifndef SHARE_VM_GC_SHARED_BLOCKOFFSETTABLE_HPP
 #define SHARE_VM_GC_SHARED_BLOCKOFFSETTABLE_HPP
 
+#include "gc/shared/memset_with_concurrent_readers.hpp"
 #include "memory/memRegion.hpp"
 #include "memory/virtualspace.hpp"
+#include "runtime/globals.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
 
 // The CollectedHeap type requires subtypes to implement a method
 // "block_start".  For some subtypes, notably generational
@@ -126,6 +129,19 @@
   VirtualSpace _vs;
   u_char* _offset_array;          // byte array keeping backwards offsets
 
+  void fill_range(size_t start, size_t num_cards, u_char offset) {
+    void* start_ptr = &_offset_array[start];
+#if INCLUDE_ALL_GCS
+    // If collector is concurrent, special handling may be needed.
+    assert(!UseG1GC, "Shouldn't be here when using G1");
+    if (UseConcMarkSweepGC) {
+      memset_with_concurrent_readers(start_ptr, offset, num_cards);
+      return;
+    }
+#endif // INCLUDE_ALL_GCS
+    memset(start_ptr, offset, num_cards);
+  }
+
  protected:
   // Bounds checking accessors:
   // For performance these have to devolve to array accesses in product builds.
@@ -160,20 +176,7 @@
     assert(left  < right, "Heap addresses out of order");
     size_t num_cards = pointer_delta(right, left) >> LogN_words;
 
-    // Below, we may use an explicit loop instead of memset()
-    // because on certain platforms memset() can give concurrent
-    // readers "out-of-thin-air," phantom zeros; see 6948537.
-    if (UseMemSetInBOT) {
-      memset(&_offset_array[index_for(left)], offset, num_cards);
-    } else {
-      size_t i = index_for(left);
-      const size_t end = i + num_cards;
-      for (; i < end; i++) {
-        // Elided until CR 6977974 is fixed properly.
-        // assert(!reducing || _offset_array[i] >= offset, "Not reducing");
-        _offset_array[i] = offset;
-      }
-    }
+    fill_range(index_for(left), num_cards, offset);
   }
 
   void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
@@ -182,20 +185,7 @@
     assert(left  <= right, "indexes out of order");
     size_t num_cards = right - left + 1;
 
-    // Below, we may use an explicit loop instead of memset
-    // because on certain platforms memset() can give concurrent
-    // readers "out-of-thin-air," phantom zeros; see 6948537.
-    if (UseMemSetInBOT) {
-      memset(&_offset_array[left], offset, num_cards);
-    } else {
-      size_t i = left;
-      const size_t end = i + num_cards;
-      for (; i < end; i++) {
-        // Elided until CR 6977974 is fixed properly.
-        // assert(!reducing || _offset_array[i] >= offset, "Not reducing");
-        _offset_array[i] = offset;
-      }
-    }
+    fill_range(left, num_cards, offset);
   }
 
   void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
--- a/src/share/vm/gc/shared/cardTableModRefBS.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/cardTableModRefBS.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -183,7 +183,7 @@
   // these functions here for performance.
 
   void write_ref_field_work(oop obj, size_t offset, oop newVal);
-  virtual void write_ref_field_work(void* field, oop newVal, bool release = false);
+  virtual void write_ref_field_work(void* field, oop newVal, bool release);
 public:
 
   bool has_write_ref_array_opt() { return true; }
--- a/src/share/vm/gc/shared/cardTableModRefBSForCTRS.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/cardTableModRefBSForCTRS.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -31,13 +31,7 @@
 CardTableModRefBSForCTRS::CardTableModRefBSForCTRS(MemRegion whole_heap) :
   CardTableModRefBS(
     whole_heap,
-    // Concrete tag should be BarrierSet::CardTableForRS.
-    // That will presently break things in a bunch of places though.
-    // The concrete tag is used as a dispatch key in many places, and
-    // CardTableForRS does not correctly dispatch in some of those
-    // uses. This will be addressed as part of a reorganization of the
-    // BarrierSet hierarchy.
-    BarrierSet::FakeRtti(BarrierSet::CardTableModRef, 0).add_tag(BarrierSet::CardTableForRS)),
+    BarrierSet::FakeRtti(BarrierSet::CardTableForRS)),
   // LNC functionality
   _lowest_non_clean(NULL),
   _lowest_non_clean_chunk_size(NULL),
--- a/src/share/vm/gc/shared/cardTableRS.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/cardTableRS.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -80,7 +80,9 @@
         break;
       }
     }
-    if (!seen) return v;
+    if (!seen) {
+      return v;
+    }
   }
   ShouldNotReachHere();
   return 0;
@@ -502,7 +504,7 @@
       //
       // The main point below is that the parallel card scanning code
       // deals correctly with these stale card values. There are two main
-      // cases to consider where we have a stale "younger gen" value and a
+      // cases to consider where we have a stale "young gen" value and a
       // "derivative" case to consider, where we have a stale
       // "cur_younger_gen_and_prev_non_clean" value, as will become
       // apparent in the case analysis below.
--- a/src/share/vm/gc/shared/collectedHeap.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/collectedHeap.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -160,16 +160,20 @@
 // Memory state functions.
 
 
-CollectedHeap::CollectedHeap() {
+CollectedHeap::CollectedHeap() :
+  _barrier_set(NULL),
+  _is_gc_active(false),
+  _total_collections(0),
+  _total_full_collections(0),
+  _gc_cause(GCCause::_no_gc),
+  _gc_lastcause(GCCause::_no_gc),
+  _defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below.
+{
   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
   const size_t elements_per_word = HeapWordSize / sizeof(jint);
   _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                              max_len / elements_per_word);
 
-  _barrier_set = NULL;
-  _is_gc_active = false;
-  _total_collections = _total_full_collections = 0;
-  _gc_cause = _gc_lastcause = GCCause::_no_gc;
   NOT_PRODUCT(_promotion_failure_alot_count = 0;)
   NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
 
@@ -184,7 +188,7 @@
                 PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                              80, GCCause::to_string(_gc_lastcause), CHECK);
   }
-  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
+
   // Create the ring log
   if (LogEvents) {
     _gc_heap_log = new GCHeapLog();
@@ -570,8 +574,8 @@
 void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
   if (HeapDumpBeforeFullGC) {
     GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer, GCId::create());
-    // We are doing a "major" collection and a heap dump before
-    // major collection has been requested.
+    // We are doing a full collection and a heap dump before
+    // full collection has been requested.
     HeapDumper::dump_heap();
   }
   if (PrintClassHistogramBeforeFullGC) {
--- a/src/share/vm/gc/shared/collectorPolicy.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/collectorPolicy.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -111,13 +111,6 @@
   size_t max_heap_byte_size()     { return _max_heap_byte_size; }
   size_t min_heap_byte_size()     { return _min_heap_byte_size; }
 
-  enum Name {
-    CollectorPolicyKind,
-    GenCollectorPolicyKind,
-    ConcurrentMarkSweepPolicyKind,
-    G1CollectorPolicyKind
-  };
-
   AdaptiveSizePolicy* size_policy() { return _size_policy; }
   bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; }
   void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
@@ -150,8 +143,6 @@
 #endif // INCLUDE_ALL_GCS
 
 
-  virtual BarrierSet::Name barrier_set_name() = 0;
-
   virtual GenRemSet* create_rem_set(MemRegion reserved);
 
   // This method controls how a collector satisfies a request
@@ -182,10 +173,6 @@
     ShouldNotReachHere();
   }
 
-  virtual CollectorPolicy::Name kind() {
-    return CollectorPolicy::CollectorPolicyKind;
-  }
-
   // Do any updates required to global flags that are due to heap initialization
   // changes
   virtual void post_heap_initialize() = 0;
@@ -298,12 +285,6 @@
   virtual void post_heap_initialize() {
     assert(_max_young_size == MaxNewSize, "Should be taken care of by initialize_size_info");
   }
-
-  BarrierSet::Name barrier_set_name()  { return BarrierSet::CardTableModRef; }
-
-  virtual CollectorPolicy::Name kind() {
-    return CollectorPolicy::GenCollectorPolicyKind;
-  }
 };
 
 class MarkSweepPolicy : public GenCollectorPolicy {
--- a/src/share/vm/gc/shared/gcTrace.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/gcTrace.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -88,6 +88,8 @@
   send_reference_stats_event(REF_WEAK, rps.weak_count());
   send_reference_stats_event(REF_FINAL, rps.final_count());
   send_reference_stats_event(REF_PHANTOM, rps.phantom_count());
+  send_reference_stats_event(REF_CLEANER, rps.cleaner_count());
+  send_reference_stats_event(REF_JNI, rps.jni_weak_ref_count());
 }
 
 #if INCLUDE_SERVICES
@@ -173,6 +175,11 @@
   _tenuring_threshold = tenuring_threshold;
 }
 
+bool YoungGCTracer::should_report_promotion_events() const {
+  return should_report_promotion_in_new_plab_event() ||
+          should_report_promotion_outside_plab_event();
+}
+
 bool YoungGCTracer::should_report_promotion_in_new_plab_event() const {
   return should_send_promotion_in_new_plab_event();
 }
--- a/src/share/vm/gc/shared/gcTrace.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/gcTrace.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -172,6 +172,7 @@
    *
    * plab_size is the size of the newly allocated PLAB in bytes.
    */
+  bool should_report_promotion_events() const;
   bool should_report_promotion_in_new_plab_event() const;
   bool should_report_promotion_outside_plab_event() const;
   void report_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
--- a/src/share/vm/gc/shared/genCollectedHeap.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/genCollectedHeap.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -464,7 +464,7 @@
     bool prepared_for_verification = false;
     bool collected_old = false;
     bool old_collects_young = complete &&
-                              _old_gen->full_collects_younger_generations();
+                              _old_gen->full_collects_young_generation();
     if (!old_collects_young &&
         _young_gen->should_collect(full, size, is_tlab)) {
       if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
@@ -521,7 +521,7 @@
     // a whole heap collection.
     complete = complete || collected_old;
 
-    if (complete) { // We did a "major" collection
+    if (complete) { // We did a full collection
       // FIXME: See comment at pre_full_gc_dump call
       post_full_gc_dump(NULL);   // do any post full gc dumps
     }
@@ -668,13 +668,13 @@
 
 void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
                                          GenerationType type,
-                                         bool younger_gens_as_roots,
+                                         bool young_gen_as_roots,
                                          ScanningOption so,
                                          bool only_strong_roots,
                                          OopsInGenClosure* not_older_gens,
                                          OopsInGenClosure* older_gens,
                                          CLDClosure* cld_closure) {
-  const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
+  const bool is_adjust_phase = !only_strong_roots && !young_gen_as_roots;
 
   bool is_moving_collection = false;
   if (type == YoungGen || is_adjust_phase) {
@@ -691,7 +691,7 @@
                 cld_closure, weak_cld_closure,
                 &mark_code_closure);
 
-  if (younger_gens_as_roots) {
+  if (young_gen_as_roots) {
     if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
       if (type == OldGen) {
         not_older_gens->set_generation(_young_gen);
@@ -763,25 +763,25 @@
 void GenCollectedHeap::collect(GCCause::Cause cause) {
   if (should_do_concurrent_full_gc(cause)) {
 #if INCLUDE_ALL_GCS
-    // mostly concurrent full collection
+    // Mostly concurrent full collection.
     collect_mostly_concurrent(cause);
 #else  // INCLUDE_ALL_GCS
     ShouldNotReachHere();
 #endif // INCLUDE_ALL_GCS
   } else if (cause == GCCause::_wb_young_gc) {
-    // minor collection for WhiteBox API
+    // Young collection for the WhiteBox API.
     collect(cause, YoungGen);
   } else {
 #ifdef ASSERT
   if (cause == GCCause::_scavenge_alot) {
-    // minor collection only
+    // Young collection only.
     collect(cause, YoungGen);
   } else {
-    // Stop-the-world full collection
+    // Stop-the-world full collection.
     collect(cause, OldGen);
   }
 #else
-    // Stop-the-world full collection
+    // Stop-the-world full collection.
     collect(cause, OldGen);
 #endif
   }
--- a/src/share/vm/gc/shared/genCollectedHeap.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/genCollectedHeap.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -173,8 +173,7 @@
 
   size_t max_capacity() const;
 
-  HeapWord* mem_allocate(size_t size,
-                         bool*  gc_overhead_limit_was_exceeded);
+  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
 
   // We may support a shared contiguous allocation area, if the youngest
   // generation does.
@@ -403,7 +402,7 @@
 
   void gen_process_roots(StrongRootsScope* scope,
                          GenerationType type,
-                         bool younger_gens_as_roots,
+                         bool young_gen_as_roots,
                          ScanningOption so,
                          bool only_strong_roots,
                          OopsInGenClosure* not_older_gens,
--- a/src/share/vm/gc/shared/genRemSet.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/genRemSet.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -110,13 +110,11 @@
   virtual void print() {}
 
   // Informs the RS that the given memregion contains no references to
-  // younger generations.
+  // the young generation.
   virtual void clear(MemRegion mr) = 0;
 
-  // Informs the RS that there are no references to generations
-  // younger than gen from generations gen and older.
-  // The parameter clear_perm indicates if the perm_gen's
-  // remembered set should also be processed/cleared.
+  // Informs the RS that there are no references to the young generation
+  // from old_gen.
   virtual void clear_into_younger(Generation* old_gen) = 0;
 
   // Informs the RS that refs in the given "mr" may have changed
--- a/src/share/vm/gc/shared/generation.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/generation.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -80,7 +80,6 @@
                               // first two fields are word-sized.)
 };
 
-
 class Generation: public CHeapObj<mtGC> {
   friend class VMStructs;
  private:
@@ -299,8 +298,7 @@
   // word of "obj" may have been overwritten with a forwarding pointer, and
   // also taking care to copy the klass pointer *last*.  Returns the new
   // object if successful, or else NULL.
-  virtual oop par_promote(int thread_num,
-                          oop obj, markOop m, size_t word_sz);
+  virtual oop par_promote(int thread_num, oop obj, markOop m, size_t word_sz);
 
   // Informs the current generation that all par_promote_alloc's in the
   // collection have been completed; any supporting data structures can be
@@ -315,7 +313,7 @@
 
   // This generation will collect the young generation
   // during a full collection.
-  virtual bool full_collects_younger_generations() const { return false; }
+  virtual bool full_collects_young_generation() const { return false; }
 
   // This generation does in-place marking, meaning that mark words
   // are mutated during the marking phase and presumably reinitialized
@@ -370,18 +368,18 @@
 
   // Some generations may require some cleanup or preparation actions before
   // allowing a collection.  The default is to do nothing.
-  virtual void gc_prologue(bool full) {};
+  virtual void gc_prologue(bool full) {}
 
   // Some generations may require some cleanup actions after a collection.
   // The default is to do nothing.
-  virtual void gc_epilogue(bool full) {};
+  virtual void gc_epilogue(bool full) {}
 
   // Save the high water marks for the used space in a generation.
-  virtual void record_spaces_top() {};
+  virtual void record_spaces_top() {}
 
   // Some generations may need to be "fixed-up" after some allocation
   // activity to make them parsable again. The default is to do nothing.
-  virtual void ensure_parsability() {};
+  virtual void ensure_parsability() {}
 
   // Time (in ms) when we were last collected or now if a collection is
   // in progress.
@@ -417,7 +415,7 @@
   virtual void adjust_pointers();
   // Mark sweep support phase4
   virtual void compact();
-  virtual void post_compact() {ShouldNotReachHere();}
+  virtual void post_compact() { ShouldNotReachHere(); }
 
   // Support for CMS's rescan. In this general form we return a pointer
   // to an abstract object that can be used, based on specific previously
@@ -432,7 +430,7 @@
 
   // Some generations may require some cleanup actions before allowing
   // a verification.
-  virtual void prepare_for_verify() {};
+  virtual void prepare_for_verify() {}
 
   // Accessing "marks".
 
@@ -483,7 +481,7 @@
 
   // Give each generation an opportunity to do clean up for any
   // contributed scratch.
-  virtual void reset_scratch() {};
+  virtual void reset_scratch() {}
 
   // When an older generation has been collected, and perhaps resized,
   // this method will be invoked on all younger generations (from older to
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shared/memset_with_concurrent_readers.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include <string.h>
+#include "gc/shared/memset_with_concurrent_readers.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+#include "utilities/ostream.hpp"
+
+#if INCLUDE_ALL_GCS
+
+// Unit test
+#ifdef ASSERT
+
+static unsigned line_byte(const char* line, size_t i) {
+  return unsigned(line[i]) & 0xFF;
+}
+
+// Verify memset_with_concurrent_readers mimics memset.
+// We don't attempt to verify the concurrent reader case.
+void test_memset_with_concurrent_readers() {
+  const size_t chunk_size = 8 * BytesPerWord;
+  const unsigned chunk_count = 4;
+  const size_t block_size = (chunk_count + 4) * chunk_size;
+  char block[block_size];
+  char clear_block[block_size];
+  char set_block[block_size];
+
+  // block format:
+  // 0: unused leading chunk
+  // 1: chunk written from start index to end of chunk
+  // ... nchunks fully written chunks
+  // N: chunk written from start of chunk to end index
+  // N+1: unused trailing chunk
+
+  const int clear_value = 0;
+  const int set_value = 0xAC;
+
+  memset(clear_block, clear_value, block_size);
+  memset(set_block, set_value, block_size);
+
+  for (unsigned nchunks = 0; nchunks <= chunk_count; ++nchunks) {
+    for (size_t start = 1; start <= chunk_size; ++start) {
+      for (size_t end = 0; end <= chunk_size; ++end) {
+        size_t set_start = chunk_size + start;
+        size_t set_end = (2 + nchunks) * chunk_size + end;
+        size_t set_size = set_end - set_start;
+
+        memset(block, clear_value, block_size);
+        memset_with_concurrent_readers(&block[set_start], set_value, set_size);
+        bool head_clear = !memcmp(clear_block, block, set_start);
+        bool middle_set = !memcmp(set_block, block + set_start, set_size);
+        bool tail_clear = !memcmp(clear_block, block + set_end, block_size - set_end);
+        if (!(head_clear && middle_set && tail_clear)) {
+          tty->print_cr("*** memset_with_concurrent_readers failed: "
+                        "set start " SIZE_FORMAT ", set end " SIZE_FORMAT,
+                        set_start, set_end);
+          for (unsigned chunk = 0; chunk < (block_size / chunk_size); ++chunk) {
+            for (unsigned line = 0; line < (chunk_size / BytesPerWord); ++line) {
+              const char* lp = &block[chunk * chunk_size + line * BytesPerWord];
+              tty->print_cr("%d,%d: %2x %2x  %2x %2x  %2x %2x  %2x %2x",
+                            chunk, line,
+                            line_byte(lp, 0), line_byte(lp, 1),
+                            line_byte(lp, 2), line_byte(lp, 3),
+                            line_byte(lp, 4), line_byte(lp, 5),
+                            line_byte(lp, 6), line_byte(lp, 7));
+            }
+          }
+          assert(head_clear, "leading byte not clear");
+          assert(middle_set, "memset byte not set");
+          assert(tail_clear, "trailing bye not clear");
+        }
+      }
+    }
+  }
+}
+
+#endif // end unit test
+
+#endif // INCLUDE_ALL_GCS
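
The ASSERT-only sweep above is exhaustive over the interesting boundaries: with chunk_size = 8 * BytesPerWord (64 bytes on a 64-bit build), it covers nchunks 0..4, start 1..64 and end 0..64, i.e. 5 * 64 * 65 = 20,800 distinct fill ranges, each compared byte-for-byte against plain memset for the head, body and tail of the block.
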
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shared/memset_with_concurrent_readers.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SRC_SHARE_VM_GC_SHARED_MEMSETWITHCONCURRENTREADERS_HPP
+#define SRC_SHARE_VM_GC_SHARED_MEMSETWITHCONCURRENTREADERS_HPP
+
+#include <stddef.h>
+#include <string.h>
+#include "utilities/macros.hpp"
+
+// Only used by concurrent collectors.
+#if INCLUDE_ALL_GCS
+
+// Fill a block of memory with value, like memset, but with the
+// understanding that there may be concurrent readers of that memory.
+void memset_with_concurrent_readers(void* to, int value, size_t size);
+
+#ifdef TARGET_ARCH_sparc
+
+// SPARC requires special handling.  See SPARC-specific definition.
+
+#else
+// All others just use memset.
+
+inline void memset_with_concurrent_readers(void* to, int value, size_t size) {
+  ::memset(to, value, size);
+}
+
+#endif // End of target dispatch.
+
+#endif // INCLUDE_ALL_GCS
+
+#endif // include guard
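
On every target except SPARC the new function compiles to a plain memset, as the header shows; the point of the indirection is that a concurrent reader of the filled range must never observe a value that is neither the old nor the new byte (the "out-of-thin-air" phantom zeros of CR 6948537 that the removed UseMemSetInBOT loops in blockOffsetTable.hpp worked around). A hedged sketch of the aligned-word-store style such an implementation can use — illustrative only, not the actual SPARC code, and simplified in that the boundary bytes here are written individually:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Fill [to, to+size) with `value`, using one aligned store per word in
    // the body so a concurrent reader sees each word change atomically.
    inline void fill_for_concurrent_readers(void* to, int value, std::size_t size) {
      uint8_t* p = static_cast<uint8_t*>(to);
      uint8_t* end = p + size;
      // Byte stores up to the first word boundary.
      while (p < end && (reinterpret_cast<uintptr_t>(p) % sizeof(uintptr_t)) != 0) {
        *p++ = static_cast<uint8_t>(value);
      }
      // Build a word of repeated `value` bytes, then store it word-at-a-time.
      uintptr_t word;
      std::memset(&word, value, sizeof(word));
      for (; p + sizeof(uintptr_t) <= end; p += sizeof(uintptr_t)) {
        *reinterpret_cast<volatile uintptr_t*>(p) = word;
      }
      // Trailing bytes.
      while (p < end) {
        *p++ = static_cast<uint8_t>(value);
      }
    }
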
--- a/src/share/vm/gc/shared/modRefBarrierSet.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/modRefBarrierSet.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -60,7 +60,6 @@
     : BarrierSet(fake_rtti.add_tag(BarrierSet::ModRef)) { }
   ~ModRefBarrierSet() { }
 
-  virtual void write_ref_field_work(void* field, oop new_val, bool release = false) = 0;
 public:
   void write_prim_field(HeapWord* field, size_t bytes,
                         juint val1, juint val2) {}
--- a/src/share/vm/gc/shared/referenceProcessor.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/referenceProcessor.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -243,10 +243,13 @@
       process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                                  is_alive, keep_alive, complete_gc, task_executor);
 
-    // Process cleaners, but include them in phantom statistics.  We expect
-    // Cleaner references to be temporary, and don't want to deal with
-    // possible incompatibilities arising from making it more visible.
-    phantom_count +=
+  }
+
+  // Cleaners
+  size_t cleaner_count = 0;
+  {
+    GCTraceTime tt("Cleaners", trace_time, false, gc_timer, gc_id);
+    cleaner_count =
       process_discovered_reflist(_discoveredCleanerRefs, NULL, true,
                                  is_alive, keep_alive, complete_gc, task_executor);
   }
@@ -256,15 +259,17 @@
   // that is not how the JDK1.2 specification is. See #4126360. Native code can
   // thus use JNI weak references to circumvent the phantom references and
   // resurrect a "post-mortem" object.
+  size_t jni_weak_ref_count = 0;
   {
     GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer, gc_id);
     if (task_executor != NULL) {
       task_executor->set_single_threaded_mode();
     }
-    process_phaseJNI(is_alive, keep_alive, complete_gc);
+    jni_weak_ref_count =
+      process_phaseJNI(is_alive, keep_alive, complete_gc);
   }
 
-  return ReferenceProcessorStats(soft_count, weak_count, final_count, phantom_count);
+  return ReferenceProcessorStats(soft_count, weak_count, final_count, phantom_count, cleaner_count, jni_weak_ref_count);
 }
 
 #ifndef PRODUCT
@@ -291,17 +296,17 @@
 }
 #endif
 
-void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
-                                          OopClosure*        keep_alive,
-                                          VoidClosure*       complete_gc) {
-#ifndef PRODUCT
+size_t ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
+                                            OopClosure*        keep_alive,
+                                            VoidClosure*       complete_gc) {
+  DEBUG_ONLY(size_t check_count = count_jni_refs();)
+  size_t count = JNIHandles::weak_oops_do(is_alive, keep_alive);
+  assert(count == check_count, "Counts didn't match");
+  complete_gc->do_void();
   if (PrintGCDetails && PrintReferenceGC) {
-    unsigned int count = count_jni_refs();
-    gclog_or_tty->print(", %u refs", count);
+    gclog_or_tty->print(", " SIZE_FORMAT " refs", count);
   }
-#endif
-  JNIHandles::weak_oops_do(is_alive, keep_alive);
-  complete_gc->do_void();
+  return count;
 }
 
 
@@ -941,9 +946,10 @@
       list = &_discoveredCleanerRefs[id];
       break;
     case REF_NONE:
+    case REF_JNI:
       // we should not reach here if we are an InstanceRefKlass
     default:
-      ShouldNotReachHere();
+      guarantee(false, err_msg("rt should not be %d", rt));
   }
   if (TraceReferenceGC && PrintGCDetails) {
     gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, p2i(list));
@@ -1059,7 +1065,7 @@
     // can mark through them now, rather than delaying that
     // to the reference-processing phase. Since all current
     // time-stamp policies advance the soft-ref clock only
-    // at a major collection cycle, this is always currently
+    // at a full collection cycle, this is always currently
     // accurate.
     if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
       return false;
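
The reference-processing changes above thread two new counts through to tracing: Cleaner references get their own timed phase and counter instead of being folded into the phantom total, and process_phaseJNI now returns how many JNI weak references it processed, so ReferenceProcessorStats (below) and GCTracer::report_gc_reference_stats (gcTrace.cpp, above) can report REF_CLEANER and REF_JNI alongside the existing four categories.
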
--- a/src/share/vm/gc/shared/referenceProcessor.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/referenceProcessor.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -247,7 +247,7 @@
   DiscoveredList* _discoveredCleanerRefs;
 
  public:
-  static int number_of_subclasses_of_ref() { return (REF_CLEANER - REF_OTHER); }
+  static int number_of_subclasses_of_ref() { return REF_LISTS_COUNT; }
 
   uint num_q()                             { return _num_q; }
   uint max_num_q()                         { return _max_num_q; }
@@ -271,9 +271,9 @@
                                     VoidClosure*                 complete_gc,
                                     AbstractRefProcTaskExecutor* task_executor);
 
-  void process_phaseJNI(BoolObjectClosure* is_alive,
-                        OopClosure*        keep_alive,
-                        VoidClosure*       complete_gc);
+  size_t process_phaseJNI(BoolObjectClosure* is_alive,
+                          OopClosure*        keep_alive,
+                          VoidClosure*       complete_gc);
 
   // Work methods used by the method process_discovered_reflist
   // Phase1: keep alive all those referents that are otherwise
--- a/src/share/vm/gc/shared/referenceProcessorStats.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/referenceProcessorStats.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -36,22 +36,30 @@
   size_t _weak_count;
   size_t _final_count;
   size_t _phantom_count;
+  size_t _cleaner_count;
+  size_t _jni_weak_ref_count;
 
  public:
   ReferenceProcessorStats() :
     _soft_count(0),
     _weak_count(0),
     _final_count(0),
-    _phantom_count(0) {}
+    _phantom_count(0),
+    _cleaner_count(0),
+    _jni_weak_ref_count(0) {}
 
   ReferenceProcessorStats(size_t soft_count,
                           size_t weak_count,
                           size_t final_count,
-                          size_t phantom_count) :
+                          size_t phantom_count,
+                          size_t cleaner_count,
+                          size_t jni_weak_ref_count) :
     _soft_count(soft_count),
     _weak_count(weak_count),
     _final_count(final_count),
-    _phantom_count(phantom_count)
+    _phantom_count(phantom_count),
+    _cleaner_count(cleaner_count),
+    _jni_weak_ref_count(jni_weak_ref_count)
   {}
 
   size_t soft_count() const {
@@ -69,5 +77,13 @@
   size_t phantom_count() const {
     return _phantom_count;
   }
+
+  size_t cleaner_count() const {
+    return _cleaner_count;
+  }
+
+  size_t jni_weak_ref_count() const {
+    return _jni_weak_ref_count;
+  }
 };
 #endif
--- a/src/share/vm/gc/shared/space.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/space.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -237,7 +237,7 @@
                                                    HeapWord* bottom,    \
                                                    HeapWord* top,       \
                                                    ClosureType* cl) {   \
-  bottom += oop(bottom)->oop_iterate(cl, mr);                           \
+  bottom += oop(bottom)->oop_iterate_size(cl, mr);                      \
   if (bottom < top) {                                                   \
     HeapWord* next_obj = bottom + oop(bottom)->size();                  \
     while (next_obj < top) {                                            \
@@ -508,7 +508,7 @@
     HeapWord* t = mr.end();                                                 \
     while (obj_addr < t) {                                                  \
       assert(oop(obj_addr)->is_oop(), "Should be an oop");                  \
-      obj_addr += oop(obj_addr)->oop_iterate(blk);                          \
+      obj_addr += oop(obj_addr)->oop_iterate_size(blk);                     \
     }                                                                       \
   }
 
@@ -523,7 +523,7 @@
   HeapWord* t = top();
   // Could call objects iterate, but this is easier.
   while (obj_addr < t) {
-    obj_addr += oop(obj_addr)->oop_iterate(blk);
+    obj_addr += oop(obj_addr)->oop_iterate_size(blk);
   }
 }
 
@@ -578,7 +578,7 @@
       Prefetch::write(p, interval);                                       \
       debug_only(HeapWord* prev = p);                                     \
       oop m = oop(p);                                                     \
-      p += m->oop_iterate(blk);                                           \
+      p += m->oop_iterate_size(blk);                                      \
     }                                                                     \
   } while (t < top());                                                    \
                                                                           \
--- a/src/share/vm/gc/shared/specialized_oop_closures.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/specialized_oop_closures.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -42,6 +42,8 @@
 class ScanClosure;
 class FastScanClosure;
 class FilteringClosure;
+// MarkSweep
+class MarkAndPushClosure;
 // ParNew
 class ParScanWithBarrierClosure;
 class ParScanWithoutBarrierClosure;
@@ -87,6 +89,9 @@
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S(f)             \
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(f)
 
+#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f)      \
+  f(MarkAndPushClosure,_nv)
+
 #if INCLUDE_ALL_GCS
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f)     \
   f(MarkRefsIntoAndScanClosure,_nv)                     \
@@ -101,10 +106,12 @@
 
 #if INCLUDE_ALL_GCS
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f)       \
+  SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f)            \
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f)           \
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f)
 #else  // INCLUDE_ALL_GCS
-#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f)
+#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f)       \
+  SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f)
 #endif // INCLUDE_ALL_GCS
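
The MS additions above slot MarkAndPushClosure into the existing closure-specialization machinery: each SPECIALIZED_* list macro applies a caller-supplied macro f to (closure, suffix) pairs, and the definition macro applied at the bottom of markSweep.cpp stamps out the statically bound iterators. In miniature, with illustrative names:

    struct MarkAndPushClosure {
      void do_oop_nv(void** p) { /* mark and push *p */ }
    };

    // The "list" macro names each specialized closure once.
    #define SPECIALIZED_CLOSURES(f) \
      f(MarkAndPushClosure, _nv)

    // The "definition" macro generates code for one (closure, suffix) pair.
    #define DEFINE_ITERATE(ClosureType, nv_suffix)               \
      void oop_iterate##nv_suffix(void** begin, void** end,      \
                                  ClosureType* cl) {             \
        for (void** p = begin; p < end; ++p) {                   \
          cl->do_oop_nv(p);  /* statically bound, inlinable */   \
        }                                                        \
      }

    // One application generates every specialized iterator.
    SPECIALIZED_CLOSURES(DEFINE_ITERATE)
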
 
 
--- a/src/share/vm/gc/shared/taskqueue.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/taskqueue.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -258,8 +258,8 @@
 
 #ifdef ASSERT
 bool ObjArrayTask::is_valid() const {
-  return _obj != NULL && _obj->is_objArray() && _index > 0 &&
-    _index < objArrayOop(_obj)->length();
+  return _obj != NULL && _obj->is_objArray() && _index >= 0 &&
+      _index < objArrayOop(_obj)->length();
 }
 #endif // ASSERT
 
--- a/src/share/vm/gc/shared/vmGCOperations.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/gc/shared/vmGCOperations.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -213,15 +213,18 @@
   size_t                   _size;     // size of object to be allocated
   Metaspace::MetadataType  _mdtype;
   ClassLoaderData*         _loader_data;
+
  public:
   VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
-                                  size_t size, Metaspace::MetadataType mdtype,
+                                  size_t size,
+                                  Metaspace::MetadataType mdtype,
                                   uint gc_count_before,
                                   uint full_gc_count_before,
                                   GCCause::Cause gc_cause)
     : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
       _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
   }
+
   virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
   virtual void doit();
   MetaWord* result() const       { return _result; }
--- a/src/share/vm/memory/filemap.cpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/memory/filemap.cpp	Fri Sep 18 14:21:46 2015 -0700
@@ -707,12 +707,16 @@
                                     addr, string_ranges[i].byte_size(), si->_read_only,
                                     si->_allow_exec);
         if (base == NULL || base != addr) {
+          // Deallocate the string regions from the Java heap.
+          dealloc_string_regions();
           fail_continue("Unable to map shared string space at required address.");
           return false;
         }
       }
 
       if (!verify_string_regions()) {
+        // Deallocate the string regions from the Java heap.
+        dealloc_string_regions();
         fail_continue("Shared string regions are corrupt");
         return false;
       }
@@ -745,12 +749,14 @@
 }
 
 void FileMapInfo::fixup_string_regions() {
+#if INCLUDE_ALL_GCS
   // If any string regions were found, call the fill routine to make them parseable.
   // Note that string_ranges may be non-NULL even if no ranges were found.
   if (num_ranges != 0) {
     assert(string_ranges != NULL, "Null string_ranges array with non-zero count");
     G1CollectedHeap::heap()->fill_archive_regions(string_ranges, num_ranges);
   }
+#endif
 }
 
 bool FileMapInfo::verify_region_checksum(int i) {
@@ -793,20 +799,14 @@
   }
 }
 
-void FileMapInfo::unmap_string_regions() {
-  for (int i = MetaspaceShared::first_string;
-           i < MetaspaceShared::first_string + MetaspaceShared::max_strings; i++) {
-    struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
-    size_t used = si->_used;
-    if (used > 0) {
-      size_t size = align_size_up(used, os::vm_allocation_granularity());
-      char* addr = (char*)((void*)oopDesc::decode_heap_oop_not_null(
-                                             (narrowOop)si->_addr._offset));
-      if (!os::unmap_memory(addr, size)) {
-        fail_stop("Unable to unmap shared space.");
-      }
-    }
+// Deallocate the archived string regions from the Java heap.
+void FileMapInfo::dealloc_string_regions() {
+#if INCLUDE_ALL_GCS
+  if (num_ranges > 0) {
+    assert(string_ranges != NULL, "Null string_ranges array with non-zero count");
+    G1CollectedHeap::heap()->dealloc_archive_regions(string_ranges, num_ranges);
   }
+#endif
 }
 
 void FileMapInfo::assert_mark(bool check) {
@@ -967,7 +967,9 @@
         map_info->_header->_space[i]._addr._base = NULL;
       }
     }
-    map_info->unmap_string_regions();
+    // Deallocate the string regions without unmapping them. The string regions are
+    // part of the Java heap; unmapping of the heap regions is managed by the GC.
+    map_info->dealloc_string_regions();
   } else if (DumpSharedSpaces) {
     fail_stop("%s", msg);
   }
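
The rename from unmap_string_regions to dealloc_string_regions reflects an ownership change: the shared string regions live inside the Java heap, so on a mapping or verification failure the CDS code hands the address ranges back to G1 rather than calling os::unmap_memory itself; the heap's own teardown unmaps the underlying pages. A hedged sketch of the failure-path ordering, where every function except dealloc_string_regions is a simplified stand-in rather than the real HotSpot API:

    #include <cstdio>

    static bool map_string_regions()    { return false; }  // pretend mapping failed
    static bool verify_string_regions() { return true;  }

    static void dealloc_string_regions() {
      // In HotSpot this forwards to G1CollectedHeap::heap()->dealloc_archive_regions(),
      // returning the reserved heap ranges to the collector instead of unmapping them.
      std::printf("archive string regions returned to the heap\n");
    }

    static bool map_shared_strings() {
      if (!map_string_regions()) {
        dealloc_string_regions();  // give the ranges back before reporting failure
        std::printf("Unable to map shared string space at required address.\n");
        return false;
      }
      if (!verify_string_regions()) {
        dealloc_string_regions();
        std::printf("Shared string regions are corrupt\n");
        return false;
      }
      return true;
    }

    int main() {
      map_shared_strings();
      return 0;
    }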
--- a/src/share/vm/memory/filemap.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/memory/filemap.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -208,7 +208,7 @@
   bool  verify_string_regions();
   void  fixup_string_regions();
   void  unmap_region(int i);
-  void  unmap_string_regions();
+  void  dealloc_string_regions();
   bool  verify_region_checksum(int i);
   void  close();
   bool  is_open() { return _file_open; }
--- a/src/share/vm/memory/iterator.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/memory/iterator.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -61,7 +61,7 @@
   //
   // 1) do_klass on the header klass pointer.
   // 2) do_klass on the klass pointer in the mirrors.
-  // 3) do_class_loader_data on the class loader data in class loaders.
+  // 3) do_cld   on the class loader data in class loaders.
   //
   // The virtual (without suffix) and the non-virtual (with _nv suffix) need
   // to be updated together, or else the devirtualization will break.
@@ -71,13 +71,14 @@
   // ExtendedOopClosures that don't need to walk the metadata.
   // Currently, only CMS and G1 need these.
 
+  bool do_metadata_nv()      { return false; }
   virtual bool do_metadata() { return do_metadata_nv(); }
-  bool do_metadata_nv()      { return false; }
 
-  virtual void do_klass(Klass* k)   { do_klass_nv(k); }
-  void do_klass_nv(Klass* k)        { ShouldNotReachHere(); }
+  void do_klass_nv(Klass* k)      { ShouldNotReachHere(); }
+  virtual void do_klass(Klass* k) { do_klass_nv(k); }
 
-  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
+  void do_cld_nv(ClassLoaderData* cld)      { ShouldNotReachHere(); }
+  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
 
   // True iff this closure may be safely applied more than once to an oop
   // location without an intervening "major reset" (like the end of a GC).
@@ -180,13 +181,14 @@
     _klass_closure.initialize(this);
   }
 
-  virtual bool do_metadata()    { return do_metadata_nv(); }
-  inline  bool do_metadata_nv() { return true; }
+  bool do_metadata_nv()      { return true; }
+  virtual bool do_metadata() { return do_metadata_nv(); }
 
-  virtual void do_klass(Klass* k);
   void do_klass_nv(Klass* k);
+  virtual void do_klass(Klass* k) { do_klass_nv(k); }
 
-  virtual void do_class_loader_data(ClassLoaderData* cld);
+  void do_cld_nv(ClassLoaderData* cld);
+  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
 };
 
 // ObjectClosure is used for iterating through an object space
@@ -370,6 +372,7 @@
  public:
   template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
   template <class OopClosureType>             static void do_klass(OopClosureType* closure, Klass* k);
+  template <class OopClosureType>             static void do_cld(OopClosureType* closure, ClassLoaderData* cld);
   template <class OopClosureType>             static bool do_metadata(OopClosureType* closure);
 };
 
@@ -378,6 +381,7 @@
  public:
   template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
   template <class OopClosureType>             static void do_klass(OopClosureType* closure, Klass* k);
+  template <class OopClosureType>             static void do_cld(OopClosureType* closure, ClassLoaderData* cld);
   template <class OopClosureType>             static bool do_metadata(OopClosureType* closure);
 };
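
The reshuffled declarations all follow one convention: define the non-virtual `_nv` member first and let the virtual member of the same name forward to it. That is what lets Devirtualizer<true> bind `do_cld_nv` statically while Devirtualizer<false> takes the ordinary virtual path. A self-contained sketch of the dispatch scheme, reduced to the bare pattern:

    #include <cstdio>

    struct ClassLoaderData {};  // stand-in

    struct Closure {
      // Non-virtual version first; the virtual version forwards to it so the
      // two stay in sync (mirrors do_cld_nv / do_cld in iterator.hpp).
      void do_cld_nv(ClassLoaderData* cld) { std::printf("do_cld_nv\n"); }
      virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
      virtual ~Closure() {}
    };

    // Devirtualizer<true> binds the call statically; <false> keeps it virtual.
    template <bool nv> struct Devirtualizer;

    template <> struct Devirtualizer<true> {
      template <class OopClosureType>
      static void do_cld(OopClosureType* closure, ClassLoaderData* cld) {
        closure->do_cld_nv(cld);  // no virtual dispatch
      }
    };

    template <> struct Devirtualizer<false> {
      template <class OopClosureType>
      static void do_cld(OopClosureType* closure, ClassLoaderData* cld) {
        closure->do_cld(cld);     // ordinary virtual call
      }
    };

    int main() {
      Closure c;
      ClassLoaderData cld;
      Devirtualizer<true>::do_cld(&c, &cld);   // specialized, inlinable path
      Devirtualizer<false>::do_cld(&c, &cld);  // generic path
      return 0;
    }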
 
--- a/src/share/vm/memory/iterator.inline.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/memory/iterator.inline.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -36,7 +36,7 @@
 #include "oops/typeArrayKlass.inline.hpp"
 #include "utilities/debug.hpp"
 
-inline void MetadataAwareOopClosure::do_class_loader_data(ClassLoaderData* cld) {
+inline void MetadataAwareOopClosure::do_cld_nv(ClassLoaderData* cld) {
   assert(_klass_closure._oop_closure == this, "Must be");
 
   bool claim = true;  // Must claim the class loader data before processing.
@@ -45,11 +45,9 @@
 
 inline void MetadataAwareOopClosure::do_klass_nv(Klass* k) {
   ClassLoaderData* cld = k->class_loader_data();
-  do_class_loader_data(cld);
+  do_cld_nv(cld);
 }
 
-inline void MetadataAwareOopClosure::do_klass(Klass* k)       { do_klass_nv(k); }
-
 #ifdef ASSERT
 // This verification is applied to all visited oops.
 // The closures can turn it off by overriding should_verify_oops().
@@ -78,6 +76,10 @@
   closure->do_klass_nv(k);
 }
 template <class OopClosureType>
+void Devirtualizer<true>::do_cld(OopClosureType* closure, ClassLoaderData* cld) {
+  closure->do_cld_nv(cld);
+}
+template <class OopClosureType>
 inline bool Devirtualizer<true>::do_metadata(OopClosureType* closure) {
   // Make sure the non-virtual and the virtual versions match.
   assert(closure->do_metadata_nv() == closure->do_metadata(), "Inconsistency in do_metadata");
@@ -96,6 +98,10 @@
   closure->do_klass(k);
 }
 template <class OopClosureType>
+void Devirtualizer<false>::do_cld(OopClosureType* closure, ClassLoaderData* cld) {
+  closure->do_cld(cld);
+}
+template <class OopClosureType>
 bool Devirtualizer<false>::do_metadata(OopClosureType* closure) {
   return closure->do_metadata();
 }
--- a/src/share/vm/memory/referenceType.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/memory/referenceType.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -32,11 +32,15 @@
 enum ReferenceType {
   REF_NONE,      // Regular class
   REF_OTHER,     // Subclass of java/lang/ref/Reference, but not subclass of one of the classes below
+  ///////////////// Only the types below have their own discovered lists
   REF_SOFT,      // Subclass of java/lang/ref/SoftReference
   REF_WEAK,      // Subclass of java/lang/ref/WeakReference
   REF_FINAL,     // Subclass of java/lang/ref/FinalReference
   REF_PHANTOM,   // Subclass of java/lang/ref/PhantomReference
-  REF_CLEANER    // Subclass of sun/misc/Cleaner
+  REF_CLEANER,   // Subclass of sun/misc/Cleaner
+  ///////////////// Only the types in the above range have their own discovered lists
+  REF_JNI,       // JNI weak refs
+  REF_LISTS_COUNT = REF_CLEANER - REF_OTHER  // Number of discovered lists
 };
 
 #endif // SHARE_VM_MEMORY_REFERENCETYPE_HPP
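
Given the enumerator order above, REF_LISTS_COUNT = REF_CLEANER - REF_OTHER counts exactly the types between the two comment fences (REF_SOFT through REF_CLEANER), and REF_JNI deliberately sits outside that range, matching the note that only the fenced types own discovered lists. A compile-time check of the arithmetic, with the enum copied from the hunk above:

    enum ReferenceType {
      REF_NONE,
      REF_OTHER,
      REF_SOFT,
      REF_WEAK,
      REF_FINAL,
      REF_PHANTOM,
      REF_CLEANER,
      REF_JNI,
      REF_LISTS_COUNT = REF_CLEANER - REF_OTHER  // number of discovered lists
    };

    // REF_OTHER == 1 and REF_CLEANER == 6, so five types own discovered lists:
    // SOFT, WEAK, FINAL, PHANTOM and CLEANER. REF_JNI (7) is outside the range.
    static_assert(REF_LISTS_COUNT == 5, "five reference types have discovered lists");

    int main() { return 0; }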
--- a/src/share/vm/oops/arrayKlass.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/oops/arrayKlass.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -147,30 +147,30 @@
 // Array oop iteration macros for declarations.
 // Used to generate the declarations in the *ArrayKlass header files.
 
-#define OOP_OOP_ITERATE_DECL_RANGE(OopClosureType, nv_suffix)                                  \
-  int oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end);
+#define OOP_OOP_ITERATE_DECL_RANGE(OopClosureType, nv_suffix)                                   \
+  void oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end);
 
 #if INCLUDE_ALL_GCS
 // Named NO_BACKWARDS because the definition used by *ArrayKlass isn't reversed; see below.
-#define OOP_OOP_ITERATE_DECL_NO_BACKWARDS(OopClosureType, nv_suffix)           \
-  int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
+#define OOP_OOP_ITERATE_DECL_NO_BACKWARDS(OopClosureType, nv_suffix)            \
+  void oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
 #endif // INCLUDE_ALL_GCS
 
 
 // Array oop iteration macros for definitions.
 // Used to generate the definitions in the *ArrayKlass.inline.hpp files.
 
-#define OOP_OOP_ITERATE_DEFN_RANGE(KlassType, OopClosureType, nv_suffix)                                 \
-                                                                                                         \
-int KlassType::oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end) {  \
-  return oop_oop_iterate_range<nvs_to_bool(nv_suffix)>(obj, closure, start, end);                        \
+#define OOP_OOP_ITERATE_DEFN_RANGE(KlassType, OopClosureType, nv_suffix)                                  \
+                                                                                                          \
+void KlassType::oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end) {  \
+  oop_oop_iterate_range<nvs_to_bool(nv_suffix)>(obj, closure, start, end);                                \
 }
 
 #if INCLUDE_ALL_GCS
-#define OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(KlassType, OopClosureType, nv_suffix)          \
-int KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {  \
-  /* No reverse implementation ATM. */                                                   \
-  return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                          \
+#define OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(KlassType, OopClosureType, nv_suffix)           \
+void KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {  \
+  /* No reverse implementation ATM. */                                                    \
+  oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                                  \
 }
 #else
 #define OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(KlassType, OopClosureType, nv_suffix)
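
Each OOP_OOP_ITERATE_DEFN_* macro stamps out a thin public wrapper that forwards to a private template, with nvs_to_bool turning the `_nv`/`_v` suffix into the template's bool parameter (in HotSpot this is done by token-pasting to helper macros that expand to true/false). A hedged sketch of roughly what one expansion of OOP_OOP_ITERATE_DEFN_RANGE reads like after the void-return change, with KlassType and the closure reduced to stand-ins:

    #include <cstdio>

    struct oop_ {};           // stand-in for HotSpot's oop
    typedef oop_* oop;
    struct ScanClosure {};    // stand-in closure type

    struct ObjArrayKlassSketch {
      // The private template the macro forwards to; nv selects the
      // devirtualized (_nv) or virtual (_v) flavor at compile time.
      template <bool nv, class OopClosureType>
      void oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end) {
        std::printf("iterating [%d, %d), nv=%d\n", start, end, (int)nv);
      }

      // What OOP_OOP_ITERATE_DEFN_RANGE(ObjArrayKlassSketch, ScanClosure, _nv)
      // expands to, roughly: the wrapper now returns void, so no object size
      // is computed or propagated here.
      void oop_oop_iterate_range_nv(oop obj, ScanClosure* closure, int start, int end) {
        oop_oop_iterate_range<true /* nvs_to_bool(_nv) */>(obj, closure, start, end);
      }
    };

    int main() {
      ObjArrayKlassSketch k;
      ScanClosure cl;
      k.oop_oop_iterate_range_nv(NULL, &cl, 0, 4);
      return 0;
    }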
--- a/src/share/vm/oops/instanceClassLoaderKlass.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/oops/instanceClassLoaderKlass.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -51,7 +51,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -71,19 +70,19 @@
   // Forward iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
 #if INCLUDE_ALL_GCS
   // Reverse iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
 #endif
 
   // Bounded range iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+  inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
 
  public:
 
--- a/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -26,6 +26,7 @@
 #define SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_INLINE_HPP
 
 #include "classfile/javaClasses.hpp"
+#include "memory/iterator.inline.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -34,48 +35,42 @@
 #include "utilities/macros.hpp"
 
 template <bool nv, class OopClosureType>
-inline int InstanceClassLoaderKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
-  int size = InstanceKlass::oop_oop_iterate<nv>(obj, closure);
+inline void InstanceClassLoaderKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+  InstanceKlass::oop_oop_iterate<nv>(obj, closure);
 
   if (Devirtualizer<nv>::do_metadata(closure)) {
     ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj);
     // cld can be null if we have a non-registered class loader.
     if (cld != NULL) {
-      closure->do_class_loader_data(cld);
+      Devirtualizer<nv>::do_cld(closure, cld);
     }
   }
-
-  return size;
 }
 
 #if INCLUDE_ALL_GCS
 template <bool nv, class OopClosureType>
-inline int InstanceClassLoaderKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
-  int size = InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
+inline void InstanceClassLoaderKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
+  InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
 
   assert(!Devirtualizer<nv>::do_metadata(closure),
       "Code to handle metadata is not implemented");
-
-  return size;
 }
 #endif // INCLUDE_ALL_GCS
 
 
 template <bool nv, class OopClosureType>
-inline int InstanceClassLoaderKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
-  int size = InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
+inline void InstanceClassLoaderKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+  InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
 
   if (Devirtualizer<nv>::do_metadata(closure)) {
     if (mr.contains(obj)) {
       ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj);
       // cld can be null if we have a non-registered class loader.
       if (cld != NULL) {
-        closure->do_class_loader_data(cld);
+        Devirtualizer<nv>::do_cld(closure, cld);
       }
     }
   }
-
-  return size;
 }
 
 #define ALL_INSTANCE_CLASS_LOADER_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
--- a/src/share/vm/oops/instanceKlass.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/oops/instanceKlass.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -1014,7 +1014,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
--- a/src/share/vm/oops/instanceKlass.inline.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/oops/instanceKlass.inline.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -38,6 +38,8 @@
 // as the previous macro based implementation.
 #ifdef TARGET_COMPILER_visCPP
 #define INLINE __forceinline
+#elif defined(TARGET_COMPILER_sparcWorks)
+#define INLINE __attribute__((always_inline))
 #else
 #define INLINE inline
 #endif
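
The new branch gives the sparcWorks (Solaris Studio) compiler the same forced-inlining hint Visual C++ already had, since the iteration bodies in this file only pay off when actually inlined. A minimal sketch of the same idea using generic feature tests; the HotSpot header keys off its own TARGET_COMPILER_* defines instead:

    #include <cstdio>

    // Pick a forced-inline spelling per compiler, falling back to plain 'inline'.
    #if defined(_MSC_VER)
    #define INLINE __forceinline
    #elif defined(__GNUC__)
    #define INLINE inline __attribute__((always_inline))
    #else
    #define INLINE inline
    #endif

    // A tiny hot function; forcing inlining here mirrors what the iteration
    // bodies want, where devirtualization only helps if the template body is
    // inlined into the caller.
    INLINE int add_field_offset(int base, int offset) { return base + offset; }

    int main() {
      std::printf("%d\n", add_field_offset(16, 8));
      return 0;
    }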
--- a/src/share/vm/oops/instanceMirrorKlass.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/oops/instanceMirrorKlass.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -91,7 +91,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -121,21 +120,21 @@
   // Forward iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
 
   // Reverse iteration
 #if INCLUDE_ALL_GCS
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
 #endif
 
 
   // Bounded range iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+  inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
 
   // Iterate over the static fields.
   template <bool nv, class OopClosureType>
--- a/src/share/vm/oops/instanceMirrorKlass.inline.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/oops/instanceMirrorKlass.inline.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -53,30 +53,40 @@
 }
 
 template <bool nv, class OopClosureType>
-int InstanceMirrorKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+void InstanceMirrorKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
   InstanceKlass::oop_oop_iterate<nv>(obj, closure);
 
   if (Devirtualizer<nv>::do_metadata(closure)) {
     Klass* klass = java_lang_Class::as_Klass(obj);
     // We'll get NULL for primitive mirrors.
     if (klass != NULL) {
-      Devirtualizer<nv>::do_klass(closure, klass);
+      if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
+        // An anonymous class doesn't have its own class loader, so when handling
+        // the java mirror for an anonymous class we need to make sure its class
+        // loader data is claimed; this is done by calling do_cld explicitly.
+        // For non-anonymous classes the call to do_cld is made when the class
+        // loader itself is handled.
+        Devirtualizer<nv>::do_cld(closure, klass->class_loader_data());
+      } else {
+        Devirtualizer<nv>::do_klass(closure, klass);
+      }
+    } else {
+      // If klass is NULL then this is a mirror for a primitive type.
+      // We don't have to follow them, since they are handled as strong
+      // roots in Universe::oops_do.
+      assert(java_lang_Class::is_primitive(obj), "Sanity check");
     }
   }
 
   oop_oop_iterate_statics<nv>(obj, closure);
-
-  return oop_size(obj);
 }
 
 #if INCLUDE_ALL_GCS
 template <bool nv, class OopClosureType>
-int InstanceMirrorKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
+void InstanceMirrorKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
   InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
 
   InstanceMirrorKlass::oop_oop_iterate_statics<nv>(obj, closure);
-
-  return oop_size(obj);
 }
 #endif
 
@@ -115,7 +125,7 @@
 }
 
 template <bool nv, class OopClosureType>
-int InstanceMirrorKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+void InstanceMirrorKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
   InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
 
   if (Devirtualizer<nv>::do_metadata(closure)) {
@@ -129,8 +139,6 @@
   }
 
   oop_oop_iterate_statics_bounded<nv>(obj, closure, mr);
-
-  return oop_size(obj);
 }
 
 #define ALL_INSTANCE_MIRROR_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
--- a/src/share/vm/oops/instanceRefKlass.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/oops/instanceRefKlass.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -67,7 +67,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -88,19 +87,19 @@
 private:
   // Iterate over all oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
   // Reverse iteration
 #if INCLUDE_ALL_GCS
   // Iterate over all oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
 #endif // INCLUDE_ALL_GCS
 
   // Bounded range iteration
   // Iterate over all oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+  inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
 
   // Reference processing part of the iterators.
 
--- a/src/share/vm/oops/instanceRefKlass.inline.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/oops/instanceRefKlass.inline.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -106,37 +106,27 @@
 }
 
 template <bool nv, class OopClosureType>
-int InstanceRefKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
-  // Get size before changing pointers
-  int size = InstanceKlass::oop_oop_iterate<nv>(obj, closure);
+void InstanceRefKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+  InstanceKlass::oop_oop_iterate<nv>(obj, closure);
 
   oop_oop_iterate_ref_processing<nv>(obj, closure);
-
-  return size;
 }
 
 #if INCLUDE_ALL_GCS
 template <bool nv, class OopClosureType>
-int InstanceRefKlass::
-oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
-  // Get size before changing pointers
-  int size = InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
+void InstanceRefKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
+  InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
 
   oop_oop_iterate_ref_processing<nv>(obj, closure);
-
-  return size;
 }
 #endif // INCLUDE_ALL_GCS
 
 
 template <bool nv, class OopClosureType>
-int InstanceRefKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
-  // Get size before changing pointers
-  int size = InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
+void InstanceRefKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+  InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
 
   oop_oop_iterate_ref_processing_bounded<nv>(obj, closure, mr);
-
-  return size;
 }
 
 // Macro to define InstanceRefKlass::oop_oop_iterate for virtual/nonvirtual for
--- a/src/share/vm/oops/klass.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/oops/klass.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -572,7 +572,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  virtual void oop_ms_follow_contents(oop obj) = 0;
   virtual int  oop_ms_adjust_pointers(oop obj) = 0;
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -584,17 +583,17 @@
 
   // Iterators specialized to particular subtypes
   // of ExtendedOopClosure, to avoid closure virtual calls.
-#define Klass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix)                                          \
-  virtual int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) = 0;                        \
-  /* Iterates "closure" over all the oops in "obj" (of type "this") within "mr". */                    \
-  virtual int oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) = 0;
+#define Klass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix)                                           \
+  virtual void oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) = 0;                        \
+  /* Iterates "closure" over all the oops in "obj" (of type "this") within "mr". */                     \
+  virtual void oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) = 0;
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL)
 
 #if INCLUDE_ALL_GCS
-#define Klass_OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix)                    \
-  virtual int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) = 0;
+#define Klass_OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix)                     \
+  virtual void oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) = 0;
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL_BACKWARDS)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL_BACKWARDS)
@@ -661,35 +660,35 @@
 // Used to generate declarations in the *Klass header files.
 
 #define OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix)                                    \
-  int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure);                        \
-  int oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr);
+  void oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure);                        \
+  void oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr);
 
 #if INCLUDE_ALL_GCS
-#define OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix)              \
-  int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
+#define OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix)               \
+  void oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
 #endif // INCLUDE_ALL_GCS
 
 
 // Oop iteration macros for definitions.
 // Used to generate definitions in the *Klass.inline.hpp files.
 
-#define OOP_OOP_ITERATE_DEFN(KlassType, OopClosureType, nv_suffix)             \
-int KlassType::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) {  \
-  return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                \
+#define OOP_OOP_ITERATE_DEFN(KlassType, OopClosureType, nv_suffix)              \
+void KlassType::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) {  \
+  oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                        \
 }
 
 #if INCLUDE_ALL_GCS
-#define OOP_OOP_ITERATE_DEFN_BACKWARDS(KlassType, OopClosureType, nv_suffix)             \
-int KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {  \
-  return oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure);                  \
+#define OOP_OOP_ITERATE_DEFN_BACKWARDS(KlassType, OopClosureType, nv_suffix)              \
+void KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {  \
+  oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure);                          \
 }
 #else
 #define OOP_OOP_ITERATE_DEFN_BACKWARDS(KlassType, OopClosureType, nv_suffix)
 #endif
 
-#define OOP_OOP_ITERATE_DEFN_BOUNDED(KlassType, OopClosureType, nv_suffix)                           \
-int KlassType::oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) {  \
-  return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr);                          \
+#define OOP_OOP_ITERATE_DEFN_BOUNDED(KlassType, OopClosureType, nv_suffix)                            \
+void KlassType::oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) {  \
+  oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr);                                  \
 }
 
 #endif // SHARE_VM_OOPS_KLASS_HPP
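
The int-to-void change across all of these iterator declarations pairs with the very first hunk of this section, where `p += m->oop_iterate(blk)` became `p += m->oop_iterate_size(blk)`: callers that still need the object size ask for it explicitly, and the size is read before iteration since the closure may update the object's pointers. A hedged sketch of that split; the real oopDesc methods differ in detail:

    #include <cstdio>

    struct OopClosure {};  // stand-in

    struct OopSketch {
      int _words;

      int size() const { return _words; }  // object size in heap words

      // Iteration no longer computes or returns a size.
      void oop_iterate(OopClosure* blk) { /* apply blk to each oop field */ }

      // Wrapper for callers that advance a scan pointer: read the size before
      // iterating (the closure may change the object's pointers), then iterate.
      int oop_iterate_size(OopClosure* blk) {
        int sz = size();
        oop_iterate(blk);
        return sz;
      }
    };

    int main() {
      OopSketch obj = {8};
      OopClosure blk;
      int advanced = obj.oop_iterate_size(&blk);  // e.g. p += advanced in a space scan
      std::printf("advance by %d words\n", advanced);
      return 0;
    }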
--- a/src/share/vm/oops/objArrayKlass.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/oops/objArrayKlass.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -105,7 +105,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -125,15 +124,15 @@
 
   // Iterate over oop elements and metadata.
   template <bool nv, typename OopClosureType>
-  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
   // Iterate over oop elements within mr, and metadata.
   template <bool nv, typename OopClosureType>
-  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+  inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
 
   // Iterate over oop elements with indices within [start, end), and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end);
+  inline void oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end);
 
   // Iterate over oop elements within [start, end), and metadata.
   // Specialized for [T = oop] or [T = narrowOop].
--- a/src/share/vm/oops/objArrayKlass.inline.hpp	Fri Sep 18 10:46:35 2015 -0700
+++ b/src/share/vm/oops/objArrayKlass.inline.hpp	Fri Sep 18 14:21:46 2015 -0700
@@ -85,46 +85,31 @@
 }
 
 template <bool nv, typename OopClosureType>
-int ObjArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+void ObjArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
   assert (obj->is_array(), "obj must be array");
   objArrayOop a = objArrayOop(obj);
 
-  // Get size before changing pointers.
-  // Don't call size() or oop_size() since that is a virtual call.
-  int size = a->object_size();
   if (Devirtualizer<nv>::do_metadata(closure)) {
     Devirtualizer<nv>::do_klass(closure, obj->klass());
   }
 
   oop_oop_iterate_elements<nv>(a, closure);
-
-  return size;
 }
 
 template <bool nv, typename OopClosureType>
-int ObjArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+void ObjArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
   assert(obj->is_array(), "obj must be array");
   objArrayOop a  = objArrayOop(obj);
 
-  // Get size before changing pointers.
-  // Don't call size() or oop_size() since that is a virtual call
-  int size = a->object_size();
-
   if (Devirtualizer<nv>::do_metadata(closure)) {
     Devirtualizer<nv>::do_klass(closure, a->klass());
   }
 
   oop_oop_iterate_elements_bounded<nv>(a, closure, mr);
-
-  return size;
 }