changeset 11714:84d2d8d3656d

Merge
author aph
date Thu, 14 Jul 2016 15:18:15 +0100
parents cd2c2c56b089 f4c6ee42184c
children 313a38734b07
files test/compiler/c1/6478991/NullCheckTest.java test/compiler/c1/6579789/Test6579789.java test/compiler/c1/6756768/Test6756768.java test/compiler/c1/6756768/Test6756768_2.java test/compiler/c1/6757316/Test6757316.java test/compiler/c1/6758234/Test6758234.java test/compiler/c1/6769124/TestArrayCopy6769124.java test/compiler/c1/6769124/TestDeoptInt6769124.java test/compiler/c1/6769124/TestUnalignedLoad6769124.java test/compiler/c1/6795465/Test6795465.java test/compiler/c1/6849574/Test.java test/compiler/c1/6855215/Test6855215.java test/compiler/c1/6932496/Test6932496.java test/compiler/c1/7042153/Test7042153.java test/compiler/c1/7090976/Test7090976.java test/compiler/c1/7103261/Test7103261.java test/compiler/c1/7123108/Test7123108.java test/compiler/c1/8004051/Test8004051.java test/compiler/c1/8011706/Test8011706.java test/compiler/c1/8011771/Test8011771.java test/compiler/c2/5057225/Test5057225.java test/compiler/c2/5091921/Test5091921.java test/compiler/c2/5091921/Test6186134.java test/compiler/c2/5091921/Test6196102.java test/compiler/c2/5091921/Test6357214.java test/compiler/c2/5091921/Test6559156.java test/compiler/c2/5091921/Test6753639.java test/compiler/c2/5091921/Test6850611.java test/compiler/c2/5091921/Test6890943.java test/compiler/c2/5091921/Test6897150.java test/compiler/c2/5091921/Test6905845.java test/compiler/c2/5091921/Test6931567.java test/compiler/c2/5091921/Test6935022.java test/compiler/c2/5091921/Test6959129.java test/compiler/c2/5091921/Test6985295.java test/compiler/c2/5091921/Test6992759.java test/compiler/c2/5091921/Test7005594.java test/compiler/c2/5091921/Test7005594.sh test/compiler/c2/5091921/Test7020614.java test/compiler/c2/5091921/input6890943.txt test/compiler/c2/5091921/output6890943.txt test/compiler/c2/6340864/TestByteVect.java test/compiler/c2/6340864/TestDoubleVect.java test/compiler/c2/6340864/TestFloatVect.java test/compiler/c2/6340864/TestIntVect.java test/compiler/c2/6340864/TestLongVect.java test/compiler/c2/6340864/TestShortVect.java test/compiler/c2/6443505/Test6443505.java test/compiler/c2/6589834/InlinedArrayCloneTestCase.java test/compiler/c2/6589834/Test_ia32.java test/compiler/c2/6603011/Test.java test/compiler/c2/6636138/Test1.java test/compiler/c2/6636138/Test2.java test/compiler/c2/6646019/Test.java test/compiler/c2/6646020/Tester.java test/compiler/c2/6661247/Test.java test/compiler/c2/6663621/IVTest.java test/compiler/c2/6663848/Tester.java test/compiler/c2/6663854/Test6663854.java test/compiler/c2/6695810/Test.java test/compiler/c2/6700047/Test6700047.java test/compiler/c2/6711100/Test.java test/compiler/c2/6711117/Test.java test/compiler/c2/6712835/Test6712835.java test/compiler/c2/6714694/Tester.java test/compiler/c2/6724218/Test.java test/compiler/c2/6732154/Test6732154.java test/compiler/c2/6741738/Tester.java test/compiler/c2/6772683/InterruptedTest.java test/compiler/c2/6792161/Test6792161.java test/compiler/c2/6795362/Test6795362.java test/compiler/c2/6796786/Test6796786.java test/compiler/c2/6799693/Test.java test/compiler/c2/6800154/Test6800154.java test/compiler/c2/6805724/Test6805724.java test/compiler/c2/6823453/Test.java test/compiler/c2/6832293/Test.java test/compiler/c2/6837011/Test6837011.java test/compiler/c2/6837094/Test.java test/compiler/c2/6843752/Test.java test/compiler/c2/6851282/Test.java test/compiler/c2/6852078/Test6852078.java test/compiler/c2/6857159/Test6857159.java test/compiler/c2/6863155/Test6863155.java test/compiler/c2/6865031/Test.java test/compiler/c2/6866651/Test.java 
test/compiler/c2/6877254/Test.java test/compiler/c2/6880034/Test6880034.java test/compiler/c2/6885584/Test6885584.java test/compiler/c2/6894807/IsInstanceTest.java test/compiler/c2/6901572/Test.java test/compiler/c2/6910484/Test.java test/compiler/c2/6910605/Test.java test/compiler/c2/6910618/Test.java test/compiler/c2/6912517/Test.java test/compiler/c2/6916644/Test6916644.java test/compiler/c2/6921969/TestMultiplyLongHiZero.java test/compiler/c2/6930043/Test6930043.java test/compiler/c2/6946040/TestCharShortByteSwap.java test/compiler/c2/6956668/Test6956668.java test/compiler/c2/6958485/Test.java test/compiler/c2/6968348/Test6968348.java test/compiler/c2/6973329/Test.java test/compiler/c2/7002666/Test7002666.java test/compiler/c2/7009359/Test7009359.java test/compiler/c2/7017746/Test.java test/compiler/c2/7024475/Test7024475.java test/compiler/c2/7029152/Test.java test/compiler/c2/7041100/Test7041100.java test/compiler/c2/7046096/Test7046096.java test/compiler/c2/7047069/Test7047069.java test/compiler/c2/7048332/Test7048332.java test/compiler/c2/7068051/Test7068051.java test/compiler/c2/7070134/Stemmer.java test/compiler/c2/7070134/words test/compiler/c2/7110586/Test7110586.java test/compiler/c2/7125879/Test7125879.java test/compiler/c2/7160610/Test7160610.java test/compiler/c2/7169782/Test7169782.java test/compiler/c2/7174363/Test7174363.java test/compiler/c2/7177917/Test7177917.java test/compiler/c2/7179138/Test7179138_1.java test/compiler/c2/7179138/Test7179138_2.java test/compiler/c2/7190310/Test7190310.java test/compiler/c2/7190310/Test7190310_unsafe.java test/compiler/c2/7192963/TestByteVect.java test/compiler/c2/7192963/TestDoubleVect.java test/compiler/c2/7192963/TestFloatVect.java test/compiler/c2/7192963/TestIntVect.java test/compiler/c2/7192963/TestLongVect.java test/compiler/c2/7192963/TestShortVect.java test/compiler/c2/7199742/Test7199742.java test/compiler/c2/7200264/Test7200264.sh test/compiler/c2/7200264/TestIntVect.java test/compiler/c2/8000805/Test8000805.java test/compiler/c2/8002069/Test8002069.java test/compiler/c2/8004741/Test8004741.java test/compiler/c2/8004867/TestIntAtomicCAS.java test/compiler/c2/8004867/TestIntAtomicOrdered.java test/compiler/c2/8004867/TestIntAtomicVolatile.java test/compiler/c2/8004867/TestIntUnsafeCAS.java test/compiler/c2/8004867/TestIntUnsafeOrdered.java test/compiler/c2/8004867/TestIntUnsafeVolatile.java test/compiler/c2/8005956/PolynomialRoot.java test/compiler/c2/8007294/Test8007294.java test/compiler/c2/8007722/Test8007722.java test/compiler/codegen/6378821/Test6378821.java test/compiler/codegen/6431242/Test.java test/compiler/codegen/6797305/Test6797305.java test/compiler/codegen/6814842/Test6814842.java test/compiler/codegen/6823354/Test6823354.java test/compiler/codegen/6875866/Test.java test/compiler/codegen/6879902/Test6879902.java test/compiler/codegen/6896617/Test6896617.java test/compiler/codegen/6909839/Test6909839.java test/compiler/codegen/6935535/Test.java test/compiler/codegen/6942326/Test.java test/compiler/codegen/7009231/Test7009231.java test/compiler/codegen/7088419/CRCTest.java test/compiler/codegen/7100757/Test7100757.java test/compiler/codegen/7119644/TestBooleanVect.java test/compiler/codegen/7119644/TestByteDoubleVect.java test/compiler/codegen/7119644/TestByteFloatVect.java test/compiler/codegen/7119644/TestByteIntVect.java test/compiler/codegen/7119644/TestByteLongVect.java test/compiler/codegen/7119644/TestByteShortVect.java test/compiler/codegen/7119644/TestByteVect.java 
test/compiler/codegen/7119644/TestCharShortVect.java test/compiler/codegen/7119644/TestCharVect.java test/compiler/codegen/7119644/TestDoubleVect.java test/compiler/codegen/7119644/TestFloatDoubleVect.java test/compiler/codegen/7119644/TestFloatVect.java test/compiler/codegen/7119644/TestIntDoubleVect.java test/compiler/codegen/7119644/TestIntFloatVect.java test/compiler/codegen/7119644/TestIntLongVect.java test/compiler/codegen/7119644/TestIntVect.java test/compiler/codegen/7119644/TestLongDoubleVect.java test/compiler/codegen/7119644/TestLongFloatVect.java test/compiler/codegen/7119644/TestLongVect.java test/compiler/codegen/7119644/TestShortDoubleVect.java test/compiler/codegen/7119644/TestShortFloatVect.java test/compiler/codegen/7119644/TestShortIntVect.java test/compiler/codegen/7119644/TestShortLongVect.java test/compiler/codegen/7119644/TestShortVect.java test/compiler/codegen/7184394/TestAESBase.java test/compiler/codegen/7184394/TestAESDecode.java test/compiler/codegen/7184394/TestAESEncode.java test/compiler/codegen/7184394/TestAESMain.java test/compiler/codegen/8001183/TestCharVect.java test/compiler/codegen/8005033/Test8005033.java test/compiler/codegen/8011901/Test8011901.java test/compiler/codegen/8144028/BitTests.java test/compiler/eliminateAutobox/6934604/TestByteBoxing.java test/compiler/eliminateAutobox/6934604/TestDoubleBoxing.java test/compiler/eliminateAutobox/6934604/TestFloatBoxing.java test/compiler/eliminateAutobox/6934604/TestIntBoxing.java test/compiler/eliminateAutobox/6934604/TestLongBoxing.java test/compiler/eliminateAutobox/6934604/TestShortBoxing.java test/compiler/escapeAnalysis/6689060/Test.java test/compiler/escapeAnalysis/6716441/Tester.java test/compiler/escapeAnalysis/6726999/Test.java test/compiler/escapeAnalysis/6775880/Test.java test/compiler/escapeAnalysis/6795161/Test.java test/compiler/escapeAnalysis/6895383/Test.java test/compiler/escapeAnalysis/6896727/Test.java test/compiler/interpreter/6539464/Test.java test/compiler/interpreter/6833129/Test.java test/compiler/interpreter/7116216/LargeFrame.java test/compiler/interpreter/7116216/StackOverflow.java test/compiler/intrinsics/6982370/Test6982370.java test/compiler/intrinsics/8005419/Test8005419.java test/compiler/intrinsics/adler32/TestAdler32.java test/compiler/intrinsics/class/TestClassIsPrimitive.java test/compiler/intrinsics/classcast/NullCheckDroppingsTest.java test/compiler/intrinsics/clone/TestObjectClone.java test/compiler/intrinsics/crc32/TestCRC32.java test/compiler/intrinsics/crc32c/TestCRC32C.java test/compiler/intrinsics/hashcode/TestHashCode.java test/compiler/intrinsics/montgomerymultiply/MontgomeryMultiplyTest.java test/compiler/intrinsics/muladd/TestMulAdd.java test/compiler/intrinsics/multiplytolen/TestMultiplyToLen.java test/compiler/intrinsics/multiplytolen/TestMultiplyToLenReturnProfile.java test/compiler/intrinsics/squaretolen/TestSquareToLen.java test/compiler/intrinsics/stringequals/TestStringEqualsBadLength.java test/compiler/jsr292/6990212/Test6990212.java test/compiler/jsr292/7082949/Test7082949.java test/compiler/loopopts/6659207/Test.java test/compiler/loopopts/6855164/Test.java test/compiler/loopopts/6860469/Test.java test/compiler/loopopts/7044738/Test7044738.java test/compiler/loopopts/7052494/Test7052494.java test/compiler/native/TestDirtyInt.java test/compiler/native/libTestDirtyInt.c test/compiler/runtime/6778657/Test.java test/compiler/runtime/6826736/Test.java test/compiler/runtime/6859338/Test6859338.java test/compiler/runtime/6863420/Test.java 
test/compiler/runtime/6865265/StackOverflowBug.java test/compiler/runtime/6891750/Test6891750.java test/compiler/runtime/6892265/Test.java test/compiler/runtime/7088020/Test7088020.java test/compiler/runtime/7141637/SpreadNullArg.java test/compiler/runtime/7196199/Test7196199.java test/compiler/runtime/8010927/Test8010927.java test/compiler/runtime/8015436/Test8015436.java test/compiler/uncommontrap/8009761/Test8009761.java
diffstat 1289 files changed, 176878 insertions(+), 170827 deletions(-)
--- a/.hgtags	Wed Jul 13 15:19:34 2016 +0100
+++ b/.hgtags	Thu Jul 14 15:18:15 2016 +0100
@@ -527,3 +527,5 @@
 af6b4ad908e732d23021f12e8322b204433d5cf6 jdk-9+122
 75f81e1fecfb444f34f357295fe06af60e2762d9 jdk-9+123
 479631362b4930be985245ea063d87d821a472eb jdk-9+124
+bb640b49741af3f57f9994129934c46fc173219f jdk-9+125
+adc8c84b7cf8c540d920182f78a2bc982366432a jdk-9+126
--- a/make/gensrc/GensrcDtrace.gmk	Wed Jul 13 15:19:34 2016 +0100
+++ b/make/gensrc/GensrcDtrace.gmk	Thu Jul 14 15:18:15 2016 +0100
@@ -45,7 +45,8 @@
   $(DTRACE_GENSRC_DIR)/%.h: $(DTRACE_SOURCE_DIR)/%.d
 	$(call LogInfo, Generating dtrace header file $(@F))
 	$(call MakeDir, $(@D) $(DTRACE_SUPPORT_DIR))
-	$(call ExecuteWithLog, $(DTRACE_SUPPORT_DIR)/$(@F).d, $(CC) -E $(DTRACE_CPP_FLAGS) $< > $(DTRACE_SUPPORT_DIR)/$(@F).d)
+	$(call ExecuteWithLog, $(DTRACE_SUPPORT_DIR)/$(@F).d, \
+	    ( $(CC) -E $(DTRACE_CPP_FLAGS) $< > $(DTRACE_SUPPORT_DIR)/$(@F).d ) )
 	$(call ExecuteWithLog, $@, $(DTRACE) $(DTRACE_FLAGS) -h -o $@ -s $(DTRACE_SUPPORT_DIR)/$(@F).d)
 
   # Process all .d files in DTRACE_SOURCE_DIR. They are:
--- a/make/lib/CompileDtracePostJvm.gmk	Wed Jul 13 15:19:34 2016 +0100
+++ b/make/lib/CompileDtracePostJvm.gmk	Thu Jul 14 15:18:15 2016 +0100
@@ -68,7 +68,7 @@
       $1: $$(BUILD_DTRACE_GEN_OFFSETS)
 	$$(call LogInfo, Generating dtrace $2 file $$(@F))
 	$$(call MakeDir, $$(@D))
-	$$(call ExecuteWithLog, $$@, $$(DTRACE_GEN_OFFSETS_TOOL) -$$(strip $2) > $$@)
+	$$(call ExecuteWithLog, $$@, ( $$(DTRACE_GEN_OFFSETS_TOOL) -$$(strip $2) > $$@ ) )
 
       TARGETS += $1
     endef
--- a/make/test/JtregNative.gmk	Wed Jul 13 15:19:34 2016 +0100
+++ b/make/test/JtregNative.gmk	Thu Jul 14 15:18:15 2016 +0100
@@ -50,8 +50,9 @@
     $(HOTSPOT_TOPDIR)/test/runtime/BoolReturn \
     $(HOTSPOT_TOPDIR)/test/compiler/floatingpoint/ \
     $(HOTSPOT_TOPDIR)/test/compiler/calls \
-    $(HOTSPOT_TOPDIR)/test/compiler/native \
+    $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/GetNamedModule \
     $(HOTSPOT_TOPDIR)/test/testlibrary/jvmti \
+    $(HOTSPOT_TOPDIR)/test/compiler/jvmci/jdk.vm.ci.code.test \
     #
 
 # Add conditional directories here when needed.
@@ -64,6 +65,7 @@
 ifeq ($(TOOLCHAIN_TYPE), solstudio)
     BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_liboverflow := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libSimpleClassFileLoadHook := -lc
+    BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libGetNamedModuleTest := -lc
 endif
 
 BUILD_HOTSPOT_JTREG_OUTPUT_DIR := $(BUILD_OUTPUT)/support/test/hotspot/jtreg/native
--- a/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -2434,7 +2434,7 @@
   __ ldrsb(r0, field);
   __ push(ztos);
   // Rewrite bytecode to be faster
-  if (!is_static) {
+  if (rc == may_rewrite) {
     // use btos rewriting, no truncating to t/f bit is needed for getfield.
     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
   }
@@ -2670,7 +2670,7 @@
     if (!is_static) pop_and_check_object(obj);
     __ andw(r0, r0, 0x1);
     __ strb(r0, field);
-    if (!is_static) {
+    if (rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
     }
     __ b(Done);
--- a/src/cpu/sparc/vm/globalDefinitions_sparc.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/cpu/sparc/vm/globalDefinitions_sparc.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -32,7 +32,7 @@
 
 // Indicates whether the C calling conventions require that
 // 32-bit integer argument values are extended to 64 bits.
-const bool CCallingConventionRequiresIntsAsLongs = false;
+const bool CCallingConventionRequiresIntsAsLongs = true;
 
 #define SUPPORTS_NATIVE_CX8
 
--- a/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot.sparc/src/jdk/vm/ci/hotspot/sparc/SPARCHotSpotRegisterConfig.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot.sparc/src/jdk/vm/ci/hotspot/sparc/SPARCHotSpotRegisterConfig.java	Thu Jul 14 15:18:15 2016 +0100
@@ -26,17 +26,41 @@
 import static jdk.vm.ci.meta.Value.ILLEGAL;
 import static jdk.vm.ci.sparc.SPARC.REGISTER_SAFE_AREA_SIZE;
 import static jdk.vm.ci.sparc.SPARC.d0;
+import static jdk.vm.ci.sparc.SPARC.d10;
+import static jdk.vm.ci.sparc.SPARC.d12;
+import static jdk.vm.ci.sparc.SPARC.d14;
+import static jdk.vm.ci.sparc.SPARC.d16;
+import static jdk.vm.ci.sparc.SPARC.d18;
 import static jdk.vm.ci.sparc.SPARC.d2;
+import static jdk.vm.ci.sparc.SPARC.d20;
+import static jdk.vm.ci.sparc.SPARC.d22;
+import static jdk.vm.ci.sparc.SPARC.d24;
+import static jdk.vm.ci.sparc.SPARC.d26;
+import static jdk.vm.ci.sparc.SPARC.d28;
+import static jdk.vm.ci.sparc.SPARC.d30;
 import static jdk.vm.ci.sparc.SPARC.d4;
 import static jdk.vm.ci.sparc.SPARC.d6;
+import static jdk.vm.ci.sparc.SPARC.d8;
 import static jdk.vm.ci.sparc.SPARC.f0;
 import static jdk.vm.ci.sparc.SPARC.f1;
+import static jdk.vm.ci.sparc.SPARC.f11;
+import static jdk.vm.ci.sparc.SPARC.f13;
+import static jdk.vm.ci.sparc.SPARC.f15;
+import static jdk.vm.ci.sparc.SPARC.f17;
+import static jdk.vm.ci.sparc.SPARC.f19;
 import static jdk.vm.ci.sparc.SPARC.f2;
+import static jdk.vm.ci.sparc.SPARC.f21;
+import static jdk.vm.ci.sparc.SPARC.f23;
+import static jdk.vm.ci.sparc.SPARC.f25;
+import static jdk.vm.ci.sparc.SPARC.f27;
+import static jdk.vm.ci.sparc.SPARC.f29;
 import static jdk.vm.ci.sparc.SPARC.f3;
+import static jdk.vm.ci.sparc.SPARC.f31;
 import static jdk.vm.ci.sparc.SPARC.f4;
 import static jdk.vm.ci.sparc.SPARC.f5;
 import static jdk.vm.ci.sparc.SPARC.f6;
 import static jdk.vm.ci.sparc.SPARC.f7;
+import static jdk.vm.ci.sparc.SPARC.f9;
 import static jdk.vm.ci.sparc.SPARC.g0;
 import static jdk.vm.ci.sparc.SPARC.g2;
 import static jdk.vm.ci.sparc.SPARC.g6;
@@ -95,11 +119,6 @@
 
     private final RegisterAttributes[] attributesMap;
 
-    /**
-     * Does native code (C++ code) spill arguments in registers to the parent frame?
-     */
-    private final boolean addNativeRegisterArgumentSlots;
-
     @Override
     public RegisterArray getAllocatableRegisters() {
         return allocatable;
@@ -124,10 +143,18 @@
     private final RegisterArray cpuCallerParameterRegisters = new RegisterArray(o0, o1, o2, o3, o4, o5);
     private final RegisterArray cpuCalleeParameterRegisters = new RegisterArray(i0, i1, i2, i3, i4, i5);
 
-    private final RegisterArray fpuFloatParameterRegisters = new RegisterArray(f0, f1, f2, f3, f4, f5, f6, f7);
-    private final RegisterArray fpuDoubleParameterRegisters = new RegisterArray(d0, null, d2, null, d4, null, d6, null);
+    private final RegisterArray fpuFloatJavaParameterRegisters = new RegisterArray(f0, f1, f2, f3, f4, f5, f6, f7);
+    private final RegisterArray fpuDoubleJavaParameterRegisters = new RegisterArray(d0, null, d2, null, d4, null, d6, null);
 
     // @formatter:off
+    private final RegisterArray fpuFloatNativeParameterRegisters = new RegisterArray(
+                    f1,   f3,  f5,  f7,  f9, f11, f13, f15,
+                    f17, f19, f21, f23, f25, f27, f29, f31);
+
+    private final RegisterArray fpuDoubleNativeParameterRegisters = new RegisterArray(
+                     d0,  d2,  d4,  d6,  d8, d10, d12, d14,
+                    d16, d18, d20, d22, d24, d26, d28, d30);
+
     private final RegisterArray callerSaveRegisters;
 
     /**
@@ -170,7 +197,6 @@
     public SPARCHotSpotRegisterConfig(TargetDescription target, RegisterArray allocatable) {
         this.target = target;
         this.allocatable = allocatable;
-        this.addNativeRegisterArgumentSlots = false;
         HashSet<Register> callerSaveSet = new HashSet<>(target.arch.getAvailableValueRegisters().asList());
         for (Register cs : windowSaveRegisters) {
             callerSaveSet.remove(cs);
@@ -220,7 +246,7 @@
                 return hotspotType == HotSpotCallingConventionType.JavaCallee ? cpuCalleeParameterRegisters : cpuCallerParameterRegisters;
             case Double:
             case Float:
-                return fpuFloatParameterRegisters;
+                return fpuFloatJavaParameterRegisters;
             default:
                 throw JVMCIError.shouldNotReachHere("Unknown JavaKind " + kind);
         }
@@ -233,48 +259,77 @@
         int currentGeneral = 0;
         int currentFloating = 0;
         int currentStackOffset = 0;
+        boolean isNative = type == HotSpotCallingConventionType.NativeCall;
 
         for (int i = 0; i < parameterTypes.length; i++) {
             final JavaKind kind = parameterTypes[i].getJavaKind().getStackKind();
-
-            switch (kind) {
-                case Byte:
-                case Boolean:
-                case Short:
-                case Char:
-                case Int:
-                case Long:
-                case Object:
-                    if (currentGeneral < generalParameterRegisters.size()) {
-                        Register register = generalParameterRegisters.get(currentGeneral++);
-                        locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
-                    }
-                    break;
-                case Double:
-                    if (currentFloating < fpuFloatParameterRegisters.size()) {
-                        if (currentFloating % 2 != 0) {
-                            // Make register number even to be a double reg
-                            currentFloating++;
+            if (isNative) {
+                RegisterArray registerSet;
+                switch (kind) {
+                    case Byte:
+                    case Boolean:
+                    case Short:
+                    case Char:
+                    case Int:
+                    case Long:
+                    case Object:
+                        registerSet = generalParameterRegisters;
+                        break;
+                    case Double:
+                        registerSet = fpuDoubleNativeParameterRegisters;
+                        break;
+                    case Float:
+                        registerSet = fpuFloatNativeParameterRegisters;
+                        break;
+                    default:
+                        throw JVMCIError.shouldNotReachHere();
+                }
+                if (i < registerSet.size()) {
+                    locations[i] = registerSet.get(i).asValue(valueKindFactory.getValueKind(kind));
+                    currentStackOffset += target.arch.getWordSize();
+                }
+            } else {
+                switch (kind) {
+                    case Byte:
+                    case Boolean:
+                    case Short:
+                    case Char:
+                    case Int:
+                    case Long:
+                    case Object:
+                        if (currentGeneral < generalParameterRegisters.size()) {
+                            Register register = generalParameterRegisters.get(currentGeneral++);
+                            locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
                         }
-                        Register register = fpuDoubleParameterRegisters.get(currentFloating);
-                        currentFloating += 2; // Only every second is a double register
-                        locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
-                    }
-                    break;
-                case Float:
-                    if (currentFloating < fpuFloatParameterRegisters.size()) {
-                        Register register = fpuFloatParameterRegisters.get(currentFloating++);
-                        locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
-                    }
-                    break;
-                default:
-                    throw JVMCIError.shouldNotReachHere();
+                        break;
+                    case Double:
+                        if (currentFloating < fpuFloatJavaParameterRegisters.size()) {
+                            if (currentFloating % 2 != 0) {
+                                // Make register number even to be a double reg
+                                currentFloating++;
+                            }
+                            Register register = fpuDoubleJavaParameterRegisters.get(currentFloating);
+                            currentFloating += 2; // Only every second is a double register
+                            locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
+                        }
+                        break;
+                    case Float:
+                        if (currentFloating < fpuFloatJavaParameterRegisters.size()) {
+                            Register register = fpuFloatJavaParameterRegisters.get(currentFloating++);
+                            locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
+                        }
+                        break;
+                    default:
+                        throw JVMCIError.shouldNotReachHere();
+                }
             }
 
             if (locations[i] == null) {
                 ValueKind<?> valueKind = valueKindFactory.getValueKind(kind);
-                // Stack slot is always aligned to its size in bytes but minimum wordsize
                 int typeSize = valueKind.getPlatformKind().getSizeInBytes();
+                if (isNative) {
+                    currentStackOffset += target.arch.getWordSize() - typeSize;
+                }
                 currentStackOffset = roundUp(currentStackOffset, typeSize);
                 int slotOffset = currentStackOffset + REGISTER_SAFE_AREA_SIZE;
                 locations[i] = StackSlot.get(valueKind, slotOffset, !type.out);
@@ -284,15 +339,7 @@
 
         JavaKind returnKind = returnType == null ? Void : returnType.getJavaKind();
         AllocatableValue returnLocation = returnKind == Void ? ILLEGAL : getReturnRegister(returnKind, type).asValue(valueKindFactory.getValueKind(returnKind.getStackKind()));
-
-        int outArgSpillArea;
-        if (type == HotSpotCallingConventionType.NativeCall && addNativeRegisterArgumentSlots) {
-            // Space for native callee which may spill our outgoing arguments
-            outArgSpillArea = Math.min(locations.length, generalParameterRegisters.size()) * target.wordSize;
-        } else {
-            outArgSpillArea = 0;
-        }
-        return new CallingConvention(currentStackOffset + outArgSpillArea, returnLocation, locations);
+        return new CallingConvention(currentStackOffset, returnLocation, locations);
     }
 
     private static int roundUp(int number, int mod) {
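The SPARCHotSpotRegisterConfig hunk above separates the Java and native floating-point parameter register sets and, for a native call, picks the register by the argument's position instead of the running floating-point counter used for Java calls. Below is a minimal, self-contained sketch of that selection step only, with register names as plain strings; the class and method names are hypothetical, and the real code works with jdk.vm.ci.code.Register/RegisterArray and also tracks stack offsets for arguments that do not fit in registers.

import java.util.Arrays;
import java.util.List;

// Register names are plain strings here; the real code uses jdk.vm.ci.code.Register.
public class SparcFpArgSketch {

    // Native float/double argument register sets, as listed in the hunk above.
    static final List<String> FLOAT_NATIVE = Arrays.asList(
            "f1",  "f3",  "f5",  "f7",  "f9",  "f11", "f13", "f15",
            "f17", "f19", "f21", "f23", "f25", "f27", "f29", "f31");
    static final List<String> DOUBLE_NATIVE = Arrays.asList(
            "d0",  "d2",  "d4",  "d6",  "d8",  "d10", "d12", "d14",
            "d16", "d18", "d20", "d22", "d24", "d26", "d28", "d30");

    // For a native call the register is chosen by the argument's index itself;
    // the Java convention instead advances a separate floating-point counter.
    static String nativeFpLocation(int argIndex, boolean isDouble) {
        List<String> set = isDouble ? DOUBLE_NATIVE : FLOAT_NATIVE;
        return argIndex < set.size() ? set.get(argIndex) : "stack slot";
    }

    public static void main(String[] args) {
        System.out.println(nativeFpLocation(0, false)); // f1
        System.out.println(nativeFpLocation(3, true));  // d6
    }
}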
--- a/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotJVMCICompilerConfig.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotJVMCICompilerConfig.java	Thu Jul 14 15:18:15 2016 +0100
@@ -32,6 +32,11 @@
 
 final class HotSpotJVMCICompilerConfig {
 
+    /**
+     * This factory allows JVMCI initialization to succeed but raises an error if the VM asks JVMCI
+     * to perform a compilation. This allows the reflective parts of the JVMCI API to be used
+     * without requiring a compiler implementation to be available.
+     */
     private static class DummyCompilerFactory extends JVMCICompilerFactory implements JVMCICompiler {
 
         public HotSpotCompilationRequestResult compileMethod(CompilationRequest request) {
@@ -67,7 +72,6 @@
                 for (JVMCICompilerFactory f : Services.load(JVMCICompilerFactory.class)) {
                     if (f.getCompilerName().equals(compilerName)) {
                         Services.exportJVMCITo(f.getClass());
-                        f.onSelection();
                         factory = f;
                     }
                 }
@@ -75,8 +79,21 @@
                     throw new JVMCIError("JVMCI compiler '%s' not found", compilerName);
                 }
             } else {
-                factory = new DummyCompilerFactory();
+                // Auto select a single available compiler
+                for (JVMCICompilerFactory f : Services.load(JVMCICompilerFactory.class)) {
+                    if (factory == null) {
+                        factory = f;
+                    } else {
+                        // Multiple factories seen - cancel auto selection
+                        factory = null;
+                        break;
+                    }
+                }
+                if (factory == null) {
+                    factory = new DummyCompilerFactory();
+                }
             }
+            factory.onSelection();
             compilerFactory = factory;
         }
         return compilerFactory;
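The change above makes JVMCI auto-select a compiler factory when exactly one is found on the service path, fall back to the DummyCompilerFactory otherwise, and call onSelection() on whichever factory wins. A small sketch of that selection pattern using the standard java.util.ServiceLoader; the real code goes through JVMCI's internal Services.load, and the CompilerFactory/DummyFactory types here are illustrative stand-ins.

import java.util.ServiceLoader;

// Hypothetical stand-ins for JVMCICompilerFactory and DummyCompilerFactory.
interface CompilerFactory {
    String getCompilerName();
    default void onSelection() { /* one-time initialization hook */ }
}

final class DummyFactory implements CompilerFactory {
    public String getCompilerName() { return "null"; }
}

public class FactorySelection {

    // Auto-select a factory only when exactly one provider is on the service
    // path; otherwise fall back to a dummy, and notify whichever one was chosen.
    static CompilerFactory selectFactory() {
        CompilerFactory factory = null;
        for (CompilerFactory f : ServiceLoader.load(CompilerFactory.class)) {
            if (factory == null) {
                factory = f;        // first candidate seen
            } else {
                factory = null;     // multiple candidates - cancel auto selection
                break;
            }
        }
        if (factory == null) {
            factory = new DummyFactory();
        }
        factory.onSelection();      // now called on every selection path
        return factory;
    }

    public static void main(String[] args) {
        System.out.println(selectFactory().getCompilerName());
    }
}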
--- a/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotMethodHandleAccessProvider.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotMethodHandleAccessProvider.java	Thu Jul 14 15:18:15 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,10 +28,12 @@
 import jdk.vm.ci.common.JVMCIError;
 import jdk.vm.ci.meta.ConstantReflectionProvider;
 import jdk.vm.ci.meta.JavaConstant;
+import jdk.vm.ci.meta.JavaKind;
 import jdk.vm.ci.meta.MethodHandleAccessProvider;
 import jdk.vm.ci.meta.ResolvedJavaField;
 import jdk.vm.ci.meta.ResolvedJavaMethod;
 import jdk.vm.ci.meta.ResolvedJavaType;
+import jdk.vm.ci.meta.Signature;
 
 public class HotSpotMethodHandleAccessProvider implements MethodHandleAccessProvider {
 
@@ -51,46 +53,80 @@
         static final ResolvedJavaMethod lambdaFormCompileToBytecodeMethod;
         static final HotSpotResolvedJavaField memberNameVmtargetField;
 
+        static final ResolvedJavaType CLASS = fromObjectClass(LazyInitialization.class);
+
         /**
          * Search for an instance field with the given name in a class.
          *
          * @param className name of the class to search in
          * @param fieldName name of the field to be searched
-         * @return resolved java field
+         * @param fieldType resolved Java type of the field
+         * @return resolved Java field
          * @throws ClassNotFoundException
+         * @throws NoSuchFieldError
          */
-        private static ResolvedJavaField findFieldInClass(String className, String fieldName) throws ClassNotFoundException {
+        private static ResolvedJavaField findFieldInClass(String className, String fieldName, ResolvedJavaType fieldType)
+                throws ClassNotFoundException {
             Class<?> clazz = Class.forName(className);
             ResolvedJavaType type = runtime().fromClass(clazz);
             ResolvedJavaField[] fields = type.getInstanceFields(false);
             for (ResolvedJavaField field : fields) {
-                if (field.getName().equals(fieldName)) {
+                if (field.getName().equals(fieldName) && field.getType().equals(fieldType)) {
                     return field;
                 }
             }
-            return null;
+            throw new NoSuchFieldError(fieldType.getName() + " " + className + "." + fieldName);
         }
 
-        private static ResolvedJavaMethod findMethodInClass(String className, String methodName) throws ClassNotFoundException {
+        private static ResolvedJavaMethod findMethodInClass(String className, String methodName,
+                ResolvedJavaType resultType, ResolvedJavaType[] parameterTypes) throws ClassNotFoundException {
             Class<?> clazz = Class.forName(className);
             HotSpotResolvedObjectTypeImpl type = fromObjectClass(clazz);
             ResolvedJavaMethod result = null;
             for (ResolvedJavaMethod method : type.getDeclaredMethods()) {
-                if (method.getName().equals(methodName)) {
-                    assert result == null : "more than one method found: " + className + "." + methodName;
+                if (method.getName().equals(methodName) && signatureMatches(method, resultType, parameterTypes)) {
                     result = method;
                 }
             }
-            assert result != null : "method not found: " + className + "." + methodName;
+            if (result == null) {
+                StringBuilder sig = new StringBuilder("(");
+                for (ResolvedJavaType t : parameterTypes) {
+                    sig.append(t.getName()).append(",");
+                }
+                if (sig.length() > 1) {
+                    sig.replace(sig.length() - 1, sig.length(), ")");
+                } else {
+                    sig.append(')');
+                }
+                throw new NoSuchMethodError(resultType.getName() + " " + className + "." + methodName + sig.toString());
+            }
             return result;
         }
 
+        private static boolean signatureMatches(ResolvedJavaMethod m, ResolvedJavaType resultType,
+                ResolvedJavaType[] parameterTypes) {
+            Signature s = m.getSignature();
+            if (!s.getReturnType(CLASS).equals(resultType)) {
+                return false;
+            }
+            for (int i = 0; i < s.getParameterCount(false); ++i) {
+                if (!s.getParameterType(i, CLASS).equals(parameterTypes[i])) {
+                    return false;
+                }
+            }
+            return true;
+        }
+
         static {
             try {
-                methodHandleFormField = findFieldInClass("java.lang.invoke.MethodHandle", "form");
-                lambdaFormVmentryField = findFieldInClass("java.lang.invoke.LambdaForm", "vmentry");
-                lambdaFormCompileToBytecodeMethod = findMethodInClass("java.lang.invoke.LambdaForm", "compileToBytecode");
-                memberNameVmtargetField = (HotSpotResolvedJavaField) findFieldInClass("java.lang.invoke.MemberName", "vmtarget");
+                methodHandleFormField = findFieldInClass("java.lang.invoke.MethodHandle", "form",
+                    fromObjectClass(Class.forName("java.lang.invoke.LambdaForm")));
+                lambdaFormVmentryField = findFieldInClass("java.lang.invoke.LambdaForm", "vmentry",
+                    fromObjectClass(Class.forName("java.lang.invoke.MemberName")));
+                lambdaFormCompileToBytecodeMethod = findMethodInClass("java.lang.invoke.LambdaForm", "compileToBytecode",
+                    new HotSpotResolvedPrimitiveType(JavaKind.Void), new ResolvedJavaType[]{});
+                memberNameVmtargetField = (HotSpotResolvedJavaField) findFieldInClass("java.lang.invoke.MemberName", "vmtarget",
+                    new HotSpotResolvedPrimitiveType(JavaKind.Long));
             } catch (Throwable ex) {
                 throw new JVMCIError(ex);
             }
@@ -134,14 +170,12 @@
             return null;
         }
 
-        JavaConstant memberName;
         if (forceBytecodeGeneration) {
             /* Invoke non-public method: MemberName LambdaForm.compileToBytecode() */
-            memberName = LazyInitialization.lambdaFormCompileToBytecodeMethod.invoke(lambdaForm, new JavaConstant[0]);
-        } else {
-            /* Load non-public field: MemberName LambdaForm.vmentry */
-            memberName = constantReflection.readFieldValue(LazyInitialization.lambdaFormVmentryField, lambdaForm);
+            LazyInitialization.lambdaFormCompileToBytecodeMethod.invoke(lambdaForm, new JavaConstant[0]);
         }
+        /* Load non-public field: MemberName LambdaForm.vmentry */
+        JavaConstant memberName = constantReflection.readFieldValue(LazyInitialization.lambdaFormVmentryField, lambdaForm);
         return getTargetMethod(memberName);
     }
 
@@ -163,3 +197,4 @@
         return compilerToVM().getResolvedJavaMethod(object, LazyInitialization.memberNameVmtargetField.offset());
     }
 }
+
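The lookup helpers above now match on the field's declared type (or a method's full signature) and throw NoSuchFieldError/NoSuchMethodError instead of returning null or relying on asserts. A rough equivalent of the stricter field lookup using plain java.lang.reflect rather than the JVMCI ResolvedJavaField/ResolvedJavaType API; the Holder class and its field names are invented purely for illustration.

import java.lang.reflect.Field;
import java.lang.reflect.Modifier;

public class TypedFieldLookup {

    // Purely illustrative target class.
    static class Holder {
        String name;
        int count;
    }

    // Find an instance field by name *and* declared type, and fail loudly with
    // NoSuchFieldError instead of returning null when nothing matches.
    static Field findFieldInClass(String className, String fieldName, Class<?> fieldType)
            throws ClassNotFoundException {
        Class<?> clazz = Class.forName(className);
        for (Field field : clazz.getDeclaredFields()) {
            if (Modifier.isStatic(field.getModifiers())) {
                continue;  // only instance fields, as in the original lookup
            }
            if (field.getName().equals(fieldName) && field.getType().equals(fieldType)) {
                return field;
            }
        }
        throw new NoSuchFieldError(fieldType.getName() + " " + className + "." + fieldName);
    }

    public static void main(String[] args) throws Exception {
        // Matches: both the name and the declared type agree.
        System.out.println(findFieldInClass("TypedFieldLookup$Holder", "name", String.class));
        // Would throw NoSuchFieldError: the name exists but the type does not match.
        // findFieldInClass("TypedFieldLookup$Holder", "count", long.class);
    }
}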
--- a/src/os/aix/vm/libo4.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/os/aix/vm/libo4.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,17 +22,49 @@
  *
  */
 
-// This is only a stub. Will flesh out later when/if we add further support
-// for PASE.
-
 #include "libo4.hpp"
 
-bool libo4::init() { return false; }
-void libo4::cleanup() {}
-bool libo4::get_memory_info (unsigned long long* p_virt_total, unsigned long long* p_real_total,
-  unsigned long long* p_real_free, unsigned long long* p_pgsp_total, unsigned long long* p_pgsp_free) {
+// global variables
+
+// whether initialization worked
+static bool g_initialized = false;
+
+//////////////////////////
+//  class libo4 - impl  //
+//////////////////////////
+
+bool libo4::init() {
+  if (g_initialized) {
+    return true;
+  }
   return false;
 }
-bool libo4::get_load_avg (double* p_avg1, double* p_avg5, double* p_avg15) { return false; }
-bool libo4::realpath (const char* file_name, char* resolved_name, int resolved_name_len) { return false; }
 
+void libo4::cleanup() {
+  if (g_initialized) {
+    g_initialized = false;
+  }
+}
+
+bool libo4::get_memory_info(unsigned long long* p_virt_total,
+                            unsigned long long* p_real_total,
+                            unsigned long long* p_real_free,
+                            unsigned long long* p_pgsp_total,
+                            unsigned long long* p_pgsp_free) {
+  return false;
+}
+
+bool libo4::get_load_avg(double* p_avg1, double* p_avg5, double* p_avg15) {
+  return false;
+}
+
+bool libo4::realpath(const char* file_name, char* resolved_name,
+                     int resolved_name_len) {
+  return false;
+}
+
+bool libo4::removeEscapeMessageFromJoblogByContext(const void* context) {
+  // Note: no tracing here! We run in signal handling context
+
+  return false;
+}
--- a/src/os/aix/vm/libo4.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/os/aix/vm/libo4.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,56 +22,69 @@
  *
  */
 
-// A C++ wrapper around the libo4 porting library. The libo4 porting library
-// is a set of bridge functions into native AS/400 functionality.
+// Class libo4 is a C++ wrapper around the libo4 porting library. It handles
+// basic stuff like dynamic loading, library initialization etc.
+// The libo4 porting library is a set of functions that bridge from the AIX
+// runtime environment on OS/400 (aka PASE layer) into native OS/400
+// functionality (aka ILE layer) to close some functional gaps that exist in
+// the PASE layer.
 
 #ifndef OS_AIX_VM_LIBO4_HPP
 #define OS_AIX_VM_LIBO4_HPP
 
-
 class libo4 {
 public:
-
   // Initialize the libo4 porting library.
   // Returns true if succeeded, false if error.
   static bool init();
 
-  // cleanup of the libo4 porting library.
+  // Triggers cleanup of the libo4 porting library.
   static void cleanup();
 
-  // returns a number of memory statistics from the
-  // AS/400.
+  // Returns a number of memory statistics from OS/400.
+  //
+  // See libo4.h for details on this API.
   //
   // Specify NULL for numbers you are not interested in.
   //
-  // returns false if an error happened. Activate OsMisc trace for
+  // Returns false if an error happened. Activate OsMisc trace for
   // trace output.
   //
-  static bool get_memory_info (unsigned long long* p_virt_total, unsigned long long* p_real_total,
-    unsigned long long* p_real_free, unsigned long long* p_pgsp_total, unsigned long long* p_pgsp_free);
+  static bool get_memory_info(unsigned long long* p_virt_total,
+                              unsigned long long* p_real_total,
+                              unsigned long long* p_real_free,
+                              unsigned long long* p_pgsp_total,
+                              unsigned long long* p_pgsp_free);
 
-  // returns information about system load
+  // Returns information about system load
   // (similar to "loadavg()" under other Unices)
   //
+  // See libo4.h for details on this API.
+  //
   // Specify NULL for numbers you are not interested in.
   //
-  // returns false if an error happened. Activate OsMisc trace for
+  // Returns false if an error happened. Activate OsMisc trace for
   // trace output.
   //
-  static bool get_load_avg (double* p_avg1, double* p_avg5, double* p_avg15);
+  static bool get_load_avg(double* p_avg1, double* p_avg5, double* p_avg15);
 
-  // this is a replacement for the "realpath()" API which does not really work
-  // on PASE
+  // This is a replacement for the "realpath()" API which does not really work
+  // in PASE together with the (case insensitive but case preserving)
+  // filesystem on OS/400.
   //
-  // Specify NULL for numbers you are not interested in.
+  // See libo4.h for details on this API.
   //
-  // returns false if an error happened. Activate OsMisc trace for
+  // Returns false if an error happened. Activate OsMisc trace for
   // trace output.
   //
-  static bool realpath (const char* file_name,
-      char* resolved_name, int resolved_name_len);
+  static bool realpath(const char* file_name, char* resolved_name,
+                       int resolved_name_len);
 
+  // Call libo4_RemoveEscapeMessageFromJoblogByContext API to remove messages
+  // from the OS/400 job log.
+  //
+  // See libo4.h for details on this API.
+  static bool removeEscapeMessageFromJoblogByContext(const void* context);
 };
 
 #endif // OS_AIX_VM_LIBO4_HPP
-
--- a/src/os/aix/vm/libperfstat_aix.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/os/aix/vm/libperfstat_aix.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -180,10 +180,12 @@
   memset (&psct, '\0', sizeof(psct));
 
   if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(PERFSTAT_CPU_TOTAL_T_LATEST), 1)) {
-    if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t_61), 1)) {
-      if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t_53), 1)) {
+    if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t_71), 1)) {
+      if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t_61), 1)) {
+        if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t_53), 1)) {
           trcVerbose("perfstat_cpu_total() failed (errno=%d)", errno);
           return false;
+        }
       }
     }
   }
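The libperfstat_aix.cpp hunk above adds one more fallback level, so perfstat_cpu_total() is probed with the newest known structure size first and then with progressively older layouts until one is accepted. A tiny sketch of that probing loop; negotiateStructSize, the byte sizes, and the predicate standing in for the library call are all hypothetical.

import java.util.function.IntPredicate;

public class StructSizeFallback {

    // Try the newest known layout size first, then progressively older ones,
    // returning the first size the library accepts, or -1 if none does.
    static int negotiateStructSize(IntPredicate perfstatCall, int... sizesNewestFirst) {
        for (int size : sizesNewestFirst) {
            if (perfstatCall.test(size)) {
                return size;
            }
        }
        return -1;
    }

    public static void main(String[] args) {
        // Pretend the running release only understands layouts up to 1216 bytes.
        IntPredicate fakeLibrary = size -> size <= 1216;
        System.out.println(negotiateStructSize(fakeLibrary, 1600, 1216, 1056, 944)); // 1216
    }
}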
--- a/src/os/aix/vm/libperfstat_aix.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/os/aix/vm/libperfstat_aix.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -337,10 +337,109 @@
   int spurrflag;                        /* set if running in spurr mode */
   u_longlong_t  version;                /* version number (1, 2, etc.,) */
 /*      >>>>> END OF STRUCTURE DEFINITION <<<<<         */
-#define CURR_VERSION_CPU_TOTAL 1              /* Incremented by one for every new release *
+/* #define CURR_VERSION_CPU_TOTAL 1              Incremented by one for every new release *
                                                * of perfstat_cpu_total_t data structure   */
 } perfstat_cpu_total_t_71;
 
+typedef struct { /* global cpu information AIX 7.2  / 6.1 TL6 (see oslevel -r) */
+  int ncpus;                /* number of active logical processors */
+  int ncpus_cfg;             /* number of configured processors */
+  char description[IDENTIFIER_LENGTH]; /* processor description (type/official name) */
+  u_longlong_t processorHZ; /* processor speed in Hz */
+  u_longlong_t user;        /*  raw total number of clock ticks spent in user mode */
+  u_longlong_t sys;         /* raw total number of clock ticks spent in system mode */
+  u_longlong_t idle;        /* raw total number of clock ticks spent idle */
+  u_longlong_t wait;        /* raw total number of clock ticks spent waiting for I/O */
+  u_longlong_t pswitch;     /* number of process switches (change in currently running process) */
+  u_longlong_t syscall;     /* number of system calls executed */
+  u_longlong_t sysread;     /* number of read system calls executed */
+  u_longlong_t syswrite;    /* number of write system calls executed */
+  u_longlong_t sysfork;     /* number of forks system calls executed */
+  u_longlong_t sysexec;     /* number of execs system calls executed */
+  u_longlong_t readch;      /* number of characters tranferred with read system call */
+  u_longlong_t writech;     /* number of characters tranferred with write system call */
+  u_longlong_t devintrs;    /* number of device interrupts */
+  u_longlong_t softintrs;   /* number of software interrupts */
+  time_t lbolt;             /* number of ticks since last reboot */
+  u_longlong_t loadavg[3];  /* (1<<SBITS) times the average number of runnables processes during the last 1, 5 and 15 minutes.    */
+                            /* To calculate the load average, divide the numbers by (1<<SBITS). SBITS is defined in <sys/proc.h>. */
+  u_longlong_t runque;      /* length of the run queue (processes ready) */
+  u_longlong_t swpque;      /* ength of the swap queue (processes waiting to be paged in) */
+  u_longlong_t bread;       /* number of blocks read */
+  u_longlong_t bwrite;      /* number of blocks written */
+  u_longlong_t lread;       /* number of logical read requests */
+  u_longlong_t lwrite;      /* number of logical write requests */
+  u_longlong_t phread;      /* number of physical reads (reads on raw devices) */
+  u_longlong_t phwrite;     /* number of physical writes (writes on raw devices) */
+  u_longlong_t runocc;      /* updated whenever runque is updated, i.e. the runqueue is occupied.
+                             * This can be used to compute the simple average of ready processes  */
+  u_longlong_t swpocc;      /* updated whenever swpque is updated. i.e. the swpqueue is occupied.
+                             * This can be used to compute the simple average processes waiting to be paged in */
+  u_longlong_t iget;        /* number of inode lookups */
+  u_longlong_t namei;       /* number of vnode lookup from a path name */
+  u_longlong_t dirblk;      /* number of 512-byte block reads by the directory search routine to locate an entry for a file */
+  u_longlong_t msg;         /* number of IPC message operations */
+  u_longlong_t sema;        /* number of IPC semaphore operations */
+  u_longlong_t rcvint;      /* number of tty receive interrupts */
+  u_longlong_t xmtint;      /* number of tyy transmit interrupts */
+  u_longlong_t mdmint;      /* number of modem interrupts */
+  u_longlong_t tty_rawinch; /* number of raw input characters  */
+  u_longlong_t tty_caninch; /* number of canonical input characters (always zero) */
+  u_longlong_t tty_rawoutch;/* number of raw output characters */
+  u_longlong_t ksched;      /* number of kernel processes created */
+  u_longlong_t koverf;      /* kernel process creation attempts where:
+                             * -the user has forked to their maximum limit
+                             * -the configuration limit of processes has been reached */
+  u_longlong_t kexit;       /* number of kernel processes that became zombies */
+  u_longlong_t rbread;      /* number of remote read requests */
+  u_longlong_t rcread;      /* number of cached remote reads */
+  u_longlong_t rbwrt;       /* number of remote writes */
+  u_longlong_t rcwrt;       /* number of cached remote writes */
+  u_longlong_t traps;       /* number of traps */
+  int ncpus_high;           /* index of highest processor online */
+  u_longlong_t puser;       /* raw number of physical processor tics in user mode */
+  u_longlong_t psys;        /* raw number of physical processor tics in system mode */
+  u_longlong_t pidle;       /* raw number of physical processor tics idle */
+  u_longlong_t pwait;       /* raw number of physical processor tics waiting for I/O */
+  u_longlong_t decrintrs;   /* number of decrementer tics interrupts */
+  u_longlong_t mpcrintrs;   /* number of mpc's received interrupts */
+  u_longlong_t mpcsintrs;   /* number of mpc's sent interrupts */
+  u_longlong_t phantintrs;  /* number of phantom interrupts */
+  u_longlong_t idle_donated_purr; /* number of idle cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_donated_spurr;/* number of idle spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_purr; /* number of busy cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_spurr;/* number of busy spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_stolen_purr;  /* number of idle cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t idle_stolen_spurr; /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_purr;  /* number of busy cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_spurr; /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */
+  short iowait;             /* number of processes that are asleep waiting for buffered I/O */
+  short physio;             /* number of processes waiting for raw I/O */
+  longlong_t twait;         /* number of threads that are waiting for filesystem direct(cio) */
+  u_longlong_t hpi;         /* number of hypervisor page-ins */
+  u_longlong_t hpit;        /* Time spent in hypervisor page-ins (in nanoseconds) */
+  u_longlong_t puser_spurr; /* number of spurr cycles spent in user mode */
+  u_longlong_t psys_spurr;  /* number of spurr cycles spent in kernel mode */
+  u_longlong_t pidle_spurr; /* number of spurr cycles spent in idle mode */
+  u_longlong_t pwait_spurr; /* number of spurr cycles spent in wait mode */
+  int spurrflag;            /* set if running in spurr mode */
+  u_longlong_t  version;    /* version number (1, 2, etc.,) */
+  u_longlong_t tb_last;     /*time base counter */
+  u_longlong_t purr_coalescing;   /* If the calling partition is
+                                   * authorized to see pool wide statistics then
+                                   * PURR cycles consumed to coalesce data
+                                   * else set to zero.*/
+  u_longlong_t spurr_coalescing;  /* If the calling partition is
+                                   * authorized to see pool wide statistics then
+                                   * SPURR cycles consumed to coalesce data
+                                   * else set to zero.*/
+
+/*      >>>>> END OF STRUCTURE DEFINITION <<<<<         */
+#define CURR_VERSION_CPU_TOTAL 2 /* Incremented by one for every new release *
+                                  * of perfstat_cpu_total_t data structure   */
+} perfstat_cpu_total_t_72;
+
+
 typedef union {
   uint    w;
   struct {
@@ -756,7 +855,7 @@
 //////////////////////////////////////////////////////////////////////////////////////////////////////////////
 
 #define PERFSTAT_PARTITON_TOTAL_T_LATEST perfstat_partition_total_t_71_1/* latest perfstat_partition_total_t structure */
-#define PERFSTAT_CPU_TOTAL_T_LATEST perfstat_cpu_total_t_71             /* latest perfstat_cpu_total_t structure */
+#define PERFSTAT_CPU_TOTAL_T_LATEST perfstat_cpu_total_t_72             /* latest perfstat_cpu_total_t structure */
 #define PERFSTAT_WPAR_TOTAL_T_LATEST perfstat_wpar_total_t_71           /* latest perfstat_wpar_total_t structure */
 
 class libperfstat {
--- a/src/os/linux/vm/os_linux.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/os/linux/vm/os_linux.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -1742,11 +1742,11 @@
   }
 
   typedef struct {
-    Elf32_Half  code;         // Actual value as defined in elf.h
-    Elf32_Half  compat_class; // Compatibility of archs at VM's sense
-    char        elf_class;    // 32 or 64 bit
-    char        endianess;    // MSB or LSB
-    char*       name;         // String representation
+    Elf32_Half    code;         // Actual value as defined in elf.h
+    Elf32_Half    compat_class; // Compatibility of archs at VM's sense
+    unsigned char elf_class;    // 32 or 64 bit
+    unsigned char endianess;    // MSB or LSB
+    char*         name;         // String representation
   } arch_t;
 
 #ifndef EM_486
--- a/src/os/solaris/vm/os_solaris.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/os/solaris/vm/os_solaris.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -1320,36 +1320,8 @@
 }
 
 bool os::supports_vtime() { return true; }
-
-bool os::enable_vtime() {
-  int fd = ::open("/proc/self/ctl", O_WRONLY);
-  if (fd == -1) {
-    return false;
-  }
-
-  long cmd[] = { PCSET, PR_MSACCT };
-  int res = ::write(fd, cmd, sizeof(long) * 2);
-  ::close(fd);
-  if (res != sizeof(long) * 2) {
-    return false;
-  }
-  return true;
-}
-
-bool os::vtime_enabled() {
-  int fd = ::open("/proc/self/status", O_RDONLY);
-  if (fd == -1) {
-    return false;
-  }
-
-  pstatus_t status;
-  int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
-  ::close(fd);
-  if (res != sizeof(pstatus_t)) {
-    return false;
-  }
-  return status.pr_flags & PR_MSACCT;
-}
+bool os::enable_vtime() { return false; }
+bool os::vtime_enabled() { return false; }
 
 double os::elapsedVTime() {
   return (double)gethrvtime() / (double)hrtime_hz;
--- a/src/os/windows/vm/os_windows.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/os/windows/vm/os_windows.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -5250,6 +5250,12 @@
 
 static int mallocDebugIntervalCounter = 0;
 static int mallocDebugCounter = 0;
+
+// For debugging possible bugs inside HeapWalk (a ring buffer)
+#define SAVE_COUNT 8
+static PROCESS_HEAP_ENTRY saved_heap_entries[SAVE_COUNT];
+static int saved_heap_entry_index;
+
 bool os::check_heap(bool force) {
   if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
   if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
@@ -5270,13 +5276,28 @@
     if (HeapLock(heap) != 0) {
       PROCESS_HEAP_ENTRY phe;
       phe.lpData = NULL;
+      memset(saved_heap_entries, 0, sizeof(saved_heap_entries));
+      saved_heap_entry_index = 0;
+      int count = 0;
+
       while (HeapWalk(heap, &phe) != 0) {
+        count ++;
         if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
             !HeapValidate(heap, 0, phe.lpData)) {
           tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
-          tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData);
+          tty->print_cr("corrupted block near address %#x, length %d, count %d", phe.lpData, phe.cbData, count);
           HeapUnlock(heap);
           fatal("corrupted C heap");
+        } else {
+          // Save previous seen entries in a ring buffer. We have seen strange
+          // heap corruption fatal errors that produced mdmp files, but when we load
+          // these mdmp files in WinDBG, "!heap -triage" shows no error.
+          // We can examine the saved_heap_entries[] array in the mdmp file to
+          // diagnose such seemingly spurious errors reported by HeapWalk.
+          saved_heap_entries[saved_heap_entry_index++] = phe;
+          if (saved_heap_entry_index >= SAVE_COUNT) {
+            saved_heap_entry_index = 0;
+          }
         }
       }
       DWORD err = GetLastError();
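The os_windows.cpp change above keeps the last SAVE_COUNT entries seen by HeapWalk in a static ring buffer so they can be examined in a minidump when a reported corruption later looks spurious. A minimal sketch of that fixed-size, wrap-around bookkeeping; the class and method names are illustrative only.

public class LastEntriesRing {

    private final Object[] saved;   // most recent entries, oldest overwritten first
    private int index;              // next slot to overwrite

    public LastEntriesRing(int capacity) {
        saved = new Object[capacity];
    }

    public void record(Object entry) {
        saved[index++] = entry;
        if (index >= saved.length) {
            index = 0;              // wrap around
        }
    }

    public static void main(String[] args) {
        LastEntriesRing entries = new LastEntriesRing(8);   // SAVE_COUNT is 8 above
        for (int i = 0; i < 20; i++) {
            entries.record("heap entry " + i);
        }
        // entries.saved now holds the eight most recently recorded entries.
    }
}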
--- a/src/share/vm/ci/ciEnv.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/ci/ciEnv.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -204,11 +204,13 @@
 }
 
 ciEnv::~ciEnv() {
-  CompilerThread* current_thread = CompilerThread::current();
-  _factory->remove_symbols();
-  // Need safepoint to clear the env on the thread.  RedefineClasses might
-  // be reading it.
-  GUARDED_VM_ENTRY(current_thread->set_env(NULL);)
+  GUARDED_VM_ENTRY(
+      CompilerThread* current_thread = CompilerThread::current();
+      _factory->remove_symbols();
+      // Need safepoint to clear the env on the thread.  RedefineClasses might
+      // be reading it.
+      current_thread->set_env(NULL);
+  )
 }
 
 // ------------------------------------------------------------------
--- a/src/share/vm/ci/ciReplay.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/ci/ciReplay.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -490,7 +490,8 @@
     int comp_level = parse_int(comp_level_label);
     // old version w/o comp_level
     if (had_error() && (error_message() == comp_level_label)) {
-      comp_level = CompLevel_full_optimization;
+      // use highest available tier
+      comp_level = TieredCompilation ? TieredStopAtLevel : CompLevel_highest_tier;
     }
     if (!is_valid_comp_level(comp_level)) {
       return;
--- a/src/share/vm/classfile/altHashing.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/classfile/altHashing.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -224,7 +224,7 @@
 static const jbyte THREE_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82};
 static const jbyte FOUR_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82, (jbyte) 0x83};
 static const jchar TWO_CHAR[] = { (jchar) 0x8180, (jchar) 0x8382};
-static const jint ONE_INT[] = { 0x83828180};
+static const jint ONE_INT[] = { (jint)0x83828180};
 static const jbyte SIX_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82, (jbyte) 0x83, (jbyte) 0x84, (jbyte) 0x85};
 static const jchar THREE_CHAR[] = { (jchar) 0x8180, (jchar) 0x8382, (jchar) 0x8584};
 static const jbyte EIGHT_BYTE[] = {
@@ -235,7 +235,7 @@
   (jchar) 0x8180, (jchar) 0x8382,
   (jchar) 0x8584, (jchar) 0x8786};
 
-static const jint TWO_INT[] = { 0x83828180, 0x87868584};
+static const jint TWO_INT[] = { (jint)0x83828180, (jint)0x87868584};
 
 static const juint MURMUR3_32_X86_CHECK_VALUE = 0xB0F57EE3;
 
--- a/src/share/vm/classfile/classLoaderData.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/classfile/classLoaderData.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -142,7 +142,9 @@
 
   f->do_oop(&_class_loader);
   _dependencies.oops_do(f);
-  _handles->oops_do(f);
+  if (_handles != NULL) {
+    _handles->oops_do(f);
+  }
   if (klass_closure != NULL) {
     classes_do(klass_closure);
   }
@@ -501,13 +503,26 @@
   }
 }
 
-/**
- * Returns true if this class loader data is for the platform class loader.
- */
+// Returns true if this class loader data is for the system class loader.
+bool ClassLoaderData::is_system_class_loader_data() const {
+  return SystemDictionary::is_system_class_loader(class_loader());
+}
+
+// Returns true if this class loader data is for the platform class loader.
 bool ClassLoaderData::is_platform_class_loader_data() const {
   return SystemDictionary::is_platform_class_loader(class_loader());
 }
 
+// Returns true if this class loader data is one of the 3 builtin
+// (boot, application/system or platform) class loaders. Note, the
+// builtin loaders are not freed by a GC.
+bool ClassLoaderData::is_builtin_class_loader_data() const {
+  Handle classLoaderHandle = class_loader();
+  return (is_the_null_class_loader_data() ||
+          SystemDictionary::is_system_class_loader(classLoaderHandle) ||
+          SystemDictionary::is_platform_class_loader(classLoaderHandle));
+}
+
 Metaspace* ClassLoaderData::metaspace_non_null() {
   assert(!DumpSharedSpaces, "wrong metaspace!");
   // If the metaspace has not been allocated, create a new one.  Might want
@@ -957,12 +972,6 @@
   data = _head;
   while (data != NULL) {
     if (data->is_alive(is_alive_closure)) {
-      if (data->packages_defined()) {
-        data->packages()->purge_all_package_exports();
-      }
-      if (data->modules_defined()) {
-        data->modules()->purge_all_module_reads();
-      }
       // clean metaspace
       if (walk_all_metadata) {
         data->classes_do(InstanceKlass::purge_previous_versions);
@@ -990,6 +999,23 @@
   }
 
   if (seen_dead_loader) {
+    // Walk a ModuleEntry's reads and a PackageEntry's exports lists
+    // to determine if there are modules on those lists that are now
+    // dead and should be removed.  A module's life cycle is equivalent
+    // to its defining class loader's life cycle.  Since a module is
+    // considered dead if its class loader is dead, these walks must
+    // occur after each class loader's aliveness is determined.
+    data = _head;
+    while (data != NULL) {
+      if (data->packages_defined()) {
+        data->packages()->purge_all_package_exports();
+      }
+      if (data->modules_defined()) {
+        data->modules()->purge_all_module_reads();
+      }
+      data = data->next();
+    }
+
     post_class_unload_events();
   }
 
--- a/src/share/vm/classfile/classLoaderData.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/classfile/classLoaderData.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -270,7 +270,9 @@
   bool is_the_null_class_loader_data() const {
     return this == _the_null_class_loader_data;
   }
+  bool is_system_class_loader_data() const;
   bool is_platform_class_loader_data() const;
+  bool is_builtin_class_loader_data() const;
 
   // The Metaspace is created lazily so may be NULL.  This
   // method will allocate a Metaspace if needed.
--- a/src/share/vm/classfile/compactHashtable.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/classfile/compactHashtable.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -248,7 +248,7 @@
     } else {
       u4*entry_max = _entries + BUCKET_OFFSET(_buckets[i + 1]);
       while (entry < entry_max) {
-        iterator.do_value(_base_address, entry[0]);
+        iterator.do_value(_base_address, entry[1]);
         entry += 2;
       }
     }
--- a/src/share/vm/classfile/javaClasses.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/classfile/javaClasses.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -871,12 +871,17 @@
 
 int  java_lang_Class::oop_size(oop java_class) {
   assert(_oop_size_offset != 0, "must be set");
-  return java_class->int_field(_oop_size_offset);
-}
+  int size = java_class->int_field(_oop_size_offset);
+  assert(size > 0, "Oop size must be greater than zero, not %d", size);
+  return size;
+}
+
 void java_lang_Class::set_oop_size(oop java_class, int size) {
   assert(_oop_size_offset != 0, "must be set");
+  assert(size > 0, "Oop size must be greater than zero, not %d", size);
   java_class->int_field_put(_oop_size_offset, size);
 }
+
 int  java_lang_Class::static_oop_field_count(oop java_class) {
   assert(_static_oop_field_count_offset != 0, "must be set");
   return java_class->int_field(_static_oop_field_count_offset);
--- a/src/share/vm/classfile/javaClasses.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/classfile/javaClasses.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -275,7 +275,6 @@
   static int static_oop_field_count(oop java_class);
   static void set_static_oop_field_count(oop java_class, int size);
 
-
   static GrowableArray<Klass*>* fixup_mirror_list() {
     return _fixup_mirror_list;
   }
--- a/src/share/vm/classfile/moduleEntry.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/classfile/moduleEntry.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -40,7 +40,6 @@
 
 ModuleEntry* ModuleEntryTable::_javabase_module = NULL;
 
-
 void ModuleEntry::set_location(Symbol* location) {
   if (_location != NULL) {
     // _location symbol's refcounts are managed by ModuleEntry,
@@ -115,10 +114,35 @@
       // Lazily create a module's reads list
       _reads = new (ResourceObj::C_HEAP, mtModule)GrowableArray<ModuleEntry*>(MODULE_READS_SIZE, true);
     }
+
+    // Determine, based on this newly established read edge to module m,
+    // if this module's read list should be walked at a GC safepoint.
+    set_read_walk_required(m->loader_data());
+
+    // Establish readability to module m
     _reads->append_if_missing(m);
   }
 }
 
+// If the loader of the module that a read edge is being established to is
+// not the same as this module's loader and is not one of the 3 builtin
+// class loaders, then this module's reads list must be walked at a GC
+// safepoint. Modules have the same life cycle as their defining class
+// loaders and should be removed if dead.
+void ModuleEntry::set_read_walk_required(ClassLoaderData* m_loader_data) {
+  assert_locked_or_safepoint(Module_lock);
+  if (!_must_walk_reads &&
+      loader_data() != m_loader_data &&
+      !m_loader_data->is_builtin_class_loader_data()) {
+    _must_walk_reads = true;
+    if (log_is_enabled(Trace, modules)) {
+      ResourceMark rm;
+      log_trace(modules)("ModuleEntry::set_read_walk_required(): module %s reads list must be walked",
+                         (name() != NULL) ? name()->as_C_string() : UNNAMED_MODULE);
+    }
+  }
+}
+
 bool ModuleEntry::has_reads() const {
   assert_locked_or_safepoint(Module_lock);
   return ((_reads != NULL) && !_reads->is_empty());
@@ -127,14 +151,28 @@
 // Purge dead module entries out of reads list.
 void ModuleEntry::purge_reads() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  if (has_reads()) {
+
+  if (_must_walk_reads && has_reads()) {
+    // This module's _must_walk_reads flag will be reset based
+    // on the remaining live modules on the reads list.
+    _must_walk_reads = false;
+
+    if (log_is_enabled(Trace, modules)) {
+      ResourceMark rm;
+      log_trace(modules)("ModuleEntry::purge_reads(): module %s reads list being walked",
+                         (name() != NULL) ? name()->as_C_string() : UNNAMED_MODULE);
+    }
+
     // Go backwards because this removes entries that are dead.
     int len = _reads->length();
     for (int idx = len - 1; idx >= 0; idx--) {
       ModuleEntry* module_idx = _reads->at(idx);
-      ClassLoaderData* cld = module_idx->loader();
-      if (cld->is_unloading()) {
+      ClassLoaderData* cld_idx = module_idx->loader_data();
+      if (cld_idx->is_unloading()) {
         _reads->delete_at(idx);
+      } else {
+        // Update the need to walk this module's reads based on live modules
+        set_read_walk_required(cld_idx);
       }
     }
   }
@@ -248,7 +286,7 @@
     entry->set_module(loader_data->add_handle(module_handle));
   }
 
-  entry->set_loader(loader_data);
+  entry->set_loader_data(loader_data);
   entry->set_version(version);
   entry->set_location(location);
 
@@ -375,11 +413,11 @@
 
 void ModuleEntry::print(outputStream* st) {
   ResourceMark rm;
-  st->print_cr("entry "PTR_FORMAT" name %s module "PTR_FORMAT" loader %s version %s location %s strict %s next "PTR_FORMAT,
+  st->print_cr("entry " PTR_FORMAT " name %s module " PTR_FORMAT " loader %s version %s location %s strict %s next " PTR_FORMAT,
                p2i(this),
                name() == NULL ? UNNAMED_MODULE : name()->as_C_string(),
                p2i(module()),
-               loader()->loader_name(),
+               loader_data()->loader_name(),
                version() != NULL ? version()->as_C_string() : "NULL",
                location() != NULL ? location()->as_C_string() : "NULL",
                BOOL_TO_STR(!can_read_all_unnamed()), p2i(next()));
@@ -401,5 +439,5 @@
 }
 
 void ModuleEntry::verify() {
-  guarantee(loader() != NULL, "A module entry must be associated with a loader.");
+  guarantee(loader_data() != NULL, "A module entry must be associated with a loader.");
 }
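Taken together, set_read_walk_required() and purge_reads() maintain _must_walk_reads as a lazily recomputed dirty flag: it is raised when a read edge is added to a module whose loader is neither this module's loader nor a builtin loader, cleared at the start of a purge, and re-raised from whichever surviving entries still need it. A hedged sketch of that flag pattern outside HotSpot; Edge, dead and crosses_foreign_loader are illustrative stand-ins for the loader checks:

#include <cstddef>
#include <vector>

struct Edge { bool dead; bool crosses_foreign_loader; };

struct ReadsList {
  std::vector<Edge> edges;
  bool must_walk;

  ReadsList() : must_walk(false) {}

  void add(const Edge& e) {
    if (!must_walk && e.crosses_foreign_loader) {
      must_walk = true;                    // this list now needs purging at safepoints
    }
    edges.push_back(e);
  }

  void purge() {
    if (!must_walk || edges.empty()) {
      return;                              // nothing we care about can have died
    }
    must_walk = false;                     // recompute from the survivors
    for (std::size_t i = edges.size(); i-- > 0; ) {
      if (edges[i].dead) {
        edges.erase(edges.begin() + i);    // drop dead entries
      } else if (edges[i].crosses_foreign_loader) {
        must_walk = true;                  // still need future walks
      }
    }
  }
};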
--- a/src/share/vm/classfile/moduleEntry.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/classfile/moduleEntry.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -43,6 +43,7 @@
 // It contains:
 //   - Symbol* containing the module's name.
 //   - pointer to the java.lang.reflect.Module for this module.
+//   - pointer to the java.security.ProtectionDomain shared by classes defined to this module.
 //   - ClassLoaderData*, class loader of this module.
 //   - a growable array containing other module entries that this module can read.
 //   - a flag indicating if this module can read all unnamed modules.
@@ -54,56 +55,58 @@
   jobject _module;                     // java.lang.reflect.Module
   jobject _pd;                         // java.security.ProtectionDomain, cached
                                        // for shared classes from this module
-  ClassLoaderData* _loader;
+  ClassLoaderData* _loader_data;
   GrowableArray<ModuleEntry*>* _reads; // list of modules that are readable by this module
   Symbol* _version;                    // module version number
   Symbol* _location;                   // module location
   bool _can_read_all_unnamed;
   bool _has_default_read_edges;        // JVMTI redefine/retransform support
+  bool _must_walk_reads;               // walk module's reads list at GC safepoints to purge out dead modules
   TRACE_DEFINE_TRACE_ID_FIELD;
   enum {MODULE_READS_SIZE = 101};      // Initial size of list of modules that the module can read.
 
 public:
   void init() {
     _module = NULL;
-    _loader = NULL;
+    _loader_data = NULL;
     _pd = NULL;
     _reads = NULL;
     _version = NULL;
     _location = NULL;
     _can_read_all_unnamed = false;
     _has_default_read_edges = false;
+    _must_walk_reads = false;
   }
 
-  Symbol*            name() const          { return literal(); }
-  void               set_name(Symbol* n)   { set_literal(n); }
+  Symbol*          name() const          { return literal(); }
+  void             set_name(Symbol* n)   { set_literal(n); }
 
-  jobject            module() const        { return _module; }
-  void               set_module(jobject j) { _module = j; }
+  jobject          module() const        { return _module; }
+  void             set_module(jobject j) { _module = j; }
 
   // The shared ProtectionDomain reference is set once the VM loads a shared class
   // originated from the current Module. The referenced ProtectionDomain object is
   // created by the ClassLoader when loading a class (shared or non-shared) from the
   // Module for the first time. This ProtectionDomain object is used for all
   // classes from the Module loaded by the same ClassLoader.
-  Handle             shared_protection_domain();
-  void               set_shared_protection_domain(ClassLoaderData *loader_data,
-                                                  Handle pd);
+  Handle           shared_protection_domain();
+  void             set_shared_protection_domain(ClassLoaderData *loader_data, Handle pd);
 
-  ClassLoaderData*   loader() const                 { return _loader; }
-  void               set_loader(ClassLoaderData* l) { _loader = l; }
+  ClassLoaderData* loader_data() const                 { return _loader_data; }
+  void             set_loader_data(ClassLoaderData* l) { _loader_data = l; }
 
-  Symbol*            version() const                { return _version; }
-  void               set_version(Symbol* version);
+  Symbol*          version() const                     { return _version; }
+  void             set_version(Symbol* version);
 
-  Symbol*            location() const               { return _location; }
-  void               set_location(Symbol* location);
+  Symbol*          location() const                    { return _location; }
+  void             set_location(Symbol* location);
 
-  bool               can_read(ModuleEntry* m) const;
-  bool               has_reads() const;
-  void               add_read(ModuleEntry* m);
+  bool             can_read(ModuleEntry* m) const;
+  bool             has_reads() const;
+  void             add_read(ModuleEntry* m);
+  void             set_read_walk_required(ClassLoaderData* m_loader_data);
 
-  bool               is_named() const               { return (literal() != NULL); }
+  bool             is_named() const                    { return (name() != NULL); }
 
   bool can_read_all_unnamed() const {
     assert(is_named() || _can_read_all_unnamed == true,
@@ -178,7 +181,7 @@
   ModuleEntry* _unnamed_module;
 
   ModuleEntry* new_entry(unsigned int hash, Handle module_handle, Symbol* name, Symbol* version,
-                         Symbol* location, ClassLoaderData* class_loader);
+                         Symbol* location, ClassLoaderData* loader_data);
   void add_entry(int index, ModuleEntry* new_entry);
 
   int entry_size() const { return BasicHashtable<mtModule>::entry_size(); }
--- a/src/share/vm/classfile/modules.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/classfile/modules.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -113,7 +113,7 @@
   const char *package_name = java_lang_String::as_utf8_string(JNIHandles::resolve_non_null(package));
   if (package_name == NULL) return NULL;
   TempNewSymbol pkg_symbol = SymbolTable::new_symbol(package_name, CHECK_NULL);
-  PackageEntryTable* package_entry_table = module_entry->loader()->packages();
+  PackageEntryTable* package_entry_table = module_entry->loader_data()->packages();
   assert(package_entry_table != NULL, "Unexpected null package entry table");
   return package_entry_table->lookup_only(pkg_symbol);
 }
@@ -820,6 +820,28 @@
 }
 
 
+jobject Modules::get_named_module(Handle h_loader, const char* package_str, TRAPS) {
+  assert(ModuleEntryTable::javabase_defined(),
+         "Attempt to call get_named_module before java.base is defined");
+  assert(h_loader.is_null() || java_lang_ClassLoader::is_subclass(h_loader->klass()),
+         "Class loader is not a subclass of java.lang.ClassLoader");
+  assert(package_str != NULL, "the package_str should not be NULL");
+
+  if (strlen(package_str) == 0) {
+    return NULL;
+  }
+  TempNewSymbol package_sym = SymbolTable::new_symbol(package_str, CHECK_NULL);
+  const PackageEntry* const pkg_entry =
+    get_package_entry_by_name(package_sym, h_loader, THREAD);
+  const ModuleEntry* const module_entry = (pkg_entry != NULL ? pkg_entry->module() : NULL);
+
+  if (module_entry != NULL && module_entry->module() != NULL && module_entry->is_named()) {
+    return JNIHandles::make_local(THREAD, JNIHandles::resolve(module_entry->module()));
+  }
+  return NULL;
+}
+
+
 // This method is called by JFR and by the above method.
 jobject Modules::get_module(Symbol* package_name, Handle h_loader, TRAPS) {
   const PackageEntry* const pkg_entry =
@@ -868,7 +890,7 @@
                      package_name, module_entry->name()->as_C_string());
 
   TempNewSymbol pkg_symbol = SymbolTable::new_symbol(package_name, CHECK);
-  PackageEntryTable* package_table = module_entry->loader()->packages();
+  PackageEntryTable* package_table = module_entry->loader_data()->packages();
   assert(package_table != NULL, "Missing package_table");
 
   bool pkg_exists = false;
--- a/src/share/vm/classfile/modules.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/classfile/modules.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -121,6 +121,7 @@
   // IllegalArgumentException is thrown if loader is neither null nor a subtype of
   // java/lang/ClassLoader.
   static jobject get_module_by_package_name(jobject loader, jstring package, TRAPS);
+  static jobject get_named_module(Handle h_loader, const char* package, TRAPS);
 
   // If package is defined by loader, return the
   // java.lang.reflect.Module object for the module in which the package is defined.
--- a/src/share/vm/classfile/packageEntry.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/classfile/packageEntry.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/moduleEntry.hpp"
 #include "classfile/packageEntry.hpp"
+#include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/symbol.hpp"
 #include "runtime/handles.inline.hpp"
@@ -53,12 +54,40 @@
   if (!has_qual_exports_list()) {
     // Lazily create a package's qualified exports list.
     // Initial size is small, do not anticipate export lists to be large.
-    _qualified_exports =
-      new (ResourceObj::C_HEAP, mtModule) GrowableArray<ModuleEntry*>(QUAL_EXP_SIZE, true);
+    _qualified_exports = new (ResourceObj::C_HEAP, mtModule) GrowableArray<ModuleEntry*>(QUAL_EXP_SIZE, true);
   }
+
+  // Determine, based on this newly established export to module m,
+  // if this package's export list should be walked at a GC safepoint.
+  set_export_walk_required(m->loader_data());
+
+  // Establish exportability to module m
   _qualified_exports->append_if_missing(m);
 }
 
+// If the loader of the module that an export is being established to is
+// not the same as the loader of this package's module and is not one of the
+// 3 builtin class loaders, then this package's export list must be walked at
+// a GC safepoint. Modules have the same life cycle as their defining class
+// loaders and should be removed if dead.
+void PackageEntry::set_export_walk_required(ClassLoaderData* m_loader_data) {
+  assert_locked_or_safepoint(Module_lock);
+  ModuleEntry* this_pkg_mod = module();
+  if (!_must_walk_exports &&
+      (this_pkg_mod == NULL || this_pkg_mod->loader_data() != m_loader_data) &&
+      !m_loader_data->is_builtin_class_loader_data()) {
+    _must_walk_exports = true;
+    if (log_is_enabled(Trace, modules)) {
+      ResourceMark rm;
+      assert(name() != NULL, "PackageEntry without a valid name");
+      log_trace(modules)("PackageEntry::set_export_walk_required(): package %s defined in module %s, exports list must be walked",
+                         name()->as_C_string(),
+                         (this_pkg_mod == NULL || this_pkg_mod->name() == NULL) ?
+                           UNNAMED_MODULE : this_pkg_mod->name()->as_C_string());
+    }
+  }
+}
+
 // Set the package's exported states based on the value of the ModuleEntry.
 void PackageEntry::set_exported(ModuleEntry* m) {
   MutexLocker m1(Module_lock);
@@ -96,14 +125,34 @@
 // Remove dead module entries within the package's exported list.
 void PackageEntry::purge_qualified_exports() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  if (_qualified_exports != NULL) {
+  if (_must_walk_exports &&
+      _qualified_exports != NULL &&
+      !_qualified_exports->is_empty()) {
+    ModuleEntry* pkg_module = module();
+
+    // This package's _must_walk_exports flag will be reset based
+    // on the remaining live modules on the exports list.
+    _must_walk_exports = false;
+
+    if (log_is_enabled(Trace, modules)) {
+      ResourceMark rm;
+      assert(name() != NULL, "PackageEntry without a valid name");
+      ModuleEntry* pkg_mod = module();
+      log_trace(modules)("PackageEntry::purge_qualified_exports(): package %s defined in module %s, exports list being walked",
+                         name()->as_C_string(),
+                         (pkg_mod == NULL || pkg_mod->name() == NULL) ? UNNAMED_MODULE : pkg_mod->name()->as_C_string());
+    }
+
     // Go backwards because this removes entries that are dead.
     int len = _qualified_exports->length();
     for (int idx = len - 1; idx >= 0; idx--) {
       ModuleEntry* module_idx = _qualified_exports->at(idx);
-      ClassLoaderData* cld = module_idx->loader();
-      if (cld->is_unloading()) {
+      ClassLoaderData* cld_idx = module_idx->loader_data();
+      if (cld_idx->is_unloading()) {
         _qualified_exports->delete_at(idx);
+      } else {
+        // Update the need to walk this package's exports based on live modules
+        set_export_walk_required(cld_idx);
       }
     }
   }
@@ -297,8 +346,8 @@
 
 void PackageEntry::print(outputStream* st) {
   ResourceMark rm;
-  st->print_cr("package entry "PTR_FORMAT" name %s module %s classpath_index "
-               INT32_FORMAT " is_exported_unqualified %d is_exported_allUnnamed %d " "next "PTR_FORMAT,
+  st->print_cr("package entry " PTR_FORMAT " name %s module %s classpath_index "
+               INT32_FORMAT " is_exported_unqualified %d is_exported_allUnnamed %d " "next " PTR_FORMAT,
                p2i(this), name()->as_C_string(),
                (module()->is_named() ? module()->name()->as_C_string() : UNNAMED_MODULE),
                _classpath_index, _is_exported_unqualified, _is_exported_allUnnamed, p2i(next()));
--- a/src/share/vm/classfile/packageEntry.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/classfile/packageEntry.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -69,6 +69,7 @@
   s2 _classpath_index;
   bool _is_exported_unqualified;
   bool _is_exported_allUnnamed;
+  bool _must_walk_exports;
   GrowableArray<ModuleEntry*>* _exported_pending_delete; // transitioned from qualified to unqualified, delete at safepoint
   GrowableArray<ModuleEntry*>* _qualified_exports;
   TRACE_DEFINE_TRACE_ID_FIELD;
@@ -82,6 +83,7 @@
     _classpath_index = -1;
     _is_exported_unqualified = false;
     _is_exported_allUnnamed = false;
+    _must_walk_exports = false;
     _exported_pending_delete = NULL;
     _qualified_exports = NULL;
   }
@@ -147,6 +149,7 @@
 
   // add the module to the package's qualified exports
   void add_qexport(ModuleEntry* m);
+  void set_export_walk_required(ClassLoaderData* m_loader_data);
 
   PackageEntry* next() const {
     return (PackageEntry*)HashtableEntry<Symbol*, mtModule>::next();
--- a/src/share/vm/classfile/systemDictionary.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/classfile/systemDictionary.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -175,9 +175,18 @@
    return false;
 }
 
-/**
- * Returns true if the passed class loader is the platform class loader.
- */
+// Returns true if the passed class loader is the builtin application class loader
+// or a custom system class loader. A custom system class loader can be
+// specified via -Djava.system.class.loader.
+bool SystemDictionary::is_system_class_loader(Handle class_loader) {
+  if (class_loader.is_null()) {
+    return false;
+  }
+  return (class_loader->klass() == SystemDictionary::jdk_internal_loader_ClassLoaders_AppClassLoader_klass() ||
+          class_loader() == _java_system_loader);
+}
+
+// Returns true if the passed class loader is the platform class loader.
 bool SystemDictionary::is_platform_class_loader(Handle class_loader) {
   if (class_loader.is_null()) {
     return false;
--- a/src/share/vm/classfile/systemDictionary.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/classfile/systemDictionary.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -660,6 +660,7 @@
   static instanceKlassHandle load_shared_class(Symbol* class_name,
                                                Handle class_loader,
                                                TRAPS);
+  static bool is_system_class_loader(Handle class_loader);
   static bool is_platform_class_loader(Handle class_loader);
 
 protected:
--- a/src/share/vm/gc/g1/g1CollectedHeap.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -1256,9 +1256,7 @@
       // set between the last GC or pause and now. We need to clear the
       // incremental collection set and then start rebuilding it afresh
       // after this full GC.
-      abandon_collection_set(collection_set()->inc_head());
-      collection_set()->clear_incremental();
-      collection_set()->stop_incremental_building();
+      abandon_collection_set(collection_set());
 
       tear_down_region_sets(false /* free_list_only */);
       collector_state()->set_gcs_are_young(true);
@@ -1379,7 +1377,6 @@
       _verifier->check_bitmaps("Full GC End");
 
       // Start a new incremental collection set for the next pause
-      assert(collection_set()->head() == NULL, "must be");
       collection_set()->start_incremental_building();
 
       clear_cset_fast_test();
@@ -1724,8 +1721,6 @@
   _old_marking_cycles_started(0),
   _old_marking_cycles_completed(0),
   _in_cset_fast_test(),
-  _worker_cset_start_region(NULL),
-  _worker_cset_start_region_time_stamp(NULL),
   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
 
@@ -1748,8 +1743,6 @@
   uint n_queues = ParallelGCThreads;
   _task_queues = new RefToScanQueueSet(n_queues);
 
-  _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
-  _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
 
   for (uint i = 0; i < n_queues; i++) {
@@ -1758,7 +1751,6 @@
     _task_queues->register_queue(i, q);
     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
   }
-  clear_cset_start_regions();
 
   // Initialize the G1EvacuationFailureALot counters and flags.
   NOT_PRODUCT(reset_evacuation_should_fail();)
@@ -1987,6 +1979,8 @@
 
   _preserved_marks_set.init(ParallelGCThreads);
 
+  _collection_set.initialize(max_regions());
+
   return JNI_OK;
 }
 
@@ -2420,117 +2414,12 @@
   _hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
 }
 
-// Clear the cached CSet starting regions and (more importantly)
-// the time stamps. Called when we reset the GC time stamp.
-void G1CollectedHeap::clear_cset_start_regions() {
-  assert(_worker_cset_start_region != NULL, "sanity");
-  assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
-
-  for (uint i = 0; i < ParallelGCThreads; i++) {
-    _worker_cset_start_region[i] = NULL;
-    _worker_cset_start_region_time_stamp[i] = 0;
-  }
+void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
+  _collection_set.iterate(cl);
 }
 
-// Given the id of a worker, obtain or calculate a suitable
-// starting region for iterating over the current collection set.
-HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
-  assert(get_gc_time_stamp() > 0, "should have been updated by now");
-
-  HeapRegion* result = NULL;
-  unsigned gc_time_stamp = get_gc_time_stamp();
-
-  if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
-    // Cached starting region for current worker was set
-    // during the current pause - so it's valid.
-    // Note: the cached starting heap region may be NULL
-    // (when the collection set is empty).
-    result = _worker_cset_start_region[worker_i];
-    assert(result == NULL || result->in_collection_set(), "sanity");
-    return result;
-  }
-
-  // The cached entry was not valid so let's calculate
-  // a suitable starting heap region for this worker.
-
-  // We want the parallel threads to start their collection
-  // set iteration at different collection set regions to
-  // avoid contention.
-  // If we have:
-  //          n collection set regions
-  //          p threads
-  // Then thread t will start at region floor ((t * n) / p)
-
-  result = collection_set()->head();
-  uint cs_size = collection_set()->region_length();
-  uint active_workers = workers()->active_workers();
-
-  uint end_ind   = (cs_size * worker_i) / active_workers;
-  uint start_ind = 0;
-
-  if (worker_i > 0 &&
-      _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
-    // Previous workers starting region is valid
-    // so let's iterate from there
-    start_ind = (cs_size * (worker_i - 1)) / active_workers;
-    OrderAccess::loadload();
-    result = _worker_cset_start_region[worker_i - 1];
-  }
-
-  for (uint i = start_ind; i < end_ind; i++) {
-    result = result->next_in_collection_set();
-  }
-
-  // Note: the calculated starting heap region may be NULL
-  // (when the collection set is empty).
-  assert(result == NULL || result->in_collection_set(), "sanity");
-  assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
-         "should be updated only once per pause");
-  _worker_cset_start_region[worker_i] = result;
-  OrderAccess::storestore();
-  _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
-  return result;
-}
-
-void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
-  HeapRegion* r = collection_set()->head();
-  while (r != NULL) {
-    HeapRegion* next = r->next_in_collection_set();
-    if (cl->doHeapRegion(r)) {
-      cl->incomplete();
-      return;
-    }
-    r = next;
-  }
-}
-
-void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
-                                                  HeapRegionClosure *cl) {
-  if (r == NULL) {
-    // The CSet is empty so there's nothing to do.
-    return;
-  }
-
-  assert(r->in_collection_set(),
-         "Start region must be a member of the collection set.");
-  HeapRegion* cur = r;
-  while (cur != NULL) {
-    HeapRegion* next = cur->next_in_collection_set();
-    if (cl->doHeapRegion(cur) && false) {
-      cl->incomplete();
-      return;
-    }
-    cur = next;
-  }
-  cur = collection_set()->head();
-  while (cur != r) {
-    HeapRegion* next = cur->next_in_collection_set();
-    if (cl->doHeapRegion(cur) && false) {
-      cl->incomplete();
-      return;
-    }
-    cur = next;
-  }
+void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id) {
+  _collection_set.iterate_from(cl, worker_id, workers()->active_workers());
 }
 
 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
@@ -3090,6 +2979,18 @@
   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
 }
 
+class G1PrintCollectionSetClosure : public HeapRegionClosure {
+private:
+  G1HRPrinter* _hr_printer;
+public:
+  G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    _hr_printer->cset(r);
+    return false;
+  }
+};
+
 bool
 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   assert_at_safepoint(true /* should_be_vm_thread */);
@@ -3268,11 +3169,8 @@
         _cm->verify_no_cset_oops();
 
         if (_hr_printer.is_active()) {
-          HeapRegion* hr = collection_set()->head();
-          while (hr != NULL) {
-            _hr_printer.cset(hr);
-            hr = hr->next_in_collection_set();
-          }
+          G1PrintCollectionSetClosure cl(&_hr_printer);
+          _collection_set.iterate(&cl);
         }
 
         // Initialize the GC alloc regions.
@@ -3287,12 +3185,10 @@
         post_evacuate_collection_set(evacuation_info, &per_thread_states);
 
         const size_t* surviving_young_words = per_thread_states.surviving_young_words();
-        free_collection_set(collection_set()->head(), evacuation_info, surviving_young_words);
+        free_collection_set(&_collection_set, evacuation_info, surviving_young_words);
 
         eagerly_reclaim_humongous_regions();
 
-        collection_set()->clear_head();
-
         record_obj_copy_mem_stats();
         _survivor_evac_stats.adjust_desired_plab_sz();
         _old_evac_stats.adjust_desired_plab_sz();
@@ -4704,120 +4600,139 @@
   workers()->run_task(&g1_par_scrub_rs_task);
 }
 
-void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
-  size_t pre_used = 0;
-  FreeRegionList local_free_list("Local List for CSet Freeing");
-
-  double young_time_ms     = 0.0;
-  double non_young_time_ms = 0.0;
-
-  _eden.clear();
-
-  G1Policy* policy = g1_policy();
-
-  double start_sec = os::elapsedTime();
-  bool non_young = true;
-
-  HeapRegion* cur = cs_head;
-  int age_bound = -1;
-  size_t rs_lengths = 0;
-
-  while (cur != NULL) {
-    assert(!is_on_master_free_list(cur), "sanity");
-    if (non_young) {
-      if (cur->is_young()) {
-        double end_sec = os::elapsedTime();
-        double elapsed_ms = (end_sec - start_sec) * 1000.0;
-        non_young_time_ms += elapsed_ms;
-
-        start_sec = os::elapsedTime();
-        non_young = false;
-      }
+class G1FreeCollectionSetClosure : public HeapRegionClosure {
+private:
+  const size_t* _surviving_young_words;
+
+  FreeRegionList _local_free_list;
+  size_t _rs_lengths;
+  // Bytes used in successfully evacuated regions before the evacuation.
+  size_t _before_used_bytes;
+  // Bytes used in unsuccessfully evacuated regions after the evacuation.
+  size_t _after_used_bytes;
+
+  size_t _bytes_allocated_in_old_since_last_gc;
+
+  size_t _failure_used_words;
+  size_t _failure_waste_words;
+
+  double _young_time;
+  double _non_young_time;
+public:
+  G1FreeCollectionSetClosure(const size_t* surviving_young_words) :
+    HeapRegionClosure(),
+    _surviving_young_words(surviving_young_words),
+    _local_free_list("Local Region List for CSet Freeing"),
+    _rs_lengths(0),
+    _before_used_bytes(0),
+    _after_used_bytes(0),
+    _bytes_allocated_in_old_since_last_gc(0),
+    _failure_used_words(0),
+    _failure_waste_words(0),
+    _young_time(0.0),
+    _non_young_time(0.0) {
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    double start_time = os::elapsedTime();
+
+    bool is_young = r->is_young();
+
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    assert(!g1h->is_on_master_free_list(r), "sanity");
+
+    _rs_lengths += r->rem_set()->occupied_locked();
+
+    assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
+    g1h->clear_in_cset(r);
+
+    if (is_young) {
+      int index = r->young_index_in_cset();
+      assert(index != -1, "Young index in collection set must not be -1 for region %u", r->hrm_index());
+      assert((uint) index < g1h->collection_set()->young_region_length(), "invariant");
+      size_t words_survived = _surviving_young_words[index];
+      r->record_surv_words_in_group(words_survived);
     } else {
-      if (!cur->is_young()) {
-        double end_sec = os::elapsedTime();
-        double elapsed_ms = (end_sec - start_sec) * 1000.0;
-        young_time_ms += elapsed_ms;
-
-        start_sec = os::elapsedTime();
-        non_young = true;
-      }
+      assert(r->young_index_in_cset() == -1, "Young index for old region %u in collection set must be -1", r->hrm_index());
     }
 
-    rs_lengths += cur->rem_set()->occupied_locked();
-
-    HeapRegion* next = cur->next_in_collection_set();
-    assert(cur->in_collection_set(), "bad CS");
-    cur->set_next_in_collection_set(NULL);
-    clear_in_cset(cur);
-
-    if (cur->is_young()) {
-      int index = cur->young_index_in_cset();
-      assert(index != -1, "invariant");
-      assert((uint) index < collection_set()->young_region_length(), "invariant");
-      size_t words_survived = surviving_young_words[index];
-      cur->record_surv_words_in_group(words_survived);
-
+    if (!r->evacuation_failed()) {
+      assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
+      _before_used_bytes += r->used();
+      g1h->free_region(r, &_local_free_list, false /* par */, true /* locked */);
     } else {
-      int index = cur->young_index_in_cset();
-      assert(index == -1, "invariant");
-    }
-
-    assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
-            (!cur->is_young() && cur->young_index_in_cset() == -1),
-            "invariant" );
-
-    if (!cur->evacuation_failed()) {
-      MemRegion used_mr = cur->used_region();
-
-      // And the region is empty.
-      assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
-      pre_used += cur->used();
-      free_region(cur, &local_free_list, false /* par */, true /* locked */);
-    } else {
-      cur->uninstall_surv_rate_group();
-      if (cur->is_young()) {
-        cur->set_young_index_in_cset(-1);
-      }
-      cur->set_evacuation_failed(false);
+      r->uninstall_surv_rate_group();
+      r->set_young_index_in_cset(-1);
+      r->set_evacuation_failed(false);
       // When moving a young gen region to old gen, we "allocate" that whole region
       // there. This is in addition to any already evacuated objects. Notify the
       // policy about that.
       // Old gen regions do not cause an additional allocation: both the objects
       // still in the region and the ones already moved are accounted for elsewhere.
-      if (cur->is_young()) {
-        policy->add_bytes_allocated_in_old_since_last_gc(HeapRegion::GrainBytes);
+      if (is_young) {
+        _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
       }
       // The region is now considered to be old.
-      cur->set_old();
+      r->set_old();
       // Do some allocation statistics accounting. Regions that failed evacuation
       // are always made old, so there is no need to update anything in the young
       // gen statistics, but we need to update old gen statistics.
-      size_t used_words = cur->marked_bytes() / HeapWordSize;
-      _old_evac_stats.add_failure_used_and_waste(used_words, HeapRegion::GrainWords - used_words);
-      _old_set.add(cur);
-      evacuation_info.increment_collectionset_used_after(cur->used());
+      size_t used_words = r->marked_bytes() / HeapWordSize;
+
+      _failure_used_words += used_words;
+      _failure_waste_words += HeapRegion::GrainWords - used_words;
+
+      g1h->old_set_add(r);
+      _after_used_bytes += r->used();
     }
-    cur = next;
+
+    if (is_young) {
+      _young_time += os::elapsedTime() - start_time;
+    } else {
+      _non_young_time += os::elapsedTime() - start_time;
+    }
+    return false;
   }
 
-  evacuation_info.set_regions_freed(local_free_list.length());
-  policy->record_max_rs_lengths(rs_lengths);
+  FreeRegionList* local_free_list() { return &_local_free_list; }
+  size_t rs_lengths() const { return _rs_lengths; }
+  size_t before_used_bytes() const { return _before_used_bytes; }
+  size_t after_used_bytes() const { return _after_used_bytes; }
+
+  size_t bytes_allocated_in_old_since_last_gc() const { return _bytes_allocated_in_old_since_last_gc; }
+
+  size_t failure_used_words() const { return _failure_used_words; }
+  size_t failure_waste_words() const { return _failure_waste_words; }
+
+  double young_time() const { return _young_time; }
+  double non_young_time() const { return _non_young_time; }
+};
+
+void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
+  _eden.clear();
+
+  G1FreeCollectionSetClosure cl(surviving_young_words);
+  collection_set_iterate(&cl);
+
+  evacuation_info.set_regions_freed(cl.local_free_list()->length());
+  evacuation_info.increment_collectionset_used_after(cl.after_used_bytes());
+
+  G1Policy* policy = g1_policy();
+
+  policy->record_max_rs_lengths(cl.rs_lengths());
   policy->cset_regions_freed();
 
-  double end_sec = os::elapsedTime();
-  double elapsed_ms = (end_sec - start_sec) * 1000.0;
-
-  if (non_young) {
-    non_young_time_ms += elapsed_ms;
-  } else {
-    young_time_ms += elapsed_ms;
-  }
-
-  prepend_to_freelist(&local_free_list);
-  decrement_summary_bytes(pre_used);
-  policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
-  policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
+  prepend_to_freelist(cl.local_free_list());
+  decrement_summary_bytes(cl.before_used_bytes());
+
+  policy->add_bytes_allocated_in_old_since_last_gc(cl.bytes_allocated_in_old_since_last_gc());
+
+  _old_evac_stats.add_failure_used_and_waste(cl.failure_used_words(), cl.failure_waste_words());
+
+  policy->phase_times()->record_young_free_cset_time_ms(cl.young_time() * 1000.0);
+  policy->phase_times()->record_non_young_free_cset_time_ms(cl.non_young_time() * 1000.0);
+
+  collection_set->clear();
 }
 
 class G1FreeHumongousRegionClosure : public HeapRegionClosure {
@@ -4960,25 +4875,22 @@
                                                                     cl.humongous_free_count());
 }
 
-// This routine is similar to the above but does not record
-// any policy statistics or update free lists; we are abandoning
-// the current incremental collection set in preparation of a
-// full collection. After the full GC we will start to build up
-// the incremental collection set again.
-// This is only called when we're doing a full collection
-// and is immediately followed by the tearing down of the young list.
-
-void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
-  HeapRegion* cur = cs_head;
-
-  while (cur != NULL) {
-    HeapRegion* next = cur->next_in_collection_set();
-    assert(cur->in_collection_set(), "bad CS");
-    cur->set_next_in_collection_set(NULL);
-    clear_in_cset(cur);
-    cur->set_young_index_in_cset(-1);
-    cur = next;
+class G1AbandonCollectionSetClosure : public HeapRegionClosure {
+public:
+  virtual bool doHeapRegion(HeapRegion* r) {
+    assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
+    G1CollectedHeap::heap()->clear_in_cset(r);
+    r->set_young_index_in_cset(-1);
+    return false;
   }
+};
+
+void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
+  G1AbandonCollectionSetClosure cl;
+  collection_set->iterate(&cl);
+
+  collection_set->clear();
+  collection_set->stop_incremental_building();
 }
 
 void G1CollectedHeap::set_free_regions_coming() {
--- a/src/share/vm/gc/g1/g1CollectedHeap.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/g1CollectedHeap.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -778,13 +778,13 @@
   // The closure used to refine a single card.
   RefineCardTableEntryClosure* _refine_cte_cl;
 
-  // After a collection pause, make the regions in the CS into free
+  // After a collection pause, convert the regions in the collection set into free
   // regions.
-  void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
+  void free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
 
   // Abandon the current collection set without recording policy
   // statistics or updating free lists.
-  void abandon_collection_set(HeapRegion* cs_head);
+  void abandon_collection_set(G1CollectionSet* collection_set);
 
   // The concurrent marker (and the thread it runs in.)
   G1ConcurrentMark* _cm;
@@ -930,16 +930,6 @@
   // discovery.
   G1CMIsAliveClosure _is_alive_closure_cm;
 
-  // Cache used by G1CollectedHeap::start_cset_region_for_worker().
-  HeapRegion** _worker_cset_start_region;
-
-  // Time stamp to validate the regions recorded in the cache
-  // used by G1CollectedHeap::start_cset_region_for_worker().
-  // The heap region entry for a given worker is valid iff
-  // the associated time stamp value matches the current value
-  // of G1CollectedHeap::_gc_time_stamp.
-  uint* _worker_cset_start_region_time_stamp;
-
   volatile bool _free_regions_coming;
 
 public:
@@ -1211,19 +1201,14 @@
                                HeapRegionClaimer* hrclaimer,
                                bool concurrent = false) const;
 
-  // Clear the cached cset start regions and (more importantly)
-  // the time stamps. Called when we reset the GC time stamp.
-  void clear_cset_start_regions();
-
-  // Given the id of a worker, obtain or calculate a suitable
-  // starting region for iterating over the current collection set.
-  HeapRegion* start_cset_region_for_worker(uint worker_i);
-
   // Iterate over the regions (if any) in the current collection set.
   void collection_set_iterate(HeapRegionClosure* blk);
 
-  // As above but starting from region r
-  void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
+  // Iterate over the regions (if any) in the current collection set. The start
+  // region of a given worker id is chosen so that the start regions of all
+  // active_workers workers are evenly spread across the collection set regions.
+  void collection_set_iterate_from(HeapRegionClosure *blk, uint worker_id);
 
   HeapRegion* next_compaction_region(const HeapRegion* from) const;
 
--- a/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -89,16 +89,13 @@
 }
 
 inline void G1CollectedHeap::reset_gc_time_stamp() {
+  assert_at_safepoint(true);
   _gc_time_stamp = 0;
-  OrderAccess::fence();
-  // Clear the cached CSet starting regions and time stamps.
-  // Their validity is dependent on the GC timestamp.
-  clear_cset_start_regions();
 }
 
 inline void G1CollectedHeap::increment_gc_time_stamp() {
+  assert_at_safepoint(true);
   ++_gc_time_stamp;
-  OrderAccess::fence();
 }
 
 inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
--- a/src/share/vm/gc/g1/g1CollectionSet.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/g1CollectionSet.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -30,6 +30,7 @@
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.hpp"
+#include "logging/logStream.hpp"
 #include "utilities/debug.hpp"
 
 G1CollectorState* G1CollectionSet::collector_state() {
@@ -55,48 +56,63 @@
   _eden_region_length(0),
   _survivor_region_length(0),
   _old_region_length(0),
-
-  _head(NULL),
   _bytes_used_before(0),
   _recorded_rs_lengths(0),
+  _collection_set_regions(NULL),
+  _collection_set_cur_length(0),
+  _collection_set_max_length(0),
   // Incremental CSet attributes
   _inc_build_state(Inactive),
-  _inc_head(NULL),
-  _inc_tail(NULL),
   _inc_bytes_used_before(0),
   _inc_recorded_rs_lengths(0),
   _inc_recorded_rs_lengths_diffs(0),
   _inc_predicted_elapsed_time_ms(0.0),
-  _inc_predicted_elapsed_time_ms_diffs(0.0),
-  _inc_region_length(0) {}
+  _inc_predicted_elapsed_time_ms_diffs(0.0) {
+}
 
 G1CollectionSet::~G1CollectionSet() {
+  if (_collection_set_regions != NULL) {
+    FREE_C_HEAP_ARRAY(uint, _collection_set_regions);
+  }
   delete _cset_chooser;
 }
 
 void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
                                           uint survivor_cset_region_length) {
+  assert_at_safepoint(true);
+
   _eden_region_length     = eden_cset_region_length;
   _survivor_region_length = survivor_cset_region_length;
 
-  assert(young_region_length() == _inc_region_length, "should match %u == %u", young_region_length(), _inc_region_length);
+  assert((size_t) young_region_length() == _collection_set_cur_length,
+         "Young region length %u should match collection set length " SIZE_FORMAT, young_region_length(), _collection_set_cur_length);
 
   _old_region_length      = 0;
 }
 
+void G1CollectionSet::initialize(uint max_region_length) {
+  guarantee(_collection_set_regions == NULL, "Must only initialize once.");
+  _collection_set_max_length = max_region_length;
+  _collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC);
+}
+
 void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
   _recorded_rs_lengths = rs_lengths;
 }
 
 // Add the heap region at the head of the non-incremental collection set
 void G1CollectionSet::add_old_region(HeapRegion* hr) {
+  assert_at_safepoint(true);
+
   assert(_inc_build_state == Active, "Precondition");
   assert(hr->is_old(), "the region should be old");
 
   assert(!hr->in_collection_set(), "should not already be in the CSet");
   _g1->register_old_region_with_cset(hr);
-  hr->set_next_in_collection_set(_head);
-  _head = hr;
+
+  _collection_set_regions[_collection_set_cur_length++] = hr->hrm_index();
+  assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set now larger than maximum size.");
+
   _bytes_used_before += hr->used();
   size_t rs_length = hr->rem_set()->occupied();
   _recorded_rs_lengths += rs_length;
@@ -105,12 +121,10 @@
 
 // Initialize the per-collection-set information
 void G1CollectionSet::start_incremental_building() {
+  assert(_collection_set_cur_length == 0, "Collection set must be empty before starting a new collection set.");
   assert(_inc_build_state == Inactive, "Precondition");
 
-  _inc_head = NULL;
-  _inc_tail = NULL;
   _inc_bytes_used_before = 0;
-  _inc_region_length = 0;
 
   _inc_recorded_rs_lengths = 0;
   _inc_recorded_rs_lengths_diffs = 0;
@@ -151,6 +165,38 @@
   _inc_predicted_elapsed_time_ms_diffs = 0.0;
 }
 
+void G1CollectionSet::clear() {
+  assert_at_safepoint(true);
+  _collection_set_cur_length = 0;
+}
+
+void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
+  iterate_from(cl, 0, 1);
+}
+
+void G1CollectionSet::iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const {
+  size_t len = _collection_set_cur_length;
+  OrderAccess::loadload();
+  if (len == 0) {
+    return;
+  }
+  size_t start_pos = (worker_id * len) / total_workers;
+  size_t cur_pos = start_pos;
+
+  do {
+    HeapRegion* r = G1CollectedHeap::heap()->region_at(_collection_set_regions[cur_pos]);
+    bool result = cl->doHeapRegion(r);
+    if (result) {
+      cl->incomplete();
+      return;
+    }
+    cur_pos++;
+    if (cur_pos == len) {
+      cur_pos = 0;
+    }
+  } while (cur_pos != start_pos);
+}
+
 void G1CollectionSet::update_young_region_prediction(HeapRegion* hr,
                                                      size_t new_rs_length) {
   // Update the CSet information that is dependent on the new RS length
@@ -183,8 +229,16 @@
   assert(hr->is_young(), "invariant");
   assert(_inc_build_state == Active, "Precondition");
 
-  hr->set_young_index_in_cset(_inc_region_length);
-  _inc_region_length++;
+  size_t collection_set_length = _collection_set_cur_length;
+  assert(collection_set_length <= INT_MAX, "Collection set is too large with %d entries", (int)collection_set_length);
+  hr->set_young_index_in_cset((int)collection_set_length);
+
+  _collection_set_regions[collection_set_length] = hr->hrm_index();
+  // Concurrent readers must observe the store of the value in the array before an
+  // update to the length field.
+  OrderAccess::storestore();
+  _collection_set_cur_length++;
+  assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set larger than maximum allowed.");
 
   // This routine is used when:
   // * adding survivor regions to the incremental cset at the end of an
@@ -218,59 +272,81 @@
 
   assert(!hr->in_collection_set(), "invariant");
   _g1->register_young_region_with_cset(hr);
-  assert(hr->next_in_collection_set() == NULL, "invariant");
 }
 
-// Add the region at the RHS of the incremental cset
 void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
-  // We should only ever be appending survivors at the end of a pause
-  assert(hr->is_survivor(), "Logic");
-
-  // Do the 'common' stuff
+  assert(hr->is_survivor(), "Must only add survivor regions, but is %s", hr->get_type_str());
   add_young_region_common(hr);
-
-  // Now add the region at the right hand side
-  if (_inc_tail == NULL) {
-    assert(_inc_head == NULL, "invariant");
-    _inc_head = hr;
-  } else {
-    _inc_tail->set_next_in_collection_set(hr);
-  }
-  _inc_tail = hr;
 }
 
-// Add the region to the LHS of the incremental cset
 void G1CollectionSet::add_eden_region(HeapRegion* hr) {
-  // Survivors should be added to the RHS at the end of a pause
-  assert(hr->is_eden(), "Logic");
-
-  // Do the 'common' stuff
+  assert(hr->is_eden(), "Must only add eden regions, but is %s", hr->get_type_str());
   add_young_region_common(hr);
-
-  // Add the region at the left hand side
-  hr->set_next_in_collection_set(_inc_head);
-  if (_inc_head == NULL) {
-    assert(_inc_tail == NULL, "Invariant");
-    _inc_tail = hr;
-  }
-  _inc_head = hr;
 }
 
 #ifndef PRODUCT
-void G1CollectionSet::print(HeapRegion* list_head, outputStream* st) {
-  assert(list_head == inc_head() || list_head == head(), "must be");
+class G1VerifyYoungAgesClosure : public HeapRegionClosure {
+public:
+  bool _valid;
+public:
+  G1VerifyYoungAgesClosure() : HeapRegionClosure(), _valid(true) { }
 
+  virtual bool doHeapRegion(HeapRegion* r) {
+    guarantee(r->is_young(), "Region must be young but is %s", r->get_type_str());
+
+    SurvRateGroup* group = r->surv_rate_group();
+
+    if (group == NULL) {
+      log_error(gc, verify)("## encountered NULL surv_rate_group in young region");
+      _valid = false;
+    }
+
+    if (r->age_in_surv_rate_group() < 0) {
+      log_error(gc, verify)("## encountered negative age in young region");
+      _valid = false;
+    }
+
+    return false;
+  }
+
+  bool valid() const { return _valid; }
+};
+
+bool G1CollectionSet::verify_young_ages() {
+  assert_at_safepoint(true);
+
+  G1VerifyYoungAgesClosure cl;
+  iterate(&cl);
+
+  if (!cl.valid()) {
+    LogStreamHandle(Error, gc, verify) log;
+    print(&log);
+  }
+
+  return cl.valid();
+}
+
+class G1PrintCollectionSetClosure : public HeapRegionClosure {
+  outputStream* _st;
+public:
+  G1PrintCollectionSetClosure(outputStream* st) : HeapRegionClosure(), _st(st) { }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    assert(r->in_collection_set(), "Region %u should be in collection set", r->hrm_index());
+    _st->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d",
+                  HR_FORMAT_PARAMS(r),
+                  p2i(r->prev_top_at_mark_start()),
+                  p2i(r->next_top_at_mark_start()),
+                  r->age_in_surv_rate_group_cond());
+    return false;
+  }
+};
+
+void G1CollectionSet::print(outputStream* st) {
   st->print_cr("\nCollection_set:");
-  HeapRegion* csr = list_head;
-  while (csr != NULL) {
-    HeapRegion* next = csr->next_in_collection_set();
-    assert(csr->in_collection_set(), "bad CS");
-    st->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d",
-                 HR_FORMAT_PARAMS(csr),
-                 p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()),
-                 csr->age_in_surv_rate_group_cond());
-    csr = next;
-  }
+
+  G1PrintCollectionSetClosure cl(st);
+  iterate(&cl);
 }
 #endif // !PRODUCT
 
@@ -281,7 +357,6 @@
 
   guarantee(target_pause_time_ms > 0.0,
             "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
-  guarantee(_head == NULL, "Precondition");
 
   size_t pending_cards = _policy->pending_cards();
   double base_time_ms = _policy->predict_base_elapsed_time_ms(pending_cards);
@@ -305,7 +380,6 @@
   // Clear the fields that point to the survivor list - they are all young now.
   survivors->convert_to_eden();
 
-  _head = _inc_head;
   _bytes_used_before = _inc_bytes_used_before;
   time_remaining_ms = MAX2(time_remaining_ms - _inc_predicted_elapsed_time_ms, 0.0);
 
@@ -422,23 +496,41 @@
 }
 
 #ifdef ASSERT
-void G1CollectionSet::verify_young_cset_indices() const {
-  ResourceMark rm;
-  uint* heap_region_indices = NEW_RESOURCE_ARRAY(uint, young_region_length());
-  for (uint i = 0; i < young_region_length(); ++i) {
-    heap_region_indices[i] = (uint)-1;
+class G1VerifyYoungCSetIndicesClosure : public HeapRegionClosure {
+private:
+  size_t _young_length;
+  int* _heap_region_indices;
+public:
+  G1VerifyYoungCSetIndicesClosure(size_t young_length) : HeapRegionClosure(), _young_length(young_length) {
+    _heap_region_indices = NEW_C_HEAP_ARRAY(int, young_length, mtGC);
+    for (size_t i = 0; i < young_length; i++) {
+      _heap_region_indices[i] = -1;
+    }
+  }
+  ~G1VerifyYoungCSetIndicesClosure() {
+    FREE_C_HEAP_ARRAY(int, _heap_region_indices);
   }
 
-  for (HeapRegion* hr = _inc_head; hr != NULL; hr = hr->next_in_collection_set()) {
-    const int idx = hr->young_index_in_cset();
-    assert(idx > -1, "must be set for all inc cset regions");
-    assert((uint)idx < young_region_length(), "young cset index too large");
+  virtual bool doHeapRegion(HeapRegion* r) {
+    const int idx = r->young_index_in_cset();
 
-    assert(heap_region_indices[idx] == (uint)-1,
-           "index %d used by multiple regions, first use by %u, second by %u",
-           idx, heap_region_indices[idx], hr->hrm_index());
+    assert(idx > -1, "Young index must be set for all regions in the incremental collection set but is not for region %u.", r->hrm_index());
+    assert((size_t)idx < _young_length, "Young cset index too large for region %u", r->hrm_index());
 
-    heap_region_indices[idx] = hr->hrm_index();
+    assert(_heap_region_indices[idx] == -1,
+           "Index %d used by multiple regions, first use by region %u, second by region %u",
+           idx, _heap_region_indices[idx], r->hrm_index());
+
+    _heap_region_indices[idx] = r->hrm_index();
+
+    return false;
   }
+};
+
+void G1CollectionSet::verify_young_cset_indices() const {
+  assert_at_safepoint(true);
+
+  G1VerifyYoungCSetIndicesClosure cl(_collection_set_cur_length);
+  iterate(&cl);
 }
 #endif
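
Note on the verification closures above: both G1VerifyYoungAgesClosure and G1VerifyYoungCSetIndicesClosure rely on G1CollectionSet::iterate(), whose body does not appear in this part of the changeset. The friend declaration added to HeapRegionClosure in heapRegion.hpp further down suggests that the iteration code calls the private incomplete() method when a closure asks to abort. A minimal sketch of the assumed semantics, using the new array-backed fields; this is an illustration, not a quote from the changeset:

    // Sketch only: apply the closure to every region currently in the
    // array-backed collection set, honoring the closure abort protocol.
    void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
      size_t len = _collection_set_cur_length;
      OrderAccess::loadload();  // pairs with the writer's storestore barrier
      for (size_t i = 0; i < len; i++) {
        HeapRegion* r = G1CollectedHeap::heap()->region_at(_collection_set_regions[i]);
        if (cl->doHeapRegion(r)) {
          cl->incomplete();     // callers such as the sampling thread check cl->complete()
          return;
        }
      }
    }
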
--- a/src/share/vm/gc/g1/g1CollectionSet.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/g1CollectionSet.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -47,10 +47,15 @@
   uint _survivor_region_length;
   uint _old_region_length;
 
-  // The head of the list (via "next_in_collection_set()") representing the
-  // current collection set. Set from the incrementally built collection
-  // set at the start of the pause.
-  HeapRegion* _head;
+  // The actual collection set as a set of region indices.
+  // All entries in _collection_set_regions below _collection_set_cur_length are
+  // assumed to be valid entries.
+  // We assume that at any time there is at most one writer and one or more
+  // concurrent readers. This means it is sufficient to use a storestore barrier
+  // on the writer side and a loadload barrier on the reader side.
+  uint* _collection_set_regions;
+  volatile size_t _collection_set_cur_length;
+  size_t _collection_set_max_length;
 
   // The number of bytes in the collection set before the pause. Set from
   // the incrementally built collection set at the start of an evacuation
@@ -71,12 +76,6 @@
 
   CSetBuildType _inc_build_state;
 
-  // The head of the incrementally built collection set.
-  HeapRegion* _inc_head;
-
-  // The tail of the incrementally built collection set.
-  HeapRegion* _inc_tail;
-
   // The number of bytes in the incrementally built collection set.
   // Used to set _collection_set_bytes_used_before at the start of
   // an evacuation pause.
@@ -105,8 +104,6 @@
   // See the comment for _inc_recorded_rs_lengths_diffs.
   double _inc_predicted_elapsed_time_ms_diffs;
 
-  uint _inc_region_length;
-
   G1CollectorState* collector_state();
   G1GCPhaseTimes* phase_times();
 
@@ -117,6 +114,9 @@
   G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy);
   ~G1CollectionSet();
 
+  // Initializes the collection set giving the maximum possible length of the collection set.
+  void initialize(uint max_region_length);
+
   CollectionSetChooser* cset_chooser();
 
   void init_region_lengths(uint eden_cset_region_length,
@@ -133,36 +133,31 @@
   uint survivor_region_length() const { return _survivor_region_length; }
   uint old_region_length() const      { return _old_region_length;      }
 
-  // Incremental CSet Support
-
-  // The head of the incrementally built collection set.
-  HeapRegion* inc_head() { return _inc_head; }
-
-  // The tail of the incrementally built collection set.
-  HeapRegion* inc_tail() { return _inc_tail; }
+  // Incremental collection set support
 
   // Initialize incremental collection set info.
   void start_incremental_building();
 
-  // Perform any final calculations on the incremental CSet fields
+  // Perform any final calculations on the incremental collection set fields
   // before we can use them.
   void finalize_incremental_building();
 
-  void clear_incremental() {
-    _inc_head = NULL;
-    _inc_tail = NULL;
-    _inc_region_length = 0;
-  }
+  // Reset the contents of the collection set.
+  void clear();
 
-  // Stop adding regions to the incremental collection set
+  // Iterate over the collection set, applying the given HeapRegionClosure to every
+  // region in it. Iteration may be aborted early through the return value of the
+  // closure's doHeapRegion() method.
+  void iterate(HeapRegionClosure* cl) const;
+
+  // Iterate over the collection set, applying the given HeapRegionClosure to every
+  // region in it, spreading the starting positions of the total_workers workers
+  // evenly across the collection set based on the caller's worker_id.
+  void iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const;
+
+  // Stop adding regions to the incremental collection set.
   void stop_incremental_building() { _inc_build_state = Inactive; }
 
-  // The head of the list (via "next_in_collection_set()") representing the
-  // current collection set.
-  HeapRegion* head() { return _head; }
-
-  void clear_head() { _head = NULL; }
-
   size_t recorded_rs_lengths() { return _recorded_rs_lengths; }
 
   size_t bytes_used_before() const {
@@ -174,33 +169,32 @@
   }
 
   // Choose a new collection set.  Marks the chosen regions as being
-  // "in_collection_set", and links them together.  The head and number of
-  // the collection set are available via access methods.
+  // "in_collection_set".
   double finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors);
   void finalize_old_part(double time_remaining_ms);
 
-  // Add old region "hr" to the CSet.
+  // Add old region "hr" to the collection set.
   void add_old_region(HeapRegion* hr);
 
   // Update information about hr in the aggregated information for
   // the incrementally built collection set.
   void update_young_region_prediction(HeapRegion* hr, size_t new_rs_length);
 
-  // Add hr to the LHS of the incremental collection set.
+  // Add eden region to the collection set.
   void add_eden_region(HeapRegion* hr);
 
-  // Add hr to the RHS of the incremental collection set.
+  // Add survivor region to the collection set.
   void add_survivor_regions(HeapRegion* hr);
 
 #ifndef PRODUCT
-  void print(HeapRegion* list_head, outputStream* st);
+  bool verify_young_ages();
+
+  void print(outputStream* st);
 #endif // !PRODUCT
 
 private:
-  // Update the incremental cset information when adding a region
-  // (should not be called directly).
+  // Update the incremental collection set information when adding a region.
   void add_young_region_common(HeapRegion* hr);
-
 };
 
 #endif // SHARE_VM_GC_G1_G1COLLECTIONSET_HPP
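
The comment on _collection_set_regions in the hunk above assumes a single writer and one or more concurrent readers that synchronize only through storestore/loadload barriers. A writer-side sketch of that pairing, matching the loadload shown in the iterate() sketch after the g1CollectionSet.cpp hunks; the helper below is hypothetical and only the field semantics come from the changeset:

    // Sketch only: publish the new entry before publishing the new length, so a
    // reader that loads the length and then issues a loadload barrier sees valid
    // entries at all indices below that length.
    void append_region(uint* regions, volatile size_t* cur_length, uint hrm_index) {
      regions[*cur_length] = hrm_index;
      OrderAccess::storestore();
      *cur_length = *cur_length + 1;
    }
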
--- a/src/share/vm/gc/g1/g1DefaultPolicy.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/g1DefaultPolicy.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -394,37 +394,6 @@
   }
 }
 
-#ifndef PRODUCT
-bool G1DefaultPolicy::verify_young_ages() {
-  bool ret = true;
-
-  for (HeapRegion* curr = _collection_set->inc_head();
-       curr != NULL;
-       curr = curr->next_in_collection_set()) {
-    guarantee(curr->is_young(), "Region must be young");
-
-    SurvRateGroup* group = curr->surv_rate_group();
-
-    if (group == NULL) {
-      log_error(gc, verify)("## encountered NULL surv_rate_group in young region");
-      ret = false;
-    }
-
-    if (curr->age_in_surv_rate_group() < 0) {
-      log_error(gc, verify)("## encountered negative age in young region");
-      ret = false;
-    }
-  }
-
-  if (!ret) {
-    LogStreamHandle(Error, gc, verify) log;
-    _collection_set->print(_collection_set->inc_head(), &log);
-  }
-
-  return ret;
-}
-#endif // PRODUCT
-
 void G1DefaultPolicy::record_full_collection_start() {
   _full_collection_start_sec = os::elapsedTime();
   // Release the future to-space so that it is available for compaction into.
@@ -488,7 +457,7 @@
   _short_lived_surv_rate_group->stop_adding_regions();
   _survivors_age_table.clear();
 
-  assert( verify_young_ages(), "region age verification" );
+  assert(_g1->collection_set()->verify_young_ages(), "region age verification failed");
 }
 
 void G1DefaultPolicy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
--- a/src/share/vm/gc/g1/g1DefaultPolicy.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/g1DefaultPolicy.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -89,10 +89,6 @@
 
   size_t _rs_lengths_prediction;
 
-#ifndef PRODUCT
-  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
-#endif // PRODUCT
-
   size_t _pending_cards;
 
   // The amount of allocated bytes in old gen during the last mutator and the following
@@ -116,10 +112,6 @@
     hr->install_surv_rate_group(_survivor_surv_rate_group);
   }
 
-#ifndef PRODUCT
-  bool verify_young_ages();
-#endif // PRODUCT
-
   void record_max_rs_lengths(size_t rs_lengths) {
     _max_rs_lengths = rs_lengths;
   }
--- a/src/share/vm/gc/g1/g1EvacFailure.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/g1EvacFailure.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -251,6 +251,5 @@
 void G1ParRemoveSelfForwardPtrsTask::work(uint worker_id) {
   RemoveSelfForwardPtrHRClosure rsfp_cl(worker_id, &_hrclaimer);
 
-  HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
-  _g1h->collection_set_iterate_from(hr, &rsfp_cl);
+  _g1h->collection_set_iterate_from(&rsfp_cl, worker_id);
 }
--- a/src/share/vm/gc/g1/g1HeapVerifier.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/g1HeapVerifier.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -580,15 +580,20 @@
   }
 }
 
-void G1HeapVerifier::verify_dirty_young_list(HeapRegion* head) {
-  G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
-  for (HeapRegion* hr = head; hr != NULL; hr = hr->next_in_collection_set()) {
-    verify_dirty_region(hr);
+class G1VerifyDirtyYoungListClosure : public HeapRegionClosure {
+private:
+  G1HeapVerifier* _verifier;
+public:
+  G1VerifyDirtyYoungListClosure(G1HeapVerifier* verifier) : HeapRegionClosure(), _verifier(verifier) { }
+  virtual bool doHeapRegion(HeapRegion* r) {
+    _verifier->verify_dirty_region(r);
+    return false;
   }
-}
+};
 
 void G1HeapVerifier::verify_dirty_young_regions() {
-  verify_dirty_young_list(_g1h->collection_set()->inc_head());
+  G1VerifyDirtyYoungListClosure cl(this);
+  _g1h->collection_set()->iterate(&cl);
 }
 
 bool G1HeapVerifier::verify_no_bits_over_tams(const char* bitmap_name, G1CMBitMapRO* bitmap,
--- a/src/share/vm/gc/g1/g1HeapVerifier.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/g1HeapVerifier.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -108,7 +108,6 @@
 
   void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
-  void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
   void verify_dirty_young_regions() PRODUCT_RETURN;
 };
 
--- a/src/share/vm/gc/g1/g1RemSet.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/g1RemSet.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -382,10 +382,8 @@
                               uint worker_i) {
   double rs_time_start = os::elapsedTime();
 
-  HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
-
   G1ScanRSClosure cl(_scan_state, oops_in_heap_closure, heap_region_codeblobs, worker_i);
-  _g1->collection_set_iterate_from(startRegion, &cl);
+  _g1->collection_set_iterate_from(&cl, worker_i);
 
    double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) -
                               cl.strong_code_root_scan_time_sec();
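
Both the self-forward-pointer removal task (g1EvacFailure.cpp above) and this remembered set scan now pass only a worker id; the per-worker start region is computed inside the collection set iteration instead of by G1CollectedHeap::start_cset_region_for_worker(). A sketch of the starting-offset arithmetic that iterate_from() presumably uses to spread workers across the region array; the formula is an assumption, not quoted from this section:

    // Sketch only: give each of total_workers workers a distinct starting index,
    // then visit all length entries, wrapping around at the end of the array.
    size_t start_index(size_t length, uint worker_id, uint total_workers) {
      return (length * worker_id) / total_workers;
    }
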
--- a/src/share/vm/gc/g1/g1StringDedupQueue.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/g1StringDedupQueue.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -154,8 +154,8 @@
 }
 
 void G1StringDedupQueue::print_statistics() {
-  log_debug(gc, stringdedup)("   [Queue]");
-  log_debug(gc, stringdedup)("      [Dropped: " UINTX_FORMAT "]", _queue->_dropped);
+  log_debug(gc, stringdedup)("  Queue");
+  log_debug(gc, stringdedup)("    Dropped: " UINTX_FORMAT, _queue->_dropped);
 }
 
 void G1StringDedupQueue::verify() {
--- a/src/share/vm/gc/g1/g1StringDedupStat.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/g1StringDedupStat.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,9 @@
   _idle(0),
   _exec(0),
   _block(0),
-  _start(0.0),
+  _start_concurrent(0.0),
+  _end_concurrent(0.0),
+  _start_phase(0.0),
   _idle_elapsed(0.0),
   _exec_elapsed(0.0),
   _block_elapsed(0.0) {
@@ -69,7 +71,13 @@
   _block_elapsed       += stat._block_elapsed;
 }
 
-void G1StringDedupStat::print_summary(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
+void G1StringDedupStat::print_start(const G1StringDedupStat& last_stat) {
+  log_info(gc, stringdedup)(
+     "Concurrent String Deduplication (" G1_STRDEDUP_TIME_FORMAT ")",
+     G1_STRDEDUP_TIME_PARAM(last_stat._start_concurrent));
+}
+
+void G1StringDedupStat::print_end(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
   double total_deduped_bytes_percent = 0.0;
 
   if (total_stat._new_bytes > 0) {
@@ -79,13 +87,16 @@
 
   log_info(gc, stringdedup)(
     "Concurrent String Deduplication "
-    G1_STRDEDUP_BYTES_FORMAT_NS "->" G1_STRDEDUP_BYTES_FORMAT_NS "(" G1_STRDEDUP_BYTES_FORMAT_NS "), avg "
-    G1_STRDEDUP_PERCENT_FORMAT_NS ", " G1_STRDEDUP_TIME_FORMAT,
+    G1_STRDEDUP_BYTES_FORMAT_NS "->" G1_STRDEDUP_BYTES_FORMAT_NS "(" G1_STRDEDUP_BYTES_FORMAT_NS ") "
+    "avg " G1_STRDEDUP_PERCENT_FORMAT_NS " "
+    "(" G1_STRDEDUP_TIME_FORMAT ", " G1_STRDEDUP_TIME_FORMAT ") " G1_STRDEDUP_TIME_FORMAT_MS,
     G1_STRDEDUP_BYTES_PARAM(last_stat._new_bytes),
     G1_STRDEDUP_BYTES_PARAM(last_stat._new_bytes - last_stat._deduped_bytes),
     G1_STRDEDUP_BYTES_PARAM(last_stat._deduped_bytes),
     total_deduped_bytes_percent,
-    last_stat._exec_elapsed);
+    G1_STRDEDUP_TIME_PARAM(last_stat._start_concurrent),
+    G1_STRDEDUP_TIME_PARAM(last_stat._end_concurrent),
+    G1_STRDEDUP_TIME_PARAM_MS(last_stat._exec_elapsed));
 }
 
 void G1StringDedupStat::print_statistics(const G1StringDedupStat& stat, bool total) {
@@ -134,23 +145,31 @@
 
   if (total) {
     log_debug(gc, stringdedup)(
-      "   [Total Exec: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT ", Idle: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT ", Blocked: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT "]",
-      stat._exec, stat._exec_elapsed, stat._idle, stat._idle_elapsed, stat._block, stat._block_elapsed);
+      "  Total Exec: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT_MS
+      ", Idle: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT_MS
+      ", Blocked: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT_MS,
+      stat._exec, G1_STRDEDUP_TIME_PARAM_MS(stat._exec_elapsed),
+      stat._idle, G1_STRDEDUP_TIME_PARAM_MS(stat._idle_elapsed),
+      stat._block, G1_STRDEDUP_TIME_PARAM_MS(stat._block_elapsed));
   } else {
     log_debug(gc, stringdedup)(
-      "   [Last Exec: " G1_STRDEDUP_TIME_FORMAT ", Idle: " G1_STRDEDUP_TIME_FORMAT ", Blocked: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT "]",
-      stat._exec_elapsed, stat._idle_elapsed, stat._block, stat._block_elapsed);
+      "  Last Exec: " G1_STRDEDUP_TIME_FORMAT_MS
+      ", Idle: " G1_STRDEDUP_TIME_FORMAT_MS
+      ", Blocked: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT_MS,
+      G1_STRDEDUP_TIME_PARAM_MS(stat._exec_elapsed),
+      G1_STRDEDUP_TIME_PARAM_MS(stat._idle_elapsed),
+      stat._block, G1_STRDEDUP_TIME_PARAM_MS(stat._block_elapsed));
   }
-  log_debug(gc, stringdedup)("      [Inspected:    " G1_STRDEDUP_OBJECTS_FORMAT "]", stat._inspected);
-  log_debug(gc, stringdedup)("         [Skipped:   " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]", stat._skipped, skipped_percent);
-  log_debug(gc, stringdedup)("         [Hashed:    " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]", stat._hashed, hashed_percent);
-  log_debug(gc, stringdedup)("         [Known:     " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]", stat._known, known_percent);
-  log_debug(gc, stringdedup)("         [New:       " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "]",
+  log_debug(gc, stringdedup)("    Inspected:    " G1_STRDEDUP_OBJECTS_FORMAT, stat._inspected);
+  log_debug(gc, stringdedup)("      Skipped:    " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")", stat._skipped, skipped_percent);
+  log_debug(gc, stringdedup)("      Hashed:     " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")", stat._hashed, hashed_percent);
+  log_debug(gc, stringdedup)("      Known:      " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")", stat._known, known_percent);
+  log_debug(gc, stringdedup)("      New:        " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT,
                              stat._new, new_percent, G1_STRDEDUP_BYTES_PARAM(stat._new_bytes));
-  log_debug(gc, stringdedup)("      [Deduplicated: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]",
+  log_debug(gc, stringdedup)("    Deduplicated: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")",
                              stat._deduped, deduped_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_bytes), deduped_bytes_percent);
-  log_debug(gc, stringdedup)("         [Young:     " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]",
+  log_debug(gc, stringdedup)("      Young:      " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")",
                              stat._deduped_young, deduped_young_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_young_bytes), deduped_young_bytes_percent);
-  log_debug(gc, stringdedup)("         [Old:       " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]",
+  log_debug(gc, stringdedup)("      Old:        " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")",
                              stat._deduped_old, deduped_old_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_old_bytes), deduped_old_bytes_percent);
 }
--- a/src/share/vm/gc/g1/g1StringDedupStat.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/g1StringDedupStat.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,11 +30,14 @@
 
 // Macros for GC log output formatting
 #define G1_STRDEDUP_OBJECTS_FORMAT         UINTX_FORMAT_W(12)
-#define G1_STRDEDUP_TIME_FORMAT            "%1.7lf secs"
-#define G1_STRDEDUP_PERCENT_FORMAT         "%5.1lf%%"
-#define G1_STRDEDUP_PERCENT_FORMAT_NS      "%.1lf%%"
-#define G1_STRDEDUP_BYTES_FORMAT           "%8.1lf%s"
-#define G1_STRDEDUP_BYTES_FORMAT_NS        "%.1lf%s"
+#define G1_STRDEDUP_TIME_FORMAT            "%.3fs"
+#define G1_STRDEDUP_TIME_PARAM(time)       (time)
+#define G1_STRDEDUP_TIME_FORMAT_MS         "%.3fms"
+#define G1_STRDEDUP_TIME_PARAM_MS(time)    ((time) * MILLIUNITS)
+#define G1_STRDEDUP_PERCENT_FORMAT         "%5.1f%%"
+#define G1_STRDEDUP_PERCENT_FORMAT_NS      "%.1f%%"
+#define G1_STRDEDUP_BYTES_FORMAT           "%8.1f%s"
+#define G1_STRDEDUP_BYTES_FORMAT_NS        "%.1f%s"
 #define G1_STRDEDUP_BYTES_PARAM(bytes)     byte_size_in_proper_unit((double)(bytes)), proper_unit_for_byte_size((bytes))
 
 //
@@ -60,7 +63,9 @@
   uintx  _block;
 
   // Time spent by the deduplication thread in different phases
-  double _start;
+  double _start_concurrent;
+  double _end_concurrent;
+  double _start_phase;
   double _idle_elapsed;
   double _exec_elapsed;
   double _block_elapsed;
@@ -104,38 +109,41 @@
   }
 
   void mark_idle() {
-    _start = os::elapsedTime();
+    _start_phase = os::elapsedTime();
     _idle++;
   }
 
   void mark_exec() {
     double now = os::elapsedTime();
-    _idle_elapsed = now - _start;
-    _start = now;
+    _idle_elapsed = now - _start_phase;
+    _start_phase = now;
+    _start_concurrent = now;
     _exec++;
   }
 
   void mark_block() {
     double now = os::elapsedTime();
-    _exec_elapsed += now - _start;
-    _start = now;
+    _exec_elapsed += now - _start_phase;
+    _start_phase = now;
     _block++;
   }
 
   void mark_unblock() {
     double now = os::elapsedTime();
-    _block_elapsed += now - _start;
-    _start = now;
+    _block_elapsed += now - _start_phase;
+    _start_phase = now;
   }
 
   void mark_done() {
     double now = os::elapsedTime();
-    _exec_elapsed += now - _start;
+    _exec_elapsed += now - _start_phase;
+    _end_concurrent = now;
   }
 
   void add(const G1StringDedupStat& stat);
 
-  static void print_summary(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
+  static void print_start(const G1StringDedupStat& last_stat);
+  static void print_end(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
   static void print_statistics(const G1StringDedupStat& stat, bool total);
 };
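
The new *_MS macros keep the stored times in seconds and convert to milliseconds only at print time (MILLIUNITS is 1000). A small usage sketch with a hypothetical value:

    // Sketch only: an elapsed time of 0.0123 s prints as "12.300ms".
    double exec_elapsed = 0.0123;  // seconds, hypothetical
    log_debug(gc, stringdedup)("  Last Exec: " G1_STRDEDUP_TIME_FORMAT_MS,
                               G1_STRDEDUP_TIME_PARAM_MS(exec_elapsed));
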
 
--- a/src/share/vm/gc/g1/g1StringDedupTable.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/g1StringDedupTable.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,16 +37,16 @@
 #include "runtime/mutexLocker.hpp"
 
 //
-// Freelist in the deduplication table entry cache. Links table
+// List of deduplication table entries. Links table
 // entries together using their _next fields.
 //
-class G1StringDedupEntryFreeList : public CHeapObj<mtGC> {
+class G1StringDedupEntryList : public CHeapObj<mtGC> {
 private:
   G1StringDedupEntry* _list;
   size_t              _length;
 
 public:
-  G1StringDedupEntryFreeList() :
+  G1StringDedupEntryList() :
     _list(NULL),
     _length(0) {
   }
@@ -66,6 +66,12 @@
     return entry;
   }
 
+  G1StringDedupEntry* remove_all() {
+    G1StringDedupEntry* list = _list;
+    _list = NULL;
+    return list;
+  }
+
   size_t length() {
     return _length;
   }
@@ -87,43 +93,53 @@
 //
 class G1StringDedupEntryCache : public CHeapObj<mtGC> {
 private:
-  // One freelist per GC worker to allow lock less freeing of
-  // entries while doing a parallel scan of the table. Using
-  // PaddedEnd to avoid false sharing.
-  PaddedEnd<G1StringDedupEntryFreeList>* _lists;
-  size_t                                 _nlists;
+  // One cache/overflow list per GC worker to allow lock-less freeing of
+  // entries while doing a parallel scan of the table. Using PaddedEnd to
+  // avoid false sharing.
+  size_t                             _nlists;
+  size_t                             _max_list_length;
+  PaddedEnd<G1StringDedupEntryList>* _cached;
+  PaddedEnd<G1StringDedupEntryList>* _overflowed;
 
 public:
-  G1StringDedupEntryCache();
+  G1StringDedupEntryCache(size_t max_size);
   ~G1StringDedupEntryCache();
 
-  // Get a table entry from the cache freelist, or allocate a new
-  // entry if the cache is empty.
+  // Set max number of table entries to cache.
+  void set_max_size(size_t max_size);
+
+  // Get a table entry from the cache, or allocate a new entry if the cache is empty.
   G1StringDedupEntry* alloc();
 
-  // Insert a table entry into the cache freelist.
+  // Insert a table entry into the cache.
   void free(G1StringDedupEntry* entry, uint worker_id);
 
   // Returns current number of entries in the cache.
   size_t size();
 
-  // If the cache has grown above the given max size, trim it down
-  // and deallocate the memory occupied by trimmed of entries.
-  void trim(size_t max_size);
+  // Deletes overflowed entries.
+  void delete_overflowed();
 };
 
-G1StringDedupEntryCache::G1StringDedupEntryCache() {
-  _nlists = ParallelGCThreads;
-  _lists = PaddedArray<G1StringDedupEntryFreeList, mtGC>::create_unfreeable((uint)_nlists);
+G1StringDedupEntryCache::G1StringDedupEntryCache(size_t max_size) :
+  _nlists(ParallelGCThreads),
+  _max_list_length(0),
+  _cached(PaddedArray<G1StringDedupEntryList, mtGC>::create_unfreeable((uint)_nlists)),
+  _overflowed(PaddedArray<G1StringDedupEntryList, mtGC>::create_unfreeable((uint)_nlists)) {
+  set_max_size(max_size);
 }
 
 G1StringDedupEntryCache::~G1StringDedupEntryCache() {
   ShouldNotReachHere();
 }
 
+void G1StringDedupEntryCache::set_max_size(size_t size) {
+  _max_list_length = size / _nlists;
+}
+
 G1StringDedupEntry* G1StringDedupEntryCache::alloc() {
   for (size_t i = 0; i < _nlists; i++) {
-    G1StringDedupEntry* entry = _lists[i].remove();
+    G1StringDedupEntry* entry = _cached[i].remove();
     if (entry != NULL) {
       return entry;
     }
@@ -134,31 +150,54 @@
 void G1StringDedupEntryCache::free(G1StringDedupEntry* entry, uint worker_id) {
   assert(entry->obj() != NULL, "Double free");
   assert(worker_id < _nlists, "Invalid worker id");
+
   entry->set_obj(NULL);
   entry->set_hash(0);
-  _lists[worker_id].add(entry);
+
+  if (_cached[worker_id].length() < _max_list_length) {
+    // Cache is not full
+    _cached[worker_id].add(entry);
+  } else {
+    // Cache is full, add to overflow list for later deletion
+    _overflowed[worker_id].add(entry);
+  }
 }
 
 size_t G1StringDedupEntryCache::size() {
   size_t size = 0;
   for (size_t i = 0; i < _nlists; i++) {
-    size += _lists[i].length();
+    size += _cached[i].length();
   }
   return size;
 }
 
-void G1StringDedupEntryCache::trim(size_t max_size) {
-  size_t cache_size = 0;
+void G1StringDedupEntryCache::delete_overflowed() {
+  double start = os::elapsedTime();
+  uintx count = 0;
+
   for (size_t i = 0; i < _nlists; i++) {
-    G1StringDedupEntryFreeList* list = &_lists[i];
-    cache_size += list->length();
-    while (cache_size > max_size) {
-      G1StringDedupEntry* entry = list->remove();
-      assert(entry != NULL, "Should not be null");
-      cache_size--;
+    G1StringDedupEntry* entry;
+
+    {
+      // The overflow list can be modified during safepoints, therefore
+      // we temporarily join the suspendible thread set while removing
+      // all entries from the list.
+      SuspendibleThreadSetJoiner sts_join;
+      entry = _overflowed[i].remove_all();
+    }
+
+    // Delete all entries
+    while (entry != NULL) {
+      G1StringDedupEntry* next = entry->next();
       delete entry;
+      entry = next;
+      count++;
     }
   }
+
+  double end = os::elapsedTime();
+  log_trace(gc, stringdedup)("Deleted " UINTX_FORMAT " entries, " G1_STRDEDUP_TIME_FORMAT_MS,
+                             count, G1_STRDEDUP_TIME_PARAM_MS(end - start));
 }
 
 G1StringDedupTable*      G1StringDedupTable::_table = NULL;
@@ -195,7 +234,7 @@
 
 void G1StringDedupTable::create() {
   assert(_table == NULL, "One string deduplication table allowed");
-  _entry_cache = new G1StringDedupEntryCache();
+  _entry_cache = new G1StringDedupEntryCache(_min_size * _max_cache_factor);
   _table = new G1StringDedupTable(_min_size);
 }
 
@@ -389,6 +428,9 @@
   // Update statistics
   _resize_count++;
 
+  // Update max cache size
+  _entry_cache->set_max_size(size * _max_cache_factor);
+
   // Allocate the new table. The new table will be populated by workers
   // calling unlink_or_oops_do() and finally installed by finish_resize().
   return new G1StringDedupTable(size, _table->_hash_seed);
@@ -441,7 +483,7 @@
     removed += unlink_or_oops_do(cl, table_half + partition_begin, table_half + partition_end, worker_id);
   }
 
-  // Delayed update avoid contention on the table lock
+  // Delayed update to avoid contention on the table lock
   if (removed > 0) {
     MutexLockerEx ml(StringDedupTable_lock, Mutex::_no_safepoint_check_flag);
     _table->_entries -= removed;
@@ -563,22 +605,20 @@
   }
 }
 
-void G1StringDedupTable::trim_entry_cache() {
-  MutexLockerEx ml(StringDedupTable_lock, Mutex::_no_safepoint_check_flag);
-  size_t max_cache_size = (size_t)(_table->_size * _max_cache_factor);
-  _entry_cache->trim(max_cache_size);
+void G1StringDedupTable::clean_entry_cache() {
+  _entry_cache->delete_overflowed();
 }
 
 void G1StringDedupTable::print_statistics() {
   Log(gc, stringdedup) log;
-  log.debug("   [Table]");
-  log.debug("      [Memory Usage: " G1_STRDEDUP_BYTES_FORMAT_NS "]",
+  log.debug("  Table");
+  log.debug("    Memory Usage: " G1_STRDEDUP_BYTES_FORMAT_NS,
             G1_STRDEDUP_BYTES_PARAM(_table->_size * sizeof(G1StringDedupEntry*) + (_table->_entries + _entry_cache->size()) * sizeof(G1StringDedupEntry)));
-  log.debug("      [Size: " SIZE_FORMAT ", Min: " SIZE_FORMAT ", Max: " SIZE_FORMAT "]", _table->_size, _min_size, _max_size);
-  log.debug("      [Entries: " UINTX_FORMAT ", Load: " G1_STRDEDUP_PERCENT_FORMAT_NS ", Cached: " UINTX_FORMAT ", Added: " UINTX_FORMAT ", Removed: " UINTX_FORMAT "]",
+  log.debug("    Size: " SIZE_FORMAT ", Min: " SIZE_FORMAT ", Max: " SIZE_FORMAT, _table->_size, _min_size, _max_size);
+  log.debug("    Entries: " UINTX_FORMAT ", Load: " G1_STRDEDUP_PERCENT_FORMAT_NS ", Cached: " UINTX_FORMAT ", Added: " UINTX_FORMAT ", Removed: " UINTX_FORMAT,
             _table->_entries, (double)_table->_entries / (double)_table->_size * 100.0, _entry_cache->size(), _entries_added, _entries_removed);
-  log.debug("      [Resize Count: " UINTX_FORMAT ", Shrink Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS "), Grow Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS ")]",
+  log.debug("    Resize Count: " UINTX_FORMAT ", Shrink Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS "), Grow Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS ")",
             _resize_count, _table->_shrink_threshold, _shrink_load_factor * 100.0, _table->_grow_threshold, _grow_load_factor * 100.0);
-  log.debug("      [Rehash Count: " UINTX_FORMAT ", Rehash Threshold: " UINTX_FORMAT ", Hash Seed: 0x%x]", _rehash_count, _rehash_threshold, _table->_hash_seed);
-  log.debug("      [Age Threshold: " UINTX_FORMAT "]", StringDeduplicationAgeThreshold);
+  log.debug("    Rehash Count: " UINTX_FORMAT ", Rehash Threshold: " UINTX_FORMAT ", Hash Seed: 0x%x", _rehash_count, _rehash_threshold, _table->_hash_seed);
+  log.debug("    Age Threshold: " UINTX_FORMAT, StringDeduplicationAgeThreshold);
 }
--- a/src/share/vm/gc/g1/g1StringDedupTable.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/g1StringDedupTable.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -229,8 +229,8 @@
   // and deletes the previously active table.
   static void finish_rehash(G1StringDedupTable* rehashed_table);
 
-  // If the table entry cache has grown too large, trim it down according to policy
-  static void trim_entry_cache();
+  // If the table entry cache has grown too large, delete overflowed entries.
+  static void clean_entry_cache();
 
   static void unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl, uint worker_id);
 
--- a/src/share/vm/gc/g1/g1StringDedupThread.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/g1StringDedupThread.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -103,6 +103,7 @@
       SuspendibleThreadSetJoiner sts_join;
 
       stat.mark_exec();
+      print_start(stat);
 
       // Process the queue
       for (;;) {
@@ -121,30 +122,30 @@
         }
       }
 
-      G1StringDedupTable::trim_entry_cache();
-
       stat.mark_done();
 
-      // Print statistics
       total_stat.add(stat);
-      print(stat, total_stat);
+      print_end(stat, total_stat);
     }
+
+    G1StringDedupTable::clean_entry_cache();
   }
-
 }
 
 void G1StringDedupThread::stop_service() {
   G1StringDedupQueue::cancel_wait();
 }
 
-void G1StringDedupThread::print(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
-  if (log_is_enabled(Info, gc, stringdedup)) {
-    G1StringDedupStat::print_summary(last_stat, total_stat);
-    if (log_is_enabled(Debug, gc, stringdedup)) {
-      G1StringDedupStat::print_statistics(last_stat, false);
-      G1StringDedupStat::print_statistics(total_stat, true);
-      G1StringDedupTable::print_statistics();
-      G1StringDedupQueue::print_statistics();
-    }
+void G1StringDedupThread::print_start(const G1StringDedupStat& last_stat) {
+  G1StringDedupStat::print_start(last_stat);
+}
+
+void G1StringDedupThread::print_end(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
+  G1StringDedupStat::print_end(last_stat, total_stat);
+  if (log_is_enabled(Debug, gc, stringdedup)) {
+    G1StringDedupStat::print_statistics(last_stat, false);
+    G1StringDedupStat::print_statistics(total_stat, true);
+    G1StringDedupTable::print_statistics();
+    G1StringDedupQueue::print_statistics();
   }
 }
--- a/src/share/vm/gc/g1/g1StringDedupThread.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/g1StringDedupThread.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -43,7 +43,8 @@
   G1StringDedupThread();
   ~G1StringDedupThread();
 
-  void print(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
+  void print_start(const G1StringDedupStat& last_stat);
+  void print_end(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
 
   void run_service();
   void stop_service();
--- a/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -71,38 +71,51 @@
   _monitor.notify();
 }
 
+class G1YoungRemSetSamplingClosure : public HeapRegionClosure {
+  SuspendibleThreadSetJoiner* _sts;
+  size_t _regions_visited;
+  size_t _sampled_rs_lengths;
+public:
+  G1YoungRemSetSamplingClosure(SuspendibleThreadSetJoiner* sts) :
+    HeapRegionClosure(), _sts(sts), _regions_visited(0), _sampled_rs_lengths(0) { }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    size_t rs_length = r->rem_set()->occupied();
+    _sampled_rs_lengths += rs_length;
+
+    // Update the collection set policy information for this region
+    G1CollectedHeap::heap()->collection_set()->update_young_region_prediction(r, rs_length);
+
+    _regions_visited++;
+
+    if (_regions_visited == 10) {
+      if (_sts->should_yield()) {
+        _sts->yield();
+        // A GC may have occurred, making our sampling data stale; further
+        // traversal of the collection set is unsafe.
+        return true;
+      }
+      _regions_visited = 0;
+    }
+    return false;
+  }
+
+  size_t sampled_rs_lengths() const { return _sampled_rs_lengths; }
+};
+
 void G1YoungRemSetSamplingThread::sample_young_list_rs_lengths() {
   SuspendibleThreadSetJoiner sts;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   G1Policy* g1p = g1h->g1_policy();
-  G1CollectionSet* g1cs = g1h->collection_set();
+
   if (g1p->adaptive_young_list_length()) {
-    int regions_visited = 0;
-    HeapRegion* hr = g1cs->inc_head();
-    size_t sampled_rs_lengths = 0;
+    G1YoungRemSetSamplingClosure cl(&sts);
 
-    while (hr != NULL) {
-      size_t rs_length = hr->rem_set()->occupied();
-      sampled_rs_lengths += rs_length;
+    G1CollectionSet* g1cs = g1h->collection_set();
+    g1cs->iterate(&cl);
 
-      // Update the collection set policy information for this region
-      g1cs->update_young_region_prediction(hr, rs_length);
-
-      ++regions_visited;
-
-      // we try to yield every time we visit 10 regions
-      if (regions_visited == 10) {
-        if (sts.should_yield()) {
-          sts.yield();
-          // A gc may have occurred and our sampling data is stale and further
-          // traversal of the collection set is unsafe
-          return;
-        }
-        regions_visited = 0;
-      }
-      assert(hr == g1cs->inc_tail() || hr->next_in_collection_set() != NULL, "next should only be null at tail of icset");
-      hr = hr->next_in_collection_set();
+    if (cl.complete()) {
+      g1p->revise_young_list_target_length_if_necessary(cl.sampled_rs_lengths());
     }
-    g1p->revise_young_list_target_length_if_necessary(sampled_rs_lengths);
   }
 }
--- a/src/share/vm/gc/g1/heapRegion.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/heapRegion.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -284,7 +284,6 @@
     _hrm_index(hrm_index),
     _allocation_context(AllocationContext::system()),
     _humongous_start_region(NULL),
-    _next_in_special_set(NULL),
     _evacuation_failed(false),
     _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
     _next(NULL), _prev(NULL),
--- a/src/share/vm/gc/g1/heapRegion.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/heapRegion.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -261,12 +261,6 @@
   // True iff an attempt to evacuate an object in the region failed.
   bool _evacuation_failed;
 
-  // A heap region may be a member one of a number of special subsets, each
-  // represented as linked lists through the field below.  Currently, there
-  // is only one set:
-  //   The collection set.
-  HeapRegion* _next_in_special_set;
-
   // Fields used by the HeapRegionSetBase class and subclasses.
   HeapRegion* _next;
   HeapRegion* _prev;
@@ -476,9 +470,6 @@
 
   inline bool in_collection_set() const;
 
-  inline HeapRegion* next_in_collection_set() const;
-  inline void set_next_in_collection_set(HeapRegion* r);
-
   void set_allocation_context(AllocationContext_t context) {
     _allocation_context = context;
   }
@@ -744,7 +735,7 @@
 // Terminates the iteration when the "doHeapRegion" method returns "true".
 class HeapRegionClosure : public StackObj {
   friend class HeapRegionManager;
-  friend class G1CollectedHeap;
+  friend class G1CollectionSet;
 
   bool _complete;
   void incomplete() { _complete = false; }
--- a/src/share/vm/gc/g1/heapRegion.inline.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/g1/heapRegion.inline.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -230,18 +230,4 @@
   return G1CollectedHeap::heap()->is_in_cset(this);
 }
 
-inline HeapRegion* HeapRegion::next_in_collection_set() const {
-  assert(in_collection_set(), "should only invoke on member of CS.");
-  assert(_next_in_special_set == NULL ||
-         _next_in_special_set->in_collection_set(),
-         "Malformed CS.");
-  return _next_in_special_set;
-}
-
-void HeapRegion::set_next_in_collection_set(HeapRegion* r) {
-  assert(in_collection_set(), "should only invoke on member of CS.");
-  assert(r == NULL || r->in_collection_set(), "Malformed CS.");
-  _next_in_special_set = r;
-}
-
 #endif // SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP
--- a/src/share/vm/gc/parallel/gcTaskManager.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/parallel/gcTaskManager.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -386,13 +386,21 @@
 
 void GCTaskManager::add_workers(bool initializing) {
   os::ThreadType worker_type = os::pgc_thread;
+  uint previous_created_workers = _created_workers;
+
   _created_workers = WorkerManager::add_workers(this,
                                                 _active_workers,
-                                                (uint) _workers,
+                                                _workers,
                                                 _created_workers,
                                                 worker_type,
                                                 initializing);
   _active_workers = MIN2(_created_workers, _active_workers);
+
+  WorkerManager::log_worker_creation(this, previous_created_workers, _active_workers, _created_workers, initializing);
+}
+
+const char* GCTaskManager::group_name() {
+  return "ParGC Thread";
 }
 
 void GCTaskManager::initialize() {
--- a/src/share/vm/gc/parallel/gcTaskManager.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/parallel/gcTaskManager.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -556,6 +556,8 @@
   GCTaskThread* install_worker(uint worker_id);
   // Add GC workers as needed.
   void add_workers(bool initializing);
+  // Base name (without worker id #) of threads.
+  const char* group_name();
 };
 
 //
--- a/src/share/vm/gc/parallel/gcTaskThread.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/parallel/gcTaskThread.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -45,7 +45,7 @@
   _time_stamp_index(0)
 {
   set_id(which);
-  set_name("ParGC Thread#%d", which);
+  set_name("%s#%d", manager->group_name(), which);
 }
 
 GCTaskThread::~GCTaskThread() {
--- a/src/share/vm/gc/parallel/gcTaskThread.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/parallel/gcTaskThread.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -55,6 +55,7 @@
     return new GCTaskThread(manager, which, processor_id);
   }
  public:
+
   static void destroy(GCTaskThread* manager) {
     if (manager != NULL) {
       delete manager;
--- a/src/share/vm/gc/shared/collectedHeap.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/shared/collectedHeap.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -159,6 +159,8 @@
   inline static void post_allocation_setup_array(KlassHandle klass,
                                                  HeapWord* obj, int length);
 
+  inline static void post_allocation_setup_class(KlassHandle klass, HeapWord* obj, int size);
+
   // Clears an allocated object.
   inline static void init_obj(HeapWord* obj, size_t size);
 
@@ -300,6 +302,7 @@
   inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
   inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
   inline static oop array_allocate_nozero(KlassHandle klass, int size, int length, TRAPS);
+  inline static oop class_allocate(KlassHandle klass, int size, TRAPS);
 
   inline static void post_allocation_install_obj_klass(KlassHandle klass,
                                                        oop obj);
--- a/src/share/vm/gc/shared/collectedHeap.inline.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/shared/collectedHeap.inline.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP
 #define SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP
 
+#include "classfile/javaClasses.hpp"
 #include "gc/shared/allocTracer.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
@@ -96,6 +97,22 @@
   post_allocation_notify(klass, (oop)obj, size);
 }
 
+void CollectedHeap::post_allocation_setup_class(KlassHandle klass,
+                                                HeapWord* obj,
+                                                int size) {
+  // Set oop_size field before setting the _klass field
+  // in post_allocation_setup_common() because the klass field
+  // indicates that the object is parsable by concurrent GC.
+  oop new_cls = (oop)obj;
+  assert(size > 0, "oop_size must be positive.");
+  java_lang_Class::set_oop_size(new_cls, size);
+  post_allocation_setup_common(klass, obj);
+  assert(Universe::is_bootstrapping() ||
+         !new_cls->is_array(), "must not be an array");
+  // notify jvmti and dtrace
+  post_allocation_notify(klass, new_cls, size);
+}
+
 void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
                                                 HeapWord* obj,
                                                 int length) {
@@ -207,6 +224,16 @@
   return (oop)obj;
 }
 
+oop CollectedHeap::class_allocate(KlassHandle klass, int size, TRAPS) {
+  debug_only(check_for_valid_allocation_state());
+  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
+  assert(size >= 0, "int won't convert to size_t");
+  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
+  post_allocation_setup_class(klass, obj, size); // set oop_size
+  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
+  return (oop)obj;
+}
+
 oop CollectedHeap::array_allocate(KlassHandle klass,
                                   int size,
                                   int length,
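
class_allocate() mirrors obj_allocate() but routes through post_allocation_setup_class() so that the oop_size field of the java.lang.Class instance is written before the klass field marks the object as parsable. The expected caller is InstanceMirrorKlass::allocate_instance(); its diff falls outside this section, so the sketch below is an assumption about roughly how such a caller would look:

    // Sketch only; the actual change to instanceMirrorKlass.cpp is not shown here.
    instanceOop InstanceMirrorKlass::allocate_instance(KlassHandle k, TRAPS) {
      int size = instance_size(k);   // query the size before forming the handle
      KlassHandle h_k(THREAD, this);
      // class_allocate() sets oop_size before installing the klass field.
      return (instanceOop)CollectedHeap::class_allocate(h_k, size, CHECK_NULL);
    }
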
--- a/src/share/vm/gc/shared/preservedMarks.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/shared/preservedMarks.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -48,10 +48,10 @@
 
 #ifndef PRODUCT
 void PreservedMarks::assert_empty() {
-  assert(_stack.is_empty(), "stack expected to be empty, size = "SIZE_FORMAT,
+  assert(_stack.is_empty(), "stack expected to be empty, size = " SIZE_FORMAT,
          _stack.size());
   assert(_stack.cache_size() == 0,
-         "stack expected to have no cached segments, cache size = "SIZE_FORMAT,
+         "stack expected to have no cached segments, cache size = " SIZE_FORMAT,
          _stack.cache_size());
 }
 #endif // ndef PRODUCT
--- a/src/share/vm/gc/shared/workerManager.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/shared/workerManager.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -47,18 +47,18 @@
   // threads and a failure would not be optimal but should not be fatal.
   template <class WorkerType>
   static uint add_workers (WorkerType* holder,
-                   uint active_workers,
-                   uint total_workers,
-                   uint created_workers,
-                   os::ThreadType worker_type,
-                   bool initializing) {
+                           uint active_workers,
+                           uint total_workers,
+                           uint created_workers,
+                           os::ThreadType worker_type,
+                           bool initializing) {
     uint start = created_workers;
     uint end = MIN2(active_workers, total_workers);
     for (uint worker_id = start; worker_id < end; worker_id += 1) {
       WorkerThread* new_worker = holder->install_worker(worker_id);
       assert(new_worker != NULL, "Failed to allocate GangWorker");
       if (new_worker == NULL || !os::create_thread(new_worker, worker_type)) {
-        if(initializing) {
+        if (initializing) {
           vm_exit_out_of_memory(0, OOM_MALLOC_ERROR,
                   "Cannot create worker GC thread. Out of system resources.");
         }
@@ -67,11 +67,21 @@
       os::start_thread(new_worker);
     }
 
-    log_trace(gc, task)("AdaptiveSizePolicy::add_workers() : "
-       "active_workers: %u created_workers: %u",
-       active_workers, created_workers);
+    return created_workers;
+  }
 
-    return created_workers;
+  // Log (at trace level) a change in the number of created workers.
+  template <class WorkerType>
+  static void log_worker_creation(WorkerType* holder,
+                                  uint previous_created_workers,
+                                  uint active_workers,
+                                  uint created_workers,
+                                  bool initializing) {
+    if (previous_created_workers < created_workers) {
+      const char* initializing_msg = initializing ? "Adding initial" : "Creating additional";
+      log_trace(gc, task)("%s %s(s) previously created workers %u active workers %u total created workers %u",
+                          initializing_msg, holder->group_name(), previous_created_workers, active_workers, created_workers);
+    }
   }
 };
 #endif // SHARE_VM_GC_SHARED_WORKERMANAGER_HPP
--- a/src/share/vm/gc/shared/workgroup.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/shared/workgroup.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -66,6 +66,7 @@
   } else {
     worker_type = os::pgc_thread;
   }
+  uint previous_created_workers = _created_workers;
 
   _created_workers = WorkerManager::add_workers(this,
                                                 active_workers,
@@ -74,6 +75,8 @@
                                                 worker_type,
                                                 initializing);
   _active_workers = MIN2(_created_workers, _active_workers);
+
+  WorkerManager::log_worker_creation(this, previous_created_workers, _active_workers, _created_workers, initializing);
 }
 
 AbstractGangWorker* AbstractWorkGang::worker(uint i) const {
--- a/src/share/vm/gc/shared/workgroup.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/gc/shared/workgroup.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -176,6 +176,9 @@
   // Return the Ith worker.
   AbstractGangWorker* worker(uint i) const;
 
+  // Base name (without worker id #) of threads.
+  const char* group_name() { return name(); }
+
   void threads_do(ThreadClosure* tc) const;
 
   // Create a GC worker and install it into the work gang.
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -576,27 +576,27 @@
   // compute auxiliary field attributes
   TosState state  = as_TosState(info.field_type());
 
-  // We need to delay resolving put instructions on final fields
-  // until we actually invoke one. This is required so we throw
-  // exceptions at the correct place. If we do not resolve completely
-  // in the current pass, leaving the put_code set to zero will
-  // cause the next put instruction to reresolve.
-  Bytecodes::Code put_code = (Bytecodes::Code)0;
-
-  // We also need to delay resolving getstatic instructions until the
-  // class is intitialized.  This is required so that access to the static
+  // Put instructions on final fields are not resolved. This is required so we throw
+  // exceptions at the correct place (when the instruction is actually invoked).
+  // If we do not resolve an instruction in the current pass, leaving the put_code
+  // set to zero will cause the next put instruction to the same field to reresolve.
+  //
+  // Also, we need to delay resolving getstatic and putstatic instructions until the
+  // class is initialized.  This is required so that access to the static
   // field will call the initialization function every time until the class
   // is completely initialized ala. in 2.17.5 in JVM Specification.
   InstanceKlass* klass = InstanceKlass::cast(info.field_holder());
   bool uninitialized_static = ((bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic) &&
                                !klass->is_initialized());
+
+  Bytecodes::Code put_code = (Bytecodes::Code)0;
+  if (is_put && !info.access_flags().is_final() && !uninitialized_static) {
+    put_code = ((is_static) ? Bytecodes::_putstatic : Bytecodes::_putfield);
+  }
+
   Bytecodes::Code get_code = (Bytecodes::Code)0;
-
   if (!uninitialized_static) {
     get_code = ((is_static) ? Bytecodes::_getstatic : Bytecodes::_getfield);
-    if (is_put || !info.access_flags().is_final()) {
-      put_code = ((is_static) ? Bytecodes::_putstatic : Bytecodes::_putfield);
-    }
   }
 
   cp_cache_entry->set_field(
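
The restructured resolution logic above can be condensed into the following outcome table (a readability sketch, not quoted from the source):

    // bytecode being resolved   field/holder state               get_code   put_code
    // get*                      instance field or initialized    _get*      0
    // put*                      final field                      _get*      0  (exception thrown when the put executes)
    // put*                      non-final, holder initialized    _get*      _put*
    // getstatic / putstatic     holder not yet initialized       0          0
    //
    // A cache entry code left at zero forces the next matching bytecode to re-resolve.
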
--- a/src/share/vm/interpreter/linkResolver.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/interpreter/linkResolver.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -970,7 +970,7 @@
       if (is_initialized_static_final_update || is_initialized_instance_final_update) {
         ss.print("Update to %s final field %s.%s attempted from a different method (%s) than the initializer method %s ",
                  is_static ? "static" : "non-static", resolved_klass()->external_name(), fd.name()->as_C_string(),
-                 current_klass()->external_name(),
+                 m()->name()->as_C_string(),
                  is_static ? "<clinit>" : "<init>");
         THROW_MSG(vmSymbols::java_lang_IllegalAccessError(), ss.as_string());
       }
--- a/src/share/vm/interpreter/rewriter.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/interpreter/rewriter.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -419,21 +419,20 @@
             InstanceKlass* klass = method->method_holder();
             u2 bc_index = Bytes::get_Java_u2(bcp + prefix_length + 1);
             constantPoolHandle cp(method->constants());
-            Symbol* field_name = cp->name_ref_at(bc_index);
-            Symbol* field_sig = cp->signature_ref_at(bc_index);
             Symbol* ref_class_name = cp->klass_name_at(cp->klass_ref_index_at(bc_index));
 
             if (klass->name() == ref_class_name) {
+              Symbol* field_name = cp->name_ref_at(bc_index);
+              Symbol* field_sig = cp->signature_ref_at(bc_index);
+
               fieldDescriptor fd;
               klass->find_field(field_name, field_sig, &fd);
               if (fd.access_flags().is_final()) {
                 if (fd.access_flags().is_static()) {
-                  assert(c == Bytecodes::_putstatic, "must be putstatic");
                   if (!method->is_static_initializer()) {
                     fd.set_has_initialized_final_update(true);
                   }
                 } else {
-                  assert(c == Bytecodes::_putfield, "must be putfield");
                   if (!method->is_object_initializer()) {
                     fd.set_has_initialized_final_update(true);
                   }
--- a/src/share/vm/logging/logConfiguration.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/logging/logConfiguration.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -415,17 +415,8 @@
 void LogConfiguration::describe_current_configuration(outputStream* out){
   out->print_cr("Log output configuration:");
   for (size_t i = 0; i < _n_outputs; i++) {
-    out->print("#" SIZE_FORMAT ": %s ", i, _outputs[i]->name());
-    out->print_raw(_outputs[i]->config_string());
-    out->print(" ");
-    char delimiter[2] = {0};
-    for (size_t d = 0; d < LogDecorators::Count; d++) {
-      LogDecorators::Decorator decorator = static_cast<LogDecorators::Decorator>(d);
-      if (_outputs[i]->decorators().is_decorator(decorator)) {
-        out->print("%s%s", delimiter, LogDecorators::name(decorator));
-        *delimiter = ',';
-      }
-    }
+    out->print("#" SIZE_FORMAT ": ", i);
+    _outputs[i]->describe(out);
     out->cr();
   }
 }
--- a/src/share/vm/logging/logFileOutput.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/logging/logFileOutput.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -428,3 +428,13 @@
   result[result_len] = '\0';
   return result;
 }
+
+void LogFileOutput::describe(outputStream *out) {
+  LogOutput::describe(out);
+  out->print(" ");
+
+  out->print("filecount=%u,filesize=" SIZE_FORMAT "%s", _file_count,
+             byte_size_in_proper_unit(_rotate_size),
+             proper_unit_for_byte_size(_rotate_size));
+}
+
--- a/src/share/vm/logging/logFileOutput.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/logging/logFileOutput.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -85,6 +85,7 @@
   virtual int write(const LogDecorations& decorations, const char* msg);
   virtual int write(LogMessageBuffer::Iterator msg_iterator);
   virtual void force_rotate();
+  virtual void describe(outputStream *out);
 
   virtual const char* name() const {
     return _name;
--- a/src/share/vm/logging/logOutput.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/logging/logOutput.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -83,3 +83,18 @@
     break;
   }
 }
+
+void LogOutput::describe(outputStream *out) {
+  out->print("%s ", name());
+  out->print_raw(config_string());
+  out->print(" ");
+  char delimiter[2] = {0};
+  for (size_t d = 0; d < LogDecorators::Count; d++) {
+    LogDecorators::Decorator decorator = static_cast<LogDecorators::Decorator>(d);
+    if (decorators().is_decorator(decorator)) {
+      out->print("%s%s", delimiter, LogDecorators::name(decorator));
+      *delimiter = ',';
+    }
+  }
+}
+
--- a/src/share/vm/logging/logOutput.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/logging/logOutput.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -83,6 +83,8 @@
     // Do nothing by default.
   }
 
+  virtual void describe(outputStream *out);
+
   virtual const char* name() const = 0;
   virtual bool initialize(const char* options, outputStream* errstream) = 0;
   virtual int write(const LogDecorations& decorations, const char* msg) = 0;
--- a/src/share/vm/logging/logPrefix.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/logging/logPrefix.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -74,6 +74,7 @@
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ref)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ref, start)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, start)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, stringtable)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, sweep)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, task)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, task, start)) \
--- a/src/share/vm/memory/metaspace.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/memory/metaspace.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -3106,10 +3106,6 @@
 
   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
 
-  if (MetaspaceSize < 256*K) {
-    vm_exit_during_initialization("Too small initial Metaspace size");
-  }
-
   MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
   MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
 
--- a/src/share/vm/oops/instanceMirrorKlass.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/oops/instanceMirrorKlass.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,13 +50,12 @@
   // Query before forming handle.
   int size = instance_size(k);
   KlassHandle h_k(THREAD, this);
-  instanceOop i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
+
+  assert(size > 0, "total object size must be positive: %d", size);
 
   // Since mirrors can be variable sized because of the static fields, store
   // the size in the mirror itself.
-  java_lang_Class::set_oop_size(i, size);
-
-  return i;
+  return (instanceOop)CollectedHeap::class_allocate(h_k, size, CHECK_NULL);
 }
 
 int InstanceMirrorKlass::oop_size(oop obj) const {
--- a/src/share/vm/oops/oop.inline.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/oops/oop.inline.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -258,8 +258,8 @@
     }
   }
 
-  assert(s % MinObjAlignment == 0, "alignment check");
-  assert(s > 0, "Bad size calculated");
+  assert(s % MinObjAlignment == 0, "Oop size is not properly aligned: %d", s);
+  assert(s > 0, "Oop size must be greater than zero, not %d", s);
   return s;
 }
 
--- a/src/share/vm/prims/jvmti.xml	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/prims/jvmti.xml	Thu Jul 14 15:18:15 2016 +0100
@@ -6509,6 +6509,59 @@
       <errors>
       </errors>
     </function>
+
+    <function id="GetNamedModule" num="40" since="9">
+      <synopsis>Get Named Module</synopsis>
+      <description>
+        Return the <code>java.lang.reflect.Module</code> object for a named
+        module defined to a class loader that contains a given package.
+        The module is returned via <code>module_ptr</code>.
+        <p/>
+        If a named module is defined to the class loader and it
+        contains the package, then that named module is returned;
+        otherwise <code>NULL</code> is returned.
+        <p/>
+      </description>
+      <origin>new</origin>
+      <capabilities>
+      </capabilities>
+      <parameters>
+        <param id="class_loader">
+          <ptrtype>
+            <jobject/>
+            <nullok>the bootstrap loader is assumed</nullok>
+          </ptrtype>
+          <description>
+            A class loader.
+            If the <code>class_loader</code> is not <code>NULL</code>
+            and is not a subclass of <code>java.lang.ClassLoader</code>,
+            this function returns
+            <errorlink id="JVMTI_ERROR_ILLEGAL_ARGUMENT"></errorlink>.
+          </description>
+        </param>
+        <param id="package_name">
+          <inbuf><char/></inbuf>
+          <description>
+            The name of the package, encoded as a
+            <internallink id="mUTF">modified UTF-8</internallink> string.
+            The package name is in internal form (JVMS 4.2.1);
+            identifiers are separated by forward slashes rather than periods.
+          </description>
+        </param>
+        <param id="module_ptr">
+          <outptr><jobject/></outptr>
+          <description>
+            On return, points to a <code>java.lang.reflect.Module</code> object
+            or points to <code>NULL</code>.
+          </description>
+        </param>
+      </parameters>
+      <errors>
+        <error id="JVMTI_ERROR_ILLEGAL_ARGUMENT">
+          If class loader is not <code>NULL</code> and is not a class loader object.
+        </error>
+      </errors>
+    </function>
   </category>
 
   <category id="class" label="Class">
@@ -12462,6 +12515,14 @@
     <code>new_class_data</code> has been set, it becomes the 
     <code>class_data</code> for the next agent.
     <p/>
+    When handling a class load in the live phase, the
+    <functionlink id="GetNamedModule"></functionlink>
+    function can be used to map a class loader and a package name to a module.
+    When a class is being redefined or retransformed,
+    <code>class_being_redefined</code> is non-<code>NULL</code>, so
+    the JNI <code>GetModule</code> function can also be used
+    to obtain the Module.
+    <p/>
     The order that this event is sent to each environment differs
     from other events.
     This event is sent to environments in the following order:
@@ -14427,20 +14488,15 @@
   <change date="19 June 2013" version="1.2.3">
       Added support for statically linked agents.
   </change>
-  <change date="20 January 2016" version="9.0.0">
+  <change date="5 July 2016" version="9.0.0">
       Support for modules:
        - The majorversion is 9 now
        - The ClassFileLoadHook events are not sent during the primordial phase anymore.
        - Add new function GetAllModules
-  </change>
-  <change date="17 February 2016" version="9.0.0">
-      Support for modules:
        - Add new capability can_generate_early_vmstart
        - Allow CompiledMethodLoad events at start phase
-  </change>
-  <change date="14 April 2016" version="9.0.0">
-      Support for modules:
        - Add new capability can_generate_early_class_hook_events
+       - Add new function GetNamedModule
   </change>
 </changehistory>
 
--- a/src/share/vm/prims/jvmtiEnv.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/prims/jvmtiEnv.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/classLoaderExt.hpp"
+#include "classfile/modules.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "interpreter/bytecodeStream.hpp"
@@ -201,6 +202,28 @@
 } /* end GetAllModules */
 
 
+// class_loader - NULL is a valid value, must be pre-checked
+// package_name - pre-checked for NULL
+// module_ptr - pre-checked for NULL
+jvmtiError
+JvmtiEnv::GetNamedModule(jobject class_loader, const char* package_name, jobject* module_ptr) {
+  JavaThread* THREAD = JavaThread::current(); // pass to macros
+  ResourceMark rm(THREAD);
+
+  Handle h_loader (THREAD, JNIHandles::resolve(class_loader));
+  // Check that loader is a subclass of java.lang.ClassLoader.
+  if (h_loader.not_null() && !java_lang_ClassLoader::is_subclass(h_loader->klass())) {
+    return JVMTI_ERROR_ILLEGAL_ARGUMENT;
+  }
+  jobject module = Modules::get_named_module(h_loader, package_name, THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    CLEAR_PENDING_EXCEPTION;
+    return JVMTI_ERROR_INTERNAL; // unexpected exception
+  }
+  *module_ptr = module;
+  return JVMTI_ERROR_NONE;
+} /* end GetNamedModule */
+
   //
   // Class functions
   //
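
A minimal, illustrative sketch (not part of this changeset) of how an agent might call the GetNamedModule entry point specified in jvmti.xml above and implemented here; the helper name find_named_module, the example package "java/lang", and the simplified error handling are assumptions, only the parameter list follows the spec text.

#include <jvmti.h>

// Sketch: look up the named module, if any, that contains a given package
// for a class loader. Returns NULL when no named module matches or on error.
static jobject find_named_module(jvmtiEnv* jvmti, jobject class_loader) {
  jobject module = NULL;
  // The package name is in internal form (slashes, not dots); "java/lang" is only an example.
  jvmtiError err = jvmti->GetNamedModule(class_loader, "java/lang", &module);
  if (err != JVMTI_ERROR_NONE) {
    // e.g. JVMTI_ERROR_ILLEGAL_ARGUMENT if class_loader is neither NULL nor a ClassLoader
    return NULL;
  }
  return module;
}
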
--- a/src/share/vm/runtime/arguments.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/runtime/arguments.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -584,27 +584,26 @@
 // Parses a size specification string.
 bool Arguments::atojulong(const char *s, julong* result) {
   julong n = 0;
-  int args_read = 0;
-  bool is_hex = false;
-  // Skip leading 0[xX] for hexadecimal
-  if (*s =='0' && (*(s+1) == 'x' || *(s+1) == 'X')) {
-    s += 2;
-    is_hex = true;
-    args_read = sscanf(s, JULONG_FORMAT_X, &n);
-  } else {
-    args_read = sscanf(s, JULONG_FORMAT, &n);
-  }
-  if (args_read != 1) {
+
+  // First char must be a digit. Don't allow negative numbers or leading spaces.
+  if (!isdigit(*s)) {
     return false;
   }
-  while (*s != '\0' && (isdigit(*s) || (is_hex && isxdigit(*s)))) {
-    s++;
-  }
-  // 4705540: illegal if more characters are found after the first non-digit
-  if (strlen(s) > 1) {
+
+  bool is_hex = (s[0] == '0' && (s[1] == 'x' || s[1] == 'X'));
+  char* remainder;
+  errno = 0;
+  n = strtoull(s, &remainder, (is_hex ? 16 : 10));
+  if (errno != 0) {
     return false;
   }
-  switch (*s) {
+
+  // Fail if no number was read at all or if the remainder contains more than a single non-digit character.
+  if (remainder == s || strlen(remainder) > 1) {
+    return false;
+  }
+
+  switch (*remainder) {
     case 'T': case 't':
       *result = n * G * K;
       // Check for overflow.
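
For illustration only, a self-contained sketch of the strtoull-based parsing style that the rewritten Arguments::atojulong uses: reject anything that does not start with a digit, parse in base 10 or 16, and allow at most one trailing unit character. The function name parse_size, the plain unsigned long long type, and the simplified overflow handling are assumptions, not the actual HotSpot code.

#include <cctype>
#include <cerrno>
#include <cstdlib>
#include <cstring>

// Sketch of size parsing: "64", "64m", "0x1000", "2g" are accepted;
// "-1", " 1" and "64mb" are rejected.
static bool parse_size(const char* s, unsigned long long* result) {
  if (!isdigit((unsigned char)*s)) return false;          // no sign, no leading spaces
  bool is_hex = (s[0] == '0' && (s[1] == 'x' || s[1] == 'X'));
  char* remainder;
  errno = 0;
  unsigned long long n = strtoull(s, &remainder, is_hex ? 16 : 10);
  if (errno != 0) return false;
  if (remainder == s || strlen(remainder) > 1) return false;  // at most one suffix char
  unsigned long long scale = 1;
  switch (*remainder) {
    case 'k': case 'K': scale = 1024ULL; break;
    case 'm': case 'M': scale = 1024ULL * 1024; break;
    case 'g': case 'G': scale = 1024ULL * 1024 * 1024; break;
    case 't': case 'T': scale = 1024ULL * 1024 * 1024 * 1024; break;
    case '\0': break;
    default: return false;
  }
  *result = n * scale;  // the real code additionally checks for overflow here
  return true;
}
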
--- a/src/share/vm/runtime/objectMonitor.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/runtime/objectMonitor.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -131,8 +131,6 @@
 static int Knob_QMode               = 0;       // EntryList-cxq policy - queue discipline
 static volatile int InitDone        = 0;
 
-#define TrySpin TrySpin_VaryDuration
-
 // -----------------------------------------------------------------------------
 // Theory of operations -- Monitors lists, thread residency, etc:
 //
@@ -1848,13 +1846,8 @@
 // hysteresis control to damp the transition rate between spinning and
 // not spinning.
 
-intptr_t ObjectMonitor::SpinCallbackArgument = 0;
-int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL;
-
 // Spinning: Fixed frequency (100%), vary duration
-
-
-int ObjectMonitor::TrySpin_VaryDuration(Thread * Self) {
+int ObjectMonitor::TrySpin(Thread * Self) {
   // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
   int ctr = Knob_FixedSpin;
   if (ctr != 0) {
@@ -1948,11 +1941,6 @@
         goto Abort;           // abrupt spin egress
       }
       if (Knob_UsePause & 1) SpinPause();
-
-      int (*scb)(intptr_t,int) = SpinCallbackFunction;
-      if (hits > 50 && scb != NULL) {
-        int abend = (*scb)(SpinCallbackArgument, 0);
-      }
     }
 
     if (Knob_UsePause & 2) SpinPause();
--- a/src/share/vm/runtime/objectMonitor.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/runtime/objectMonitor.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -161,9 +161,6 @@
   Thread * volatile _Responsible;
 
   volatile int _Spinner;            // for exit->spinner handoff optimization
-  volatile int _SpinFreq;           // Spin 1-out-of-N attempts: success rate
-  volatile int _SpinClock;
-  volatile intptr_t _SpinState;     // MCS/CLH list of spinners
   volatile int _SpinDuration;
 
   volatile jint  _count;            // reference count to prevent reclamation/deflation
@@ -238,10 +235,6 @@
   static int cxq_offset_in_bytes()         { return offset_of(ObjectMonitor, _cxq); }
   static int succ_offset_in_bytes()        { return offset_of(ObjectMonitor, _succ); }
   static int EntryList_offset_in_bytes()   { return offset_of(ObjectMonitor, _EntryList); }
-  static int FreeNext_offset_in_bytes()    { return offset_of(ObjectMonitor, FreeNext); }
-  static int WaitSet_offset_in_bytes()     { return offset_of(ObjectMonitor, _WaitSet); }
-  static int Responsible_offset_in_bytes() { return offset_of(ObjectMonitor, _Responsible); }
-  static int Spinner_offset_in_bytes()     { return offset_of(ObjectMonitor, _Spinner); }
 
   // ObjectMonitor references can be ORed with markOopDesc::monitor_value
   // as part of the ObjectMonitor tagging mechanism. When we combine an
@@ -257,11 +250,6 @@
   #define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
     ((ObjectMonitor::f ## _offset_in_bytes()) - markOopDesc::monitor_value)
 
-  // Eventually we'll make provisions for multiple callbacks, but
-  // now one will suffice.
-  static int (*SpinCallbackFunction)(intptr_t, int);
-  static intptr_t SpinCallbackArgument;
-
   markOop   header() const;
   void      set_header(markOop hdr);
 
@@ -312,8 +300,6 @@
     _cxq           = NULL;
     _WaitSet       = NULL;
     _recursions    = 0;
-    _SpinFreq      = 0;
-    _SpinClock     = 0;
   }
 
  public:
@@ -353,9 +339,7 @@
   void      UnlinkAfterAcquire(Thread * Self, ObjectWaiter * SelfNode);
   int       TryLock(Thread * Self);
   int       NotRunnable(Thread * Self, Thread * Owner);
-  int       TrySpin_Fixed(Thread * Self);
-  int       TrySpin_VaryFrequency(Thread * Self);
-  int       TrySpin_VaryDuration(Thread * Self);
+  int       TrySpin(Thread * Self);
   void      ExitEpilog(Thread * Self, ObjectWaiter * Wakee);
   bool      ExitSuspendEquivalent(JavaThread * Self);
   void      post_monitor_wait_event(EventJavaMonitorWait * event,
--- a/src/share/vm/runtime/sharedRuntime.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/runtime/sharedRuntime.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -388,16 +388,6 @@
   static int c_calling_convention(const BasicType *sig_bt, VMRegPair *regs, VMRegPair *regs2,
                                   int total_args_passed);
 
-  // Compute the new number of arguments in the signature if 32 bit ints
-  // must be converted to longs. Needed if CCallingConventionRequiresIntsAsLongs
-  // is true.
-  static int  convert_ints_to_longints_argcnt(int in_args_count, BasicType* in_sig_bt);
-  // Adapt a method's signature if it contains 32 bit integers that must
-  // be converted to longs. Needed if CCallingConventionRequiresIntsAsLongs
-  // is true.
-  static void convert_ints_to_longints(int i2l_argcnt, int& in_args_count,
-                                       BasicType*& in_sig_bt, VMRegPair*& in_regs);
-
   static size_t trampoline_size();
 
   static void generate_trampoline(MacroAssembler *masm, address destination);
--- a/src/share/vm/runtime/synchronizer.hpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/runtime/synchronizer.hpp	Thu Jul 14 15:18:15 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -144,8 +144,6 @@
   static void verify() PRODUCT_RETURN;
   static int  verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
 
-  static void RegisterSpinCallback(int(*)(intptr_t, int), intptr_t);
-
  private:
   enum { _BLOCKSIZE = 128 };
   // global list of blocks of monitors
--- a/src/share/vm/utilities/vmError.cpp	Wed Jul 13 15:19:34 2016 +0100
+++ b/src/share/vm/utilities/vmError.cpp	Thu Jul 14 15:18:15 2016 +0100
@@ -205,16 +205,39 @@
 static void print_oom_reasons(outputStream* st) {
   st->print_cr("# Possible reasons:");
   st->print_cr("#   The system is out of physical RAM or swap space");
-  st->print_cr("#   In 32 bit mode, the process size limit was hit");
+  if (UseCompressedOops) {
+    st->print_cr("#   The process is running with CompressedOops enabled, and the Java Heap may be blocking the growth of the native heap");
+  }
+  if (LogBytesPerWord == 2) {
+    st->print_cr("#   In 32 bit mode, the process size limit was hit");
+  }
   st->print_cr("# Possible solutions:");
   st->print_cr("#   Reduce memory load on the system");
   st->print_cr("#   Increase physical memory or swap space");
   st->print_cr("#   Check if swap backing store is full");
-  st->print_cr("#   Use 64 bit Java on a 64 bit OS");
+  if (LogBytesPerWord == 2) {
+    st->print_cr("#   Use 64 bit Java on a 64 bit OS");
+  }
   st->print_cr("#   Decrease Java heap size (-Xmx/-Xms)");
   st->print_cr("#   Decrease number of Java threads");
   st->print_cr("#   Decrease Java thread stack sizes (-Xss)");
   st->print_cr("#   Set larger code cache with -XX:ReservedCodeCacheSize=");
+  if (UseCompressedOops) {
+    switch (Universe::narrow_oop_mode()) {
+      case Universe::UnscaledNarrowOop:
+        st->print_cr("#   JVM is running with Unscaled Compressed Oops mode in which the Java heap is");
+        st->print_cr("#     placed in the first 4GB address space. The Java Heap base address is the");
+        st->print_cr("#     maximum limit for the native heap growth. Please use -XX:HeapBaseMinAddress");
+        st->print_cr("#     to set the Java Heap base and to place the Java Heap above 4GB virtual address.");
+        break;
+      case Universe::ZeroBasedNarrowOop:
+        st->print_cr("#   JVM is running with Zero Based Compressed Oops mode in which the Java heap is");
+        st->print_cr("#     placed in the first 32GB address space. The Java Heap base address is the");
+        st->print_cr("#     maximum limit for the native heap growth. Please use -XX:HeapBaseMinAddress");
+        st->print_cr("#     to set the Java Heap base and to place the Java Heap above 32GB virtual address.");
+        break;
+    }
+  }
   st->print_cr("# This output file may be truncated or incomplete.");
 }
 
--- a/test/TEST.groups	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/TEST.groups	Thu Jul 14 15:18:15 2016 +0100
@@ -130,8 +130,8 @@
 # Tests that require the full JRE
 #
 needs_jre = \
-  compiler/c2/6852078/Test6852078.java \
-  compiler/c2/7047069/Test7047069.java \
+  compiler/c2/Test6852078.java \
+  compiler/c2/Test7047069.java \
   runtime/6294277/SourceDebugExtension.java \
   runtime/ClassFile/JsrRewriting.java \
   runtime/ClassFile/OomWhileParsingRepeatedJsr.java \
@@ -277,16 +277,16 @@
   compiler/arraycopy/ \
   compiler/c1/ \
   compiler/c2/ \
-  -compiler/c2/5091921/Test6850611.java \
-  -compiler/c2/5091921/Test6890943.java \
-  -compiler/c2/5091921/Test6905845.java \
-  -compiler/c2/6340864 \
-  -compiler/c2/6589834 \
-  -compiler/c2/6603011 \
-  -compiler/c2/6912517 \
-  -compiler/c2/6792161 \
-  -compiler/c2/7070134 \
-  -compiler/c2/8004867
+  -compiler/c2/Test6850611.java \
+  -compiler/c2/cr6890943/Test6890943.java \
+  -compiler/c2/Test6905845.java \
+  -compiler/c2/cr6340864 \
+  -compiler/c2/cr6589834 \
+  -compiler/c2/cr8004867 \
+  -compiler/c2/stemmer \
+  -compiler/c2/Test6792161.java \
+  -compiler/c2/Test6603011.java \
+  -compiler/c2/Test6912517.java \
 
 hotspot_fast_compiler_2 = \
   compiler/classUnloading/ \
@@ -303,7 +303,7 @@
   compiler/integerArithmetic/ \
   compiler/interpreter/ \
   compiler/jvmci/ \
-  -compiler/codegen/7184394 \
+  -compiler/codegen/aes \
   -compiler/codecache/stress \
   -compiler/gcbarriers/PreserveFPRegistersTest.java
 
@@ -320,13 +320,13 @@
   compiler/types/ \
   compiler/uncommontrap/ \
   compiler/unsafe/ \
-  -compiler/intrinsics/adler32 \
   -compiler/intrinsics/bmi \
   -compiler/intrinsics/mathexact \
-  -compiler/intrinsics/multiplytolen \
   -compiler/intrinsics/sha \
-  -compiler/loopopts/7052494 \
-  -compiler/runtime/6826736
+  -compiler/intrinsics/bigInteger/TestMultiplyToLen.java \
+  -compiler/intrinsics/zip/TestAdler32.java \
+  -compiler/loopopts/Test7052494.java \
+  -compiler/runtime/Test6826736.java
 
 hotspot_fast_compiler_closed = \
   sanity/ExecuteInternalVMTests.java
@@ -395,6 +395,17 @@
   :hotspot_fast_gc_gcold \
   :hotspot_fast_runtime \
   :hotspot_fast_serviceability
+  
+hotspot_runtime_tier2 = \
+  runtime/ \
+  serviceability/ \
+ -:hotspot_fast_runtime \
+ -:hotspot_fast_serviceability \
+ -:hotspot_runtime_tier2_platform_agnostic
+ 
+hotspot_runtime_tier2_platform_agnostic = \
+  runtime/SelectionResolution \
+ -:hotspot_fast_runtime
 
 # All tests that depend on the nashorn extension.
 #
--- a/test/compiler/arguments/BMICommandLineOptionTestBase.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arguments/BMICommandLineOptionTestBase.java	Thu Jul 14 15:18:15 2016 +0100
@@ -21,7 +21,10 @@
  * questions.
  */
 
-import jdk.test.lib.cli.*;
+package compiler.arguments;
+
+import jdk.test.lib.cli.CPUSpecificCommandLineOptionTest;
+import jdk.test.lib.cli.CommandLineOptionTest;
 
 /**
  * Base class for all X86 bit manipulation related command line options.
--- a/test/compiler/arguments/BMISupportedCPUTest.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arguments/BMISupportedCPUTest.java	Thu Jul 14 15:18:15 2016 +0100
@@ -21,8 +21,10 @@
  * questions.
  */
 
-import jdk.test.lib.*;
-import jdk.test.lib.cli.*;
+package compiler.arguments;
+
+import jdk.test.lib.ExitCode;
+import jdk.test.lib.cli.CommandLineOptionTest;
 
 /**
  * Test on bit manipulation related command line options,
--- a/test/compiler/arguments/BMIUnsupportedCPUTest.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arguments/BMIUnsupportedCPUTest.java	Thu Jul 14 15:18:15 2016 +0100
@@ -21,8 +21,11 @@
  * questions.
  */
 
-import jdk.test.lib.*;
-import jdk.test.lib.cli.*;
+package compiler.arguments;
+
+import jdk.test.lib.ExitCode;
+import jdk.test.lib.Platform;
+import jdk.test.lib.cli.CommandLineOptionTest;
 
 /**
  * Test on bit manipulation related command line options,
--- a/test/compiler/arguments/CheckCICompilerCount.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arguments/CheckCICompilerCount.java	Thu Jul 14 15:18:15 2016 +0100
@@ -21,19 +21,22 @@
  * questions.
  */
 
-import jdk.test.lib.*;
-
 /*
  * @test CheckCheckCICompilerCount
  * @bug 8130858
  * @bug 8132525
  * @summary Check that correct range of values for CICompilerCount are allowed depending on whether tiered is enabled or not
- * @library /testlibrary
+ * @library /testlibrary /
  * @modules java.base/jdk.internal.misc
  *          java.management
- * @run main CheckCICompilerCount
+ * @run driver compiler.arguments.CheckCICompilerCount
  */
 
+package compiler.arguments;
+
+import jdk.test.lib.OutputAnalyzer;
+import jdk.test.lib.ProcessTools;
+
 public class CheckCICompilerCount {
     private static final String[][] NON_TIERED_ARGUMENTS = {
         {
--- a/test/compiler/arguments/CheckCompileThresholdScaling.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arguments/CheckCompileThresholdScaling.java	Thu Jul 14 15:18:15 2016 +0100
@@ -21,18 +21,21 @@
  * questions.
  */
 
-import jdk.test.lib.*;
-
 /*
  * @test CheckCompileThresholdScaling
  * @bug 8059604
- * @summary "Add CompileThresholdScaling flag to control when methods are first compiled (with +/-TieredCompilation)"
+ * @summary Add CompileThresholdScaling flag to control when methods are first compiled (with +/-TieredCompilation)
  * @library /testlibrary
  * @modules java.base/jdk.internal.misc
  *          java.management
- * @run main CheckCompileThresholdScaling
+ * @run driver compiler.arguments.CheckCompileThresholdScaling
  */
 
+package compiler.arguments;
+
+import jdk.test.lib.OutputAnalyzer;
+import jdk.test.lib.ProcessTools;
+
 public class CheckCompileThresholdScaling {
 
     // The flag CompileThresholdScaling scales compilation thresholds
--- a/test/compiler/arguments/TestUseBMI1InstructionsOnSupportedCPU.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arguments/TestUseBMI1InstructionsOnSupportedCPU.java	Thu Jul 14 15:18:15 2016 +0100
@@ -26,19 +26,19 @@
  * @bug 8031321
  * @summary Verify processing of UseBMI1Instructions option on CPU with
  *          BMI1 feature support.
- * @library /testlibrary /test/lib
+ * @library /testlibrary /test/lib /
  * @modules java.base/jdk.internal.misc
  *          java.management
- * @build TestUseBMI1InstructionsOnSupportedCPU
- *        BMISupportedCPUTest
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @build compiler.arguments.TestUseBMI1InstructionsOnSupportedCPU
+ *        compiler.arguments.BMISupportedCPUTest
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                                sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
- *                   -XX:+WhiteBoxAPI TestUseBMI1InstructionsOnSupportedCPU
+ *                   -XX:+WhiteBoxAPI
+ *                   compiler.arguments.TestUseBMI1InstructionsOnSupportedCPU
  */
 
-import sun.hotspot.cpuinfo.CPUInfo;
-import jdk.test.lib.*;
+package compiler.arguments;
 
 public class TestUseBMI1InstructionsOnSupportedCPU
      extends BMISupportedCPUTest {
--- a/test/compiler/arguments/TestUseBMI1InstructionsOnUnsupportedCPU.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arguments/TestUseBMI1InstructionsOnUnsupportedCPU.java	Thu Jul 14 15:18:15 2016 +0100
@@ -26,20 +26,19 @@
  * @bug 8031321
  * @summary Verify processing of UseBMI1Instructions option on CPU without
  *          BMI1 feature support.
- * @library /testlibrary /test/lib
+ * @library /testlibrary /test/lib /
  * @modules java.base/jdk.internal.misc
  *          java.management
- * @build TestUseBMI1InstructionsOnUnsupportedCPU
- *        BMIUnsupportedCPUTest
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @build compiler.arguments.TestUseBMI1InstructionsOnUnsupportedCPU
+ *        compiler.arguments.BMIUnsupportedCPUTest
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                                sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
- *                   -XX:+WhiteBoxAPI TestUseBMI1InstructionsOnUnsupportedCPU
+ *                   -XX:+WhiteBoxAPI
+ *                   compiler.arguments.TestUseBMI1InstructionsOnUnsupportedCPU
  */
 
-import sun.hotspot.cpuinfo.CPUInfo;
-import jdk.test.lib.*;
-import jdk.test.lib.cli.*;
+package compiler.arguments;
 
 public class TestUseBMI1InstructionsOnUnsupportedCPU
       extends BMIUnsupportedCPUTest {
--- a/test/compiler/arguments/TestUseCompiler.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arguments/TestUseCompiler.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,10 +25,13 @@
  * @test TestUseCompiler
  * @bug 8086068
  * @summary Tests execution with inconsistent UseCompiler flag combination.
- * @run main/othervm -Xint -XX:+UseCompiler TestUseCompiler
- * @run main/othervm -XX:+UseCompiler -Xint TestUseCompiler
+ *
+ * @run main/othervm -Xint -XX:+UseCompiler compiler.arguments.TestUseCompiler
+ * @run main/othervm -XX:+UseCompiler -Xint compiler.arguments.TestUseCompiler
  */
 
+package compiler.arguments;
+
 public class TestUseCompiler {
 
     public static void main(String args[]) {
--- a/test/compiler/arguments/TestUseCountLeadingZerosInstructionOnSupportedCPU.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arguments/TestUseCountLeadingZerosInstructionOnSupportedCPU.java	Thu Jul 14 15:18:15 2016 +0100
@@ -26,20 +26,19 @@
  * @bug 8031321
  * @summary Verify processing of UseCountLeadingZerosInstruction option
  *          on CPU with LZCNT support.
- * @library /testlibrary /test/lib
+ * @library /testlibrary /test/lib /
  * @modules java.base/jdk.internal.misc
  *          java.management
- * @build TestUseCountLeadingZerosInstructionOnSupportedCPU
- *        BMISupportedCPUTest
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ *
+ * @build compiler.arguments.TestUseCountLeadingZerosInstructionOnSupportedCPU
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                                sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI
- *                   TestUseCountLeadingZerosInstructionOnSupportedCPU
+ *                   compiler.arguments.TestUseCountLeadingZerosInstructionOnSupportedCPU
  */
 
-import sun.hotspot.cpuinfo.CPUInfo;
-import jdk.test.lib.*;
+package compiler.arguments;
 
 public class TestUseCountLeadingZerosInstructionOnSupportedCPU
      extends BMISupportedCPUTest {
--- a/test/compiler/arguments/TestUseCountLeadingZerosInstructionOnUnsupportedCPU.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arguments/TestUseCountLeadingZerosInstructionOnUnsupportedCPU.java	Thu Jul 14 15:18:15 2016 +0100
@@ -26,20 +26,19 @@
  * @bug 8031321
  * @summary Verify processing of UseCountLeadingZerosInstruction option
  *          on CPU without LZCNT support.
- * @library /testlibrary /test/lib
+ * @library /testlibrary /test/lib /
  * @modules java.base/jdk.internal.misc
  *          java.management
- * @build TestUseCountLeadingZerosInstructionOnUnsupportedCPU
- *        BMIUnsupportedCPUTest
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ *
+ * @build compiler.arguments.TestUseCountLeadingZerosInstructionOnUnsupportedCPU
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                                sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI
- *                   TestUseCountLeadingZerosInstructionOnUnsupportedCPU
+ *                   compiler.arguments.TestUseCountLeadingZerosInstructionOnUnsupportedCPU
  */
 
-import sun.hotspot.cpuinfo.CPUInfo;
-import jdk.test.lib.*;
+package compiler.arguments;
 
 public class TestUseCountLeadingZerosInstructionOnUnsupportedCPU
      extends BMIUnsupportedCPUTest {
--- a/test/compiler/arguments/TestUseCountTrailingZerosInstructionOnSupportedCPU.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arguments/TestUseCountTrailingZerosInstructionOnSupportedCPU.java	Thu Jul 14 15:18:15 2016 +0100
@@ -26,21 +26,21 @@
  * @bug 8031321
  * @summary Verify processing of UseCountTrailingZerosInstruction option
  *          on CPU with TZCNT (BMI1 feature) support.
- * @library /testlibrary /test/lib
+ * @library /testlibrary /test/lib /
  * @modules java.base/jdk.internal.misc
  *          java.management
- * @build TestUseCountTrailingZerosInstructionOnSupportedCPU
- *        BMISupportedCPUTest
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ *
+ * @build compiler.arguments.TestUseCountTrailingZerosInstructionOnSupportedCPU
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                                sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI
- *                   TestUseCountTrailingZerosInstructionOnSupportedCPU
+ *                   compiler.arguments.TestUseCountTrailingZerosInstructionOnSupportedCPU
  */
 
-import sun.hotspot.cpuinfo.CPUInfo;
-import jdk.test.lib.*;
-import jdk.test.lib.cli.*;
+package compiler.arguments;
+
+import jdk.test.lib.cli.CommandLineOptionTest;
 
 public class TestUseCountTrailingZerosInstructionOnSupportedCPU
         extends BMISupportedCPUTest {
--- a/test/compiler/arguments/TestUseCountTrailingZerosInstructionOnUnsupportedCPU.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arguments/TestUseCountTrailingZerosInstructionOnUnsupportedCPU.java	Thu Jul 14 15:18:15 2016 +0100
@@ -26,21 +26,21 @@
  * @bug 8031321
  * @summary Verify processing of UseCountTrailingZerosInstruction option
  *          on CPU without TZCNT instruction (BMI1 feature) support.
- * @library /testlibrary /test/lib
+ * @library /testlibrary /test/lib /
  * @modules java.base/jdk.internal.misc
  *          java.management
- * @build TestUseCountTrailingZerosInstructionOnUnsupportedCPU
- *        BMIUnsupportedCPUTest
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ *
+ * @build compiler.arguments.TestUseCountTrailingZerosInstructionOnUnsupportedCPU
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                                sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI
- *                   TestUseCountTrailingZerosInstructionOnUnsupportedCPU
+ *                   compiler.arguments.TestUseCountTrailingZerosInstructionOnUnsupportedCPU
  */
 
-import sun.hotspot.cpuinfo.CPUInfo;
-import jdk.test.lib.*;
-import jdk.test.lib.cli.*;
+package compiler.arguments;
+
+import jdk.test.lib.cli.CommandLineOptionTest;
 
 public class TestUseCountTrailingZerosInstructionOnUnsupportedCPU
         extends BMIUnsupportedCPUTest {
--- a/test/compiler/arraycopy/TestArrayCloneBadAssert.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestArrayCloneBadAssert.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,10 +25,14 @@
  * @test
  * @bug 8073792
  * @summary assert broken when array size becomes known during igvn
- * @run main/othervm -Xcomp -XX:CompileOnly=TestArrayCloneBadAssert.m TestArrayCloneBadAssert
  *
+ * @run main/othervm -Xcomp
+ *      -XX:CompileCommand=compileonly,compiler.arraycopy.TestArrayCloneBadAssert::m
+ *      compiler.arraycopy.TestArrayCloneBadAssert
  */
 
+package compiler.arraycopy;
+
 public class TestArrayCloneBadAssert {
 
     static final int[] array = new int[5];
--- a/test/compiler/arraycopy/TestArrayCopyAsLoadsStores.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestArrayCopyAsLoadsStores.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,13 +25,22 @@
  * @test
  * @bug 6912521
  * @summary small array copy as loads/stores
- * @compile TestArrayCopyAsLoadsStores.java TestArrayCopyUtils.java
- * @run main/othervm -ea -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:CompileCommand=dontinline,TestArrayCopyAsLoadsStores::m* -XX:TypeProfileLevel=200 TestArrayCopyAsLoadsStores
- * @run main/othervm -ea -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:CompileCommand=dontinline,TestArrayCopyAsLoadsStores::m* -XX:+IgnoreUnrecognizedVMOptions -XX:+StressArrayCopyMacroNode -XX:TypeProfileLevel=200 TestArrayCopyAsLoadsStores
+ * @library /
  *
+ * @run main/othervm -ea -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
+ *                   -XX:CompileCommand=dontinline,compiler.arraycopy.TestArrayCopyAsLoadsStores::m*
+ *                   -XX:TypeProfileLevel=200
+ *                   compiler.arraycopy.TestArrayCopyAsLoadsStores
+ * @run main/othervm -ea -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
+ *                   -XX:CompileCommand=dontinline,compiler.arraycopy.TestArrayCopyAsLoadsStores::m*
+ *                   -XX:TypeProfileLevel=200
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+StressArrayCopyMacroNode
+ *                   compiler.arraycopy.TestArrayCopyAsLoadsStores
  */
 
-import java.util.*;
+package compiler.arraycopy;
+
+import java.util.Arrays;
 
 public class TestArrayCopyAsLoadsStores extends TestArrayCopyUtils {
 
--- a/test/compiler/arraycopy/TestArrayCopyBadReexec.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestArrayCopyBadReexec.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,10 +25,13 @@
  * @test
  * @bug 8073866
  * @summary Fix for 8064703 may also cause stores between the allocation and arraycopy to be re-executed after a deoptimization
- * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestArrayCopyBadReexec
  *
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
+ *                   compiler.arraycopy.TestArrayCopyBadReexec
  */
 
+package compiler.arraycopy;
+
 public class TestArrayCopyBadReexec {
 
     static int val;
--- a/test/compiler/arraycopy/TestArrayCopyMacro.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestArrayCopyMacro.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,10 +25,13 @@
  * @test
  * @bug 7173584
  * @summary arraycopy as macro node
- * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestArrayCopyMacro
  *
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
+ *                   compiler.arraycopy.TestArrayCopyMacro
  */
 
+package compiler.arraycopy;
+
 public class TestArrayCopyMacro {
     static class A {
     }
--- a/test/compiler/arraycopy/TestArrayCopyNoInit.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestArrayCopyNoInit.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,10 +25,13 @@
  * @test
  * @bug 8064703
  * @summary Deoptimization between array allocation and arraycopy may result in a non-initialized array
- * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:TypeProfileLevel=020 TestArrayCopyNoInit
  *
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:TypeProfileLevel=020
+ *                   compiler.arraycopy.TestArrayCopyNoInit
  */
 
+package compiler.arraycopy;
+
 public class TestArrayCopyNoInit {
 
     static int[] m1(int[] src) {
--- a/test/compiler/arraycopy/TestArrayCopyNoInitDeopt.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestArrayCopyNoInitDeopt.java	Thu Jul 14 15:18:15 2016 +0100
@@ -28,19 +28,22 @@
  * @library /testlibrary /test/lib /
  * @modules java.base/jdk.internal.misc
  *          java.management
- * @build TestArrayCopyNoInitDeopt
- * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *
+ * @build compiler.arraycopy.TestArrayCopyNoInitDeopt
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox sun.hotspot.WhiteBox$WhiteBoxPermission
  *                                jdk.test.lib.Platform
  * @run main/othervm -Xmixed -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *                   -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:TypeProfileLevel=020
- *                   TestArrayCopyNoInitDeopt
+ *                   compiler.arraycopy.TestArrayCopyNoInitDeopt
  */
 
+package compiler.arraycopy;
+
+import compiler.whitebox.CompilerWhiteBoxTest;
+import jdk.test.lib.Platform;
 import sun.hotspot.WhiteBox;
-import sun.hotspot.code.NMethod;
-import jdk.test.lib.Platform;
-import java.lang.reflect.*;
-import compiler.whitebox.CompilerWhiteBoxTest;
+
+import java.lang.reflect.Method;
 
 public class TestArrayCopyNoInitDeopt {
 
--- a/test/compiler/arraycopy/TestArrayCopyOfStopped.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestArrayCopyOfStopped.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,10 +25,13 @@
  * @test
  * @bug 8074676
  * @summary after guards in Arrays.copyOf() intrinsic, control may become top
- * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestArrayCopyOfStopped
  *
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
+ *                   compiler.arraycopy.TestArrayCopyOfStopped
  */
 
+package compiler.arraycopy;
+
 import java.util.Arrays;
 
 public class TestArrayCopyOfStopped {
--- a/test/compiler/arraycopy/TestArrayCopyOverflowArguments.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestArrayCopyOverflowArguments.java	Thu Jul 14 15:18:15 2016 +0100
@@ -28,10 +28,13 @@
  *          are properly sign extended to 64 bit (e.g., PPC64, s390x). This can fail
  *          if slow_arraycopy_C() is compiled by the C compiler without any implicit
  *          casts (as spill stores to the stack that are done with 4-byte instructions).
- * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestArrayCopyOverflowArguments
  *
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
+ *                   compiler.arraycopy.TestArrayCopyOverflowArguments
  */
 
+package compiler.arraycopy;
+
 public class TestArrayCopyOverflowArguments {
 
     // Without volatile the overflowing computation was moved up and then
--- a/test/compiler/arraycopy/TestArrayCopyOverflowInBoundChecks.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestArrayCopyOverflowInBoundChecks.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,9 +25,13 @@
  * @test
  * @bug 8134468
  * @summary test that checks whether an array load falls into the range of an arraycopy is incorrect on 32bits
- * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestArrayCopyOverflowInBoundChecks
  *
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
+ *                    compiler.arraycopy.TestArrayCopyOverflowInBoundChecks
  */
+
+package compiler.arraycopy;
+
 public class TestArrayCopyOverflowInBoundChecks {
 
     static byte[] src_array = { 'a', 'b', 'c', 'd', 'e' };
--- a/test/compiler/arraycopy/TestArrayCopyStoppedAfterGuards.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestArrayCopyStoppedAfterGuards.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,10 +25,16 @@
  * @test
  * @bug 8075921
  * @summary control becomes top after arraycopy guards and confuses tightly coupled allocation logic
- * @run main/othervm -Xcomp -XX:CompileOnly=TestArrayCopyStoppedAfterGuards.test,System.arraycopy TestArrayCopyStoppedAfterGuards
+ *
+ * @run main/othervm -Xcomp
+ *      -XX:CompileCommand=compileonly,java.lang.System::arraycopy
+ *      -XX:CompileCommand=compileonly,compiler.arraycopy.TestArrayCopyStoppedAfterGuards::test
+ *      compiler.arraycopy.TestArrayCopyStoppedAfterGuards
  *
  */
 
+package compiler.arraycopy;
+
 public class TestArrayCopyStoppedAfterGuards {
 
     static void test() {
--- a/test/compiler/arraycopy/TestArrayCopyUtils.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestArrayCopyUtils.java	Thu Jul 14 15:18:15 2016 +0100
@@ -21,9 +21,13 @@
  * questions.
  */
 
-import java.lang.annotation.*;
-import java.lang.reflect.*;
-import java.util.*;
+package compiler.arraycopy;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.HashMap;
 
 abstract class TestArrayCopyUtils {
     public enum ArraySrc {
--- a/test/compiler/arraycopy/TestArraysCopyOfNoTypeCheck.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestArraysCopyOfNoTypeCheck.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,10 +25,13 @@
  * @test
  * @bug 8055910
  * @summary Arrays.copyOf doesn't perform subtype check
- * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestArraysCopyOfNoTypeCheck
  *
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
+ *                   compiler.arraycopy.TestArraysCopyOfNoTypeCheck
  */
 
+package compiler.arraycopy;
+
 import java.util.Arrays;
 
 public class TestArraysCopyOfNoTypeCheck {
--- a/test/compiler/arraycopy/TestDeadArrayCopyOnMemChain.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestDeadArrayCopyOnMemChain.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,10 +25,13 @@
  * @test
  * @bug 8080699
  * @summary eliminated arraycopy node still reachable through exception edges
- * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation TestDeadArrayCopyOnMemChain
  *
+ * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation
+ *                   compiler.arraycopy.TestDeadArrayCopyOnMemChain
  */
 
+package compiler.arraycopy;
+
 public class TestDeadArrayCopyOnMemChain {
     static class A {
         int f;
--- a/test/compiler/arraycopy/TestEliminateArrayCopy.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestEliminateArrayCopy.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,11 +25,16 @@
  * @test
  * @bug 8076188
  * @summary arraycopy to non escaping destination may be eliminated
- * @compile TestEliminateArrayCopy.java TestArrayCopyUtils.java
- * @run main/othervm -ea -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:CompileCommand=dontinline,TestEliminateArrayCopy*::m* TestEliminateArrayCopy
+ * @library /
+ *
+ * @run main/othervm -ea -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
+ *                   -XX:CompileCommand=dontinline,compiler.arraycopy.TestEliminateArrayCopy*::m*
+ *                   compiler.arraycopy.TestEliminateArrayCopy
  *
  */
 
+package compiler.arraycopy;
+
 public class TestEliminateArrayCopy {
 
     static class CloneTests extends TestInstanceCloneUtils {
--- a/test/compiler/arraycopy/TestEliminatedArrayCopyDeopt.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestEliminatedArrayCopyDeopt.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,8 +25,12 @@
  * @test
  * @bug 8130847 8156760
  * @summary Eliminated instance/array written to by an array copy variant must be correctly initialized when reallocated at a deopt
- * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestEliminatedArrayCopyDeopt
- * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:+IgnoreUnrecognizedVMOptions -XX:-ReduceInitialCardMarks TestEliminatedArrayCopyDeopt
+ *
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
+ *                   compiler.arraycopy.TestEliminatedArrayCopyDeopt
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:-ReduceInitialCardMarks
+ *                   compiler.arraycopy.TestEliminatedArrayCopyDeopt
  */
 
 // Test that if an ArrayCopy node is eliminated because it doesn't
@@ -34,6 +38,8 @@
 // on a deoptimization, when the object/array is reallocated, it is
 // correctly initialized
 
+package compiler.arraycopy;
+
 public class TestEliminatedArrayCopyDeopt {
 
     static class A implements Cloneable {
--- a/test/compiler/arraycopy/TestEliminatedArrayCopyPhi.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestEliminatedArrayCopyPhi.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,10 +25,13 @@
  * @test
  * @bug 8134321
  * @summary Code that captures field values of an eliminated allocation at a safepoint when there's an arraycopy behind a Phi is broken
- * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestEliminatedArrayCopyPhi
  *
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
+ *                   compiler.arraycopy.TestEliminatedArrayCopyPhi
  */
 
+package compiler.arraycopy;
+
 public class TestEliminatedArrayCopyPhi {
 
     static int[] escaped;
--- a/test/compiler/arraycopy/TestEliminatedArrayLoopPredicateCopyDeopt.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestEliminatedArrayLoopPredicateCopyDeopt.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,10 +25,13 @@
  * @test
  * @bug 8134974
  * @summary Cannot pin eliminated arraycopy loads for deopt state in uncommon trap path if it is a loop predicate unc
- * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestEliminatedArrayLoopPredicateCopyDeopt
  *
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
+ *                   compiler.arraycopy.TestEliminatedArrayLoopPredicateCopyDeopt
  */
 
+package compiler.arraycopy;
+
 public class TestEliminatedArrayLoopPredicateCopyDeopt {
 
     static boolean test(int[] array_src) {
--- a/test/compiler/arraycopy/TestInstanceCloneAsLoadsStores.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestInstanceCloneAsLoadsStores.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,12 +25,23 @@
  * @test
  * @bug 6700100 8156760
  * @summary small instance clone as loads/stores
- * @compile TestInstanceCloneAsLoadsStores.java TestInstanceCloneUtils.java
- * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:CompileCommand=dontinline,TestInstanceCloneAsLoadsStores::m* TestInstanceCloneAsLoadsStores
- * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:CompileCommand=dontinline,TestInstanceCloneAsLoadsStores::m* -XX:+IgnoreUnrecognizedVMOptions -XX:+StressArrayCopyMacroNode TestInstanceCloneAsLoadsStores
- * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:CompileCommand=dontinline,TestInstanceCloneAsLoadsStores::m* -XX:+IgnoreUnrecognizedVMOptions -XX:-ReduceInitialCardMarks TestInstanceCloneAsLoadsStores
+ * @library /
+ *
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
+ *                   -XX:CompileCommand=dontinline,compiler.arraycopy.TestInstanceCloneAsLoadsStores::m*
+ *                   compiler.arraycopy.TestInstanceCloneAsLoadsStores
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
+ *                   -XX:CompileCommand=dontinline,compiler.arraycopy.TestInstanceCloneAsLoadsStores::m*
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+StressArrayCopyMacroNode
+ *                   compiler.arraycopy.TestInstanceCloneAsLoadsStores
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
+ *                   -XX:CompileCommand=dontinline,compiler.arraycopy.TestInstanceCloneAsLoadsStores::m*
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:-ReduceInitialCardMarks
+ *                   compiler.arraycopy.TestInstanceCloneAsLoadsStores
  */
 
+package compiler.arraycopy;
+
 public class TestInstanceCloneAsLoadsStores extends TestInstanceCloneUtils {
 
     // Should be compiled as loads/stores
--- a/test/compiler/arraycopy/TestInstanceCloneUtils.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestInstanceCloneUtils.java	Thu Jul 14 15:18:15 2016 +0100
@@ -21,8 +21,12 @@
  * questions.
  */
 
-import java.lang.reflect.*;
-import java.util.*;
+package compiler.arraycopy;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.HashMap;
 
 abstract class TestInstanceCloneUtils {
     static class Base implements Cloneable {
--- a/test/compiler/arraycopy/TestLoadBypassArrayCopy.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestLoadBypassArrayCopy.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,10 +25,15 @@
  * @test
  * @bug 8086046
  * @summary load bypasses arraycopy that sets the value after the ArrayCopyNode is expanded
- * @run main/othervm -XX:-BackgroundCompilation  -XX:-UseOnStackReplacement -XX:CompileCommand=dontinline,TestLoadBypassArrayCopy::test_helper -XX:-TieredCompilation TestLoadBypassArrayCopy
  *
+ * @run main/othervm -XX:-BackgroundCompilation  -XX:-UseOnStackReplacement
+ *                   -XX:CompileCommand=dontinline,compiler.arraycopy.TestLoadBypassArrayCopy::test_helper
+ *                   -XX:-TieredCompilation
+ *                   compiler.arraycopy.TestLoadBypassArrayCopy
  */
 
+package compiler.arraycopy;
+
 public class TestLoadBypassArrayCopy {
 
     static long i;
--- a/test/compiler/arraycopy/TestMissingControl.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestMissingControl.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,9 +25,14 @@
  * @test
  * @bug 8055153
  * @summary missing control on LoadRange and LoadKlass when array copy macro node is expanded
- * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation TestMissingControl
+ *
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation
+ *                   compiler.arraycopy.TestMissingControl
  *
  */
+
+package compiler.arraycopy;
+
 public class TestMissingControl {
 
     static int[] m1(int[] a2) {
--- a/test/compiler/arraycopy/TestObjectArrayClone.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestObjectArrayClone.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,8 +25,14 @@
  * @test
  * @bug 8155643
  * @summary Test Object.clone() intrinsic if ReduceInitialCardMarks is disabled.
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xcomp -XX:CompileOnly=TestObjectArrayClone.test -XX:-ReduceInitialCardMarks TestObjectArrayClone
+ *
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xcomp -XX:-ReduceInitialCardMarks
+ *                   -XX:CompileCommand=compileonly,compiler.arraycopy.TestObjectArrayClone::test
+ *                   compiler.arraycopy.TestObjectArrayClone
  */
+
+package compiler.arraycopy;
+
 public class TestObjectArrayClone {
 
     public static TestObjectArrayClone[] test(TestObjectArrayClone[] arr) {
--- a/test/compiler/arraycopy/TestReduceBulkZeroingDisabled.java	Wed Jul 13 15:19:34 2016 +0100
+++ b/test/compiler/arraycopy/TestReduceBulkZeroingDisabled.java	Thu Jul 14 15:18:15 2016 +0100
@@ -25,8 +25,13 @@
  * @test
  * @bug 8155241
  * @summary Test arraycopy elimination with ReduceBulkZeroing disabled.
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xcomp -XX:-ReduceBulkZeroing TestReduceBulkZeroingDisabled
+ *
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xcomp -XX:-ReduceBulkZeroing
+ *                   compiler.arraycopy.TestReduceBulkZeroingDisabled
  */
+
+package compiler.arraycopy;
+
 public class TestReduceBulkZeroingDisabled {
 
     static public void main(String[] args) {
--- a/test/compiler/c1/6478991/NullCheckTest.java	Wed Jul 13 15:19:34 2016 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-/**
- * @test
- * @bug 6478991
- * @summary C1 NullCheckEliminator yields incorrect exceptions
- *
- * @run main/othervm -XX:CompileOnly=NullCheckTest.test,NullCheckTest.inlined  -Xcomp NullCheckTest
- */
-
-public class NullCheckTest {
-        static class A {
-                int f;
-
-                public final void inlined(A a) {
-                        // This cast is intended to fail.
-                        B b = ((B) a);
-                }
-        }
-
-        static class B extends A {
-        }
-
-
-        private static void test(A a1, A a2) {
-                // Inlined call must do a null check on a1.
-                // However, the explicit NullCheck instruction is eliminated and
-                // the null check is folded into the field load below, so the
-                // exception in the inlined method is thrown before the null check
-                // and the NullPointerException is not thrown.
-                a1.inlined(a2);
-
-                int x = a1.f;
-        }
-
-        public static void main(String[] args) {
-                // load classes
-                new B();
-                try {
-                        test(null, new A());
-
-                        throw new InternalError("FAILURE: no exception");
-                } catch (NullPointerException ex) {
-                        System.out.println("CORRECT: NullPointerException");
-                } catch (ClassCastException ex) {
-                        System.out.println("FAILURE: ClassCastException");
-                        throw ex;
-                }
-        }
-}
--- a/test/compiler/c1/6579789/Test6579789.java	Wed Jul 13 15:19:34 2016 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-/**
- * @test
- * @bug 6579789
- * @summary Internal error "c1_LinearScan.cpp:1429 Error: assert(false,"")" in debuggee with fastdebug VM
- * @run main/othervm -Xcomp -XX:UseSSE=0 -XX:CompileOnly=Test6579789.bug Test6579789
- */
-
-public class Test6579789 {
-    public static void main(String[] args) {
-        bug(4);
-    }
-    public static void bug(int n) {
-        float f = 1;
-        int i = 1;
-        try {
-            int x = 1 / n; // instruction that can trap
-            f = 2;
-            i = 2;
-            int y = 2 / n; // instruction that can trap
-        } catch (Exception ex) {
-            f++;
-            i++;
-        }
-    }
-}
--- a/test/compiler/c1/6756768/Test6756768.java	Wed Jul 13 15:19:34 2016 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2008, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/**
- * @test
- * @bug 6756768
- * @summary C1 generates invalid code
- *
- * @run main/othervm -Xcomp Test6756768
- */
-
-class Test6756768a
-{
-    static boolean var_1 = true;
-}
-
-final class Test6756768b
-{
-    static boolean var_24 = false;
-    static int var_25 = 0;
-
-    static boolean var_temp1 = Test6756768a.var_1 = false;
-}
-
-public final class Test6756768 extends Test6756768a
-{
-    final static int var = var_1 ^ (Test6756768b.var_24 ? var_1 : var_1) ? Test6756768b.var_25 : 1;
-
-    static public void main(String[] args) {
-        if (var != 0) {
-            throw new InternalError("var = " + var);
-        }
-    }
-
-}
--- a/test/compiler/c1/6756768/Test6756768_2.java	Wed Jul 13 15:19:34 2016 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2008, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/**
- * @test
- * @bug 6756768
- * @summary C1 generates invalid code
- *
- * @run main/othervm -Xcomp Test6756768_2
- */
-
-class Test6756768_2a {
-    static int var = ++Test6756768_2.var;
-}
-
-public class Test6756768_2 {
-    static int var = 1;
-
-    static Object d2 = null;
-
-    static void test_static_field() {
-        int v = var;
-        int v2 = Test6756768_2a.var;
-        int v3 = var;
-