changeset 7521:35e222a277ba

Merge
author minqi
date Mon, 08 Dec 2014 00:15:55 -0800
parents 30782e93e3be e2457e3f8c0e
children 4b1d7d4be6dc 8db6a8a28797 c75901698a47 4e9283984ee1
files agent/src/share/classes/sun/jvm/hotspot/memory/OneContigSpaceCardGeneration.java src/os/windows/vm/os_windows.cpp src/share/vm/memory/filemap.cpp src/share/vm/memory/generation.inline.hpp src/share/vm/memory/metaspace.cpp src/share/vm/memory/metaspaceShared.hpp test/Makefile test/compiler/5057225/Test5057225.java test/compiler/5091921/Test5091921.java test/compiler/5091921/Test6186134.java test/compiler/5091921/Test6196102.java test/compiler/5091921/Test6357214.java test/compiler/5091921/Test6559156.java test/compiler/5091921/Test6753639.java test/compiler/5091921/Test6850611.java test/compiler/5091921/Test6890943.java test/compiler/5091921/Test6897150.java test/compiler/5091921/Test6905845.java test/compiler/5091921/Test6931567.java test/compiler/5091921/Test6935022.java test/compiler/5091921/Test6959129.java test/compiler/5091921/Test6985295.java test/compiler/5091921/Test6992759.java test/compiler/5091921/Test7005594.java test/compiler/5091921/Test7005594.sh test/compiler/5091921/Test7020614.java test/compiler/5091921/input6890943.txt test/compiler/5091921/output6890943.txt test/compiler/6340864/TestByteVect.java test/compiler/6340864/TestDoubleVect.java test/compiler/6340864/TestFloatVect.java test/compiler/6340864/TestIntVect.java test/compiler/6340864/TestLongVect.java test/compiler/6340864/TestShortVect.java test/compiler/6378821/Test6378821.java test/compiler/6431242/Test.java test/compiler/6443505/Test6443505.java test/compiler/6478991/NullCheckTest.java test/compiler/6539464/Test.java test/compiler/6579789/Test6579789.java test/compiler/6589834/InlinedArrayCloneTestCase.java test/compiler/6589834/Test_ia32.java test/compiler/6603011/Test.java test/compiler/6636138/Test1.java test/compiler/6636138/Test2.java test/compiler/6646019/Test.java test/compiler/6646020/Tester.java test/compiler/6659207/Test.java test/compiler/6661247/Test.java test/compiler/6663621/IVTest.java test/compiler/6663848/Tester.java test/compiler/6663854/Test6663854.java test/compiler/6689060/Test.java test/compiler/6695810/Test.java test/compiler/6700047/Test6700047.java test/compiler/6711100/Test.java test/compiler/6711117/Test.java test/compiler/6712835/Test6712835.java test/compiler/6714694/Tester.java test/compiler/6716441/Tester.java test/compiler/6724218/Test.java test/compiler/6726999/Test.java test/compiler/6732154/Test6732154.java test/compiler/6741738/Tester.java test/compiler/6756768/Test6756768.java test/compiler/6756768/Test6756768_2.java test/compiler/6757316/Test6757316.java test/compiler/6758234/Test6758234.java test/compiler/6769124/TestArrayCopy6769124.java test/compiler/6769124/TestDeoptInt6769124.java test/compiler/6769124/TestUnalignedLoad6769124.java test/compiler/6772683/InterruptedTest.java test/compiler/6775880/Test.java test/compiler/6778657/Test.java test/compiler/6792161/Test6792161.java test/compiler/6795161/Test.java test/compiler/6795362/Test6795362.java test/compiler/6795465/Test6795465.java test/compiler/6796786/Test6796786.java test/compiler/6797305/Test6797305.java test/compiler/6799693/Test.java test/compiler/6800154/Test6800154.java test/compiler/6805724/Test6805724.java test/compiler/6814842/Test6814842.java test/compiler/6823354/Test6823354.java test/compiler/6823453/Test.java test/compiler/6826736/Test.java test/compiler/6832293/Test.java test/compiler/6833129/Test.java test/compiler/6837011/Test6837011.java test/compiler/6837094/Test.java test/compiler/6843752/Test.java test/compiler/6849574/Test.java test/compiler/6851282/Test.java 
test/compiler/6852078/Test6852078.java test/compiler/6855164/Test.java test/compiler/6855215/Test6855215.java test/compiler/6857159/Test6857159.java test/compiler/6857159/Test6857159.sh test/compiler/6859338/Test6859338.java test/compiler/6860469/Test.java test/compiler/6863155/Test6863155.java test/compiler/6863420/Test.java test/compiler/6865031/Test.java test/compiler/6865265/StackOverflowBug.java test/compiler/6866651/Test.java test/compiler/6875866/Test.java test/compiler/6877254/Test.java test/compiler/6879902/Test6879902.java test/compiler/6880034/Test6880034.java test/compiler/6885584/Test6885584.java test/compiler/6891750/Test6891750.java test/compiler/6892265/Test.java test/compiler/6894807/IsInstanceTest.java test/compiler/6894807/Test6894807.sh test/compiler/6895383/Test.java test/compiler/6896617/Test6896617.java test/compiler/6896727/Test.java test/compiler/6901572/Test.java test/compiler/6909839/Test6909839.java test/compiler/6910484/Test.java test/compiler/6910605/Test.java test/compiler/6910618/Test.java test/compiler/6912517/Test.java test/compiler/6916644/Test6916644.java test/compiler/6921969/TestMultiplyLongHiZero.java test/compiler/6930043/Test6930043.java test/compiler/6932496/Test6932496.java test/compiler/6934604/TestByteBoxing.java test/compiler/6934604/TestDoubleBoxing.java test/compiler/6934604/TestFloatBoxing.java test/compiler/6934604/TestIntBoxing.java test/compiler/6934604/TestLongBoxing.java test/compiler/6934604/TestShortBoxing.java test/compiler/6935535/Test.java test/compiler/6942326/Test.java test/compiler/6946040/TestCharShortByteSwap.java test/compiler/6956668/Test6956668.java test/compiler/6958485/Test.java test/compiler/6968348/Test6968348.java test/compiler/6973329/Test.java test/compiler/6982370/Test6982370.java test/compiler/6990212/Test6990212.java test/compiler/7002666/Test7002666.java test/compiler/7009231/Test7009231.java test/compiler/7009359/Test7009359.java test/compiler/7017746/Test.java test/compiler/7024475/Test7024475.java test/compiler/7029152/Test.java test/compiler/7041100/Test7041100.java test/compiler/7042153/Test7042153.java test/compiler/7044738/Test7044738.java test/compiler/7046096/Test7046096.java test/compiler/7047069/Test7047069.java test/compiler/7048332/Test7048332.java test/compiler/7052494/Test7052494.java test/compiler/7068051/Test7068051.java test/compiler/7070134/Stemmer.java test/compiler/7070134/Test7070134.sh test/compiler/7070134/words test/compiler/7082949/Test7082949.java test/compiler/7088020/Test7088020.java test/compiler/7088419/CRCTest.java test/compiler/7090976/Test7090976.java test/compiler/7100757/Test7100757.java test/compiler/7103261/Test7103261.java test/compiler/7110586/Test7110586.java test/compiler/7116216/LargeFrame.java test/compiler/7116216/StackOverflow.java test/compiler/7119644/TestBooleanVect.java test/compiler/7119644/TestByteDoubleVect.java test/compiler/7119644/TestByteFloatVect.java test/compiler/7119644/TestByteIntVect.java test/compiler/7119644/TestByteLongVect.java test/compiler/7119644/TestByteShortVect.java test/compiler/7119644/TestByteVect.java test/compiler/7119644/TestCharShortVect.java test/compiler/7119644/TestCharVect.java test/compiler/7119644/TestDoubleVect.java test/compiler/7119644/TestFloatDoubleVect.java test/compiler/7119644/TestFloatVect.java test/compiler/7119644/TestIntDoubleVect.java test/compiler/7119644/TestIntFloatVect.java test/compiler/7119644/TestIntLongVect.java test/compiler/7119644/TestIntVect.java test/compiler/7119644/TestLongDoubleVect.java 
test/compiler/7119644/TestLongFloatVect.java test/compiler/7119644/TestLongVect.java test/compiler/7119644/TestShortDoubleVect.java test/compiler/7119644/TestShortFloatVect.java test/compiler/7119644/TestShortIntVect.java test/compiler/7119644/TestShortLongVect.java test/compiler/7119644/TestShortVect.java test/compiler/7123108/Test7123108.java test/compiler/7125879/Test7125879.java test/compiler/7141637/SpreadNullArg.java test/compiler/7160610/Test7160610.java test/compiler/7169782/Test7169782.java test/compiler/7174363/Test7174363.java test/compiler/7177917/Test7177917.java test/compiler/7179138/Test7179138_1.java test/compiler/7179138/Test7179138_2.java test/compiler/7184394/TestAESBase.java test/compiler/7184394/TestAESDecode.java test/compiler/7184394/TestAESEncode.java test/compiler/7184394/TestAESMain.java test/compiler/7190310/Test7190310.java test/compiler/7190310/Test7190310_unsafe.java test/compiler/7192963/TestByteVect.java test/compiler/7192963/TestDoubleVect.java test/compiler/7192963/TestFloatVect.java test/compiler/7192963/TestIntVect.java test/compiler/7192963/TestLongVect.java test/compiler/7192963/TestShortVect.java test/compiler/7196199/Test7196199.java test/compiler/7199742/Test7199742.java test/compiler/7200264/Test7200264.sh test/compiler/7200264/TestIntVect.java test/compiler/8000805/Test8000805.java test/compiler/8001183/TestCharVect.java test/compiler/8002069/Test8002069.java test/compiler/8004051/Test8004051.java test/compiler/8004741/Test8004741.java test/compiler/8004867/TestIntAtomicCAS.java test/compiler/8004867/TestIntAtomicOrdered.java test/compiler/8004867/TestIntAtomicVolatile.java test/compiler/8004867/TestIntUnsafeCAS.java test/compiler/8004867/TestIntUnsafeOrdered.java test/compiler/8004867/TestIntUnsafeVolatile.java test/compiler/8005033/Test8005033.java test/compiler/8005419/Test8005419.java test/compiler/8005956/PolynomialRoot.java test/compiler/8007294/Test8007294.java test/compiler/8007722/Test8007722.java test/compiler/8009761/Test8009761.java test/compiler/8010927/Test8010927.java test/compiler/8011706/Test8011706.java test/compiler/8011771/Test8011771.java test/compiler/8011901/Test8011901.java test/compiler/8015436/Test8015436.java test/compiler/EliminateAutoBox/UnsignedLoads.java test/compiler/EscapeAnalysis/Test8020215.java test/compiler/EscapeAnalysis/TestAllocatedEscapesPtrComparison.java test/compiler/EscapeAnalysis/TestUnsafePutAddressNullObjMustNotEscape.java test/compiler/IntegerArithmetic/TestIntegerComparison.java test/gc/concurrentMarkSweep/CheckAllocateAndSystemGC.java test/gc/concurrentMarkSweep/SystemGCOnForegroundCollector.java test/gc/startup_warnings/TestCMSForegroundFlags.java
diffstat 739 files changed, 168566 insertions(+), 166458 deletions(-)
--- a/.hgtags	Sat Dec 06 04:30:00 2014 +0000
+++ b/.hgtags	Mon Dec 08 00:15:55 2014 -0800
@@ -443,3 +443,4 @@
 c363a8b87e477ee45d6d3cb2a36cb365141bc596 jdk9-b38
 9cb75e5e394827ccbaf2e15524108a412dc4ddc5 jdk9-b39
 6b09b3193d731e3288e2a240c504a20d0a06c766 jdk9-b40
+1d29b13e8a515a7ea3b882f140576d5d675bc11f jdk9-b41
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/Generation.java	Sat Dec 06 04:30:00 2014 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/Generation.java	Mon Dec 08 00:15:55 2014 -0800
@@ -37,10 +37,7 @@
       <ul>
       <li> CardGeneration
         <ul>
-        <li> OneContigSpaceCardGeneration
-          <ul>
-          <li> TenuredGeneration
-          </ul>
+        <li> TenuredGeneration
         </ul>
       <li> DefNewGeneration
       </ul>
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/OneContigSpaceCardGeneration.java	Sat Dec 06 04:30:00 2014 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.memory;
-
-import java.io.*;
-import java.util.*;
-
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-
-/** <P> OneSpaceOldGeneration models a heap of old objects contained
-    in a single contiguous space. </P>
-
-    <P> Garbage collection is performed using mark-compact. </P> */
-
-public abstract class OneContigSpaceCardGeneration extends CardGeneration {
-  private static AddressField theSpaceField;
-
-  static {
-    VM.registerVMInitializedObserver(new Observer() {
-        public void update(Observable o, Object data) {
-          initialize(VM.getVM().getTypeDataBase());
-        }
-      });
-  }
-
-  private static synchronized void initialize(TypeDataBase db) {
-    Type type = db.lookupType("OneContigSpaceCardGeneration");
-
-    theSpaceField = type.getAddressField("_the_space");
-  }
-
-  public OneContigSpaceCardGeneration(Address addr) {
-    super(addr);
-  }
-
-  public ContiguousSpace theSpace() {
-    return (ContiguousSpace) VMObjectFactory.newObject(ContiguousSpace.class, theSpaceField.getValue(addr));
-  }
-
-  public boolean isIn(Address p) {
-    return theSpace().contains(p);
-  }
-
-  /** Space queries */
-  public long capacity()            { return theSpace().capacity();                                }
-  public long used()                { return theSpace().used();                                    }
-  public long free()                { return theSpace().free();                                    }
-  public long contiguousAvailable() { return theSpace().free() + virtualSpace().uncommittedSize(); }
-
-  public void spaceIterate(SpaceClosure blk, boolean usedOnly) {
-    blk.doSpace(theSpace());
-  }
-
-  public void printOn(PrintStream tty) {
-    tty.print("  old ");
-    theSpace().printOn(tty);
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/TenuredGeneration.java	Sat Dec 06 04:30:00 2014 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/TenuredGeneration.java	Mon Dec 08 00:15:55 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,13 +24,62 @@
 
 package sun.jvm.hotspot.memory;
 
+import java.io.*;
+import java.util.*;
+
 import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
 
-public class TenuredGeneration extends OneContigSpaceCardGeneration {
+/** <P> TenuredGeneration models a heap of old objects contained
+    in a single contiguous space. </P>
+
+    <P> Garbage collection is performed using mark-compact. </P> */
+
+public class TenuredGeneration extends CardGeneration {
+  private static AddressField theSpaceField;
+
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) {
+    Type type = db.lookupType("TenuredGeneration");
+
+    theSpaceField = type.getAddressField("_the_space");
+  }
+
   public TenuredGeneration(Address addr) {
     super(addr);
   }
 
+  public ContiguousSpace theSpace() {
+    return (ContiguousSpace) VMObjectFactory.newObject(ContiguousSpace.class, theSpaceField.getValue(addr));
+  }
+
+  public boolean isIn(Address p) {
+    return theSpace().contains(p);
+  }
+
+  /** Space queries */
+  public long capacity()            { return theSpace().capacity();                                }
+  public long used()                { return theSpace().used();                                    }
+  public long free()                { return theSpace().free();                                    }
+  public long contiguousAvailable() { return theSpace().free() + virtualSpace().uncommittedSize(); }
+
+  public void spaceIterate(SpaceClosure blk, boolean usedOnly) {
+    blk.doSpace(theSpace());
+  }
+
+  public void printOn(PrintStream tty) {
+    tty.print("  old ");
+    theSpace().printOn(tty);
+  }
+
   public Generation.Name kind() {
     return Generation.Name.MARK_SWEEP_COMPACT;
   }
--- a/make/bsd/makefiles/sa.make	Sat Dec 06 04:30:00 2014 +0000
+++ b/make/bsd/makefiles/sa.make	Mon Dec 08 00:15:55 2014 -0800
@@ -65,6 +65,10 @@
   SA_CLASSPATH=$(shell test -f $(ALT_SA_CLASSPATH) && echo $(ALT_SA_CLASSPATH))
 endif
 
+ifneq ($(SA_CLASSPATH),)
+  SA_CLASSPATH_ARG := -classpath $(SA_CLASSPATH)
+endif
+
 # TODO: if it's a modules image, check if SA module is installed.
 MODULELIB_PATH= $(BOOT_JAVA_HOME)/lib/modules
 
@@ -116,7 +120,7 @@
 # are in AGENT_FILES, so use the shell to expand them.
 # Be extra careful to not produce too long command lines in the shell!
 	$(foreach file,$(AGENT_FILES),$(shell ls -1 $(file) >> $(AGENT_FILES_LIST)))
-	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES_LIST)
+	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) $(SA_CLASSPATH_ARG) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES_LIST)
 	$(QUIETLY) $(REMOTE) $(COMPILE.RMIC)  -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
 	$(QUIETLY) echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
 	$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js
--- a/make/linux/makefiles/gcc.make	Sat Dec 06 04:30:00 2014 +0000
+++ b/make/linux/makefiles/gcc.make	Mon Dec 08 00:15:55 2014 -0800
@@ -214,7 +214,7 @@
   WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
 endif
 
-WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value -Wformat=2
+WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value -Wformat=2 -Wreturn-type
 
 ifeq ($(USE_CLANG),)
   # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
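The newly added -Wreturn-type flag is what motivates the "// Mute compiler" returns added elsewhere in this changeset: GCC cannot prove that calls such as Unimplemented() or ShouldNotReachHere() never return, so a value-returning function that ends in one of them needs a dummy return statement. A minimal sketch (the function name is illustrative, not VM code):

    // A value-returning function ending in an abort-style call still needs a
    // return to satisfy -Wreturn-type, since GCC cannot see it is noreturn.
    int unimplemented_accessor() {
      Unimplemented();  // aborts the VM; never returns
      return 0;         // unreachable, but mutes the warning
    }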
--- a/src/cpu/ppc/vm/macroAssembler_ppc.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/cpu/ppc/vm/macroAssembler_ppc.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -27,6 +27,7 @@
 #define CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
 
 #include "asm/assembler.hpp"
+#include "utilities/macros.hpp"
 
 // MacroAssembler extends Assembler by a few frequently used macros.
 
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -4813,6 +4813,7 @@
     StubRoutines::_atomic_add_entry          = generate_atomic_add();
     StubRoutines::_atomic_xchg_ptr_entry     = StubRoutines::_atomic_xchg_entry;
     StubRoutines::_atomic_cmpxchg_ptr_entry  = StubRoutines::_atomic_cmpxchg_entry;
+    StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
     StubRoutines::_atomic_add_ptr_entry      = StubRoutines::_atomic_add_entry;
 #endif  // COMPILER2 !=> _LP64
--- a/src/cpu/x86/vm/assembler_x86.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -1297,6 +1297,17 @@
   emit_operand(reg, adr);
 }
 
+// The 8-bit cmpxchg compares the value at adr with the contents of rax,
+// and stores reg into adr if so; otherwise, the value at adr is loaded into rax.
+// The ZF is set if the compared values were equal, and cleared otherwise.
+void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg
+  InstructionMark im(this);
+  prefix(adr, reg, true);
+  emit_int8(0x0F);
+  emit_int8((unsigned char)0xB0);
+  emit_operand(reg, adr);
+}
+
 void Assembler::comisd(XMMRegister dst, Address src) {
   // NOTE: dbx seems to decode this as comiss even though the
  // 0x66 is there. Strangely ucomisd comes out correct
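
As a reading aid for the cmpxchgb comment above, the instruction's semantics can be sketched in C++; this is illustrative pseudocode for LOCK CMPXCHG r8, m8, not VM code:

    // AL stands in for the low byte of rax; the whole sequence runs atomically.
    jbyte cmpxchgb_semantics(volatile jbyte* adr, jbyte al /*compare*/,
                             jbyte reg /*exchange*/, bool& zf) {
      jbyte old = *adr;
      if (old == al) { *adr = reg; zf = true;  }  // equal: store reg, set ZF
      else           {             zf = false; }  // else: old loads into AL, clear ZF
      return old;
    }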
--- a/src/cpu/x86/vm/assembler_x86.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/cpu/x86/vm/assembler_x86.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -1006,6 +1006,7 @@
 
   void cmpxchg8 (Address adr);
 
+  void cmpxchgb(Register reg, Address adr);
   void cmpxchgl(Register reg, Address adr);
 
   void cmpxchgq(Register reg, Address adr);
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -594,9 +594,35 @@
     return start;
   }
 
-  // Support for jint atomic::atomic_cmpxchg_long(jlong exchange_value,
-  //                                             volatile jlong* dest,
-  //                                             jlong compare_value)
+  // Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest,
+  //                                          jbyte compare_value)
+  //
+  // Arguments :
+  //    c_rarg0: exchange_value
+  //    c_rarg1: dest
+  //    c_rarg2: compare_value
+  //
+  // Result:
+  //    if ( compare_value == *dest ) {
+  //       *dest = exchange_value;
+  //       return compare_value;
+  //    } else {
+  //       return *dest;
+  //    }
+  address generate_atomic_cmpxchg_byte() {
+    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
+    address start = __ pc();
+
+    __ movsbq(rax, c_rarg2);
+    if ( os::is_MP() ) __ lock();
+    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
+    __ ret(0);
+
+    return start;
+  }
+
+  // Support for jlong atomic::atomic_cmpxchg(jlong exchange_value,
+  //                                          volatile jlong* dest,
+  //                                          jlong compare_value)
   // Arguments :
   //    c_rarg0: exchange_value
   //    c_rarg1: dest
@@ -3894,6 +3920,7 @@
     StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
     StubRoutines::_atomic_xchg_ptr_entry     = generate_atomic_xchg_ptr();
     StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
+    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
     StubRoutines::_atomic_add_entry          = generate_atomic_add();
     StubRoutines::_atomic_add_ptr_entry      = generate_atomic_add_ptr();
--- a/src/cpu/x86/vm/x86_32.ad	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/cpu/x86/vm/x86_32.ad	Mon Dec 08 00:15:55 2014 -0800
@@ -1210,6 +1210,7 @@
 
 
   Unimplemented();
+  return 0; // Mute compiler
 }
 
 #ifndef PRODUCT
--- a/src/cpu/zero/vm/stubGenerator_zero.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/cpu/zero/vm/stubGenerator_zero.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -207,6 +207,7 @@
     StubRoutines::_atomic_xchg_ptr_entry     = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_entry      = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_ptr_entry  = ShouldNotCallThisStub();
+    StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_long_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_add_entry          = ShouldNotCallThisStub();
     StubRoutines::_atomic_add_ptr_entry      = ShouldNotCallThisStub();
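Platforms that install ShouldNotCallThisStub() here and do not define VM_HAS_SPECIALIZED_CMPXCHG_BYTE are expected to fall back to a shared software emulation that widens the byte CAS to a jint CAS on the enclosing aligned word. A sketch of that fallback, assuming Atomic::cmpxchg on jint is available:

    // Emulate a byte cmpxchg with a word cmpxchg: CAS the aligned 4-byte word
    // containing dest, replacing only the target byte.
    jbyte cmpxchg_byte_general(jbyte exchange_value, volatile jbyte* dest,
                               jbyte compare_value) {
      uintptr_t dest_addr  = (uintptr_t)dest;
      uintptr_t offset     = dest_addr % sizeof(jint);
      volatile jint* dest_int = (volatile jint*)(dest_addr - offset);
      jint cur = *dest_int;
      jbyte* cur_as_bytes = (jbyte*)&cur;
      while (cur_as_bytes[offset] == compare_value) {
        jint new_val = cur;
        ((jbyte*)&new_val)[offset] = exchange_value;
        jint res = Atomic::cmpxchg(new_val, dest_int, cur);
        if (res == cur) break;   // word CAS succeeded
        cur = res;               // some byte changed under us; retry with fresh value
      }
      return cur_as_bytes[offset];  // old byte value, per the cmpxchg contract
    }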
--- a/src/os/aix/vm/os_aix.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/os/aix/vm/os_aix.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -518,15 +518,13 @@
 
 #define DEFAULT_LIBPATH "/usr/lib:/lib"
 #define EXTENSIONS_DIR  "/lib/ext"
-#define ENDORSED_DIR    "/lib/endorsed"
 
   // Buffer that fits several sprintfs.
   // Note that the space for the trailing null is provided
   // by the nulls included by the sizeof operator.
   const size_t bufsize =
-    MAX3((size_t)MAXPATHLEN,  // For dll_dir & friends.
-         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR), // extensions dir
-         (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
+    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
+         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 
   // sysclasspath, java_home, dll_dir
@@ -577,15 +575,10 @@
   sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
   Arguments::set_ext_dirs(buf);
 
-  // Endorsed standards default directory.
-  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
-  Arguments::set_endorsed_dirs(buf);
-
   FREE_C_HEAP_ARRAY(char, buf);
 
 #undef DEFAULT_LIBPATH
 #undef EXTENSIONS_DIR
-#undef ENDORSED_DIR
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -2788,6 +2781,10 @@
   return ::read(fd, buf, nBytes);
 }
 
+size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
+  return ::pread(fd, buf, nBytes, offset);
+}
+
 void os::naked_short_sleep(jlong ms) {
   struct timespec req;
 
--- a/src/os/aix/vm/perfMemory_aix.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/os/aix/vm/perfMemory_aix.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -422,7 +422,7 @@
 // return the name of the user that owns the JVM indicated by the given vmid.
 //
 static char* get_user_name(int vmid, TRAPS) {
-  return get_user_name_slow(vmid, CHECK_NULL);
+  return get_user_name_slow(vmid, THREAD);
 }
 
 // return the file name of the backing store file for the named
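Replacing CHECK_NULL with THREAD here is a correctness cleanup of the TRAPS convention: the CHECK_* macros expand to a pending-exception test placed after the call, which is unreachable when the call is the operand of a return statement. Roughly (the exact exceptions.hpp definitions may differ slightly):

    #define CHECK_(result)  THREAD); if (HAS_PENDING_EXCEPTION) return result; (void)(0
    #define CHECK_NULL      CHECK_(NULL)
    // So "return get_user_name_slow(vmid, CHECK_NULL);" expanded to:
    //   return get_user_name_slow(vmid, THREAD);
    //   if (HAS_PENDING_EXCEPTION) return NULL;   // dead code after the return
    // Passing THREAD states explicitly that the caller owns any pending exception.

The same change recurs below in the BSD, Linux, and Solaris perfMemory files and in ciReplay.cpp.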
--- a/src/os/bsd/vm/os_bsd.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/os/bsd/vm/os_bsd.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -353,7 +353,6 @@
 // Base path of extensions installed on the system.
 #define SYS_EXT_DIR     "/usr/java/packages"
 #define EXTENSIONS_DIR  "/lib/ext"
-#define ENDORSED_DIR    "/lib/endorsed"
 
 #ifndef __APPLE__
 
@@ -361,9 +360,8 @@
   // Note that the space for the colon and the trailing null are provided
   // by the nulls included by the sizeof operator.
   const size_t bufsize =
-    MAX3((size_t)MAXPATHLEN,  // For dll_dir & friends.
-         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
-         (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
+    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
+         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir
   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 
   // sysclasspath, java_home, dll_dir
@@ -425,10 +423,6 @@
   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
   Arguments::set_ext_dirs(buf);
 
-  // Endorsed standards default directory.
-  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
-  Arguments::set_endorsed_dirs(buf);
-
   FREE_C_HEAP_ARRAY(char, buf);
 
 #else // __APPLE__
@@ -445,9 +439,8 @@
   // Note that the space for the colon and the trailing null are provided
   // by the nulls included by the sizeof operator.
   const size_t bufsize =
-    MAX3((size_t)MAXPATHLEN,  // for dll_dir & friends.
-         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + system_ext_size, // extensions dir
-         (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
+    MAX2((size_t)MAXPATHLEN,  // for dll_dir & friends.
+         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + system_ext_size); // extensions dir
   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 
   // sysclasspath, java_home, dll_dir
@@ -525,10 +518,6 @@
           user_home_dir, Arguments::get_java_home());
   Arguments::set_ext_dirs(buf);
 
-  // Endorsed standards default directory.
-  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
-  Arguments::set_endorsed_dirs(buf);
-
   FREE_C_HEAP_ARRAY(char, buf);
 
 #undef SYS_EXTENSIONS_DIR
@@ -538,7 +527,6 @@
 
 #undef SYS_EXT_DIR
 #undef EXTENSIONS_DIR
-#undef ENDORSED_DIR
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -2576,6 +2564,10 @@
   RESTARTABLE_RETURN_INT(::read(fd, buf, nBytes));
 }
 
+size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
+  RESTARTABLE_RETURN_INT(::pread(fd, buf, nBytes, offset));
+}
+
 void os::naked_short_sleep(jlong ms) {
   struct timespec req;
 
--- a/src/os/bsd/vm/perfMemory_bsd.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/os/bsd/vm/perfMemory_bsd.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -422,7 +422,7 @@
 // return the name of the user that owns the JVM indicated by the given vmid.
 //
 static char* get_user_name(int vmid, TRAPS) {
-  return get_user_name_slow(vmid, CHECK_NULL);
+  return get_user_name_slow(vmid, THREAD);
 }
 
 // return the file name of the backing store file for the named
--- a/src/os/linux/vm/os_linux.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/os/linux/vm/os_linux.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -68,6 +68,7 @@
 #include "utilities/events.hpp"
 #include "utilities/elfFile.hpp"
 #include "utilities/growableArray.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
 
 // put OS-includes here
@@ -337,15 +338,13 @@
 // Base path of extensions installed on the system.
 #define SYS_EXT_DIR     "/usr/java/packages"
 #define EXTENSIONS_DIR  "/lib/ext"
-#define ENDORSED_DIR    "/lib/endorsed"
 
   // Buffer that fits several sprintfs.
   // Note that the space for the colon and the trailing null are provided
   // by the nulls included by the sizeof operator.
   const size_t bufsize =
-    MAX3((size_t)MAXPATHLEN,  // For dll_dir & friends.
-         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
-         (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
+    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
+         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir
   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 
   // sysclasspath, java_home, dll_dir
@@ -410,16 +409,11 @@
   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
   Arguments::set_ext_dirs(buf);
 
-  // Endorsed standards default directory.
-  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
-  Arguments::set_endorsed_dirs(buf);
-
   FREE_C_HEAP_ARRAY(char, buf);
 
 #undef DEFAULT_LIBPATH
 #undef SYS_EXT_DIR
 #undef EXTENSIONS_DIR
-#undef ENDORSED_DIR
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -3783,6 +3777,10 @@
   return ::read(fd, buf, nBytes);
 }
 
+size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
+  return ::pread(fd, buf, nBytes, offset);
+}
+
 // Short sleep, direct OS call.
 //
 // Note: certain versions of Linux CFS scheduler (since 2.6.23) do not guarantee
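The os::read_at implementations added above (AIX, BSD, Linux) map to pread(2), which reads from an absolute file offset without moving the descriptor's shared file position, so callers such as the CDS archive reader in filemap.cpp (also touched by this changeset) can read at a known offset without an lseek that could race with other users of the fd. A sketch of the difference:

    char a[16], b[16];
    ::read(fd, a, sizeof(a));         // reads at the current offset and advances it
    ::pread(fd, b, sizeof(b), 4096);  // reads at offset 4096; the offset is untouched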
--- a/src/os/linux/vm/perfMemory_linux.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/os/linux/vm/perfMemory_linux.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -422,7 +422,7 @@
 // return the name of the user that owns the JVM indicated by the given vmid.
 //
 static char* get_user_name(int vmid, TRAPS) {
-  return get_user_name_slow(vmid, CHECK_NULL);
+  return get_user_name_slow(vmid, THREAD);
 }
 
 // return the file name of the backing store file for the named
--- a/src/os/solaris/vm/os_solaris.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/os/solaris/vm/os_solaris.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -609,17 +609,15 @@
 // Base path of extensions installed on the system.
 #define SYS_EXT_DIR     "/usr/jdk/packages"
 #define EXTENSIONS_DIR  "/lib/ext"
-#define ENDORSED_DIR    "/lib/endorsed"
 
   char cpu_arch[12];
   // Buffer that fits several sprintfs.
   // Note that the space for the colon and the trailing null are provided
   // by the nulls included by the sizeof operator.
   const size_t bufsize =
-    MAX4((size_t)MAXPATHLEN,  // For dll_dir & friends.
+    MAX3((size_t)MAXPATHLEN,  // For dll_dir & friends.
          sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch), // invariant ld_library_path
-         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
-         (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
+         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir
   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 
   // sysclasspath, java_home, dll_dir
@@ -765,15 +763,10 @@
   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
   Arguments::set_ext_dirs(buf);
 
-  // Endorsed standards default directory.
-  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
-  Arguments::set_endorsed_dirs(buf);
-
   FREE_C_HEAP_ARRAY(char, buf);
 
 #undef SYS_EXT_DIR
 #undef EXTENSIONS_DIR
-#undef ENDORSED_DIR
 }
 
 void os::breakpoint() {
@@ -3167,6 +3160,15 @@
   return res;
 }
 
+size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
+  size_t res;
+  JavaThread* thread = (JavaThread*)Thread::current();
+  assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
+  ThreadBlockInVM tbiv(thread);
+  RESTARTABLE(::pread(fd, buf, (size_t) nBytes, offset), res);
+  return res;
+}
+
 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
   size_t res;
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
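The RESTARTABLE and RESTARTABLE_RETURN_INT wrappers used by the BSD and Solaris read_at implementations are HotSpot's retry-on-EINTR idiom for interruptible syscalls. A sketch of the conventional definitions these os files rely on:

    // Retry a syscall that was interrupted by a signal until it completes.
    #define RESTARTABLE(_cmd, _result) do { \
      _result = _cmd; \
    } while (((int)_result == OS_ERR) && (errno == EINTR))

    #define RESTARTABLE_RETURN_INT(_cmd) do { \
      int _result; \
      RESTARTABLE(_cmd, _result); \
      return _result; \
    } while (false)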
--- a/src/os/solaris/vm/perfMemory_solaris.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/os/solaris/vm/perfMemory_solaris.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -461,7 +461,7 @@
   // since the structured procfs and old procfs interfaces can't be
   // mixed, we attempt to find the file through a directory search.
 
-  return get_user_name_slow(vmid, CHECK_NULL);
+  return get_user_name_slow(vmid, THREAD);
 }
 
 // return the file name of the backing store file for the named
--- a/src/os/windows/vm/os_windows.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/os/windows/vm/os_windows.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -292,19 +292,6 @@
   #undef BIN_DIR
   #undef PACKAGE_DIR
 
-  // Default endorsed standards directory.
-  {
-#define ENDORSED_DIR "\\lib\\endorsed"
-    size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR);
-    char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal);
-    sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR);
-    Arguments::set_endorsed_dirs(buf);
-    // (Arguments::set_endorsed_dirs() calls SystemProperty::set_value(), which
-    //  duplicates the input.)
-    FREE_C_HEAP_ARRAY(char, buf);
-#undef ENDORSED_DIR
-  }
-
 #ifndef _WIN64
   // set our UnhandledExceptionFilter and save any previous one
   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
@@ -4391,6 +4378,23 @@
   return (jlong) ::_lseeki64(fd, offset, whence);
 }
 
+size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
+  OVERLAPPED ov;
+  DWORD nread;
+  BOOL result;
+
+  ZeroMemory(&ov, sizeof(ov));
+  ov.Offset = (DWORD)offset;
+  ov.OffsetHigh = (DWORD)(offset >> 32);
+
+  HANDLE h = (HANDLE)::_get_osfhandle(fd);
+
+  result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
+
+  return result ? nread : 0;
+}
+
 // This method is a slightly reworked copy of JDK's sysNativePath
 // from src/windows/hpi/src/path_md.c
 
--- a/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.inline.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.inline.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -88,6 +88,15 @@
   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
 }
 
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return exchange_value;
+}
 
 inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value) {
   int mp = os::is_MP();
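LOCK_IF_MP emits the lock prefix only when running on a multiprocessor, by branching over it when the mp operand is zero; on a uniprocessor the bus lock is unnecessary because no other CPU can interleave with the cmpxchg. The macro conventionally defined alongside these inline-asm routines looks like:

    // Prefix the next instruction with LOCK only if mp (os::is_MP()) is nonzero.
    #define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "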
--- a/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -88,6 +88,15 @@
   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
 }
 
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return exchange_value;
+}
 
 inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value) {
   int mp = os::is_MP();
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -542,6 +542,7 @@
   err.report_and_die();
 
   ShouldNotReachHere();
+  return true; // Mute compiler
 }
 
 void os::Linux::init_thread_fpu_state(void) {
--- a/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -68,6 +68,8 @@
 extern "C" {
   jint _Atomic_add(jint add_value, volatile jint* dest IS_MP_DECL());
   jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
+  jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest,
+                       jbyte compare_value IS_MP_DECL());
   jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest,
                        jint compare_value IS_MP_DECL());
   jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest,
@@ -82,6 +84,11 @@
   return _Atomic_xchg(exchange_value, dest);
 }
 
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value) {
+  return _Atomic_cmpxchg_byte(exchange_value, dest, compare_value IS_MP_ARG());
+}
+
 inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value) {
   return _Atomic_cmpxchg(exchange_value, dest, compare_value IS_MP_ARG());
 }
@@ -217,6 +224,15 @@
     return exchange_value;
   }
 
+
+  inline jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, int mp) {
+    __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+    return exchange_value;
+  }
+
   // This is the interface to the atomic instruction in solaris_i486.s.
   jlong _Atomic_cmpxchg_long_gcc(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp);
 
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.il	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_32.il	Mon Dec 08 00:15:55 2014 -0800
@@ -76,6 +76,23 @@
       xchgl    (%ecx), %eax
       .end
 
+  // Support for jbyte Atomic::cmpxchg(jbyte exchange_value, 
+  //                                   volatile jbyte *dest, 
+  //                                   jbyte compare_value)
+  // An additional bool (os::is_MP()) is passed as the last argument.
+      .inline _Atomic_cmpxchg_byte,4
+      movb     8(%esp), %al   // compare_value
+      movb     0(%esp), %cl   // exchange_value
+      movl     4(%esp), %edx   // dest
+      cmp      $0, 12(%esp)    // MP test
+      jne      1f
+      cmpxchgb %cl, (%edx)
+      jmp      2f
+1:    lock
+      cmpxchgb %cl, (%edx)
+2:
+      .end
+
   // Support for jint Atomic::cmpxchg(jint exchange_value, 
   //                                  volatile jint *dest, 
   //                                  jint compare_value)
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_64.il	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_64.il	Mon Dec 08 00:15:55 2014 -0800
@@ -77,6 +77,15 @@
       movq     %rdi, %rax
       .end
 
+  // Support for jbyte Atomic::cmpxchg(jbyte exchange_value, 
+  //                                   volatile jbyte *dest, 
+  //                                   jbyte compare_value)
+      .inline _Atomic_cmpxchg_byte,3
+      movb     %dl, %al      // compare_value
+      lock
+      cmpxchgb %dil, (%rsi)
+      .end
+
   // Support for jint Atomic::cmpxchg(jint exchange_value, 
   //                                  volatile jint *dest, 
   //                                  jint compare_value)
--- a/src/os_cpu/windows_x86/vm/atomic_windows_x86.inline.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/os_cpu/windows_x86/vm/atomic_windows_x86.inline.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -123,6 +123,11 @@
   return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
 }
 
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value) {
+    return (*os::atomic_cmpxchg_byte_func)(exchange_value, dest, compare_value);
+}
+
 inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value) {
   return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
 }
@@ -212,6 +217,19 @@
   return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
 }
 
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value) {
+  // alternative for InterlockedCompareExchange
+  int mp = os::is_MP();
+  __asm {
+    mov edx, dest
+    mov cl, exchange_value
+    mov al, compare_value
+    LOCK_IF_MP(mp)
+    cmpxchg byte ptr [edx], cl
+  }
+}
+
 inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value) {
   // alternative for InterlockedCompareExchange
   int mp = os::is_MP();
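Note that the 32-bit __asm version above has no explicit return statement: cmpxchg leaves the original byte in AL, which is exactly where MSVC's x86 calling convention expects an 8-bit return value, so falling off the end of the function returns the right value (the neighboring jint and jlong variants use the same idiom). On toolchains that provide it, a compiler intrinsic could express the same operation; availability varies by VC++ version, so treat this as an alternative sketch rather than the VM's code:

    #include <intrin.h>
    // Returns the initial value of *dest, matching Atomic::cmpxchg's contract.
    jbyte cas_byte(volatile jbyte* dest, jbyte exchange_value, jbyte compare_value) {
      return _InterlockedCompareExchange8((volatile char*)dest,
                                          (char)exchange_value,
                                          (char)compare_value);
    }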
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -220,6 +220,7 @@
 typedef jint      xchg_func_t            (jint,     volatile jint*);
 typedef intptr_t  xchg_ptr_func_t        (intptr_t, volatile intptr_t*);
 typedef jint      cmpxchg_func_t         (jint,     volatile jint*,  jint);
+typedef jbyte     cmpxchg_byte_func_t    (jbyte,    volatile jbyte*, jbyte);
 typedef jlong     cmpxchg_long_func_t    (jlong,    volatile jlong*, jlong);
 typedef jint      add_func_t             (jint,     volatile jint*);
 typedef intptr_t  add_ptr_func_t         (intptr_t, volatile intptr_t*);
@@ -272,6 +273,23 @@
     *dest = exchange_value;
   return old_value;
 }
+
+jbyte os::atomic_cmpxchg_byte_bootstrap(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
+  // try to use the stub:
+  cmpxchg_byte_func_t* func = CAST_TO_FN_PTR(cmpxchg_byte_func_t*, StubRoutines::atomic_cmpxchg_byte_entry());
+
+  if (func != NULL) {
+    os::atomic_cmpxchg_byte_func = func;
+    return (*func)(exchange_value, dest, compare_value);
+  }
+  assert(Threads::number_of_threads() == 0, "for bootstrap only");
+
+  jbyte old_value = *dest;
+  if (old_value == compare_value)
+    *dest = exchange_value;
+  return old_value;
+}
+
 #endif // AMD64
 
 jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
@@ -321,6 +339,7 @@
 xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
 xchg_ptr_func_t*     os::atomic_xchg_ptr_func     = os::atomic_xchg_ptr_bootstrap;
 cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
+cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
 add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
 add_ptr_func_t*      os::atomic_add_ptr_func      = os::atomic_add_ptr_bootstrap;
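The *_bootstrap functions implement a self-installing function pointer: each atomic entry point starts out aimed at its bootstrap, which installs the generated stub on first call, or falls back to a plain non-atomic update that is safe only while the VM is still single-threaded (hence the number_of_threads() == 0 assert). The pattern reduced to its essentials (names here are illustrative, not VM identifiers):

    typedef int op_func_t(int);
    static int op_bootstrap(int x);              // forward declaration
    static op_func_t* op_func = op_bootstrap;    // all callers dispatch through op_func
    static int op_bootstrap(int x) {
      op_func_t* stub = lookup_generated_stub(); // hypothetical stub lookup
      if (stub != NULL) {
        op_func = stub;        // later calls go straight to the generated stub
        return (*stub)(x);
      }
      return slow_path(x);     // startup-only fallback, before stub generation
    }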
 
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -33,6 +33,7 @@
   static intptr_t  (*atomic_xchg_ptr_func)      (intptr_t,  volatile intptr_t*);
 
   static jint      (*atomic_cmpxchg_func)       (jint,      volatile jint*,  jint);
+  static jbyte     (*atomic_cmpxchg_byte_func)  (jbyte,     volatile jbyte*, jbyte);
   static jlong     (*atomic_cmpxchg_long_func)  (jlong,     volatile jlong*, jlong);
 
   static jint      (*atomic_add_func)           (jint,      volatile jint*);
@@ -42,6 +43,7 @@
   static intptr_t  atomic_xchg_ptr_bootstrap    (intptr_t,  volatile intptr_t*);
 
   static jint      atomic_cmpxchg_bootstrap     (jint,      volatile jint*,  jint);
+  static jbyte     atomic_cmpxchg_byte_bootstrap(jbyte,     volatile jbyte*, jbyte);
 #else
 
   static jlong (*atomic_cmpxchg_long_func)  (jlong, volatile jlong*, jlong);
--- a/src/share/vm/ci/ciEnv.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/ci/ciEnv.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -53,6 +53,7 @@
 #include "runtime/reflection.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.inline.hpp"
+#include "trace/tracing.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/macros.hpp"
 #ifdef COMPILER1
@@ -1141,6 +1142,16 @@
   }
 }
 
+void ciEnv::report_failure(const char* reason) {
+  // Create and fire JFR event
+  EventCompilerFailure event;
+  if (event.should_commit()) {
+    event.set_compileID(compile_id());
+    event.set_failure(reason);
+    event.commit();
+  }
+}
+
 // ------------------------------------------------------------------
 // ciEnv::record_method_not_compilable()
 void ciEnv::record_method_not_compilable(const char* reason, bool all_tiers) {
--- a/src/share/vm/ci/ciEnv.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/ci/ciEnv.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -450,7 +450,8 @@
   // Check for changes to the system dictionary during compilation
   bool system_dictionary_modification_counter_changed();
 
-  void record_failure(const char* reason);
+  void record_failure(const char* reason);      // Record failure and report later
+  void report_failure(const char* reason);      // Report failure immediately
   void record_method_not_compilable(const char* reason, bool all_tiers = true);
   void record_out_of_memory_failure();
 
--- a/src/share/vm/ci/ciObjectFactory.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/ci/ciObjectFactory.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -46,6 +46,7 @@
 #include "oops/oop.inline.hpp"
 #include "oops/oop.inline2.hpp"
 #include "runtime/fieldType.hpp"
+#include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 # include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #endif
--- a/src/share/vm/ci/ciReplay.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/ci/ciReplay.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -332,7 +332,7 @@
   // Lookup a klass
   Klass* resolve_klass(const char* klass, TRAPS) {
     Symbol* klass_name = SymbolTable::lookup(klass, (int)strlen(klass), CHECK_NULL);
-    return SystemDictionary::resolve_or_fail(klass_name, _loader, _protection_domain, true, CHECK_NULL);
+    return SystemDictionary::resolve_or_fail(klass_name, _loader, _protection_domain, true, THREAD);
   }
 
   // Parse the standard tuple of <klass> <name> <signature>
--- a/src/share/vm/ci/ciTypeFlow.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/ci/ciTypeFlow.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -35,6 +35,8 @@
 #include "interpreter/bytecode.hpp"
 #include "interpreter/bytecodes.hpp"
 #include "memory/allocation.inline.hpp"
+#include "opto/compile.hpp"
+#include "opto/node.hpp"
 #include "runtime/deoptimization.hpp"
 #include "utilities/growableArray.hpp"
 
@@ -2646,7 +2648,7 @@
       assert (!blk->has_pre_order(), "");
       blk->set_next_pre_order();
 
-      if (_next_pre_order >= MaxNodeLimit / 2) {
+      if (_next_pre_order >= (int)Compile::current()->max_node_limit() / 2) {
         // Too many basic blocks.  Bail out.
         // This can happen when try/finally constructs are nested to depth N,
         // and there is O(2**N) cloning of jsr bodies.  See bug 4697245!
--- a/src/share/vm/classfile/classFileParser.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/classfile/classFileParser.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -31,9 +31,6 @@
 #include "classfile/javaClasses.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
-#if INCLUDE_CDS
-#include "classfile/systemDictionaryShared.hpp"
-#endif
 #include "classfile/verificationType.hpp"
 #include "classfile/verifier.hpp"
 #include "classfile/vmSymbols.hpp"
@@ -63,7 +60,11 @@
 #include "services/threadService.hpp"
 #include "utilities/array.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
+#if INCLUDE_CDS
+#include "classfile/systemDictionaryShared.hpp"
+#endif
 
 // We generally try to create the oops directly when parsing, rather than
 // allocating temporary data structures and copying the bytes twice. A
--- a/src/share/vm/classfile/classLoader.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/classfile/classLoader.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -28,11 +28,8 @@
 #include "classfile/classLoader.hpp"
 #include "classfile/classLoaderExt.hpp"
 #include "classfile/classLoaderData.inline.hpp"
+#include "classfile/imageFile.hpp"
 #include "classfile/javaClasses.hpp"
-#if INCLUDE_CDS
-#include "classfile/sharedPathsMiscInfo.hpp"
-#include "classfile/sharedClassUtil.hpp"
-#endif
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
@@ -64,10 +61,15 @@
 #include "services/management.hpp"
 #include "services/threadService.hpp"
 #include "utilities/events.hpp"
-#include "utilities/hashtable.hpp"
 #include "utilities/hashtable.inline.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_CDS
+#include "classfile/sharedPathsMiscInfo.hpp"
+#include "classfile/sharedClassUtil.hpp"
+#endif
 
-// Entry points in zip.dll for loading zip/jar file entries
+
+// Entry points in zip.dll for loading zip/jar file entries and image file entries
 
 typedef void * * (JNICALL *ZipOpen_t)(const char *name, char **pmsg);
 typedef void (JNICALL *ZipClose_t)(jzfile *zip);
@@ -75,6 +77,7 @@
 typedef jboolean (JNICALL *ReadEntry_t)(jzfile *zip, jzentry *entry, unsigned char *buf, char *namebuf);
 typedef jboolean (JNICALL *ReadMappedEntry_t)(jzfile *zip, jzentry *entry, unsigned char **buf, char *namebuf);
 typedef jzentry* (JNICALL *GetNextEntry_t)(jzfile *zip, jint n);
+typedef jboolean (JNICALL *ZipInflateFully_t)(void *inBuf, jlong inLen, void *outBuf, jlong outLen, char **pmsg);
 typedef jint     (JNICALL *Crc32_t)(jint crc, const jbyte *buf, jint len);
 
 static ZipOpen_t         ZipOpen            = NULL;
@@ -84,6 +87,7 @@
 static ReadMappedEntry_t ReadMappedEntry    = NULL;
 static GetNextEntry_t    GetNextEntry       = NULL;
 static canonicalize_fn_t CanonicalizeEntry  = NULL;
+static ZipInflateFully_t ZipInflateFully    = NULL;
 static Crc32_t           Crc32              = NULL;
 
 // Globals
@@ -322,6 +326,8 @@
 }
 
 bool LazyClassPathEntry::is_jar_file() {
+  size_t len = strlen(_path);
+  if (len < 4 || strcmp(_path + len - 4, ".jar") != 0) return false;
   return ((_st.st_mode & S_IFREG) == S_IFREG);
 }
 
@@ -385,6 +391,78 @@
   }
 }
 
+ClassPathImageEntry::ClassPathImageEntry(char* name) : ClassPathEntry(), _image(new ImageFile(name)) {
+  bool opened = _image->open();
+  if (!opened) {
+    _image = NULL;
+  }
+}
+
+ClassPathImageEntry::~ClassPathImageEntry() {
+  if (_image) {
+    _image->close();
+    _image = NULL;
+  }
+}
+
+const char* ClassPathImageEntry::name() {
+  return _image ? _image->name() : "";
+}
+
+ClassFileStream* ClassPathImageEntry::open_stream(const char* name, TRAPS) {
+  u1* buffer;
+  u8 size;
+  _image->get_resource(name, buffer, size);
+
+  if (buffer) {
+    if (UsePerfData) {
+      ClassLoader::perf_sys_classfile_bytes_read()->inc(size);
+    }
+    return new ClassFileStream(buffer, (int)size, (char*)name);  // Resource allocated
+  }
+
+  return NULL;
+}
+
+#ifndef PRODUCT
+void ClassPathImageEntry::compile_the_world(Handle loader, TRAPS) {
+  tty->print_cr("CompileTheWorld : Compiling all classes in %s", name());
+  tty->cr();
+  const ImageStrings strings = _image->get_strings();
+  // Retrieve each path component string.
+  u4 count = _image->get_location_count();
+  for (u4 i = 0; i < count; i++) {
+    u1* location_data = _image->get_location_data(i);
+
+    if (location_data) {
+       ImageLocation location(location_data);
+       const char* parent = location.get_attribute(ImageLocation::ATTRIBUTE_PARENT, strings);
+       const char* base = location.get_attribute(ImageLocation::ATTRIBUTE_BASE, strings);
+       const char* extension = location.get_attribute(ImageLocation::ATTRIBUTE_EXTENSION, strings);
+       assert((strlen(parent) + strlen(base) + strlen(extension)) < JVM_MAXPATHLEN, "path exceeds buffer");
+       char path[JVM_MAXPATHLEN];
+       strcpy(path, parent);
+       strcat(path, base);
+       strcat(path, extension);
+       ClassLoader::compile_the_world_in(path, loader, CHECK);
+    }
+  }
+  if (HAS_PENDING_EXCEPTION) {
+    if (PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())) {
+      CLEAR_PENDING_EXCEPTION;
+      tty->print_cr("\nCompileTheWorld : Ran out of memory\n");
+      tty->print_cr("Increase class metadata storage if a limit was set");
+    } else {
+      tty->print_cr("\nCompileTheWorld : Unexpected exception occurred\n");
+    }
+  }
+}
+
+bool ClassPathImageEntry::is_jrt() {
+  return string_ends_with(name(), "bootmodules.jimage");
+}
+#endif
+
 static void print_meta_index(LazyClassPathEntry* entry,
                              GrowableArray<char*>& meta_packages) {
   tty->print("[Meta index for %s=", entry->name());
@@ -634,7 +712,7 @@
   }
   ClassPathEntry* new_entry = NULL;
   if ((st->st_mode & S_IFREG) == S_IFREG) {
-    // Regular file, should be a zip file
+    // Regular file, should be a zip or image file
     // Canonicalized filename
     char canonical_path[JVM_MAXPATHLEN];
     if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
@@ -645,6 +723,11 @@
         return NULL;
       }
     }
+    // TODO - add proper criteria for selecting image file
+    ClassPathImageEntry* entry = new ClassPathImageEntry(canonical_path);
+    if (entry->is_open()) {
+      new_entry = entry;
+    } else {
     char* error_msg = NULL;
     jzfile* zip;
     {
@@ -655,9 +738,6 @@
     }
     if (zip != NULL && error_msg == NULL) {
       new_entry = new ClassPathZipEntry(zip, path);
-      if (TraceClassLoading || TraceClassPaths) {
-        tty->print_cr("[Opened %s]", path);
-      }
     } else {
       ResourceMark rm(thread);
       char *msg;
@@ -675,10 +755,14 @@
         return NULL;
       }
     }
+    }
+    if (TraceClassLoading || TraceClassPaths) {
+      tty->print_cr("[Opened %s]", path);
+    }
   } else {
     // Directory
     new_entry = new ClassPathDirEntry(path);
-    if (TraceClassLoading || TraceClassPaths) {
+    if (TraceClassLoading) {
       tty->print_cr("[Path %s]", path);
     }
   }
@@ -801,6 +885,7 @@
   ReadEntry    = CAST_TO_FN_PTR(ReadEntry_t, os::dll_lookup(handle, "ZIP_ReadEntry"));
   ReadMappedEntry = CAST_TO_FN_PTR(ReadMappedEntry_t, os::dll_lookup(handle, "ZIP_ReadMappedEntry"));
   GetNextEntry = CAST_TO_FN_PTR(GetNextEntry_t, os::dll_lookup(handle, "ZIP_GetNextEntry"));
+  ZipInflateFully = CAST_TO_FN_PTR(ZipInflateFully_t, os::dll_lookup(handle, "ZIP_InflateFully"));
   Crc32        = CAST_TO_FN_PTR(Crc32_t, os::dll_lookup(handle, "ZIP_CRC32"));
 
   // ZIP_Close is not exported on Windows in JDK5.0 so don't abort if ZIP_Close is NULL
@@ -809,12 +894,20 @@
     vm_exit_during_initialization("Corrupted ZIP library", path);
   }
 
+  if (ZipInflateFully == NULL) {
+    vm_exit_during_initialization("Corrupted ZIP library ZIP_InflateFully missing", path);
+  }
+
   // Lookup canonicalize entry in libjava.dll
   void *javalib_handle = os::native_java_library();
   CanonicalizeEntry = CAST_TO_FN_PTR(canonicalize_fn_t, os::dll_lookup(javalib_handle, "Canonicalize"));
   // This lookup only works on 1.3. Do not check for non-null here
 }
 
+jboolean ClassLoader::decompress(void *in, u8 inSize, void *out, u8 outSize, char **pmsg) {
+  return (*ZipInflateFully)(in, inSize, out, outSize, pmsg);
+}
+
 int ClassLoader::crc32(int crc, const char* buf, int len) {
   assert(Crc32 != NULL, "ZIP_CRC32 is not found");
   return (*Crc32)(crc, (const jbyte*)buf, len);
@@ -1367,8 +1460,7 @@
   tty->cr();
 }
 
-
-bool ClassPathDirEntry::is_rt_jar() {
+bool ClassPathDirEntry::is_jrt() {
   return false;
 }
 
@@ -1393,13 +1485,13 @@
   }
 }
 
-bool ClassPathZipEntry::is_rt_jar() {
+bool ClassPathZipEntry::is_jrt() {
   real_jzfile* zip = (real_jzfile*) _zip;
   int len = (int)strlen(zip->name);
   // Check whether zip name ends in "rt.jar"
   // This will match other archives named rt.jar as well, but this is
   // only used for debugging.
-  return (len >= 6) && (strcasecmp(zip->name + len - 6, "rt.jar") == 0);
+  return string_ends_with(zip->name, "rt.jar");
 }
 
 void LazyClassPathEntry::compile_the_world(Handle loader, TRAPS) {
@@ -1409,7 +1501,7 @@
   }
 }
 
-bool LazyClassPathEntry::is_rt_jar() {
+bool LazyClassPathEntry::is_jrt() {
   Thread* THREAD = Thread::current();
   ClassPathEntry* cpe = resolve_entry(THREAD);
-  return (cpe != NULL) ? cpe->is_jar_file() : false;
+  return (cpe != NULL) ? cpe->is_jrt() : false;
@@ -1428,7 +1520,7 @@
   jlong start = os::javaTimeMillis();
   while (e != NULL) {
     // We stop at rt.jar, unless it is the first bootstrap path entry
-    if (e->is_rt_jar() && e != _first_entry) break;
+    if (e->is_jrt() && e != _first_entry) break;
     e->compile_the_world(system_class_loader, CATCH);
     e = e->next();
   }
@@ -1476,9 +1568,9 @@
 }
 
 void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
-  int len = (int)strlen(name);
-  if (len > 6 && strcmp(".class", name + len - 6) == 0) {
+  if (string_ends_with(name, ".class")) {
     // We have a .class file
+    int len = (int)strlen(name);
     char buffer[2048];
     strncpy(buffer, name, len - 6);
     buffer[len-6] = 0;
--- a/src/share/vm/classfile/classLoader.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/classfile/classLoader.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -27,6 +27,7 @@
 
 #include "classfile/classFileParser.hpp"
 #include "runtime/perfData.hpp"
+#include "utilities/macros.hpp"
 
 // The VM class loader.
 #include <sys/stat.h>
@@ -66,7 +67,7 @@
   virtual ClassFileStream* open_stream(const char* name, TRAPS) = 0;
   // Debugging
   NOT_PRODUCT(virtual void compile_the_world(Handle loader, TRAPS) = 0;)
-  NOT_PRODUCT(virtual bool is_rt_jar() = 0;)
+  NOT_PRODUCT(virtual bool is_jrt() = 0;)
 };
 
 
@@ -80,7 +81,7 @@
   ClassFileStream* open_stream(const char* name, TRAPS);
   // Debugging
   NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
-  NOT_PRODUCT(bool is_rt_jar();)
+  NOT_PRODUCT(bool is_jrt();)
 };
 
 
@@ -112,7 +113,7 @@
   void contents_do(void f(const char* name, void* context), void* context);
   // Debugging
   NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
-  NOT_PRODUCT(bool is_rt_jar();)
+  NOT_PRODUCT(bool is_jrt();)
 };
 
 
@@ -138,7 +139,25 @@
   virtual bool is_lazy();
   // Debugging
   NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
-  NOT_PRODUCT(bool is_rt_jar();)
+  NOT_PRODUCT(bool is_jrt();)
+};
+
+// For java image files
+class ImageFile;
+class ClassPathImageEntry: public ClassPathEntry {
+private:
+  ImageFile *_image;
+public:
+  bool is_jar_file()  { return false;  }
+  bool is_open()  { return _image != NULL; }
+  const char* name();
+  ClassPathImageEntry(char* name);
+  ~ClassPathImageEntry();
+  ClassFileStream* open_stream(const char* name, TRAPS);
+
+  // Debugging
+  NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
+  NOT_PRODUCT(bool is_jrt();)
 };
 
 class PackageHashtable;
@@ -226,6 +245,7 @@
   // to avoid confusing the zip library
   static bool get_canonical_path(const char* orig, char* out, int len);
  public:
+  static jboolean decompress(void *in, u8 inSize, void *out, u8 outSize, char **pmsg);
   static int crc32(int crc, const char* buf, int len);
   static bool update_class_path_entry_list(const char *path,
                                            bool check_for_duplicates,
--- a/src/share/vm/classfile/classLoaderData.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/classfile/classLoaderData.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -471,7 +471,7 @@
 // These anonymous class loaders are to contain classes used for JSR292
 ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(oop loader, TRAPS) {
   // Add a new class loader data to the graph.
-  return ClassLoaderDataGraph::add(loader, true, CHECK_NULL);
+  return ClassLoaderDataGraph::add(loader, true, THREAD);
 }
 
 const char* ClassLoaderData::loader_name() {
--- a/src/share/vm/classfile/defaultMethods.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/classfile/defaultMethods.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -493,7 +493,7 @@
 };
 
 Symbol* MethodFamily::generate_no_defaults_message(TRAPS) const {
-  return SymbolTable::new_symbol("No qualifying defaults found", CHECK_NULL);
+  return SymbolTable::new_symbol("No qualifying defaults found", THREAD);
 }
 
 Symbol* MethodFamily::generate_method_message(Symbol *klass_name, Method* method, TRAPS) const {
@@ -506,7 +506,7 @@
   ss.write((const char*)name->bytes(), name->utf8_length());
   ss.write((const char*)signature->bytes(), signature->utf8_length());
   ss.print(" is abstract");
-  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
+  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), THREAD);
 }
 
 Symbol* MethodFamily::generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const {
@@ -521,7 +521,7 @@
     ss.print(".");
     ss.write((const char*)name->bytes(), name->utf8_length());
   }
-  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
+  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), THREAD);
 }
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/imageFile.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/imageFile.hpp"
+#include "runtime/os.inline.hpp"
+#include "utilities/bytes.hpp"
+
+
+// Compute the Perfect Hashing hash code for the supplied string.
+u4 ImageStrings::hash_code(const char* string, u4 seed) {
+  u1* bytes = (u1*)string;
+
+  // Compute hash code.
+  for (u1 byte = *bytes++; byte; byte = *bytes++) {
+    seed = (seed * HASH_MULTIPLIER) ^ byte;
+  }
+
+  // Ensure the result is unsigned.
+  return seed & 0x7FFFFFFF;
+}
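+
+// Worked example of the folding above: hash_code("ab", seed) evaluates to
+//   ((((seed * HASH_MULTIPLIER) ^ 'a') * HASH_MULTIPLIER) ^ 'b') & 0x7FFFFFFF
+// so results are order-dependent, and a different seed usually breaks a
+// collision produced by another seed.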
+
+// Test to see if string begins with start.  If so, returns the remaining
+// portion of string.  Otherwise, NULL.
+const char* ImageStrings::starts_with(const char* string, const char* start) {
+  char ch1, ch2;
+
+  // Match up the strings the best we can.
+  while ((ch1 = *string) && (ch2 = *start)) {
+    if (ch1 != ch2) {
+      // Mismatch, return NULL.
+      return NULL;
+    }
+
+    string++, start++;
+  }
+
+  // Return remainder of string.
+  return string;
+}
+
+ImageLocation::ImageLocation(u1* data) {
+  // Inflate the attribute stream into an array of attributes.
+  memset(_attributes, 0, sizeof(_attributes));
+  u1 byte;
+
+  while ((byte = *data) != ATTRIBUTE_END) {
+    u1 kind = attribute_kind(byte);
+    u1 n = attribute_length(byte);
+    assert(kind < ATTRIBUTE_COUNT, "invalid image location attribute");
+    _attributes[kind] = attribute_value(data + 1, n);
+    data += n + 1;
+  }
+}
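+
+// Worked example (matching the encoding notes in imageFile.hpp): the stream
+// 0x22 0x03 0x35 0x62 0x00 has header 0x22 (kind = ATTRIBUTE_OFFSET = 4,
+// length = 3), so the loop stores 0x33562 in _attributes[ATTRIBUTE_OFFSET]
+// and stops at the trailing ATTRIBUTE_END byte; all other slots remain zero
+// from the memset above.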
+
+ImageFile::ImageFile(const char* name) {
+  // Copy the image file name.
+  _name = NEW_C_HEAP_ARRAY(char, strlen(name)+1, mtClass);
+  strcpy(_name, name);
+
+  // Initialize for a closed file.
+  _fd = -1;
+  _memory_mapped = true;
+  _index_data = NULL;
+}
+
+ImageFile::~ImageFile() {
+  // Ensure file is closed.
+  close();
+
+  // Free up name.
+  FREE_C_HEAP_ARRAY(char, _name);
+}
+
+bool ImageFile::open() {
+  // If file exists open for reading.
+  struct stat st;
+  if (os::stat(_name, &st) != 0 ||
+    (st.st_mode & S_IFREG) != S_IFREG ||
+    (_fd = os::open(_name, 0, O_RDONLY)) == -1) {
+    return false;
+  }
+
+  // Read image file header and verify.
+  u8 header_size = sizeof(ImageHeader);
+  if (os::read(_fd, &_header, header_size) != header_size ||
+    _header._magic != IMAGE_MAGIC ||
+    _header._major_version != MAJOR_VERSION ||
+    _header._minor_version != MINOR_VERSION) {
+    close();
+    return false;
+  }
+
+  // Memory map index.
+  _index_size = index_size();
+  _index_data = (u1*)os::map_memory(_fd, _name, 0, NULL, _index_size, true, false);
+
+  // Failing that, read index into C memory.
+  if (_index_data == NULL) {
+    _memory_mapped = false;
+    _index_data = NEW_RESOURCE_ARRAY(u1, _index_size);
+
+    if (os::seek_to_file_offset(_fd, 0) == -1) {
+      close();
+      return false;
+    }
+
+    if (os::read(_fd, _index_data, _index_size) != _index_size) {
+      close();
+      return false;
+    }
+
+    // Note: fall through so the index tables below are also set up for this
+    // read-into-memory path; returning here would leave them uninitialized.
+  }
+
+// Used to advance a pointer, unstructured.
+#undef nextPtr
+#define nextPtr(base, fromType, count, toType) (toType*)((fromType*)(base) + (count))
+  // Pull tables out from the index.
+  _redirect_table = nextPtr(_index_data, u1, header_size, s4);
+  _offsets_table = nextPtr(_redirect_table, s4, _header._location_count, u4);
+  _location_bytes = nextPtr(_offsets_table, u4, _header._location_count, u1);
+  _string_bytes = nextPtr(_location_bytes, u1, _header._locations_size, u1);
+#undef nextPtr
+
+  // Successful open.
+  return true;
+}
+
+void ImageFile::close() {
+  // Deallocate the index.
+  if (_index_data) {
+    if (_memory_mapped) {
+      os::unmap_memory((char*)_index_data, _index_size);
+    } else {
+      FREE_RESOURCE_ARRAY(u1, _index_data, _index_size);
+    }
+
+    _index_data = NULL;
+  }
+
+  // Close the file.
+  if (_fd != -1) {
+    os::close(_fd);
+    _fd = -1;
+  }
+
+}
+
+// Return the attribute stream for a named resource.
+u1* ImageFile::find_location_data(const char* path) const {
+  // Compute hash.
+  u4 hash = ImageStrings::hash_code(path) % _header._location_count;
+  s4 redirect = _redirect_table[hash];
+
+  if (!redirect) {
+    return NULL;
+  }
+
+  u4 index;
+
+  if (redirect < 0) {
+    // If no collision.
+    index = -redirect - 1;
+  } else {
+    // If collision, recompute hash code.
+    index = ImageStrings::hash_code(path, redirect) % _header._location_count;
+  }
+
+  assert(index < _header._location_count, "index exceeds location count");
+  u4 offset = _offsets_table[index];
+  assert(offset < _header._locations_size, "offset exceeds location attributes size");
+
+  if (offset == 0) {
+    return NULL;
+  }
+
+  return _location_bytes + offset;
+}
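+
+// For example (values hypothetical): redirect == -5 selects attribute-offset
+// index 4 directly (-redirect - 1), while redirect == 17 means the path is
+// rehashed with seed 17 to produce the index; redirect == 0 means not found.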
+
+// Verify that a found location matches the supplied path.
+bool ImageFile::verify_location(ImageLocation& location, const char* path) const {
+  // Retrieve each path component string.
+  ImageStrings strings(_string_bytes, _header._strings_size);
+  // Match the path against each subcomponent without concatenating (copying).
+  // Match up path parent.
+  const char* parent = location.get_attribute(ImageLocation::ATTRIBUTE_PARENT, strings);
+  const char* next = ImageStrings::starts_with(path, parent);
+  // Continue only if a complete match.
+  if (!next) return false;
+  // Match up path base.
+  const char* base = location.get_attribute(ImageLocation::ATTRIBUTE_BASE, strings);
+  next = ImageStrings::starts_with(next, base);
+  // Continue only if a complete match.
+  if (!next) return false;
+  // Match up path extension.
+  const char* extension = location.get_attribute(ImageLocation::ATTRIBUTE_EXTENSION, strings);
+  next = ImageStrings::starts_with(next, extension);
+
+  // True only if complete match and no more characters.
+  return next && *next == '\0';
+}
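+
+// For example, a location whose parent, base and extension attributes are the
+// strings "java/lang/", "Object" and ".class" verifies only the exact path
+// "java/lang/Object.class" (package strings include the trailing slash and
+// extensions the leading period; see imageFile.hpp).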
+
+// Return the resource for the supplied location.
+u1* ImageFile::get_resource(ImageLocation& location) const {
+  // Retrieve the byte offset and size of the resource.
+  u8 offset = _index_size + location.get_attribute(ImageLocation::ATTRIBUTE_OFFSET);
+  u8 size = location.get_attribute(ImageLocation::ATTRIBUTE_UNCOMPRESSED);
+  u8 compressed_size = location.get_attribute(ImageLocation::ATTRIBUTE_COMPRESSED);
+  u8 read_size = compressed_size ? compressed_size : size;
+
+  // Allocate space for the resource.
+  u1* data = NEW_RESOURCE_ARRAY(u1, read_size);
+
+  bool is_read = os::read_at(_fd, data, read_size, offset) == read_size;
+  guarantee(is_read, "error reading from image or short read");
+
+  // If not compressed, just return the data.
+  if (!compressed_size) {
+    return data;
+  }
+
+  u1* uncompressed = NEW_RESOURCE_ARRAY(u1, size);
+  char* msg = NULL;
+  jboolean res = ClassLoader::decompress(data, compressed_size, uncompressed, size, &msg);
+  if (!res) warning("decompression failed due to %s\n", msg);
+  guarantee(res, "decompression failed");
+
+  return uncompressed;
+}
+
+void ImageFile::get_resource(const char* path, u1*& buffer, u8& size) const {
+  buffer = NULL;
+  size = 0;
+  u1* data = find_location_data(path);
+  if (data) {
+    ImageLocation location(data);
+    if (verify_location(location, path)) {
+      size = location.get_attribute(ImageLocation::ATTRIBUTE_UNCOMPRESSED);
+      buffer = get_resource(location);
+    }
+  }
+}
+
+GrowableArray<const char*>* ImageFile::packages(const char* name) {
+  char entry[JVM_MAXPATHLEN];
+  bool overflow = jio_snprintf(entry, sizeof(entry), "%s/packages.offsets", name) == -1;
+  guarantee(!overflow, "package name overflow");
+
+  u1* buffer;
+  u8 size;
+
+  get_resource(entry, buffer, size);
+  guarantee(buffer, "missing module packages resource");
+  ImageStrings strings(_string_bytes, _header._strings_size);
+  GrowableArray<const char*>* pkgs = new GrowableArray<const char*>();
+  int count = size / 4;
+  for (int i = 0; i < count; i++) {
+    u4 offset = Bytes::get_Java_u4(buffer + (i*4));
+    const char* p = strings.get(offset);
+    pkgs->append(p);
+  }
+
+  return pkgs;
+}
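+
+// For example (module name hypothetical): packages("java.base") reads the
+// "java.base/packages.offsets" resource; each 4-byte big-endian entry is a
+// string table offset, so a 40 byte resource yields 10 package names.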
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/imageFile.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -0,0 +1,343 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_IMAGEFILE_HPP
+#define SHARE_VM_CLASSFILE_IMAGEFILE_HPP
+
+#include "classfile/classLoader.hpp"
+#include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// Image files are an alternate file format for storing classes and resources. The
+// goal is to supply file access which is faster and smaller than the jar format.
+// It should be noted that, unlike jars, information stored in an image is in native
+// endian format. This allows the image to be memory mapped without endian
+// translation.  This also means that images are platform dependent.
+//
+// Image files are structured as three sections:
+//
+//         +-----------+
+//         |  Header   |
+//         +-----------+
+//         |           |
+//         | Directory |
+//         |           |
+//         +-----------+
+//         |           |
+//         |           |
+//         | Resources |
+//         |           |
+//         |           |
+//         +-----------+
+//
+// The header contains information related to identification and description of
+// contents.
+//
+//         +-------------------------+
+//         |   Magic (0xCAFEDADA)    |
+//         +------------+------------+
+//         | Major Vers | Minor Vers |
+//         +------------+------------+
+//         |      Location Count     |
+//         +-------------------------+
+//         |      Attributes Size    |
+//         +-------------------------+
+//         |       Strings Size      |
+//         +-------------------------+
+//
+// Magic - means of identifying validity of the file.  This avoids requiring a
+//         special file extension.
+// Major vers, minor vers - differences in version numbers indicate structural
+//                          changes in the image.
+// Location count - number of locations/resources in the file.  This count is also
+//                  the length of lookup tables used in the directory.
+// Attributes size - number of bytes in the region used to store location attribute
+//                   streams.
+// Strings size - the size of the region used to store strings used by the
+//                directory and meta data.
+//
+// The directory contains information related to resource lookup. The algorithm
+// used for lookup is "A Practical Minimal Perfect Hashing Method"
+// (http://homepages.dcc.ufmg.br/~nivio/papers/wea05.pdf). Given a path string
+// in the form <package>/<base>.<extension>, return the resource location
+// information:
+//
+//     redirectIndex = hash(path, DEFAULT_SEED) % count;
+//     redirect = redirectTable[redirectIndex];
+//     if (redirect == 0) return not found;
+//     locationIndex = redirect < 0 ? -1 - redirect : hash(path, redirect) % count;
+//     location = locationTable[locationIndex];
+//     if (!verify(location, path)) return not found;
+//     return location;
+//
+// Note: The hash function takes an initial seed value.  A different seed value
+// usually produces a different result for strings that collide under other
+// seeds. The verify function guarantees the found resource location is
+// indeed the resource we are looking for.
+//
+// The following is the format of the directory:
+//
+//         +-------------------+
+//         |   Redirect Table  |
+//         +-------------------+
+//         | Attribute Offsets |
+//         +-------------------+
+//         |   Attribute Data  |
+//         +-------------------+
+//         |      Strings      |
+//         +-------------------+
+//
+// Redirect Table - Array of 32-bit signed values representing actions that
+//                  should take place for hashed strings that map to that
+//                  value.  Negative values indicate no hash collision and can be
+//                  quickly converted to indices into attribute offsets.  Positive
+//                  values represent a new seed for hashing an index into attribute
+//                  offsets.  Zero indicates not found.
+// Attribute Offsets - Array of 32-bit unsigned values representing offsets into
+//                     attribute data.  Attribute offsets can be iterated to do a
+//                     full survey of resources in the image.
+// Attribute Data - Bytes representing compact attribute data for locations. (See
+//                  comments in ImageLocation.)
+// Strings - Collection of zero terminated UTF-8 strings used by the directory and
+//           image meta data.  Each string is accessed by offset.  Each string is
+//           unique.  Offset zero is reserved for the empty string.
+//
+// Note that the memory mapped directory assumes 32 bit alignment of the image
+// header, the redirect table and the attribute offsets.
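+//
+// For example (sizes hypothetical): with a location count of 100, an
+// attribute data region of 0x500 bytes and a string region of 0x800 bytes,
+// the index occupies sizeof(ImageHeader) + 100 * 2 * sizeof(u4) (redirects
+// plus offsets) + 0x500 + 0x800 bytes, and resource bytes follow immediately
+// after (ATTRIBUTE_OFFSET values are relative to that point).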
+//
+
+
+// Manage image file string table.
+class ImageStrings {
+private:
+  // Data bytes for strings.
+  u1* _data;
+  // Number of bytes in the string table.
+  u4 _size;
+
+public:
+  // Prime used to generate hash for Perfect Hashing.
+  static const u4 HASH_MULTIPLIER = 0x01000193;
+
+  ImageStrings(u1* data, u4 size) : _data(data), _size(size) {}
+
+  // Return the UTF-8 string beginning at offset.
+  inline const char* get(u4 offset) const {
+    assert(offset < _size, "offset exceeds string table size");
+    return (const char*)(_data + offset);
+  }
+
+  // Compute the Perfect Hashing hash code for the supplied string.
+  inline static u4 hash_code(const char* string) {
+    return hash_code(string, HASH_MULTIPLIER);
+  }
+
+  // Compute the Perfect Hashing hash code for the supplied string, starting at seed.
+  static u4 hash_code(const char* string, u4 seed);
+
+  // Test to see if string begins with start.  If so, returns the remaining
+  // portion of string.  Otherwise, NULL.  Used to test sections of a path
+  // without copying.
+  static const char* starts_with(const char* string, const char* start);
+
+};
+
+// Manage image file location attribute streams.  Within an image, a location's
+// attributes are compressed into a stream of bytes.  An attribute stream is
+// composed of individual attribute sequences.  Each attribute sequence begins with
+// a header byte containing the attribute 'kind' (upper 5 bits of header) and the
+// 'length' less 1 (lower 3 bits of header) of bytes that follow containing the
+// attribute value.  Attribute values are stored most significant byte first.
+//
+// Ex. Container offset (ATTRIBUTE_OFFSET) 0x33562 would be represented as 0x22
+// (kind = 4, length = 3), 0x03, 0x35, 0x62.
+//
+// An attribute stream is terminated with a header kind of ATTRIBUTE_END (header
+// byte of zero.)
+//
+// ImageLocation inflates the stream into individual values stored in the long
+// array _attributes. This allows an attribute value to be quickly accessed by
+// direct indexing. Unspecified values default to zero.
+//
+// Notes:
+//  - Even though ATTRIBUTE_END is used to mark the end of the attribute stream,
+//    streams may contain zero bytes within attribute values (as their less
+//    significant bytes).  Thus, detecting a zero byte is not sufficient to
+//    detect the end of an attribute stream.
+//  - ATTRIBUTE_OFFSET represents the number of bytes from the beginning of the region
+//    storing the resources.  Thus, in an image this represents the number of bytes
+//    after the directory.
+//  - Currently, compressed resources are represented by having a non-zero
+//    ATTRIBUTE_COMPRESSED value.  This represents the number of bytes stored in the
+//    image, and the value of ATTRIBUTE_UNCOMPRESSED represents number of bytes of the
+//    inflated resource in memory. If the ATTRIBUTE_COMPRESSED is zero then the value
+//    of ATTRIBUTE_UNCOMPRESSED represents both the number of bytes in the image and
+//    in memory.  In the future, additional compression techniques will be used and
+//    represented differently.
+//  - Package strings include trailing slash and extensions include prefix period.
+//
+class ImageLocation {
+public:
+  // Attribute kind enumeration.
+  static const u1 ATTRIBUTE_END = 0; // End of attribute stream marker
+  static const u1 ATTRIBUTE_BASE = 1; // String table offset of resource path base
+  static const u1 ATTRIBUTE_PARENT = 2; // String table offset of resource path parent
+  static const u1 ATTRIBUTE_EXTENSION = 3; // String table offset of resource path extension
+  static const u1 ATTRIBUTE_OFFSET = 4; // Container byte offset of resource
+  static const u1 ATTRIBUTE_COMPRESSED = 5; // In image byte size of the compressed resource
+  static const u1 ATTRIBUTE_UNCOMPRESSED = 6; // In memory byte size of the uncompressed resource
+  static const u1 ATTRIBUTE_COUNT = 7; // Number of attribute kinds
+
+private:
+  // Values of inflated attributes.
+  u8 _attributes[ATTRIBUTE_COUNT];
+
+  // Return the number of bytes in the attribute value.
+  inline static u1 attribute_length(u1 data) {
+    return (data & 0x7) + 1;
+  }
+
+  // Return the attribute kind.
+  inline static u1 attribute_kind(u1 data) {
+    u1 kind = data >> 3;
+    assert(kind < ATTRIBUTE_COUNT, "invalid attribute kind");
+    return kind;
+  }
+
+  // Return the attribute value assembled from n bytes, most significant first.
+  inline static u8 attribute_value(u1* data, u1 n) {
+    assert(0 < n && n <= 8, "invalid attribute value length");
+    u8 value = 0;
+
+    // Most significant bytes first.
+    for (u1 i = 0; i < n; i++) {
+      value <<= 8;
+      value |= data[i];
+    }
+
+    return value;
+  }
+
+public:
+  ImageLocation(u1* data);
+
+  // Retrieve an attribute value from the inflated array.
+  inline u8 get_attribute(u1 kind) const {
+    assert(ATTRIBUTE_END < kind && kind < ATTRIBUTE_COUNT, "invalid attribute kind");
+    return _attributes[kind];
+  }
+
+  // Retrieve an attribute string value from the inflated array.
+  inline const char* get_attribute(u4 kind, const ImageStrings& strings) const {
+    return strings.get((u4)get_attribute(kind));
+  }
+};
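+
+// Illustrative use (a sketch; ImageFile::get_resource in imageFile.cpp is the
+// real caller, and 'data' stands for a location's attribute stream):
+//   ImageLocation location(data);
+//   u8 offset = location.get_attribute(ImageLocation::ATTRIBUTE_OFFSET);
+//   u8 size   = location.get_attribute(ImageLocation::ATTRIBUTE_UNCOMPRESSED);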
+
+// Manage the image file.
+class ImageFile: public CHeapObj<mtClass> {
+private:
+  // Image file marker.
+  static const u4 IMAGE_MAGIC = 0xCAFEDADA;
+  // Image file major version number.
+  static const u2 MAJOR_VERSION = 0;
+  // Image file minor version number.
+  static const u2 MINOR_VERSION = 1;
+
+  struct ImageHeader {
+    u4 _magic;          // Image file marker
+    u2 _major_version;  // Image file major version number
+    u2 _minor_version;  // Image file minor version number
+    u4 _location_count; // Number of locations managed in index.
+    u4 _locations_size; // Number of bytes in attribute table.
+    u4 _strings_size;   // Number of bytes in string table.
+  };
+
+  char* _name;          // Name of image
+  int _fd;              // File descriptor
+  bool _memory_mapped;  // Is file memory mapped
+  ImageHeader _header;  // Image header
+  u8 _index_size;       // Total size of index
+  u1* _index_data;      // Raw index data
+  s4* _redirect_table;  // Perfect hash redirect table
+  u4* _offsets_table;   // Location offset table
+  u1* _location_bytes;  // Location attributes
+  u1* _string_bytes;    // String table
+
+  // Compute number of bytes in image file index.
+  inline u8 index_size() {
+    return sizeof(ImageHeader) +
+           _header._location_count * sizeof(u4) * 2 +
+           _header._locations_size +
+           _header._strings_size;
+  }
+
+public:
+  ImageFile(const char* name);
+  ~ImageFile();
+
+  // Open image file for access.
+  bool open();
+  // Close image file.
+  void close();
+
+  // Retrieve name of image file.
+  inline const char* name() const {
+    return _name;
+  }
+
+  // Return a string table accessor.
+  inline const ImageStrings get_strings() const {
+    return ImageStrings(_string_bytes, _header._strings_size);
+  }
+
+  // Return number of locations in image file index.
+  inline u4 get_location_count() const {
+    return _header._location_count;
+  }
+
+  // Return location attribute stream for location i.
+  inline u1* get_location_data(u4 i) const {
+    u4 offset = _offsets_table[i];
+
+    return offset != 0 ? _location_bytes + offset : NULL;
+  }
+
+  // Return the attribute stream for a named resource.
+  u1* find_location_data(const char* path) const;
+
+  // Verify that a found location matches the supplied path.
+  bool verify_location(ImageLocation& location, const char* path) const;
+
+  // Return the resource for the supplied location info.
+  u1* get_resource(ImageLocation& location) const;
+
+  // Set buffer/size to the resource associated with the path; buffer is NULL if not found.
+  void get_resource(const char* path, u1*& buffer, u8& size) const;
+
+  // Return an array of packages for a given module
+  GrowableArray<const char*>* packages(const char* name);
+};
+
+#endif // SHARE_VM_CLASSFILE_IMAGEFILE_HPP
--- a/src/share/vm/classfile/javaClasses.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/classfile/javaClasses.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -1950,7 +1950,7 @@
   // This class is eagerly initialized during VM initialization, since we keep a refence
   // to one of the methods
   assert(InstanceKlass::cast(klass)->is_initialized(), "must be initialized");
-  return InstanceKlass::cast(klass)->allocate_instance_handle(CHECK_NH);
+  return InstanceKlass::cast(klass)->allocate_instance_handle(THREAD);
 }
 
 oop java_lang_reflect_Method::clazz(oop reflect) {
@@ -2128,7 +2128,7 @@
   instanceKlassHandle klass (THREAD, k);
   // Ensure it is initialized
   klass->initialize(CHECK_NH);
-  return klass->allocate_instance_handle(CHECK_NH);
+  return klass->allocate_instance_handle(THREAD);
 }
 
 oop java_lang_reflect_Constructor::clazz(oop reflect) {
@@ -2268,7 +2268,7 @@
   instanceKlassHandle klass (THREAD, k);
   // Ensure it is initialized
   klass->initialize(CHECK_NH);
-  return klass->allocate_instance_handle(CHECK_NH);
+  return klass->allocate_instance_handle(THREAD);
 }
 
 oop java_lang_reflect_Field::clazz(oop reflect) {
@@ -2395,7 +2395,7 @@
   instanceKlassHandle klass (THREAD, k);
   // Ensure it is initialized
   klass->initialize(CHECK_NH);
-  return klass->allocate_instance_handle(CHECK_NH);
+  return klass->allocate_instance_handle(THREAD);
 }
 
 oop java_lang_reflect_Parameter::name(oop param) {
@@ -2445,7 +2445,7 @@
   instanceKlassHandle klass (THREAD, k);
   // Ensure it is initialized
   klass->initialize(CHECK_NH);
-  return klass->allocate_instance_handle(CHECK_NH);
+  return klass->allocate_instance_handle(THREAD);
 }
 
 
--- a/src/share/vm/classfile/sharedPathsMiscInfo.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/classfile/sharedPathsMiscInfo.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -110,7 +110,7 @@
 bool SharedPathsMiscInfo::check(jint type, const char* path) {
   switch (type) {
   case BOOT:
-    if (strcmp(path, Arguments::get_sysclasspath()) != 0) {
+    if (os::file_name_strcmp(path, Arguments::get_sysclasspath()) != 0) {
       return fail("[BOOT classpath mismatch, actual: -Dsun.boot.class.path=", Arguments::get_sysclasspath());
     }
     break;
--- a/src/share/vm/classfile/stringTable.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/classfile/stringTable.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -36,6 +36,7 @@
 #include "runtime/atomic.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "utilities/hashtable.inline.hpp"
+#include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/g1StringDedup.hpp"
--- a/src/share/vm/classfile/symbolTable.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/classfile/symbolTable.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -235,7 +235,7 @@
   MutexLocker ml(SymbolTable_lock, THREAD);
 
   // Otherwise, add to symbol to table
-  return the_table()->basic_add(index, (u1*)name, len, hashValue, true, CHECK_NULL);
+  return the_table()->basic_add(index, (u1*)name, len, hashValue, true, THREAD);
 }
 
 Symbol* SymbolTable::lookup(const Symbol* sym, int begin, int end, TRAPS) {
@@ -274,7 +274,7 @@
   // Grab SymbolTable_lock first.
   MutexLocker ml(SymbolTable_lock, THREAD);
 
-  return the_table()->basic_add(index, (u1*)buffer, len, hashValue, true, CHECK_NULL);
+  return the_table()->basic_add(index, (u1*)buffer, len, hashValue, true, THREAD);
 }
 
 Symbol* SymbolTable::lookup_only(const char* name, int len,
--- a/src/share/vm/classfile/systemDictionary.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/classfile/systemDictionary.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -31,10 +31,6 @@
 #include "classfile/resolutionErrors.hpp"
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
-#if INCLUDE_CDS
-#include "classfile/sharedClassUtil.hpp"
-#include "classfile/systemDictionaryShared.hpp"
-#endif
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
 #include "interpreter/bytecodeStream.hpp"
@@ -65,6 +61,10 @@
 #include "services/threadService.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ticks.hpp"
+#if INCLUDE_CDS
+#include "classfile/sharedClassUtil.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#endif
 #if INCLUDE_TRACE
 #include "trace/tracing.hpp"
 #endif
@@ -122,7 +122,7 @@
 
 ClassLoaderData* SystemDictionary::register_loader(Handle class_loader, TRAPS) {
   if (class_loader() == NULL) return ClassLoaderData::the_null_class_loader_data();
-  return ClassLoaderDataGraph::find_or_create(class_loader, CHECK_NULL);
+  return ClassLoaderDataGraph::find_or_create(class_loader, THREAD);
 }
 
 // ----------------------------------------------------------------------------
@@ -232,15 +232,15 @@
                  class_name->as_C_string(),
                  class_loader.is_null() ? "null" : class_loader->klass()->name()->as_C_string()));
   if (FieldType::is_array(class_name)) {
-    return resolve_array_class_or_null(class_name, class_loader, protection_domain, CHECK_NULL);
+    return resolve_array_class_or_null(class_name, class_loader, protection_domain, THREAD);
   } else if (FieldType::is_obj(class_name)) {
     ResourceMark rm(THREAD);
     // Ignore wrapping L and ;.
     TempNewSymbol name = SymbolTable::new_symbol(class_name->as_C_string() + 1,
                                    class_name->utf8_length() - 2, CHECK_NULL);
-    return resolve_instance_class_or_null(name, class_loader, protection_domain, CHECK_NULL);
+    return resolve_instance_class_or_null(name, class_loader, protection_domain, THREAD);
   } else {
-    return resolve_instance_class_or_null(class_name, class_loader, protection_domain, CHECK_NULL);
+    return resolve_instance_class_or_null(class_name, class_loader, protection_domain, THREAD);
   }
 }
 
--- a/src/share/vm/classfile/verificationType.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/classfile/verificationType.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -289,7 +289,7 @@
           if (is_reference() && from.is_reference()) {
             return is_reference_assignable_from(from, context,
                                                 from_field_is_protected,
-                                                CHECK_false);
+                                                THREAD);
           } else {
             return false;
           }
--- a/src/share/vm/classfile/verifier.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/classfile/verifier.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -1927,7 +1927,7 @@
 
   return SystemDictionary::resolve_or_fail(
     name, Handle(THREAD, loader), Handle(THREAD, protection_domain),
-    true, CHECK_NULL);
+    true, THREAD);
 }
 
 bool ClassVerifier::is_protected_access(instanceKlassHandle this_class,
--- a/src/share/vm/code/dependencies.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/code/dependencies.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -912,6 +912,8 @@
   bool is_witness(Klass* k) {
     if (doing_subtype_search()) {
       return Dependencies::is_concrete_klass(k);
+    } else if (!k->oop_is_instance()) {
+      return false; // no methods to find in an array type
     } else {
       Method* m = InstanceKlass::cast(k)->find_method(_name, _signature);
       if (m == NULL || !Dependencies::is_concrete_method(m))  return false;
@@ -1118,7 +1120,7 @@
   Klass* chain;       // scratch variable
 #define ADD_SUBCLASS_CHAIN(k)                     {  \
     assert(chaini < CHAINMAX, "oob");                \
-    chain = InstanceKlass::cast(k)->subklass();      \
+    chain = k->subklass();                           \
     if (chain != NULL)  chains[chaini++] = chain;    }
 
   // Look for non-abstract subclasses.
@@ -1129,35 +1131,37 @@
   // (Their subclasses are additional indirect implementors.
   // See InstanceKlass::add_implementor.)
   // (Note:  nof_implementors is always zero for non-interfaces.)
-  int nof_impls = InstanceKlass::cast(context_type)->nof_implementors();
-  if (nof_impls > 1) {
-    // Avoid this case: *I.m > { A.m, C }; B.m > C
-    // Here, I.m has 2 concrete implementations, but m appears unique
-    // as A.m, because the search misses B.m when checking C.
-    // The inherited method B.m was getting missed by the walker
-    // when interface 'I' was the starting point.
-    // %%% Until this is fixed more systematically, bail out.
-    // (Old CHA had the same limitation.)
-    return context_type;
-  }
-  if (nof_impls > 0) {
-    Klass* impl = InstanceKlass::cast(context_type)->implementor();
-    assert(impl != NULL, "just checking");
-    // If impl is the same as the context_type, then more than one
-    // implementor has seen. No exact info in this case.
-    if (impl == context_type) {
-      return context_type;  // report an inexact witness to this sad affair
+  if (top_level_call) {
+    int nof_impls = InstanceKlass::cast(context_type)->nof_implementors();
+    if (nof_impls > 1) {
+      // Avoid this case: *I.m > { A.m, C }; B.m > C
+      // Here, I.m has 2 concrete implementations, but m appears unique
+      // as A.m, because the search misses B.m when checking C.
+      // The inherited method B.m was getting missed by the walker
+      // when interface 'I' was the starting point.
+      // %%% Until this is fixed more systematically, bail out.
+      // (Old CHA had the same limitation.)
+      return context_type;
     }
-    if (do_counts)
-      { NOT_PRODUCT(deps_find_witness_steps++); }
-    if (is_participant(impl)) {
-      if (!participants_hide_witnesses) {
+    if (nof_impls > 0) {
+      Klass* impl = InstanceKlass::cast(context_type)->implementor();
+      assert(impl != NULL, "just checking");
+      // If impl is the same as the context_type, then more than one
+      // implementor has been seen. No exact info in this case.
+      if (impl == context_type) {
+        return context_type;  // report an inexact witness to this sad affair
+      }
+      if (do_counts)
+        { NOT_PRODUCT(deps_find_witness_steps++); }
+      if (is_participant(impl)) {
+        if (!participants_hide_witnesses) {
+          ADD_SUBCLASS_CHAIN(impl);
+        }
+      } else if (is_witness(impl) && !ignore_witness(impl)) {
+        return impl;
+      } else {
         ADD_SUBCLASS_CHAIN(impl);
       }
-    } else if (is_witness(impl) && !ignore_witness(impl)) {
-      return impl;
-    } else {
-      ADD_SUBCLASS_CHAIN(impl);
     }
   }
 
--- a/src/share/vm/compiler/compileBroker.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/compiler/compileBroker.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -594,7 +594,7 @@
  * Add a CompileTask to a CompileQueue.
  */
 void CompileQueue::add(CompileTask* task) {
-  assert(lock()->owned_by_self(), "must own lock");
+  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
 
   task->set_next(NULL);
   task->set_prev(NULL);
@@ -625,7 +625,7 @@
   }
 
   // Notify CompilerThreads that a task is available.
-  lock()->notify_all();
+  MethodCompileQueue_lock->notify_all();
 }
 
 /**
@@ -635,7 +635,7 @@
  * compilation is disabled.
  */
 void CompileQueue::free_all() {
-  MutexLocker mu(lock());
+  MutexLocker mu(MethodCompileQueue_lock);
   CompileTask* next = _first;
 
   // Iterate over all tasks in the compile queue
@@ -653,14 +653,14 @@
   _first = NULL;
 
   // Wake up all threads that block on the queue.
-  lock()->notify_all();
+  MethodCompileQueue_lock->notify_all();
 }
 
 /**
  * Get the next CompileTask from a CompileQueue
  */
 CompileTask* CompileQueue::get() {
-  MutexLocker locker(lock());
+  MutexLocker locker(MethodCompileQueue_lock);
   // If _first is NULL we have no more compile jobs. There are two reasons for
   // having no compile jobs: First, we compiled everything we wanted. Second,
   // we ran out of code cache so compilation has been disabled. In the latter
@@ -681,7 +681,7 @@
     // We need a timed wait here, since compiler threads can exit if compilation
     // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
     // is not critical and we do not want idle compiler threads to wake up too often.
-    lock()->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
+    MethodCompileQueue_lock->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
   }
 
   if (CompileBroker::is_compilation_disabled_forever()) {
@@ -701,7 +701,7 @@
 // Clean & deallocate stale compile tasks.
 // Temporarily releases MethodCompileQueue lock.
 void CompileQueue::purge_stale_tasks() {
-  assert(lock()->owned_by_self(), "must own lock");
+  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
   if (_first_stale != NULL) {
     // Stale tasks are purged when MCQ lock is released,
     // but _first_stale updates are protected by MCQ lock.
@@ -710,7 +710,7 @@
     CompileTask* head = _first_stale;
     _first_stale = NULL;
     {
-      MutexUnlocker ul(lock());
+      MutexUnlocker ul(MethodCompileQueue_lock);
       for (CompileTask* task = head; task != NULL; ) {
         CompileTask* next_task = task->next();
         CompileTaskWrapper ctw(task); // Frees the task
@@ -722,7 +722,7 @@
 }
 
 void CompileQueue::remove(CompileTask* task) {
-   assert(lock()->owned_by_self(), "must own lock");
+   assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
   if (task->prev() != NULL) {
     task->prev()->set_next(task->next());
   } else {
@@ -742,7 +742,7 @@
 }
 
 void CompileQueue::remove_and_mark_stale(CompileTask* task) {
-  assert(lock()->owned_by_self(), "must own lock");
+  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
   remove(task);
 
   // Enqueue the task for reclamation (should be done outside MCQ lock)
@@ -780,7 +780,7 @@
 }
 
 void CompileQueue::print(outputStream* st) {
-  assert(lock()->owned_by_self(), "must own lock");
+  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
   st->print_cr("Contents of %s", name());
   st->print_cr("----------------------------");
   CompileTask* task = _first;
@@ -1066,11 +1066,11 @@
 #endif // !ZERO && !SHARK
   // Initialize the compilation queue
   if (c2_compiler_count > 0) {
-    _c2_compile_queue  = new CompileQueue("C2 compile queue",  MethodCompileQueue_lock);
+    _c2_compile_queue  = new CompileQueue("C2 compile queue");
     _compilers[1]->set_num_compiler_threads(c2_compiler_count);
   }
   if (c1_compiler_count > 0) {
-    _c1_compile_queue  = new CompileQueue("C1 compile queue",  MethodCompileQueue_lock);
+    _c1_compile_queue  = new CompileQueue("C1 compile queue");
     _compilers[0]->set_num_compiler_threads(c1_compiler_count);
   }
 
@@ -1214,7 +1214,7 @@
 
   // Acquire our lock.
   {
-    MutexLocker locker(queue->lock(), thread);
+    MutexLocker locker(MethodCompileQueue_lock, thread);
 
     // Make sure the method has not slipped into the queues since
     // last we checked; note that those checks were "fast bail-outs".
@@ -1807,7 +1807,7 @@
                      os::file_separator(), thread_id, os::current_process_id());
       }
 
-      fp = fopen(file_name, "at");
+      fp = fopen(file_name, "wt");
       if (fp != NULL) {
         if (LogCompilation && Verbose) {
           tty->print_cr("Opening compilation log %s", file_name);
@@ -1985,6 +1985,7 @@
 
     if (ci_env.failing()) {
       task->set_failure_reason(ci_env.failure_reason());
+      ci_env.report_failure(ci_env.failure_reason());
       const char* retry_message = ci_env.retry_message();
       if (_compilation_log != NULL) {
         _compilation_log->log_failure(thread, task, ci_env.failure_reason(), retry_message);
--- a/src/share/vm/compiler/compileBroker.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/compiler/compileBroker.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -195,7 +195,6 @@
 class CompileQueue : public CHeapObj<mtCompiler> {
  private:
   const char* _name;
-  Monitor*    _lock;
 
   CompileTask* _first;
   CompileTask* _last;
@@ -206,9 +205,8 @@
 
   void purge_stale_tasks();
  public:
-  CompileQueue(const char* name, Monitor* lock) {
+  CompileQueue(const char* name) {
     _name = name;
-    _lock = lock;
     _first = NULL;
     _last = NULL;
     _size = 0;
@@ -216,7 +214,6 @@
   }
 
   const char*  name() const                      { return _name; }
-  Monitor*     lock() const                      { return _lock; }
 
   void         add(CompileTask* task);
   void         remove(CompileTask* task);
@@ -418,6 +415,7 @@
     shutdown_compilaton = 2
   };
 
+  static jint get_compilation_activity_mode() { return _should_compile_new_jobs; }
   static bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); }
   static bool set_should_compile_new_jobs(jint new_state) {
     // Return success if the current caller set it
--- a/src/share/vm/compiler/compileLog.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/compiler/compileLog.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -56,7 +56,7 @@
 }
 
 CompileLog::~CompileLog() {
-  delete _out;
+  delete _out; // Close fd in fileStream::~fileStream()
   _out = NULL;
   FREE_C_HEAP_ARRAY(char, _identities);
   FREE_C_HEAP_ARRAY(char, _file);
@@ -278,10 +278,9 @@
       }
       file->print_raw_cr("</compilation_log>");
       close(partial_fd);
-      unlink(partial_file);
     }
     CompileLog* next_log = log->_next;
-    delete log;
+    delete log; // Removes partial file
     log = next_log;
   }
   _first = NULL;
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -52,21 +52,9 @@
 }
 
 void ConcurrentMarkSweepPolicy::initialize_generations() {
-  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC,
-    CURRENT_PC, AllocFailStrategy::RETURN_NULL);
-  if (_generations == NULL)
-    vm_exit_during_initialization("Unable to allocate gen spec");
-
-  Generation::Name yg_name =
-    UseParNewGC ? Generation::ParNew : Generation::DefNew;
-  _generations[0] = new GenerationSpec(yg_name, _initial_young_size,
-                                       _max_young_size);
-  _generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep,
-                                       _initial_old_size, _max_old_size);
-
-  if (_generations[0] == NULL || _generations[1] == NULL) {
-    vm_exit_during_initialization("Unable to allocate gen spec");
-  }
+  _generations = NEW_C_HEAP_ARRAY(GenerationSpecPtr, number_of_generations(), mtGC);
+  _generations[0] = new GenerationSpec(Generation::ParNew, _initial_young_size, _max_young_size);
+  _generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep, _initial_old_size, _max_old_size);
 }
 
 void ConcurrentMarkSweepPolicy::initialize_size_policy(size_t init_eden_size,
@@ -82,10 +70,5 @@
 
 void ConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
   // initialize the policy counters - 2 collectors, 3 generations
-  if (UseParNewGC) {
-    _gc_policy_counters = new GCPolicyCounters("ParNew:CMS", 2, 3);
-  }
-  else {
-    _gc_policy_counters = new GCPolicyCounters("Copy:CMS", 2, 3);
-  }
+  _gc_policy_counters = new GCPolicyCounters("ParNew:CMS", 2, 3);
 }
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -90,7 +90,8 @@
                     CMSRescanMultiple),
   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                     CMSConcMarkMultiple),
-  _collector(NULL)
+  _collector(NULL),
+  _preconsumptionDirtyCardClosure(NULL)
 {
   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
          "FreeChunk is larger than expected");
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -155,6 +155,9 @@
   // Used to keep track of limit of sweep for the space
   HeapWord* _sweep_limit;
 
+  // Used to make the young collector update the mod union table
+  MemRegionClosure* _preconsumptionDirtyCardClosure;
+
   // Support for compacting cms
   HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
   HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
@@ -356,6 +359,14 @@
   void initialize_sequential_subtasks_for_marking(int n_threads,
          HeapWord* low = NULL);
 
+  virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
+    return _preconsumptionDirtyCardClosure;
+  }
+
+  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
+    _preconsumptionDirtyCardClosure = cl;
+  }
+
   // Space enquiries
   size_t used() const;
   size_t free() const;
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -192,7 +192,6 @@
      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
   CardGeneration(rs, initial_byte_size, level, ct),
   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
-  _debug_collection_type(Concurrent_collection_type),
   _did_compact(false)
 {
   HeapWord* bottom = (HeapWord*) _virtual_space.low();
@@ -612,8 +611,6 @@
   // Clip CMSBootstrapOccupancy between 0 and 100.
   _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
 
-  _full_gcs_since_conc_gc = 0;
-
   // Now tell CMS generations the identity of their collector
   ConcurrentMarkSweepGeneration::set_collector(this);
 
@@ -626,7 +623,8 @@
 
   // Support for parallelizing young gen rescan
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  _young_gen = gch->prev_gen(_cmsGen);
+  assert(gch->prev_gen(_cmsGen)->kind() == Generation::ParNew, "CMS can only be used with ParNew");
+  _young_gen = (ParNewGeneration*)gch->prev_gen(_cmsGen);
   if (gch->supports_inline_contig_alloc()) {
     _top_addr = gch->top_addr();
     _end_addr = gch->end_addr();
@@ -1206,14 +1204,6 @@
 
 void
 ConcurrentMarkSweepGeneration::
-par_promote_alloc_undo(int thread_num,
-                       HeapWord* obj, size_t word_sz) {
-  // CMS does not support promotion undo.
-  ShouldNotReachHere();
-}
-
-void
-ConcurrentMarkSweepGeneration::
 par_promote_alloc_done(int thread_num) {
   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
   ps->lab.retire(thread_num);
@@ -1247,20 +1237,6 @@
     return true;
   }
 
-  // For debugging purposes, change the type of collection.
-  // If the rotation is not on the concurrent collection
-  // type, don't start a concurrent collection.
-  NOT_PRODUCT(
-    if (RotateCMSCollectionTypes &&
-        (_cmsGen->debug_collection_type() !=
-          ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
-      assert(_cmsGen->debug_collection_type() !=
-        ConcurrentMarkSweepGeneration::Unknown_collection_type,
-        "Bad cms collection type");
-      return false;
-    }
-  )
-
   FreelistLocker x(this);
   // ------------------------------------------------------------------
   // Print out lots of information which affects the initiation of
@@ -1441,16 +1417,6 @@
                            size_t size,
                            bool   tlab)
 {
-  if (!UseCMSCollectionPassing && _collectorState > Idling) {
-    // For debugging purposes skip the collection if the state
-    // is not currently idle
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
-        Thread::current(), full, _collectorState);
-    }
-    return;
-  }
-
   // The following "if" branch is present for defensive reasons.
   // In the current uses of this interface, it can be replaced with:
   // assert(!GC_locker.is_active(), "Can't be called otherwise");
@@ -1466,7 +1432,6 @@
     return;
   }
   acquire_control_and_collect(full, clear_all_soft_refs);
-  _full_gcs_since_conc_gc++;
 }
 
 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
@@ -1636,66 +1601,51 @@
     gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
   }
 
-  // Check if we need to do a compaction, or if not, whether
-  // we need to start the mark-sweep from scratch.
-  bool should_compact    = false;
-  bool should_start_over = false;
-  decide_foreground_collection_type(clear_all_soft_refs,
-    &should_compact, &should_start_over);
-
-NOT_PRODUCT(
-  if (RotateCMSCollectionTypes) {
-    if (_cmsGen->debug_collection_type() ==
-        ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
-      should_compact = true;
-    } else if (_cmsGen->debug_collection_type() ==
-               ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
-      should_compact = false;
-    }
-  }
-)
+  // Inform cms gen if this was due to partial collection failing.
+  // The CMS gen may use this fact to determine its expansion policy.
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
+    assert(!_cmsGen->incremental_collection_failed(),
+           "Should have been noticed, reacted to and cleared");
+    _cmsGen->set_incremental_collection_failed();
+  }
 
   if (first_state > Idling) {
     report_concurrent_mode_interruption();
   }
 
-  set_did_compact(should_compact);
-  if (should_compact) {
-    // If the collection is being acquired from the background
-    // collector, there may be references on the discovered
-    // references lists that have NULL referents (being those
-    // that were concurrently cleared by a mutator) or
-    // that are no longer active (having been enqueued concurrently
-    // by the mutator).
-    // Scrub the list of those references because Mark-Sweep-Compact
-    // code assumes referents are not NULL and that all discovered
-    // Reference objects are active.
-    ref_processor()->clean_up_discovered_references();
-
-    if (first_state > Idling) {
-      save_heap_summary();
-    }
-
-    do_compaction_work(clear_all_soft_refs);
-
-    // Has the GC time limit been exceeded?
-    DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
-    size_t max_eden_size = young_gen->max_capacity() -
-                           young_gen->to()->capacity() -
-                           young_gen->from()->capacity();
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    GCCause::Cause gc_cause = gch->gc_cause();
-    size_policy()->check_gc_overhead_limit(_young_gen->used(),
-                                           young_gen->eden()->used(),
-                                           _cmsGen->max_capacity(),
-                                           max_eden_size,
-                                           full,
-                                           gc_cause,
-                                           gch->collector_policy());
-  } else {
-    do_mark_sweep_work(clear_all_soft_refs, first_state,
-      should_start_over);
-  }
+  set_did_compact(true);
+
+  // If the collection is being acquired from the background
+  // collector, there may be references on the discovered
+  // references lists that have NULL referents (being those
+  // that were concurrently cleared by a mutator) or
+  // that are no longer active (having been enqueued concurrently
+  // by the mutator).
+  // Scrub the list of those references because Mark-Sweep-Compact
+  // code assumes referents are not NULL and that all discovered
+  // Reference objects are active.
+  ref_processor()->clean_up_discovered_references();
+
+  if (first_state > Idling) {
+    save_heap_summary();
+  }
+
+  do_compaction_work(clear_all_soft_refs);
+
+  // Has the GC time limit been exceeded?
+  size_t max_eden_size = _young_gen->max_capacity() -
+                         _young_gen->to()->capacity() -
+                         _young_gen->from()->capacity();
+  GCCause::Cause gc_cause = gch->gc_cause();
+  size_policy()->check_gc_overhead_limit(_young_gen->used(),
+                                         _young_gen->eden()->used(),
+                                         _cmsGen->max_capacity(),
+                                         max_eden_size,
+                                         full,
+                                         gc_cause,
+                                         gch->collector_policy());
+
   // Reset the expansion cause, now that we just completed
   // a collection cycle.
   clear_expansion_cause();
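
The size-policy call in the hunk above bounds eden by the young gen's maximum capacity net of both survivor spaces. A minimal sketch of that arithmetic with hypothetical sizes (the accessor names are the ones used in this patch):

    // Assuming a 64M young gen with two 8M survivor spaces:
    //   max_eden_size = 64M - 8M - 8M = 48M
    size_t max_eden_size = _young_gen->max_capacity()      // 64M
                         - _young_gen->to()->capacity()    // - 8M
                         - _young_gen->from()->capacity(); // - 8M
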
@@ -1713,68 +1663,6 @@
   _cmsGen->compute_new_size_free_list();
 }
 
-// A work method used by foreground collection to determine
-// what type of collection (compacting or not, continuing or fresh)
-// it should do.
-// NOTE: the intent is to make UseCMSCompactAtFullCollection
-// and CMSCompactWhenClearAllSoftRefs the default in the future
-// and do away with the flags after a suitable period.
-void CMSCollector::decide_foreground_collection_type(
-  bool clear_all_soft_refs, bool* should_compact,
-  bool* should_start_over) {
-  // Normally, we'll compact only if the UseCMSCompactAtFullCollection
-  // flag is set, and we have either requested a System.gc() or
-  // the number of full gc's since the last concurrent cycle
-  // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
-  // or if an incremental collection has failed
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_generation_policy(),
-         "You may want to check the correctness of the following");
-  // Inform cms gen if this was due to partial collection failing.
-  // The CMS gen may use this fact to determine its expansion policy.
-  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
-    assert(!_cmsGen->incremental_collection_failed(),
-           "Should have been noticed, reacted to and cleared");
-    _cmsGen->set_incremental_collection_failed();
-  }
-  *should_compact =
-    UseCMSCompactAtFullCollection &&
-    ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
-     GCCause::is_user_requested_gc(gch->gc_cause()) ||
-     gch->incremental_collection_will_fail(true /* consult_young */));
-  *should_start_over = false;
-  if (clear_all_soft_refs && !*should_compact) {
-    // We are about to do a last ditch collection attempt
-    // so it would normally make sense to do a compaction
-    // to reclaim as much space as possible.
-    if (CMSCompactWhenClearAllSoftRefs) {
-      // Default: The rationale is that in this case either
-      // we are past the final marking phase, in which case
-      // we'd have to start over, or so little has been done
-      // that there's little point in saving that work. Compaction
-      // appears to be the sensible choice in either case.
-      *should_compact = true;
-    } else {
-      // We have been asked to clear all soft refs, but not to
-      // compact. Make sure that we aren't past the final checkpoint
-      // phase, for that is where we process soft refs. If we are already
-      // past that phase, we'll need to redo the refs discovery phase and
-      // if necessary clear soft refs that weren't previously
-      // cleared. We do so by remembering the phase in which
-      // we came in, and if we are past the refs processing
-      // phase, we'll choose to just redo the mark-sweep
-      // collection from scratch.
-      if (_collectorState > FinalMarking) {
-        // We are past the refs processing phase;
-        // start over and do a fresh synchronous CMS cycle
-        _collectorState = Resetting; // skip to reset to start new cycle
-        reset(false /* == !asynch */);
-        *should_start_over = true;
-      } // else we can continue a possibly ongoing current cycle
-    }
-  }
-}
-
 // A work method used by the foreground collector to do
 // a mark-sweep-compact.
 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
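
For reference, the policy deleted above boiled down to a single predicate; after this change the foreground path compacts unconditionally (set_did_compact(true) followed by do_compaction_work). A condensed restatement of the removed logic, using only the flags and calls visible in the deletion:

    bool should_compact =
        UseCMSCompactAtFullCollection &&
        ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
         GCCause::is_user_requested_gc(gch->gc_cause()) ||
         gch->incremental_collection_will_fail(true /* consult_young */));
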
@@ -1787,10 +1675,6 @@
   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
 
   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
-  if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
-    gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
-      "collections passed to foreground collector", _full_gcs_since_conc_gc);
-  }
 
   // Temporarily widen the span of the weak reference processing to
   // the entire heap.
@@ -1852,7 +1736,7 @@
   _collectorState = Resetting;
   assert(_restart_addr == NULL,
          "Should have been NULL'd before baton was passed");
-  reset(false /* == !asynch */);
+  reset(false /* == !concurrent */);
   _cmsGen->reset_after_compaction();
   _concurrent_cycles_since_last_unload = 0;
 
@@ -1875,45 +1759,10 @@
   // in the heap's do_collection() method.
 }
 
-// A work method used by the foreground collector to do
-// a mark-sweep, after taking over from a possibly on-going
-// concurrent mark-sweep collection.
-void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
-  CollectorState first_state, bool should_start_over) {
-  if (PrintGC && Verbose) {
-    gclog_or_tty->print_cr("Pass concurrent collection to foreground "
-      "collector with count %d",
-      _full_gcs_since_conc_gc);
-  }
-  switch (_collectorState) {
-    case Idling:
-      if (first_state == Idling || should_start_over) {
-        // The background GC was not active, or should
-        // be restarted from scratch; start the cycle.
-        _collectorState = InitialMarking;
-      }
-      // If first_state was not Idling, then a background GC
-      // was in progress and has now finished.  No need to do it
-      // again.  Leave the state as Idling.
-      break;
-    case Precleaning:
-      // In the foreground case don't do the precleaning since
-      // it is not done concurrently and there is extra work
-      // required.
-      _collectorState = FinalMarking;
-  }
-  collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
-
-  // For a mark-sweep, compute_new_size() will be called
-  // in the heap's do_collection() method.
-}
-
-
 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
-  DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
-  ContiguousSpace* eden_space = dng->eden();
-  ContiguousSpace* from_space = dng->from();
-  ContiguousSpace* to_space   = dng->to();
+  ContiguousSpace* eden_space = _young_gen->eden();
+  ContiguousSpace* from_space = _young_gen->from();
+  ContiguousSpace* to_space   = _young_gen->to();
   // Eden
   if (_eden_chunk_array != NULL) {
     gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
@@ -1989,13 +1838,7 @@
   }
 };
 
-// There are separate collect_in_background and collect_in_foreground because of
-// the different locking requirements of the background collector and the
-// foreground collector.  There was originally an attempt to share
-// one "collect" method between the background collector and the foreground
-// collector but the if-then-else required made it cleaner to have
-// separate methods.
-void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
+void CMSCollector::collect_in_background(GCCause::Cause cause) {
   assert(Thread::current()->is_ConcurrentGC_thread(),
     "A CMS asynchronous collection is only allowed on a CMS thread.");
 
@@ -2036,7 +1879,7 @@
   // Used for PrintGC
   size_t prev_used;
   if (PrintGC && Verbose) {
-    prev_used = _cmsGen->used(); // XXXPERM
+    prev_used = _cmsGen->used();
   }
 
   // The change of the collection state is normally done at this level;
@@ -2116,7 +1959,7 @@
         break;
       case Marking:
         // initial marking in checkpointRootsInitialWork has been completed
-        if (markFromRoots(true)) { // we were successful
+        if (markFromRoots()) { // we were successful
           assert(_collectorState == Precleaning, "Collector state should "
             "have changed");
         } else {
@@ -2146,10 +1989,9 @@
         break;
       case Sweeping:
         // final marking in checkpointRootsFinal has been completed
-        sweep(true);
+        sweep();
         assert(_collectorState == Resizing, "Collector state change "
           "to Resizing must be done under the free_list_lock");
-        _full_gcs_since_conc_gc = 0;
 
       case Resizing: {
         // Sweeping has been completed...
@@ -2222,12 +2064,6 @@
   }
 }
 
-void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
-  if (!_cms_start_registered) {
-    register_gc_start(cause);
-  }
-}
-
 void CMSCollector::register_gc_start(GCCause::Cause cause) {
   _cms_start_registered = true;
   _gc_timer_cm->register_gc_start();
@@ -2255,120 +2091,6 @@
   _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
 }
 
-void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
-  assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
-         "Foreground collector should be waiting, not executing");
-  assert(Thread::current()->is_VM_thread(), "A foreground collection "
-    "may only be done by the VM Thread with the world stopped");
-  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
-         "VM thread should have CMS token");
-
-  // The gc id is created in register_foreground_gc_start if this collection is synchronous
-  const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
-  NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
-    true, NULL, gc_id);)
-  COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
-
-  HandleMark hm;  // Discard invalid handles created during verification
-
-  if (VerifyBeforeGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    Universe::verify();
-  }
-
-  // Snapshot the soft reference policy to be used in this collection cycle.
-  ref_processor()->setup_policy(clear_all_soft_refs);
-
-  // Decide if class unloading should be done
-  update_should_unload_classes();
-
-  bool init_mark_was_synchronous = false; // until proven otherwise
-  while (_collectorState != Idling) {
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
-        Thread::current(), _collectorState);
-    }
-    switch (_collectorState) {
-      case InitialMarking:
-        register_foreground_gc_start(cause);
-        init_mark_was_synchronous = true;  // fact to be exploited in re-mark
-        checkpointRootsInitial(false);
-        assert(_collectorState == Marking, "Collector state should have changed"
-          " within checkpointRootsInitial()");
-        break;
-      case Marking:
-        // initial marking in checkpointRootsInitialWork has been completed
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before initial mark: ");
-        }
-        {
-          bool res = markFromRoots(false);
-          assert(res && _collectorState == FinalMarking, "Collector state should "
-            "have changed");
-          break;
-        }
-      case FinalMarking:
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before re-mark: ");
-        }
-        checkpointRootsFinal(false, clear_all_soft_refs,
-                             init_mark_was_synchronous);
-        assert(_collectorState == Sweeping, "Collector state should not "
-          "have changed within checkpointRootsFinal()");
-        break;
-      case Sweeping:
-        // final marking in checkpointRootsFinal has been completed
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before sweep: ");
-        }
-        sweep(false);
-        assert(_collectorState == Resizing, "Incorrect state");
-        break;
-      case Resizing: {
-        // Sweeping has been completed; the actual resize in this case
-        // is done separately; nothing to be done in this state.
-        _collectorState = Resetting;
-        break;
-      }
-      case Resetting:
-        // The heap has been resized.
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before reset: ");
-        }
-        save_heap_summary();
-        reset(false);
-        assert(_collectorState == Idling, "Collector state should "
-          "have changed");
-        break;
-      case Precleaning:
-      case AbortablePreclean:
-        // Elide the preclean phase
-        _collectorState = FinalMarking;
-        break;
-      default:
-        ShouldNotReachHere();
-    }
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
-        Thread::current(), _collectorState);
-    }
-  }
-
-  if (VerifyAfterGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    Universe::verify();
-  }
-  if (TraceCMSState) {
-    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
-      " exiting collection CMS state %d",
-      Thread::current(), _collectorState);
-  }
-}
-
 bool CMSCollector::waitForForegroundGC() {
   bool res = false;
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
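
With collect_in_foreground gone, collect_in_background is the sole driver of the CMS phase machine. A condensed view of the remaining cycle, using only the CollectorState values that appear in this patch:

    // Idling -> InitialMarking -> Marking -> Precleaning
    //        -> AbortablePreclean -> FinalMarking -> Sweeping
    //        -> Resizing -> Resetting -> Idling
    //
    // markFromRoots() returning false no longer hands off to a foreground
    // mark-sweep; the bail-out path now leads to a compacting collection.
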
@@ -3090,7 +2812,7 @@
 }
 
 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
-// OneContigSpaceCardGeneration, which makes me wonder if we should move this
+// TenuredGeneration, which makes me wonder if we should move this
 // to CardGeneration and share it...
 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
   return CardGeneration::expand(bytes, expand_bytes);
@@ -3345,7 +3067,7 @@
 // Checkpoint the roots into this generation from outside
 // this generation. [Note this initial checkpoint need only
 // be approximate -- we'll do a catch up phase subsequently.]
-void CMSCollector::checkpointRootsInitial(bool asynch) {
+void CMSCollector::checkpointRootsInitial() {
   assert(_collectorState == InitialMarking, "Wrong collector state");
   check_correct_thread_executing();
   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
@@ -3356,32 +3078,19 @@
   ReferenceProcessor* rp = ref_processor();
   SpecializationStats::clear();
   assert(_restart_addr == NULL, "Control point invariant");
-  if (asynch) {
+  {
     // acquire locks for subsequent manipulations
     MutexLockerEx x(bitMapLock(),
                     Mutex::_no_safepoint_check_flag);
-    checkpointRootsInitialWork(asynch);
+    checkpointRootsInitialWork();
     // enable ("weak") refs discovery
     rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
     _collectorState = Marking;
-  } else {
-    // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
-    // which recognizes if we are a CMS generation, and doesn't try to turn on
-    // discovery; verify that they aren't meddling.
-    assert(!rp->discovery_is_atomic(),
-           "incorrect setting of discovery predicate");
-    assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
-           "ref discovery for this generation kind");
-    // already have locks
-    checkpointRootsInitialWork(asynch);
-    // now enable ("weak") refs discovery
-    rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
-    _collectorState = Marking;
   }
   SpecializationStats::print();
 }
 
-void CMSCollector::checkpointRootsInitialWork(bool asynch) {
+void CMSCollector::checkpointRootsInitialWork() {
   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
   assert(_collectorState == InitialMarking, "just checking");
 
@@ -3483,9 +3192,9 @@
   verify_overflow_empty();
 }
 
-bool CMSCollector::markFromRoots(bool asynch) {
+bool CMSCollector::markFromRoots() {
   // we might be tempted to assert that:
-  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
+  // assert(!SafepointSynchronize::is_at_safepoint(),
   //        "inconsistent argument?");
   // However that wouldn't be right, because it's possible that
   // a safepoint is indeed in progress as a younger generation
@@ -3494,37 +3203,28 @@
   check_correct_thread_executing();
   verify_overflow_empty();
 
-  bool res;
-  if (asynch) {
-    // Weak ref discovery note: We may be discovering weak
-    // refs in this generation concurrent (but interleaved) with
-    // weak ref discovery by a younger generation collector.
-
-    CMSTokenSyncWithLocks ts(true, bitMapLock());
-    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
-    res = markFromRootsWork(asynch);
-    if (res) {
-      _collectorState = Precleaning;
-    } else { // We failed and a foreground collection wants to take over
-      assert(_foregroundGCIsActive, "internal state inconsistency");
-      assert(_restart_addr == NULL,  "foreground will restart from scratch");
-      if (PrintGCDetails) {
-        gclog_or_tty->print_cr("bailing out to foreground collection");
-      }
-    }
-  } else {
-    assert(SafepointSynchronize::is_at_safepoint(),
-           "inconsistent with asynch == false");
-    // already have locks
-    res = markFromRootsWork(asynch);
-    _collectorState = FinalMarking;
+  // Weak ref discovery note: We may be discovering weak
+  // refs in this generation concurrent (but interleaved) with
+  // weak ref discovery by a younger generation collector.
+
+  CMSTokenSyncWithLocks ts(true, bitMapLock());
+  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
+  CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+  bool res = markFromRootsWork();
+  if (res) {
+    _collectorState = Precleaning;
+  } else { // We failed and a foreground collection wants to take over
+    assert(_foregroundGCIsActive, "internal state inconsistency");
+    assert(_restart_addr == NULL,  "foreground will restart from scratch");
+    if (PrintGCDetails) {
+      gclog_or_tty->print_cr("bailing out to foreground collection");
+    }
   }
   verify_overflow_empty();
   return res;
 }
 
-bool CMSCollector::markFromRootsWork(bool asynch) {
+bool CMSCollector::markFromRootsWork() {
   // iterate over marked bits in bit map, doing a full scan and mark
   // from these roots using the following algorithm:
   // . if oop is to the right of the current scan pointer,
@@ -3549,9 +3249,9 @@
   verify_overflow_empty();
   bool result = false;
   if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
-    result = do_marking_mt(asynch);
+    result = do_marking_mt();
   } else {
-    result = do_marking_st(asynch);
+    result = do_marking_st();
   }
   return result;
 }
@@ -3591,7 +3291,6 @@
 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
   CMSCollector* _collector;
   int           _n_workers;                  // requested/desired # workers
-  bool          _asynch;
   bool          _result;
   CompactibleFreeListSpace*  _cms_space;
   char          _pad_front[64];   // padding to ...
@@ -3612,13 +3311,12 @@
  public:
   CMSConcMarkingTask(CMSCollector* collector,
                  CompactibleFreeListSpace* cms_space,
-                 bool asynch,
                  YieldingFlexibleWorkGang* workers,
                  OopTaskQueueSet* task_queues):
     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
     _collector(collector),
     _cms_space(cms_space),
-    _asynch(asynch), _n_workers(0), _result(true),
+    _n_workers(0), _result(true),
     _task_queues(task_queues),
     _term(_n_workers, task_queues, _collector),
     _bit_map_lock(collector->bitMapLock())
@@ -3645,8 +3343,7 @@
   void work(uint worker_id);
   bool should_yield() {
     return    ConcurrentMarkSweepThread::should_yield()
-           && !_collector->foregroundGCIsActive()
-           && _asynch;
+           && !_collector->foregroundGCIsActive();
   }
 
   virtual void coordinator_yield();  // stuff done by coordinator
@@ -3878,8 +3575,7 @@
         Par_MarkFromRootsClosure cl(this, _collector, my_span,
                                     &_collector->_markBitMap,
                                     work_queue(i),
-                                    &_collector->_markStack,
-                                    _asynch);
+                                    &_collector->_markStack);
         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
       } // else nothing to do for this task
     }   // else nothing to do for this task
@@ -4084,7 +3780,7 @@
   _collector->startTimer();
 }
 
-bool CMSCollector::do_marking_mt(bool asynch) {
+bool CMSCollector::do_marking_mt() {
   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
   int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
                                        conc_workers()->total_workers(),
@@ -4096,7 +3792,6 @@
 
   CMSConcMarkingTask tsk(this,
                          cms_space,
-                         asynch,
                          conc_workers(),
                          task_queues());
 
@@ -4125,7 +3820,7 @@
     // If _restart_addr is non-NULL, a marking stack overflow
     // occurred; we need to do a fresh marking iteration from the
     // indicated restart address.
-    if (_foregroundGCIsActive && asynch) {
+    if (_foregroundGCIsActive) {
       // We may be running into repeated stack overflows, having
       // reached the limit of the stack size, while making very
       // slow forward progress. It may be best to bail out and
@@ -4154,14 +3849,14 @@
   return true;
 }
 
-bool CMSCollector::do_marking_st(bool asynch) {
+bool CMSCollector::do_marking_st() {
   ResourceMark rm;
   HandleMark   hm;
 
   // Temporarily make refs discovery single threaded (non-MT)
   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
-    &_markStack, CMSYield && asynch);
+    &_markStack, CMSYield);
   // the last argument to iterate indicates whether the iteration
   // should be incremental with periodic yields.
   _markBitMap.iterate(&markFromRootsClosure);
@@ -4169,7 +3864,7 @@
   // occurred; we need to do a fresh iteration from the
   // indicated restart address.
   while (_restart_addr != NULL) {
-    if (_foregroundGCIsActive && asynch) {
+    if (_foregroundGCIsActive) {
       // We may be running into repeated stack overflows, having
       // reached the limit of the stack size, while making very
       // slow forward progress. It may be best to bail out and
@@ -4390,10 +4085,6 @@
   }
 
   if (clean_survivor) {  // preclean the active survivor space(s)
-    assert(_young_gen->kind() == Generation::DefNew ||
-           _young_gen->kind() == Generation::ParNew,
-         "incorrect type for cast");
-    DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
                              &_markBitMap, &_modUnionTable,
                              &_markStack, true /* precleaning phase */);
@@ -4406,8 +4097,8 @@
     SurvivorSpacePrecleanClosure
       sss_cl(this, _span, &_markBitMap, &_markStack,
              &pam_cl, before_count, CMSYield);
-    dng->from()->object_iterate_careful(&sss_cl);
-    dng->to()->object_iterate_careful(&sss_cl);
+    _young_gen->from()->object_iterate_careful(&sss_cl);
+    _young_gen->to()->object_iterate_careful(&sss_cl);
   }
   MarkRefsIntoAndScanClosure
     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
@@ -4703,8 +4394,7 @@
   verify_overflow_empty();
 }
 
-void CMSCollector::checkpointRootsFinal(bool asynch,
-  bool clear_all_soft_refs, bool init_mark_was_synchronous) {
+void CMSCollector::checkpointRootsFinal() {
   assert(_collectorState == FinalMarking, "incorrect state transition?");
   check_correct_thread_executing();
   // world is stopped at this checkpoint
@@ -4721,7 +4411,7 @@
                         _young_gen->used() / K,
                         _young_gen->capacity() / K);
   }
-  if (asynch) {
+  {
     if (CMSScavengeBeforeRemark) {
       GenCollectedHeap* gch = GenCollectedHeap::heap();
       // Temporarily set flag to false, GCH->do_collection will
@@ -4742,21 +4432,14 @@
     FreelistLocker x(this);
     MutexLockerEx y(bitMapLock(),
                     Mutex::_no_safepoint_check_flag);
-    assert(!init_mark_was_synchronous, "but that's impossible!");
-    checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
-  } else {
-    // already have all the locks
-    checkpointRootsFinalWork(asynch, clear_all_soft_refs,
-                             init_mark_was_synchronous);
+    checkpointRootsFinalWork();
   }
   verify_work_stacks_empty();
   verify_overflow_empty();
   SpecializationStats::print();
 }
 
-void CMSCollector::checkpointRootsFinalWork(bool asynch,
-  bool clear_all_soft_refs, bool init_mark_was_synchronous) {
-
+void CMSCollector::checkpointRootsFinalWork() {
   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
 
   assert(haveFreelistLocks(), "must have free list locks");
@@ -4773,60 +4456,54 @@
   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());
 
-  if (!init_mark_was_synchronous) {
-    // We might assume that we need not fill TLAB's when
-    // CMSScavengeBeforeRemark is set, because we may have just done
-    // a scavenge which would have filled all TLAB's -- and besides
-    // Eden would be empty. This however may not always be the case --
-    // for instance although we asked for a scavenge, it may not have
-    // happened because of a JNI critical section. We probably need
-    // a policy for deciding whether we can in that case wait until
-    // the critical section releases and then do the remark following
-    // the scavenge, and skip it here. In the absence of that policy,
-    // or of an indication of whether the scavenge did indeed occur,
-    // we cannot rely on TLAB's having been filled and must do
-    // so here just in case a scavenge did not happen.
-    gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
-    // Update the saved marks which may affect the root scans.
-    gch->save_marks();
-
-    if (CMSPrintEdenSurvivorChunks) {
-      print_eden_and_survivor_chunk_arrays();
-    }
-
-    {
-      COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
-
-      // Note on the role of the mod union table:
-      // Since the marker in "markFromRoots" marks concurrently with
-      // mutators, it is possible for some reachable objects not to have been
-      // scanned. For instance, an only reference to an object A was
-      // placed in object B after the marker scanned B. Unless B is rescanned,
-      // A would be collected. Such updates to references in marked objects
-      // are detected via the mod union table which is the set of all cards
-      // dirtied since the first checkpoint in this GC cycle and prior to
-      // the most recent young generation GC, minus those cleaned up by the
-      // concurrent precleaning.
-      if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
-        GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
-        do_remark_parallel();
-      } else {
-        GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
-                    _gc_timer_cm, _gc_tracer_cm->gc_id());
-        do_remark_non_parallel();
-      }
-    }
-  } else {
-    assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
-    // The initial mark was stop-world, so there's no rescanning to
-    // do; go straight on to the next step below.
+  // We might assume that we need not fill TLAB's when
+  // CMSScavengeBeforeRemark is set, because we may have just done
+  // a scavenge which would have filled all TLAB's -- and besides
+  // Eden would be empty. This however may not always be the case --
+  // for instance although we asked for a scavenge, it may not have
+  // happened because of a JNI critical section. We probably need
+  // a policy for deciding whether we can in that case wait until
+  // the critical section releases and then do the remark following
+  // the scavenge, and skip it here. In the absence of that policy,
+  // or of an indication of whether the scavenge did indeed occur,
+  // we cannot rely on TLAB's having been filled and must do
+  // so here just in case a scavenge did not happen.
+  gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
+  // Update the saved marks which may affect the root scans.
+  gch->save_marks();
+
+  if (CMSPrintEdenSurvivorChunks) {
+    print_eden_and_survivor_chunk_arrays();
+  }
+
+  {
+    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
+
+    // Note on the role of the mod union table:
+    // Since the marker in "markFromRoots" marks concurrently with
+    // mutators, it is possible for some reachable objects not to have been
+    // scanned. For instance, an only reference to an object A was
+    // placed in object B after the marker scanned B. Unless B is rescanned,
+    // A would be collected. Such updates to references in marked objects
+    // are detected via the mod union table which is the set of all cards
+    // dirtied since the first checkpoint in this GC cycle and prior to
+    // the most recent young generation GC, minus those cleaned up by the
+    // concurrent precleaning.
+    if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
+      GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+      do_remark_parallel();
+    } else {
+      GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
+                  _gc_timer_cm, _gc_tracer_cm->gc_id());
+      do_remark_non_parallel();
+    }
   }
   verify_work_stacks_empty();
   verify_overflow_empty();
 
   {
     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
-    refProcessingWork(asynch, clear_all_soft_refs);
+    refProcessingWork();
   }
   verify_work_stacks_empty();
   verify_overflow_empty();
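
The mod union table comment above carries the key remark-pause invariant. A conceptual sketch of the rescan it implies — the helper names here are hypothetical, not the actual CMSBitMap/card-table API:

    // A card dirtied by a mutator store during concurrent marking may hold
    // the only reference to an object the marker never saw, so remark must
    // revisit every such card that concurrent precleaning did not clean.
    for (size_t card = 0; card < n_cards; card++) {
      if (mod_union_table.at(card)) {  // dirtied since the initial checkpoint
        rescan_objects_on(card);       // hypothetical helper
      }
    }
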
@@ -5006,10 +4683,10 @@
 };
 
 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
-  DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
-  ContiguousSpace* eden_space = dng->eden();
-  ContiguousSpace* from_space = dng->from();
-  ContiguousSpace* to_space   = dng->to();
+  ParNewGeneration* young_gen = _collector->_young_gen;
+  ContiguousSpace* eden_space = young_gen->eden();
+  ContiguousSpace* from_space = young_gen->from();
+  ContiguousSpace* to_space   = young_gen->to();
 
   HeapWord** eca = _collector->_eden_chunk_array;
   size_t     ect = _collector->_eden_chunk_index;
@@ -5478,11 +5155,10 @@
 CMSCollector::
 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
   assert(n_threads > 0, "Unexpected n_threads argument");
-  DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
 
   // Eden space
-  if (!dng->eden()->is_empty()) {
-    SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
+  if (!_young_gen->eden()->is_empty()) {
+    SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
     assert(!pst->valid(), "Clobbering existing data?");
     // Each valid entry in [0, _eden_chunk_index) represents a task.
     size_t n_tasks = _eden_chunk_index + 1;
@@ -5495,14 +5171,14 @@
 
   // Merge the survivor plab arrays into _survivor_chunk_array
   if (_survivor_plab_array != NULL) {
-    merge_survivor_plab_arrays(dng->from(), n_threads);
+    merge_survivor_plab_arrays(_young_gen->from(), n_threads);
   } else {
     assert(_survivor_chunk_index == 0, "Error");
   }
 
   // To space
   {
-    SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
+    SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks();
     assert(!pst->valid(), "Clobbering existing data?");
     // Sets the condition for completion of the subtask (how many threads
     // need to finish in order to be done).
@@ -5513,7 +5189,7 @@
 
   // From space
   {
-    SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
+    SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
     assert(!pst->valid(), "Clobbering existing data?");
     size_t n_tasks = _survivor_chunk_index + 1;
     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
@@ -5872,8 +5548,7 @@
   workers->run_task(&enq_task);
 }
 
-void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
-
+void CMSCollector::refProcessingWork() {
   ResourceMark rm;
   HandleMark   hm;
 
@@ -5881,7 +5556,7 @@
   assert(rp->span().equals(_span), "Spans should be equal");
   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
   // Process weak references.
-  rp->setup_policy(clear_all_soft_refs);
+  rp->setup_policy(false);
   verify_work_stacks_empty();
 
   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
@@ -6005,7 +5680,7 @@
 }
 #endif
 
-void CMSCollector::sweep(bool asynch) {
+void CMSCollector::sweep() {
   assert(_collectorState == Sweeping, "just checking");
   check_correct_thread_executing();
   verify_work_stacks_empty();
@@ -6019,14 +5694,14 @@
   assert(!_intra_sweep_timer.is_active(), "Should not be active");
   _intra_sweep_timer.reset();
   _intra_sweep_timer.start();
-  if (asynch) {
+  {
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
     // First sweep the old gen
     {
       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
                                bitMapLock());
-      sweepWork(_cmsGen, asynch);
+      sweepWork(_cmsGen);
     }
 
     // Update Universe::_heap_*_at_gc figures.
@@ -6040,13 +5715,6 @@
       Universe::update_heap_info_at_gc();
       _collectorState = Resizing;
     }
-  } else {
-    // already have needed locks
-    sweepWork(_cmsGen,  asynch);
-    // Update heap occupancy information which is used as
-    // input to soft ref clearing policy at the next gc.
-    Universe::update_heap_info_at_gc();
-    _collectorState = Resizing;
   }
   verify_work_stacks_empty();
   verify_overflow_empty();
@@ -6139,20 +5807,7 @@
   }
 }
 
-void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
-  if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
-  }
-  _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
-  _debug_collection_type =
-    (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
-  if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print_cr("to %d ", _debug_collection_type);
-  }
-}
-
-void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
-  bool asynch) {
+void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
   // We iterate over the space(s) underlying this generation,
   // checking the mark bit map to see if the bits corresponding
   // to specific blocks are marked or not. Blocks that are
@@ -6180,9 +5835,7 @@
 
   // check that we hold the requisite locks
   assert(have_cms_token(), "Should hold cms token");
-  assert(   (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
-         || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
-        "Should possess CMS token to sweep");
+  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
   assert_lock_strong(gen->freelistLock());
   assert_lock_strong(bitMapLock());
 
@@ -6194,8 +5847,7 @@
   gen->setNearLargestChunk();
 
   {
-    SweepClosure sweepClosure(this, gen, &_markBitMap,
-                            CMSYield && asynch);
+    SweepClosure sweepClosure(this, gen, &_markBitMap, CMSYield);
     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
     // We need to free-up/coalesce garbage/blocks from a
     // co-terminal free run. This is done in the SweepClosure
@@ -6213,8 +5865,8 @@
 
 // Reset CMS data structures (for now just the marking bit map)
 // preparatory for the next cycle.
-void CMSCollector::reset(bool asynch) {
-  if (asynch) {
+void CMSCollector::reset(bool concurrent) {
+  if (concurrent) {
     CMSTokenSyncWithLocks ts(true, bitMapLock());
 
     // If the state is not "Resetting", the foreground  thread
@@ -6275,17 +5927,10 @@
     _collectorState = Idling;
   }
 
-  NOT_PRODUCT(
-    if (RotateCMSCollectionTypes) {
-      _cmsGen->rotate_debug_collection_type();
-    }
-  )
-
   register_gc_end();
 }
 
 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
-  gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
   TraceCollectorStats tcs(counters());
@@ -6293,7 +5938,7 @@
   switch (op) {
     case CMS_op_checkpointRootsInitial: {
       SvcGCMarker sgcm(SvcGCMarker::OTHER);
-      checkpointRootsInitial(true);       // asynch
+      checkpointRootsInitial();
       if (PrintGC) {
         _cmsGen->printOccupancy("initial-mark");
       }
@@ -6301,9 +5946,7 @@
     }
     case CMS_op_checkpointRootsFinal: {
       SvcGCMarker sgcm(SvcGCMarker::OTHER);
-      checkpointRootsFinal(true,    // asynch
-                           false,   // !clear_all_soft_refs
-                           false);  // !init_mark_was_synchronous
+      checkpointRootsFinal();
       if (PrintGC) {
         _cmsGen->printOccupancy("remark");
       }
@@ -7193,8 +6836,7 @@
                        CMSCollector* collector, MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
-                       CMSMarkStack*  overflow_stack,
-                       bool should_yield):
+                       CMSMarkStack*  overflow_stack):
   _collector(collector),
   _whole_span(collector->_span),
   _span(span),
@@ -7202,7 +6844,6 @@
   _mut(&collector->_modUnionTable),
   _work_queue(work_queue),
   _overflow_stack(overflow_stack),
-  _yield(should_yield),
   _skip_bits(0),
   _task(task)
 {
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -608,7 +608,6 @@
   GCHeapSummary _last_heap_summary;
   MetaspaceSummary _last_metaspace_summary;
 
-  void register_foreground_gc_start(GCCause::Cause cause);
   void register_gc_start(GCCause::Cause cause);
   void register_gc_end();
   void save_heap_summary();
@@ -695,8 +694,6 @@
   int    _numYields;
   size_t _numDirtyCards;
   size_t _sweep_count;
-  // Number of full gc's since the last concurrent gc.
-  uint   _full_gcs_since_conc_gc;
 
   // Occupancy used for bootstrapping stats
   double _bootstrap_occupancy;
@@ -724,7 +721,8 @@
 
  private:
   // Support for parallelizing young gen rescan in CMS remark phase
-  Generation* _young_gen;  // the younger gen
+  ParNewGeneration* _young_gen;  // the younger gen
+
   HeapWord** _top_addr;    // ... Top of Eden
   HeapWord** _end_addr;    // ... End of Eden
   Mutex*     _eden_chunk_lock;
@@ -760,14 +758,14 @@
   NOT_PRODUCT(bool par_simulate_overflow();)   // MT version
 
   // CMS work methods
-  void checkpointRootsInitialWork(bool asynch); // Initial checkpoint work
+  void checkpointRootsInitialWork(); // Initial checkpoint work
 
   // A return value of false indicates failure due to stack overflow
-  bool markFromRootsWork(bool asynch);  // Concurrent marking work
+  bool markFromRootsWork();  // Concurrent marking work
 
  public:   // FIX ME!!! only for testing
-  bool do_marking_st(bool asynch);      // Single-threaded marking
-  bool do_marking_mt(bool asynch);      // Multi-threaded  marking
+  bool do_marking_st();      // Single-threaded marking
+  bool do_marking_mt();      // Multi-threaded  marking
 
  private:
 
@@ -788,20 +786,19 @@
   void reset_survivor_plab_arrays();
 
   // Final (second) checkpoint work
-  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
-                                bool init_mark_was_synchronous);
+  void checkpointRootsFinalWork();
   // Work routine for parallel version of remark
   void do_remark_parallel();
   // Work routine for non-parallel version of remark
   void do_remark_non_parallel();
   // Reference processing work routine (during second checkpoint)
-  void refProcessingWork(bool asynch, bool clear_all_soft_refs);
+  void refProcessingWork();
 
   // Concurrent sweeping work
-  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);
+  void sweepWork(ConcurrentMarkSweepGeneration* gen);
 
   // (Concurrent) resetting of support data structures
-  void reset(bool asynch);
+  void reset(bool concurrent);
 
   // Clear _expansion_cause fields of constituent generations
   void clear_expansion_cause();
@@ -810,22 +807,10 @@
   // used regions of each generation to limit the extent of sweep
   void save_sweep_limits();
 
-  // A work method used by foreground collection to determine
-  // what type of collection (compacting or not, continuing or fresh)
-  // it should do.
-  void decide_foreground_collection_type(bool clear_all_soft_refs,
-    bool* should_compact, bool* should_start_over);
-
   // A work method used by the foreground collector to do
   // a mark-sweep-compact.
   void do_compaction_work(bool clear_all_soft_refs);
 
-  // A work method used by the foreground collector to do
-  // a mark-sweep, after taking over from a possibly on-going
-  // concurrent mark-sweep collection.
-  void do_mark_sweep_work(bool clear_all_soft_refs,
-    CollectorState first_state, bool should_start_over);
-
   // Work methods for reporting concurrent mode interruption or failure
   bool is_external_interruption();
   void report_concurrent_mode_interruption();
@@ -868,15 +853,13 @@
   // Locking checks
   NOT_PRODUCT(static bool have_cms_token();)
 
-  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
   bool shouldConcurrentCollect();
 
   void collect(bool   full,
                bool   clear_all_soft_refs,
                size_t size,
                bool   tlab);
-  void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause);
-  void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause);
+  void collect_in_background(GCCause::Cause cause);
 
   // In support of ExplicitGCInvokesConcurrent
   static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
@@ -928,18 +911,16 @@
   void directAllocated(HeapWord* start, size_t size);
 
   // Main CMS steps and related support
-  void checkpointRootsInitial(bool asynch);
-  bool markFromRoots(bool asynch);  // a return value of false indicates failure
-                                    // due to stack overflow
+  void checkpointRootsInitial();
+  bool markFromRoots();  // a return value of false indicates failure
+                         // due to stack overflow
   void preclean();
-  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
-                            bool init_mark_was_synchronous);
-  void sweep(bool asynch);
+  void checkpointRootsFinal();
+  void sweep();
 
   // Check that the currently executing thread is the expected
   // one (foreground collector or background collector).
   static void check_correct_thread_executing() PRODUCT_RETURN;
-  // XXXPERM void print_statistics()           PRODUCT_RETURN;
 
   bool is_cms_reachable(HeapWord* addr);
 
@@ -1060,15 +1041,6 @@
   // In support of MinChunkSize being larger than min object size
   const double _dilatation_factor;
 
-  enum CollectionTypes {
-    Concurrent_collection_type          = 0,
-    MS_foreground_collection_type       = 1,
-    MSC_foreground_collection_type      = 2,
-    Unknown_collection_type             = 3
-  };
-
-  CollectionTypes _debug_collection_type;
-
   // True if a compacting collection was done.
   bool _did_compact;
   bool did_compact() { return _did_compact; }
@@ -1152,7 +1124,7 @@
   // hack to allow the collection of the younger gen first if the flag is
   // set.
   virtual bool full_collects_younger_generations() const {
-    return UseCMSCompactAtFullCollection && !ScavengeBeforeFullGC;
+    return !ScavengeBeforeFullGC;
   }
 
   void space_iterate(SpaceClosure* blk, bool usedOnly = false);
@@ -1180,9 +1152,6 @@
   // Overrides for parallel promotion.
   virtual oop par_promote(int thread_num,
                           oop obj, markOop m, size_t word_sz);
-  // This one should not be called for CMS.
-  virtual void par_promote_alloc_undo(int thread_num,
-                                      HeapWord* obj, size_t word_sz);
   virtual void par_promote_alloc_done(int thread_num);
   virtual void par_oop_since_save_marks_iterate_done(int thread_num);
 
@@ -1285,8 +1254,6 @@
   virtual const char* short_name() const { return "CMS"; }
   void        print() const;
   void printOccupancy(const char* s);
-  bool must_be_youngest() const { return false; }
-  bool must_be_oldest()   const { return true; }
 
   // Resize the generation after a compacting GC.  The
   // generation can be treated as a contiguous space
@@ -1295,9 +1262,6 @@
   // Resize the generation after a non-compacting
   // collection.
   void compute_new_size_free_list();
-
-  CollectionTypes debug_collection_type() { return _debug_collection_type; }
-  void rotate_debug_collection_type();
 };
 
 //
@@ -1344,7 +1308,6 @@
   CMSBitMap*     _mut;
   OopTaskQueue*  _work_queue;
   CMSMarkStack*  _overflow_stack;
-  bool           _yield;
   int            _skip_bits;
   HeapWord*      _finger;
   HeapWord*      _threshold;
@@ -1354,8 +1317,7 @@
                        MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
-                       CMSMarkStack*  overflow_stack,
-                       bool should_yield);
+                       CMSMarkStack*  overflow_stack);
   bool do_bit(size_t offset);
   inline void do_yield_check();
 
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -29,8 +29,9 @@
 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
+#include "gc_implementation/parNew/parNewGeneration.hpp"
 #include "gc_implementation/shared/gcUtil.hpp"
-#include "memory/defNewGeneration.hpp"
+#include "memory/genCollectedHeap.hpp"
 
 inline void CMSBitMap::clear_all() {
   assert_locked();
@@ -257,11 +258,11 @@
 }
 
 inline size_t CMSCollector::get_eden_used() const {
-  return _young_gen->as_DefNewGeneration()->eden()->used();
+  return _young_gen->eden()->used();
 }
 
 inline size_t CMSCollector::get_eden_capacity() const {
-  return _young_gen->as_DefNewGeneration()->eden()->capacity();
+  return _young_gen->eden()->capacity();
 }
 
 inline bool CMSStats::valid() const {
@@ -398,8 +399,7 @@
 
 inline void Par_MarkFromRootsClosure::do_yield_check() {
   if (ConcurrentMarkSweepThread::should_yield() &&
-      !_collector->foregroundGCIsActive() &&
-      _yield) {
+      !_collector->foregroundGCIsActive()) {
     do_yield_work();
   }
 }
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -134,7 +134,7 @@
     if (_should_terminate) break;
     GCCause::Cause cause = _collector->_full_gc_requested ?
       _collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
-    _collector->collect_in_background(false, cause);
+    _collector->collect_in_background(cause);
   }
   assert(_should_terminate, "just checking");
   // Check that the state of any protocol for synchronization
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -42,8 +42,12 @@
 void VM_CMS_Operation::acquire_pending_list_lock() {
   // The caller may block while communicating
   // with the SLT thread in order to acquire/release the PLL.
-  ConcurrentMarkSweepThread::slt()->
-    manipulatePLL(SurrogateLockerThread::acquirePLL);
+  SurrogateLockerThread* slt = ConcurrentMarkSweepThread::slt();
+  if (slt != NULL) {
+    slt->manipulatePLL(SurrogateLockerThread::acquirePLL);
+  } else {
+    SurrogateLockerThread::report_missing_slt();
+  }
 }
 
 void VM_CMS_Operation::release_and_notify_pending_list_lock() {
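
The guard above replaces an unconditional slt() dereference. If release_and_notify_pending_list_lock (body not shown in this hunk) reaches the SLT the same way, it would presumably want the same shape; a sketch under that assumption, with the release message name assumed rather than taken from this patch:

    SurrogateLockerThread* slt = ConcurrentMarkSweepThread::slt();
    if (slt != NULL) {
      slt->manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);  // assumed message
    } else {
      SurrogateLockerThread::report_missing_slt();
    }
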
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -180,9 +180,32 @@
   }
 };
 
+class ParClearNextMarkBitmapTask : public AbstractGangTask {
+  ClearBitmapHRClosure* _cl;
+  HeapRegionClaimer     _hrclaimer;
+  bool                  _suspendible; // If the task is suspendible, workers must join the STS.
+
+public:
+  ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
+      AbstractGangTask("Parallel Clear Bitmap Task"), _cl(cl), _hrclaimer(n_workers), _suspendible(suspendible) {}
+
+  void work(uint worker_id) {
+    if (_suspendible) {
+      SuspendibleThreadSet::join();
+    }
+    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
+    if (_suspendible) {
+      SuspendibleThreadSet::leave();
+    }
+  }
+};
+
 void CMBitMap::clearAll() {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
   ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
-  G1CollectedHeap::heap()->heap_region_iterate(&cl);
+  uint n_workers = g1h->workers()->active_workers();
+  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
+  g1h->workers()->run_task(&task);
   guarantee(cl.complete(), "Must have completed iteration.");
   return;
 }
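
The manual join()/leave() pair in ParClearNextMarkBitmapTask::work must bracket the iteration on every exit path. An RAII alternative using SuspendibleThreadSetJoiner (the helper this patch removes from concurrentMarkThread.cpp further down) would make the leave automatic — a sketch, not what the patch does:

    void work(uint worker_id) {
      if (_suspendible) {
        SuspendibleThreadSetJoiner sts;  // joins now, leaves at scope exit
        G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
      } else {
        G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
      }
    }
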
@@ -861,7 +884,8 @@
   guarantee(!g1h->mark_in_progress(), "invariant");
 
   ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
-  g1h->heap_region_iterate(&cl);
+  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
+  _parallel_workers->run_task(&task);
 
   // Clear the liveness counting data. If the marking has been aborted, the abort()
   // call already did that.
@@ -2099,6 +2123,7 @@
   // We reclaimed old regions so we should calculate the sizes to make
   // sure we update the old gen/space data.
   g1h->g1mm()->update_sizes();
+  g1h->allocation_context_stats().update_after_mark();
 
   g1h->trace_heap_after_concurrent_cycle();
 }
@@ -3219,7 +3244,6 @@
   _g1h->set_par_threads(n_workers);
   _g1h->workers()->run_task(&g1_par_agg_task);
   _g1h->set_par_threads(0);
-  _g1h->allocation_context_stats().update_at_remark();
 }
 
 // Clear the per-worker arrays used to store the per-region counting data
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -280,7 +280,6 @@
       // We may have aborted just before the remark. Do not bother clearing the
       // bitmap then, as it has been done during mark abort.
       if (!cm()->has_aborted()) {
-        SuspendibleThreadSetJoiner sts;
         _cm->clearNextBitmap();
       } else {
         assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
--- a/src/share/vm/gc_implementation/g1/g1AllocationContext.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/g1/g1AllocationContext.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -45,7 +45,7 @@
 public:
   inline void clear() { }
   inline void update(bool full_gc) { }
-  inline void update_at_remark() { }
+  inline void update_after_mark() { }
   inline bool available() { return false; }
 };
 
--- a/src/share/vm/gc_implementation/g1/g1Allocator.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/g1/g1Allocator.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -59,7 +59,7 @@
       !(retained_region->top() == retained_region->end()) &&
       !retained_region->is_empty() &&
       !retained_region->is_humongous()) {
-    retained_region->record_top_and_timestamp();
+    retained_region->record_timestamp();
     // The retained region was added to the old region set when it was
     // retired. We have to remove it now, since we don't allow regions
     // we allocate to in the region sets. We'll re-add it later, when
@@ -94,6 +94,9 @@
   // want either way so no reason to check explicitly for either
   // condition.
   _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
+  if (_retained_old_gc_alloc_region != NULL) {
+    _retained_old_gc_alloc_region->record_retained_region();
+  }
 
   if (ResizePLAB) {
     _g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -1222,7 +1222,6 @@
 
     // Timing
     assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
-    gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
 
     {
@@ -1888,7 +1887,7 @@
   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
   // Create the gen rem set (and barrier set) for the entire reserved region.
-  _rem_set = collector_policy()->create_rem_set(reserved_region(), 2);
+  _rem_set = collector_policy()->create_rem_set(reserved_region());
   set_barrier_set(rem_set()->bs());
   if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
     vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
@@ -2258,6 +2257,7 @@
     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
     case GCCause::_g1_humongous_allocation: return true;
     case GCCause::_update_allocation_context_stats_inc: return true;
+    case GCCause::_wb_conc_mark:            return true;
     default:                                return false;
   }
 }
@@ -2552,8 +2552,9 @@
 void
 G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
                                          uint worker_id,
-                                         HeapRegionClaimer *hrclaimer) const {
-  _hrm.par_iterate(cl, worker_id, hrclaimer);
+                                         HeapRegionClaimer *hrclaimer,
+                                         bool concurrent) const {
+  _hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
 }
 
 // Clear the cached CSet starting regions and (more importantly)
@@ -4270,10 +4271,11 @@
 
   if (state == G1CollectedHeap::InCSet) {
     oop forwardee;
-    if (obj->is_forwarded()) {
-      forwardee = obj->forwardee();
+    markOop m = obj->mark();
+    if (m->is_marked()) {
+      forwardee = (oop) m->decode_pointer();
     } else {
-      forwardee = _par_scan_state->copy_to_survivor_space(obj);
+      forwardee = _par_scan_state->copy_to_survivor_space(obj, m);
     }
     assert(forwardee != NULL, "forwardee should not be NULL");
     oopDesc::encode_store_heap_oop(p, forwardee);
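
The rewrite above loads the object's mark word once and reuses it, where the old code called is_forwarded() and then forwardee() (two header loads with a window between them). The pattern in isolation, using only names from this hunk:

    markOop m = obj->mark();                 // single load of the header
    oop forwardee = m->is_marked()
        ? (oop) m->decode_pointer()          // already copied: mark encodes the new location
        : _par_scan_state->copy_to_survivor_space(obj, m);  // not yet: copy, reusing m
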
@@ -6529,7 +6531,7 @@
       // We really only need to do this for old regions given that we
       // should never scan survivors. But it doesn't hurt to do it
       // for survivors too.
-      new_alloc_region->record_top_and_timestamp();
+      new_alloc_region->record_timestamp();
       if (survivor) {
         new_alloc_region->set_survivor();
         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -1380,10 +1380,13 @@
   // in the range [0..max(ParallelGCThreads-1, 1)]. Applies "blk->doHeapRegion"
   // to each of the regions, by attempting to claim the region using the
   // HeapRegionClaimer and, if successful, applying the closure to the claimed
-  // region.
+  // region. The concurrent argument should be set to true if the iteration
+  // runs concurrently; in that case no assumptions are made about the
+  // consistency of heap region attributes (they may change while iterating).
   void heap_region_par_iterate(HeapRegionClosure* cl,
                                uint worker_id,
-                               HeapRegionClaimer* hrclaimer) const;
+                               HeapRegionClaimer* hrclaimer,
+                               bool concurrent = false) const;
 
   // Clear the cached cset start regions and (more importantly)
   // the time stamps. Called when we reset the GC time stamp.
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -1425,6 +1425,18 @@
 #endif // PRODUCT
 }
 
+bool G1CollectorPolicy::is_young_list_full() {
+  uint young_list_length = _g1->young_list()->length();
+  uint young_list_target_length = _young_list_target_length;
+  return young_list_length >= young_list_target_length;
+}
+
+bool G1CollectorPolicy::can_expand_young_list() {
+  uint young_list_length = _g1->young_list()->length();
+  uint young_list_max_length = _young_list_max_length;
+  return young_list_length < young_list_max_length;
+}
+
 uint G1CollectorPolicy::max_regions(int purpose) {
   switch (purpose) {
     case GCAllocForSurvived:
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
 
 #include "gc_implementation/g1/collectionSetChooser.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
 #include "gc_implementation/g1/g1MMUTracker.hpp"
 #include "memory/collectorPolicy.hpp"
 
@@ -807,7 +808,7 @@
 
   // If an expansion would be appropriate, because recent GC overhead had
   // exceeded the desired limit, return an amount to expand by.
-  size_t expansion_amount();
+  virtual size_t expansion_amount();
 
   // Print tracing information.
   void print_tracing_info() const;
@@ -826,17 +827,9 @@
 
   size_t young_list_target_length() const { return _young_list_target_length; }
 
-  bool is_young_list_full() {
-    uint young_list_length = _g1->young_list()->length();
-    uint young_list_target_length = _young_list_target_length;
-    return young_list_length >= young_list_target_length;
-  }
+  bool is_young_list_full();
 
-  bool can_expand_young_list() {
-    uint young_list_length = _g1->young_list()->length();
-    uint young_list_max_length = _young_list_max_length;
-    return young_list_length < young_list_max_length;
-  }
+  bool can_expand_young_list();
 
   uint young_list_max_length() {
     return _young_list_max_length;
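
For reference, the two predicates just moved out of line test the young list length against two different bounds: the per-pause target and the hard maximum. A hedged sketch of how a caller might combine them; may_allocate_young_region is a hypothetical helper:

  bool may_allocate_young_region(G1CollectorPolicy* g1p) {
    return !g1p->is_young_list_full() ||   // length < target: the normal case
            g1p->can_expand_young_list();  // length < max: may stretch target
  }
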
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy_ext.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP
+
+#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+
+class G1CollectorPolicyExt : public G1CollectorPolicy { };
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP
--- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -150,7 +150,8 @@
   } while (!_refs->is_empty());
 }
 
-oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
+oop G1ParScanThreadState::copy_to_survivor_space(oop const old,
+                                                 markOop const old_mark) {
   size_t word_sz = old->size();
   HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
   // +1 to make the -1 indexes valid...
@@ -158,9 +159,8 @@
   assert( (from_region->is_young() && young_index >  0) ||
          (!from_region->is_young() && young_index == 0), "invariant" );
   G1CollectorPolicy* g1p = _g1h->g1_policy();
-  markOop m = old->mark();
-  int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
-                                           : m->age();
+  uint age = old_mark->has_displaced_mark_helper() ? old_mark->displaced_mark_helper()->age()
+                                                   : old_mark->age();
   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
                                                              word_sz);
   AllocationContext_t context = from_region->allocation_context();
@@ -196,30 +196,22 @@
     alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
 
     if (g1p->track_object_age(alloc_purpose)) {
-      // We could simply do obj->incr_age(). However, this causes a
-      // performance issue. obj->incr_age() will first check whether
-      // the object has a displaced mark by checking its mark word;
-      // getting the mark word from the new location of the object
-      // stalls. So, given that we already have the mark word and we
-      // are about to install it anyway, it's better to increase the
-      // age on the mark word, when the object does not have a
-      // displaced mark word. We're not expecting many objects to have
-      // a displaced marked word, so that case is not optimized
-      // further (it could be...) and we simply call obj->incr_age().
-
-      if (m->has_displaced_mark_helper()) {
-        // in this case, we have to install the mark word first,
+      if (age < markOopDesc::max_age) {
+        age++;
+      }
+      if (old_mark->has_displaced_mark_helper()) {
+        // In this case, we have to install the mark word first,
         // otherwise obj looks to be forwarded (the old mark word,
         // which contains the forward pointer, was copied)
-        obj->set_mark(m);
-        obj->incr_age();
+        obj->set_mark(old_mark);
+        markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
+        old_mark->set_displaced_mark_helper(new_mark);
       } else {
-        m = m->incr_age();
-        obj->set_mark(m);
+        obj->set_mark(old_mark->set_age(age));
       }
-      age_table()->add(obj, word_sz);
+      age_table()->add(age, word_sz);
     } else {
-      obj->set_mark(m);
+      obj->set_mark(old_mark);
     }
 
     if (G1StringDedup::is_enabled()) {
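
The block above replaces obj->incr_age(), which would re-load the header of the freshly copied object (the stall the deleted comment describes), with an age computed once from old_mark. A condensed restatement for readability only; names are those used in the hunk:

  uint age = old_mark->has_displaced_mark_helper()
               ? old_mark->displaced_mark_helper()->age() : old_mark->age();
  if (age < markOopDesc::max_age) age++;    // tick the tenuring clock once
  if (old_mark->has_displaced_mark_helper()) {
    obj->set_mark(old_mark);                // install first, or obj would
                                            // appear forwarded
    old_mark->set_displaced_mark_helper(
        old_mark->displaced_mark_helper()->set_age(age));
  } else {
    obj->set_mark(old_mark->set_age(age));  // age lives in the header itself
  }
  age_table()->add(age, word_sz);           // header-free overload, added in
                                            // ageTable.hpp below
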
--- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -195,7 +195,7 @@
   inline void dispatch_reference(StarTask ref);
  public:
 
-  oop copy_to_survivor_space(oop const obj);
+  oop copy_to_survivor_space(oop const obj, markOop const old_mark);
 
   void trim_queue();
 
--- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -41,10 +41,11 @@
   G1CollectedHeap::in_cset_state_t in_cset_state = _g1h->in_cset_state(obj);
   if (in_cset_state == G1CollectedHeap::InCSet) {
     oop forwardee;
-    if (obj->is_forwarded()) {
-      forwardee = obj->forwardee();
+    markOop m = obj->mark();
+    if (m->is_marked()) {
+      forwardee = (oop) m->decode_pointer();
     } else {
-      forwardee = copy_to_survivor_space(obj);
+      forwardee = copy_to_survivor_space(obj, m);
     }
     oopDesc::encode_store_heap_oop(p, forwardee);
   } else if (in_cset_state == G1CollectedHeap::IsHumongous) {
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -140,11 +140,9 @@
 
     // Set the "from" region in the closure.
     _oc->set_region(r);
-    HeapWord* card_start = _bot_shared->address_for_index(index);
-    HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
-    Space *sp = SharedHeap::heap()->space_containing(card_start);
-    MemRegion sm_region = sp->used_region_at_save_marks();
-    MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
+    MemRegion card_region(_bot_shared->address_for_index(index), G1BlockOffsetSharedArray::N_words);
+    MemRegion pre_gc_allocated(r->bottom(), r->scan_top());
+    MemRegion mr = pre_gc_allocated.intersection(card_region);
     if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
      // We mark the card as "claimed" lazily (so races are possible
       // but they're benign), which reduces the number of duplicate
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -32,9 +32,8 @@
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/thread.inline.hpp"
 
-G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,
-                                                 int max_covered_regions) :
-    CardTableModRefBSForCTRS(whole_heap, max_covered_regions)
+G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap) :
+    CardTableModRefBSForCTRS(whole_heap)
 {
   _kind = G1SATBCT;
 }
@@ -132,9 +131,8 @@
 }
 
 G1SATBCardTableLoggingModRefBS::
-G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
-                               int max_covered_regions) :
-  G1SATBCardTableModRefBS(whole_heap, max_covered_regions),
+G1SATBCardTableLoggingModRefBS(MemRegion whole_heap) :
+  G1SATBCardTableModRefBS(whole_heap),
   _dcqs(JavaThread::dirty_card_queue_set()),
   _listener()
 {
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -50,8 +50,7 @@
   // pre-marking object graph.
   static void enqueue(oop pre_val);
 
-  G1SATBCardTableModRefBS(MemRegion whole_heap,
-                          int max_covered_regions);
+  G1SATBCardTableModRefBS(MemRegion whole_heap);
 
   bool is_a(BarrierSet::Name bsn) {
     return bsn == BarrierSet::G1SATBCT || CardTableModRefBS::is_a(bsn);
@@ -152,8 +151,7 @@
     return ReservedSpace::allocation_align_size_up(number_of_slots);
   }
 
-  G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
-                                 int max_covered_regions);
+  G1SATBCardTableLoggingModRefBS(MemRegion whole_heap);
 
   virtual void initialize() { }
   virtual void initialize(G1RegionToSpaceMapper* mapper);
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -326,7 +326,7 @@
 
   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
-  record_top_and_timestamp();
+  record_timestamp();
 
   assert(mr.end() == orig_end(),
          err_msg("Given region end address " PTR_FORMAT " should match exactly "
@@ -416,9 +416,9 @@
 
   // If we're within a stop-world GC, then we might look at a card in a
   // GC alloc region that extends onto a GC LAB, which may not be
-  // parseable.  Stop such at the "saved_mark" of the region.
+  // parseable.  Stop such scanning at the "scan_top" of the region.
   if (g1h->is_gc_active()) {
-    mr = mr.intersection(used_region_at_save_marks());
+    mr = mr.intersection(MemRegion(bottom(), scan_top()));
   } else {
     mr = mr.intersection(used_region());
   }
@@ -969,7 +969,7 @@
 
 void G1OffsetTableContigSpace::clear(bool mangle_space) {
   set_top(bottom());
-  set_saved_mark_word(bottom());
+  _scan_top = bottom();
   CompactibleSpace::clear(mangle_space);
   reset_bot();
 }
@@ -1001,38 +1001,42 @@
   return _offsets.threshold();
 }
 
-HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
+HeapWord* G1OffsetTableContigSpace::scan_top() const {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
-  if (_gc_time_stamp < g1h->get_gc_time_stamp())
-    return top();
-  else
-    return Space::saved_mark_word();
+  HeapWord* local_top = top();
+  OrderAccess::loadload();
+  const unsigned local_time_stamp = _gc_time_stamp;
+  assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
+  if (local_time_stamp < g1h->get_gc_time_stamp()) {
+    return local_top;
+  } else {
+    return _scan_top;
+  }
 }
 
-void G1OffsetTableContigSpace::record_top_and_timestamp() {
+void G1OffsetTableContigSpace::record_timestamp() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
 
   if (_gc_time_stamp < curr_gc_time_stamp) {
-    // The order of these is important, as another thread might be
-    // about to start scanning this region. If it does so after
-    // set_saved_mark and before _gc_time_stamp = ..., then the latter
-    // will be false, and it will pick up top() as the high water mark
-    // of region. If it does so after _gc_time_stamp = ..., then it
-    // will pick up the right saved_mark_word() as the high water mark
-    // of the region. Either way, the behavior will be correct.
-    Space::set_saved_mark_word(top());
-    OrderAccess::storestore();
+    // Setting the time stamp here tells concurrent readers to look at
+    // scan_top for the maximum address they are allowed to scan.
+
+    // scan_top should be bottom for all regions except for the
+    // retained old alloc region, which should have scan_top == top.
+    HeapWord* st = _scan_top;
+    guarantee(st == _bottom || st == _top, "invariant");
+
     _gc_time_stamp = curr_gc_time_stamp;
-    // No need to do another barrier to flush the writes above. If
-    // this is called in parallel with other threads trying to
-    // allocate into the region, the caller should call this while
-    // holding a lock and when the lock is released the writes will be
-    // flushed.
   }
 }
 
+void G1OffsetTableContigSpace::record_retained_region() {
+  // scan_top is the maximum address where it's safe for the next gc to
+  // scan this region.
+  _scan_top = top();
+}
+
 void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
   object_iterate(blk);
 }
@@ -1060,6 +1064,8 @@
 void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
   CompactibleSpace::initialize(mr, clear_space, mangle_space);
   _top = bottom();
+  _scan_top = bottom();
+  set_saved_mark_word(NULL);
   reset_bot();
 }
 
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -101,28 +101,25 @@
 // OffsetTableContigSpace.  If the two versions of BlockOffsetTable could
 // be reconciled, then G1OffsetTableContigSpace could go away.
 
-// The idea behind time stamps is the following. Doing a save_marks on
-// all regions at every GC pause is time consuming (if I remember
-// well, 10ms or so). So, we would like to do that only for regions
-// that are GC alloc regions. To achieve this, we use time
-// stamps. For every evacuation pause, G1CollectedHeap generates a
-// unique time stamp (essentially a counter that gets
-// incremented). Every time we want to call save_marks on a region,
-// we set the saved_mark_word to top and also copy the current GC
-// time stamp to the time stamp field of the space. Reading the
-// saved_mark_word involves checking the time stamp of the
-// region. If it is the same as the current GC time stamp, then we
-// can safely read the saved_mark_word field, as it is valid. If the
-// time stamp of the region is not the same as the current GC time
-// stamp, then we instead read top, as the saved_mark_word field is
-// invalid. Time stamps (on the regions and also on the
-// G1CollectedHeap) are reset at every cleanup (we iterate over
-// the regions anyway) and at the end of a Full GC. The current scheme
-// that uses sequential unsigned ints will fail only if we have 4b
+// The idea behind time stamps is the following. We want to keep track of
+// the highest address where it's safe to scan objects for each region.
+// This is only relevant for current GC alloc regions so we keep a time stamp
+// per region to determine if the region has been allocated during the current
+// GC or not. If the time stamp is current, we report a scan_top value that
+// was saved at the end of the previous GC for retained alloc regions, and
+// that equals bottom for all other regions.
+// There is a race between card scanners and allocating gc workers: we must
+// ensure that card scanners never read memory allocated by the gc workers.
+// To enforce that, we must not return a value of _top that was read after the
+// time stamp, because a region may become a gc alloc region (with its top
+// advancing) after we have read its time stamp as being < the current one.
+// The time stamps are re-initialized to zero at cleanup and at Full GCs.
+// The current scheme that uses sequential unsigned ints will fail only if we have 4b
 // evacuation pauses between two cleanups, which is _highly_ unlikely.
 class G1OffsetTableContigSpace: public CompactibleSpace {
   friend class VMStructs;
   HeapWord* _top;
+  HeapWord* volatile _scan_top;
  protected:
   G1BlockOffsetArrayContigSpace _offsets;
   Mutex _par_alloc_lock;
@@ -166,10 +163,11 @@
   void set_bottom(HeapWord* value);
   void set_end(HeapWord* value);
 
-  virtual HeapWord* saved_mark_word() const;
-  void record_top_and_timestamp();
+  HeapWord* scan_top() const;
+  void record_timestamp();
   void reset_gc_time_stamp() { _gc_time_stamp = 0; }
   unsigned get_gc_time_stamp() { return _gc_time_stamp; }
+  void record_retained_region();
 
   // See the comment above in the declaration of _pre_dummy_top for an
   // explanation of what it is.
@@ -191,6 +189,8 @@
   virtual HeapWord* allocate(size_t word_size);
   HeapWord* par_allocate(size_t word_size);
 
+  HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }
+
   // MarkSweep support phase3
   virtual HeapWord* initialize_threshold();
   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
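
The ordering here is the heart of the change: record_timestamp() publishes the time stamp only after _scan_top holds the right value (bottom() from clear(), or top() via record_retained_region() for a retained old alloc region), and scan_top() pairs that with a loadload barrier on the reader side. A sketch of the pairing, not part of the changeset; scan_limit is a hypothetical reader:

  // Writer order (under the alloc-region hand-off):
  //   _scan_top      = bottom-or-top;   // set before the stamp
  //   _gc_time_stamp = current_stamp;   // published last
  // Reader, e.g. a card scanner running concurrently with allocation:
  HeapWord* scan_limit(G1OffsetTableContigSpace* sp, unsigned current_stamp) {
    HeapWord* t = sp->top();
    OrderAccess::loadload();             // read top before the time stamp
    if (sp->get_gc_time_stamp() < current_stamp) {
      return t;                          // stale stamp: region fully parseable
    }
    return sp->scan_top();               // current GC alloc region: stop early
  }
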
--- a/src/share/vm/gc_implementation/g1/heapRegionManager.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/g1/heapRegionManager.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -260,7 +260,7 @@
   return num_regions;
 }
 
-void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const {
+void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const {
   const uint start_index = hrclaimer->start_region_for_worker(worker_id);
 
   // Every worker will actually look at all regions, skipping over regions that
@@ -279,7 +279,11 @@
     // We'll ignore "continues humongous" regions (we'll process them
     // when we come across their corresponding "start humongous"
     // region) and regions already claimed.
-    if (hrclaimer->is_region_claimed(index) || r->is_continues_humongous()) {
+    // However, if the iteration is specified as concurrent, the values for
+    // is_starts_humongous and is_continues_humongous cannot be trusted,
+    // and we should just blindly iterate over regions regardless of their
+    // humongous status.
+    if (hrclaimer->is_region_claimed(index) || (!concurrent && r->is_continues_humongous())) {
       continue;
     }
     // OK, try to claim it
@@ -287,7 +291,9 @@
       continue;
     }
     // Success!
-    if (r->is_starts_humongous()) {
+    // As mentioned above, special treatment of humongous regions can only be
+    // done if we are iterating non-concurrently.
+    if (!concurrent && r->is_starts_humongous()) {
       // If the region is "starts humongous" we'll iterate over its
       // "continues humongous" first; in fact we'll do them
       // first. The order is important. In one case, calling the
--- a/src/share/vm/gc_implementation/g1/heapRegionManager.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/g1/heapRegionManager.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -222,7 +222,7 @@
   // terminating the iteration early if doHeapRegion() returns true.
   void iterate(HeapRegionClosure* blk) const;
 
-  void par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const;
+  void par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const;
 
   // Uncommit up to num_regions_to_remove regions that are completely free.
   // Return the actual number of uncommitted regions.
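
To make the concurrent flag concrete, here is a condensed sketch of the claim loop par_iterate now runs; n_regions, at(), and the claim_region() call are assumed from the surrounding HeapRegionManager/HeapRegionClaimer code and are not all shown in the hunks above:

  for (uint count = 0; count < n_regions; count++) {
    const uint index = (start_index + count) % n_regions;
    HeapRegion* r = at(index);
    // Humongous flags cannot be trusted while the heap is mutated
    // concurrently, so only honor them when iterating at a safepoint.
    if (hrclaimer->is_region_claimed(index) ||
        (!concurrent && r->is_continues_humongous())) {
      continue;
    }
    if (!hrclaimer->claim_region(index)) {
      continue;                          // another worker claimed it first
    }
    if (!concurrent && r->is_starts_humongous()) {
      // ... process the "continues humongous" tail first, as above ...
    }
    if (blk->doHeapRegion(r)) {
      break;                             // closure requested early termination
    }
  }
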
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -92,12 +92,8 @@
 
 void VM_G1IncCollectionPause::doit() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  assert(!_should_initiate_conc_mark ||
-  ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
-   (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
-    _gc_cause == GCCause::_g1_humongous_allocation ||
-    _gc_cause == GCCause::_update_allocation_context_stats_inc),
-      "only a GC locker, a System.gc(), stats update or a hum allocation induced GC should start a cycle");
+  assert(!_should_initiate_conc_mark || g1h->should_do_concurrent_full_gc(_gc_cause),
+      "only a GC locker, a System.gc(), stats update, whitebox, or a hum allocation induced GC should start a cycle");
 
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
@@ -213,8 +209,12 @@
   assert(_needs_pll, "don't call this otherwise");
   // The caller may block while communicating
   // with the SLT thread in order to acquire/release the PLL.
-  ConcurrentMarkThread::slt()->
-    manipulatePLL(SurrogateLockerThread::acquirePLL);
+  SurrogateLockerThread* slt = ConcurrentMarkThread::slt();
+  if (slt != NULL) {
+    slt->manipulatePLL(SurrogateLockerThread::acquirePLL);
+  } else {
+    SurrogateLockerThread::report_missing_slt();
+  }
 }
 
 void VM_CGC_Operation::release_and_notify_pending_list_lock() {
@@ -226,7 +226,6 @@
 }
 
 void VM_CGC_Operation::doit() {
-  gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
   TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
   GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm(), G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id());
   SharedHeap* sh = SharedHeap::heap();
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -39,7 +39,6 @@
 #include "memory/genCollectedHeap.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/generation.hpp"
-#include "memory/generation.inline.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/sharedHeap.hpp"
@@ -884,8 +883,6 @@
 
 // A Generation that does parallel young-gen collection.
 
-bool ParNewGeneration::_avoid_promotion_undo = false;
-
 void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
   assert(_promo_failure_scan_stack.is_empty(), "post condition");
   _promo_failure_scan_stack.clear(true); // Clear cached segments.
@@ -934,10 +931,6 @@
   assert(gch->n_gens() == 2,
          "Par collection currently only works with single older gen.");
   _next_gen = gch->next_gen(this);
-  // Do we have to avoid promotion_undo?
-  if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
-    set_avoid_promotion_undo(true);
-  }
 
   // If the next generation is too full to accommodate worst-case promotion
   // from this generation, pass on collection; let the next generation
@@ -999,6 +992,11 @@
   thread_state_set.reset(0 /* Bad value in debug if not reset */,
                          promotion_failed());
 
+  // Trace and reset failed promotion info.
+  if (promotion_failed()) {
+    thread_state_set.trace_promotion_failed(gc_tracer);
+  }
+
   // Process (weak) reference objects found during scavenge.
   ReferenceProcessor* rp = ref_processor();
   IsAliveClosure is_alive(this);
@@ -1136,7 +1134,7 @@
 #ifdef ASSERT
 bool ParNewGeneration::is_legal_forward_ptr(oop p) {
   return
-    (_avoid_promotion_undo && p == ClaimedForwardPtr)
+    (p == ClaimedForwardPtr)
     || Universe::heap()->is_in_reserved(p);
 }
 #endif
@@ -1157,7 +1155,7 @@
 // thus avoiding the need to undo the copy as in
// copy_to_survivor_space_with_undo.
 
-oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
+oop ParNewGeneration::copy_to_survivor_space(
         ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
   // In the sequential version, this assert also says that the object is
   // not forwarded.  That might not be the case here.  It is the case that
@@ -1277,131 +1275,6 @@
   return forward_ptr;
 }
 
-
-// Multiple GC threads may try to promote the same object.  If two
-// or more GC threads copy the object, only one wins the race to install
-// the forwarding pointer.  The other threads have to undo their copy.
-
-oop ParNewGeneration::copy_to_survivor_space_with_undo(
-        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
-
-  // In the sequential version, this assert also says that the object is
-  // not forwarded.  That might not be the case here.  It is the case that
-  // the caller observed it to be not forwarded at some time in the past.
-  assert(is_in_reserved(old), "shouldn't be scavenging this oop");
-
-  // The sequential code read "old->age()" below.  That doesn't work here,
-  // since the age is in the mark word, and that might be overwritten with
-  // a forwarding pointer by a parallel thread.  So we must save the mark
-  // word here, install it in a local oopDesc, and then analyze it.
-  oopDesc dummyOld;
-  dummyOld.set_mark(m);
-  assert(!dummyOld.is_forwarded(),
-         "should not be called with forwarding pointer mark word.");
-
-  bool failed_to_promote = false;
-  oop new_obj = NULL;
-  oop forward_ptr;
-
-  // Try allocating obj in to-space (unless too old)
-  if (dummyOld.age() < tenuring_threshold()) {
-    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
-    if (new_obj == NULL) {
-      set_survivor_overflow(true);
-    }
-  }
-
-  if (new_obj == NULL) {
-    // Either to-space is full or we decided to promote
-    // try allocating obj tenured
-    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
-                                       old, m, sz);
-
-    if (new_obj == NULL) {
-      // promotion failed, forward to self
-      forward_ptr = old->forward_to_atomic(old);
-      new_obj = old;
-
-      if (forward_ptr != NULL) {
-        return forward_ptr;   // someone else succeeded
-      }
-
-      _promotion_failed = true;
-      failed_to_promote = true;
-
-      preserve_mark_if_necessary(old, m);
-      par_scan_state->register_promotion_failure(sz);
-    }
-  } else {
-    // Is in to-space; do copying ourselves.
-    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
-    // Restore the mark word copied above.
-    new_obj->set_mark(m);
-    // Increment age if new_obj still in new generation
-    new_obj->incr_age();
-    par_scan_state->age_table()->add(new_obj, sz);
-  }
-  assert(new_obj != NULL, "just checking");
-
-#ifndef PRODUCT
-  // This code must come after the CAS test, or it will print incorrect
-  // information.
-  if (TraceScavenge) {
-    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
-       is_in_reserved(new_obj) ? "copying" : "tenuring",
-       new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
-  }
-#endif
-
-  // Now attempt to install the forwarding pointer (atomically).
-  // We have to copy the mark word before overwriting with forwarding
-  // ptr, so we can restore it below in the copy.
-  if (!failed_to_promote) {
-    forward_ptr = old->forward_to_atomic(new_obj);
-  }
-
-  if (forward_ptr == NULL) {
-    oop obj_to_push = new_obj;
-    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
-      // Length field used as index of next element to be scanned.
-      // Real length can be obtained from real_forwardee()
-      arrayOop(old)->set_length(0);
-      obj_to_push = old;
-      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
-             "push forwarded object");
-    }
-    // Push it on one of the queues of to-be-scanned objects.
-    bool simulate_overflow = false;
-    NOT_PRODUCT(
-      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
-        // simulate a stack overflow
-        simulate_overflow = true;
-      }
-    )
-    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
-      // Add stats for overflow pushes.
-      push_on_overflow_list(old, par_scan_state);
-      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
-    }
-
-    return new_obj;
-  }
-
-  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
-  // allocate it?
-  if (is_in_reserved(new_obj)) {
-    // Must be in to_space.
-    assert(to()->is_in_reserved(new_obj), "Checking");
-    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
-  } else {
-    assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
-    _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
-                                      (HeapWord*)new_obj, sz);
-  }
-
-  return forward_ptr;
-}
-
 #ifndef PRODUCT
 // It's OK to call this multi-threaded;  the worst thing
 // that can happen is that we'll get a bunch of closely
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -329,9 +329,6 @@
   oop _overflow_list;
   NOT_PRODUCT(ssize_t _num_par_pushes;)
 
-  // If true, older generation does not support promotion undo, so avoid.
-  static bool _avoid_promotion_undo;
-
   // This closure is used by the reference processor to filter out
   // references to live referent.
   DefNewGeneration::IsAliveClosure _is_alive_closure;
@@ -349,9 +346,6 @@
 
   bool _survivor_overflow;
 
-  bool avoid_promotion_undo() { return _avoid_promotion_undo; }
-  void set_avoid_promotion_undo(bool v) { _avoid_promotion_undo = v; }
-
   bool survivor_overflow() { return _survivor_overflow; }
   void set_survivor_overflow(bool v) { _survivor_overflow = v; }
 
@@ -372,7 +366,6 @@
 
   // override
   virtual bool refs_discovery_is_mt()     const {
-    assert(UseParNewGC, "ParNewGeneration only when UseParNewGC");
     return ParallelGCThreads > 1;
   }
 
@@ -386,20 +379,7 @@
   // "obj" is the object to be copied, "m" is a recent value of its mark
   // that must not contain a forwarding pointer (though one might be
  // inserted in "obj"'s mark word by a parallel thread).
-  inline oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
-                             oop obj, size_t obj_sz, markOop m) {
-    if (_avoid_promotion_undo) {
-       return copy_to_survivor_space_avoiding_promotion_undo(par_scan_state,
-                                                             obj, obj_sz, m);
-    }
-
-    return copy_to_survivor_space_with_undo(par_scan_state, obj, obj_sz, m);
-  }
-
-  oop copy_to_survivor_space_avoiding_promotion_undo(ParScanThreadState* par_scan_state,
-                             oop obj, size_t obj_sz, markOop m);
-
-  oop copy_to_survivor_space_with_undo(ParScanThreadState* par_scan_state,
+  oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
                              oop obj, size_t obj_sz, markOop m);
 
   // in support of testing overflow code
--- a/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -28,6 +28,7 @@
 #include "gc_implementation/parNew/parNewGeneration.hpp"
 #include "gc_implementation/parNew/parOopClosures.hpp"
 #include "memory/cardTableRS.hpp"
+#include "memory/genCollectedHeap.hpp"
 
 template <class T> inline void ParScanWeakRefClosure::do_oop_work(T* p) {
   assert (!oopDesc::is_null(*p), "null weak reference?");
--- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -53,8 +53,8 @@
     verify_card       = CardTableModRefBS::CT_MR_BS_last_reserved + 5
   };
 
-  CardTableExtension(MemRegion whole_heap, int max_covered_regions) :
-    CardTableModRefBS(whole_heap, max_covered_regions) { }
+  CardTableExtension(MemRegion whole_heap) :
+    CardTableModRefBS(whole_heap) { }
 
   // Too risky for the 4/10/02 putback
   // BarrierSet::Name kind() { return BarrierSet::CardTableExtension; }
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -76,7 +76,7 @@
 
   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
-  CardTableExtension* const barrier_set = new CardTableExtension(reserved_region(), 3);
+  CardTableExtension* const barrier_set = new CardTableExtension(reserved_region());
   barrier_set->initialize();
   _barrier_set = barrier_set;
   oopDesc::set_bs(_barrier_set);
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -168,7 +168,6 @@
   {
     HandleMark hm;
 
-    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
     TraceCollectorStats tcs(counters());
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -2055,7 +2055,6 @@
     gc_task_manager()->task_idle_workers();
     heap->set_par_threads(gc_task_manager()->active_workers());
 
-    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
     TraceCollectorStats tcs(counters());
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -330,7 +330,6 @@
     ResourceMark rm;
     HandleMark hm;
 
-    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
     TraceCollectorStats tcs(counters());
--- a/src/share/vm/gc_implementation/shared/ageTable.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/shared/ageTable.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -55,7 +55,10 @@
 
   // add entry
   void add(oop p, size_t oop_size) {
-    uint age = p->age();
+    add(p->age(), oop_size);
+  }
+
+  void add(uint age, size_t oop_size) {
     assert(age > 0 && age < table_size, "invalid age of object");
     sizes[age] += oop_size;
   }
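
The new overload is what lets the G1 copy path earlier in this changeset account a copied object by age without re-reading its header. Given an oop obj or a precomputed uint age, both entry points feed the same per-age histogram:

  age_table()->add(obj, word_sz);   // reads obj->age() from the header
  age_table()->add(age, word_sz);   // header-free variant used by G1
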
--- a/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -138,6 +138,13 @@
   return res;
 }
 
+void SurrogateLockerThread::report_missing_slt() {
+  vm_exit_during_initialization(
+    "GC before GC support fully initialized: "
+    "SLT is needed but has not yet been created.");
+  ShouldNotReachHere();
+}
+
 void SurrogateLockerThread::manipulatePLL(SLT_msg_type msg) {
   MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag);
   assert(_buffer == empty, "Should be empty");
--- a/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -93,6 +93,9 @@
  public:
   static SurrogateLockerThread* make(TRAPS);
 
+  // Terminate VM with error message that SLT needed but not yet created.
+  static void report_missing_slt();
+
   SurrogateLockerThread();
 
   bool is_hidden_from_external_view() const     { return true; }
--- a/src/share/vm/gc_implementation/shared/gcTrace.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/shared/gcTrace.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -33,8 +33,8 @@
 #include "memory/referenceProcessorStats.hpp"
 #include "runtime/os.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/ticks.inline.hpp"
-
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/evacuationInfo.hpp"
 #endif
--- a/src/share/vm/gc_implementation/shared/gcTrace.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/shared/gcTrace.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -33,12 +33,11 @@
 #include "memory/allocation.hpp"
 #include "memory/metaspace.hpp"
 #include "memory/referenceType.hpp"
+#include "utilities/macros.hpp"
+#include "utilities/ticks.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1YCTypes.hpp"
 #endif
-#include "utilities/macros.hpp"
-#include "utilities/ticks.hpp"
-
 
 class EvacuationInfo;
 class GCHeapSummary;
--- a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -31,6 +31,7 @@
 #include "runtime/os.hpp"
 #include "trace/tracing.hpp"
 #include "trace/traceBackend.hpp"
+#include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/evacuationInfo.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
--- a/src/share/vm/gc_implementation/shared/gcTraceTime.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/shared/gcTraceTime.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -49,10 +49,8 @@
   }
 
   if (_doit) {
-    if (PrintGCTimeStamps) {
-      gclog_or_tty->stamp();
-      gclog_or_tty->print(": ");
-    }
+    gclog_or_tty->date_stamp(PrintGCDateStamps);
+    gclog_or_tty->stamp(PrintGCTimeStamps);
     if (PrintGCID) {
       gclog_or_tty->print("#%u: ", gc_id.id());
     }
--- a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -142,216 +142,3 @@
              "FT"[_retained], _retained_filler.start(), _retained_filler.end());
 }
 #endif // !PRODUCT
-
-const size_t ParGCAllocBufferWithBOT::ChunkSizeInWords =
-MIN2(CardTableModRefBS::par_chunk_heapword_alignment(),
-     ((size_t)Generation::GenGrain)/HeapWordSize);
-const size_t ParGCAllocBufferWithBOT::ChunkSizeInBytes =
-MIN2(CardTableModRefBS::par_chunk_heapword_alignment() * HeapWordSize,
-     (size_t)Generation::GenGrain);
-
-ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
-                                                 BlockOffsetSharedArray* bsa) :
-  ParGCAllocBuffer(word_sz),
-  _bsa(bsa),
-  _bt(bsa, MemRegion(_bottom, _hard_end)),
-  _true_end(_hard_end)
-{}
-
-// The buffer comes with its own BOT, with a shared (obviously) underlying
-// BlockOffsetSharedArray. We manipulate this BOT in the normal way
-// as we would for any contiguous space. However, on occasion we
-// need to do some buffer surgery at the extremities before we
-// start using the body of the buffer for allocations. Such surgery
-// (as explained elsewhere) is to prevent allocation on a card that
-// is in the process of being walked concurrently by another GC thread.
-// When such surgery happens at a point that is far removed (to the
-// right of the current allocation point, top), we use the "contig"
-// parameter below to directly manipulate the shared array without
-// modifying the _next_threshold state in the BOT.
-void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
-                                                     bool contig) {
-  CollectedHeap::fill_with_object(mr);
-  if (contig) {
-    _bt.alloc_block(mr.start(), mr.end());
-  } else {
-    _bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());
-  }
-}
-
-HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
-  HeapWord* res = NULL;
-  if (_true_end > _hard_end) {
-    assert((HeapWord*)align_size_down(intptr_t(_hard_end),
-                                      ChunkSizeInBytes) == _hard_end,
-           "or else _true_end should be equal to _hard_end");
-    assert(_retained, "or else _true_end should be equal to _hard_end");
-    assert(_retained_filler.end() <= _top, "INVARIANT");
-    CollectedHeap::fill_with_object(_retained_filler);
-    if (_top < _hard_end) {
-      fill_region_with_block(MemRegion(_top, _hard_end), true);
-    }
-    HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);
-    _retained_filler = MemRegion(_hard_end, FillerHeaderSize);
-    _bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());
-    _top      = _retained_filler.end();
-    _hard_end = next_hard_end;
-    _end      = _hard_end - AlignmentReserve;
-    res       = ParGCAllocBuffer::allocate(word_sz);
-    if (res != NULL) {
-      _bt.alloc_block(res, word_sz);
-    }
-  }
-  return res;
-}
-
-void
-ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {
-  ParGCAllocBuffer::undo_allocation(obj, word_sz);
-  // This may back us up beyond the previous threshold, so reset.
-  _bt.set_region(MemRegion(_top, _hard_end));
-  _bt.initialize_threshold();
-}
-
-void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
-  assert(!retain || end_of_gc, "Can only retain at GC end.");
-  if (_retained) {
-    // We're about to make the retained_filler into a block.
-    _bt.BlockOffsetArray::alloc_block(_retained_filler.start(),
-                                      _retained_filler.end());
-  }
-  // Reset _hard_end to _true_end (and update _end)
-  if (retain && _hard_end != NULL) {
-    assert(_hard_end <= _true_end, "Invariant.");
-    _hard_end = _true_end;
-    _end      = MAX2(_top, _hard_end - AlignmentReserve);
-    assert(_end <= _hard_end, "Invariant.");
-  }
-  _true_end = _hard_end;
-  HeapWord* pre_top = _top;
-
-  ParGCAllocBuffer::retire(end_of_gc, retain);
-  // Now any old _retained_filler is cut back to size, the free part is
-  // filled with a filler object, and top is past the header of that
-  // object.
-
-  if (retain && _top < _end) {
-    assert(end_of_gc && retain, "Or else retain should be false.");
-    // If the lab does not start on a card boundary, we don't want to
-    // allocate onto that card, since that might lead to concurrent
-    // allocation and card scanning, which we don't support.  So we fill
-    // the first card with a garbage object.
-    size_t first_card_index = _bsa->index_for(pre_top);
-    HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
-    if (first_card_start < pre_top) {
-      HeapWord* second_card_start =
-        _bsa->inc_by_region_size(first_card_start);
-
-      // Ensure enough room to fill with the smallest block
-      second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);
-
-      // If the end is already in the first card, don't go beyond it!
-      // Or if the remainder is too small for a filler object, gobble it up.
-      if (_hard_end < second_card_start ||
-          pointer_delta(_hard_end, second_card_start) < AlignmentReserve) {
-        second_card_start = _hard_end;
-      }
-      if (pre_top < second_card_start) {
-        MemRegion first_card_suffix(pre_top, second_card_start);
-        fill_region_with_block(first_card_suffix, true);
-      }
-      pre_top = second_card_start;
-      _top = pre_top;
-      _end = MAX2(_top, _hard_end - AlignmentReserve);
-    }
-
-    // If the lab does not end on a card boundary, we don't want to
-    // allocate onto that card, since that might lead to concurrent
-    // allocation and card scanning, which we don't support.  So we fill
-    // the last card with a garbage object.
-    size_t last_card_index = _bsa->index_for(_hard_end);
-    HeapWord* last_card_start = _bsa->address_for_index(last_card_index);
-    if (last_card_start < _hard_end) {
-
-      // Ensure enough room to fill with the smallest block
-      last_card_start = MIN2(last_card_start, _hard_end - AlignmentReserve);
-
-      // If the top is already in the last card, don't go back beyond it!
-      // Or if the remainder is too small for a filler object, gobble it up.
-      if (_top > last_card_start ||
-          pointer_delta(last_card_start, _top) < AlignmentReserve) {
-        last_card_start = _top;
-      }
-      if (last_card_start < _hard_end) {
-        MemRegion last_card_prefix(last_card_start, _hard_end);
-        fill_region_with_block(last_card_prefix, false);
-      }
-      _hard_end = last_card_start;
-      _end      = MAX2(_top, _hard_end - AlignmentReserve);
-      _true_end = _hard_end;
-      assert(_end <= _hard_end, "Invariant.");
-    }
-
-    // At this point:
-    //   1) we had a filler object from the original top to hard_end.
-    //   2) We've filled in any partial cards at the front and back.
-    if (pre_top < _hard_end) {
-      // Now we can reset the _bt to do allocation in the given area.
-      MemRegion new_filler(pre_top, _hard_end);
-      fill_region_with_block(new_filler, false);
-      _top = pre_top + ParGCAllocBuffer::FillerHeaderSize;
-      // If there's no space left, don't retain.
-      if (_top >= _end) {
-        _retained = false;
-        invalidate();
-        return;
-      }
-      _retained_filler = MemRegion(pre_top, _top);
-      _bt.set_region(MemRegion(_top, _hard_end));
-      _bt.initialize_threshold();
-      assert(_bt.threshold() > _top, "initialize_threshold failed!");
-
-      // There may be other reasons for queries into the middle of the
-      // filler object.  When such queries are done in parallel with
-      // allocation, bad things can happen, if the query involves object
-      // iteration.  So we ensure that such queries do not involve object
-      // iteration, by putting another filler object on the boundaries of
-      // such queries.  One such is the object spanning a parallel card
-      // chunk boundary.
-
-      // "chunk_boundary" is the address of the first chunk boundary less
-      // than "hard_end".
-      HeapWord* chunk_boundary =
-        (HeapWord*)align_size_down(intptr_t(_hard_end-1), ChunkSizeInBytes);
-      assert(chunk_boundary < _hard_end, "Or else above did not work.");
-      assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,
-             "Consequence of last card handling above.");
-
-      if (_top <= chunk_boundary) {
-        assert(_true_end == _hard_end, "Invariant.");
-        while (_top <= chunk_boundary) {
-          assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
-                 "Consequence of last card handling above.");
-          _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
-          CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
-          _hard_end = chunk_boundary;
-          chunk_boundary -= ChunkSizeInWords;
-        }
-        _end = _hard_end - AlignmentReserve;
-        assert(_top <= _end, "Invariant.");
-        // Now reset the initial filler chunk so it doesn't overlap with
-        // the one(s) inserted above.
-        MemRegion new_filler(pre_top, _hard_end);
-        fill_region_with_block(new_filler, false);
-      }
-    } else {
-      _retained = false;
-      invalidate();
-    }
-  } else {
-    assert(!end_of_gc ||
-           (!_retained && _true_end == _hard_end), "Checking.");
-  }
-  assert(_end <= _hard_end, "Invariant.");
-  assert(_top < _end || _top == _hard_end, "Invariant");
-}
--- a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -216,44 +216,4 @@
   }
 };
 
-class ParGCAllocBufferWithBOT: public ParGCAllocBuffer {
-  BlockOffsetArrayContigSpace _bt;
-  BlockOffsetSharedArray*     _bsa;
-  HeapWord*                   _true_end;  // end of the whole ParGCAllocBuffer
-
-  static const size_t ChunkSizeInWords;
-  static const size_t ChunkSizeInBytes;
-  HeapWord* allocate_slow(size_t word_sz);
-
-  void fill_region_with_block(MemRegion mr, bool contig);
-
-public:
-  ParGCAllocBufferWithBOT(size_t word_sz, BlockOffsetSharedArray* bsa);
-
-  HeapWord* allocate(size_t word_sz) {
-    HeapWord* res = ParGCAllocBuffer::allocate(word_sz);
-    if (res != NULL) {
-      _bt.alloc_block(res, word_sz);
-    } else {
-      res = allocate_slow(word_sz);
-    }
-    return res;
-  }
-
-  void undo_allocation(HeapWord* obj, size_t word_sz);
-
-  virtual void set_buf(HeapWord* buf_start) {
-    ParGCAllocBuffer::set_buf(buf_start);
-    _true_end = _hard_end;
-    _bt.set_region(MemRegion(buf_start, word_sz()));
-    _bt.initialize_threshold();
-  }
-
-  virtual void retire(bool end_of_gc, bool retain);
-
-  MemRegion range() {
-    return MemRegion(_top, _true_end);
-  }
-};
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
--- a/src/share/vm/gc_interface/gcCause.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_interface/gcCause.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -54,6 +54,9 @@
     case _wb_young_gc:
       return "WhiteBox Initiated Young GC";
 
+    case _wb_conc_mark:
+      return "WhiteBox Initiated Concurrent Mark";
+
     case _update_allocation_context_stats_inc:
     case _update_allocation_context_stats_full:
       return "Update Allocation Context Stats";
--- a/src/share/vm/gc_interface/gcCause.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/gc_interface/gcCause.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -47,6 +47,7 @@
     _heap_inspection,
     _heap_dump,
     _wb_young_gc,
+    _wb_conc_mark,
     _update_allocation_context_stats_inc,
     _update_allocation_context_stats_full,
 
--- a/src/share/vm/memory/allocation.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/allocation.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -50,8 +50,7 @@
                                  size_t word_size, bool read_only,
                                  MetaspaceObj::Type type, TRAPS) throw() {
  // Klass has its own operator new
-  return Metaspace::allocate(loader_data, word_size, read_only,
-                             type, CHECK_NULL);
+  return Metaspace::allocate(loader_data, word_size, read_only, type, THREAD);
 }
 
 bool MetaspaceObj::is_shared() const {
--- a/src/share/vm/memory/barrierSet.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/barrierSet.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -49,7 +49,12 @@
     TargetUninitialized = 1
   };
 protected:
-  int _max_covered_regions;
+  // Some barrier sets create tables whose elements correspond to parts of
+  // the heap; the CardTableModRefBS is an example.  Such barrier sets will
+  // normally reserve space for such tables, and commit parts of the table
+  // "covering" parts of the heap that are committed. At most one covered
+  // region per generation is needed.
+  static const int _max_covered_regions = 2;
   Name _kind;
 
 public:
@@ -159,18 +164,6 @@
 protected:
   virtual void write_region_work(MemRegion mr) = 0;
 public:
-
-  // Some barrier sets create tables whose elements correspond to parts of
-  // the heap; the CardTableModRefBS is an example.  Such barrier sets will
-  // normally reserve space for such tables, and commit parts of the table
-  // "covering" parts of the heap that are committed.  The constructor is
-  // passed the maximum number of independently committable subregions to
-  // be covered, and the "resize_covered_region" function allows the
-  // sub-parts of the heap to inform the barrier set of changes of their
-  // sizes.
-  BarrierSet(int max_covered_regions) :
-    _max_covered_regions(max_covered_regions) {}
-
  // Inform the BarrierSet that the covered heap region that starts
   // with "base" has been changed to have the given size (possibly from 0,
   // for initialization.)
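
With the count fixed at two (at most one covered region per generation), it no longer needs to be threaded through every barrier set constructor, which is what the CardTableExtension, CardTableRS and G1SATB hunks elsewhere in this changeset remove. Construction reduces to roughly the following, where whole_heap is the reserved heap MemRegion as in parallelScavengeHeap.cpp:

  CardTableModRefBSForCTRS* bs = new CardTableModRefBSForCTRS(whole_heap);
  bs->initialize();   // commits the table parts covering committed heap
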
--- a/src/share/vm/memory/binaryTreeDictionary.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/binaryTreeDictionary.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -23,8 +23,8 @@
  */
 
 #include "precompiled.hpp"
-#include "utilities/macros.hpp"
 #include "gc_implementation/shared/allocationStats.hpp"
+#include "gc_implementation/shared/spaceDecorator.hpp"
 #include "memory/binaryTreeDictionary.hpp"
 #include "memory/freeList.hpp"
 #include "memory/freeBlockDictionary.hpp"
@@ -32,7 +32,6 @@
 #include "runtime/globals.hpp"
 #include "utilities/ostream.hpp"
 #include "utilities/macros.hpp"
-#include "gc_implementation/shared/spaceDecorator.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
--- a/src/share/vm/memory/blockOffsetTable.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/blockOffsetTable.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -251,12 +251,6 @@
   // Return the address indicating the start of the region corresponding to
   // "index" in "_offset_array".
   HeapWord* address_for_index(size_t index) const;
-
-  // Return the address "p" incremented by the size of
-  // a region.  This method does not align the address
-  // returned to the start of a region.  It is a simple
-  // primitive.
-  HeapWord* inc_by_region_size(HeapWord* p) const { return p + N_words; }
 };
 
 //////////////////////////////////////////////////////////////////////////
--- a/src/share/vm/memory/cardTableModRefBS.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/cardTableModRefBS.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -53,9 +53,8 @@
   return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
 }
 
-CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
-                                     int max_covered_regions):
-  ModRefBarrierSet(max_covered_regions),
+CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap) :
+  ModRefBarrierSet(),
   _whole_heap(whole_heap),
   _guard_index(0),
   _guard_region(),
@@ -463,19 +462,6 @@
     // equal to active_workers.  When a different mechanism for shutting
     // off parallelism is used, then active_workers can be used in
     // place of n_par_threads.
-    //  This is an example of a path where n_par_threads is
-    // set to 0 to turn off parallelism.
-    //  [7] CardTableModRefBS::non_clean_card_iterate()
-    //  [8] CardTableRS::younger_refs_in_space_iterate()
-    //  [9] Generation::younger_refs_in_space_iterate()
-    //  [10] OneContigSpaceCardGeneration::younger_refs_iterate()
-    //  [11] CompactingPermGenGen::younger_refs_iterate()
-    //  [12] CardTableRS::younger_refs_iterate()
-    //  [13] SharedHeap::process_strong_roots()
-    //  [14] G1CollectedHeap::verify()
-    //  [15] Universe::verify()
-    //  [16] G1CollectedHeap::do_collection_pause_at_safepoint()
-    //
     int n_threads =  SharedHeap::heap()->n_par_threads();
     bool is_par = n_threads > 0;
     if (is_par) {
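
The deleted call-stack comment documented one concrete path; the surviving logic is the part that matters: n_par_threads doubles as the parallelism switch, with zero meaning serial iteration. Condensed as a paraphrase of the code above (not a compilable excerpt):

    int n_threads = SharedHeap::heap()->n_par_threads();
    bool is_par = n_threads > 0;   // 0 is the "parallelism off" signal
    if (is_par) {
      // split the non-clean card ranges across the GC worker gang
    } else {
      // walk the non-clean cards on the current thread alone
    }
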
--- a/src/share/vm/memory/cardTableModRefBS.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/cardTableModRefBS.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -284,7 +284,7 @@
     return bsn == BarrierSet::CardTableModRef || ModRefBarrierSet::is_a(bsn);
   }
 
-  CardTableModRefBS(MemRegion whole_heap, int max_covered_regions);
+  CardTableModRefBS(MemRegion whole_heap);
   ~CardTableModRefBS();
 
   virtual void initialize();
@@ -466,11 +466,6 @@
   void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
   void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
   void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
-
-  static size_t par_chunk_heapword_alignment() {
-    return ParGCCardsPerStrideChunk * card_size_in_words;
-  }
-
 };
 
 class CardTableRS;
@@ -482,9 +477,8 @@
   bool card_will_be_scanned(jbyte cv);
   bool card_may_have_been_dirty(jbyte cv);
 public:
-  CardTableModRefBSForCTRS(MemRegion whole_heap,
-                           int max_covered_regions) :
-    CardTableModRefBS(whole_heap, max_covered_regions) {}
+  CardTableModRefBSForCTRS(MemRegion whole_heap) :
+    CardTableModRefBS(whole_heap) {}
 
   void set_CTRS(CardTableRS* rs) { _rs = rs; }
 };
--- a/src/share/vm/memory/cardTableRS.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/cardTableRS.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -38,21 +38,18 @@
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #endif // INCLUDE_ALL_GCS
 
-CardTableRS::CardTableRS(MemRegion whole_heap,
-                         int max_covered_regions) :
+CardTableRS::CardTableRS(MemRegion whole_heap) :
   GenRemSet(),
-  _cur_youngergen_card_val(youngergenP1_card),
-  _regions_to_iterate(max_covered_regions - 1)
+  _cur_youngergen_card_val(youngergenP1_card)
 {
 #if INCLUDE_ALL_GCS
   if (UseG1GC) {
-      _ct_bs = new G1SATBCardTableLoggingModRefBS(whole_heap,
-                                                  max_covered_regions);
+      _ct_bs = new G1SATBCardTableLoggingModRefBS(whole_heap);
   } else {
-    _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
+    _ct_bs = new CardTableModRefBSForCTRS(whole_heap);
   }
 #else
-  _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
+  _ct_bs = new CardTableModRefBSForCTRS(whole_heap);
 #endif
   _ct_bs->initialize();
   set_bs(_ct_bs);
@@ -286,14 +283,14 @@
   // Convert the assertion check to a warning if we are running
   // CMS+ParNew until related bug is fixed.
   MemRegion ur    = sp->used_region();
-  assert(ur.contains(urasm) || (UseConcMarkSweepGC && UseParNewGC),
+  assert(ur.contains(urasm) || (UseConcMarkSweepGC),
          err_msg("Did you forget to call save_marks()? "
                  "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
                  "[" PTR_FORMAT ", " PTR_FORMAT ")",
                  p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end())));
   // In the case of CMS+ParNew, issue a warning
   if (!ur.contains(urasm)) {
-    assert(UseConcMarkSweepGC && UseParNewGC, "Tautology: see assert above");
+    assert(UseConcMarkSweepGC, "Tautology: see assert above");
     warning("CMS+ParNew: Did you forget to call save_marks()? "
             "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
             "[" PTR_FORMAT ", " PTR_FORMAT ")",
@@ -612,21 +609,3 @@
     _ct_bs->verify();
     }
   }
-
-
-void CardTableRS::verify_aligned_region_empty(MemRegion mr) {
-  if (!mr.is_empty()) {
-    jbyte* cur_entry = byte_for(mr.start());
-    jbyte* limit = byte_after(mr.last());
-    // The region mr may not start on a card boundary so
-    // the first card may reflect a write to the space
-    // just prior to mr.
-    if (!is_aligned(mr.start())) {
-      cur_entry++;
-    }
-    for (;cur_entry < limit; cur_entry++) {
-      guarantee(*cur_entry == CardTableModRefBS::clean_card,
-                "Unexpected dirty card found");
-    }
-  }
-}
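
The CardTableRS constructor keeps its barrier-set selection unchanged apart from the dropped argument. Condensed into one helper for reference (the function name is illustrative; the real code inlines this in the constructor, and the non-INCLUDE_ALL_GCS build unconditionally takes the second branch):

    static CardTableModRefBS* select_ct_bs(MemRegion whole_heap) {
    #if INCLUDE_ALL_GCS
      if (UseG1GC) {
        return new G1SATBCardTableLoggingModRefBS(whole_heap);
      }
    #endif
      return new CardTableModRefBSForCTRS(whole_heap);
    }
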
--- a/src/share/vm/memory/cardTableRS.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/cardTableRS.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -83,7 +83,8 @@
 
   jbyte _cur_youngergen_card_val;
 
-  int _regions_to_iterate;
+  // Number of generations, plus one for lingering PermGen issues in CardTableRS.
+  static const int _regions_to_iterate = 3;
 
   jbyte cur_youngergen_card_val() {
     return _cur_youngergen_card_val;
@@ -101,7 +102,7 @@
   jbyte find_unused_youngergenP_card_value();
 
 public:
-  CardTableRS(MemRegion whole_heap, int max_covered_regions);
+  CardTableRS(MemRegion whole_heap);
   ~CardTableRS();
 
   // *** GenRemSet functions.
@@ -137,7 +138,6 @@
   }
 
   void verify();
-  void verify_aligned_region_empty(MemRegion mr);
 
   void clear(MemRegion mr) { _ct_bs->clear(mr); }
   void clear_into_younger(Generation* old_gen);
--- a/src/share/vm/memory/collectorPolicy.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/collectorPolicy.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -152,9 +152,8 @@
   return result;
 }
 
-GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
-                                           int max_covered_regions) {
-  return new CardTableRS(whole_heap, max_covered_regions);
+GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap) {
+  return new CardTableRS(whole_heap);
 }
 
 void CollectorPolicy::cleared_all_soft_refs() {
@@ -909,31 +908,14 @@
 }
 
 void MarkSweepPolicy::initialize_generations() {
-  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, CURRENT_PC,
-    AllocFailStrategy::RETURN_NULL);
-  if (_generations == NULL) {
-    vm_exit_during_initialization("Unable to allocate gen spec");
-  }
-
-  if (UseParNewGC) {
-    _generations[0] = new GenerationSpec(Generation::ParNew, _initial_young_size, _max_young_size);
-  } else {
-    _generations[0] = new GenerationSpec(Generation::DefNew, _initial_young_size, _max_young_size);
-  }
+  _generations = NEW_C_HEAP_ARRAY(GenerationSpecPtr, number_of_generations(), mtGC);
+  _generations[0] = new GenerationSpec(Generation::DefNew, _initial_young_size, _max_young_size);
   _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_old_size, _max_old_size);
-
-  if (_generations[0] == NULL || _generations[1] == NULL) {
-    vm_exit_during_initialization("Unable to allocate gen spec");
-  }
 }
 
 void MarkSweepPolicy::initialize_gc_policy_counters() {
   // Initialize the policy counters - 2 collectors, 3 generations.
-  if (UseParNewGC) {
-    _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
-  } else {
-    _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
-  }
+  _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
 }
 
 /////////////// Unit tests ///////////////
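
Two simplifications land in the MarkSweepPolicy hunks above: the UseParNewGC branches disappear (ParNew no longer pairs with the serial old generation), and the hand-written NULL handling goes away because the plain allocation macro already aborts on failure. A sketch of the macro distinction that makes the checks deletable (simplified; see the allocation macros for the precise definitions):

    // NEW_C_HEAP_ARRAY(T, n, flags) exits the VM on allocation failure,
    // so the result needs no NULL check:
    GenerationSpecPtr* gens = NEW_C_HEAP_ARRAY(GenerationSpecPtr, 2, mtGC);
    // The NEW_C_HEAP_ARRAY3(..., AllocFailStrategy::RETURN_NULL) form used
    // previously returns NULL instead, forcing the caller to check and call
    // vm_exit_during_initialization itself.
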
--- a/src/share/vm/memory/collectorPolicy.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/collectorPolicy.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -152,10 +152,7 @@
 
   virtual BarrierSet::Name barrier_set_name() = 0;
 
-  // Create the remembered set (to cover the given reserved region,
-  // allowing breaking up into at most "max_covered_regions").
-  virtual GenRemSet* create_rem_set(MemRegion reserved,
-                                    int max_covered_regions);
+  virtual GenRemSet* create_rem_set(MemRegion reserved);
 
   // This method controls how a collector satisfies a request
   // for a block of memory.  "gc_time_limit_was_exceeded" will
--- a/src/share/vm/memory/defNewGeneration.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/defNewGeneration.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -29,7 +29,6 @@
 #include "gc_implementation/shared/cSpaceCounters.hpp"
 #include "gc_implementation/shared/generationCounters.hpp"
 #include "gc_implementation/shared/copyFailedInfo.hpp"
-#include "memory/generation.inline.hpp"
 #include "utilities/stack.hpp"
 
 class ContiguousSpace;
@@ -340,9 +339,6 @@
   virtual const char* name() const;
   virtual const char* short_name() const { return "DefNew"; }
 
-  bool must_be_youngest() const { return true; }
-  bool must_be_oldest() const { return false; }
-
   // PrintHeapAtGC support.
   void print_on(outputStream* st) const;
 
--- a/src/share/vm/memory/filemap.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/filemap.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -217,9 +217,14 @@
           EXCEPTION_MARK; // The following call should never throw, but would exit VM on error.
           SharedClassUtil::update_shared_classpath(cpe, ent, st.st_mtime, st.st_size, THREAD);
         } else {
-          ent->_filesize  = -1;
-          if (!os::dir_is_empty(name)) {
-            ClassLoader::exit_with_path_failure("Cannot have non-empty directory in archived classpaths", name);
+          struct stat st;
+          if ((os::stat(name, &st) == 0) && ((st.st_mode & S_IFDIR) == S_IFDIR)) {
+            if (!os::dir_is_empty(name)) {
+              ClassLoader::exit_with_path_failure("Cannot have non-empty directory in archived classpaths", name);
+            }
+            ent->_filesize = -1;
+          } else {
+            ent->_filesize = -2;
           }
         }
         ent->_name = strptr;
@@ -271,7 +276,7 @@
         fail_continue("directory is not empty: %s", name);
         ok = false;
       }
-    } else {
+    } else if (ent->is_jar()) {
       if (ent->_timestamp != st.st_mtime ||
           ent->_filesize != st.st_size) {
         ok = false;
--- a/src/share/vm/memory/filemap.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/filemap.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -44,8 +44,11 @@
 class SharedClassPathEntry VALUE_OBJ_CLASS_SPEC {
 public:
   const char *_name;
-  time_t _timestamp;          // jar timestamp,  0 if is directory
-  long   _filesize;           // jar file size, -1 if is directory
+  time_t _timestamp;          // jar timestamp,  0 if directory or other
+  long   _filesize;           // jar file size, -1 if directory, -2 if other
+  bool is_jar() {
+    return _timestamp != 0;
+  }
   bool is_dir() {
     return _filesize == -1;
   }
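
Together with the filemap.cpp hunk above, the entry kind is now encoded in the two existing fields instead of a separate tag. A sketch of the convention under an illustrative struct name (is_other() is a hypothetical helper, not part of the change):

    struct SharedPathEntrySketch {
      time_t _timestamp;   // nonzero only for jar files
      long   _filesize;    // size for jars, -1 for directories, -2 otherwise
      bool is_jar()   const { return _timestamp != 0; }
      bool is_dir()   const { return _filesize == -1; }
      bool is_other() const { return _filesize == -2; }
    };
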
--- a/src/share/vm/memory/freeBlockDictionary.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/freeBlockDictionary.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -23,14 +23,13 @@
  */
 
 #include "precompiled.hpp"
+#include "memory/freeBlockDictionary.hpp"
+#include "memory/metachunk.hpp"
+#include "runtime/thread.inline.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
 #endif // INCLUDE_ALL_GCS
-#include "memory/freeBlockDictionary.hpp"
-#include "memory/metachunk.hpp"
-#include "runtime/thread.inline.hpp"
-#include "utilities/macros.hpp"
 
 #ifndef PRODUCT
 template <class Chunk> Mutex* FreeBlockDictionary<Chunk>::par_lock() const {
--- a/src/share/vm/memory/freeList.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/freeList.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -31,7 +31,6 @@
 #include "runtime/mutex.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/macros.hpp"
-
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
 #endif // INCLUDE_ALL_GCS
--- a/src/share/vm/memory/genCollectedHeap.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -36,7 +36,6 @@
 #include "memory/gcLocker.inline.hpp"
 #include "memory/genCollectedHeap.hpp"
 #include "memory/genOopClosures.inline.hpp"
-#include "memory/generation.inline.hpp"
 #include "memory/generationSpec.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/sharedHeap.hpp"
@@ -109,13 +108,11 @@
 
   char* heap_address;
   size_t total_reserved = 0;
-  int n_covered_regions = 0;
   ReservedSpace heap_rs;
 
   size_t heap_alignment = collector_policy()->heap_alignment();
 
-  heap_address = allocate(heap_alignment, &total_reserved,
-                          &n_covered_regions, &heap_rs);
+  heap_address = allocate(heap_alignment, &total_reserved, &heap_rs);
 
   if (!heap_rs.is_reserved()) {
     vm_shutdown_during_initialization(
@@ -125,7 +122,7 @@
 
   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
-  _rem_set = collector_policy()->create_rem_set(reserved_region(), n_covered_regions);
+  _rem_set = collector_policy()->create_rem_set(reserved_region());
   set_barrier_set(rem_set()->bs());
 
   _gch = this;
@@ -152,14 +149,12 @@
 
 char* GenCollectedHeap::allocate(size_t alignment,
                                  size_t* _total_reserved,
-                                 int* _n_covered_regions,
                                  ReservedSpace* heap_rs){
   const char overflow_msg[] = "The size of the object heap + VM data exceeds "
     "the maximum representable size";
 
   // Now figure out the total size.
   size_t total_reserved = 0;
-  int n_covered_regions = 0;
   const size_t pageSize = UseLargePages ?
       os::large_page_size() : os::vm_page_size();
 
@@ -170,18 +165,12 @@
     if (total_reserved < _gen_specs[i]->max_size()) {
       vm_exit_during_initialization(overflow_msg);
     }
-    n_covered_regions += _gen_specs[i]->n_covered_regions();
   }
   assert(total_reserved % alignment == 0,
          err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
                  SIZE_FORMAT, total_reserved, alignment));
 
-  // Needed until the cardtable is fixed to have the right number
-  // of covered regions.
-  n_covered_regions += 2;
-
   *_total_reserved = total_reserved;
-  *_n_covered_regions = n_covered_regions;
 
   *heap_rs = Universe::reserve_heap(total_reserved, alignment);
   return heap_rs->base();
@@ -192,10 +181,10 @@
   SharedHeap::post_initialize();
   GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
   guarantee(policy->is_generation_policy(), "Illegal policy type");
-  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
-  assert(def_new_gen->kind() == Generation::DefNew ||
-         def_new_gen->kind() == Generation::ParNew,
-         "Wrong generation kind");
+  assert((get_gen(0)->kind() == Generation::DefNew) ||
+         (get_gen(0)->kind() == Generation::ParNew),
+    "Wrong youngest generation type");
+  DefNewGeneration* def_new_gen = (DefNewGeneration*)get_gen(0);
 
   Generation* old_gen = get_gen(1);
   assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
@@ -373,7 +362,6 @@
 
     bool complete = full && (max_level == (n_gens()-1));
     const char* gc_cause_prefix = complete ? "Full GC" : "GC";
-    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
     // so we can assume here that the next GC id is what we want.
@@ -1128,10 +1116,8 @@
 
 void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
 #if INCLUDE_ALL_GCS
-  if (UseParNewGC) {
+  if (UseConcMarkSweepGC) {
     workers()->print_worker_threads_on(st);
-  }
-  if (UseConcMarkSweepGC) {
     ConcurrentMarkSweepThread::print_all_on(st);
   }
 #endif // INCLUDE_ALL_GCS
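
With the covered-region count gone, GenCollectedHeap::allocate reduces to summing per-generation maximums and reserving that total. The overflow guard in the loop above relies on a property of unsigned arithmetic: a wrapped sum is strictly smaller than the addend just added. A self-contained illustration of the same check:

    #include <cstddef>
    // Adds 'add' to '*total'; returns false when size_t arithmetic wrapped,
    // mirroring the overflow_msg exit path above.
    static bool accumulate_checked(size_t* total, size_t add) {
      *total += add;
      return *total >= add;
    }
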
--- a/src/share/vm/memory/genCollectedHeap.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -121,9 +121,7 @@
 
   // Returns JNI_OK on success
   virtual jint initialize();
-  char* allocate(size_t alignment,
-                 size_t* _total_reserved, int* _n_covered_regions,
-                 ReservedSpace* heap_rs);
+  char* allocate(size_t alignment, size_t* _total_reserved, ReservedSpace* heap_rs);
 
   // Does operations required after initialization has been done.
   void post_initialize();
@@ -264,12 +262,12 @@
 
   // We don't need barriers for stores to objects in the
   // young gen and, a fortiori, for initializing stores to
-  // objects therein. This applies to {DefNew,ParNew}+{Tenured,CMS}
+  // objects therein. This applies to DefNew+Tenured and ParNew+CMS
   // only and may need to be re-examined in case other
   // kinds of collectors are implemented in the future.
   virtual bool can_elide_initializing_store_barrier(oop new_obj) {
     // We wanted to assert that:-
-    // assert(UseParNewGC || UseSerialGC || UseConcMarkSweepGC,
+    // assert(UseSerialGC || UseConcMarkSweepGC,
     //       "Check can_elide_initializing_store_barrier() for this collector");
     // but unfortunately the flag UseSerialGC need not necessarily always
     // be set when DefNew+Tenured are being used.
--- a/src/share/vm/memory/genMarkSweep.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/genMarkSweep.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -37,7 +37,6 @@
 #include "memory/genCollectedHeap.hpp"
 #include "memory/genMarkSweep.hpp"
 #include "memory/genOopClosures.inline.hpp"
-#include "memory/generation.inline.hpp"
 #include "memory/modRefBarrierSet.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/space.hpp"
--- a/src/share/vm/memory/genRemSet.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/genRemSet.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -105,17 +105,6 @@
 
   virtual void verify() = 0;
 
-  // Verify that the remembered set has no entries for
-  // the heap interval denoted by mr.  If there are any
-  // alignment constraints on the remembered set, only the
-  // part of the region that is aligned is checked.
-  //
-  //   alignment boundaries
-  //   +--------+-------+--------+-------+
-  //         [ region mr              )
-  //            [ part checked   )
-  virtual void verify_aligned_region_empty(MemRegion mr) = 0;
-
   // If appropriate, print some information about the remset on "tty".
   virtual void print() {}
 
--- a/src/share/vm/memory/generation.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/generation.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -36,7 +36,6 @@
 #include "memory/genOopClosures.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/generation.hpp"
-#include "memory/generation.inline.hpp"
 #include "memory/space.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
@@ -152,13 +151,6 @@
   return blk.sp != NULL;
 }
 
-DefNewGeneration* Generation::as_DefNewGeneration() {
-  assert((kind() == Generation::DefNew) ||
-         (kind() == Generation::ParNew),
-    "Wrong youngest generation type");
-  return (DefNewGeneration*) this;
-}
-
 Generation* Generation::next_gen() const {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   int next = level() + 1;
@@ -220,12 +212,6 @@
   return NULL;
 }
 
-void Generation::par_promote_alloc_undo(int thread_num,
-                                        HeapWord* obj, size_t word_sz) {
-  // Could do a bad general impl here that gets a lock.  But no.
-  guarantee(false, "No good general implementation.");
-}
-
 Space* Generation::space_containing(const void* p) const {
   GenerationIsInReservedClosure blk(p);
   // Cast away const
@@ -616,252 +602,3 @@
 // Currently nothing to do.
 void CardGeneration::prepare_for_verify() {}
 
-
-void OneContigSpaceCardGeneration::collect(bool   full,
-                                           bool   clear_all_soft_refs,
-                                           size_t size,
-                                           bool   is_tlab) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-
-  SpecializationStats::clear();
-  // Temporarily expand the span of our ref processor, so
-  // refs discovery is over the entire heap, not just this generation
-  ReferenceProcessorSpanMutator
-    x(ref_processor(), gch->reserved_region());
-
-  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
-  gc_timer->register_gc_start();
-
-  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
-  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
-
-  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
-
-  gc_timer->register_gc_end();
-
-  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
-
-  SpecializationStats::print();
-}
-
-HeapWord*
-OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
-                                                  bool is_tlab,
-                                                  bool parallel) {
-  assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
-  if (parallel) {
-    MutexLocker x(ParGCRareEvent_lock);
-    HeapWord* result = NULL;
-    size_t byte_size = word_size * HeapWordSize;
-    while (true) {
-      expand(byte_size, _min_heap_delta_bytes);
-      if (GCExpandToAllocateDelayMillis > 0) {
-        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
-      }
-      result = _the_space->par_allocate(word_size);
-      if ( result != NULL) {
-        return result;
-      } else {
-        // If there's not enough expansion space available, give up.
-        if (_virtual_space.uncommitted_size() < byte_size) {
-          return NULL;
-        }
-        // else try again
-      }
-    }
-  } else {
-    expand(word_size*HeapWordSize, _min_heap_delta_bytes);
-    return _the_space->allocate(word_size);
-  }
-}
-
-bool OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
-  GCMutexLocker x(ExpandHeap_lock);
-  return CardGeneration::expand(bytes, expand_bytes);
-}
-
-
-void OneContigSpaceCardGeneration::shrink(size_t bytes) {
-  assert_locked_or_safepoint(ExpandHeap_lock);
-  size_t size = ReservedSpace::page_align_size_down(bytes);
-  if (size > 0) {
-    shrink_by(size);
-  }
-}
-
-
-size_t OneContigSpaceCardGeneration::capacity() const {
-  return _the_space->capacity();
-}
-
-
-size_t OneContigSpaceCardGeneration::used() const {
-  return _the_space->used();
-}
-
-
-size_t OneContigSpaceCardGeneration::free() const {
-  return _the_space->free();
-}
-
-MemRegion OneContigSpaceCardGeneration::used_region() const {
-  return the_space()->used_region();
-}
-
-size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const {
-  return _the_space->free();
-}
-
-size_t OneContigSpaceCardGeneration::contiguous_available() const {
-  return _the_space->free() + _virtual_space.uncommitted_size();
-}
-
-bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
-  assert_locked_or_safepoint(ExpandHeap_lock);
-  bool result = _virtual_space.expand_by(bytes);
-  if (result) {
-    size_t new_word_size =
-       heap_word_size(_virtual_space.committed_size());
-    MemRegion mr(_the_space->bottom(), new_word_size);
-    // Expand card table
-    Universe::heap()->barrier_set()->resize_covered_region(mr);
-    // Expand shared block offset array
-    _bts->resize(new_word_size);
-
-    // Fix for bug #4668531
-    if (ZapUnusedHeapArea) {
-      MemRegion mangle_region(_the_space->end(),
-      (HeapWord*)_virtual_space.high());
-      SpaceMangler::mangle_region(mangle_region);
-    }
-
-    // Expand space -- also expands space's BOT
-    // (which uses (part of) shared array above)
-    _the_space->set_end((HeapWord*)_virtual_space.high());
-
-    // update the space and generation capacity counters
-    update_counters();
-
-    if (Verbose && PrintGC) {
-      size_t new_mem_size = _virtual_space.committed_size();
-      size_t old_mem_size = new_mem_size - bytes;
-      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
-                      SIZE_FORMAT "K to " SIZE_FORMAT "K",
-                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
-    }
-  }
-  return result;
-}
-
-
-bool OneContigSpaceCardGeneration::grow_to_reserved() {
-  assert_locked_or_safepoint(ExpandHeap_lock);
-  bool success = true;
-  const size_t remaining_bytes = _virtual_space.uncommitted_size();
-  if (remaining_bytes > 0) {
-    success = grow_by(remaining_bytes);
-    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
-  }
-  return success;
-}
-
-void OneContigSpaceCardGeneration::shrink_by(size_t bytes) {
-  assert_locked_or_safepoint(ExpandHeap_lock);
-  // Shrink committed space
-  _virtual_space.shrink_by(bytes);
-  // Shrink space; this also shrinks the space's BOT
-  _the_space->set_end((HeapWord*) _virtual_space.high());
-  size_t new_word_size = heap_word_size(_the_space->capacity());
-  // Shrink the shared block offset array
-  _bts->resize(new_word_size);
-  MemRegion mr(_the_space->bottom(), new_word_size);
-  // Shrink the card table
-  Universe::heap()->barrier_set()->resize_covered_region(mr);
-
-  if (Verbose && PrintGC) {
-    size_t new_mem_size = _virtual_space.committed_size();
-    size_t old_mem_size = new_mem_size + bytes;
-    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
-                  name(), old_mem_size/K, new_mem_size/K);
-  }
-}
-
-// Currently nothing to do.
-void OneContigSpaceCardGeneration::prepare_for_verify() {}
-
-
-// Override for a card-table generation with one contiguous
-// space. NOTE: For reasons that are lost in the fog of history,
-// this code is used when you iterate over perm gen objects,
-// even when one uses CDS, where the perm gen has a couple of
-// other spaces; this is because CompactingPermGenGen derives
-// from OneContigSpaceCardGeneration. This should be cleaned up,
-// see CR 6897789..
-void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) {
-  _the_space->object_iterate(blk);
-}
-
-void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
-                                                 bool usedOnly) {
-  blk->do_space(_the_space);
-}
-
-void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
-  blk->set_generation(this);
-  younger_refs_in_space_iterate(_the_space, blk);
-  blk->reset_generation();
-}
-
-void OneContigSpaceCardGeneration::save_marks() {
-  _the_space->set_saved_mark();
-}
-
-
-void OneContigSpaceCardGeneration::reset_saved_marks() {
-  _the_space->reset_saved_mark();
-}
-
-
-bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() {
-  return _the_space->saved_mark_at_top();
-}
-
-#define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)      \
-                                                                                \
-void OneContigSpaceCardGeneration::                                             \
-oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {                  \
-  blk->set_generation(this);                                                    \
-  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                     \
-  blk->reset_generation();                                                      \
-  save_marks();                                                                 \
-}
-
-ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN)
-
-#undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN
-
-
-void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
-  _last_gc = WaterMark(the_space(), the_space()->top());
-
-  // update the generation and space performance counters
-  update_counters();
-  if (ZapUnusedHeapArea) {
-    the_space()->check_mangled_unused_area_complete();
-  }
-}
-
-void OneContigSpaceCardGeneration::record_spaces_top() {
-  assert(ZapUnusedHeapArea, "Not mangling unused space");
-  the_space()->set_top_for_allocations();
-}
-
-void OneContigSpaceCardGeneration::verify() {
-  the_space()->verify();
-}
-
-void OneContigSpaceCardGeneration::print_on(outputStream* st)  const {
-  Generation::print_on(st);
-  st->print("   the");
-  the_space()->print_on(st);
-}
--- a/src/share/vm/memory/generation.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/generation.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -45,9 +45,7 @@
 //   - ParNewGeneration            - a DefNewGeneration that is collected by
 //                                   several threads
 // - CardGeneration                 - abstract class adding offset array behavior
-//   - OneContigSpaceCardGeneration - abstract class holding a single
-//                                    contiguous space with card marking
-//     - TenuredGeneration         - tenured (old object) space (markSweepCompact)
+//   - TenuredGeneration             - tenured (old object) space (markSweepCompact)
 //   - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation
 //                                       (Detlefs-Printezis refinement of
 //                                       Boehm-Demers-Schenker)
@@ -55,9 +53,7 @@
 // The system configurations currently allowed are:
 //
 //   DefNewGeneration + TenuredGeneration
-//   DefNewGeneration + ConcurrentMarkSweepGeneration
 //
-//   ParNewGeneration + TenuredGeneration
 //   ParNewGeneration + ConcurrentMarkSweepGeneration
 //
 
@@ -229,10 +225,6 @@
     return _reserved.contains(p);
   }
 
-  // Check that the generation kind is DefNewGeneration or a sub
-  // class of DefNewGeneration and return a DefNewGeneration*
-  DefNewGeneration*  as_DefNewGeneration();
-
   // If some space in the generation contains the given "addr", return a
   // pointer to that space, else return "NULL".
   virtual Space* space_containing(const void* addr) const;
@@ -317,11 +309,6 @@
   virtual oop par_promote(int thread_num,
                           oop obj, markOop m, size_t word_sz);
 
-  // Undo, if possible, the most recent par_promote_alloc allocation by
-  // "thread_num" ("obj", of "word_sz").
-  virtual void par_promote_alloc_undo(int thread_num,
-                                      HeapWord* obj, size_t word_sz);
-
   // Informs the current generation that all par_promote_alloc's in the
   // collection have been completed; any supporting data structures can be
   // reset.  Default is to do nothing.
@@ -517,13 +504,6 @@
 
   int level() const { return _level; }
 
-  // Attributes
-
-  // True iff the given generation may only be the youngest generation.
-  virtual bool must_be_youngest() const = 0;
-  // True iff the given generation may only be the oldest generation.
-  virtual bool must_be_oldest() const = 0;
-
   // Reference Processing accessor
   ReferenceProcessor* const ref_processor() { return _ref_processor; }
 
@@ -657,99 +637,4 @@
   virtual bool grow_to_reserved() = 0;
 };
 
-// OneContigSpaceCardGeneration models a heap of old objects contained in a single
-// contiguous space.
-//
-// Garbage collection is performed using mark-compact.
-
-class OneContigSpaceCardGeneration: public CardGeneration {
-  friend class VMStructs;
-  // Abstractly, this is a subtype that gets access to protected fields.
-  friend class VM_PopulateDumpSharedSpace;
-
- protected:
-  ContiguousSpace*  _the_space;       // actual space holding objects
-  WaterMark  _last_gc;                // watermark between objects allocated before
-                                      // and after last GC.
-
-  // Grow generation with specified size (returns false if unable to grow)
-  virtual bool grow_by(size_t bytes);
-  // Grow generation to reserved size.
-  virtual bool grow_to_reserved();
-  // Shrink generation with specified size (returns false if unable to shrink)
-  void shrink_by(size_t bytes);
-
-  // Allocation failure
-  virtual bool expand(size_t bytes, size_t expand_bytes);
-  void shrink(size_t bytes);
-
-  // Accessing spaces
-  ContiguousSpace* the_space() const { return _the_space; }
-
- public:
-  OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
-                               int level, GenRemSet* remset,
-                               ContiguousSpace* space) :
-    CardGeneration(rs, initial_byte_size, level, remset),
-    _the_space(space)
-  {}
-
-  inline bool is_in(const void* p) const;
-
-  // Space enquiries
-  size_t capacity() const;
-  size_t used() const;
-  size_t free() const;
-
-  MemRegion used_region() const;
-
-  size_t unsafe_max_alloc_nogc() const;
-  size_t contiguous_available() const;
-
-  // Iteration
-  void object_iterate(ObjectClosure* blk);
-  void space_iterate(SpaceClosure* blk, bool usedOnly = false);
-
-  void younger_refs_iterate(OopsInGenClosure* blk);
-
-  inline CompactibleSpace* first_compaction_space() const;
-
-  virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
-  virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);
-
-  // Accessing marks
-  inline WaterMark top_mark();
-  inline WaterMark bottom_mark();
-
-#define OneContig_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)      \
-  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
-  OneContig_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v)
-  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_DECL)
-
-  void save_marks();
-  void reset_saved_marks();
-  bool no_allocs_since_save_marks();
-
-  inline size_t block_size(const HeapWord* addr) const;
-
-  inline bool block_is_obj(const HeapWord* addr) const;
-
-  virtual void collect(bool full,
-                       bool clear_all_soft_refs,
-                       size_t size,
-                       bool is_tlab);
-  HeapWord* expand_and_allocate(size_t size,
-                                bool is_tlab,
-                                bool parallel = false);
-
-  virtual void prepare_for_verify();
-
-  virtual void gc_epilogue(bool full);
-
-  virtual void record_spaces_top();
-
-  virtual void verify();
-  virtual void print_on(outputStream* st) const;
-};
-
 #endif // SHARE_VM_MEMORY_GENERATION_HPP
--- a/src/share/vm/memory/generation.inline.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_MEMORY_GENERATION_INLINE_HPP
-#define SHARE_VM_MEMORY_GENERATION_INLINE_HPP
-
-#include "memory/genCollectedHeap.hpp"
-#include "memory/generation.hpp"
-#include "memory/space.hpp"
-
-bool OneContigSpaceCardGeneration::is_in(const void* p) const {
-  return the_space()->is_in(p);
-}
-
-
-WaterMark OneContigSpaceCardGeneration::top_mark() {
-  return the_space()->top_mark();
-}
-
-CompactibleSpace*
-OneContigSpaceCardGeneration::first_compaction_space() const {
-  return the_space();
-}
-
-HeapWord* OneContigSpaceCardGeneration::allocate(size_t word_size,
-                                                 bool is_tlab) {
-  assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
-  return the_space()->allocate(word_size);
-}
-
-HeapWord* OneContigSpaceCardGeneration::par_allocate(size_t word_size,
-                                                     bool is_tlab) {
-  assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
-  return the_space()->par_allocate(word_size);
-}
-
-WaterMark OneContigSpaceCardGeneration::bottom_mark() {
-  return the_space()->bottom_mark();
-}
-
-size_t OneContigSpaceCardGeneration::block_size(const HeapWord* addr) const {
-  if (addr < the_space()->top()) return oop(addr)->size();
-  else {
-    assert(addr == the_space()->top(), "non-block head arg to block_size");
-    return the_space()->_end - the_space()->top();
-  }
-}
-
-bool OneContigSpaceCardGeneration::block_is_obj(const HeapWord* addr) const {
-  return addr < the_space()->top();
-}
-
-#endif // SHARE_VM_MEMORY_GENERATION_INLINE_HPP
--- a/src/share/vm/memory/generationSpec.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/generationSpec.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -59,10 +59,6 @@
     set_init_size(align_size_up(init_size(), alignment));
     set_max_size(align_size_up(max_size(), alignment));
   }
-
-  // Return the number of regions contained in the generation which
-  // might need to be independently covered by a remembered set.
-  virtual int n_covered_regions() const { return 1; }
 };
 
 typedef GenerationSpec* GenerationSpecPtr;
--- a/src/share/vm/memory/heapInspection.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/heapInspection.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -367,7 +367,7 @@
       _csv_format(csv_format), _print_help(print_help),
       _print_class_stats(print_class_stats), _columns(columns) {}
   void heap_inspection(outputStream* st) NOT_SERVICES_RETURN;
-  size_t populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL) NOT_SERVICES_RETURN;
+  size_t populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL) NOT_SERVICES_RETURN_(0);
   static void find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) NOT_SERVICES_RETURN;
  private:
   void iterate_over_heap(KlassInfoTable* cit, BoolObjectClosure* filter = NULL);
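
This one-character-family change is a build fix rather than a behavioral one: populate_table returns size_t, so when services are compiled out its stub body must return a value. The _-suffixed macro variants exist for exactly that case; roughly the pattern such pairs follow in utilities/macros.hpp:

    #if INCLUDE_SERVICES
    #define NOT_SERVICES_RETURN        /* declaration keeps its real body */
    #define NOT_SERVICES_RETURN_(code) /* declaration keeps its real body */
    #else
    #define NOT_SERVICES_RETURN        {}
    #define NOT_SERVICES_RETURN_(code) { return code; }
    #endif

The metaspaceShared.hpp hunk below applies the same fix with NOT_CDS_RETURN_(0).
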
--- a/src/share/vm/memory/metaspace.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/metaspace.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -47,6 +47,7 @@
 #include "services/memoryService.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
@@ -1411,7 +1412,7 @@
 
 size_t MetaspaceGC::capacity_until_GC() {
   size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
-  assert(value >= MetaspaceSize, "Not initialied properly?");
+  assert(value >= MetaspaceSize, "Not initialized properly?");
   return value;
 }
 
--- a/src/share/vm/memory/metaspaceShared.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/metaspaceShared.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -97,7 +97,7 @@
   static void preload_and_dump(TRAPS) NOT_CDS_RETURN;
   static int preload_and_dump(const char * class_list_path,
                               GrowableArray<Klass*>* class_promote_order,
-                              TRAPS) NOT_CDS_RETURN;
+                              TRAPS) NOT_CDS_RETURN_(0);
 
   static ReservedSpace* shared_rs() {
     CDS_ONLY(return _shared_rs);
--- a/src/share/vm/memory/modRefBarrierSet.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/modRefBarrierSet.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -95,10 +95,6 @@
   // The caller guarantees that "mr" contains no references.  (Perhaps it's
   // objects have been moved elsewhere.)
   virtual void clear(MemRegion mr) = 0;
-
-  // Pass along the argument to the superclass.
-  ModRefBarrierSet(int max_covered_regions) :
-    BarrierSet(max_covered_regions) {}
 };
 
 #endif // SHARE_VM_MEMORY_MODREFBARRIERSET_HPP
--- a/src/share/vm/memory/oopFactory.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/oopFactory.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -41,20 +41,20 @@
 class oopFactory: AllStatic {
  public:
   // Basic type leaf array allocation
-  static typeArrayOop    new_boolArray  (int length, TRAPS) { return TypeArrayKlass::cast(Universe::boolArrayKlassObj  ())->allocate(length, CHECK_NULL); }
-  static typeArrayOop    new_charArray  (int length, TRAPS) { return TypeArrayKlass::cast(Universe::charArrayKlassObj  ())->allocate(length, CHECK_NULL); }
-  static typeArrayOop    new_singleArray(int length, TRAPS) { return TypeArrayKlass::cast(Universe::singleArrayKlassObj())->allocate(length, CHECK_NULL); }
-  static typeArrayOop    new_doubleArray(int length, TRAPS) { return TypeArrayKlass::cast(Universe::doubleArrayKlassObj())->allocate(length, CHECK_NULL); }
-  static typeArrayOop    new_byteArray  (int length, TRAPS) { return TypeArrayKlass::cast(Universe::byteArrayKlassObj  ())->allocate(length, CHECK_NULL); }
-  static typeArrayOop    new_shortArray (int length, TRAPS) { return TypeArrayKlass::cast(Universe::shortArrayKlassObj ())->allocate(length, CHECK_NULL); }
-  static typeArrayOop    new_intArray   (int length, TRAPS) { return TypeArrayKlass::cast(Universe::intArrayKlassObj   ())->allocate(length, CHECK_NULL); }
-  static typeArrayOop    new_longArray  (int length, TRAPS) { return TypeArrayKlass::cast(Universe::longArrayKlassObj  ())->allocate(length, CHECK_NULL); }
+  static typeArrayOop    new_boolArray  (int length, TRAPS) { return TypeArrayKlass::cast(Universe::boolArrayKlassObj  ())->allocate(length, THREAD); }
+  static typeArrayOop    new_charArray  (int length, TRAPS) { return TypeArrayKlass::cast(Universe::charArrayKlassObj  ())->allocate(length, THREAD); }
+  static typeArrayOop    new_singleArray(int length, TRAPS) { return TypeArrayKlass::cast(Universe::singleArrayKlassObj())->allocate(length, THREAD); }
+  static typeArrayOop    new_doubleArray(int length, TRAPS) { return TypeArrayKlass::cast(Universe::doubleArrayKlassObj())->allocate(length, THREAD); }
+  static typeArrayOop    new_byteArray  (int length, TRAPS) { return TypeArrayKlass::cast(Universe::byteArrayKlassObj  ())->allocate(length, THREAD); }
+  static typeArrayOop    new_shortArray (int length, TRAPS) { return TypeArrayKlass::cast(Universe::shortArrayKlassObj ())->allocate(length, THREAD); }
+  static typeArrayOop    new_intArray   (int length, TRAPS) { return TypeArrayKlass::cast(Universe::intArrayKlassObj   ())->allocate(length, THREAD); }
+  static typeArrayOop    new_longArray  (int length, TRAPS) { return TypeArrayKlass::cast(Universe::longArrayKlassObj  ())->allocate(length, THREAD); }
 
   // create java.lang.Object[]
   static objArrayOop     new_objectArray(int length, TRAPS)  {
     assert(Universe::objectArrayKlassObj() != NULL, "Too early?");
     return ObjArrayKlass::
-      cast(Universe::objectArrayKlassObj())->allocate(length, CHECK_NULL);
+      cast(Universe::objectArrayKlassObj())->allocate(length, THREAD);
   }
 
   static typeArrayOop    new_charArray           (const char* utf8_str,  TRAPS);
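
The CHECK_NULL-to-THREAD swap above removes dead code rather than changing behavior: CHECK_NULL expands to a pending-exception test placed after the enclosing return statement, where it can never execute. Schematically (expansion simplified):

    // return klass->allocate(length, CHECK_NULL); expands roughly to:
    //   return klass->allocate(length, THREAD);
    //   if (HAS_PENDING_EXCEPTION) return NULL;   // unreachable after return
    // Passing THREAD forwards the result unconditionally and leaves the
    // pending-exception check to the caller's own CHECK_* macro:
    static typeArrayOop new_intArray(int length, TRAPS) {
      return TypeArrayKlass::cast(Universe::intArrayKlassObj())->allocate(length, THREAD);
    }

Callers of these TRAPS factories are unaffected; they already test for pending exceptions at their own call sites.
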
--- a/src/share/vm/memory/sharedHeap.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/sharedHeap.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -68,9 +68,7 @@
     vm_exit_during_initialization("Failed necessary allocation.");
   }
   _sh = this;  // ch is static, should be set only once.
-  if (UseParNewGC ||
-      UseG1GC ||
-      (UseConcMarkSweepGC && (CMSParallelInitialMarkEnabled || CMSParallelRemarkEnabled) && use_parallel_gc_threads())) {
+  if (UseConcMarkSweepGC || UseG1GC) {
     _workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
                             /* are_GC_task_threads */true,
                             /* are_ConcurrentGC_threads */false);
--- a/src/share/vm/memory/space.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/space.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -70,15 +70,13 @@
   // Used in support of save_marks()
   HeapWord* _saved_mark_word;
 
-  MemRegionClosure* _preconsumptionDirtyCardClosure;
-
   // A sequential tasks done structure. This supports
   // parallel GC, where we have threads dynamically
   // claiming sub-tasks from a larger parallel task.
   SequentialSubTasksDone _par_seq_tasks;
 
   Space():
-    _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }
+    _bottom(NULL), _end(NULL) { }
 
  public:
   // Accessors
@@ -97,11 +95,8 @@
     return (HeapWord*)obj >= saved_mark_word();
   }
 
-  MemRegionClosure* preconsumptionDirtyCardClosure() const {
-    return _preconsumptionDirtyCardClosure;
-  }
-  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
-    _preconsumptionDirtyCardClosure = cl;
+  virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
+    return NULL;
   }
 
   // Returns a subregion of the space containing only the allocated objects in
@@ -500,7 +495,6 @@
 // A space in which the free area is contiguous.  It therefore supports
 // faster allocation, and compaction.
 class ContiguousSpace: public CompactibleSpace {
-  friend class OneContigSpaceCardGeneration;
   friend class VMStructs;
   // Allow scan_and_forward function to call (private) overrides for auxiliary functions on this class
   template <typename SpaceType>
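
Turning the stored closure into a virtual accessor removes a pointer from every Space and confines the state to the subclasses that actually use it (the CMS free-list spaces). A minimal sketch of the pattern, with illustrative class names:

    class MemRegionClosure;   // opaque for the sketch

    class SpaceSketch {
     public:
      virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
        return NULL;          // default: no dirty-card preconsumption closure
      }
    };

    class FreeListSpaceSketch : public SpaceSketch {
      MemRegionClosure* _preconsumption_cl;
     public:
      FreeListSpaceSketch() : _preconsumption_cl(NULL) {}
      virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
        return _preconsumption_cl;   // state lives only where it is used
      }
    };
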
--- a/src/share/vm/memory/tenuredGeneration.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/tenuredGeneration.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -24,13 +24,15 @@
 
 #include "precompiled.hpp"
 #include "gc_implementation/shared/collectorCounters.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/blockOffsetTable.inline.hpp"
-#include "memory/generation.inline.hpp"
 #include "memory/generationSpec.hpp"
+#include "memory/genMarkSweep.hpp"
+#include "memory/genOopClosures.inline.hpp"
 #include "memory/space.hpp"
-#include "memory/tenuredGeneration.hpp"
+#include "memory/tenuredGeneration.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 #include "utilities/macros.hpp"
@@ -38,8 +40,7 @@
 TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                      size_t initial_byte_size, int level,
                                      GenRemSet* remset) :
-  OneContigSpaceCardGeneration(rs, initial_byte_size,
-                               level, remset, NULL)
+  CardGeneration(rs, initial_byte_size, level, remset)
 {
   HeapWord* bottom = (HeapWord*) _virtual_space.low();
   HeapWord* end    = (HeapWord*) _virtual_space.high();
@@ -64,46 +65,13 @@
   _space_counters = new CSpaceCounters(gen_name, 0,
                                        _virtual_space.reserved_size(),
                                        _the_space, _gen_counters);
-#if INCLUDE_ALL_GCS
-  if (UseParNewGC) {
-    typedef ParGCAllocBufferWithBOT* ParGCAllocBufferWithBOTPtr;
-    _alloc_buffers = NEW_C_HEAP_ARRAY(ParGCAllocBufferWithBOTPtr,
-                                      ParallelGCThreads, mtGC);
-    if (_alloc_buffers == NULL)
-      vm_exit_during_initialization("Could not allocate alloc_buffers");
-    for (uint i = 0; i < ParallelGCThreads; i++) {
-      _alloc_buffers[i] =
-        new ParGCAllocBufferWithBOT(OldPLABSize, _bts);
-      if (_alloc_buffers[i] == NULL)
-        vm_exit_during_initialization("Could not allocate alloc_buffers");
-    }
-  } else {
-    _alloc_buffers = NULL;
-  }
-#endif // INCLUDE_ALL_GCS
-}
-
-
-const char* TenuredGeneration::name() const {
-  return "tenured generation";
 }
 
 void TenuredGeneration::gc_prologue(bool full) {
   _capacity_at_prologue = capacity();
   _used_at_prologue = used();
-  if (VerifyBeforeGC) {
-    verify_alloc_buffers_clean();
-  }
 }
 
-void TenuredGeneration::gc_epilogue(bool full) {
-  if (VerifyAfterGC) {
-    verify_alloc_buffers_clean();
-  }
-  OneContigSpaceCardGeneration::gc_epilogue(full);
-}
-
-
 bool TenuredGeneration::should_collect(bool  full,
                                        size_t size,
                                        bool   is_tlab) {
@@ -149,15 +117,6 @@
   return result;
 }
 
-void TenuredGeneration::collect(bool   full,
-                                bool   clear_all_soft_refs,
-                                size_t size,
-                                bool   is_tlab) {
-  retire_alloc_buffers_before_full_gc();
-  OneContigSpaceCardGeneration::collect(full, clear_all_soft_refs,
-                                        size, is_tlab);
-}
-
 void TenuredGeneration::compute_new_size() {
   assert_locked_or_safepoint(Heap_lock);
 
@@ -171,6 +130,7 @@
          err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
          " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
 }
+
 void TenuredGeneration::update_gc_stats(int current_level,
                                         bool full) {
   // If the next lower level(s) has been collected, gather any statistics
@@ -198,96 +158,6 @@
   }
 }
 
-
-#if INCLUDE_ALL_GCS
-oop TenuredGeneration::par_promote(int thread_num,
-                                   oop old, markOop m, size_t word_sz) {
-
-  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
-  HeapWord* obj_ptr = buf->allocate(word_sz);
-  bool is_lab = true;
-  if (obj_ptr == NULL) {
-#ifndef PRODUCT
-    if (Universe::heap()->promotion_should_fail()) {
-      return NULL;
-    }
-#endif  // #ifndef PRODUCT
-
-    // Slow path:
-    if (word_sz * 100 < ParallelGCBufferWastePct * buf->word_sz()) {
-      // Is small enough; abandon this buffer and start a new one.
-      size_t buf_size = buf->word_sz();
-      HeapWord* buf_space =
-        TenuredGeneration::par_allocate(buf_size, false);
-      if (buf_space == NULL) {
-        buf_space = expand_and_allocate(buf_size, false, true /* parallel*/);
-      }
-      if (buf_space != NULL) {
-        buf->retire(false, false);
-        buf->set_buf(buf_space);
-        obj_ptr = buf->allocate(word_sz);
-        assert(obj_ptr != NULL, "Buffer was definitely big enough...");
-      }
-    };
-    // Otherwise, buffer allocation failed; try allocating object
-    // individually.
-    if (obj_ptr == NULL) {
-      obj_ptr = TenuredGeneration::par_allocate(word_sz, false);
-      if (obj_ptr == NULL) {
-        obj_ptr = expand_and_allocate(word_sz, false, true /* parallel */);
-      }
-    }
-    if (obj_ptr == NULL) return NULL;
-  }
-  assert(obj_ptr != NULL, "program logic");
-  Copy::aligned_disjoint_words((HeapWord*)old, obj_ptr, word_sz);
-  oop obj = oop(obj_ptr);
-  // Restore the mark word copied above.
-  obj->set_mark(m);
-  return obj;
-}
-
-void TenuredGeneration::par_promote_alloc_undo(int thread_num,
-                                               HeapWord* obj,
-                                               size_t word_sz) {
-  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
-  if (buf->contains(obj)) {
-    guarantee(buf->contains(obj + word_sz - 1),
-              "should contain whole object");
-    buf->undo_allocation(obj, word_sz);
-  } else {
-    CollectedHeap::fill_with_object(obj, word_sz);
-  }
-}
-
-void TenuredGeneration::par_promote_alloc_done(int thread_num) {
-  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
-  buf->retire(true, ParallelGCRetainPLAB);
-}
-
-void TenuredGeneration::retire_alloc_buffers_before_full_gc() {
-  if (UseParNewGC) {
-    for (uint i = 0; i < ParallelGCThreads; i++) {
-      _alloc_buffers[i]->retire(true /*end_of_gc*/, false /*retain*/);
-    }
-  }
-}
-
-// Verify that any retained parallel allocation buffers do not
-// intersect with dirty cards.
-void TenuredGeneration::verify_alloc_buffers_clean() {
-  if (UseParNewGC) {
-    for (uint i = 0; i < ParallelGCThreads; i++) {
-      _rs->verify_aligned_region_empty(_alloc_buffers[i]->range());
-    }
-  }
-}
-
-#else  // INCLUDE_ALL_GCS
-void TenuredGeneration::retire_alloc_buffers_before_full_gc() {}
-void TenuredGeneration::verify_alloc_buffers_clean() {}
-#endif // INCLUDE_ALL_GCS
-
 bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
   size_t available = max_contiguous_available();
   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
@@ -301,3 +171,244 @@
   }
   return res;
 }
+
+void TenuredGeneration::collect(bool   full,
+                                bool   clear_all_soft_refs,
+                                size_t size,
+                                bool   is_tlab) {
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+
+  SpecializationStats::clear();
+  // Temporarily expand the span of our ref processor, so
+  // refs discovery is over the entire heap, not just this generation
+  ReferenceProcessorSpanMutator
+    x(ref_processor(), gch->reserved_region());
+
+  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
+  gc_timer->register_gc_start();
+
+  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
+  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
+
+  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
+
+  gc_timer->register_gc_end();
+
+  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
+
+  SpecializationStats::print();
+}
+
+HeapWord*
+TenuredGeneration::expand_and_allocate(size_t word_size,
+                                       bool is_tlab,
+                                       bool parallel) {
+  assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
+  if (parallel) {
+    MutexLocker x(ParGCRareEvent_lock);
+    HeapWord* result = NULL;
+    size_t byte_size = word_size * HeapWordSize;
+    while (true) {
+      expand(byte_size, _min_heap_delta_bytes);
+      if (GCExpandToAllocateDelayMillis > 0) {
+        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
+      }
+      result = _the_space->par_allocate(word_size);
+      if (result != NULL) {
+        return result;
+      } else {
+        // If there's not enough expansion space available, give up.
+        if (_virtual_space.uncommitted_size() < byte_size) {
+          return NULL;
+        }
+        // else try again
+      }
+    }
+  } else {
+    expand(word_size*HeapWordSize, _min_heap_delta_bytes);
+    return _the_space->allocate(word_size);
+  }
+}
+
+bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
+  GCMutexLocker x(ExpandHeap_lock);
+  return CardGeneration::expand(bytes, expand_bytes);
+}
+
+
+void TenuredGeneration::shrink(size_t bytes) {
+  assert_locked_or_safepoint(ExpandHeap_lock);
+  size_t size = ReservedSpace::page_align_size_down(bytes);
+  if (size > 0) {
+    shrink_by(size);
+  }
+}
+
+
+size_t TenuredGeneration::capacity() const {
+  return _the_space->capacity();
+}
+
+
+size_t TenuredGeneration::used() const {
+  return _the_space->used();
+}
+
+
+size_t TenuredGeneration::free() const {
+  return _the_space->free();
+}
+
+MemRegion TenuredGeneration::used_region() const {
+  return the_space()->used_region();
+}
+
+size_t TenuredGeneration::unsafe_max_alloc_nogc() const {
+  return _the_space->free();
+}
+
+size_t TenuredGeneration::contiguous_available() const {
+  return _the_space->free() + _virtual_space.uncommitted_size();
+}
+
+bool TenuredGeneration::grow_by(size_t bytes) {
+  assert_locked_or_safepoint(ExpandHeap_lock);
+  bool result = _virtual_space.expand_by(bytes);
+  if (result) {
+    size_t new_word_size =
+       heap_word_size(_virtual_space.committed_size());
+    MemRegion mr(_the_space->bottom(), new_word_size);
+    // Expand card table
+    Universe::heap()->barrier_set()->resize_covered_region(mr);
+    // Expand shared block offset array
+    _bts->resize(new_word_size);
+
+    // Fix for bug #4668531
+    if (ZapUnusedHeapArea) {
+      MemRegion mangle_region(_the_space->end(),
+                              (HeapWord*)_virtual_space.high());
+      SpaceMangler::mangle_region(mangle_region);
+    }
+
+    // Expand space -- also expands space's BOT
+    // (which uses (part of) shared array above)
+    _the_space->set_end((HeapWord*)_virtual_space.high());
+
+    // update the space and generation capacity counters
+    update_counters();
+
+    if (Verbose && PrintGC) {
+      size_t new_mem_size = _virtual_space.committed_size();
+      size_t old_mem_size = new_mem_size - bytes;
+      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
+                      SIZE_FORMAT "K to " SIZE_FORMAT "K",
+                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
+    }
+  }
+  return result;
+}
+
+
+bool TenuredGeneration::grow_to_reserved() {
+  assert_locked_or_safepoint(ExpandHeap_lock);
+  bool success = true;
+  const size_t remaining_bytes = _virtual_space.uncommitted_size();
+  if (remaining_bytes > 0) {
+    success = grow_by(remaining_bytes);
+    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
+  }
+  return success;
+}
+
+void TenuredGeneration::shrink_by(size_t bytes) {
+  assert_locked_or_safepoint(ExpandHeap_lock);
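+  // Note: roughly the reverse order of grow_by() -- first uncommit, then
+  // pull back the space's end, and finally resize the BOT and card table
+  // to match the new committed size.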
+  // Shrink committed space
+  _virtual_space.shrink_by(bytes);
+  // Shrink space; this also shrinks the space's BOT
+  _the_space->set_end((HeapWord*) _virtual_space.high());
+  size_t new_word_size = heap_word_size(_the_space->capacity());
+  // Shrink the shared block offset array
+  _bts->resize(new_word_size);
+  MemRegion mr(_the_space->bottom(), new_word_size);
+  // Shrink the card table
+  Universe::heap()->barrier_set()->resize_covered_region(mr);
+
+  if (Verbose && PrintGC) {
+    size_t new_mem_size = _virtual_space.committed_size();
+    size_t old_mem_size = new_mem_size + bytes;
+    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
+                  name(), old_mem_size/K, new_mem_size/K);
+  }
+}
+
+// Currently nothing to do.
+void TenuredGeneration::prepare_for_verify() {}
+
+void TenuredGeneration::object_iterate(ObjectClosure* blk) {
+  _the_space->object_iterate(blk);
+}
+
+void TenuredGeneration::space_iterate(SpaceClosure* blk,
+                                      bool usedOnly) {
+  blk->do_space(_the_space);
+}
+
+void TenuredGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
+  blk->set_generation(this);
+  younger_refs_in_space_iterate(_the_space, blk);
+  blk->reset_generation();
+}
+
+void TenuredGeneration::save_marks() {
+  _the_space->set_saved_mark();
+}
+
+
+void TenuredGeneration::reset_saved_marks() {
+  _the_space->reset_saved_mark();
+}
+
+
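+// Allocation in this generation only ever happens at top(), so the saved
+// mark still being at top() means nothing has been allocated since the
+// last save_marks().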
+bool TenuredGeneration::no_allocs_since_save_marks() {
+  return _the_space->saved_mark_at_top();
+}
+
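+// For each closure type named by ALL_SINCE_SAVE_MARKS_CLOSURES below,
+// generate a specialized iterator that applies the closure to the oops in
+// all objects allocated since the previous save_marks(), then saves the
+// marks again.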
+#define TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)     \
+                                                                                \
+void TenuredGeneration::                                                        \
+oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {                  \
+  blk->set_generation(this);                                                    \
+  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                     \
+  blk->reset_generation();                                                      \
+  save_marks();                                                                 \
+}
+
+ALL_SINCE_SAVE_MARKS_CLOSURES(TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN)
+
+#undef TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN
+
+
+void TenuredGeneration::gc_epilogue(bool full) {
+  _last_gc = WaterMark(the_space(), the_space()->top());
+
+  // update the generation and space performance counters
+  update_counters();
+  if (ZapUnusedHeapArea) {
+    the_space()->check_mangled_unused_area_complete();
+  }
+}
+
+void TenuredGeneration::record_spaces_top() {
+  assert(ZapUnusedHeapArea, "Not mangling unused space");
+  the_space()->set_top_for_allocations();
+}
+
+void TenuredGeneration::verify() {
+  the_space()->verify();
+}
+
+void TenuredGeneration::print_on(outputStream* st)  const {
+  Generation::print_on(st);
+  st->print("   the");
+  the_space()->print_on(st);
+}
--- a/src/share/vm/memory/tenuredGeneration.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/tenuredGeneration.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -31,38 +31,47 @@
 #include "memory/generation.hpp"
 #include "utilities/macros.hpp"
 
-// TenuredGeneration models the heap containing old (promoted/tenured) objects.
+// TenuredGeneration models the heap containing old (promoted/tenured) objects
+// contained in a single contiguous space.
+//
+// Garbage collection is performed using mark-compact.
 
-class ParGCAllocBufferWithBOT;
+class TenuredGeneration: public CardGeneration {
+  friend class VMStructs;
+  // Abstractly, this is a subtype that gets access to protected fields.
+  friend class VM_PopulateDumpSharedSpace;
 
-class TenuredGeneration: public OneContigSpaceCardGeneration {
-  friend class VMStructs;
  protected:
-
-#if INCLUDE_ALL_GCS
-  // To support parallel promotion: an array of parallel allocation
-  // buffers, one per thread, initially NULL.
-  ParGCAllocBufferWithBOT** _alloc_buffers;
-#endif // INCLUDE_ALL_GCS
-
-  // Retire all alloc buffers before a full GC, so that they will be
-  // re-allocated at the start of the next young GC.
-  void retire_alloc_buffers_before_full_gc();
+  ContiguousSpace*  _the_space;       // actual space holding objects
+  WaterMark  _last_gc;                // watermark between objects allocated before
+                                      // and after last GC.
 
   GenerationCounters*   _gen_counters;
   CSpaceCounters*       _space_counters;
 
+  // Grow generation by the specified number of bytes (returns false if
+  // unable to grow).
+  virtual bool grow_by(size_t bytes);
+  // Grow generation to reserved size.
+  virtual bool grow_to_reserved();
+  // Shrink generation by the specified number of bytes.
+  void shrink_by(size_t bytes);
+
+  // Expansion and shrinking support (used, e.g., on allocation failure).
+  virtual bool expand(size_t bytes, size_t expand_bytes);
+  void shrink(size_t bytes);
+
+  // Accessing spaces
+  ContiguousSpace* the_space() const { return _the_space; }
+
  public:
-  TenuredGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
-                    GenRemSet* remset);
+  TenuredGeneration(ReservedSpace rs, size_t initial_byte_size,
+                    int level, GenRemSet* remset);
 
   Generation::Name kind() { return Generation::MarkSweepCompact; }
 
   // Printing
-  const char* name() const;
+  const char* name() const { return "tenured generation"; }
   const char* short_name() const { return "Tenured"; }
-  bool must_be_youngest() const { return false; }
-  bool must_be_oldest() const { return true; }
 
   // Does a "full" (forced) collection invoked on this generation collect
   // all younger generations as well? Note that this is a
@@ -72,37 +81,78 @@
     return !ScavengeBeforeFullGC;
   }
 
+  inline bool is_in(const void* p) const;
+
+  // Space enquiries
+  size_t capacity() const;
+  size_t used() const;
+  size_t free() const;
+
+  MemRegion used_region() const;
+
+  size_t unsafe_max_alloc_nogc() const;
+  size_t contiguous_available() const;
+
+  // Iteration
+  void object_iterate(ObjectClosure* blk);
+  void space_iterate(SpaceClosure* blk, bool usedOnly = false);
+
+  void younger_refs_iterate(OopsInGenClosure* blk);
+
+  inline CompactibleSpace* first_compaction_space() const;
+
+  virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
+  virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);
+
+  // Accessing marks
+  inline WaterMark top_mark();
+  inline WaterMark bottom_mark();
+
+#define TenuredGen_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)     \
+  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
+  TenuredGen_SINCE_SAVE_MARKS_DECL(OopsInGenClosure, _v)
+  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(TenuredGen_SINCE_SAVE_MARKS_DECL)
+
+  void save_marks();
+  void reset_saved_marks();
+  bool no_allocs_since_save_marks();
+
+  inline size_t block_size(const HeapWord* addr) const;
+
+  inline bool block_is_obj(const HeapWord* addr) const;
+
+  virtual void collect(bool full,
+                       bool clear_all_soft_refs,
+                       size_t size,
+                       bool is_tlab);
+  HeapWord* expand_and_allocate(size_t word_size,
+                                bool is_tlab,
+                                bool parallel = false);
+
+  virtual void prepare_for_verify();
+
+
   virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);
   bool should_collect(bool   full,
                       size_t word_size,
                       bool   is_tlab);
 
-  virtual void collect(bool full,
-                       bool clear_all_soft_refs,
-                       size_t size,
-                       bool is_tlab);
   virtual void compute_new_size();
 
-#if INCLUDE_ALL_GCS
-  // Overrides.
-  virtual oop par_promote(int thread_num,
-                          oop obj, markOop m, size_t word_sz);
-  virtual void par_promote_alloc_undo(int thread_num,
-                                      HeapWord* obj, size_t word_sz);
-  virtual void par_promote_alloc_done(int thread_num);
-#endif // INCLUDE_ALL_GCS
-
   // Performance Counter support
   void update_counters();
 
+  virtual void record_spaces_top();
+
   // Statistics
 
   virtual void update_gc_stats(int level, bool full);
 
   virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes) const;
 
-  void verify_alloc_buffers_clean();
+  virtual void verify();
+  virtual void print_on(outputStream* st) const;
 };
 
 #endif // SHARE_VM_MEMORY_TENUREDGENERATION_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/memory/tenuredGeneration.inline.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_TENUREDGENERATION_INLINE_HPP
+#define SHARE_VM_MEMORY_TENUREDGENERATION_INLINE_HPP
+
+#include "memory/genCollectedHeap.hpp"
+#include "memory/space.hpp"
+#include "memory/tenuredGeneration.hpp"
+
+bool TenuredGeneration::is_in(const void* p) const {
+  return the_space()->is_in(p);
+}
+
+
+WaterMark TenuredGeneration::top_mark() {
+  return the_space()->top_mark();
+}
+
+CompactibleSpace*
+TenuredGeneration::first_compaction_space() const {
+  return the_space();
+}
+
+HeapWord* TenuredGeneration::allocate(size_t word_size,
+                                      bool is_tlab) {
+  assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
+  return the_space()->allocate(word_size);
+}
+
+HeapWord* TenuredGeneration::par_allocate(size_t word_size,
+                                          bool is_tlab) {
+  assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
+  return the_space()->par_allocate(word_size);
+}
+
+WaterMark TenuredGeneration::bottom_mark() {
+  return the_space()->bottom_mark();
+}
+
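+// The only "block" in this generation that is not a valid object is the
+// free chunk above top(); its size is the distance from top() to end().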
+size_t TenuredGeneration::block_size(const HeapWord* addr) const {
+  if (addr < the_space()->top()) {
+    return oop(addr)->size();
+  } else {
+    assert(addr == the_space()->top(), "non-block head arg to block_size");
+    return the_space()->end() - the_space()->top();
+  }
+}
+
+bool TenuredGeneration::block_is_obj(const HeapWord* addr) const {
+  return addr < the_space()->top();
+}
+
+#endif // SHARE_VM_MEMORY_TENUREDGENERATION_INLINE_HPP
--- a/src/share/vm/memory/universe.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/memory/universe.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -26,9 +26,6 @@
 #include "classfile/classLoader.hpp"
 #include "classfile/classLoaderData.hpp"
 #include "classfile/javaClasses.hpp"
-#if INCLUDE_CDS
-#include "classfile/sharedClassUtil.hpp"
-#endif
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
@@ -79,9 +76,12 @@
 #include "gc_implementation/shared/adaptiveSizePolicy.hpp"
 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1CollectorPolicy_ext.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #endif // INCLUDE_ALL_GCS
+#if INCLUDE_CDS
+#include "classfile/sharedClassUtil.hpp"
+#endif
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
@@ -799,7 +799,7 @@
 
   } else if (UseG1GC) {
 #if INCLUDE_ALL_GCS
-    G1CollectorPolicy* g1p = new G1CollectorPolicy();
+    G1CollectorPolicyExt* g1p = new G1CollectorPolicyExt();
     g1p->initialize_all();
     G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
     Universe::_collectedHeap = g1h;
--- a/src/share/vm/oops/constantPool.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/oops/constantPool.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -461,7 +461,7 @@
 
 
 Klass* ConstantPool::klass_ref_at(int which, TRAPS) {
-  return klass_at(klass_ref_index_at(which), CHECK_NULL);
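+  // Tail call: CHECK_NULL would expand to an exception check after the
+  // return statement, i.e. dead code, so pass THREAD and let the caller
+  // test for a pending exception.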
+  return klass_at(klass_ref_index_at(which), THREAD);
 }
 
 
--- a/src/share/vm/oops/constantPool.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/oops/constantPool.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -336,13 +336,13 @@
 
   Klass* klass_at(int which, TRAPS) {
     constantPoolHandle h_this(THREAD, this);
-    return klass_at_impl(h_this, which, true, CHECK_NULL);
+    return klass_at_impl(h_this, which, true, THREAD);
   }
 
   // Version of klass_at that doesn't save the resolution error, called during deopt
   Klass* klass_at_ignore_error(int which, TRAPS) {
     constantPoolHandle h_this(THREAD, this);
-    return klass_at_impl(h_this, which, false, CHECK_NULL);
+    return klass_at_impl(h_this, which, false, THREAD);
   }
 
   Symbol* klass_name_at(int which);  // Returns the name, w/o resolving.
--- a/src/share/vm/oops/instanceKlass.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/oops/instanceKlass.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -532,7 +532,7 @@
   // 1) Verify the bytecodes
   Verifier::Mode mode =
     throw_verifyerror ? Verifier::ThrowException : Verifier::NoException;
-  return Verifier::verify(this_k, mode, this_k->should_verify_class(), CHECK_false);
+  return Verifier::verify(this_k, mode, this_k->should_verify_class(), THREAD);
 }
 
 
@@ -1130,7 +1130,7 @@
   if (or_null) {
     return oak->array_klass_or_null(n);
   }
-  return oak->array_klass(n, CHECK_NULL);
+  return oak->array_klass(n, THREAD);
 }
 
 Klass* InstanceKlass::array_klass_impl(bool or_null, TRAPS) {
--- a/src/share/vm/oops/klass.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/oops/klass.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -152,7 +152,7 @@
 
 void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw() {
   return Metaspace::allocate(loader_data, word_size, /*read_only*/false,
-                             MetaspaceObj::ClassType, CHECK_NULL);
+                             MetaspaceObj::ClassType, THREAD);
 }
 
 Klass::Klass() {
--- a/src/share/vm/oops/method.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/oops/method.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -936,7 +936,7 @@
   // so making them eagerly shouldn't be too expensive.
   AdapterHandlerEntry* adapter = AdapterHandlerLibrary::get_adapter(mh);
   if (adapter == NULL ) {
-    THROW_MSG_NULL(vmSymbols::java_lang_VirtualMachineError(), "out of space in CodeCache for adapters");
+    THROW_MSG_NULL(vmSymbols::java_lang_VirtualMachineError(), "Out of space in CodeCache for adapters");
   }
 
   mh->set_adapter_entry(adapter);
--- a/src/share/vm/oops/methodData.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/oops/methodData.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -658,7 +658,7 @@
   int size = MethodData::compute_allocation_size_in_words(method);
 
   return new (loader_data, size, false, MetaspaceObj::MethodDataType, THREAD)
-    MethodData(method(), size, CHECK_NULL);
+    MethodData(method(), size, THREAD);
 }
 
 int MethodData::bytecode_cell_count(Bytecodes::Code code) {
--- a/src/share/vm/oops/objArrayKlass.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/oops/objArrayKlass.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -189,7 +189,7 @@
     if (length <= arrayOopDesc::max_array_length(T_OBJECT)) {
       int size = objArrayOopDesc::object_size(length);
       KlassHandle h_k(THREAD, this);
-      return (objArrayOop)CollectedHeap::array_allocate(h_k, size, length, CHECK_NULL);
+      return (objArrayOop)CollectedHeap::array_allocate(h_k, size, length, THREAD);
     } else {
       report_java_out_of_memory("Requested array size exceeds VM limit");
       JvmtiExport::post_array_size_exhausted();
@@ -362,11 +362,11 @@
   if (or_null) {
     return ak->array_klass_or_null(n);
   }
-  return ak->array_klass(n, CHECK_NULL);
+  return ak->array_klass(n, THREAD);
 }
 
 Klass* ObjArrayKlass::array_klass_impl(bool or_null, TRAPS) {
-  return array_klass_impl(or_null, dimension() +  1, CHECK_NULL);
+  return array_klass_impl(or_null, dimension() + 1, THREAD);
 }
 
 bool ObjArrayKlass::can_be_primary_super_slow() const {
--- a/src/share/vm/oops/oop.inline.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/oops/oop.inline.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -441,12 +441,12 @@
       s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
         HeapWordSize);
 
-      // UseParNewGC, UseParallelGC and UseG1GC can change the length field
+      // ParNew (used by CMS), UseParallelGC and UseG1GC can change the length field
       // of an "old copy" of an object array in the young gen so it indicates
       // the grey portion of an already copied array. This will cause the first
       // disjunct below to fail if the two comparands are computed across such
       // a concurrent change.
-      // UseParNewGC also runs with promotion labs (which look like int
+      // ParNew also runs with promotion labs (which look like int
       // filler arrays) which are subject to changing their declared size
       // when finally retiring a PLAB; this also can cause the first disjunct
       // to fail for another worker thread that is concurrently walking the block
@@ -458,8 +458,8 @@
       // technique, we will need to suitably modify the assertion.
       assert((s == klass->oop_size(this)) ||
              (Universe::heap()->is_gc_active() &&
-              ((is_typeArray() && UseParNewGC) ||
-               (is_objArray()  && is_forwarded() && (UseParNewGC || UseParallelGC || UseG1GC)))),
+              ((is_typeArray() && UseConcMarkSweepGC) ||
+               (is_objArray()  && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
              "wrong array object size");
     } else {
       // Must be zero, so bite the bullet and take the virtual call.
--- a/src/share/vm/oops/oop.pcgc.inline.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/oops/oop.pcgc.inline.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -25,8 +25,8 @@
 #ifndef SHARE_VM_OOPS_OOP_PCGC_INLINE_HPP
 #define SHARE_VM_OOPS_OOP_PCGC_INLINE_HPP
 
+#include "runtime/atomic.inline.hpp"
 #include "utilities/macros.hpp"
-#include "runtime/atomic.inline.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/parNew/parNewGeneration.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
--- a/src/share/vm/oops/typeArrayKlass.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/oops/typeArrayKlass.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -191,7 +191,7 @@
   if (or_null) {
     return h_ak->array_klass_or_null(n);
   }
-  return h_ak->array_klass(n, CHECK_NULL);
+  return h_ak->array_klass(n, THREAD);
 }
 
 Klass* TypeArrayKlass::array_klass_impl(bool or_null, TRAPS) {
--- a/src/share/vm/opto/c2_globals.hpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/opto/c2_globals.hpp	Mon Dec 08 00:15:55 2014 -0800
@@ -647,7 +647,7 @@
   develop(bool, AlwaysIncrementalInline, false,                             \
           "do all inlining incrementally")                                  \
                                                                             \
-  product(intx, LiveNodeCountInliningCutoff, 20000,                         \
+  product(intx, LiveNodeCountInliningCutoff, 40000,                         \
           "max number of live nodes in a method")                           \
                                                                             \
   diagnostic(bool, OptimizeExpensiveOps, true,                              \
--- a/src/share/vm/opto/c2compiler.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/opto/c2compiler.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -102,23 +102,25 @@
     // Attempt to compile while subsuming loads into machine instructions.
     Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis, eliminate_boxing);
 
-
     // Check result and retry if appropriate.
     if (C.failure_reason() != NULL) {
       if (C.failure_reason_is(retry_no_subsuming_loads())) {
         assert(subsume_loads, "must make progress");
         subsume_loads = false;
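+        // Record the bailout reason with the ciEnv before retrying, so the
+        // reason for this attempt's failure is not lost.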
+        env->report_failure(C.failure_reason());
         continue;  // retry
       }
       if (C.failure_reason_is(retry_no_escape_analysis())) {
         assert(do_escape_analysis, "must make progress");
         do_escape_analysis = false;
+        env->report_failure(C.failure_reason());
         continue;  // retry
       }
       if (C.has_boxed_value()) {
         // Recompile without boxing elimination regardless failure reason.
         assert(eliminate_boxing, "must make progress");
         eliminate_boxing = false;
+        env->report_failure(C.failure_reason());
         continue;  // retry
       }
       // Pass any other failure reason up to the ciEnv.
--- a/src/share/vm/opto/castnode.cpp	Sat Dec 06 04:30:00 2014 +0000
+++ b/src/share/vm/opto/castnode.cpp	Mon Dec 08 00:15:55 2014 -0800
@@ -83,6 +83,101 @@
   return this;
 }
 
+uint CastIINode::size_of() const {
+  return sizeof(*this);
+}
+
+uint CastIINode::cmp(const Node &n) const {
+  return TypeNode::cmp(n) && ((CastIINode&)n)._carry_dependency == _carry_dependency;
+}
+
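+// A CastII that carries a control dependency pins its narrowed type to the
+// controlling test, so Identity must not fold it into its input even when
+// the cast looks redundant.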
+Node *CastIINode::Identity(PhaseTransform *phase) {
+  if (_carry_dependency) {
+    return this;
+  }
+  return ConstraintCastNode::Identity(phase);
+}
+
+const Type *CastIINode::Value(PhaseTransform *phase) const {
+  const Type *res = ConstraintCastNode::Value(phase);
+
+  // Try to improve the type of the CastII if we recognize a CmpI/If
+  // pattern.
+  if (_carry_dependency) {
+    if (in(0) != NULL && (in(0)->is_IfFalse() || in(0)->is_IfTrue())) {
+      Node* proj = in(0);
+      if (proj->in(0)->in(1)->is_Bool()) {
+        Node* b = proj->in(0)->in(1);
+        if (b->in(1)->Opcode() == Op_CmpI) {
+          Node* cmp = b->in(1);
+          if (cmp->in(1) == in(1) && phase->type(cmp->in(2))->isa_int()) {
+            const TypeInt* in2_t = phase->type(cmp->in(2))->is_int();
+            const Type* t = TypeInt::INT;
+            BoolTest test = b->as_Bool()->_test;
+            if (proj->is_IfFalse()) {
+              test = test.negate();
+            }
+            BoolTest::mask m = test._test;
+            jlong lo_long = min_jint;
+            jlong hi_long = max_jint;
+            if (m == BoolTest::le || m == BoolTest::lt) {
+              hi_long = in2_t->_hi;
+              if (m == BoolTest::lt) {
+                hi_long -= 1;