changeset 5964:e2722a66aba7

Merge
author kvn
date Thu, 05 Sep 2013 11:04:39 -0700
parents bdd155477289 3f4392035ec7
children 7687c56b6693
files make/linux/makefiles/gcc.make src/os/bsd/vm/os_bsd.cpp src/os/linux/vm/os_linux.cpp src/os/posix/vm/os_posix.cpp src/os/solaris/vm/os_solaris.cpp src/os_cpu/bsd_x86/vm/bsd_x86_32.ad src/os_cpu/bsd_x86/vm/bsd_x86_64.ad src/os_cpu/linux_x86/vm/linux_x86_32.ad src/os_cpu/linux_x86/vm/linux_x86_64.ad src/os_cpu/solaris_sparc/vm/solaris_sparc.ad src/os_cpu/solaris_x86/vm/solaris_x86_32.ad src/os_cpu/solaris_x86/vm/solaris_x86_64.ad src/os_cpu/windows_x86/vm/windows_x86_32.ad src/os_cpu/windows_x86/vm/windows_x86_64.ad src/share/vm/code/nmethod.cpp src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp src/share/vm/memory/allocation.hpp src/share/vm/memory/universe.cpp src/share/vm/opto/c2_globals.hpp src/share/vm/opto/compile.cpp src/share/vm/opto/gcm.cpp src/share/vm/opto/lcm.cpp src/share/vm/opto/matcher.cpp src/share/vm/opto/output.cpp src/share/vm/opto/output.hpp src/share/vm/runtime/arguments.cpp src/share/vm/runtime/frame.cpp src/share/vm/runtime/globals.hpp src/share/vm/runtime/mutexLocker.hpp src/share/vm/runtime/os.cpp src/share/vm/runtime/os.hpp src/share/vm/runtime/thread.cpp src/share/vm/runtime/virtualspace.cpp src/share/vm/runtime/vmStructs.cpp src/share/vm/runtime/vm_version.cpp src/share/vm/utilities/debug.hpp src/share/vm/utilities/globalDefinitions.hpp src/share/vm/utilities/taskqueue.hpp test/runtime/7051189/Xchecksig.sh
diffstat 242 files changed, 7986 insertions(+), 4043 deletions(-)
--- a/.hgtags	Thu Aug 22 09:39:54 2013 -0700
+++ b/.hgtags	Thu Sep 05 11:04:39 2013 -0700
@@ -368,3 +368,9 @@
 c4697c1c448416108743b59118b4a2498b339d0c jdk8-b102
 7f55137d6aa81efc6eb0035813709f2cb6a26b8b hs25-b45
 6f9be7f87b9653e94fd8fb3070891a0cc91b15bf jdk8-b103
+580430d131ccd475e2f2ad4006531b8c4813d102 hs25-b46
+104743074675359cfbf7f4dcd9ab2a5974a16627 jdk8-b104
+c1604d5885a6f2adc0bcea2fa142a8f6bafad2f0 hs25-b47
+acac3bde66b2c22791c257a8d99611d6d08c6713 jdk8-b105
+18b4798adbc42c6fa16f5ecb7d5cd3ca130754bf hs25-b48
+aed585cafc0d9655726af6d1e1081d1c94cb3b5c jdk8-b106
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Thu Aug 22 09:39:54 2013 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Thu Sep 05 11:04:39 2013 -0700
@@ -75,19 +75,19 @@
     javaFieldsCount      = new CIntField(type.getCIntegerField("_java_fields_count"), 0);
     constants            = new MetadataField(type.getAddressField("_constants"), 0);
     classLoaderData      = type.getAddressField("_class_loader_data");
-    sourceFileName       = type.getAddressField("_source_file_name");
     sourceDebugExtension = type.getAddressField("_source_debug_extension");
     innerClasses         = type.getAddressField("_inner_classes");
+    sourceFileNameIndex  = new CIntField(type.getCIntegerField("_source_file_name_index"), 0);
     nonstaticFieldSize   = new CIntField(type.getCIntegerField("_nonstatic_field_size"), 0);
     staticFieldSize      = new CIntField(type.getCIntegerField("_static_field_size"), 0);
-    staticOopFieldCount   = new CIntField(type.getCIntegerField("_static_oop_field_count"), 0);
+    staticOopFieldCount  = new CIntField(type.getCIntegerField("_static_oop_field_count"), 0);
     nonstaticOopMapSize  = new CIntField(type.getCIntegerField("_nonstatic_oop_map_size"), 0);
     isMarkedDependent    = new CIntField(type.getCIntegerField("_is_marked_dependent"), 0);
     initState            = new CIntField(type.getCIntegerField("_init_state"), 0);
     vtableLen            = new CIntField(type.getCIntegerField("_vtable_len"), 0);
     itableLen            = new CIntField(type.getCIntegerField("_itable_len"), 0);
     breakpoints          = type.getAddressField("_breakpoints");
-    genericSignature     = type.getAddressField("_generic_signature");
+    genericSignatureIndex = new CIntField(type.getCIntegerField("_generic_signature_index"), 0);
     majorVersion         = new CIntField(type.getCIntegerField("_major_version"), 0);
     minorVersion         = new CIntField(type.getCIntegerField("_minor_version"), 0);
     headerSize           = Oop.alignObjectOffset(type.getSize());
@@ -134,9 +134,9 @@
   private static CIntField javaFieldsCount;
   private static MetadataField constants;
   private static AddressField  classLoaderData;
-  private static AddressField  sourceFileName;
   private static AddressField  sourceDebugExtension;
   private static AddressField  innerClasses;
+  private static CIntField sourceFileNameIndex;
   private static CIntField nonstaticFieldSize;
   private static CIntField staticFieldSize;
   private static CIntField staticOopFieldCount;
@@ -146,7 +146,7 @@
   private static CIntField vtableLen;
   private static CIntField itableLen;
   private static AddressField breakpoints;
-  private static AddressField  genericSignature;
+  private static CIntField genericSignatureIndex;
   private static CIntField majorVersion;
   private static CIntField minorVersion;
 
@@ -346,7 +346,7 @@
   public ConstantPool getConstants()        { return (ConstantPool) constants.getValue(this); }
   public ClassLoaderData getClassLoaderData() { return                ClassLoaderData.instantiateWrapperFor(classLoaderData.getValue(getAddress())); }
   public Oop       getClassLoader()         { return                getClassLoaderData().getClassLoader(); }
-  public Symbol    getSourceFileName()      { return getSymbol(sourceFileName); }
+  public Symbol    getSourceFileName()      { return                getConstants().getSymbolAt(sourceFileNameIndex.getValue(this)); }
   public String    getSourceDebugExtension(){ return                CStringUtilities.getString(sourceDebugExtension.getValue(getAddress())); }
   public long      getNonstaticFieldSize()  { return                nonstaticFieldSize.getValue(this); }
   public long      getStaticOopFieldCount() { return                staticOopFieldCount.getValue(this); }
@@ -354,7 +354,7 @@
   public boolean   getIsMarkedDependent()   { return                isMarkedDependent.getValue(this) != 0; }
   public long      getVtableLen()           { return                vtableLen.getValue(this); }
   public long      getItableLen()           { return                itableLen.getValue(this); }
-  public Symbol    getGenericSignature()    { return getSymbol(genericSignature); }
+  public Symbol    getGenericSignature()    { return                getConstants().getSymbolAt(genericSignatureIndex.getValue(this)); }
   public long      majorVersion()           { return                majorVersion.getValue(this); }
   public long      minorVersion()           { return                minorVersion.getValue(this); }
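
The hunks above track a VM-side storage change: InstanceKlass no longer holds _source_file_name and _generic_signature as raw Symbol pointers but as 16-bit constant-pool indices, so the SA now resolves them via getConstants().getSymbolAt(...). A minimal C++ sketch of the trade-off, using a toy pool (Pool, KlassSketch, and symbol_at are hypothetical stand-ins, not HotSpot types):

    // Illustrative only: index-into-constant-pool storage vs. a raw Symbol*.
    #include <cstdint>
    #include <string>
    #include <vector>

    struct Pool {
      std::vector<std::string> symbols;            // slot 0 unused, as in the CP
      const std::string* symbol_at(uint16_t i) const {
        return i == 0 ? nullptr : &symbols.at(i);  // index 0 means "not present"
      }
    };

    struct KlassSketch {
      const Pool* constants;
      uint16_t source_file_name_index;   // 2 bytes instead of an 8-byte Symbol*
      uint16_t generic_signature_index;

      const std::string* source_file_name() const {
        return constants->symbol_at(source_file_name_index);
      }
      const std::string* generic_signature() const {
        return constants->symbol_at(generic_signature_index);
      }
    };

    int main() {
      Pool p{{"", "Foo.java"}};
      KlassSketch k{&p, 1, 0};
      return (k.source_file_name() != nullptr &&
              k.generic_signature() == nullptr) ? 0 : 1;
    }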
 
--- a/agent/src/share/classes/sun/jvm/hotspot/opto/PhaseCFG.java	Thu Aug 22 09:39:54 2013 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/PhaseCFG.java	Thu Sep 05 11:04:39 2013 -0700
@@ -44,7 +44,7 @@
     Type type      = db.lookupType("PhaseCFG");
     numBlocksField = new CIntField(type.getCIntegerField("_num_blocks"), 0);
     blocksField = type.getAddressField("_blocks");
-    bbsField = type.getAddressField("_bbs");
+    bbsField = type.getAddressField("_node_to_block_mapping");
     brootField = type.getAddressField("_broot");
   }
 
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java	Thu Aug 22 09:39:54 2013 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java	Thu Sep 05 11:04:39 2013 -0700
@@ -92,8 +92,13 @@
                     System.err.println("Warning: Can not create class filter!");
                 }
             }
-            String outputDirectory = System.getProperty("sun.jvm.hotspot.tools.jcore.outputDir", ".");
-            setOutputDirectory(outputDirectory);
+
+            // outputDirectory and jarStream are alternatives: setting one closes the other.
+            // If neither is set, use outputDirectory from the System property:
+            if (outputDirectory == null && jarStream == null) {
+                String dirName = System.getProperty("sun.jvm.hotspot.tools.jcore.outputDir", ".");
+                setOutputDirectory(dirName);
+            }
 
             // walk through the system dictionary
             SystemDictionary dict = VM.getVM().getSystemDictionary();
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Thu Aug 22 09:39:54 2013 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Thu Sep 05 11:04:39 2013 -0700
@@ -35,8 +35,9 @@
 sapkg.code = sapkg.hotspot.code;
 sapkg.compiler = sapkg.hotspot.compiler;
 
-// 'debugger' is a JavaScript keyword :-(
-// sapkg.debugger = sapkg.hotspot.debugger;
+// 'debugger' is a JavaScript keyword, but ES5 relaxes the
+// restriction on using keywords as property names
+sapkg.debugger = sapkg.hotspot.debugger;
 
 sapkg.interpreter = sapkg.hotspot.interpreter;
 sapkg.jdi = sapkg.hotspot.jdi;
@@ -116,27 +117,36 @@
       return args;
     }
 
+    // Handle __has__ specially to avoid metacircularity problems
+    // when called from __get__.
+    // Calling
+    //   this.__has__(name)
+    // will in turn call
+    //   this.__call__('__has__', name)
+    // which is not handled below
+    function __has__(name) {
+      if (typeof(name) == 'number') {
+        return so["has(int)"](name);
+      } else {
+        if (name == '__wrapped__') {
+          return true;
+        } else if (so["has(java.lang.String)"](name)) {
+          return true;
+        } else if (name.equals('toString')) {
+          return true;
+        } else {
+          return false;
+        }
+      }
+    }
+
     if (so instanceof sapkg.utilities.soql.ScriptObject) {
       return new JSAdapter() {
-        __getIds__: function() {                  
-          return so.getIds();         
+        __getIds__: function() {
+          return so.getIds();
         },
   
-        __has__ : function(name) {
-          if (typeof(name) == 'number') {
-            return so["has(int)"](name);
-          } else {
-            if (name == '__wrapped__') {
-              return true;
-            } else if (so["has(java.lang.String)"](name)) {
-              return true;
-            } else if (name.equals('toString')) {
-              return true;
-            } else {
-              return false;
-            }
-          }
-        },
+        __has__ : __has__,
   
         __delete__ : function(name) {
           if (typeof(name) == 'number') {
@@ -147,7 +157,8 @@
         },
   
         __get__ : function(name) {
-          if (! this.__has__(name)) {
+          // don't call this.__has__(name); see comments above function __has__
+          if (! __has__.call(this, name)) {
             return undefined;
           }
           if (typeof(name) == 'number') {
@@ -162,7 +173,7 @@
                   var args = prepareArgsArray(arguments);
                   var r;
                   try {
-                    r = value.call(args);
+                    r = value.call(Java.to(args, 'java.lang.Object[]'));
                   } catch (e) {
                     println("call to " + name + " failed!");
                     throw e;
@@ -204,6 +215,18 @@
   }
 
   // define "writeln" and "write" if not defined
+  if (typeof(println) == 'undefined') {
+    println = function (str) {
+      java.lang.System.out.println(String(str));
+    }
+  }
+
+  if (typeof(print) == 'undefined') {
+    print = function (str) {
+      java.lang.System.out.print(String(str));
+    }
+  }
+
   if (typeof(writeln) == 'undefined') {
     writeln = println;
   }
@@ -235,7 +258,7 @@
 
     this.jclasses = function() {
       forEachKlass(function (clazz) {
-        writeln(clazz.getName().asString() + " @" + clazz.getHandle().toString()); 
+        writeln(clazz.getName().asString() + " @" + clazz.getAddress().toString()); 
       });
     }
     registerCommand("classes", "classes", "jclasses");
@@ -490,14 +513,14 @@
 function forEachKlass(callback) {
    var VisitorClass = sapkg.memory.SystemDictionary.ClassVisitor;
    var visitor = new VisitorClass() { visit: callback };
-   sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary$ClassVisitor)"](visitor);
+   sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary.ClassVisitor)"](visitor);
 }
 
 // iterate system dictionary for each 'Klass' and initiating loader
 function forEachKlassAndLoader(callback) {
    var VisitorClass = sapkg.memory.SystemDictionary.ClassAndLoaderVisitor;
    var visitor = new VisitorClass() { visit: callback };
-   sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary$ClassAndLoaderVisitor)"](visitor);
+   sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary.ClassAndLoaderVisitor)"](visitor);
 }
 
 // iterate system dictionary for each primitive array klass
@@ -522,7 +545,12 @@
 
 // iterates Java heap for each Oop
 function forEachOop(callback) {
-   sa.objHeap.iterate(new sapkg.oops.HeapVisitor() { doObj: callback });
+   function empty() { }
+   sa.objHeap.iterate(new sapkg.oops.HeapVisitor() {
+       prologue: empty,
+       doObj: callback,
+       epilogue: empty
+   });
 }
 
 // iterates Java heap for each Oop of given 'klass'.
@@ -536,8 +564,14 @@
    if (includeSubtypes == undefined) {
       includeSubtypes = true;
    }
+
+   function empty() { }
    sa.objHeap.iterateObjectsOfKlass(
-        new sapkg.oops.HeapVisitor() { doObj: callback },
+        new sapkg.oops.HeapVisitor() {
+            prologue: empty,
+            doObj: callback,
+            epilogue: empty
+        },
         klass, includeSubtypes);
 }
 
@@ -746,9 +780,9 @@
          // ignore;
          continue;
    } else {
-      // some type names have ':'. replace to make it as a 
+      // some type names have ':', '<', '>', '*', ' '; replace them to form a valid
       // JavaScript identifier
-      tmp.name = tmp.name.replace(':', '_').replace('<', '_').replace('>', '_').replace('*', '_').replace(' ', '_');
+      tmp.name = ("" + tmp.name).replace(/[:<>* ]/g, '_');
       eval("function read" + tmp.name + "(addr) {" +
            "   return readVMType('" + tmp.name + "', addr);}"); 
       eval("function print" + tmp.name + "(addr) {" + 
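
The identifier-sanitizing change above fixes a real bug: JavaScript's String.replace with a string pattern substitutes only the first occurrence, and whether tmp.name behaves as a Java or JavaScript string depends on the engine, so the old chained replace(':', '_')... calls could leave later occurrences in place. The ("" + tmp.name) coercion plus the /[:<>* ]/g regex makes the behavior uniform and replaces them all. A rough C++ analog of the corrected behavior (sanitize is a hypothetical helper):

    #include <algorithm>
    #include <cstring>
    #include <iostream>
    #include <string>

    // Replace every character from the set ":<>* " with '_', like /[:<>* ]/g.
    std::string sanitize(std::string name) {
      std::replace_if(name.begin(), name.end(),
                      [](char c) { return c != '\0' && std::strchr(":<>* ", c); },
                      '_');
      return name;
    }

    int main() {
      std::cout << sanitize("GrowableArray<Klass*>") << '\n';  // GrowableArray_Klass__
    }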
--- a/make/bsd/makefiles/adlc.make	Thu Aug 22 09:39:54 2013 -0700
+++ b/make/bsd/makefiles/adlc.make	Thu Sep 05 11:04:39 2013 -0700
@@ -41,13 +41,11 @@
 
 ifeq ("${Platform_arch_model}", "${Platform_arch}")
   SOURCES.AD = \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) 
 else
   SOURCES.AD = \
   $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) 
 endif
 
 EXEC	= $(OUTDIR)/adlc
--- a/make/bsd/makefiles/gcc.make	Thu Aug 22 09:39:54 2013 -0700
+++ b/make/bsd/makefiles/gcc.make	Thu Sep 05 11:04:39 2013 -0700
@@ -247,7 +247,7 @@
 # Not yet supported by clang in Xcode 4.6.2
 #  WARNINGS_ARE_ERRORS += -Wno-tautological-constant-out-of-range-compare
   WARNINGS_ARE_ERRORS += -Wno-delete-non-virtual-dtor -Wno-deprecated -Wno-format -Wno-dynamic-class-memaccess
-  WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
+  WARNINGS_ARE_ERRORS += -Wno-empty-body
 endif
 
 WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef
--- a/make/hotspot_version	Thu Aug 22 09:39:54 2013 -0700
+++ b/make/hotspot_version	Thu Sep 05 11:04:39 2013 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=45
+HS_BUILD_NUMBER=48
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/make/linux/makefiles/adlc.make	Thu Aug 22 09:39:54 2013 -0700
+++ b/make/linux/makefiles/adlc.make	Thu Sep 05 11:04:39 2013 -0700
@@ -41,13 +41,11 @@
 
 ifeq ("${Platform_arch_model}", "${Platform_arch}")
   SOURCES.AD = \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) 
 else
   SOURCES.AD = \
   $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) 
 endif
 
 EXEC	= $(OUTDIR)/adlc
--- a/make/linux/makefiles/amd64.make	Thu Aug 22 09:39:54 2013 -0700
+++ b/make/linux/makefiles/amd64.make	Thu Sep 05 11:04:39 2013 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -31,9 +31,4 @@
 
 CFLAGS += -D_LP64=1
 
-# The serviceability agent relies on frame pointer (%rbp) to walk thread stack
-ifndef USE_SUNCC
-  CFLAGS += -fno-omit-frame-pointer
-endif
-
 OPT_CFLAGS/compactingPermGenGen.o = -O1
--- a/make/linux/makefiles/gcc.make	Thu Aug 22 09:39:54 2013 -0700
+++ b/make/linux/makefiles/gcc.make	Thu Sep 05 11:04:39 2013 -0700
@@ -402,3 +402,10 @@
 ifdef MINIMIZE_RAM_USAGE
 CFLAGS += -DMINIMIZE_RAM_USAGE
 endif
+
+# Stack walking in the JVM relies on the frame pointer (%rbp) to traverse
+# thread stacks. Specify -fno-omit-frame-pointer explicitly, because gcc
+# omits the frame pointer by default starting with gcc 4.6.
+ifndef USE_SUNCC
+  CFLAGS += -fno-omit-frame-pointer
+endif
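
For context on the hunk above: stack walking follows the chain of saved %rbp values that "push %rbp; mov %rsp,%rbp" prologues leave behind, which is exactly what gcc >= 4.6 omits by default when optimizing. A minimal sketch of such a walk; the Frame layout is the standard x86-64 convention, not HotSpot's frame code, and walking past the program's own frames is undefined, hence the small depth cap:

    #include <cstdio>

    struct Frame {
      Frame* caller_rbp;  // saved %rbp of the caller, pushed by the prologue
      void*  return_pc;   // return address pushed by the call instruction
    };

    void walk(Frame* fp, int max_depth) {
      for (int i = 0; fp != nullptr && i < max_depth; i++) {
        std::printf("frame %d: pc=%p\n", i, fp->return_pc);
        fp = fp->caller_rbp;  // this chain is broken if %rbp was omitted
      }
    }

    int main() {
      // Build with -fno-omit-frame-pointer for a meaningful chain.
      walk(static_cast<Frame*>(__builtin_frame_address(0)), 2);
    }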
--- a/make/solaris/makefiles/adlc.make	Thu Aug 22 09:39:54 2013 -0700
+++ b/make/solaris/makefiles/adlc.make	Thu Sep 05 11:04:39 2013 -0700
@@ -42,13 +42,11 @@
 
 ifeq ("${Platform_arch_model}", "${Platform_arch}")
   SOURCES.AD = \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) 
 else
   SOURCES.AD = \
   $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) 
 endif
 
 EXEC	= $(OUTDIR)/adlc
--- a/make/solaris/makefiles/dtrace.make	Thu Aug 22 09:39:54 2013 -0700
+++ b/make/solaris/makefiles/dtrace.make	Thu Sep 05 11:04:39 2013 -0700
@@ -283,9 +283,9 @@
 	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -G -xlazyload -o $@ -s $(DTRACE).d \
      $(DTraced_Files) ||\
   STATUS=$$?;\
-	if [ x"$$STATUS" = x"1" -a \
-       x`uname -r` = x"5.10" -a \
-       x`uname -p` = x"sparc" ]; then\
+  if [ x"$$STATUS" = x"1" ]; then \
+      if [ x`uname -r` = x"5.10" -a \
+           x`uname -p` = x"sparc" ]; then\
     echo "*****************************************************************";\
     echo "* If you are building server compiler, and the error message is ";\
     echo "* \"incorrect ELF machine type...\", you have run into solaris bug ";\
@@ -294,6 +294,20 @@
     echo "* environment variable HOTSPOT_DISABLE_DTRACE_PROBES to disable ";\
     echo "* dtrace probes for this build.";\
     echo "*****************************************************************";\
+      elif [ x`uname -r` = x"5.10" ]; then\
+    echo "*****************************************************************";\
+    echo "* If you are seeing 'syntax error near \"umpiconninfo_t\"' on Solaris";\
+    echo "* 10, try doing 'cd /usr/lib/dtrace && gzip mpi.d' as root, ";\
+    echo "* or set the environment variable HOTSPOT_DISABLE_DTRACE_PROBES";\
+    echo "* to disable dtrace probes for this build.";\
+    echo "*****************************************************************";\
+      else \
+    echo "*****************************************************************";\
+    echo "* If you cannot fix dtrace build issues, try to ";\
+    echo "* set the environment variable HOTSPOT_DISABLE_DTRACE_PROBES";\
+    echo "* to disable dtrace probes for this build.";\
+    echo "*****************************************************************";\
+      fi; \
   fi;\
   exit $$STATUS
   # Since some DTraced_Files are in LIBJVM.o and they are touched by this
--- a/make/windows/build_vm_def.sh	Thu Aug 22 09:39:54 2013 -0700
+++ b/make/windows/build_vm_def.sh	Thu Sep 05 11:04:39 2013 -0700
@@ -42,8 +42,6 @@
  MKS_HOME=`dirname "$SH"`
 fi
 
-echo "EXPORTS" > vm1.def
-
 AWK="$MKS_HOME/awk.exe"
 if [ ! -e $AWK ]; then
     AWK="$MKS_HOME/gawk.exe"
@@ -55,6 +53,22 @@
 RM="$MKS_HOME/rm.exe"
 DUMPBIN="link.exe /dump"
 
+if [ "$1" = "-nosa" ]; then
+    echo EXPORTS > vm.def
+    echo ""
+    echo "***"
+    echo "*** Not building SA: BUILD_WIN_SA != 1"
+    echo "*** C++ Vtables NOT included in vm.def"
+    echo "*** This jvm.dll will NOT work properly with SA."
+    echo "***"
+    echo "*** When in doubt, set BUILD_WIN_SA=1, clean and rebuild."
+    echo "***"
+    echo ""
+    exit
+fi
+
+echo "EXPORTS" > vm1.def
+
 # When called from IDE the first param should contain the link version, otherwise may be nill
 if [ "x$1" != "x" ]; then
 LD_VER="$1"
--- a/make/windows/create.bat	Thu Aug 22 09:39:54 2013 -0700
+++ b/make/windows/create.bat	Thu Sep 05 11:04:39 2013 -0700
@@ -1,6 +1,6 @@
 @echo off
 REM
-REM Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+REM Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 REM DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 REM
 REM This code is free software; you can redistribute it and/or modify it
@@ -148,7 +148,7 @@
 
 REM This is now safe to do.
 :copyfiles
-for /D %%i in (compiler1, compiler2, tiered, core) do (
+for /D %%i in (compiler1, compiler2, tiered ) do (
 if NOT EXIST %HotSpotBuildSpace%\%%i\generated mkdir %HotSpotBuildSpace%\%%i\generated
 copy %HotSpotWorkSpace%\make\windows\projectfiles\%%i\* %HotSpotBuildSpace%\%%i\generated > NUL
 )
@@ -156,7 +156,7 @@
 REM force regneration of ProjectFile
 if exist %ProjectFile% del %ProjectFile%
 
-for /D %%i in (compiler1, compiler2, tiered, core) do (
+for /D %%i in (compiler1, compiler2, tiered ) do (
 echo -- %%i --
 echo # Generated file!                                                        >    %HotSpotBuildSpace%\%%i\local.make
 echo # Changing a variable below and then deleting %ProjectFile% will cause  >>    %HotSpotBuildSpace%\%%i\local.make
--- a/make/windows/create_obj_files.sh	Thu Aug 22 09:39:54 2013 -0700
+++ b/make/windows/create_obj_files.sh	Thu Sep 05 11:04:39 2013 -0700
@@ -73,19 +73,17 @@
 
 BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles ${GENERATED}/tracefiles"
 
-if [ -d "${ALTSRC}/share/vm/jfr" ]; then
-  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr"
+if [ -d "${ALTSRC}/share/vm/jfr/buffers" ]; then
   BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/buffers"
 fi
 
 BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/share/vm/prims/wbtestmethods"
 
-CORE_PATHS="${BASE_PATHS}"
 # shared is already in BASE_PATHS. Should add vm/memory but that one is also in BASE_PATHS.
 if [ -d "${ALTSRC}/share/vm/gc_implementation" ]; then
-  CORE_PATHS="${CORE_PATHS} `$FIND ${ALTSRC}/share/vm/gc_implementation ! -name gc_implementation -prune -type d \! -name shared`"
+  BASE_PATHS="${BASE_PATHS} `$FIND ${ALTSRC}/share/vm/gc_implementation ! -name gc_implementation -prune -type d \! -name shared`"
 fi
-CORE_PATHS="${CORE_PATHS} `$FIND ${COMMONSRC}/share/vm/gc_implementation ! -name gc_implementation -prune -type d \! -name shared`"
+BASE_PATHS="${BASE_PATHS} `$FIND ${COMMONSRC}/share/vm/gc_implementation ! -name gc_implementation -prune -type d \! -name shared`"
 
 if [ -d "${ALTSRC}/share/vm/c1" ]; then
   COMPILER1_PATHS="${ALTSRC}/share/vm/c1"
@@ -104,12 +102,11 @@
 
 # Include dirs per type.
 case "${TYPE}" in
-    "core")      Src_Dirs="${CORE_PATHS}" ;;
-    "compiler1") Src_Dirs="${CORE_PATHS} ${COMPILER1_PATHS}" ;;
-    "compiler2") Src_Dirs="${CORE_PATHS} ${COMPILER2_PATHS}" ;;
-    "tiered")    Src_Dirs="${CORE_PATHS} ${COMPILER1_PATHS} ${COMPILER2_PATHS}" ;;
-    "zero")      Src_Dirs="${CORE_PATHS}" ;;
-    "shark")     Src_Dirs="${CORE_PATHS}" ;;
+    "compiler1") Src_Dirs="${BASE_PATHS} ${COMPILER1_PATHS}" ;;
+    "compiler2") Src_Dirs="${BASE_PATHS} ${COMPILER2_PATHS}" ;;
+    "tiered")    Src_Dirs="${BASE_PATHS} ${COMPILER1_PATHS} ${COMPILER2_PATHS}" ;;
+    "zero")      Src_Dirs="${BASE_PATHS}" ;;
+    "shark")     Src_Dirs="${BASE_PATHS}" ;;
 esac
 
 COMPILER2_SPECIFIC_FILES="opto libadt bcEscapeAnalyzer.cpp c2_* runtime_*"
@@ -122,7 +119,6 @@
 
 # Exclude per type.
 case "${TYPE}" in
-    "core")      Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${COMPILER1_SPECIFIC_FILES} ${COMPILER2_SPECIFIC_FILES} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES} ciTypeFlow.cpp" ;;
     "compiler1") Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${COMPILER2_SPECIFIC_FILES} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES} ciTypeFlow.cpp" ;;
     "compiler2") Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${COMPILER1_SPECIFIC_FILES} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES}" ;;
     "tiered")    Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES}" ;;
@@ -149,9 +145,17 @@
    Src_Files="${Src_Files}`findsrc ${e}` "
 done 
 
-Obj_Files=
+Obj_Files=" "
 for e in ${Src_Files}; do
-	Obj_Files="${Obj_Files}${e%\.[!.]*}.obj "
+        o="${e%\.[!.]*}.obj"
+        set +e
+        chk=`expr "${Obj_Files}" : ".* $o"`
+        set -e
+        if [ "$chk" != 0 ]; then
+             echo "# INFO: skipping duplicate $o"
+             continue
+        fi
+	Obj_Files="${Obj_Files}$o "
 done
 
 echo Obj_Files=${Obj_Files}
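
The duplicate-skipping loop above guards against two source directories contributing the same basename: every object lands in one flat output directory, so a second foo.obj would silently overwrite the first. The same check in C++ terms, using a set instead of the script's expr substring match (file names are made up):

    #include <iostream>
    #include <string>
    #include <unordered_set>
    #include <vector>

    int main() {
      std::vector<std::string> srcs = {"share/vm/a/foo.cpp",
                                       "closed/vm/b/foo.cpp",
                                       "share/vm/c/bar.cpp"};
      std::unordered_set<std::string> seen;
      for (const auto& s : srcs) {
        std::string stem = s.substr(s.find_last_of('/') + 1);
        std::string obj  = stem.substr(0, stem.find_last_of('.')) + ".obj";
        if (!seen.insert(obj).second) {           // already emitted this name
          std::cout << "# INFO: skipping duplicate " << obj << '\n';
          continue;
        }
        std::cout << "compile " << s << " -> " << obj << '\n';
      }
    }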
--- a/make/windows/makefiles/adlc.make	Thu Aug 22 09:39:54 2013 -0700
+++ b/make/windows/makefiles/adlc.make	Thu Sep 05 11:04:39 2013 -0700
@@ -55,13 +55,11 @@
 
 !if "$(Platform_arch_model)" == "$(Platform_arch)"
 SOURCES_AD=\
-  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad \
-  $(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad
+  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad 
 !else
 SOURCES_AD=\
   $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad \
-  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch).ad \
-  $(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad
+  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch).ad 
 !endif
 
 # NOTE! If you add any files here, you must also update GENERATED_NAMES_IN_DIR
--- a/make/windows/makefiles/debug.make	Thu Aug 22 09:39:54 2013 -0700
+++ b/make/windows/makefiles/debug.make	Thu Sep 05 11:04:39 2013 -0700
@@ -49,9 +49,6 @@
 # Force resources to be rebuilt every time
 $(Res_Files): FORCE
 
-vm.def: $(Obj_Files)
-	sh $(WorkSpace)/make/windows/build_vm_def.sh
-
 $(AOUT): $(Res_Files) $(Obj_Files) vm.def
 	$(LD) @<<
   $(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
--- a/make/windows/makefiles/fastdebug.make	Thu Aug 22 09:39:54 2013 -0700
+++ b/make/windows/makefiles/fastdebug.make	Thu Sep 05 11:04:39 2013 -0700
@@ -48,9 +48,6 @@
 # Force resources to be rebuilt every time
 $(Res_Files): FORCE
 
-vm.def: $(Obj_Files)
-	sh $(WorkSpace)/make/windows/build_vm_def.sh
-
 $(AOUT): $(Res_Files) $(Obj_Files) vm.def
 	$(LD) @<<
   $(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
--- a/make/windows/makefiles/product.make	Thu Aug 22 09:39:54 2013 -0700
+++ b/make/windows/makefiles/product.make	Thu Sep 05 11:04:39 2013 -0700
@@ -51,9 +51,6 @@
 # Force resources to be rebuilt every time
 $(Res_Files): FORCE
 
-vm.def: $(Obj_Files)
-	sh $(WorkSpace)/make/windows/build_vm_def.sh
-
 $(AOUT): $(Res_Files) $(Obj_Files) vm.def
 	$(LD) @<<
   $(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
--- a/make/windows/makefiles/projectcreator.make	Thu Aug 22 09:39:54 2013 -0700
+++ b/make/windows/makefiles/projectcreator.make	Thu Sep 05 11:04:39 2013 -0700
@@ -44,10 +44,11 @@
 
 # This is only used internally
 ProjectCreatorIncludesPRIVATE=\
-        -relativeInclude src\closed\share\vm \
-        -relativeInclude src\closed\os\windows\vm \
-        -relativeInclude src\closed\os_cpu\windows_$(Platform_arch)\vm \
-        -relativeInclude src\closed\cpu\$(Platform_arch)\vm \
+        -relativeAltSrcInclude src\closed \
+        -altRelativeInclude share\vm \
+        -altRelativeInclude os\windows\vm \
+        -altRelativeInclude os_cpu\windows_$(Platform_arch)\vm \
+        -altRelativeInclude cpu\$(Platform_arch)\vm \
         -relativeInclude src\share\vm \
         -relativeInclude src\share\vm\precompiled \
         -relativeInclude src\share\vm\prims\wbtestmethods \
@@ -91,7 +92,11 @@
         -disablePch        getThread_windows_$(Platform_arch).cpp \
         -disablePch_compiler2     opcodes.cpp
 
-# Common options for the IDE builds for core, c1, and c2
+!if "$(BUILD_WIN_SA)" != "1"
+BUILD_VM_DEF_FLAG=-nosa
+!endif
+
+# Common options for the IDE builds for c1 and c2
 ProjectCreatorIDEOptions=\
         $(ProjectCreatorIDEOptions) \
         -sourceBase $(HOTSPOTWORKSPACE) \
@@ -103,7 +108,7 @@
         -jdkTargetRoot $(HOTSPOTJDKDIST) \
         -define ALIGN_STACK_FRAMES \
         -define VM_LITTLE_ENDIAN \
-        -prelink  "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b	set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME)	set JAVA_HOME=$(HOTSPOTJDKDIST)	$(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LD_VER)" \
+        -prelink  "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b	set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME)	set JAVA_HOME=$(HOTSPOTJDKDIST)	$(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(BUILD_VM_DEF_FLAG) $(LD_VER)" \
         -ignoreFile jsig.c \
         -ignoreFile jvmtiEnvRecommended.cpp \
         -ignoreFile jvmtiEnvStub.cpp \
@@ -158,18 +163,10 @@
  -ignoreFile_TARGET $(Platform_arch_model).ad
 
 ##################################################
-# Without compiler(core) specific options
-##################################################
-ProjectCreatorIDEOptions=$(ProjectCreatorIDEOptions) \
-$(ProjectCreatorIDEOptionsIgnoreCompiler1:TARGET=core) \
-$(ProjectCreatorIDEOptionsIgnoreCompiler2:TARGET=core)
-
-##################################################
 # Client(C1) compiler specific options
 ##################################################
 ProjectCreatorIDEOptions=$(ProjectCreatorIDEOptions) \
  -define_compiler1 COMPILER1 \
- -ignorePath_compiler1 core \
 $(ProjectCreatorIDEOptionsIgnoreCompiler2:TARGET=compiler1)
 
 ##################################################
@@ -178,7 +175,6 @@
 #NOTE! This list must be kept in sync with GENERATED_NAMES in adlc.make.
 ProjectCreatorIDEOptions=$(ProjectCreatorIDEOptions) \
  -define_compiler2 COMPILER2 \
- -ignorePath_compiler2 core \
  -additionalFile_compiler2 $(Platform_arch_model).ad \
  -additionalFile_compiler2 ad_$(Platform_arch_model).cpp \
  -additionalFile_compiler2 ad_$(Platform_arch_model).hpp \
--- a/make/windows/makefiles/trace.make	Thu Aug 22 09:39:54 2013 -0700
+++ b/make/windows/makefiles/trace.make	Thu Sep 05 11:04:39 2013 -0700
@@ -90,25 +90,25 @@
 !if "$(OPENJDK)" == "true"
 
 $(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
-	@echo Generating $@
+	@echo Generating OpenJDK $@
 	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
 
 !else
 
 $(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
-	@echo Generating $@
+	@echo Generating AltSrc $@
 	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
 
 $(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
-	@echo Generating $@
+	@echo Generating AltSrc $@
 	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceProducer.xsl -OUT $(TraceOutDir)/traceProducer.cpp
 
 $(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
-	@echo Generating $@
+	@echo Generating AltSrc $@
 	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceRequestables.xsl -OUT $(TraceOutDir)/traceRequestables.hpp
 
 $(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
-	@echo Generating $@
+	@echo Generating AltSrc $@
 	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventControl.xsl -OUT $(TraceOutDir)/traceEventControl.hpp
 
 !endif
--- a/make/windows/makefiles/vm.make	Thu Aug 22 09:39:54 2013 -0700
+++ b/make/windows/makefiles/vm.make	Thu Sep 05 11:04:39 2013 -0700
@@ -36,10 +36,6 @@
 CXX_FLAGS=$(CXX_FLAGS) /D "ASSERT"
 !endif
 
-!if "$(Variant)" == "core"
-# No need to define anything, CORE is defined as !COMPILER1 && !COMPILER2
-!endif
-
 !if "$(Variant)" == "compiler1"
 CXX_FLAGS=$(CXX_FLAGS) /D "COMPILER1"
 !endif
@@ -397,3 +393,11 @@
 _build_pch_file.obj:
         @echo #include "precompiled.hpp" > ../generated/_build_pch_file.cpp
         $(CXX) $(CXX_FLAGS) /Fp"vm.pch" /Yc"precompiled.hpp" /c ../generated/_build_pch_file.cpp
+
+!if "$(BUILD_WIN_SA)" != "1"
+BUILD_VM_DEF_FLAG=-nosa
+!endif
+
+vm.def: $(Obj_Files)
+	sh $(WorkSpace)/make/windows/build_vm_def.sh $(BUILD_VM_DEF_FLAG)
+
--- a/make/windows/projectfiles/common/Makefile	Thu Aug 22 09:39:54 2013 -0700
+++ b/make/windows/projectfiles/common/Makefile	Thu Sep 05 11:04:39 2013 -0700
@@ -112,6 +112,7 @@
 ProjectCreatorIDEOptions = $(ProjectCreatorIDEOptions) $(ReleaseOptions)
 
 $(HOTSPOTBUILDSPACE)/$(ProjectFile): $(HOTSPOTBUILDSPACE)/classes/ProjectCreator.class
+	@if "$(MSC_VER)"=="1500" echo Make sure you have VS2008 SP1 or later, or you may see 'expanded command line too long'
 	@$(RUN_JAVA) -Djava.class.path="$(HOTSPOTBUILDSPACE)/classes" ProjectCreator WinGammaPlatform$(VcVersion) $(ProjectCreatorIDEOptions)
 
 clean:
--- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "interpreter/interpreter.hpp"
 #include "memory/cardTableModRefBS.hpp"
 #include "memory/resourceArea.hpp"
+#include "memory/universe.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -1145,7 +1146,7 @@
   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
-  narrowOop encoded_k = oopDesc::encode_klass(k);
+  narrowOop encoded_k = Klass::encode_klass(k);
 
   assert_not_delayed();
   // Relocation with special format (see relocInfo_sparc.hpp).
@@ -1419,7 +1420,6 @@
   load_klass(O0_obj, O0_obj);
   // assert((klass != NULL)
   br_null_short(O0_obj, pn, fail);
-  // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers
 
   wrccr( O5_save_flags ); // Restore CCR's
 
@@ -4089,52 +4089,91 @@
 }
 
 void MacroAssembler::encode_klass_not_null(Register r) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
   assert (UseCompressedKlassPointers, "must be compressed");
-  assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
-  if (Universe::narrow_klass_base() != NULL)
-    sub(r, G6_heapbase, r);
-  srlx(r, LogKlassAlignmentInBytes, r);
+  assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
+  assert(r != G6_heapbase, "bad register choice");
+  set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
+  sub(r, G6_heapbase, r);
+  if (Universe::narrow_klass_shift() != 0) {
+    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+    srlx(r, LogKlassAlignmentInBytes, r);
+  }
+  reinit_heapbase();
 }
 
 void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
-  assert (UseCompressedKlassPointers, "must be compressed");
-  assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
-  if (Universe::narrow_klass_base() == NULL) {
-    srlx(src, LogKlassAlignmentInBytes, dst);
+  if (src == dst) {
+    encode_klass_not_null(src);
   } else {
-    sub(src, G6_heapbase, dst);
-    srlx(dst, LogKlassAlignmentInBytes, dst);
+    assert (UseCompressedKlassPointers, "must be compressed");
+    assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
+    set((intptr_t)Universe::narrow_klass_base(), dst);
+    sub(src, dst, dst);
+    if (Universe::narrow_klass_shift() != 0) {
+      srlx(dst, LogKlassAlignmentInBytes, dst);
+    }
   }
 }
 
+// Function instr_size_for_decode_klass_not_null() counts the instructions
+// generated by decode_klass_not_null() and reinit_heapbase().  Hence, if
+// the instructions they generate change, then this method needs to be updated.
+int MacroAssembler::instr_size_for_decode_klass_not_null() {
+  assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
+  // set + add + set
+  int num_instrs = insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 1 +
+    insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
+  if (Universe::narrow_klass_shift() == 0) {
+    return num_instrs * BytesPerInstWord;
+  } else { // sllx
+    return (num_instrs + 1) * BytesPerInstWord;
+  }
+}
+
+// !!! If the instructions that get generated here change, then the function
+// instr_size_for_decode_klass_not_null() needs to be updated.
 void  MacroAssembler::decode_klass_not_null(Register r) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
   // Do not add assert code to this unless you change vtableStubs_sparc.cpp
   // pd_code_size_limit.
   assert (UseCompressedKlassPointers, "must be compressed");
-  assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
-  sllx(r, LogKlassAlignmentInBytes, r);
-  if (Universe::narrow_klass_base() != NULL)
-    add(r, G6_heapbase, r);
+  assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
+  assert(r != G6_heapbase, "bad register choice");
+  set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
+  if (Universe::narrow_klass_shift() != 0)
+    sllx(r, LogKlassAlignmentInBytes, r);
+  add(r, G6_heapbase, r);
+  reinit_heapbase();
 }
 
 void  MacroAssembler::decode_klass_not_null(Register src, Register dst) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
-  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
-  // pd_code_size_limit.
-  assert (UseCompressedKlassPointers, "must be compressed");
-  assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
-  sllx(src, LogKlassAlignmentInBytes, dst);
-  if (Universe::narrow_klass_base() != NULL)
-    add(dst, G6_heapbase, dst);
+  if (src == dst) {
+    decode_klass_not_null(src);
+  } else {
+    // Do not add assert code to this unless you change vtableStubs_sparc.cpp
+    // pd_code_size_limit.
+    assert (UseCompressedKlassPointers, "must be compressed");
+    assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
+    if (Universe::narrow_klass_shift() != 0) {
+      assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
+      set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
+      sllx(src, LogKlassAlignmentInBytes, dst);
+      add(dst, G6_heapbase, dst);
+      reinit_heapbase();
+    } else {
+      set((intptr_t)Universe::narrow_klass_base(), dst);
+      add(src, dst, dst);
+    }
+  }
 }
 
 void MacroAssembler::reinit_heapbase() {
   if (UseCompressedOops || UseCompressedKlassPointers) {
-    AddressLiteral base(Universe::narrow_ptrs_base_addr());
-    load_ptr_contents(base, G6_heapbase);
+    if (Universe::heap() != NULL) {
+      set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase);
+    } else {
+      AddressLiteral base(Universe::narrow_ptrs_base_addr());
+      load_ptr_contents(base, G6_heapbase);
+    }
   }
 }
 
--- a/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -1177,6 +1177,9 @@
   void push_CPU_state();
   void pop_CPU_state();
 
+  // Returns the byte size of the instructions generated by decode_klass_not_null().
+  static int instr_size_for_decode_klass_not_null();
+
   // if heap base register is used - reinit it with the correct value
   void reinit_heapbase();
 
--- a/src/cpu/sparc/vm/relocInfo_sparc.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/sparc/vm/relocInfo_sparc.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -97,7 +97,7 @@
     guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
     if (format() != 0) {
       assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type, "only narrow oops or klasses case");
-      jint np = type() == relocInfo::oop_type ? oopDesc::encode_heap_oop((oop)x) : oopDesc::encode_klass((Klass*)x);
+      jint np = type() == relocInfo::oop_type ? oopDesc::encode_heap_oop((oop)x) : Klass::encode_klass((Klass*)x);
       inst &= ~Assembler::hi22(-1);
       inst |=  Assembler::hi22((intptr_t)np);
       if (verify_only) {
--- a/src/cpu/sparc/vm/sparc.ad	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/sparc/vm/sparc.ad	Thu Sep 05 11:04:39 2013 -0700
@@ -559,10 +559,7 @@
     int klass_load_size;
     if (UseCompressedKlassPointers) {
       assert(Universe::heap() != NULL, "java heap should be initialized");
-      if (Universe::narrow_klass_base() == NULL)
-        klass_load_size = 2*BytesPerInstWord; // see MacroAssembler::load_klass()
-      else
-        klass_load_size = 3*BytesPerInstWord;
+      klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
     } else {
       klass_load_size = 1*BytesPerInstWord;
     }
@@ -1663,9 +1660,12 @@
   if (UseCompressedKlassPointers) {
     assert(Universe::heap() != NULL, "java heap should be initialized");
     st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
-    st->print_cr("\tSLL    R_G5,3,R_G5");
-    if (Universe::narrow_klass_base() != NULL)
-      st->print_cr("\tADD    R_G5,R_G6_heap_base,R_G5");
+    st->print_cr("\tSET    Universe::narrow_klass_base,R_G6_heap_base");
+    if (Universe::narrow_klass_shift() != 0) {
+      st->print_cr("\tSLL    R_G5,3,R_G5");
+    }
+    st->print_cr("\tADD    R_G5,R_G6_heap_base,R_G5");
+    st->print_cr("\tSET    Universe::narrow_ptrs_base,R_G6_heap_base");
   } else {
     st->print_cr("\tLDX    [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
   }
@@ -2563,10 +2563,7 @@
       int klass_load_size;
       if (UseCompressedKlassPointers) {
         assert(Universe::heap() != NULL, "java heap should be initialized");
-        if (Universe::narrow_klass_base() == NULL)
-          klass_load_size = 2*BytesPerInstWord;
-        else
-          klass_load_size = 3*BytesPerInstWord;
+        klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
       } else {
         klass_load_size = 1*BytesPerInstWord;
       }
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -1887,6 +1887,27 @@
   if (ProfileInterpreter) {
     __ set_method_data_pointer_for_bcp();
   }
+
+#if INCLUDE_JVMTI
+  if (EnableInvokeDynamic) {
+    Label L_done;
+
+    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode
+    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokestatic, Assembler::notEqual, Assembler::pn, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+
+    __ call_VM(G1_scratch, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), I0, Lmethod, Lbcp);
+
+    __ br_null(G1_scratch, false, Assembler::pn, L_done);
+    __ delayed()->nop();
+
+    __ st_ptr(G1_scratch, Lesp, wordSize);
+    __ bind(L_done);
+  }
+#endif // INCLUDE_JVMTI
+
   // Resume bytecode interpretation at the current bcp
   __ dispatch_next(vtos);
   // end of JVMTI PopFrame support
--- a/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -219,13 +219,13 @@
       const int basic = 5*BytesPerInstWord +
                         // shift;add for load_klass (only shift with zero heap based)
                         (UseCompressedKlassPointers ?
-                         ((Universe::narrow_klass_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
+                          MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
       return basic + slop;
     } else {
       const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
                         // shift;add for load_klass (only shift with zero heap based)
                         (UseCompressedKlassPointers ?
-                         ((Universe::narrow_klass_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
+                          MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
       return (basic + slop);
     }
   }
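
Both the SPARC assembler changes and the vtable-stub size estimates above revolve around one transformation: a compressed klass pointer is (klass - narrow_klass_base) >> narrow_klass_shift, and decoding inverts it, now always with an explicit base constant because G6_heapbase is scratched and then restored via reinit_heapbase(). The arithmetic in plain C++ (base and shift values are made up):

    #include <cassert>
    #include <cstdint>

    // Hypothetical values standing in for Universe::narrow_klass_base()/shift().
    const uintptr_t base  = 0x800000000ULL;
    const unsigned  shift = 3;               // LogKlassAlignmentInBytes

    uint32_t encode_klass_not_null(uintptr_t klass) {
      return (uint32_t)((klass - base) >> shift);   // sub; srlx
    }

    uintptr_t decode_klass_not_null(uint32_t narrow) {
      return ((uintptr_t)narrow << shift) + base;   // sllx; add
    }

    int main() {
      uintptr_t k = base + 0x12340;   // klass addresses are 8-byte aligned
      assert(decode_klass_not_null(encode_klass_not_null(k)) == k);
      return 0;
    }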
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -30,6 +30,7 @@
 #include "interpreter/interpreter.hpp"
 #include "memory/cardTableModRefBS.hpp"
 #include "memory/resourceArea.hpp"
+#include "memory/universe.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -4810,23 +4811,8 @@
 }
 
 void MacroAssembler::load_prototype_header(Register dst, Register src) {
-#ifdef _LP64
-  if (UseCompressedKlassPointers) {
-    assert (Universe::heap() != NULL, "java heap should be initialized");
-    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
-    if (Universe::narrow_klass_shift() != 0) {
-      assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
-      assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
-      movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset()));
-    } else {
-      movq(dst, Address(dst, Klass::prototype_header_offset()));
-    }
-  } else
-#endif
-  {
-    movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
-    movptr(dst, Address(dst, Klass::prototype_header_offset()));
-  }
+  load_klass(dst, src);
+  movptr(dst, Address(dst, Klass::prototype_header_offset()));
 }
 
 void MacroAssembler::store_klass(Register dst, Register src) {
@@ -4914,7 +4900,7 @@
 
 #ifdef ASSERT
 void MacroAssembler::verify_heapbase(const char* msg) {
-  assert (UseCompressedOops || UseCompressedKlassPointers, "should be compressed");
+  assert (UseCompressedOops, "should be compressed");
   assert (Universe::heap() != NULL, "java heap should be initialized");
   if (CheckCompressedOops) {
     Label ok;
@@ -5058,69 +5044,80 @@
 }
 
 void MacroAssembler::encode_klass_not_null(Register r) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
-#ifdef ASSERT
-  verify_heapbase("MacroAssembler::encode_klass_not_null: heap base corrupted?");
-#endif
-  if (Universe::narrow_klass_base() != NULL) {
-    subq(r, r12_heapbase);
-  }
+  assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
+  // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
+  assert(r != r12_heapbase, "Encoding a klass in r12");
+  mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
+  subq(r, r12_heapbase);
   if (Universe::narrow_klass_shift() != 0) {
     assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
     shrq(r, LogKlassAlignmentInBytes);
   }
+  reinit_heapbase();
 }
 
 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
-#ifdef ASSERT
-  verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?");
-#endif
-  if (dst != src) {
-    movq(dst, src);
-  }
-  if (Universe::narrow_klass_base() != NULL) {
-    subq(dst, r12_heapbase);
-  }
-  if (Universe::narrow_klass_shift() != 0) {
-    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
-    shrq(dst, LogKlassAlignmentInBytes);
-  }
-}
-
+  if (dst == src) {
+    encode_klass_not_null(src);
+  } else {
+    mov64(dst, (int64_t)Universe::narrow_klass_base());
+    negq(dst);
+    addq(dst, src);
+    if (Universe::narrow_klass_shift() != 0) {
+      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+      shrq(dst, LogKlassAlignmentInBytes);
+    }
+  }
+}
+
+// Function instr_size_for_decode_klass_not_null() counts the instructions
+// generated by decode_klass_not_null(Register r) and reinit_heapbase(),
+// when (Universe::heap() != NULL).  Hence, if the instructions they
+// generate change, then this method needs to be updated.
+int MacroAssembler::instr_size_for_decode_klass_not_null() {
+  assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
+  // mov64 + addq + shlq? + mov64  (for reinit_heapbase()).
+  return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
+}
+
+// !!! If the instructions that get generated here change, then the function
+// instr_size_for_decode_klass_not_null() needs to be updated.
 void  MacroAssembler::decode_klass_not_null(Register r) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
   // Note: it will change flags
+  assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
   assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert(r != r12_heapbase, "Decoding a klass in r12");
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
   if (Universe::narrow_klass_shift() != 0) {
     assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
     shlq(r, LogKlassAlignmentInBytes);
-    if (Universe::narrow_klass_base() != NULL) {
-      addq(r, r12_heapbase);
-    }
+  }
+  // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
+  mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
+  addq(r, r12_heapbase);
+  reinit_heapbase();
+}
+
+void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
+  // Note: it will change flags
+  assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
+  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  if (dst == src) {
+    decode_klass_not_null(dst);
   } else {
-    assert (Universe::narrow_klass_base() == NULL, "sanity");
-  }
-}
-
-void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
-  // Note: it will change flags
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
-  // Cannot assert, unverified entry point counts instructions (see .ad file)
-  // vtableStubs also counts instructions in pd_code_size_limit.
-  // Also do not verify_oop as this is called by verify_oop.
-  if (Universe::narrow_klass_shift() != 0) {
-    assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
-    assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
-    leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
-  } else {
-    assert (Universe::narrow_klass_base() == NULL, "sanity");
-    if (dst != src) {
-      movq(dst, src);
+    // Cannot assert, unverified entry point counts instructions (see .ad file)
+    // vtableStubs also counts instructions in pd_code_size_limit.
+    // Also do not verify_oop as this is called by verify_oop.
+
+    mov64(dst, (int64_t)Universe::narrow_klass_base());
+    if (Universe::narrow_klass_shift() != 0) {
+      assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+      assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
+      leaq(dst, Address(dst, src, Address::times_8, 0));
+    } else {
+      addq(dst, src);
     }
   }
 }
@@ -5148,7 +5145,7 @@
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
-  mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+  mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
 }
 
 void  MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
@@ -5156,7 +5153,7 @@
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
-  mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+  mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
 }
 
 void  MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
@@ -5182,7 +5179,7 @@
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
-  Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+  Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
 }
 
 void  MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
@@ -5190,14 +5187,23 @@
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
-  Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+  Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
 }
 
 void MacroAssembler::reinit_heapbase() {
   if (UseCompressedOops || UseCompressedKlassPointers) {
-    movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
-  }
-}
+    if (Universe::heap() != NULL) {
+      if (Universe::narrow_oop_base() == NULL) {
+        MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
+      } else {
+        mov64(r12_heapbase, (int64_t)Universe::narrow_ptrs_base());
+      }
+    } else {
+      movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
+    }
+  }
+}
+
 #endif // _LP64
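
One detail in the x86 encode_klass_not_null(dst, src) path above is worth spelling out: when dst != src it computes src - base without touching r12 by loading the base into dst and negating it first. The arithmetic, checked in plain C++ (values are made up):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t base = 0x7f0000000000ULL;   // stand-in for narrow_klass_base
      const uint64_t src  = base + 0x9000;       // a klass address
      uint64_t dst = base;                       // mov64  dst, base
      dst = 0 - dst;                             // negq   dst (two's complement)
      dst += src;                                // addq   dst, src
      assert(dst == src - base);                 // dst now holds src - base
      return 0;
    }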
 
 
--- a/src/cpu/x86/vm/macroAssembler_x86.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/x86/vm/macroAssembler_x86.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -371,6 +371,10 @@
   void cmp_narrow_klass(Register dst, Klass* k);
   void cmp_narrow_klass(Address dst, Klass* k);
 
+  // Returns the byte size of the instructions generated by decode_klass_not_null()
+  // when compressed klass pointers are being used.
+  static int instr_size_for_decode_klass_not_null();
+
   // if heap base register is used - reinit it with the correct value
   void reinit_heapbase();
 
--- a/src/cpu/x86/vm/relocInfo_x86.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/x86/vm/relocInfo_x86.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,9 +55,9 @@
     }
   } else {
       if (verify_only) {
-        assert(*(uint32_t*) disp == oopDesc::encode_klass((Klass*)x), "instructions must match");
+        assert(*(uint32_t*) disp == Klass::encode_klass((Klass*)x), "instructions must match");
       } else {
-        *(int32_t*) disp = oopDesc::encode_klass((Klass*)x);
+        *(int32_t*) disp = Klass::encode_klass((Klass*)x);
       }
     }
   } else {
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -675,7 +675,6 @@
     __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
     __ testptr(rax, rax);
     __ jcc(Assembler::zero, error);              // if klass is NULL it is broken
-    // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers
 
     // return if everything seems ok
     __ bind(exit);
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -1021,7 +1021,6 @@
     __ load_klass(rax, rax);  // get klass
     __ testptr(rax, rax);
     __ jcc(Assembler::zero, error); // if klass is NULL it is broken
-    // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers
 
     // return if everything seems ok
     __ bind(exit);
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -1920,6 +1920,29 @@
   __ get_thread(thread);
   __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
 
+#if INCLUDE_JVMTI
+  if (EnableInvokeDynamic) {
+    Label L_done;
+    const Register local0 = rdi;
+
+    __ cmpb(Address(rsi, 0), Bytecodes::_invokestatic);
+    __ jcc(Assembler::notEqual, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+
+    __ get_method(rdx);
+    __ movptr(rax, Address(local0, 0));
+    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rsi);
+
+    __ testptr(rax, rax);
+    __ jcc(Assembler::zero, L_done);
+
+    __ movptr(Address(rbx, 0), rax);
+    __ bind(L_done);
+  }
+#endif // INCLUDE_JVMTI
+
   __ dispatch_next(vtos);
   // end of PopFrame support
 
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -849,9 +849,9 @@
     address entry = __ pc();
 
     // rbx,: Method*
-    // rsi: senderSP must preserved for slow path, set SP to it on fast path
-    // rdx: scratch
-    // rdi: scratch
+    // r13: senderSP must be preserved for slow path, set SP to it on fast path
+    // c_rarg0: scratch (rdi on non-Win64, rcx on Win64)
+    // c_rarg1: scratch (rsi on non-Win64, rdx on Win64)
 
     Label slow_path;
     // If we need a safepoint check, generate full interpreter entry.
@@ -865,8 +865,8 @@
 
     // Load parameters
     const Register crc = rax;  // crc
-    const Register val = rdx;  // source java byte value
-    const Register tbl = rdi;  // scratch
+    const Register val = c_rarg0;  // source java byte value
+    const Register tbl = c_rarg1;  // scratch
 
     // Arguments are reversed on java expression stack
     __ movl(val, Address(rsp,   wordSize)); // byte value
@@ -880,7 +880,7 @@
 
     // _areturn
     __ pop(rdi);                // get return address
-    __ mov(rsp, rsi);           // set sp to sender sp
+    __ mov(rsp, r13);           // set sp to sender sp
     __ jmp(rdi);
 
     // generate a vanilla native entry as the slow path
@@ -919,20 +919,24 @@
     const Register crc = c_rarg0;  // crc
     const Register buf = c_rarg1;  // source java byte array address
     const Register len = c_rarg2;  // length
+    const Register off = len;      // offset (never overlaps with 'len')
 
     // Arguments are reversed on java expression stack
-    __ movl(len,   Address(rsp,   wordSize)); // Length
     // Calculate address of start element
     if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
       __ movptr(buf, Address(rsp, 3*wordSize)); // long buf
-      __ addptr(buf, Address(rsp, 2*wordSize)); // + offset
+      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
+      __ addq(buf, off); // + offset
       __ movl(crc,   Address(rsp, 5*wordSize)); // Initial CRC
     } else {
       __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
       __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
-      __ addptr(buf, Address(rsp, 2*wordSize)); // + offset
+      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
+      __ addq(buf, off); // + offset
       __ movl(crc,   Address(rsp, 4*wordSize)); // Initial CRC
     }
+    // Can now load 'len' since we're finished with 'off'
+    __ movl(len, Address(rsp, wordSize)); // Length
 
     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
     // result in rax
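
Note the ordering constraint in the entry above: 'off' aliases 'len' in one register, so the offset has to be widened and folded into the buffer address before the length can be loaded. The same dataflow in plain C++ (a hypothetical helper, not the HotSpot API):

    #include <cstdint>

    static const uint8_t* crc32_buf_addr(const uint8_t* base,
                                         int32_t offset, int32_t length,
                                         int64_t* out_length) {
      int64_t off = static_cast<int64_t>(offset);  // movl2ptr: widen 32 -> 64 bits
      const uint8_t* buf = base + off;             // addq(buf, off); 'off' is now dead
      *out_length = length;                        // movl(len, ...): register reusable
      return buf;
    }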
@@ -1929,6 +1933,29 @@
   __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
           JavaThread::popframe_inactive);
 
+#if INCLUDE_JVMTI
+  if (EnableInvokeDynamic) {
+    Label L_done;
+    const Register local0 = r14;
+
+    __ cmpb(Address(r13, 0), Bytecodes::_invokestatic);
+    __ jcc(Assembler::notEqual, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+
+    __ get_method(rdx);
+    __ movptr(rax, Address(local0, 0));
+    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, r13);
+
+    __ testptr(rax, rax);
+    __ jcc(Assembler::zero, L_done);
+
+    __ movptr(Address(rbx, 0), rax);
+    __ bind(L_done);
+  }
+#endif // INCLUDE_JVMTI
+
   __ dispatch_next(vtos);
   // end of PopFrame support
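
The guarded block above restores the hidden MemberName appendix when an _invokestatic is re-executed after PopFrame. A rough C++ restatement (the types and the runtime-call signature are simplified assumptions; member_name_arg_or_null is the runtime entry the stub calls):

    // Assumed, simplified signature for InterpreterRuntime::member_name_arg_or_null.
    extern "C" void* member_name_arg_or_null(void* local0, void* method, void* bcp);

    static void restore_member_name_after_popframe(unsigned char bytecode,
                                                   void* local0, void* method,
                                                   void* bcp, void** stack_slot) {
      if (bytecode != 0xb8 /* Bytecodes::_invokestatic */) return;
      void* member = member_name_arg_or_null(local0, method, bcp);
      if (member != nullptr) {
        *stack_slot = member;  // re-push the appendix argument
      }
    }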
 
--- a/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -211,11 +211,11 @@
   if (is_vtable_stub) {
     // Vtable stub size
     return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
-           (UseCompressedKlassPointers ? 16 : 0);  // 1 leaq can be 3 bytes + 1 long
+           (UseCompressedKlassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   } else {
     // Itable stub size
     return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
-           (UseCompressedKlassPointers ? 32 : 0);  // 2 leaqs
+           (UseCompressedKlassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   }
   // In order to tune these parameters, run the JVM with VM options
   // +PrintMiscellaneous and +WizardMode to see information about
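
The constant-to-query change matters because decode_klass_not_null no longer has a fixed length: it depends on whether a klass base and shift are in use. Illustrative arithmetic only; the byte counts below are assumptions, not the exact x86 encodings:

    static int decode_klass_size_estimate(bool has_shift) {
      int size = 10;              // mov64 reg, imm64: REX.W + opcode + 8-byte immediate
      size += has_shift ? 4 : 3;  // leaq reg,[reg+reg*8] vs. addq reg,reg (approximate)
      return size;
    }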
--- a/src/cpu/x86/vm/x86_64.ad	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/x86/vm/x86_64.ad	Thu Sep 05 11:04:39 2013 -0700
@@ -1393,9 +1393,7 @@
 {
   if (UseCompressedKlassPointers) {
     st->print_cr("movl    rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
-    if (Universe::narrow_klass_shift() != 0) {
-      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
-    }
+    st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
     st->print_cr("\tcmpq    rax, rscratch1\t # Inline cache check");
   } else {
     st->print_cr("\tcmpq    rax, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t"
@@ -4035,146 +4033,6 @@
   %}
 %}
 
-operand indirectNarrowKlass(rRegN reg)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(DecodeNKlass reg);
-
-  format %{ "[$reg]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index(0x4);
-    scale(0x0);
-    disp(0x0);
-  %}
-%}
-
-operand indOffset8NarrowKlass(rRegN reg, immL8 off)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (DecodeNKlass reg) off);
-
-  format %{ "[$reg + $off (8-bit)]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index(0x4);
-    scale(0x0);
-    disp($off);
-  %}
-%}
-
-operand indOffset32NarrowKlass(rRegN reg, immL32 off)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (DecodeNKlass reg) off);
-
-  format %{ "[$reg + $off (32-bit)]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index(0x4);
-    scale(0x0);
-    disp($off);
-  %}
-%}
-
-operand indIndexOffsetNarrowKlass(rRegN reg, rRegL lreg, immL32 off)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (AddP (DecodeNKlass reg) lreg) off);
-
-  op_cost(10);
-  format %{"[$reg + $off + $lreg]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($lreg);
-    scale(0x0);
-    disp($off);
-  %}
-%}
-
-operand indIndexNarrowKlass(rRegN reg, rRegL lreg)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (DecodeNKlass reg) lreg);
-
-  op_cost(10);
-  format %{"[$reg + $lreg]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($lreg);
-    scale(0x0);
-    disp(0x0);
-  %}
-%}
-
-operand indIndexScaleNarrowKlass(rRegN reg, rRegL lreg, immI2 scale)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (DecodeNKlass reg) (LShiftL lreg scale));
-
-  op_cost(10);
-  format %{"[$reg + $lreg << $scale]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($lreg);
-    scale($scale);
-    disp(0x0);
-  %}
-%}
-
-operand indIndexScaleOffsetNarrowKlass(rRegN reg, immL32 off, rRegL lreg, immI2 scale)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (AddP (DecodeNKlass reg) (LShiftL lreg scale)) off);
-
-  op_cost(10);
-  format %{"[$reg + $off + $lreg << $scale]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($lreg);
-    scale($scale);
-    disp($off);
-  %}
-%}
-
-operand indCompressedKlassOffset(rRegN reg, immL32 off) %{
-  predicate(UseCompressedKlassPointers && (Universe::narrow_klass_shift() == Address::times_8));
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (DecodeNKlass reg) off);
-
-  op_cost(10);
-  format %{"[R12 + $reg << 3 + $off] (compressed klass addressing)" %}
-  interface(MEMORY_INTER) %{
-    base(0xc); // R12
-    index($reg);
-    scale(0x3);
-    disp($off);
-  %}
-%}
-
-operand indPosIndexScaleOffsetNarrowKlass(rRegN reg, immL32 off, rRegI idx, immI2 scale)
-%{
-  constraint(ALLOC_IN_RC(ptr_reg));
-  predicate(Universe::narrow_klass_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
-  match(AddP (AddP (DecodeNKlass reg) (LShiftL (ConvI2L idx) scale)) off);
-
-  op_cost(10);
-  format %{"[$reg + $off + $idx << $scale]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($idx);
-    scale($scale);
-    disp($off);
-  %}
-%}
-
 //----------Special Memory Operands--------------------------------------------
 // Stack Slot Operand - This operand is used for loading and storing temporary
 //                      values on the stack where a match requires a value to
@@ -4345,11 +4203,7 @@
                indCompressedOopOffset,
                indirectNarrow, indOffset8Narrow, indOffset32Narrow,
                indIndexOffsetNarrow, indIndexNarrow, indIndexScaleNarrow,
-               indIndexScaleOffsetNarrow, indPosIndexScaleOffsetNarrow,
-               indCompressedKlassOffset,
-               indirectNarrowKlass, indOffset8NarrowKlass, indOffset32NarrowKlass,
-               indIndexOffsetNarrowKlass, indIndexNarrowKlass, indIndexScaleNarrowKlass,
-               indIndexScaleOffsetNarrowKlass, indPosIndexScaleOffsetNarrowKlass);
+               indIndexScaleOffsetNarrow, indPosIndexScaleOffsetNarrow);
 
 //----------PIPELINE-----------------------------------------------------------
 // Rules which define the behavior of the target architectures pipeline.
@@ -6665,7 +6519,7 @@
 instruct encodeKlass_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{
   match(Set dst (EncodePKlass src));
   effect(KILL cr);
-  format %{ "encode_heap_oop_not_null $dst,$src" %}
+  format %{ "encode_klass_not_null $dst,$src" %}
   ins_encode %{
     __ encode_klass_not_null($dst$$Register, $src$$Register);
   %}
@@ -6675,7 +6529,7 @@
 instruct decodeKlass_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{
   match(Set dst (DecodeNKlass src));
   effect(KILL cr);
-  format %{ "decode_heap_oop_not_null $dst,$src" %}
+  format %{ "decode_klass_not_null $dst,$src" %}
   ins_encode %{
     Register s = $src$$Register;
     Register d = $dst$$Register;
--- a/src/cpu/zero/vm/assembler_zero.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/zero/vm/assembler_zero.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -50,6 +50,7 @@
 #ifdef ASSERT
 bool AbstractAssembler::pd_check_instruction_mark() {
   ShouldNotCallThis();
+  return false;
 }
 #endif
 
@@ -73,6 +74,7 @@
 RegisterOrConstant MacroAssembler::delayed_value_impl(
   intptr_t* delayed_value_addr, Register tmpl, int offset) {
   ShouldNotCallThis();
+  return RegisterOrConstant();
 }
 
 void MacroAssembler::store_oop(jobject obj) {
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -1008,6 +1008,7 @@
 
 address CppInterpreter::return_entry(TosState state, int length) {
   ShouldNotCallThis();
+  return NULL;
 }
 
 address CppInterpreter::deopt_entry(TosState state, int length) {
--- a/src/cpu/zero/vm/entryFrame_zero.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/zero/vm/entryFrame_zero.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -58,8 +58,8 @@
                            JavaCallWrapper* call_wrapper,
                            TRAPS);
  public:
-  JavaCallWrapper *call_wrapper() const {
-    return (JavaCallWrapper *) value_of_word(call_wrapper_off);
+  JavaCallWrapper **call_wrapper() const {
+    return (JavaCallWrapper **) addr_of_word(call_wrapper_off);
   }
 
  public:
--- a/src/cpu/zero/vm/frame_zero.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/zero/vm/frame_zero.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -116,6 +116,7 @@
 
 bool frame::safe_for_sender(JavaThread *thread) {
   ShouldNotCallThis();
+  return false;
 }
 
 void frame::pd_gc_epilog() {
@@ -123,6 +124,7 @@
 
 bool frame::is_interpreted_frame_valid(JavaThread *thread) const {
   ShouldNotCallThis();
+  return false;
 }
 
 BasicType frame::interpreter_frame_result(oop* oop_result,
@@ -184,9 +186,8 @@
 int frame::frame_size(RegisterMap* map) const {
 #ifdef PRODUCT
   ShouldNotCallThis();
-#else
+#endif // PRODUCT
   return 0; // make javaVFrame::print_value work
-#endif // PRODUCT
 }
 
 intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
--- a/src/cpu/zero/vm/frame_zero.inline.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/zero/vm/frame_zero.inline.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -36,7 +36,7 @@
   _deopt_state = unknown;
 }
 
-inline address  frame::sender_pc()           const { ShouldNotCallThis();  }
+inline address  frame::sender_pc()           const { ShouldNotCallThis(); return NULL; }
 
 inline frame::frame(ZeroFrame* zf, intptr_t* sp) {
   _zeroframe = zf;
@@ -89,6 +89,7 @@
 
 inline intptr_t* frame::link() const {
   ShouldNotCallThis();
+  return NULL;
 }
 
 #ifdef CC_INTERP
@@ -141,7 +142,7 @@
   return fp();
 }
 
-inline JavaCallWrapper* frame::entry_frame_call_wrapper() const {
+inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
   return zero_entryframe()->call_wrapper();
 }
 
@@ -151,14 +152,17 @@
 
 inline oop frame::saved_oop_result(RegisterMap* map) const {
   ShouldNotCallThis();
+  return NULL;
 }
 
 inline bool frame::is_older(intptr_t* id) const {
   ShouldNotCallThis();
+  return false;
 }
 
 inline intptr_t* frame::entry_frame_argument_at(int offset) const {
   ShouldNotCallThis();
+  return NULL;
 }
 
 inline intptr_t* frame::unextended_sp() const {
--- a/src/cpu/zero/vm/icBuffer_zero.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/zero/vm/icBuffer_zero.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -49,8 +49,10 @@
 address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
   // NB ic_stub_code_size() must return the size of the code we generate
   ShouldNotCallThis();
+  return NULL;
 }
 
 void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
   ShouldNotCallThis();
+  return NULL;
 }
--- a/src/cpu/zero/vm/interp_masm_zero.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/zero/vm/interp_masm_zero.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -40,6 +40,7 @@
                                         Register  tmp,
                                         int       offset) {
     ShouldNotCallThis();
+    return RegisterOrConstant();
   }
 };
 
--- a/src/cpu/zero/vm/interpreter_zero.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/zero/vm/interpreter_zero.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -64,6 +64,7 @@
     return NULL;
 
   Unimplemented();
+  return NULL;
 }
 
 address InterpreterGenerator::generate_abstract_entry() {
--- a/src/cpu/zero/vm/nativeInst_zero.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/zero/vm/nativeInst_zero.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -51,15 +51,18 @@
  public:
   bool is_jump() {
     ShouldNotCallThis();
+    return false;
   }
 
   bool is_safepoint_poll() {
     ShouldNotCallThis();
+    return false;
   }
 };
 
 inline NativeInstruction* nativeInstruction_at(address address) {
   ShouldNotCallThis();
+  return NULL;
 }
 
 class NativeCall : public NativeInstruction {
@@ -70,18 +73,22 @@
 
   address instruction_address() const {
     ShouldNotCallThis();
+    return NULL;
   }
 
   address next_instruction_address() const {
     ShouldNotCallThis();
+    return NULL;
   }
 
   address return_address() const {
     ShouldNotCallThis();
+    return NULL;
   }
 
   address destination() const {
     ShouldNotCallThis();
+    return NULL;
   }
 
   void set_destination_mt_safe(address dest) {
@@ -98,25 +105,30 @@
 
   static bool is_call_before(address return_address) {
     ShouldNotCallThis();
+    return false;
   }
 };
 
 inline NativeCall* nativeCall_before(address return_address) {
   ShouldNotCallThis();
+  return NULL;
 }
 
 inline NativeCall* nativeCall_at(address address) {
   ShouldNotCallThis();
+  return NULL;
 }
 
 class NativeMovConstReg : public NativeInstruction {
  public:
   address next_instruction_address() const {
     ShouldNotCallThis();
+    return NULL;
   }
 
   intptr_t data() const {
     ShouldNotCallThis();
+    return 0;
   }
 
   void set_data(intptr_t x) {
@@ -126,12 +138,14 @@
 
 inline NativeMovConstReg* nativeMovConstReg_at(address address) {
   ShouldNotCallThis();
+  return NULL;
 }
 
 class NativeMovRegMem : public NativeInstruction {
  public:
   int offset() const {
     ShouldNotCallThis();
+    return 0;
   }
 
   void set_offset(intptr_t x) {
@@ -145,6 +159,7 @@
 
 inline NativeMovRegMem* nativeMovRegMem_at(address address) {
   ShouldNotCallThis();
+  return NULL;
 }
 
 class NativeJump : public NativeInstruction {
@@ -155,6 +170,7 @@
 
   address jump_destination() const {
     ShouldNotCallThis();
+    return NULL;
   }
 
   void set_jump_destination(address dest) {
@@ -172,12 +188,14 @@
 
 inline NativeJump* nativeJump_at(address address) {
   ShouldNotCallThis();
+  return NULL;
 }
 
 class NativeGeneralJump : public NativeInstruction {
  public:
   address jump_destination() const {
     ShouldNotCallThis();
+    return NULL;
   }
 
   static void insert_unconditional(address code_pos, address entry) {
@@ -191,6 +209,7 @@
 
 inline NativeGeneralJump* nativeGeneralJump_at(address address) {
   ShouldNotCallThis();
+  return NULL;
 }
 
 #endif // CPU_ZERO_VM_NATIVEINST_ZERO_HPP
--- a/src/cpu/zero/vm/register_zero.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/zero/vm/register_zero.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -32,8 +32,10 @@
 
 const char* RegisterImpl::name() const {
   ShouldNotCallThis();
+  return NULL;
 }
 
 const char* FloatRegisterImpl::name() const {
   ShouldNotCallThis();
+  return NULL;
 }
--- a/src/cpu/zero/vm/relocInfo_zero.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/zero/vm/relocInfo_zero.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -37,6 +37,7 @@
 
 address Relocation::pd_call_destination(address orig_addr) {
   ShouldNotCallThis();
+  return NULL;
 }
 
 void Relocation::pd_set_call_destination(address x) {
@@ -45,6 +46,7 @@
 
 address Relocation::pd_get_address_from_code() {
   ShouldNotCallThis();
+  return NULL;
 }
 
 address* Relocation::pd_address_in_code() {
--- a/src/cpu/zero/vm/sharedRuntime_zero.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/zero/vm/sharedRuntime_zero.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -89,6 +89,7 @@
                                                             ret_type);
 #else
   ShouldNotCallThis();
+  return NULL;
 #endif // SHARK
 }
 
@@ -99,6 +100,7 @@
 
 uint SharedRuntime::out_preserve_stack_slots() {
   ShouldNotCallThis();
+  return 0;
 }
 
 JRT_LEAF(void, zero_stub())
@@ -135,4 +137,5 @@
                                          VMRegPair *regs,
                                          int total_args_passed) {
   ShouldNotCallThis();
+  return 0;
 }
--- a/src/cpu/zero/vm/stubGenerator_zero.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/zero/vm/stubGenerator_zero.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -176,6 +176,19 @@
       StubRoutines::_oop_arraycopy;
   }
 
+  static int SafeFetch32(int *adr, int errValue) {
+    int value = errValue;
+    value = *adr;
+    return value;
+  }
+
+  static intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) {
+    intptr_t value = errValue;
+    value = *adr;
+    return value;
+  }
+
+
   void generate_initial() {
     // Generates all stubs and initializes the entry points
 
@@ -225,6 +238,15 @@
 
     // arraycopy stubs used by compilers
     generate_arraycopy_stubs();
+
+    // Safefetch stubs.
+    StubRoutines::_safefetch32_entry = CAST_FROM_FN_PTR(address, StubGenerator::SafeFetch32);
+    StubRoutines::_safefetch32_fault_pc = NULL;
+    StubRoutines::_safefetch32_continuation_pc = NULL;
+
+    StubRoutines::_safefetchN_entry = CAST_FROM_FN_PTR(address, StubGenerator::SafeFetchN);
+    StubRoutines::_safefetchN_fault_pc = NULL;
+    StubRoutines::_safefetchN_continuation_pc = NULL;
   }
 
  public:
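
SafeFetch32/SafeFetchN promise callers a load that cannot crash: the stub returns errValue if the address faults. Zero has no fault-PC/continuation-PC machinery, so the stubs above are plain loads and both PCs stay NULL. A hypothetical caller sketch:

    #include <cstdint>

    // Assumed to resolve to the registered stub routine.
    extern intptr_t SafeFetchN(intptr_t* adr, intptr_t errValue);

    static intptr_t probe_word(intptr_t* maybe_unmapped) {
      const intptr_t sentinel = -1;
      // On platforms with real stubs this never faults; on Zero the caller
      // must already know the address is readable.
      return SafeFetchN(maybe_unmapped, sentinel);
    }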
--- a/src/cpu/zero/vm/vtableStubs_zero.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/cpu/zero/vm/vtableStubs_zero.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -39,16 +39,20 @@
 
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   ShouldNotCallThis();
+  return NULL;
 }
 
 VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
   ShouldNotCallThis();
+  return NULL;
 }
 
 int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
   ShouldNotCallThis();
+  return 0;
 }
 
 int VtableStub::pd_code_alignment() {
   ShouldNotCallThis();
+  return 0;
 }
--- a/src/os/bsd/vm/attachListener_bsd.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/os/bsd/vm/attachListener_bsd.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -445,14 +445,14 @@
 
 void AttachListener::vm_start() {
   char fn[UNIX_PATH_MAX];
-  struct stat64 st;
+  struct stat st;
   int ret;
 
   int n = snprintf(fn, UNIX_PATH_MAX, "%s/.java_pid%d",
            os::get_temp_directory(), os::current_process_id());
   assert(n < (int)UNIX_PATH_MAX, "java_pid file name buffer overflow");
 
-  RESTARTABLE(::stat64(fn, &st), ret);
+  RESTARTABLE(::stat(fn, &st), ret);
   if (ret == 0) {
     ret = ::unlink(fn);
     if (ret == -1) {
--- a/src/os/bsd/vm/os_bsd.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/os/bsd/vm/os_bsd.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -642,13 +642,14 @@
 #endif
 
 #ifdef __APPLE__
-static uint64_t locate_unique_thread_id() {
+static uint64_t locate_unique_thread_id(mach_port_t mach_thread_port) {
   // Additional thread_id used to correlate threads in SA
   thread_identifier_info_data_t     m_ident_info;
   mach_msg_type_number_t            count = THREAD_IDENTIFIER_INFO_COUNT;
 
-  thread_info(::mach_thread_self(), THREAD_IDENTIFIER_INFO,
+  thread_info(mach_thread_port, THREAD_IDENTIFIER_INFO,
               (thread_info_t) &m_ident_info, &count);
+
   return m_ident_info.thread_id;
 }
 #endif
@@ -679,9 +680,14 @@
   }
 
 #ifdef __APPLE__
-  // thread_id is mach thread on macos
-  osthread->set_thread_id(::mach_thread_self());
-  osthread->set_unique_thread_id(locate_unique_thread_id());
+  // thread_id is the mach thread port on macos, which pthreads graciously caches and provides for us
+  mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self());
+  guarantee(thread_id != 0, "thread id missing from pthreads");
+  osthread->set_thread_id(thread_id);
+
+  uint64_t unique_thread_id = locate_unique_thread_id(thread_id);
+  guarantee(unique_thread_id != 0, "unique thread id was not found");
+  osthread->set_unique_thread_id(unique_thread_id);
 #else
   // thread_id is pthread_id on BSD
   osthread->set_thread_id(::pthread_self());
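
pthread_mach_thread_np() returns the Mach port that the pthreads library already caches for the thread, whereas mach_thread_self() takes out a new port reference on every call and leaks send rights unless each one is deallocated. A self-contained sketch of the preferred pattern:

    #ifdef __APPLE__
    #include <pthread.h>
    #include <mach/mach.h>

    static mach_port_t current_thread_port() {
      // Cached by pthreads; no new port reference is created.
      return pthread_mach_thread_np(pthread_self());
    }
    #endif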
@@ -843,8 +849,14 @@
 
   // Store pthread info into the OSThread
 #ifdef __APPLE__
-  osthread->set_thread_id(::mach_thread_self());
-  osthread->set_unique_thread_id(locate_unique_thread_id());
+  // thread_id is the mach thread port on macos, which pthreads graciously caches and provides for us
+  mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self());
+  guarantee(thread_id != 0, "just checking");
+  osthread->set_thread_id(thread_id);
+
+  uint64_t unique_thread_id = locate_unique_thread_id(thread_id);
+  guarantee(unique_thread_id != 0, "just checking");
+  osthread->set_unique_thread_id(unique_thread_id);
 #else
   osthread->set_thread_id(::pthread_self());
 #endif
@@ -1115,7 +1127,7 @@
 
 intx os::current_thread_id() {
 #ifdef __APPLE__
-  return (intx)::mach_thread_self();
+  return (intx)::pthread_mach_thread_np(::pthread_self());
 #else
   return (intx)::pthread_self();
 #endif
@@ -2267,7 +2279,9 @@
 }
 
 
-char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  fatal("This code is not used or maintained.");
+
   // "exec" is passed in but not used.  Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseSHM, "only for SHM large pages");
@@ -3229,11 +3243,15 @@
     // and if UserSignalHandler is installed all bets are off
     if (CheckJNICalls) {
       if (libjsig_is_loaded) {
-        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
+        if (PrintJNIResolving) {
+          tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
+        }
         check_signals = false;
       }
       if (AllowUserSignalHandlers) {
-        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
+        if (PrintJNIResolving) {
+          tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
+        }
         check_signals = false;
       }
     }
@@ -4692,3 +4710,8 @@
   return n;
 }
 
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+  // No tests available for this platform
+}
+#endif
--- a/src/os/linux/vm/globals_linux.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/os/linux/vm/globals_linux.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -40,6 +40,9 @@
   product(bool, UseHugeTLBFS, false,                                    \
           "Use MAP_HUGETLB for large pages")                            \
                                                                         \
+  product(bool, UseTransparentHugePages, false,                         \
+          "Use MADV_HUGEPAGE for large pages")                          \
+                                                                        \
   product(bool, LoadExecStackDllInVMThread, true,                       \
           "Load DLLs with executable-stack attribute in the VM Thread") \
                                                                         \
--- a/src/os/linux/vm/os_linux.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -2676,36 +2676,7 @@
 
 int os::Linux::commit_memory_impl(char* addr, size_t size,
                                   size_t alignment_hint, bool exec) {
-  int err;
-  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
-    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
-    uintptr_t res =
-      (uintptr_t) ::mmap(addr, size, prot,
-                         MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
-                         -1, 0);
-    if (res != (uintptr_t) MAP_FAILED) {
-      if (UseNUMAInterleaving) {
-        numa_make_global(addr, size);
-      }
-      return 0;
-    }
-
-    err = errno;  // save errno from mmap() call above
-
-    if (!recoverable_mmap_error(err)) {
-      // However, it is not clear that this loss of our reserved mapping
-      // happens with large pages on Linux or that we cannot recover
-      // from the loss. For now, we just issue a warning and we don't
-      // call vm_exit_out_of_memory(). This issue is being tracked by
-      // JBS-8007074.
-      warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
-//    vm_exit_out_of_memory(size, OOM_MMAP_ERROR,
-//                          "committing reserved memory.");
-    }
-    // Fall through and try to use small pages
-  }
-
-  err = os::Linux::commit_memory_impl(addr, size, exec);
+  int err = os::Linux::commit_memory_impl(addr, size, exec);
   if (err == 0) {
     realign_memory(addr, size, alignment_hint);
   }
@@ -2730,7 +2701,7 @@
 }
 
 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
-  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
+  if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
     // We don't check the return value: madvise(MADV_HUGEPAGE) may not
     // be supported or the memory may already be backed by huge pages.
     ::madvise(addr, bytes, MADV_HUGEPAGE);
@@ -2743,7 +2714,7 @@
   // uncommitted at all. We don't do anything in this case to avoid creating a segment with
   // small pages on top of the SHM segment. This method always works for small pages, so we
   // allow that in any case.
-  if (alignment_hint <= (size_t)os::vm_page_size() || !UseSHM) {
+  if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
     commit_memory(addr, bytes, alignment_hint, !ExecMem);
   }
 }
@@ -3113,11 +3084,31 @@
   return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
 }
 
+bool os::Linux::transparent_huge_pages_sanity_check(bool warn, size_t page_size) {
+  bool result = false;
+  void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
+                 MAP_ANONYMOUS|MAP_PRIVATE,
+                 -1, 0);
+  if (p != MAP_FAILED) {
+    void *aligned_p = align_ptr_up(p, page_size);
+
+    result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;
+
+    munmap(p, page_size * 2);
+  }
+
+  if (warn && !result) {
+    warning("TransparentHugePages is not supported by the operating system.");
+  }
+
+  return result;
+}
+
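
The probe over-allocates on purpose: mapping twice the huge-page size guarantees a huge-page-aligned region fits inside, and madvise(MADV_HUGEPAGE) succeeding on that aligned region is taken as THP support. A standalone, Linux-only sketch of the same test:

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>

    static bool thp_supported(size_t huge_page_size) {
      void* p = mmap(NULL, huge_page_size * 2, PROT_READ | PROT_WRITE,
                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      if (p == MAP_FAILED) return false;
      uintptr_t aligned = (reinterpret_cast<uintptr_t>(p) + huge_page_size - 1)
                          & ~(uintptr_t)(huge_page_size - 1);  // align_ptr_up
      bool ok = madvise(reinterpret_cast<void*>(aligned), huge_page_size,
                        MADV_HUGEPAGE) == 0;
      munmap(p, huge_page_size * 2);
      return ok;
    }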
 bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
   bool result = false;
-  void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE,
-                  MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
-                  -1, 0);
+  void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE,
+                 MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
+                 -1, 0);
 
   if (p != MAP_FAILED) {
     // We don't know if this really is a huge page or not.
@@ -3138,12 +3129,10 @@
       }
       fclose(fp);
     }
-    munmap (p, page_size);
-    if (result)
-      return true;
-  }
-
-  if (warn) {
+    munmap(p, page_size);
+  }
+
+  if (warn && !result) {
     warning("HugeTLBFS is not supported by the operating system.");
   }
 
@@ -3191,82 +3180,114 @@
 
 static size_t _large_page_size = 0;
 
-void os::large_page_init() {
-  if (!UseLargePages) {
-    UseHugeTLBFS = false;
-    UseSHM = false;
-    return;
-  }
-
-  if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
-    // If UseLargePages is specified on the command line try both methods,
-    // if it's default, then try only HugeTLBFS.
-    if (FLAG_IS_DEFAULT(UseLargePages)) {
-      UseHugeTLBFS = true;
-    } else {
-      UseHugeTLBFS = UseSHM = true;
-    }
-  }
-
-  if (LargePageSizeInBytes) {
-    _large_page_size = LargePageSizeInBytes;
-  } else {
-    // large_page_size on Linux is used to round up heap size. x86 uses either
-    // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
-    // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
-    // page as large as 256M.
-    //
-    // Here we try to figure out page size by parsing /proc/meminfo and looking
-    // for a line with the following format:
-    //    Hugepagesize:     2048 kB
-    //
-    // If we can't determine the value (e.g. /proc is not mounted, or the text
-    // format has been changed), we'll use the largest page size supported by
-    // the processor.
+size_t os::Linux::find_large_page_size() {
+  size_t large_page_size = 0;
+
+  // large_page_size on Linux is used to round up heap size. x86 uses either
+  // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
+  // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
+  // page as large as 256M.
+  //
+  // Here we try to figure out page size by parsing /proc/meminfo and looking
+  // for a line with the following format:
+  //    Hugepagesize:     2048 kB
+  //
+  // If we can't determine the value (e.g. /proc is not mounted, or the text
+  // format has been changed), we'll use the largest page size supported by
+  // the processor.
 
 #ifndef ZERO
-    _large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
-                       ARM_ONLY(2 * M) PPC_ONLY(4 * M);
+  large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
+                     ARM_ONLY(2 * M) PPC_ONLY(4 * M);
 #endif // ZERO
 
-    FILE *fp = fopen("/proc/meminfo", "r");
-    if (fp) {
-      while (!feof(fp)) {
-        int x = 0;
-        char buf[16];
-        if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
-          if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
-            _large_page_size = x * K;
-            break;
-          }
-        } else {
-          // skip to next line
-          for (;;) {
-            int ch = fgetc(fp);
-            if (ch == EOF || ch == (int)'\n') break;
-          }
+  FILE *fp = fopen("/proc/meminfo", "r");
+  if (fp) {
+    while (!feof(fp)) {
+      int x = 0;
+      char buf[16];
+      if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
+        if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
+          large_page_size = x * K;
+          break;
+        }
+      } else {
+        // skip to next line
+        for (;;) {
+          int ch = fgetc(fp);
+          if (ch == EOF || ch == (int)'\n') break;
         }
       }
-      fclose(fp);
     }
-  }
-
-  // print a warning if any large page related flag is specified on command line
-  bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
-
+    fclose(fp);
+  }
+
+  if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != large_page_size) {
+    warning("Setting LargePageSizeInBytes has no effect on this OS. Large page size is "
+        SIZE_FORMAT "%s.", byte_size_in_proper_unit(large_page_size),
+        proper_unit_for_byte_size(large_page_size));
+  }
+
+  return large_page_size;
+}
+
+size_t os::Linux::setup_large_page_size() {
+  _large_page_size = Linux::find_large_page_size();
   const size_t default_page_size = (size_t)Linux::page_size();
   if (_large_page_size > default_page_size) {
     _page_sizes[0] = _large_page_size;
     _page_sizes[1] = default_page_size;
     _page_sizes[2] = 0;
   }
-  UseHugeTLBFS = UseHugeTLBFS &&
-                 Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
-
-  if (UseHugeTLBFS)
+
+  return _large_page_size;
+}
+
+bool os::Linux::setup_large_page_type(size_t page_size) {
+  if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
+      FLAG_IS_DEFAULT(UseSHM) &&
+      FLAG_IS_DEFAULT(UseTransparentHugePages)) {
+    // If UseLargePages is specified on the command line try all methods,
+    // if it's default, then try only UseTransparentHugePages.
+    if (FLAG_IS_DEFAULT(UseLargePages)) {
+      UseTransparentHugePages = true;
+    } else {
+      UseHugeTLBFS = UseTransparentHugePages = UseSHM = true;
+    }
+  }
+
+  if (UseTransparentHugePages) {
+    bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages);
+    if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) {
+      UseHugeTLBFS = false;
+      UseSHM = false;
+      return true;
+    }
+    UseTransparentHugePages = false;
+  }
+
+  if (UseHugeTLBFS) {
+    bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
+    if (hugetlbfs_sanity_check(warn_on_failure, page_size)) {
+      UseSHM = false;
+      return true;
+    }
+    UseHugeTLBFS = false;
+  }
+
+  return UseSHM;
+}
+
+void os::large_page_init() {
+  if (!UseLargePages) {
+    UseHugeTLBFS = false;
+    UseTransparentHugePages = false;
     UseSHM = false;
-
-  UseLargePages = UseHugeTLBFS || UseSHM;
+    return;
+  }
+
+  size_t large_page_size = Linux::setup_large_page_size();
+  UseLargePages          = Linux::setup_large_page_type(large_page_size);
 
   set_coredump_filter();
 }
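
The selection order encoded above: transparent huge pages first, then HugeTLBFS, then SysV SHM, with each mechanism sanity-checked and switched off when its probe fails. Condensed into plain C++, with booleans standing in for the -XX flags:

    enum class LargePageType { None, TransparentHugePages, HugeTLBFS, SHM };

    static LargePageType pick_large_page_type(bool use_thp,   bool thp_ok,
                                              bool use_tlbfs, bool tlbfs_ok,
                                              bool use_shm) {
      if (use_thp   && thp_ok)   return LargePageType::TransparentHugePages;
      if (use_tlbfs && tlbfs_ok) return LargePageType::HugeTLBFS;
      if (use_shm)               return LargePageType::SHM;  // verified at reserve time
      return LargePageType::None;
    }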
@@ -3275,16 +3296,22 @@
 #define SHM_HUGETLB 04000
 #endif
 
-char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
+char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec) {
   // "exec" is passed in but not used.  Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseSHM, "only for SHM large pages");
+  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+
+  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+    return NULL; // Fall back to small pages.
+  }
 
   key_t key = IPC_PRIVATE;
   char *addr;
 
   bool warn_on_failure = UseLargePages &&
                         (!FLAG_IS_DEFAULT(UseLargePages) ||
+                         !FLAG_IS_DEFAULT(UseSHM) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes)
                         );
   char msg[128];
@@ -3332,42 +3359,219 @@
      return NULL;
   }
 
-  if ((addr != NULL) && UseNUMAInterleaving) {
-    numa_make_global(addr, bytes);
-  }
-
-  // The memory is committed
-  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
-
   return addr;
 }
 
+static void warn_on_large_pages_failure(char* req_addr, size_t bytes, int error) {
+  assert(error == ENOMEM, "Only expect to fail if no memory is available");
+
+  bool warn_on_failure = UseLargePages &&
+      (!FLAG_IS_DEFAULT(UseLargePages) ||
+       !FLAG_IS_DEFAULT(UseHugeTLBFS) ||
+       !FLAG_IS_DEFAULT(LargePageSizeInBytes));
+
+  if (warn_on_failure) {
+    char msg[128];
+    jio_snprintf(msg, sizeof(msg), "Failed to reserve large pages memory req_addr: "
+        PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
+    warning(msg);
+  }
+}
+
+char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec) {
+  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
+  assert(is_size_aligned(bytes, os::large_page_size()), "Unaligned size");
+  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+
+  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
+  char* addr = (char*)::mmap(req_addr, bytes, prot,
+                             MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB,
+                             -1, 0);
+
+  if (addr == MAP_FAILED) {
+    warn_on_large_pages_failure(req_addr, bytes, errno);
+    return NULL;
+  }
+
+  assert(is_ptr_aligned(addr, os::large_page_size()), "Must be");
+
+  return addr;
+}
+
+char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  size_t large_page_size = os::large_page_size();
+
+  assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");
+
+  // Allocate small pages.
+
+  char* start;
+  if (req_addr != NULL) {
+    assert(is_ptr_aligned(req_addr, alignment), "Must be");
+    assert(is_size_aligned(bytes, alignment), "Must be");
+    start = os::reserve_memory(bytes, req_addr);
+    assert(start == NULL || start == req_addr, "Must be");
+  } else {
+    start = os::reserve_memory_aligned(bytes, alignment);
+  }
+
+  if (start == NULL) {
+    return NULL;
+  }
+
+  assert(is_ptr_aligned(start, alignment), "Must be");
+
+  // os::reserve_memory_special will record this memory area.
+  // Need to release it here to prevent overlapping reservations.
+  MemTracker::record_virtual_memory_release((address)start, bytes);
+
+  char* end = start + bytes;
+
+  // Find the regions of the allocated chunk that can be promoted to large pages.
+  char* lp_start = (char*)align_ptr_up(start, large_page_size);
+  char* lp_end   = (char*)align_ptr_down(end, large_page_size);
+
+  size_t lp_bytes = lp_end - lp_start;
+
+  assert(is_size_aligned(lp_bytes, large_page_size), "Must be");
+
+  if (lp_bytes == 0) {
+    // The mapped region doesn't even span the start and the end of a large page.
+    // Fall back to allocate a non-special area.
+    ::munmap(start, end - start);
+    return NULL;
+  }
+
+  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
+
+
+  void* result;
+
+  if (start != lp_start) {
+    result = ::mmap(start, lp_start - start, prot,
+                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+                    -1, 0);
+    if (result == MAP_FAILED) {
+      ::munmap(lp_start, end - lp_start);
+      return NULL;
+    }
+  }
+
+  result = ::mmap(lp_start, lp_bytes, prot,
+                  MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED|MAP_HUGETLB,
+                  -1, 0);
+  if (result == MAP_FAILED) {
+    warn_on_large_pages_failure(req_addr, bytes, errno);
+    // If the mmap above fails, the large pages region will be unmapped and we
+    // have regions before and after with small pages. Release these regions.
+    //
+    // |  mapped  |  unmapped  |  mapped  |
+    // ^          ^            ^          ^
+    // start      lp_start     lp_end     end
+    //
+    ::munmap(start, lp_start - start);
+    ::munmap(lp_end, end - lp_end);
+    return NULL;
+  }
+
+  if (lp_end != end) {
+    result = ::mmap(lp_end, end - lp_end, prot,
+                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+                    -1, 0);
+    if (result == MAP_FAILED) {
+      ::munmap(start, lp_end - start);
+      return NULL;
+    }
+  }
+
+  return start;
+}
+
+char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
+  assert(is_ptr_aligned(req_addr, alignment), "Must be");
+  assert(is_power_of_2(alignment), "Must be");
+  assert(is_power_of_2(os::large_page_size()), "Must be");
+  assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");
+
+  if (is_size_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
+    return reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
+  } else {
+    return reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
+  }
+}
+
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  assert(UseLargePages, "only for large pages");
+
+  char* addr;
+  if (UseSHM) {
+    addr = os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec);
+  } else {
+    assert(UseHugeTLBFS, "must be");
+    addr = os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, req_addr, exec);
+  }
+
+  if (addr != NULL) {
+    if (UseNUMAInterleaving) {
+      numa_make_global(addr, bytes);
+    }
+
+    // The memory is committed
+    MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
+  }
+
+  return addr;
+}
+
+bool os::Linux::release_memory_special_shm(char* base, size_t bytes) {
+  // detaching the SHM segment will also delete it, see reserve_memory_special_shm()
+  return shmdt(base) == 0;
+}
+
+bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
+  return pd_release_memory(base, bytes);
+}
+
 bool os::release_memory_special(char* base, size_t bytes) {
+  assert(UseLargePages, "only for large pages");
+
   MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
-  // detaching the SHM segment will also delete it, see reserve_memory_special()
-  int rslt = shmdt(base);
-  if (rslt == 0) {
+
+  bool res;
+  if (UseSHM) {
+    res = os::Linux::release_memory_special_shm(base, bytes);
+  } else {
+    assert(UseHugeTLBFS, "must be");
+    res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
+  }
+
+  if (res) {
     tkr.record((address)base, bytes);
-    return true;
   } else {
     tkr.discard();
-    return false;
-  }
+  }
+
+  return res;
 }
 
 size_t os::large_page_size() {
   return _large_page_size;
 }
 
-// HugeTLBFS allows application to commit large page memory on demand;
-// with SysV SHM the entire memory region must be allocated as shared
+// With SysV SHM the entire memory region must be allocated as shared
 // memory.
+// HugeTLBFS allows the application to commit large page memory on demand.
+// However, when committing memory with HugeTLBFS fails, the region
+// that was supposed to be committed will lose the old reservation
+// and allow other threads to steal that memory region. Because of this
+// behavior we can't commit HugeTLBFS memory.
 bool os::can_commit_large_page_memory() {
-  return UseHugeTLBFS;
+  return UseTransparentHugePages;
 }
 
 bool os::can_execute_large_page_memory() {
-  return UseHugeTLBFS;
+  return UseTransparentHugePages || UseHugeTLBFS;
 }
 
 // Reserve memory at an arbitrary address, only if that area is
@@ -4521,21 +4725,23 @@
         UseNUMA = false;
       }
     }
-    // With SHM large pages we cannot uncommit a page, so there's not way
+    // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
     // we can make the adaptive lgrp chunk resizing work. If the user specified
-    // both UseNUMA and UseLargePages (or UseSHM) on the command line - warn and
+    // both UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn and
     // disable adaptive resizing.
-    if (UseNUMA && UseLargePages && UseSHM) {
-      if (!FLAG_IS_DEFAULT(UseNUMA)) {
-        if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseSHM)) {
+    if (UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
+      if (FLAG_IS_DEFAULT(UseNUMA)) {
+        UseNUMA = false;
+      } else {
+        if (FLAG_IS_DEFAULT(UseLargePages) &&
+            FLAG_IS_DEFAULT(UseSHM) &&
+            FLAG_IS_DEFAULT(UseHugeTLBFS)) {
           UseLargePages = false;
         } else {
-          warning("UseNUMA is not fully compatible with SHM large pages, disabling adaptive resizing");
+          warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, disabling adaptive resizing");
           UseAdaptiveSizePolicy = false;
           UseAdaptiveNUMAChunkSizing = false;
         }
-      } else {
-        UseNUMA = false;
       }
     }
     if (!UseNUMA && ForceNUMA) {
@@ -5805,3 +6011,149 @@
 }
 
 #endif // JAVASE_EMBEDDED
+
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+#define test_log(...) \
+  do {\
+    if (VerboseInternalVMTests) { \
+      tty->print_cr(__VA_ARGS__); \
+      tty->flush(); \
+    }\
+  } while (false)
+
+class TestReserveMemorySpecial : AllStatic {
+ public:
+  static void small_page_write(void* addr, size_t size) {
+    size_t page_size = os::vm_page_size();
+
+    char* end = (char*)addr + size;
+    for (char* p = (char*)addr; p < end; p += page_size) {
+      *p = 1;
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_only(size_t size) {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    test_log("test_reserve_memory_special_huge_tlbfs_only(" SIZE_FORMAT ")", size);
+
+    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false);
+
+    if (addr != NULL) {
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_huge_tlbfs(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_only() {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    size_t lp = os::large_page_size();
+
+    for (size_t size = lp; size <= lp * 10; size += lp) {
+      test_reserve_memory_special_huge_tlbfs_only(size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed(size_t size, size_t alignment) {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    test_log("test_reserve_memory_special_huge_tlbfs_mixed(" SIZE_FORMAT ", " SIZE_FORMAT ")",
+        size, alignment);
+
+    assert(size >= os::large_page_size(), "Incorrect input to test");
+
+    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
+
+    if (addr != NULL) {
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_huge_tlbfs(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(size_t size) {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+      test_reserve_memory_special_huge_tlbfs_mixed(size, alignment);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed() {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + lp / 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 - ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + lp / 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10 + lp / 2);
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs() {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    test_reserve_memory_special_huge_tlbfs_only();
+    test_reserve_memory_special_huge_tlbfs_mixed();
+  }
+
+  static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
+    if (!UseSHM) {
+      return;
+    }
+
+    test_log("test_reserve_memory_special_shm(" SIZE_FORMAT ", " SIZE_FORMAT ")", size, alignment);
+
+    char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);
+
+    if (addr != NULL) {
+      assert(is_ptr_aligned(addr, alignment), "Check");
+      assert(is_ptr_aligned(addr, os::large_page_size()), "Check");
+
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_shm(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_shm() {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    for (size_t size = ag; size < lp * 3; size += ag) {
+      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+        test_reserve_memory_special_shm(size, alignment);
+      }
+    }
+  }
+
+  static void test() {
+    test_reserve_memory_special_huge_tlbfs();
+    test_reserve_memory_special_shm();
+  }
+};
+
+void TestReserveMemorySpecial_test() {
+  TestReserveMemorySpecial::test();
+}
+
+#endif
--- a/src/os/linux/vm/os_linux.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/os/linux/vm/os_linux.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -32,6 +32,7 @@
 
 class Linux {
   friend class os;
+  friend class TestReserveMemorySpecial;
 
   // For signal-chaining
 #define MAXSIGNUM 32
@@ -92,8 +93,21 @@
   static void rebuild_cpu_to_node_map();
   static GrowableArray<int>* cpu_to_node()    { return _cpu_to_node; }
 
+  static size_t find_large_page_size();
+  static size_t setup_large_page_size();
+
+  static bool setup_large_page_type(size_t page_size);
+  static bool transparent_huge_pages_sanity_check(bool warn, size_t page_size);
   static bool hugetlbfs_sanity_check(bool warn, size_t page_size);
 
+  static char* reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);
+
+  static bool release_memory_special_shm(char* base, size_t bytes);
+  static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);
+
   static void print_full_memory_info(outputStream* st);
   static void print_distro_info(outputStream* st);
   static void print_libversion_info(outputStream* st);
--- a/src/os/posix/vm/os_posix.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/os/posix/vm/os_posix.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -165,7 +165,7 @@
   else st->print("%uk", rlim.rlim_cur >> 10);
 
   // Isn't there on solaris
-#if! defined(TARGET_OS_FAMILY_solaris) && !defined(TARGET_OS_FAMILY_aix)
+#if !defined(TARGET_OS_FAMILY_solaris) && !defined(TARGET_OS_FAMILY_aix)
   st->print(", NPROC ");
   getrlimit(RLIMIT_NPROC, &rlim);
   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
@@ -268,6 +268,54 @@
   return ::fdopen(fd, mode);
 }
 
+void* os::get_default_process_handle() {
+  return (void*)::dlopen(NULL, RTLD_LAZY);
+}
+
+// Builds a platform-dependent Agent_OnLoad_<lib_name> function name,
+// which is used to find statically linked-in agents.
+// Parameters:
+//            sym_name: Symbol in library we are looking for
+//            lib_name: Name of library to look in, NULL for shared libs.
+//            is_absolute_path == true if lib_name is absolute path to agent
+//                                     such as "/a/b/libL.so"
+//            == false if only the base name of the library is passed in
+//               such as "L"
+char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
+                                    bool is_absolute_path) {
+  char *agent_entry_name;
+  size_t len;
+  size_t name_len;
+  size_t prefix_len = strlen(JNI_LIB_PREFIX);
+  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
+  const char *start;
+
+  if (lib_name != NULL) {
+    len = name_len = strlen(lib_name);
+    if (is_absolute_path) {
+      // Need to strip path, prefix and suffix
+      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
+        lib_name = ++start;
+      }
+      if (len <= (prefix_len + suffix_len)) {
+        return NULL;
+      }
+      lib_name += prefix_len;
+      name_len = strlen(lib_name) - suffix_len;
+    }
+  }
+  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
+  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
+  if (agent_entry_name == NULL) {
+    return NULL;
+  }
+  strcpy(agent_entry_name, sym_name);
+  if (lib_name != NULL) {
+    strcat(agent_entry_name, "_");
+    strncat(agent_entry_name, lib_name, name_len);
+  }
+  return agent_entry_name;
+}
 
 // Returned string is a constant. For unknown signals "UNKNOWN" is returned.
 const char* os::Posix::get_signal_name(int sig, char* out, size_t outlen) {
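
A minimal sketch of the transformation the new os::build_agent_function_name performs on POSIX, assuming JNI_LIB_PREFIX is "lib" and JNI_LIB_SUFFIX is ".so"; the helper below is illustrative only, not the VM code:

#include <cstdio>
#include <string>

// Mirrors the path/prefix/suffix stripping and the sym_name + "_" + lib_name
// concatenation done above, using std::string instead of the VM's C buffers.
static std::string agent_function_name(const char* sym_name, const char* lib_name) {
  std::string name = lib_name;
  size_t slash = name.rfind('/');
  if (slash != std::string::npos) name = name.substr(slash + 1);  // strip path
  const std::string prefix = "lib", suffix = ".so";               // assumed platform values
  if (name.compare(0, prefix.size(), prefix) == 0) name = name.substr(prefix.size());
  if (name.size() > suffix.size()) name = name.substr(0, name.size() - suffix.size());
  return std::string(sym_name) + "_" + name;
}

int main() {
  // "/a/b/libL.so" + "Agent_OnLoad" -> "Agent_OnLoad_L"
  printf("%s\n", agent_function_name("Agent_OnLoad", "/a/b/libL.so").c_str());
  return 0;
}
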
--- a/src/os/solaris/vm/os_solaris.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/os/solaris/vm/os_solaris.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -3341,7 +3341,7 @@
   return true;
 }
 
-char* os::reserve_memory_special(size_t size, char* addr, bool exec) {
+char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
   fatal("os::reserve_memory_special should not be called on Solaris.");
   return NULL;
 }
@@ -6557,3 +6557,9 @@
 
   return strlen(buffer);
 }
+
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+  // No tests available for this platform
+}
+#endif
--- a/src/os/windows/vm/os_windows.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/os/windows/vm/os_windows.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -1642,6 +1642,8 @@
 
 void os::win32::print_windows_version(outputStream* st) {
   OSVERSIONINFOEX osvi;
+  SYSTEM_INFO si;
+
   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
 
@@ -1651,6 +1653,18 @@
   }
 
   int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;
+
+  ZeroMemory(&si, sizeof(SYSTEM_INFO));
+  if (os_vers >= 5002) {
+    // Retrieve SYSTEM_INFO via GetNativeSystemInfo so that we can
+    // determine whether we are running on a 64-bit processor.
+    if (os::Kernel32Dll::GetNativeSystemInfoAvailable()) {
+      os::Kernel32Dll::GetNativeSystemInfo(&si);
+    } else {
+      GetSystemInfo(&si);
+    }
+  }
+
   if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
     switch (os_vers) {
     case 3051: st->print(" Windows NT 3.51"); break;
@@ -1658,57 +1672,48 @@
     case 5000: st->print(" Windows 2000"); break;
     case 5001: st->print(" Windows XP"); break;
     case 5002:
-    case 6000:
-    case 6001:
-    case 6002: {
-      // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
-      // find out whether we are running on 64 bit processor or not.
-      SYSTEM_INFO si;
-      ZeroMemory(&si, sizeof(SYSTEM_INFO));
-        if (!os::Kernel32Dll::GetNativeSystemInfoAvailable()){
-          GetSystemInfo(&si);
+      if (osvi.wProductType == VER_NT_WORKSTATION &&
+          si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
+        st->print(" Windows XP x64 Edition");
       } else {
-        os::Kernel32Dll::GetNativeSystemInfo(&si);
-      }
-      if (os_vers == 5002) {
-        if (osvi.wProductType == VER_NT_WORKSTATION &&
-            si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
-          st->print(" Windows XP x64 Edition");
-        else
-            st->print(" Windows Server 2003 family");
-      } else if (os_vers == 6000) {
-        if (osvi.wProductType == VER_NT_WORKSTATION)
-            st->print(" Windows Vista");
-        else
-            st->print(" Windows Server 2008");
-        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
-            st->print(" , 64 bit");
-      } else if (os_vers == 6001) {
-        if (osvi.wProductType == VER_NT_WORKSTATION) {
-            st->print(" Windows 7");
-        } else {
-            // Unrecognized windows, print out its major and minor versions
-            st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
-        }
-        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
-            st->print(" , 64 bit");
-      } else if (os_vers == 6002) {
-        if (osvi.wProductType == VER_NT_WORKSTATION) {
-            st->print(" Windows 8");
-        } else {
-            st->print(" Windows Server 2012");
-        }
-        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
-            st->print(" , 64 bit");
-      } else { // future os
-        // Unrecognized windows, print out its major and minor versions
-        st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
-        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
-            st->print(" , 64 bit");
+        st->print(" Windows Server 2003 family");
       }
       break;
-    }
-    default: // future windows, print out its major and minor versions
+
+    case 6000:
+      if (osvi.wProductType == VER_NT_WORKSTATION) {
+        st->print(" Windows Vista");
+      } else {
+        st->print(" Windows Server 2008");
+      }
+      break;
+
+    case 6001:
+      if (osvi.wProductType == VER_NT_WORKSTATION) {
+        st->print(" Windows 7");
+      } else {
+        st->print(" Windows Server 2008 R2");
+      }
+      break;
+
+    case 6002:
+      if (osvi.wProductType == VER_NT_WORKSTATION) {
+        st->print(" Windows 8");
+      } else {
+        st->print(" Windows Server 2012");
+      }
+      break;
+
+    case 6003:
+      if (osvi.wProductType == VER_NT_WORKSTATION) {
+        st->print(" Windows 8.1");
+      } else {
+        st->print(" Windows Server 2012 R2");
+      }
+      break;
+
+    default: // future os
+      // Unrecognized windows, print out its major and minor versions
       st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
     }
   } else {
@@ -1720,6 +1725,11 @@
       st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
     }
   }
+
+  if (os_vers >= 6000 && si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
+    st->print(" , 64 bit");
+  }
+
   st->print(" Build %d", osvi.dwBuildNumber);
   st->print(" %s", osvi.szCSDVersion);           // service pack
   st->cr();
@@ -3146,7 +3156,12 @@
   return true;
 }
 
-char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) {
+  assert(UseLargePages, "only for large pages");
+
+  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+    return NULL; // Fallback to small pages.
+  }
 
   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
@@ -5384,6 +5399,75 @@
   return true;
 }
 
+void* os::get_default_process_handle() {
+  return (void*)GetModuleHandle(NULL);
+}
+
+// Builds a platform-dependent Agent_OnLoad_<lib_name> function name
+// which is used to find statically linked-in agents.
+// Additionally, for Windows, __stdcall-decorated names are taken into account.
+// Parameters:
+//            sym_name: Symbol in library we are looking for
+//            lib_name: Name of library to look in, NULL for shared libs.
+//            is_absolute_path == true if lib_name is an absolute path to the agent
+//                                     such as "C:/a/b/L.dll"
+//            == false if only the base name of the library is passed in
+//               such as "L"
+char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
+                                    bool is_absolute_path) {
+  char *agent_entry_name;
+  size_t len;
+  size_t name_len;
+  size_t prefix_len = strlen(JNI_LIB_PREFIX);
+  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
+  const char *start;
+
+  if (lib_name != NULL) {
+    len = name_len = strlen(lib_name);
+    if (is_absolute_path) {
+      // Need to strip path, prefix and suffix
+      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
+        lib_name = ++start;
+      } else {
+        // Need to check for C:
+        if ((start = strchr(lib_name, ':')) != NULL) {
+          lib_name = ++start;
+        }
+      }
+      if (len <= (prefix_len + suffix_len)) {
+        return NULL;
+      }
+      lib_name += prefix_len;
+      name_len = strlen(lib_name) - suffix_len;
+    }
+  }
+  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
+  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
+  if (agent_entry_name == NULL) {
+    return NULL;
+  }
+  if (lib_name != NULL) {
+    const char *p = strrchr(sym_name, '@');
+    if (p != NULL && p != sym_name) {
+      // sym_name == _Agent_OnLoad@XX
+      strncpy(agent_entry_name, sym_name, (p - sym_name));
+      agent_entry_name[(p-sym_name)] = '\0';
+      // agent_entry_name == _Agent_OnLoad
+      strcat(agent_entry_name, "_");
+      strncat(agent_entry_name, lib_name, name_len);
+      strcat(agent_entry_name, p);
+      // agent_entry_name == _Agent_OnLoad_lib_name@XX
+    } else {
+      strcpy(agent_entry_name, sym_name);
+      strcat(agent_entry_name, "_");
+      strncat(agent_entry_name, lib_name, name_len);
+    }
+  } else {
+    strcpy(agent_entry_name, sym_name);
+  }
+  return agent_entry_name;
+}
+
 #else
 // Kernel32 API
 typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
@@ -5628,3 +5712,9 @@
 }
 
 #endif
+
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+  // No tests available for this platform
+}
+#endif
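
On Windows the same builder also has to preserve __stdcall decoration: the "_<lib_name>" infix is spliced in before the "@NN" argument-size suffix. A hedged sketch of just that splice, without the VM's buffer management:

#include <cstdio>
#include <string>

// Illustrative splice: "_Agent_OnLoad@12" + "L" -> "_Agent_OnLoad_L@12".
static std::string splice_lib_name(const std::string& sym_name, const std::string& lib_name) {
  size_t at = sym_name.rfind('@');
  if (at == std::string::npos || at == 0) {
    return sym_name + "_" + lib_name;  // undecorated name: plain concatenation
  }
  return sym_name.substr(0, at) + "_" + lib_name + sym_name.substr(at);
}

int main() {
  printf("%s\n", splice_lib_name("_Agent_OnLoad@12", "L").c_str());
  return 0;
}
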
--- a/src/os_cpu/bsd_x86/vm/bsd_x86_32.ad	Thu Aug 22 09:39:54 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-//
-// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-//
-
-// X86 Bsd Architecture Description File
-
--- a/src/os_cpu/bsd_x86/vm/bsd_x86_64.ad	Thu Aug 22 09:39:54 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,65 +0,0 @@
-//
-// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-//
-
-// AMD64 Bsd Architecture Description File
-
-//----------OS-DEPENDENT ENCODING BLOCK----------------------------------------
-// This block specifies the encoding classes used by the compiler to
-// output byte streams.  Encoding classes generate functions which are
-// called by Machine Instruction Nodes in order to generate the bit
-// encoding of the instruction.  Operands specify their base encoding
-// interface with the interface keyword.  There are currently
-// supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
-// COND_INTER.  REG_INTER causes an operand to generate a function
-// which returns its register number when queried.  CONST_INTER causes
-// an operand to generate a function which returns the value of the
-// constant when queried.  MEMORY_INTER causes an operand to generate
-// four functions which return the Base Register, the Index Register,
-// the Scale Value, and the Offset Value of the operand when queried.
-// COND_INTER causes an operand to generate six functions which return
-// the encoding code (ie - encoding bits for the instruction)
-// associated with each basic boolean condition for a conditional
-// instruction.  Instructions specify two basic values for encoding.
-// They use the ins_encode keyword to specify their encoding class
-// (which must be one of the class names specified in the encoding
-// block), and they use the opcode keyword to specify, in order, their
-// primary, secondary, and tertiary opcode.  Only the opcode sections
-// which a particular instruction needs for encoding need to be
-// specified.
-encode %{
-  // Build emit functions for each basic byte or larger field in the intel
-  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
-  // code in the enc_class source block.  Emit functions will live in the
-  // main source block for now.  In future, we can generalize this by
-  // adding a syntax that specifies the sizes of fields in an order,
-  // so that the adlc can build the emit functions automagically
-
-%}
-
-
-// Platform dependent source
-
-source %{
-
-%}
--- a/src/os_cpu/bsd_x86/vm/orderAccess_bsd_x86.inline.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/os_cpu/bsd_x86/vm/orderAccess_bsd_x86.inline.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -190,7 +190,7 @@
 inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store_fence((volatile jlong*)p,  (jlong)v);  }
 
 inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
-inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store_fence((volatile jlong*)p, jdouble_cast(v)); }
+inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store_fence((volatile jlong*)p, jlong_cast(v)); }
 
 inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
 #ifdef AMD64
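
The one-line fix above matters because the store needs the jdouble's bit pattern reinterpreted as a jlong (jlong_cast), whereas jdouble_cast goes the other way and, fed a double, would trigger value-converting conversions that corrupt the stored bits. A sketch of the distinction, using memcpy as a stand-in for HotSpot's bit-preserving cast:

#include <cstdio>
#include <cstring>
#include <cstdint>

// Stand-in for jlong_cast: reinterpret the bits, don't convert the value.
static int64_t bit_cast_to_int64(double v) {
  int64_t bits;
  memcpy(&bits, &v, sizeof(bits));
  return bits;
}

int main() {
  double v = 1.5;
  printf("value cast: %lld\n", (long long)(int64_t)v);  // 1 -- data loss
  printf("bit cast:   0x%llx\n",
         (unsigned long long)bit_cast_to_int64(v));     // 0x3ff8000000000000
  return 0;
}
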
--- a/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -715,6 +715,7 @@
   err.report_and_die();
 
   ShouldNotReachHere();
+  return false;
 }
 
 // From solaris_i486.s ported to bsd_i486.s
--- a/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -66,6 +66,7 @@
 
 frame os::get_sender_for_C_frame(frame* fr) {
   ShouldNotCallThis();
+  return frame();
 }
 
 frame os::current_frame() {
@@ -103,16 +104,19 @@
 
 address os::Bsd::ucontext_get_pc(ucontext_t* uc) {
   ShouldNotCallThis();
+  return NULL;
 }
 
 ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                                         intptr_t** ret_sp,
                                         intptr_t** ret_fp) {
   ShouldNotCallThis();
+  return ExtendedPC();
 }
 
 frame os::fetch_frame_from_context(void* ucVoid) {
   ShouldNotCallThis();
+  return frame();
 }
 
 extern "C" JNIEXPORT int
@@ -240,6 +244,7 @@
 
   sprintf(buf, fmt, sig, info->si_addr);
   fatal(buf);
+  return false;
 }
 
 void os::Bsd::init_thread_fpu_state(void) {
@@ -373,17 +378,7 @@
 
 extern "C" {
   int SpinPause() {
-  }
-
-  int SafeFetch32(int *adr, int errValue) {
-    int value = errValue;
-    value = *adr;
-    return value;
-  }
-  intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) {
-    intptr_t value = errValue;
-    value = *adr;
-    return value;
+    return 1;
   }
 
   void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
--- a/src/os_cpu/bsd_zero/vm/thread_bsd_zero.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/os_cpu/bsd_zero/vm/thread_bsd_zero.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -110,6 +110,7 @@
                                            void* ucontext,
                                            bool isInJava) {
     ShouldNotCallThis();
+    return false;
   }
 
   // These routines are only used on cpu architectures that
--- a/src/os_cpu/linux_x86/vm/linux_x86_32.ad	Thu Aug 22 09:39:54 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-//
-// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-//
-
-// X86 Linux Architecture Description File
-
--- a/src/os_cpu/linux_x86/vm/linux_x86_64.ad	Thu Aug 22 09:39:54 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,65 +0,0 @@
-//
-// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-//
-
-// AMD64 Linux Architecture Description File
-
-//----------OS-DEPENDENT ENCODING BLOCK----------------------------------------
-// This block specifies the encoding classes used by the compiler to
-// output byte streams.  Encoding classes generate functions which are
-// called by Machine Instruction Nodes in order to generate the bit
-// encoding of the instruction.  Operands specify their base encoding
-// interface with the interface keyword.  There are currently
-// supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
-// COND_INTER.  REG_INTER causes an operand to generate a function
-// which returns its register number when queried.  CONST_INTER causes
-// an operand to generate a function which returns the value of the
-// constant when queried.  MEMORY_INTER causes an operand to generate
-// four functions which return the Base Register, the Index Register,
-// the Scale Value, and the Offset Value of the operand when queried.
-// COND_INTER causes an operand to generate six functions which return
-// the encoding code (ie - encoding bits for the instruction)
-// associated with each basic boolean condition for a conditional
-// instruction.  Instructions specify two basic values for encoding.
-// They use the ins_encode keyword to specify their encoding class
-// (which must be one of the class names specified in the encoding
-// block), and they use the opcode keyword to specify, in order, their
-// primary, secondary, and tertiary opcode.  Only the opcode sections
-// which a particular instruction needs for encoding need to be
-// specified.
-encode %{
-  // Build emit functions for each basic byte or larger field in the intel
-  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
-  // code in the enc_class source block.  Emit functions will live in the
-  // main source block for now.  In future, we can generalize this by
-  // adding a syntax that specifies the sizes of fields in an order,
-  // so that the adlc can build the emit functions automagically
-
-%}
-
-
-// Platform dependent source
-
-source %{
-
-%}
--- a/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -410,16 +410,6 @@
   int SpinPause() {
   }
 
-  int SafeFetch32(int *adr, int errValue) {
-    int value = errValue;
-    value = *adr;
-    return value;
-  }
-  intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) {
-    intptr_t value = errValue;
-    value = *adr;
-    return value;
-  }
 
   void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
     if (from > to) {
--- a/src/os_cpu/solaris_sparc/vm/solaris_sparc.ad	Thu Aug 22 09:39:54 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,27 +0,0 @@
-//
-// Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-
-//
-//
-
-// SPARC Solaris Architecture Description File
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.ad	Thu Aug 22 09:39:54 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-//
-// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-//
-
-// X86 Solaris Architecture Description File
-
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_64.ad	Thu Aug 22 09:39:54 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,63 +0,0 @@
-//
-// Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-//
-
-// AMD64 Solaris Architecture Description File
-
-//----------OS-DEPENDENT ENCODING BLOCK----------------------------------------
-// This block specifies the encoding classes used by the compiler to
-// output byte streams.  Encoding classes generate functions which are
-// called by Machine Instruction Nodes in order to generate the bit
-// encoding of the instruction.  Operands specify their base encoding
-// interface with the interface keyword.  There are currently
-// supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
-// COND_INTER.  REG_INTER causes an operand to generate a function
-// which returns its register number when queried.  CONST_INTER causes
-// an operand to generate a function which returns the value of the
-// constant when queried.  MEMORY_INTER causes an operand to generate
-// four functions which return the Base Register, the Index Register,
-// the Scale Value, and the Offset Value of the operand when queried.
-// COND_INTER causes an operand to generate six functions which return
-// the encoding code (ie - encoding bits for the instruction)
-// associated with each basic boolean condition for a conditional
-// instruction.  Instructions specify two basic values for encoding.
-// They use the ins_encode keyword to specify their encoding class
-// (which must be one of the class names specified in the encoding
-// block), and they use the opcode keyword to specify, in order, their
-// primary, secondary, and tertiary opcode.  Only the opcode sections
-// which a particular instruction needs for encoding need to be
-// specified.
-encode %{
-  // Build emit functions for each basic byte or larger field in the intel
-  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
-  // code in the enc_class source block.  Emit functions will live in the
-  // main source block for now.  In future, we can generalize this by
-  // adding a syntax that specifies the sizes of fields in an order,
-  // so that the adlc can build the emit functions automagically
-%}
-
-
-// Platform dependent source
-
-source %{
-%}
--- a/src/os_cpu/windows_x86/vm/windows_x86_32.ad	Thu Aug 22 09:39:54 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-//
-// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-//
-
-// X86 Win32 Architecture Description File
-
--- a/src/os_cpu/windows_x86/vm/windows_x86_64.ad	Thu Aug 22 09:39:54 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,63 +0,0 @@
-//
-// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-//
-
-// AMD64 Win32 Architecture Description File
-
-//----------OS-DEPENDENT ENCODING BLOCK-----------------------------------------------------
-// This block specifies the encoding classes used by the compiler to output
-// byte streams.  Encoding classes generate functions which are called by
-// Machine Instruction Nodes in order to generate the bit encoding of the
-// instruction.  Operands specify their base encoding interface with the
-// interface keyword.  There are currently supported four interfaces,
-// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER.  REG_INTER causes an
-// operand to generate a function which returns its register number when
-// queried.   CONST_INTER causes an operand to generate a function which
-// returns the value of the constant when queried.  MEMORY_INTER causes an
-// operand to generate four functions which return the Base Register, the
-// Index Register, the Scale Value, and the Offset Value of the operand when
-// queried.  COND_INTER causes an operand to generate six functions which
-// return the encoding code (ie - encoding bits for the instruction)
-// associated with each basic boolean condition for a conditional instruction.
-// Instructions specify two basic values for encoding.  They use the
-// ins_encode keyword to specify their encoding class (which must be one of
-// the class names specified in the encoding block), and they use the
-// opcode keyword to specify, in order, their primary, secondary, and
-// tertiary opcode.  Only the opcode sections which a particular instruction
-// needs for encoding need to be specified.
-encode %{
-  // Build emit functions for each basic byte or larger field in the intel
-  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
-  // code in the enc_class source block.  Emit functions will live in the
-  // main source block for now.  In future, we can generalize this by
-  // adding a syntax that specifies the sizes of fields in an order,
-  // so that the adlc can build the emit functions automagically
-
-%}
-
-
-// Platform dependent source
-
-source %{
-
-%}
--- a/src/share/tools/ProjectCreator/BuildConfig.java	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/tools/ProjectCreator/BuildConfig.java	Thu Sep 05 11:04:39 2013 -0700
@@ -142,6 +142,69 @@
         return rv;
     }
 
+    // Returns true if the specified path refers to a relative alternate
+    // source file. RelativeAltSrcInclude is usually "src\closed".
+    public static boolean matchesRelativeAltSrcInclude(String path) {
+        String relativeAltSrcInclude =
+            getFieldString(null, "RelativeAltSrcInclude");
+        Vector<String> v = getFieldVector(null, "AltRelativeInclude");
+        for (String pathPart : v) {
+            if (path.contains(relativeAltSrcInclude + Util.sep + pathPart))  {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    // Returns the relative alternate source file for the specified path.
+    // Null is returned if the specified path does not have a matching
+    // alternate source file.
+    public static String getMatchingRelativeAltSrcFile(String path) {
+        Vector<String> v = getFieldVector(null, "RelativeAltSrcFileList");
+        if (v == null) {
+            return null;
+        }
+        for (String pathPart : v) {
+            if (path.endsWith(pathPart)) {
+                String relativeAltSrcInclude =
+                    getFieldString(null, "RelativeAltSrcInclude");
+                return relativeAltSrcInclude + Util.sep + pathPart;
+            }
+        }
+        return null;
+    }
+
+    // Returns true if the specified path has a matching alternate
+    // source file.
+    public static boolean matchesRelativeAltSrcFile(String path) {
+        return getMatchingRelativeAltSrcFile(path) != null;
+    }
+
+    // Track the specified alternate source file. The source file is
+    // tracked without the leading .*<sep><RelativeAltSrcInclude><sep>
+    // part to make matching regular source files easier.
+    public static void trackRelativeAltSrcFile(String path) {
+        String pattern = getFieldString(null, "RelativeAltSrcInclude") +
+            Util.sep;
+        int altSrcInd = path.indexOf(pattern);
+        if (altSrcInd == -1) {
+            // not an AltSrc path
+            return;
+        }
+
+        altSrcInd += pattern.length();
+        if (altSrcInd >= path.length()) {
+            // not a valid AltSrc path
+            return;
+        }
+
+        String altSrcFile = path.substring(altSrcInd);
+        Vector v = getFieldVector(null, "RelativeAltSrcFileList");
+        if (v == null || !v.contains(altSrcFile)) {
+            addFieldVector(null, "RelativeAltSrcFileList", altSrcFile);
+        }
+    }
+
     void addTo(Hashtable ht, String key, String value) {
         ht.put(expandFormat(key), expandFormat(value));
     }
@@ -272,8 +335,19 @@
 
     private Vector getSourceIncludes() {
         Vector<String> rv = new Vector<String>();
+        String sourceBase = getFieldString(null, "SourceBase");
+
+        // add relative alternate source include values:
+        String relativeAltSrcInclude =
+            getFieldString(null, "RelativeAltSrcInclude");
+        Vector<String> asri = new Vector<String>();
+        collectRelevantVectors(asri, "AltRelativeInclude");
+        for (String f : asri) {
+            rv.add(sourceBase + Util.sep + relativeAltSrcInclude +
+                   Util.sep + f);
+        }
+
         Vector<String> ri = new Vector<String>();
-        String sourceBase = getFieldString(null, "SourceBase");
         collectRelevantVectors(ri, "RelativeInclude");
         for (String f : ri) {
             rv.add(sourceBase + Util.sep + f);
@@ -541,35 +615,6 @@
     }
 }
 
-class CoreDebugConfig extends GenericDebugNonKernelConfig {
-    String getOptFlag() {
-        return getCI().getNoOptFlag();
-    }
-
-    CoreDebugConfig() {
-        initNames("core", "debug", "jvm.dll");
-        init(getIncludes(), getDefines());
-    }
-}
-
-class CoreFastDebugConfig extends GenericDebugNonKernelConfig {
-    String getOptFlag() {
-        return getCI().getOptFlag();
-    }
-
-    CoreFastDebugConfig() {
-        initNames("core", "fastdebug", "jvm.dll");
-        init(getIncludes(), getDefines());
-    }
-}
-
-class CoreProductConfig extends ProductConfig {
-    CoreProductConfig() {
-        initNames("core", "product", "jvm.dll");
-        init(getIncludes(), getDefines());
-    }
-}
-
 
 abstract class CompilerInterface {
     abstract Vector getBaseCompilerFlags(Vector defines, Vector includes, String outDir);
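
The tracking logic added above reduces to suffix matching: an alternate file is recorded without its leading "...\src\closed\" part, so any regular file whose path ends with the recorded remainder is the one being replaced. A small sketch of the idea, with hypothetical paths and the usual Windows separators:

#include <cstdio>
#include <string>

// Record the alt-src file without the leading ...\src\closed\ part, then
// flag regular files whose paths end with the recorded remainder.
static std::string track(const std::string& path, const std::string& marker) {
  size_t pos = path.find(marker);
  return pos == std::string::npos ? "" : path.substr(pos + marker.size());
}

int main() {
  const std::string marker = "src\\closed\\";  // RelativeAltSrcInclude + separator
  std::string tracked = track("C:\\hs\\src\\closed\\os\\windows\\foo.cpp", marker);
  std::string regular = "C:\\hs\\src\\os\\windows\\foo.cpp";
  bool replaced = regular.size() >= tracked.size() &&
                  regular.compare(regular.size() - tracked.size(),
                                  tracked.size(), tracked) == 0;
  printf("tracked=%s replaced=%d\n", tracked.c_str(), (int)replaced);
  return 0;
}
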
--- a/src/share/tools/ProjectCreator/FileTreeCreator.java	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/tools/ProjectCreator/FileTreeCreator.java	Thu Sep 05 11:04:39 2013 -0700
@@ -12,11 +12,15 @@
    final int startDirLength;
    Stack<DirAttributes> attributes = new Stack<DirAttributes>();
    Vector<BuildConfig> allConfigs;
-   WinGammaPlatformVC10 wg;
+   WinGammaPlatform wg;
+   WinGammaPlatformVC10 wg10;
 
-   public FileTreeCreator(Path startDir, Vector<BuildConfig> allConfigs, WinGammaPlatformVC10 wg) {
+   public FileTreeCreator(Path startDir, Vector<BuildConfig> allConfigs, WinGammaPlatform wg) {
       super();
       this.wg = wg;
+      if (wg instanceof WinGammaPlatformVC10) {
+          wg10 = (WinGammaPlatformVC10)wg;
+      }
       this.allConfigs = allConfigs;
       this.startDir = startDir;
       startDirLength = startDir.toAbsolutePath().toString().length();
--- a/src/share/tools/ProjectCreator/FileTreeCreatorVC10.java	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/tools/ProjectCreator/FileTreeCreatorVC10.java	Thu Sep 05 11:04:39 2013 -0700
@@ -1,3 +1,27 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
 import static java.nio.file.FileVisitResult.CONTINUE;
 
 import java.io.IOException;
@@ -21,6 +45,8 @@
          boolean usePch = false;
          boolean disablePch = false;
          boolean useIgnore = false;
+         boolean isAltSrc = false;  // only needed as a debugging crumb
+         boolean isReplacedByAltSrc = false;
          String fileName = file.getFileName().toString();
 
          // TODO hideFile
@@ -30,6 +56,26 @@
             usePch = true;
          }
 
+         String fileLoc = vcProjLocation.relativize(file).toString();
+
+         // isAltSrc and isReplacedByAltSrc applies to all configs for a file
+         if (BuildConfig.matchesRelativeAltSrcInclude(
+               file.toAbsolutePath().toString())) {
+            // current file is an alternate source file so track it
+            isAltSrc = true;
+            BuildConfig.trackRelativeAltSrcFile(
+                file.toAbsolutePath().toString());
+         } else if (BuildConfig.matchesRelativeAltSrcFile(
+                    file.toAbsolutePath().toString())) {
+            // current file is a regular file that matches an alternate
+            // source file, so report that the regular file is replaced
+            isReplacedByAltSrc = true;
+            System.out.println("INFO: alternate source file '" +
+                               BuildConfig.getMatchingRelativeAltSrcFile(
+                                   file.toAbsolutePath().toString()) +
+                               "' replaces '" + fileLoc + "'");
+         }
+
          for (BuildConfig cfg : allConfigs) {
             if (cfg.lookupHashFieldInContext("IgnoreFile", fileName) != null) {
                useIgnore = true;
@@ -57,10 +103,9 @@
             }
          }
 
-         String tagName = wg.getFileTagFromSuffix(fileName);
-         String fileLoc = vcProjLocation.relativize(file).toString();
+         String tagName = wg10.getFileTagFromSuffix(fileName);
 
-         if (!useIgnore && !disablePch && !usePch) {
+         if (!useIgnore && !disablePch && !usePch && !isReplacedByAltSrc) {
             wg.tag(tagName, new String[] { "Include", fileLoc});
          } else {
             wg.startTag(
@@ -78,12 +123,17 @@
                if (disablePch) {
                   wg.tag("PrecompiledHeader", "Condition", "'$(Configuration)|$(Platform)'=='" + cfg.get("Name") + "'");
                }
+               if (isReplacedByAltSrc) {
+                  wg.tagData("ExcludedFromBuild", "true", "Condition",
+                             "'$(Configuration)|$(Platform)'=='" +
+                             cfg.get("Name") + "'");
+               }
             }
             wg.endTag();
          }
 
          String filter = startDir.relativize(file.getParent().toAbsolutePath()).toString();
-         wg.addFilterDependency(fileLoc, filter);
+         wg10.addFilterDependency(fileLoc, filter);
 
          return CONTINUE;
       }
@@ -112,7 +162,7 @@
          if (!hide) {
             String name = startDir.relativize(path.toAbsolutePath()).toString();
             if (!"".equals(name)) {
-               wg.addFilter(name);
+               wg10.addFilter(name);
             }
 
             attributes.push(newAttr);
@@ -137,6 +187,4 @@
       public void writeFileTree() throws IOException {
          Files.walkFileTree(this.startDir, this);
       }
-
-
-   }
\ No newline at end of file
+}
--- a/src/share/tools/ProjectCreator/FileTreeCreatorVC7.java	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/tools/ProjectCreator/FileTreeCreatorVC7.java	Thu Sep 05 11:04:39 2013 -0700
@@ -12,7 +12,7 @@
 public class FileTreeCreatorVC7 extends FileTreeCreator {
 
       public FileTreeCreatorVC7(Path startDir, Vector<BuildConfig> allConfigs, WinGammaPlatform wg) {
-         super(startDir, allConfigs, null);
+         super(startDir, allConfigs, wg);
       }
 
       @Override
--- a/src/share/tools/ProjectCreator/ProjectCreator.java	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/tools/ProjectCreator/ProjectCreator.java	Thu Sep 05 11:04:39 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,10 +39,15 @@
             + "jvm.dll; no trailing slash>");
       System.err.println("  If any of the above are specified, "
             + "they must all be.");
+      System.err.println("  Note: if '-altRelativeInclude' option below is "
+            + "used, then the '-relativeAltSrcInclude' option must be used "
+            + "to specify the alternate source dir, e.g., 'src\\closed'");
       System.err.println("  Additional, optional arguments, which can be "
             + "specified multiple times:");
       System.err.println("    -absoluteInclude <string containing absolute "
             + "path to include directory>");
+      System.err.println("    -altRelativeInclude <string containing "
+            + "alternate include directory relative to -envVar>");
       System.err.println("    -relativeInclude <string containing include "
             + "directory relative to -envVar>");
       System.err.println("    -define <preprocessor flag to be #defined "
--- a/src/share/tools/ProjectCreator/WinGammaPlatform.java	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/tools/ProjectCreator/WinGammaPlatform.java	Thu Sep 05 11:04:39 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -140,10 +140,17 @@
                            "already exist>");
         System.err.println("  If any of the above are specified, "+
                            "they must all be.");
+        System.err.println("  Note: if '-altRelativeInclude' option below " +
+                           "is used, then the '-relativeAltSrcInclude' " +
+                           "option must be used to specify the alternate " +
+                           "source dir, e.g., 'src\\closed'");
         System.err.println("  Additional, optional arguments, which can be " +
                            "specified multiple times:");
         System.err.println("    -absoluteInclude <string containing absolute " +
                            "path to include directory>");
+        System.err.println("    -altRelativeInclude <string containing " +
+                           "alternate include directory relative to " +
+                           "-sourceBase>");
         System.err.println("    -relativeInclude <string containing include " +
                            "directory relative to -sourceBase>");
         System.err.println("    -define <preprocessor flag to be #defined " +
@@ -343,6 +350,12 @@
                               HsArgHandler.VECTOR
                               ),
 
+                new HsArgRule("-altRelativeInclude",
+                              "AltRelativeInclude",
+                              null,
+                              HsArgHandler.VECTOR
+                              ),
+
                 new HsArgRule("-relativeInclude",
                               "RelativeInclude",
                               null,
@@ -355,6 +368,12 @@
                               HsArgHandler.VECTOR
                               ),
 
+                new HsArgRule("-relativeAltSrcInclude",
+                              "RelativeAltSrcInclude",
+                              null,
+                              HsArgHandler.STRING
+                              ),
+
                 new HsArgRule("-relativeSrcInclude",
                               "RelativeSrcInclude",
                               null,
@@ -560,10 +579,6 @@
         allConfigs.add(new TieredFastDebugConfig());
         allConfigs.add(new TieredProductConfig());
 
-        allConfigs.add(new CoreDebugConfig());
-        allConfigs.add(new CoreFastDebugConfig());
-        allConfigs.add(new CoreProductConfig());
-
         return allConfigs;
     }
 
--- a/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java	Thu Sep 05 11:04:39 2013 -0700
@@ -1,3 +1,27 @@
+/*
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -24,7 +48,7 @@
     public void writeProjectFile(String projectFileName, String projectName,
             Vector<BuildConfig> allConfigs) throws IOException {
         System.out.println();
-        System.out.print("    Writing .vcxproj file: " + projectFileName);
+        System.out.println("    Writing .vcxproj file: " + projectFileName);
 
         String projDir = Util.normalize(new File(projectFileName).getParent());
 
@@ -114,7 +138,7 @@
 
         endTag();
         printWriter.close();
-        System.out.println("    Done.");
+        System.out.println("    Done writing .vcxproj file.");
 
         writeFilterFile(projectFileName, projectName, allConfigs, projDir);
         writeUserFile(projectFileName, allConfigs);
--- a/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java	Thu Sep 05 11:04:39 2013 -0700
@@ -139,19 +139,22 @@
 
       tagV("Tool", cfg.getV("LinkerFlags"));
 
-      tag("Tool",
-            new String[] {
-            "Name",
-            "VCPostBuildEventTool",
-            "Description",
-            BuildConfig
-            .getFieldString(null, "PostbuildDescription"),
-            // Caution: String.replace(String,String) is available
-            // from JDK5 onwards only
-            "CommandLine",
-            cfg.expandFormat(BuildConfig.getFieldString(null,
-                  "PostbuildCommand").replace("\t",
-                        "&#x0D;&#x0A;")) });
+      String postBuildCmd = BuildConfig.getFieldString(null,
+            "PostbuildCommand");
+      if (postBuildCmd != null) {
+         tag("Tool",
+               new String[] {
+               "Name",
+               "VCPostBuildEventTool",
+               "Description",
+               BuildConfig
+               .getFieldString(null, "PostbuildDescription"),
+               // Caution: String.replace(String,String) is available
+               // from JDK5 onwards only
+               "CommandLine",
+                   cfg.expandFormat(postBuildCmd.replace("\t",
+                           "&#x0D;&#x0A;")) });
+      }
 
       tag("Tool", new String[] { "Name", "VCPreBuildEventTool" });
 
--- a/src/share/vm/c1/c1_Runtime1.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -915,16 +915,6 @@
     // Return to the now deoptimized frame.
   }
 
-  // If we are patching in a non-perm oop, make sure the nmethod
-  // is on the right list.
-  if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
-    MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
-    guarantee(nm != NULL, "only nmethods can contain non-perm oops");
-    if (!nm->on_scavenge_root_list())
-      CodeCache::add_scavenge_root_nmethod(nm);
-  }
-
   // Now copy code back
 
   {
@@ -1125,6 +1115,21 @@
       }
     }
   }
+
+  // If we are patching in a non-perm oop, make sure the nmethod
+  // is on the right list.
+  if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
+    MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
+    guarantee(nm != NULL, "only nmethods can contain non-perm oops");
+    if (!nm->on_scavenge_root_list()) {
+      CodeCache::add_scavenge_root_nmethod(nm);
+    }
+
+    // Since we've patched some oops in the nmethod,
+    // (re)register it with the heap.
+    Universe::heap()->register_nmethod(nm);
+  }
 JRT_END
 
 //
--- a/src/share/vm/classfile/classFileParser.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/classfile/classFileParser.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -2590,7 +2590,7 @@
     valid_symbol_at(sourcefile_index),
     "Invalid SourceFile attribute at constant pool index %u in class file %s",
     sourcefile_index, CHECK);
-  set_class_sourcefile(_cp->symbol_at(sourcefile_index));
+  set_class_sourcefile_index(sourcefile_index);
 }
 
 
@@ -2728,7 +2728,7 @@
     valid_symbol_at(signature_index),
     "Invalid constant pool index %u in Signature attribute in class file %s",
     signature_index, CHECK);
-  set_class_generic_signature(_cp->symbol_at(signature_index));
+  set_class_generic_signature_index(signature_index);
 }
 
 void ClassFileParser::parse_classfile_bootstrap_methods_attribute(u4 attribute_byte_length, TRAPS) {
@@ -2975,13 +2975,11 @@
 void ClassFileParser::apply_parsed_class_attributes(instanceKlassHandle k) {
   if (_synthetic_flag)
     k->set_is_synthetic();
-  if (_sourcefile != NULL) {
-    _sourcefile->increment_refcount();
-    k->set_source_file_name(_sourcefile);
+  if (_sourcefile_index != 0) {
+    k->set_source_file_name_index(_sourcefile_index);
   }
-  if (_generic_signature != NULL) {
-    _generic_signature->increment_refcount();
-    k->set_generic_signature(_generic_signature);
+  if (_generic_signature_index != 0) {
+    k->set_generic_signature_index(_generic_signature_index);
   }
   if (_sde_buffer != NULL) {
     k->set_source_debug_extension(_sde_buffer, _sde_length);
--- a/src/share/vm/classfile/classFileParser.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/classfile/classFileParser.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -62,8 +62,8 @@
   bool       _synthetic_flag;
   int        _sde_length;
   char*      _sde_buffer;
-  Symbol*    _sourcefile;
-  Symbol*    _generic_signature;
+  u2         _sourcefile_index;
+  u2         _generic_signature_index;
 
   // Metadata created before the instance klass is created.  Must be deallocated
   // if not transferred to the InstanceKlass upon successful class loading
@@ -81,16 +81,16 @@
   Array<AnnotationArray*>* _fields_type_annotations;
   InstanceKlass*   _klass;  // InstanceKlass once created.
 
-  void set_class_synthetic_flag(bool x)           { _synthetic_flag = x; }
-  void set_class_sourcefile(Symbol* x)            { _sourcefile = x; }
-  void set_class_generic_signature(Symbol* x)     { _generic_signature = x; }
-  void set_class_sde_buffer(char* x, int len)     { _sde_buffer = x; _sde_length = len; }
+  void set_class_synthetic_flag(bool x)        { _synthetic_flag = x; }
+  void set_class_sourcefile_index(u2 x)        { _sourcefile_index = x; }
+  void set_class_generic_signature_index(u2 x) { _generic_signature_index = x; }
+  void set_class_sde_buffer(char* x, int len)  { _sde_buffer = x; _sde_length = len; }
 
   void init_parsed_class_attributes(ClassLoaderData* loader_data) {
     _loader_data = loader_data;
     _synthetic_flag = false;
-    _sourcefile = NULL;
-    _generic_signature = NULL;
+    _sourcefile_index = 0;
+    _generic_signature_index = 0;
     _sde_buffer = NULL;
     _sde_length = 0;
     // initialize the other flags too:
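
The net effect of this parser change: instead of retaining a Symbol* (and bumping its refcount), the class records the two-byte constant-pool index and resolves the name on demand. A hedged sketch of the lookup shape, with hypothetical minimal types standing in for the VM's:

#include <cstdint>
#include <cstdio>
#include <vector>
#include <string>

typedef uint16_t u2;

// Hypothetical stand-ins: the real code resolves a Symbol* from the
// InstanceKlass's constant pool using the stored u2 index.
struct ConstantPool { std::vector<std::string> symbols; };

struct Klass {
  const ConstantPool* cp;
  u2 source_file_name_index;  // 0 means "no SourceFile attribute"
  const char* source_file_name() const {
    return source_file_name_index == 0 ? nullptr
         : cp->symbols[source_file_name_index].c_str();
  }
};

int main() {
  ConstantPool cp; cp.symbols = { "", "Foo.java" };
  Klass k; k.cp = &cp; k.source_file_name_index = 1;
  printf("%s\n", k.source_file_name());  // Foo.java
  return 0;
}
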
--- a/src/share/vm/classfile/javaClasses.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/classfile/javaClasses.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -2557,6 +2557,26 @@
   *offset = value;
 }
 
+// Support for java_lang_invoke_DirectMethodHandle
+
+int java_lang_invoke_DirectMethodHandle::_member_offset;
+
+oop java_lang_invoke_DirectMethodHandle::member(oop dmh) {
+  oop member_name = NULL;
+  bool is_dmh = dmh->is_oop() && java_lang_invoke_DirectMethodHandle::is_instance(dmh);
+  assert(is_dmh, "a DirectMethodHandle oop is expected");
+  if (is_dmh) {
+    member_name = dmh->obj_field(member_offset_in_bytes());
+  }
+  return member_name;
+}
+
+void java_lang_invoke_DirectMethodHandle::compute_offsets() {
+  Klass* klass_oop = SystemDictionary::DirectMethodHandle_klass();
+  if (klass_oop != NULL && EnableInvokeDynamic) {
+    compute_offset(_member_offset, klass_oop, vmSymbols::member_name(), vmSymbols::java_lang_invoke_MemberName_signature());
+  }
+}
 
 // Support for java_lang_invoke_MethodHandle
 
@@ -3205,6 +3225,7 @@
   java_lang_ThreadGroup::compute_offsets();
   if (EnableInvokeDynamic) {
     java_lang_invoke_MethodHandle::compute_offsets();
+    java_lang_invoke_DirectMethodHandle::compute_offsets();
     java_lang_invoke_MemberName::compute_offsets();
     java_lang_invoke_LambdaForm::compute_offsets();
     java_lang_invoke_MethodType::compute_offsets();
--- a/src/share/vm/classfile/javaClasses.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/classfile/javaClasses.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -976,6 +976,32 @@
   static int form_offset_in_bytes()             { return _form_offset; }
 };
 
+// Interface to java.lang.invoke.DirectMethodHandle objects
+
+class java_lang_invoke_DirectMethodHandle: AllStatic {
+  friend class JavaClasses;
+
+ private:
+  static int _member_offset;               // the MemberName of this DMH
+
+  static void compute_offsets();
+
+ public:
+  // Accessors
+  static oop  member(oop mh);
+
+  // Testers
+  static bool is_subclass(Klass* klass) {
+    return klass->is_subclass_of(SystemDictionary::DirectMethodHandle_klass());
+  }
+  static bool is_instance(oop obj) {
+    return obj != NULL && is_subclass(obj->klass());
+  }
+
+  // Accessors for code generation:
+  static int member_offset_in_bytes()           { return _member_offset; }
+};
+
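
A hedged usage sketch for the DirectMethodHandle accessor added above; the
is_instance() guard mirrors the check that member() itself asserts, and
member_name_of is a hypothetical helper, not part of this change:

    // Illustrative only: fetch the MemberName of an oop if and only if it
    // really is a java.lang.invoke.DirectMethodHandle instance.
    static oop member_name_of(oop handle) {
      if (java_lang_invoke_DirectMethodHandle::is_instance(handle)) {
        return java_lang_invoke_DirectMethodHandle::member(handle);
      }
      return NULL;  // not a DMH; the caller must handle this case
    }
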
 // Interface to java.lang.invoke.LambdaForm objects
 // (These are a private interface for managing adapter code generation.)
 
--- a/src/share/vm/classfile/systemDictionary.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/classfile/systemDictionary.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -151,6 +151,7 @@
   do_klass(reflect_CallerSensitive_klass,               sun_reflect_CallerSensitive,               Opt                 ) \
                                                                                                                          \
   /* support for dynamic typing; it's OK if these are NULL in earlier JDKs */                                            \
+  do_klass(DirectMethodHandle_klass,                    java_lang_invoke_DirectMethodHandle,       Opt                 ) \
   do_klass(MethodHandle_klass,                          java_lang_invoke_MethodHandle,             Pre_JSR292          ) \
   do_klass(MemberName_klass,                            java_lang_invoke_MemberName,               Pre_JSR292          ) \
   do_klass(MethodHandleNatives_klass,                   java_lang_invoke_MethodHandleNatives,      Pre_JSR292          ) \
--- a/src/share/vm/classfile/vmSymbols.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/classfile/vmSymbols.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -255,6 +255,7 @@
   /* Support for JSR 292 & invokedynamic (JDK 1.7 and above) */                                   \
   template(java_lang_invoke_CallSite,                 "java/lang/invoke/CallSite")                \
   template(java_lang_invoke_ConstantCallSite,         "java/lang/invoke/ConstantCallSite")        \
+  template(java_lang_invoke_DirectMethodHandle,       "java/lang/invoke/DirectMethodHandle")      \
   template(java_lang_invoke_MutableCallSite,          "java/lang/invoke/MutableCallSite")         \
   template(java_lang_invoke_VolatileCallSite,         "java/lang/invoke/VolatileCallSite")        \
   template(java_lang_invoke_MethodHandle,             "java/lang/invoke/MethodHandle")            \
@@ -352,6 +353,7 @@
   template(thread_id_name,                            "tid")                                      \
   template(newInstance0_name,                         "newInstance0")                             \
   template(limit_name,                                "limit")                                    \
+  template(member_name,                               "member")                                   \
   template(forName_name,                              "forName")                                  \
   template(forName0_name,                             "forName0")                                 \
   template(isJavaIdentifierStart_name,                "isJavaIdentifierStart")                    \
--- a/src/share/vm/code/nmethod.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/code/nmethod.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -687,6 +687,7 @@
     code_buffer->copy_values_to(this);
     if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
       CodeCache::add_scavenge_root_nmethod(this);
+      Universe::heap()->register_nmethod(this);
     }
     debug_only(verify_scavenge_root_oops());
     CodeCache::commit(this);
@@ -881,6 +882,7 @@
     dependencies->copy_to(this);
     if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
       CodeCache::add_scavenge_root_nmethod(this);
+      Universe::heap()->register_nmethod(this);
     }
     debug_only(verify_scavenge_root_oops());
 
@@ -1300,6 +1302,13 @@
   methodHandle the_method(method());
   No_Safepoint_Verifier nsv;
 
+  // During patching, and depending on the nmethod state, we must notify the
+  // GC that code has been unloaded by unregistering it. We cannot do this
+  // while holding the Patching_lock, because we also need the CodeCache_lock
+  // and taking both would be prone to deadlocks.
+  // This flag remembers whether we need to lock and unregister later.
+  bool nmethod_needs_unregister = false;
+
   {
     // invalidate osr nmethod before acquiring the patching lock since
     // they both acquire leaf locks and we don't want a deadlock.
@@ -1332,6 +1341,13 @@
       inc_decompile_count();
     }
 
+    // If the state is becoming a zombie, signal to unregister the nmethod with
+    // the heap.
+    // This nmethod may have already been unloaded during a full GC.
+    if ((state == zombie) && !is_unloaded()) {
+      nmethod_needs_unregister = true;
+    }
+
     // Change state
     _state = state;
 
@@ -1367,6 +1383,9 @@
       // safepoint can sneak in, otherwise the oops used by the
       // dependency logic could have become stale.
       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      if (nmethod_needs_unregister) {
+        Universe::heap()->unregister_nmethod(this);
+      }
       flush_dependencies(NULL);
     }
 
@@ -1817,21 +1836,10 @@
   if (_method != NULL) f(_method);
 }
 
-
-// This method is called twice during GC -- once while
-// tracing the "active" nmethods on thread stacks during
-// the (strong) marking phase, and then again when walking
-// the code cache contents during the weak roots processing
-// phase. The two uses are distinguished by means of the
-// 'do_strong_roots_only' flag, which is true in the first
-// case. We want to walk the weak roots in the nmethod
-// only in the second case. The weak roots in the nmethod
-// are the oops in the ExceptionCache and the InlineCache
-// oops.
-void nmethod::oops_do(OopClosure* f, bool do_strong_roots_only) {
+void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
   // make sure the oops ready to receive visitors
-  assert(!is_zombie() && !is_unloaded(),
-         "should not call follow on zombie or unloaded nmethod");
+  assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
+  assert(!is_unloaded(), "should not call follow on unloaded nmethod");
 
   // If the method is not entrant or zombie then a JMP is plastered over the
   // first few bytes.  If an oop in the old code was there, that oop
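
The nmethod_needs_unregister flag added above is a deferred-side-effect
pattern: decide under one lock, act later under the lock the action really
needs, and never hold both at once. A minimal sketch under that assumption
(transition_and_unregister is a hypothetical name; the locks and the
unregister_nmethod() call follow the hunks above):

    // Illustrative only: the shape of the locking in the patching path.
    void transition_and_unregister(nmethod* nm, int new_state) {
      bool needs_unregister = false;
      {
        MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
        // ... perform the state change on nm ...
        needs_unregister = (new_state == nmethod::zombie) && !nm->is_unloaded();
      }
      // Taken separately: CodeCache_lock is never held with Patching_lock.
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      if (needs_unregister) {
        Universe::heap()->unregister_nmethod(nm);
      }
    }
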
--- a/src/share/vm/code/nmethod.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/code/nmethod.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -566,7 +566,7 @@
   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
                                      OopClosure* f);
   void oops_do(OopClosure* f) { oops_do(f, false); }
-  void oops_do(OopClosure* f, bool do_strong_roots_only);
+  void oops_do(OopClosure* f, bool allow_zombie);
   bool detect_scavenge_root_oops();
   void verify_scavenge_root_oops() PRODUCT_RETURN;
 
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -50,6 +50,7 @@
 #include "memory/genMarkSweep.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/iterator.hpp"
+#include "memory/padded.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/tenuredGeneration.hpp"
@@ -3459,7 +3460,9 @@
 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
   assert_locked_or_safepoint(Heap_lock);
   size_t size = ReservedSpace::page_align_size_down(bytes);
-  if (size > 0) {
+  // Only shrink if a compaction was done so that all the free space
+  // in the generation is in a contiguous block at the end.
+  if (size > 0 && did_compact()) {
     shrink_by(size);
   }
 }
@@ -5477,40 +5480,42 @@
   HandleMark   hm;
 
   SequentialSubTasksDone* pst = space->par_seq_tasks();
-  assert(pst->valid(), "Uninitialized use?");
 
   uint nth_task = 0;
   uint n_tasks  = pst->n_tasks();
 
-  HeapWord *start, *end;
-  while (!pst->is_task_claimed(/* reference */ nth_task)) {
-    // We claimed task # nth_task; compute its boundaries.
-    if (chunk_top == 0) {  // no samples were taken
-      assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
-      start = space->bottom();
-      end   = space->top();
-    } else if (nth_task == 0) {
-      start = space->bottom();
-      end   = chunk_array[nth_task];
-    } else if (nth_task < (uint)chunk_top) {
-      assert(nth_task >= 1, "Control point invariant");
-      start = chunk_array[nth_task - 1];
-      end   = chunk_array[nth_task];
-    } else {
-      assert(nth_task == (uint)chunk_top, "Control point invariant");
-      start = chunk_array[chunk_top - 1];
-      end   = space->top();
-    }
-    MemRegion mr(start, end);
-    // Verify that mr is in space
-    assert(mr.is_empty() || space->used_region().contains(mr),
-           "Should be in space");
-    // Verify that "start" is an object boundary
-    assert(mr.is_empty() || oop(mr.start())->is_oop(),
-           "Should be an oop");
-    space->par_oop_iterate(mr, cl);
-  }
-  pst->all_tasks_completed();
+  if (n_tasks > 0) {
+    assert(pst->valid(), "Uninitialized use?");
+    HeapWord *start, *end;
+    while (!pst->is_task_claimed(/* reference */ nth_task)) {
+      // We claimed task # nth_task; compute its boundaries.
+      if (chunk_top == 0) {  // no samples were taken
+        assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
+        start = space->bottom();
+        end   = space->top();
+      } else if (nth_task == 0) {
+        start = space->bottom();
+        end   = chunk_array[nth_task];
+      } else if (nth_task < (uint)chunk_top) {
+        assert(nth_task >= 1, "Control point invariant");
+        start = chunk_array[nth_task - 1];
+        end   = chunk_array[nth_task];
+      } else {
+        assert(nth_task == (uint)chunk_top, "Control point invariant");
+        start = chunk_array[chunk_top - 1];
+        end   = space->top();
+      }
+      MemRegion mr(start, end);
+      // Verify that mr is in space
+      assert(mr.is_empty() || space->used_region().contains(mr),
+             "Should be in space");
+      // Verify that "start" is an object boundary
+      assert(mr.is_empty() || oop(mr.start())->is_oop(),
+             "Should be an oop");
+      space->par_oop_iterate(mr, cl);
+    }
+    pst->all_tasks_completed();
+  }
 }
 
 void
@@ -5787,7 +5792,7 @@
   DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
 
   // Eden space
-  {
+  if (!dng->eden()->is_empty()) {
     SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
     assert(!pst->valid(), "Clobbering existing data?");
     // Each valid entry in [0, _eden_chunk_index) represents a task.
@@ -8693,9 +8698,10 @@
   assert(inFreeRange(), "Should only be called if currently in a free range.");
   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
   assert(_sp->used_region().contains(eob - 1),
-         err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
+         err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
+                 " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
                  " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
-                 _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
+                 eob, eob-1, _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
   if (eob >= _limit) {
     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
     if (CMSTraceSweeper) {
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -4529,7 +4529,7 @@
     _total_prev_live_bytes(0), _total_next_live_bytes(0),
     _hum_used_bytes(0), _hum_capacity_bytes(0),
     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
-    _total_remset_bytes(0) {
+    _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   MemRegion g1_committed = g1h->g1_committed();
   MemRegion g1_reserved = g1h->g1_reserved();
@@ -4553,9 +4553,11 @@
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_DOUBLE_H_FORMAT
+                G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT,
                 "type", "address-range",
-                "used", "prev-live", "next-live", "gc-eff", "remset");
+                "used", "prev-live", "next-live", "gc-eff",
+                "remset", "code-roots");
   _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_H_FORMAT
                 G1PPRL_ADDR_BASE_H_FORMAT
@@ -4563,9 +4565,11 @@
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_DOUBLE_H_FORMAT
+                G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT,
                 "", "",
-                "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", "(bytes)");
+                "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
+                "(bytes)", "(bytes)");
 }
 
 // It takes as a parameter a reference to one of the _hum_* fields, it
@@ -4608,6 +4612,8 @@
   size_t next_live_bytes = r->next_live_bytes();
   double gc_eff          = r->gc_efficiency();
   size_t remset_bytes    = r->rem_set()->mem_size();
+  size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
+
   if (r->used() == 0) {
     type = "FREE";
   } else if (r->is_survivor()) {
@@ -4642,6 +4648,7 @@
   _total_prev_live_bytes += prev_live_bytes;
   _total_next_live_bytes += next_live_bytes;
   _total_remset_bytes    += remset_bytes;
+  _total_strong_code_roots_bytes += strong_code_roots_bytes;
 
   // Print a line for this particular region.
   _out->print_cr(G1PPRL_LINE_PREFIX
@@ -4651,9 +4658,11 @@
                  G1PPRL_BYTE_FORMAT
                  G1PPRL_BYTE_FORMAT
                  G1PPRL_DOUBLE_FORMAT
+                 G1PPRL_BYTE_FORMAT
                  G1PPRL_BYTE_FORMAT,
                  type, bottom, end,
-                 used_bytes, prev_live_bytes, next_live_bytes, gc_eff , remset_bytes);
+                 used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
+                 remset_bytes, strong_code_roots_bytes);
 
   return false;
 }
@@ -4669,7 +4678,8 @@
                  G1PPRL_SUM_MB_PERC_FORMAT("used")
                  G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                  G1PPRL_SUM_MB_PERC_FORMAT("next-live")
-                 G1PPRL_SUM_MB_FORMAT("remset"),
+                 G1PPRL_SUM_MB_FORMAT("remset")
+                 G1PPRL_SUM_MB_FORMAT("code-roots"),
                  bytes_to_mb(_total_capacity_bytes),
                  bytes_to_mb(_total_used_bytes),
                  perc(_total_used_bytes, _total_capacity_bytes),
@@ -4677,6 +4687,7 @@
                  perc(_total_prev_live_bytes, _total_capacity_bytes),
                  bytes_to_mb(_total_next_live_bytes),
                  perc(_total_next_live_bytes, _total_capacity_bytes),
-                 bytes_to_mb(_total_remset_bytes));
+                 bytes_to_mb(_total_remset_bytes),
+                 bytes_to_mb(_total_strong_code_roots_bytes));
   _out->cr();
 }
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -1257,6 +1257,9 @@
   // Accumulator for the remembered set size
   size_t _total_remset_bytes;
 
+  // Accumulator for strong code roots memory size
+  size_t _total_strong_code_roots_bytes;
+
   static double perc(size_t val, size_t total) {
     if (total == 0) {
       return 0.0;
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
 #include "gc_implementation/g1/bufferingOopClosure.hpp"
 #include "gc_implementation/g1/concurrentG1Refine.hpp"
@@ -980,7 +981,8 @@
 
     if (should_try_gc) {
       bool succeeded;
-      result = do_collection_pause(word_size, gc_count_before, &succeeded);
+      result = do_collection_pause(word_size, gc_count_before, &succeeded,
+          GCCause::_g1_inc_collection_pause);
       if (result != NULL) {
         assert(succeeded, "only way to get back a non-NULL result");
         return result;
@@ -1105,7 +1107,8 @@
       // enough space for the allocation to succeed after the pause.
 
       bool succeeded;
-      result = do_collection_pause(word_size, gc_count_before, &succeeded);
+      result = do_collection_pause(word_size, gc_count_before, &succeeded,
+          GCCause::_g1_humongous_allocation);
       if (result != NULL) {
         assert(succeeded, "only way to get back a non-NULL result");
         return result;
@@ -1176,20 +1179,27 @@
   ModRefBarrierSet* _mr_bs;
 public:
   PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
-    _g1h(g1h), _mr_bs(mr_bs) { }
+    _g1h(g1h), _mr_bs(mr_bs) {}
+
   bool doHeapRegion(HeapRegion* r) {
+    HeapRegionRemSet* hrrs = r->rem_set();
+
     if (r->continuesHumongous()) {
+      // We'll assert that the strong code root list and RSet are empty
+      assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
+      assert(hrrs->occupied() == 0, "RSet should be empty");
       return false;
     }
+
     _g1h->reset_gc_time_stamps(r);
-    HeapRegionRemSet* hrrs = r->rem_set();
-    if (hrrs != NULL) hrrs->clear();
+    hrrs->clear();
     // You might think here that we could clear just the cards
     // corresponding to the used region.  But no: if we leave a dirty card
     // in a region we might allocate into, then it would prevent that card
     // from being enqueued, and cause it to be missed.
     // Re: the performance cost: we shouldn't be doing full GC anyway!
     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
+
     return false;
   }
 };
@@ -1269,30 +1279,6 @@
   heap_region_iterate(&cl);
 }
 
-double G1CollectedHeap::verify(bool guard, const char* msg) {
-  double verify_time_ms = 0.0;
-
-  if (guard && total_collections() >= VerifyGCStartAt) {
-    double verify_start = os::elapsedTime();
-    HandleMark hm;  // Discard invalid handles created during verification
-    prepare_for_verify();
-    Universe::verify(VerifyOption_G1UsePrevMarking, msg);
-    verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
-  }
-
-  return verify_time_ms;
-}
-
-void G1CollectedHeap::verify_before_gc() {
-  double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
-  g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
-}
-
-void G1CollectedHeap::verify_after_gc() {
-  double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
-  g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
-}
-
 bool G1CollectedHeap::do_collection(bool explicit_gc,
                                     bool clear_all_soft_refs,
                                     size_t word_size) {
@@ -1433,7 +1419,7 @@
 
       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
       ClassLoaderDataGraph::purge();
-    MetaspaceAux::verify_metrics();
+      MetaspaceAux::verify_metrics();
 
       // Note: since we've just done a full GC, concurrent
       // marking is no longer active. Therefore we need not
@@ -1504,6 +1490,9 @@
         heap_region_iterate(&rebuild_rs);
       }
 
+      // Rebuild the strong code root lists for each region
+      rebuild_strong_code_roots();
+
       if (true) { // FIXME
         MetaspaceGC::compute_new_size();
       }
@@ -2019,10 +2008,12 @@
 
   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
   size_t max_byte_size = collector_policy()->max_heap_byte_size();
+  size_t heap_alignment = collector_policy()->max_alignment();
 
   // Ensure that the sizes are properly aligned.
   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
+  Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
 
   _cg1r = new ConcurrentG1Refine(this);
 
@@ -2039,12 +2030,8 @@
   // If this happens then we could end up using a non-optimal
   // compressed oops mode.
 
-  // Since max_byte_size is aligned to the size of a heap region (checked
-  // above).
-  Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
-
   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
-                                                 HeapRegion::GrainBytes);
+                                                 heap_alignment);
 
   // It is important to do this in a way such that concurrent readers can't
   // temporarily think something is in the heap.  (I've actually seen this
@@ -3109,6 +3096,145 @@
   return NULL; // keep some compilers happy
 }
 
+// TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
+//       pass it as the perm_blk to SharedHeap::process_strong_roots.
+//       When process_strong_roots stops calling perm_blk->younger_refs_iterate
+//       we can change this closure to extend the simpler OopClosure.
+class VerifyRootsClosure: public OopsInGenClosure {
+private:
+  G1CollectedHeap* _g1h;
+  VerifyOption     _vo;
+  bool             _failures;
+public:
+  // _vo == UsePrevMarking -> use "prev" marking information,
+  // _vo == UseNextMarking -> use "next" marking information,
+  // _vo == UseMarkWord    -> use mark word from object header.
+  VerifyRootsClosure(VerifyOption vo) :
+    _g1h(G1CollectedHeap::heap()),
+    _vo(vo),
+    _failures(false) { }
+
+  bool failures() { return _failures; }
+
+  template <class T> void do_oop_nv(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+      if (_g1h->is_obj_dead_cond(obj, _vo)) {
+        gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
+                              "points to dead obj "PTR_FORMAT, p, (void*) obj);
+        if (_vo == VerifyOption_G1UseMarkWord) {
+          gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
+        }
+        obj->print_on(gclog_or_tty);
+        _failures = true;
+      }
+    }
+  }
+
+  void do_oop(oop* p)       { do_oop_nv(p); }
+  void do_oop(narrowOop* p) { do_oop_nv(p); }
+};
+
+class G1VerifyCodeRootOopClosure: public OopsInGenClosure {
+  G1CollectedHeap* _g1h;
+  OopClosure* _root_cl;
+  nmethod* _nm;
+  VerifyOption _vo;
+  bool _failures;
+
+  template <class T> void do_oop_work(T* p) {
+    // First verify that this root is live
+    _root_cl->do_oop(p);
+
+    if (!G1VerifyHeapRegionCodeRoots) {
+      // We're not verifying the code roots attached to heap region.
+      return;
+    }
+
+    // Don't check the code roots during marking verification in a full GC
+    if (_vo == VerifyOption_G1UseMarkWord) {
+      return;
+    }
+
+    // Now verify that the current nmethod (which contains p) is
+    // in the code root list of the heap region containing the
+    // object referenced by p.
+
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+
+      // Now fetch the region containing the object
+      HeapRegion* hr = _g1h->heap_region_containing(obj);
+      HeapRegionRemSet* hrrs = hr->rem_set();
+      // Verify that the strong code root list for this region
+      // contains the nmethod
+      if (!hrrs->strong_code_roots_list_contains(_nm)) {
+        gclog_or_tty->print_cr("Code root location "PTR_FORMAT" "
+                              "from nmethod "PTR_FORMAT" not in strong "
+                              "code roots for region ["PTR_FORMAT","PTR_FORMAT")",
+                              p, _nm, hr->bottom(), hr->end());
+        _failures = true;
+      }
+    }
+  }
+
+public:
+  G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
+    _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
+
+  void do_oop(oop* p) { do_oop_work(p); }
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+
+  void set_nmethod(nmethod* nm) { _nm = nm; }
+  bool failures() { return _failures; }
+};
+
+class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
+  G1VerifyCodeRootOopClosure* _oop_cl;
+
+public:
+  G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
+    _oop_cl(oop_cl) {}
+
+  void do_code_blob(CodeBlob* cb) {
+    nmethod* nm = cb->as_nmethod_or_null();
+    if (nm != NULL) {
+      _oop_cl->set_nmethod(nm);
+      nm->oops_do(_oop_cl);
+    }
+  }
+};
+
+class YoungRefCounterClosure : public OopClosure {
+  G1CollectedHeap* _g1h;
+  int              _count;
+ public:
+  YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
+  void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
+  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+
+  int count() { return _count; }
+  void reset_count() { _count = 0; };
+};
+
+class VerifyKlassClosure: public KlassClosure {
+  YoungRefCounterClosure _young_ref_counter_closure;
+  OopClosure *_oop_closure;
+ public:
+  VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
+  void do_klass(Klass* k) {
+    k->oops_do(_oop_closure);
+
+    _young_ref_counter_closure.reset_count();
+    k->oops_do(&_young_ref_counter_closure);
+    if (_young_ref_counter_closure.count() > 0) {
+      guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k));
+    }
+  }
+};
+
 class VerifyLivenessOopClosure: public OopClosure {
   G1CollectedHeap* _g1h;
   VerifyOption _vo;
@@ -3242,75 +3368,7 @@
   }
 };
 
-class YoungRefCounterClosure : public OopClosure {
-  G1CollectedHeap* _g1h;
-  int              _count;
- public:
-  YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
-  void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
-  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-
-  int count() { return _count; }
-  void reset_count() { _count = 0; };
-};
-
-class VerifyKlassClosure: public KlassClosure {
-  YoungRefCounterClosure _young_ref_counter_closure;
-  OopClosure *_oop_closure;
- public:
-  VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
-  void do_klass(Klass* k) {
-    k->oops_do(_oop_closure);
-
-    _young_ref_counter_closure.reset_count();
-    k->oops_do(&_young_ref_counter_closure);
-    if (_young_ref_counter_closure.count() > 0) {
-      guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k));
-    }
-  }
-};
-
-// TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
-//       pass it as the perm_blk to SharedHeap::process_strong_roots.
-//       When process_strong_roots stop calling perm_blk->younger_refs_iterate
-//       we can change this closure to extend the simpler OopClosure.
-class VerifyRootsClosure: public OopsInGenClosure {
-private:
-  G1CollectedHeap* _g1h;
-  VerifyOption     _vo;
-  bool             _failures;
-public:
-  // _vo == UsePrevMarking -> use "prev" marking information,
-  // _vo == UseNextMarking -> use "next" marking information,
-  // _vo == UseMarkWord    -> use mark word from object header.
-  VerifyRootsClosure(VerifyOption vo) :
-    _g1h(G1CollectedHeap::heap()),
-    _vo(vo),
-    _failures(false) { }
-
-  bool failures() { return _failures; }
-
-  template <class T> void do_oop_nv(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-      if (_g1h->is_obj_dead_cond(obj, _vo)) {
-        gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
-                              "points to dead obj "PTR_FORMAT, p, (void*) obj);
-        if (_vo == VerifyOption_G1UseMarkWord) {
-          gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
-        }
-        obj->print_on(gclog_or_tty);
-        _failures = true;
-      }
-    }
-  }
-
-  void do_oop(oop* p)       { do_oop_nv(p); }
-  void do_oop(narrowOop* p) { do_oop_nv(p); }
-};
-
-// This is the task used for parallel heap verification.
+// This is the task used for parallel verification of the heap regions.
 
 class G1ParVerifyTask: public AbstractGangTask {
 private:
@@ -3344,20 +3402,15 @@
   }
 };
 
-void G1CollectedHeap::verify(bool silent) {
-  verify(silent, VerifyOption_G1UsePrevMarking);
-}
-
-void G1CollectedHeap::verify(bool silent,
-                             VerifyOption vo) {
+void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
   if (SafepointSynchronize::is_at_safepoint()) {
+    assert(Thread::current()->is_VM_thread(),
+           "Expected to be executed serially by the VM thread at this point");
+
     if (!silent) { gclog_or_tty->print("Roots "); }
     VerifyRootsClosure rootsCl(vo);
-
-    assert(Thread::current()->is_VM_thread(),
-           "Expected to be executed serially by the VM thread at this point");
-
-    CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
+    G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
+    G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
     VerifyKlassClosure klassCl(this, &rootsCl);
 
     // We apply the relevant closures to all the oops in the
@@ -3376,7 +3429,7 @@
                          &klassCl
                          );
 
-    bool failures = rootsCl.failures();
+    bool failures = rootsCl.failures() || codeRootsCl.failures();
 
     if (vo != VerifyOption_G1UseMarkWord) {
       // If we're verifying during a full GC then the region sets
@@ -3445,6 +3498,34 @@
   }
 }
 
+void G1CollectedHeap::verify(bool silent) {
+  verify(silent, VerifyOption_G1UsePrevMarking);
+}
+
+double G1CollectedHeap::verify(bool guard, const char* msg) {
+  double verify_time_ms = 0.0;
+
+  if (guard && total_collections() >= VerifyGCStartAt) {
+    double verify_start = os::elapsedTime();
+    HandleMark hm;  // Discard invalid handles created during verification
+    prepare_for_verify();
+    Universe::verify(VerifyOption_G1UsePrevMarking, msg);
+    verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
+  }
+
+  return verify_time_ms;
+}
+
+void G1CollectedHeap::verify_before_gc() {
+  double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
+  g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
+}
+
+void G1CollectedHeap::verify_after_gc() {
+  double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
+  g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
+}
+
 class PrintRegionClosure: public HeapRegionClosure {
   outputStream* _st;
 public:
@@ -3619,14 +3700,15 @@
 
 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
                                                unsigned int gc_count_before,
-                                               bool* succeeded) {
+                                               bool* succeeded,
+                                               GCCause::Cause gc_cause) {
   assert_heap_not_locked_and_not_at_safepoint();
   g1_policy()->record_stop_world_start();
   VM_G1IncCollectionPause op(gc_count_before,
                              word_size,
                              false, /* should_initiate_conc_mark */
                              g1_policy()->max_pause_time_ms(),
-                             GCCause::_g1_inc_collection_pause);
+                             gc_cause);
   VMThread::execute(&op);
 
   HeapWord* result = op.result();
@@ -3866,8 +3948,9 @@
       append_secondary_free_list_if_not_empty_with_lock();
     }
 
-    assert(check_young_list_well_formed(),
-      "young list should be well formed");
+    assert(check_young_list_well_formed(), "young list should be well formed");
+    assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
+           "sanity check");
 
     // Don't dynamically change the number of GC threads this early.  A value of
     // 0 is used to indicate serial work.  When parallel work is done,
@@ -4987,7 +5070,11 @@
 
       G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
 
-      int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+      // Don't scan the scavengable methods in the code cache as part
+      // of strong root scanning. The code roots that point into a
+      // region in the collection set are scanned when we scan the
+      // region's RSet.
+      int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings;
 
       pss.start_strong_roots();
       _g1h->g1_process_strong_roots(/* is scavenging */ true,
@@ -5029,67 +5116,6 @@
 
 // *** Common G1 Evacuation Stuff
 
-// Closures that support the filtering of CodeBlobs scanned during
-// external root scanning.
-
-// Closure applied to reference fields in code blobs (specifically nmethods)
-// to determine whether an nmethod contains references that point into
-// the collection set. Used as a predicate when walking code roots so
-// that only nmethods that point into the collection set are added to the
-// 'marked' list.
-
-class G1FilteredCodeBlobToOopClosure : public CodeBlobToOopClosure {
-
-  class G1PointsIntoCSOopClosure : public OopClosure {
-    G1CollectedHeap* _g1;
-    bool _points_into_cs;
-  public:
-    G1PointsIntoCSOopClosure(G1CollectedHeap* g1) :
-      _g1(g1), _points_into_cs(false) { }
-
-    bool points_into_cs() const { return _points_into_cs; }
-
-    template <class T>
-    void do_oop_nv(T* p) {
-      if (!_points_into_cs) {
-        T heap_oop = oopDesc::load_heap_oop(p);
-        if (!oopDesc::is_null(heap_oop) &&
-            _g1->in_cset_fast_test(oopDesc::decode_heap_oop_not_null(heap_oop))) {
-          _points_into_cs = true;
-        }
-      }
-    }
-
-    virtual void do_oop(oop* p)        { do_oop_nv(p); }
-    virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
-  };
-
-  G1CollectedHeap* _g1;
-
-public:
-  G1FilteredCodeBlobToOopClosure(G1CollectedHeap* g1, OopClosure* cl) :
-    CodeBlobToOopClosure(cl, true), _g1(g1) { }
-
-  virtual void do_code_blob(CodeBlob* cb) {
-    nmethod* nm = cb->as_nmethod_or_null();
-    if (nm != NULL && !(nm->test_oops_do_mark())) {
-      G1PointsIntoCSOopClosure predicate_cl(_g1);
-      nm->oops_do(&predicate_cl);
-
-      if (predicate_cl.points_into_cs()) {
-        // At least one of the reference fields or the oop relocations
-        // in the nmethod points into the collection set. We have to
-        // 'mark' this nmethod.
-        // Note: Revisit the following if CodeBlobToOopClosure::do_code_blob()
-        // or MarkingCodeBlobClosure::do_code_blob() change.
-        if (!nm->test_set_oops_do_mark()) {
-          do_newly_marked_nmethod(nm);
-        }
-      }
-    }
-  }
-};
-
 // This method is run in a GC worker.
 
 void
@@ -5107,9 +5133,10 @@
 
   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
 
-  // Walk the code cache w/o buffering, because StarTask cannot handle
-  // unaligned oop locations.
-  G1FilteredCodeBlobToOopClosure eager_scan_code_roots(this, scan_non_heap_roots);
+  assert(so & SO_CodeCache || scan_rs != NULL, "must scan code roots somehow");
+  // Walk the code cache/strong code roots w/o buffering, because StarTask
+  // cannot handle unaligned oop locations.
+  CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
 
   process_strong_roots(false, // no scoping; this is parallel code
                        is_scavenging, so,
@@ -5154,9 +5181,22 @@
   }
   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
 
+  // If this is an initial mark pause, and we're not scanning
+  // the entire code cache, we need to mark the oops in the
+  // strong code root lists for the regions that are not in
+  // the collection set.
+  // Note all threads participate in this set of root tasks.
+  double mark_strong_code_roots_ms = 0.0;
+  if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) {
+    double mark_strong_roots_start = os::elapsedTime();
+    mark_strong_code_roots(worker_i);
+    mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
+  }
+  g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
+
   // Now scan the complement of the collection set.
   if (scan_rs != NULL) {
-    g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
+    g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
   }
   _process_strong_tasks->all_tasks_completed();
 }
@@ -5774,9 +5814,6 @@
   process_discovered_references(n_workers);
 
   // Weak root processing.
-  // Note: when JSR 292 is enabled and code blobs can contain
-  // non-perm oops then we will need to process the code blobs
-  // here too.
   {
     G1STWIsAliveClosure is_alive(this);
     G1KeepAliveClosure keep_alive(this);
@@ -5792,6 +5829,17 @@
   hot_card_cache->reset_hot_cache();
   hot_card_cache->set_use_cache(true);
 
+  // Migrate the strong code roots attached to each region in
+  // the collection set. Ideally we would like to do this
+  // after we have finished the scanning/evacuation of the
+  // strong code roots for a particular heap region.
+  migrate_strong_code_roots();
+
+  if (g1_policy()->during_initial_mark_pause()) {
+    // Reset the claim values set during marking the strong code roots
+    reset_heap_region_claim_values();
+  }
+
   finalize_for_evac_failure();
 
   if (evacuation_failed()) {
@@ -6588,3 +6636,208 @@
   _humongous_set.verify_end();
   _free_list.verify_end();
 }
+
+// Optimized nmethod scanning
+
+class RegisterNMethodOopClosure: public OopClosure {
+  G1CollectedHeap* _g1h;
+  nmethod* _nm;
+
+  template <class T> void do_oop_work(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+      HeapRegion* hr = _g1h->heap_region_containing(obj);
+      assert(!hr->isHumongous(), "code root in humongous region?");
+
+      // HeapRegion::add_strong_code_root() avoids adding duplicate
+      // entries, but having duplicates is OK since we "mark" nmethods
+      // as visited when we scan the strong code root lists during the GC.
+      hr->add_strong_code_root(_nm);
+      assert(hr->rem_set()->strong_code_roots_list_contains(_nm), "add failed?");
+    }
+  }
+
+public:
+  RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
+    _g1h(g1h), _nm(nm) {}
+
+  void do_oop(oop* p)       { do_oop_work(p); }
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+class UnregisterNMethodOopClosure: public OopClosure {
+  G1CollectedHeap* _g1h;
+  nmethod* _nm;
+
+  template <class T> void do_oop_work(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+      HeapRegion* hr = _g1h->heap_region_containing(obj);
+      assert(!hr->isHumongous(), "code root in humongous region?");
+      hr->remove_strong_code_root(_nm);
+      assert(!hr->rem_set()->strong_code_roots_list_contains(_nm), "remove failed?");
+    }
+  }
+
+public:
+  UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
+    _g1h(g1h), _nm(nm) {}
+
+  void do_oop(oop* p)       { do_oop_work(p); }
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+void G1CollectedHeap::register_nmethod(nmethod* nm) {
+  CollectedHeap::register_nmethod(nm);
+
+  guarantee(nm != NULL, "sanity");
+  RegisterNMethodOopClosure reg_cl(this, nm);
+  nm->oops_do(&reg_cl);
+}
+
+void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
+  CollectedHeap::unregister_nmethod(nm);
+
+  guarantee(nm != NULL, "sanity");
+  UnregisterNMethodOopClosure reg_cl(this, nm);
+  nm->oops_do(&reg_cl, true);
+}
+
+class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure {
+public:
+  bool doHeapRegion(HeapRegion *hr) {
+    assert(!hr->isHumongous(), "humongous region in collection set?");
+    hr->migrate_strong_code_roots();
+    return false;
+  }
+};
+
+void G1CollectedHeap::migrate_strong_code_roots() {
+  MigrateCodeRootsHeapRegionClosure cl;
+  double migrate_start = os::elapsedTime();
+  collection_set_iterate(&cl);
+  double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
+  g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
+}
+
+// Mark all the code roots that point into regions *not* in the
+// collection set.
+//
+// Note we do not want to use a "marking" CodeBlobToOopClosure while
+// walking the code root lists of regions not in the collection
+// set. Suppose we have an nmethod (M) that points to objects in two
+// separate regions - one in the collection set (R1) and one not (R2).
+// Using a "marking" CodeBlobToOopClosure here would result in "marking"
+// nmethod M when walking the code roots for R1. When we come to scan
+// the code roots for R2, we would see that M is already marked and it
+// would be skipped and the objects in R2 that are referenced from M
+// would not be evacuated.
+
+class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
+
+  class MarkStrongCodeRootOopClosure: public OopClosure {
+    ConcurrentMark* _cm;
+    HeapRegion* _hr;
+    uint _worker_id;
+
+    template <class T> void do_oop_work(T* p) {
+      T heap_oop = oopDesc::load_heap_oop(p);
+      if (!oopDesc::is_null(heap_oop)) {
+        oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+        // Only mark objects in the region (which is assumed
+        // not to be in the collection set).
+        if (_hr->is_in(obj)) {
+          _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
+        }
+      }
+    }
+
+  public:
+    MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) :
+      _cm(cm), _hr(hr), _worker_id(worker_id) {
+      assert(!_hr->in_collection_set(), "sanity");
+    }
+
+    void do_oop(narrowOop* p) { do_oop_work(p); }
+    void do_oop(oop* p)       { do_oop_work(p); }
+  };
+
+  MarkStrongCodeRootOopClosure _oop_cl;
+
+public:
+  MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id):
+    _oop_cl(cm, hr, worker_id) {}
+
+  void do_code_blob(CodeBlob* cb) {
+    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
+    if (nm != NULL) {
+      nm->oops_do(&_oop_cl);
+    }
+  }
+};
+
+class MarkStrongCodeRootsHRClosure: public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
+  uint _worker_id;
+
+public:
+  MarkStrongCodeRootsHRClosure(G1CollectedHeap* g1h, uint worker_id) :
+    _g1h(g1h), _worker_id(worker_id) {}
+
+  bool doHeapRegion(HeapRegion *hr) {
+    HeapRegionRemSet* hrrs = hr->rem_set();
+    if (hr->isHumongous()) {
+      // Code roots should never be attached to a humongous region
+      assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
+      return false;
+    }
+
+    if (hr->in_collection_set()) {
+      // Don't mark code roots into regions in the collection set here.
+      // They will be marked when we scan them.
+      return false;
+    }
+
+    MarkStrongCodeRootCodeBlobClosure cb_cl(_g1h->concurrent_mark(), hr, _worker_id);
+    hr->strong_code_roots_do(&cb_cl);
+    return false;
+  }
+};
+
+void G1CollectedHeap::mark_strong_code_roots(uint worker_id) {
+  MarkStrongCodeRootsHRClosure cl(this, worker_id);
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
+    heap_region_par_iterate_chunked(&cl,
+                                    worker_id,
+                                    workers()->active_workers(),
+                                    HeapRegion::ParMarkRootClaimValue);
+  } else {
+    heap_region_iterate(&cl);
+  }
+}
+
+class RebuildStrongCodeRootClosure: public CodeBlobClosure {
+  G1CollectedHeap* _g1h;
+
+public:
+  RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
+    _g1h(g1h) {}
+
+  void do_code_blob(CodeBlob* cb) {
+    nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
+    if (nm == NULL) {
+      return;
+    }
+
+    if (ScavengeRootsInCode && nm->detect_scavenge_root_oops()) {
+      _g1h->register_nmethod(nm);
+    }
+  }
+};
+
+void G1CollectedHeap::rebuild_strong_code_roots() {
+  RebuildStrongCodeRootClosure blob_cl(this);
+  CodeCache::blobs_do(&blob_cl);
+}
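
Both closures above use nm->oops_do(cl) as the single entry point for
visiting an nmethod's embedded oops. A hedged sketch of the same protocol
(CountOopsClosure and count_embedded_oops are illustrative names, not part
of this change):

    // Illustrative only: count the oops an nmethod embeds, via the same
    // OopClosure/oops_do protocol that RegisterNMethodOopClosure uses.
    class CountOopsClosure : public OopClosure {
      int _count;
     public:
      CountOopsClosure() : _count(0) {}
      virtual void do_oop(oop* p)       { _count++; }
      virtual void do_oop(narrowOop* p) { _count++; }
      int count() const { return _count; }
    };

    static int count_embedded_oops(nmethod* nm) {
      CountOopsClosure cl;
      nm->oops_do(&cl);
      return cl.count();
    }
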
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -46,6 +46,7 @@
 // may combine concurrent marking with parallel, incremental compaction of
 // heap subsets that will yield large amounts of garbage.
 
+// Forward declarations
 class HeapRegion;
 class HRRSCleanupTask;
 class GenerationSpec;
@@ -69,6 +70,7 @@
 class G1NewTracer;
 class G1OldTracer;
 class EvacuationFailedInfo;
+class nmethod;
 
 typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
@@ -163,18 +165,6 @@
     : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
 };
 
-// The G1 STW is alive closure.
-// An instance is embedded into the G1CH and used as the
-// (optional) _is_alive_non_header closure in the STW
-// reference processor. It is also extensively used during
-// reference processing during STW evacuation pauses.
-class G1STWIsAliveClosure: public BoolObjectClosure {
-  G1CollectedHeap* _g1;
-public:
-  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
-  bool do_object_b(oop p);
-};
-
 class SurvivorGCAllocRegion : public G1AllocRegion {
 protected:
   virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
@@ -193,6 +183,18 @@
   : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
 };
 
+// The G1 STW is alive closure.
+// An instance is embedded into the G1CH and used as the
+// (optional) _is_alive_non_header closure in the STW
+// reference processor. It is also extensively used during
+// reference processing during STW evacuation pauses.
+class G1STWIsAliveClosure: public BoolObjectClosure {
+  G1CollectedHeap* _g1;
+public:
+  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
+  bool do_object_b(oop p);
+};
+
 class RefineCardTableEntryClosure;
 
 class G1CollectedHeap : public SharedHeap {
@@ -774,9 +776,10 @@
   // it has to be read while holding the Heap_lock. Currently, both
   // methods that call do_collection_pause() release the Heap_lock
   // before the call, so it's easy to read gc_count_before just before.
-  HeapWord* do_collection_pause(size_t       word_size,
-                                unsigned int gc_count_before,
-                                bool*        succeeded);
+  HeapWord* do_collection_pause(size_t         word_size,
+                                unsigned int   gc_count_before,
+                                bool*          succeeded,
+                                GCCause::Cause gc_cause);
 
   // The guts of the incremental collection pause, executed by the vm
   // thread. It returns false if it is unable to do the collection due
@@ -1549,42 +1552,6 @@
 
   virtual jlong millis_since_last_gc();
 
-  // Perform any cleanup actions necessary before allowing a verification.
-  virtual void prepare_for_verify();
-
-  // Perform verification.
-
-  // vo == UsePrevMarking  -> use "prev" marking information,
-  // vo == UseNextMarking -> use "next" marking information
-  // vo == UseMarkWord    -> use the mark word in the object header
-  //
-  // NOTE: Only the "prev" marking information is guaranteed to be
-  // consistent most of the time, so most calls to this should use
-  // vo == UsePrevMarking.
-  // Currently, there is only one case where this is called with
-  // vo == UseNextMarking, which is to verify the "next" marking
-  // information at the end of remark.
-  // Currently there is only one place where this is called with
-  // vo == UseMarkWord, which is to verify the marking during a
-  // full GC.
-  void verify(bool silent, VerifyOption vo);
-
-  // Override; it uses the "prev" marking information
-  virtual void verify(bool silent);
-
-  virtual void print_on(outputStream* st) const;
-  virtual void print_extended_on(outputStream* st) const;
-  virtual void print_on_error(outputStream* st) const;
-
-  virtual void print_gc_threads_on(outputStream* st) const;
-  virtual void gc_threads_do(ThreadClosure* tc) const;
-
-  // Override
-  void print_tracing_info() const;
-
-  // The following two methods are helpful for debugging RSet issues.
-  void print_cset_rsets() PRODUCT_RETURN;
-  void print_all_rsets() PRODUCT_RETURN;
 
   // Convenience function to be used in situations where the heap type can be
   // asserted to be this type.
@@ -1661,13 +1628,86 @@
     else return is_obj_ill(obj, hr);
   }
 
+  bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
+  HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
+  bool is_marked(oop obj, VerifyOption vo);
+  const char* top_at_mark_start_str(VerifyOption vo);
+
+  ConcurrentMark* concurrent_mark() const { return _cm; }
+
+  // Refinement
+
+  ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
+
+  // The dirty cards region list is used to record a subset of regions
+// whose cards need clearing. The list is populated during the
+  // remembered set scanning and drained during the card table
+  // cleanup. Although the methods are reentrant, population/draining
+  // phases must not overlap. For synchronization purposes the last
+  // element on the list points to itself.
+  HeapRegion* _dirty_cards_region_list;
+  void push_dirty_cards_region(HeapRegion* hr);
+  HeapRegion* pop_dirty_cards_region();
+
+  // Optimized nmethod scanning support routines
+
+  // Register the given nmethod with the G1 heap
+  virtual void register_nmethod(nmethod* nm);
+
+  // Unregister the given nmethod from the G1 heap
+  virtual void unregister_nmethod(nmethod* nm);
+
+  // Migrate the nmethods in the code root lists of the regions
+  // in the collection set to regions in to-space. In the event
+  // of an evacuation failure, nmethods that reference objects
+  // that were not successfully evacuated are not migrated.
+  void migrate_strong_code_roots();
+
+  // During an initial mark pause, mark all the code roots that
+  // point into regions *not* in the collection set.
+  void mark_strong_code_roots(uint worker_id);
+
+  // Rebuild the strong code root lists for each region
+  // after a full GC
+  void rebuild_strong_code_roots();
+
+  // Verification
+
+  // The following is just to alert the verification code
+  // that a full collection has occurred and that the
+  // remembered sets are no longer up to date.
+  bool _full_collection;
+  void set_full_collection() { _full_collection = true;}
+  void clear_full_collection() {_full_collection = false;}
+  bool full_collection() {return _full_collection;}
+
+  // Perform any cleanup actions necessary before allowing a verification.
+  virtual void prepare_for_verify();
+
+  // Perform verification.
+
+  // vo == UsePrevMarking  -> use "prev" marking information,
+  // vo == UseNextMarking -> use "next" marking information
+  // vo == UseMarkWord    -> use the mark word in the object header
+  //
+  // NOTE: Only the "prev" marking information is guaranteed to be
+  // consistent most of the time, so most calls to this should use
+  // vo == UsePrevMarking.
+  // Currently, there is only one case where this is called with
+  // vo == UseNextMarking, which is to verify the "next" marking
+  // information at the end of remark.
+  // Currently there is only one place where this is called with
+  // vo == UseMarkWord, which is to verify the marking during a
+  // full GC.
+  void verify(bool silent, VerifyOption vo);
+
+  // Override; it uses the "prev" marking information
+  virtual void verify(bool silent);
+
   // The methods below are here for convenience and dispatch the
   // appropriate method depending on value of the given VerifyOption
-  // parameter. The options for that parameter are:
-  //
-  // vo == UsePrevMarking -> use "prev" marking information,
-  // vo == UseNextMarking -> use "next" marking information,
-  // vo == UseMarkWord    -> use mark word from object header
+  // parameter. The values for that parameter, and their meanings,
+  // are the same as those above.
 
   bool is_obj_dead_cond(const oop obj,
                         const HeapRegion* hr,
@@ -1692,31 +1732,21 @@
     return false; // keep some compilers happy
   }
 
-  bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
-  HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
-  bool is_marked(oop obj, VerifyOption vo);
-  const char* top_at_mark_start_str(VerifyOption vo);
+  // Printing
 
-  // The following is just to alert the verification code
-  // that a full collection has occurred and that the
-  // remembered sets are no longer up to date.
-  bool _full_collection;
-  void set_full_collection() { _full_collection = true;}
-  void clear_full_collection() {_full_collection = false;}
-  bool full_collection() {return _full_collection;}
+  virtual void print_on(outputStream* st) const;
+  virtual void print_extended_on(outputStream* st) const;
+  virtual void print_on_error(outputStream* st) const;
 
-  ConcurrentMark* concurrent_mark() const { return _cm; }
-  ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
+  virtual void print_gc_threads_on(outputStream* st) const;
+  virtual void gc_threads_do(ThreadClosure* tc) const;
 
-  // The dirty cards region list is used to record a subset of regions
-  // whose cards need clearing. The list if populated during the
-  // remembered set scanning and drained during the card table
-  // cleanup. Although the methods are reentrant, population/draining
-  // phases must not overlap. For synchronization purposes the last
-  // element on the list points to itself.
-  HeapRegion* _dirty_cards_region_list;
-  void push_dirty_cards_region(HeapRegion* hr);
-  HeapRegion* pop_dirty_cards_region();
+  // Override
+  void print_tracing_info() const;
+
+  // The following two methods are helpful for debugging RSet issues.
+  void print_cset_rsets() PRODUCT_RETURN;
+  void print_all_rsets() PRODUCT_RETURN;
 
 public:
   void stop_conc_gc_threads();
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -313,7 +313,8 @@
 void G1CollectorPolicy::initialize_flags() {
   set_min_alignment(HeapRegion::GrainBytes);
   size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
-  set_max_alignment(MAX2(card_table_alignment, min_alignment()));
+  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
+  set_max_alignment(MAX3(card_table_alignment, min_alignment(), page_size));
   if (SurvivorRatio < 1) {
     vm_exit_during_initialization("Invalid survivor ratio specified");
   }
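The switch from MAX2 to MAX3 matters whenever the page size exceeds both
other constraints; a sketch with assumed, illustrative values:

    // Sketch only; all values assumed, not taken from the patch.
    size_t card_table_alignment = 4 * K;  // assumed GenRemSet constraint
    size_t min_align            = 1 * M;  // assumed HeapRegion::GrainBytes
    size_t page_size            = 2 * M;  // assumed os::large_page_size()
    // Old: MAX2(card_table_alignment, min_align)            == 1*M
    // New: MAX3(card_table_alignment, min_align, page_size) == 2*M
    // so the maximum heap alignment now also respects large pages.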
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -161,6 +161,8 @@
   _last_update_rs_times_ms(_max_gc_threads, "%.1lf"),
   _last_update_rs_processed_buffers(_max_gc_threads, "%d"),
   _last_scan_rs_times_ms(_max_gc_threads, "%.1lf"),
+  _last_strong_code_root_scan_times_ms(_max_gc_threads, "%.1lf"),
+  _last_strong_code_root_mark_times_ms(_max_gc_threads, "%.1lf"),
   _last_obj_copy_times_ms(_max_gc_threads, "%.1lf"),
   _last_termination_times_ms(_max_gc_threads, "%.1lf"),
   _last_termination_attempts(_max_gc_threads, SIZE_FORMAT),
@@ -182,6 +184,8 @@
   _last_update_rs_times_ms.reset();
   _last_update_rs_processed_buffers.reset();
   _last_scan_rs_times_ms.reset();
+  _last_strong_code_root_scan_times_ms.reset();
+  _last_strong_code_root_mark_times_ms.reset();
   _last_obj_copy_times_ms.reset();
   _last_termination_times_ms.reset();
   _last_termination_attempts.reset();
@@ -197,6 +201,8 @@
   _last_update_rs_times_ms.verify();
   _last_update_rs_processed_buffers.verify();
   _last_scan_rs_times_ms.verify();
+  _last_strong_code_root_scan_times_ms.verify();
+  _last_strong_code_root_mark_times_ms.verify();
   _last_obj_copy_times_ms.verify();
   _last_termination_times_ms.verify();
   _last_termination_attempts.verify();
@@ -210,6 +216,8 @@
                                _last_satb_filtering_times_ms.get(i) +
                                _last_update_rs_times_ms.get(i) +
                                _last_scan_rs_times_ms.get(i) +
+                               _last_strong_code_root_scan_times_ms.get(i) +
+                               _last_strong_code_root_mark_times_ms.get(i) +
                                _last_obj_copy_times_ms.get(i) +
                                _last_termination_times_ms.get(i);
 
@@ -239,6 +247,9 @@
     // Now subtract the time taken to fix up roots in generated code
     misc_time_ms += _cur_collection_code_root_fixup_time_ms;
 
+    // Strong code root migration time
+    misc_time_ms += _cur_strong_code_root_migration_time_ms;
+
     // Subtract the time taken to clean the card table from the
     // current value of "other time"
     misc_time_ms += _cur_clear_ct_time_ms;
@@ -257,9 +268,13 @@
     if (_last_satb_filtering_times_ms.sum() > 0.0) {
       _last_satb_filtering_times_ms.print(2, "SATB Filtering (ms)");
     }
+    if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
+      _last_strong_code_root_mark_times_ms.print(2, "Code Root Marking (ms)");
+    }
     _last_update_rs_times_ms.print(2, "Update RS (ms)");
       _last_update_rs_processed_buffers.print(3, "Processed Buffers");
     _last_scan_rs_times_ms.print(2, "Scan RS (ms)");
+    _last_strong_code_root_scan_times_ms.print(2, "Code Root Scanning (ms)");
     _last_obj_copy_times_ms.print(2, "Object Copy (ms)");
     _last_termination_times_ms.print(2, "Termination (ms)");
     if (G1Log::finest()) {
@@ -273,12 +288,17 @@
     if (_last_satb_filtering_times_ms.sum() > 0.0) {
       _last_satb_filtering_times_ms.print(1, "SATB Filtering (ms)");
     }
+    if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
+      _last_strong_code_root_mark_times_ms.print(1, "Code Root Marking (ms)");
+    }
     _last_update_rs_times_ms.print(1, "Update RS (ms)");
       _last_update_rs_processed_buffers.print(2, "Processed Buffers");
     _last_scan_rs_times_ms.print(1, "Scan RS (ms)");
+    _last_strong_code_root_scan_times_ms.print(1, "Code Root Scanning (ms)");
     _last_obj_copy_times_ms.print(1, "Object Copy (ms)");
   }
   print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
+  print_stats(1, "Code Root Migration", _cur_strong_code_root_migration_time_ms);
   print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
   double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
   print_stats(1, "Other", misc_time_ms);
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -119,6 +119,8 @@
   WorkerDataArray<double> _last_update_rs_times_ms;
   WorkerDataArray<int>    _last_update_rs_processed_buffers;
   WorkerDataArray<double> _last_scan_rs_times_ms;
+  WorkerDataArray<double> _last_strong_code_root_scan_times_ms;
+  WorkerDataArray<double> _last_strong_code_root_mark_times_ms;
   WorkerDataArray<double> _last_obj_copy_times_ms;
   WorkerDataArray<double> _last_termination_times_ms;
   WorkerDataArray<size_t> _last_termination_attempts;
@@ -128,6 +130,7 @@
 
   double _cur_collection_par_time_ms;
   double _cur_collection_code_root_fixup_time_ms;
+  double _cur_strong_code_root_migration_time_ms;
 
   double _cur_clear_ct_time_ms;
   double _cur_ref_proc_time_ms;
@@ -179,6 +182,14 @@
     _last_scan_rs_times_ms.set(worker_i, ms);
   }
 
+  void record_strong_code_root_scan_time(uint worker_i, double ms) {
+    _last_strong_code_root_scan_times_ms.set(worker_i, ms);
+  }
+
+  void record_strong_code_root_mark_time(uint worker_i, double ms) {
+    _last_strong_code_root_mark_times_ms.set(worker_i, ms);
+  }
+
   void record_obj_copy_time(uint worker_i, double ms) {
     _last_obj_copy_times_ms.set(worker_i, ms);
   }
@@ -208,6 +219,10 @@
     _cur_collection_code_root_fixup_time_ms = ms;
   }
 
+  void record_strong_code_root_migration_time(double ms) {
+    _cur_strong_code_root_migration_time_ms = ms;
+  }
+
   void record_ref_proc_time(double ms) {
     _cur_ref_proc_time_ms = ms;
   }
@@ -294,6 +309,14 @@
     return _last_scan_rs_times_ms.average();
   }
 
+  double average_last_strong_code_root_scan_time(){
+    return _last_strong_code_root_scan_times_ms.average();
+  }
+
+  double average_last_strong_code_root_mark_time(){
+    return _last_strong_code_root_mark_times_ms.average();
+  }
+
   double average_last_obj_copy_time() {
     return _last_obj_copy_times_ms.average();
   }
--- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -262,6 +262,7 @@
     old_collection_counters()->update_all();
     young_collection_counters()->update_all();
     MetaspaceCounters::update_performance_counters();
+    CompressedClassSpaceCounters::update_performance_counters();
   }
 }
 
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -104,15 +104,25 @@
 class ScanRSClosure : public HeapRegionClosure {
   size_t _cards_done, _cards;
   G1CollectedHeap* _g1h;
+
   OopsInHeapRegionClosure* _oc;
+  CodeBlobToOopClosure* _code_root_cl;
+
   G1BlockOffsetSharedArray* _bot_shared;
   CardTableModRefBS *_ct_bs;
-  int _worker_i;
-  int _block_size;
-  bool _try_claimed;
+
+  double _strong_code_root_scan_time_sec;
+  int    _worker_i;
+  int    _block_size;
+  bool   _try_claimed;
+
 public:
-  ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) :
+  ScanRSClosure(OopsInHeapRegionClosure* oc,
+                CodeBlobToOopClosure* code_root_cl,
+                int worker_i) :
     _oc(oc),
+    _code_root_cl(code_root_cl),
+    _strong_code_root_scan_time_sec(0.0),
     _cards(0),
     _cards_done(0),
     _worker_i(worker_i),
@@ -160,6 +170,12 @@
                            card_start, card_start + G1BlockOffsetSharedArray::N_words);
   }
 
+  void scan_strong_code_roots(HeapRegion* r) {
+    double scan_start = os::elapsedTime();
+    r->strong_code_roots_do(_code_root_cl);
+    _strong_code_root_scan_time_sec += (os::elapsedTime() - scan_start);
+  }
+
   bool doHeapRegion(HeapRegion* r) {
     assert(r->in_collection_set(), "should only be called on elements of CS.");
     HeapRegionRemSet* hrrs = r->rem_set();
@@ -173,6 +189,7 @@
     //   _try_claimed || r->claim_iter()
     // is true: either we're supposed to work on claimed-but-not-complete
     // regions, or we successfully claimed the region.
+
     HeapRegionRemSetIterator iter(hrrs);
     size_t card_index;
 
@@ -205,30 +222,43 @@
       }
     }
     if (!_try_claimed) {
+      // Scan the strong code root list attached to the current region
+      scan_strong_code_roots(r);
+
       hrrs->set_iter_complete();
     }
     return false;
   }
+
+  double strong_code_root_scan_time_sec() {
+    return _strong_code_root_scan_time_sec;
+  }
+
   size_t cards_done() { return _cards_done;}
   size_t cards_looked_up() { return _cards;}
 };
 
-void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
+void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
+                      CodeBlobToOopClosure* code_root_cl,
+                      int worker_i) {
   double rs_time_start = os::elapsedTime();
   HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
 
-  ScanRSClosure scanRScl(oc, worker_i);
+  ScanRSClosure scanRScl(oc, code_root_cl, worker_i);
 
   _g1->collection_set_iterate_from(startRegion, &scanRScl);
   scanRScl.set_try_claimed();
   _g1->collection_set_iterate_from(startRegion, &scanRScl);
 
-  double scan_rs_time_sec = os::elapsedTime() - rs_time_start;
+  double scan_rs_time_sec = (os::elapsedTime() - rs_time_start)
+                            - scanRScl.strong_code_root_scan_time_sec();
 
-  assert( _cards_scanned != NULL, "invariant" );
+  assert(_cards_scanned != NULL, "invariant");
   _cards_scanned[worker_i] = scanRScl.cards_done();
 
   _g1p->phase_times()->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
+  _g1p->phase_times()->record_strong_code_root_scan_time(worker_i,
+                                                         scanRScl.strong_code_root_scan_time_sec() * 1000.0);
 }
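The subtraction above keeps code root scanning from being counted twice;
with invented timings:

    // Sketch with invented numbers.
    // elapsed in scanRS overall:              12.0 ms
    // strong_code_root_scan_time_sec * 1000:   3.5 ms
    // recorded "Scan RS":        12.0 - 3.5 =  8.5 ms
    // recorded "Code Root Scanning":            3.5 ms
    // Without the subtraction, 3.5 ms would appear in both rows.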
 
 // Closure used for updating RSets and recording references that
@@ -288,7 +318,8 @@
 }
 
 void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
-                                             int worker_i) {
+                                           CodeBlobToOopClosure* code_root_cl,
+                                           int worker_i) {
 #if CARD_REPEAT_HISTO
   ct_freq_update_histo_and_reset();
 #endif
@@ -328,7 +359,7 @@
     _g1p->phase_times()->record_update_rs_time(worker_i, 0.0);
   }
   if (G1UseParallelRSetScanning || (worker_i == 0)) {
-    scanRS(oc, worker_i);
+    scanRS(oc, code_root_cl, worker_i);
   } else {
     _g1p->phase_times()->record_scan_rs_time(worker_i, 0.0);
   }
--- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -81,14 +81,23 @@
   G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
   ~G1RemSet();
 
-  // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
-  // outside the CS (having invoked "blk->set_region" to set the "from"
-  // region correctly beforehand.) The "worker_i" param is for the
-  // parallel case where the number of the worker thread calling this
-  // function can be helpful in partitioning the work to be done. It
-  // should be the same as the "i" passed to the calling thread's
-  // work(i) function. In the sequential case this param will be ingored.
-  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, int worker_i);
+  // Invoke "blk->do_oop" on all pointers into the collection set
+  // from objects in regions outside the collection set (having
+  // invoked "blk->set_region" to set the "from" region correctly
+  // beforehand.)
+  //
+  // Invoke code_root_cl->do_code_blob on the unmarked nmethods
+  // on the strong code roots list for each region in the
+  // collection set.
+  //
+  // The "worker_i" param is for the parallel case where the id
+  // of the worker thread calling this function can be helpful in
+  // partitioning the work to be done. It should be the same as
+  // the "i" passed to the calling thread's work(i) function.
+  // In the sequential case this param will be ignored.
+  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
+                                   CodeBlobToOopClosure* code_root_cl,
+                                   int worker_i);
 
   // Prepare for and cleanup after an oops_into_collection_set_do
   // call.  Must call each of these once before and after (in sequential
@@ -98,7 +107,10 @@
   void prepare_for_oops_into_collection_set_do();
   void cleanup_after_oops_into_collection_set_do();
 
-  void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
+  void scanRS(OopsInHeapRegionClosure* oc,
+              CodeBlobToOopClosure* code_root_cl,
+              int worker_i);
+
   void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
 
   CardTableModRefBS* ct_bs() { return _ct_bs; }
--- a/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -127,32 +127,55 @@
 
 class HRRSStatsIter: public HeapRegionClosure {
   size_t _occupied;
-  size_t _total_mem_sz;
-  size_t _max_mem_sz;
-  HeapRegion* _max_mem_sz_region;
+
+  size_t _total_rs_mem_sz;
+  size_t _max_rs_mem_sz;
+  HeapRegion* _max_rs_mem_sz_region;
+
+  size_t _total_code_root_mem_sz;
+  size_t _max_code_root_mem_sz;
+  HeapRegion* _max_code_root_mem_sz_region;
 public:
   HRRSStatsIter() :
     _occupied(0),
-    _total_mem_sz(0),
-    _max_mem_sz(0),
-    _max_mem_sz_region(NULL)
+    _total_rs_mem_sz(0),
+    _max_rs_mem_sz(0),
+    _max_rs_mem_sz_region(NULL),
+    _total_code_root_mem_sz(0),
+    _max_code_root_mem_sz(0),
+    _max_code_root_mem_sz_region(NULL)
   {}
 
   bool doHeapRegion(HeapRegion* r) {
-    size_t mem_sz = r->rem_set()->mem_size();
-    if (mem_sz > _max_mem_sz) {
-      _max_mem_sz = mem_sz;
-      _max_mem_sz_region = r;
+    HeapRegionRemSet* hrrs = r->rem_set();
+
+    // HeapRegionRemSet::mem_size() includes the
+    // size of the strong code roots
+    size_t rs_mem_sz = hrrs->mem_size();
+    if (rs_mem_sz > _max_rs_mem_sz) {
+      _max_rs_mem_sz = rs_mem_sz;
+      _max_rs_mem_sz_region = r;
     }
-    _total_mem_sz += mem_sz;
-    size_t occ = r->rem_set()->occupied();
+    _total_rs_mem_sz += rs_mem_sz;
+
+    size_t code_root_mem_sz = hrrs->strong_code_roots_mem_size();
+    if (code_root_mem_sz > _max_code_root_mem_sz) {
+      _max_code_root_mem_sz = code_root_mem_sz;
+      _max_code_root_mem_sz_region = r;
+    }
+    _total_code_root_mem_sz += code_root_mem_sz;
+
+    size_t occ = hrrs->occupied();
     _occupied += occ;
     return false;
   }
-  size_t total_mem_sz() { return _total_mem_sz; }
-  size_t max_mem_sz() { return _max_mem_sz; }
+  size_t total_rs_mem_sz() { return _total_rs_mem_sz; }
+  size_t max_rs_mem_sz() { return _max_rs_mem_sz; }
+  HeapRegion* max_rs_mem_sz_region() { return _max_rs_mem_sz_region; }
+  size_t total_code_root_mem_sz() { return _total_code_root_mem_sz; }
+  size_t max_code_root_mem_sz() { return _max_code_root_mem_sz; }
+  HeapRegion* max_code_root_mem_sz_region() { return _max_code_root_mem_sz_region; }
   size_t occupied() { return _occupied; }
-  HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
 };
 
 double calc_percentage(size_t numerator, size_t denominator) {
@@ -184,22 +207,33 @@
 
   HRRSStatsIter blk;
   G1CollectedHeap::heap()->heap_region_iterate(&blk);
+  // RemSet stats
   out->print_cr("  Total heap region rem set sizes = "SIZE_FORMAT"K."
                 "  Max = "SIZE_FORMAT"K.",
-                blk.total_mem_sz()/K, blk.max_mem_sz()/K);
+                blk.total_rs_mem_sz()/K, blk.max_rs_mem_sz()/K);
   out->print_cr("  Static structures = "SIZE_FORMAT"K,"
                 " free_lists = "SIZE_FORMAT"K.",
                 HeapRegionRemSet::static_mem_size() / K,
                 HeapRegionRemSet::fl_mem_size() / K);
   out->print_cr("    "SIZE_FORMAT" occupied cards represented.",
                 blk.occupied());
-  HeapRegion* max_mem_sz_region = blk.max_mem_sz_region();
-  HeapRegionRemSet* rem_set = max_mem_sz_region->rem_set();
+  HeapRegion* max_rs_mem_sz_region = blk.max_rs_mem_sz_region();
+  HeapRegionRemSet* max_rs_rem_set = max_rs_mem_sz_region->rem_set();
   out->print_cr("    Max size region = "HR_FORMAT", "
                 "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
-                HR_FORMAT_PARAMS(max_mem_sz_region),
-                (rem_set->mem_size() + K - 1)/K,
-                (rem_set->occupied() + K - 1)/K);
-
+                HR_FORMAT_PARAMS(max_rs_mem_sz_region),
+                (max_rs_rem_set->mem_size() + K - 1)/K,
+                (max_rs_rem_set->occupied() + K - 1)/K);
   out->print_cr("    Did %d coarsenings.", num_coarsenings());
+  // Strong code root stats
+  out->print_cr("  Total heap region code-root set sizes = "SIZE_FORMAT"K."
+                "  Max = "SIZE_FORMAT"K.",
+                blk.total_code_root_mem_sz()/K, blk.max_code_root_mem_sz()/K);
+  HeapRegion* max_code_root_mem_sz_region = blk.max_code_root_mem_sz_region();
+  HeapRegionRemSet* max_code_root_rem_set = max_code_root_mem_sz_region->rem_set();
+  out->print_cr("    Max size region = "HR_FORMAT", "
+                "size = "SIZE_FORMAT "K, num_elems = "SIZE_FORMAT".",
+                HR_FORMAT_PARAMS(max_code_root_mem_sz_region),
+                (max_code_root_rem_set->strong_code_roots_mem_size() + K - 1)/K,
+                (max_code_root_rem_set->strong_code_roots_list_length()));
 }
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -319,7 +319,10 @@
                                                                             \
   diagnostic(bool, G1VerifyRSetsDuringFullGC, false,                        \
              "If true, perform verification of each heap region's "         \
-             "remembered set when verifying the heap during a full GC.")
+             "remembered set when verifying the heap during a full GC.")    \
+                                                                            \
+  diagnostic(bool, G1VerifyHeapRegionCodeRoots, false,                      \
+             "Verify the code root lists attached to each heap region.")
 
 G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
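Because G1VerifyHeapRegionCodeRoots is declared diagnostic, it must be
unlocked before use, and it only takes effect when heap verification
actually runs; for example (MyApp is a placeholder):

    java -XX:+UseG1GC \
         -XX:+UnlockDiagnosticVMOptions -XX:+G1VerifyHeapRegionCodeRoots \
         MyApp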
 
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "code/nmethod.hpp"
 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
@@ -50,144 +51,6 @@
                                                    OopClosure* oc) :
   _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
 
-class VerifyLiveClosure: public OopClosure {
-private:
-  G1CollectedHeap* _g1h;
-  CardTableModRefBS* _bs;
-  oop _containing_obj;
-  bool _failures;
-  int _n_failures;
-  VerifyOption _vo;
-public:
-  // _vo == UsePrevMarking -> use "prev" marking information,
-  // _vo == UseNextMarking -> use "next" marking information,
-  // _vo == UseMarkWord    -> use mark word from object header.
-  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
-    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
-    _failures(false), _n_failures(0), _vo(vo)
-  {
-    BarrierSet* bs = _g1h->barrier_set();
-    if (bs->is_a(BarrierSet::CardTableModRef))
-      _bs = (CardTableModRefBS*)bs;
-  }
-
-  void set_containing_obj(oop obj) {
-    _containing_obj = obj;
-  }
-
-  bool failures() { return _failures; }
-  int n_failures() { return _n_failures; }
-
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(      oop* p) { do_oop_work(p); }
-
-  void print_object(outputStream* out, oop obj) {
-#ifdef PRODUCT
-    Klass* k = obj->klass();
-    const char* class_name = InstanceKlass::cast(k)->external_name();
-    out->print_cr("class name %s", class_name);
-#else // PRODUCT
-    obj->print_on(out);
-#endif // PRODUCT
-  }
-
-  template <class T>
-  void do_oop_work(T* p) {
-    assert(_containing_obj != NULL, "Precondition");
-    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
-           "Precondition");
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-      bool failed = false;
-      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
-        MutexLockerEx x(ParGCRareEvent_lock,
-                        Mutex::_no_safepoint_check_flag);
-
-        if (!_failures) {
-          gclog_or_tty->print_cr("");
-          gclog_or_tty->print_cr("----------");
-        }
-        if (!_g1h->is_in_closed_subset(obj)) {
-          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
-          gclog_or_tty->print_cr("Field "PTR_FORMAT
-                                 " of live obj "PTR_FORMAT" in region "
-                                 "["PTR_FORMAT", "PTR_FORMAT")",
-                                 p, (void*) _containing_obj,
-                                 from->bottom(), from->end());
-          print_object(gclog_or_tty, _containing_obj);
-          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
-                                 (void*) obj);
-        } else {
-          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
-          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
-          gclog_or_tty->print_cr("Field "PTR_FORMAT
-                                 " of live obj "PTR_FORMAT" in region "
-                                 "["PTR_FORMAT", "PTR_FORMAT")",
-                                 p, (void*) _containing_obj,
-                                 from->bottom(), from->end());
-          print_object(gclog_or_tty, _containing_obj);
-          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
-                                 "["PTR_FORMAT", "PTR_FORMAT")",
-                                 (void*) obj, to->bottom(), to->end());
-          print_object(gclog_or_tty, obj);
-        }
-        gclog_or_tty->print_cr("----------");
-        gclog_or_tty->flush();
-        _failures = true;
-        failed = true;
-        _n_failures++;
-      }
-
-      if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
-        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
-        HeapRegion* to   = _g1h->heap_region_containing(obj);
-        if (from != NULL && to != NULL &&
-            from != to &&
-            !to->isHumongous()) {
-          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
-          jbyte cv_field = *_bs->byte_for_const(p);
-          const jbyte dirty = CardTableModRefBS::dirty_card_val();
-
-          bool is_bad = !(from->is_young()
-                          || to->rem_set()->contains_reference(p)
-                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
-                              (_containing_obj->is_objArray() ?
-                                  cv_field == dirty
-                               : cv_obj == dirty || cv_field == dirty));
-          if (is_bad) {
-            MutexLockerEx x(ParGCRareEvent_lock,
-                            Mutex::_no_safepoint_check_flag);
-
-            if (!_failures) {
-              gclog_or_tty->print_cr("");
-              gclog_or_tty->print_cr("----------");
-            }
-            gclog_or_tty->print_cr("Missing rem set entry:");
-            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
-                                   "of obj "PTR_FORMAT", "
-                                   "in region "HR_FORMAT,
-                                   p, (void*) _containing_obj,
-                                   HR_FORMAT_PARAMS(from));
-            _containing_obj->print_on(gclog_or_tty);
-            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
-                                   "in region "HR_FORMAT,
-                                   (void*) obj,
-                                   HR_FORMAT_PARAMS(to));
-            obj->print_on(gclog_or_tty);
-            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
-                          cv_obj, cv_field);
-            gclog_or_tty->print_cr("----------");
-            gclog_or_tty->flush();
-            _failures = true;
-            if (!failed) _n_failures++;
-          }
-        }
-      }
-    }
-  }
-};
-
 template<class ClosureType>
 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                                HeapRegion* hr,
@@ -368,7 +231,7 @@
   if (!par) {
     // If this is parallel, this will be done later.
     HeapRegionRemSet* hrrs = rem_set();
-    if (hrrs != NULL) hrrs->clear();
+    hrrs->clear();
     _claimed = InitialClaimValue;
   }
   zero_marked_bytes();
@@ -505,6 +368,7 @@
     _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
     _predicted_bytes_to_copy(0)
 {
+  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
   _orig_end = mr.end();
   // Note that initialize() will set the start of the unmarked area of the
   // region.
@@ -512,8 +376,6 @@
   set_top(bottom());
   set_saved_mark();
 
-  _rem_set =  new HeapRegionRemSet(sharedOffsetArray, this);
-
   assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
 }
 
@@ -733,6 +595,160 @@
   return NULL;
 }
 
+// Code roots support
+
+void HeapRegion::add_strong_code_root(nmethod* nm) {
+  HeapRegionRemSet* hrrs = rem_set();
+  hrrs->add_strong_code_root(nm);
+}
+
+void HeapRegion::remove_strong_code_root(nmethod* nm) {
+  HeapRegionRemSet* hrrs = rem_set();
+  hrrs->remove_strong_code_root(nm);
+}
+
+void HeapRegion::migrate_strong_code_roots() {
+  assert(in_collection_set(), "only collection set regions");
+  assert(!isHumongous(), "not humongous regions");
+
+  HeapRegionRemSet* hrrs = rem_set();
+  hrrs->migrate_strong_code_roots();
+}
+
+void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
+  HeapRegionRemSet* hrrs = rem_set();
+  hrrs->strong_code_roots_do(blk);
+}
+
+class VerifyStrongCodeRootOopClosure: public OopClosure {
+  const HeapRegion* _hr;
+  nmethod* _nm;
+  bool _failures;
+  bool _has_oops_in_region;
+
+  template <class T> void do_oop_work(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+
+      // Note: not all the oops embedded in the nmethod are in the
+      // current region. We only look at those which are.
+      if (_hr->is_in(obj)) {
+        // Object is in the region. Check that it's below top.
+        if (_hr->top() <= (HeapWord*)obj) {
+          // Object is above top
+          gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
+                                 "["PTR_FORMAT", "PTR_FORMAT") is above "
+                                 "top "PTR_FORMAT,
+                                 obj, _hr->bottom(), _hr->end(), _hr->top());
+          _failures = true;
+          return;
+        }
+        // Nmethod has at least one oop in the current region
+        _has_oops_in_region = true;
+      }
+    }
+  }
+
+public:
+  VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
+    _hr(hr), _failures(false), _has_oops_in_region(false) {}
+
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(oop* p)       { do_oop_work(p); }
+
+  bool failures()           { return _failures; }
+  bool has_oops_in_region() { return _has_oops_in_region; }
+};
+
+class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
+  const HeapRegion* _hr;
+  bool _failures;
+public:
+  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
+    _hr(hr), _failures(false) {}
+
+  void do_code_blob(CodeBlob* cb) {
+    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
+    if (nm != NULL) {
+      // Verify that the nmethod is live
+      if (!nm->is_alive()) {
+        gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod "
+                               PTR_FORMAT" in its strong code roots",
+                               _hr->bottom(), _hr->end(), nm);
+        _failures = true;
+      } else {
+        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
+        nm->oops_do(&oop_cl);
+        if (!oop_cl.has_oops_in_region()) {
+          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod "
+                                 PTR_FORMAT" in its strong code roots "
+                                 "with no pointers into region",
+                                 _hr->bottom(), _hr->end(), nm);
+          _failures = true;
+        } else if (oop_cl.failures()) {
+          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other "
+                                 "failures for nmethod "PTR_FORMAT,
+                                 _hr->bottom(), _hr->end(), nm);
+          _failures = true;
+        }
+      }
+    }
+  }
+
+  bool failures()       { return _failures; }
+};
+
+void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
+  if (!G1VerifyHeapRegionCodeRoots) {
+    // We're not verifying code roots.
+    return;
+  }
+  if (vo == VerifyOption_G1UseMarkWord) {
+    // Marking verification during a full GC is performed after class
+    // unloading, code cache unloading, etc so the strong code roots
+    // attached to each heap region are in an inconsistent state. They won't
+    // be consistent until the strong code roots are rebuilt after the
+      // actual GC. Skip verifying the strong code roots at this
+      // particular time.
+    assert(VerifyDuringGC, "only way to get here");
+    return;
+  }
+
+  HeapRegionRemSet* hrrs = rem_set();
+  int strong_code_roots_length = hrrs->strong_code_roots_list_length();
+
+  // If this region is empty, then there should be no entries
+  // on its strong code root list
+  if (is_empty()) {
+    if (strong_code_roots_length > 0) {
+      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
+                             "but has "INT32_FORMAT" code root entries",
+                             bottom(), end(), strong_code_roots_length);
+      *failures = true;
+    }
+    return;
+  }
+
+  // An H-region should have an empty strong code root list
+  if (isHumongous()) {
+    if (strong_code_roots_length > 0) {
+      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
+                             "but has "INT32_FORMAT" code root entries",
+                             bottom(), end(), strong_code_roots_length);
+      *failures = true;
+    }
+    return;
+  }
+
+  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
+  strong_code_roots_do(&cb_cl);
+
+  if (cb_cl.failures()) {
+    *failures = true;
+  }
+}
+
 void HeapRegion::print() const { print_on(gclog_or_tty); }
 void HeapRegion::print_on(outputStream* st) const {
   if (isHumongous()) {
@@ -761,10 +777,143 @@
   G1OffsetTableContigSpace::print_on(st);
 }
 
-void HeapRegion::verify() const {
-  bool dummy = false;
-  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
-}
+class VerifyLiveClosure: public OopClosure {
+private:
+  G1CollectedHeap* _g1h;
+  CardTableModRefBS* _bs;
+  oop _containing_obj;
+  bool _failures;
+  int _n_failures;
+  VerifyOption _vo;
+public:
+  // _vo == UsePrevMarking -> use "prev" marking information,
+  // _vo == UseNextMarking -> use "next" marking information,
+  // _vo == UseMarkWord    -> use mark word from object header.
+  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
+    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
+    _failures(false), _n_failures(0), _vo(vo)
+  {
+    BarrierSet* bs = _g1h->barrier_set();
+    if (bs->is_a(BarrierSet::CardTableModRef))
+      _bs = (CardTableModRefBS*)bs;
+  }
+
+  void set_containing_obj(oop obj) {
+    _containing_obj = obj;
+  }
+
+  bool failures() { return _failures; }
+  int n_failures() { return _n_failures; }
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }
+
+  void print_object(outputStream* out, oop obj) {
+#ifdef PRODUCT
+    Klass* k = obj->klass();
+    const char* class_name = InstanceKlass::cast(k)->external_name();
+    out->print_cr("class name %s", class_name);
+#else // PRODUCT
+    obj->print_on(out);
+#endif // PRODUCT
+  }
+
+  template <class T>
+  void do_oop_work(T* p) {
+    assert(_containing_obj != NULL, "Precondition");
+    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
+           "Precondition");
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+      bool failed = false;
+      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
+        MutexLockerEx x(ParGCRareEvent_lock,
+                        Mutex::_no_safepoint_check_flag);
+
+        if (!_failures) {
+          gclog_or_tty->print_cr("");
+          gclog_or_tty->print_cr("----------");
+        }
+        if (!_g1h->is_in_closed_subset(obj)) {
+          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
+          gclog_or_tty->print_cr("Field "PTR_FORMAT
+                                 " of live obj "PTR_FORMAT" in region "
+                                 "["PTR_FORMAT", "PTR_FORMAT")",
+                                 p, (void*) _containing_obj,
+                                 from->bottom(), from->end());
+          print_object(gclog_or_tty, _containing_obj);
+          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
+                                 (void*) obj);
+        } else {
+          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
+          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
+          gclog_or_tty->print_cr("Field "PTR_FORMAT
+                                 " of live obj "PTR_FORMAT" in region "
+                                 "["PTR_FORMAT", "PTR_FORMAT")",
+                                 p, (void*) _containing_obj,
+                                 from->bottom(), from->end());
+          print_object(gclog_or_tty, _containing_obj);
+          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
+                                 "["PTR_FORMAT", "PTR_FORMAT")",
+                                 (void*) obj, to->bottom(), to->end());
+          print_object(gclog_or_tty, obj);
+        }
+        gclog_or_tty->print_cr("----------");
+        gclog_or_tty->flush();
+        _failures = true;
+        failed = true;
+        _n_failures++;
+      }
+
+      if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
+        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
+        HeapRegion* to   = _g1h->heap_region_containing(obj);
+        if (from != NULL && to != NULL &&
+            from != to &&
+            !to->isHumongous()) {
+          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
+          jbyte cv_field = *_bs->byte_for_const(p);
+          const jbyte dirty = CardTableModRefBS::dirty_card_val();
+
+          bool is_bad = !(from->is_young()
+                          || to->rem_set()->contains_reference(p)
+                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
+                              (_containing_obj->is_objArray() ?
+                                  cv_field == dirty
+                               : cv_obj == dirty || cv_field == dirty));
+          if (is_bad) {
+            MutexLockerEx x(ParGCRareEvent_lock,
+                            Mutex::_no_safepoint_check_flag);
+
+            if (!_failures) {
+              gclog_or_tty->print_cr("");
+              gclog_or_tty->print_cr("----------");
+            }
+            gclog_or_tty->print_cr("Missing rem set entry:");
+            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
+                                   "of obj "PTR_FORMAT", "
+                                   "in region "HR_FORMAT,
+                                   p, (void*) _containing_obj,
+                                   HR_FORMAT_PARAMS(from));
+            _containing_obj->print_on(gclog_or_tty);
+            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
+                                   "in region "HR_FORMAT,
+                                   (void*) obj,
+                                   HR_FORMAT_PARAMS(to));
+            obj->print_on(gclog_or_tty);
+            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
+                          cv_obj, cv_field);
+            gclog_or_tty->print_cr("----------");
+            gclog_or_tty->flush();
+            _failures = true;
+            if (!failed) _n_failures++;
+          }
+        }
+      }
+    }
+  }
+};
 
 // This really ought to be commoned up into OffsetTableContigSpace somehow.
 // We would need a mechanism to make that code skip dead objects.
@@ -904,6 +1053,13 @@
     *failures = true;
     return;
   }
+
+  verify_strong_code_roots(vo, failures);
+}
+
+void HeapRegion::verify() const {
+  bool dummy = false;
+  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
 }
 
 // G1OffsetTableContigSpace code; copied from space.cpp.  Hope this can go
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -52,6 +52,7 @@
 class HeapRegionRemSetIterator;
 class HeapRegion;
 class HeapRegionSetBase;
+class nmethod;
 
 #define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
 #define HR_FORMAT_PARAMS(_hr_) \
@@ -371,7 +372,8 @@
     RebuildRSClaimValue        = 5,
     ParEvacFailureClaimValue   = 6,
     AggregateCountClaimValue   = 7,
-    VerifyCountClaimValue      = 8
+    VerifyCountClaimValue      = 8,
+    ParMarkRootClaimValue      = 9
   };
 
   inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
@@ -796,6 +798,25 @@
 
   virtual void reset_after_compaction();
 
+  // Routines for managing a list of code roots (attached to this
+  // region's RSet) that point into this heap region.
+  void add_strong_code_root(nmethod* nm);
+  void remove_strong_code_root(nmethod* nm);
+
+  // During a collection, migrate the successfully evacuated
+  // strong code roots that pointed into this region to the
+  // new regions that they now point into. Unsuccessfully
+  // evacuated code roots are not migrated.
+  void migrate_strong_code_roots();
+
+  // Applies blk->do_code_blob() to each of the entries in
+  // the strong code roots list for this region
+  void strong_code_roots_do(CodeBlobClosure* blk) const;
+
+  // Verify that the entries on the strong code root list for this
+  // region are live and include at least one pointer into this region.
+  void verify_strong_code_roots(VerifyOption vo, bool* failures) const;
+
   void print() const;
   void print_on(outputStream* st) const;
 
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -33,6 +33,7 @@
 #include "oops/oop.inline.hpp"
 #include "utilities/bitMap.inline.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/growableArray.hpp"
 
 class PerRegionTable: public CHeapObj<mtGC> {
   friend class OtherRegionsTable;
@@ -849,7 +850,7 @@
 
 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                    HeapRegion* hr)
-  : _bosa(bosa), _other_regions(hr) {
+  : _bosa(bosa), _strong_code_roots_list(NULL), _other_regions(hr) {
   reset_for_par_iteration();
 }
 
@@ -908,6 +909,12 @@
 }
 
 void HeapRegionRemSet::clear() {
+  if (_strong_code_roots_list != NULL) {
+    delete _strong_code_roots_list;
+  }
+  _strong_code_roots_list = new (ResourceObj::C_HEAP, mtGC)
+                                GrowableArray<nmethod*>(10, 0, NULL, true);
+
   _other_regions.clear();
   assert(occupied() == 0, "Should be clear.");
   reset_for_par_iteration();
@@ -925,6 +932,121 @@
   _other_regions.scrub(ctbs, region_bm, card_bm);
 }
 
+
+// Code roots support
+
+void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
+  assert(nm != NULL, "sanity");
+  // Search for the code blob from the RHS to avoid
+  // duplicate entries as much as possible
+  if (_strong_code_roots_list->find_from_end(nm) < 0) {
+    // Code blob isn't already in the list
+    _strong_code_roots_list->push(nm);
+  }
+}
+
+void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
+  assert(nm != NULL, "sanity");
+  int idx = _strong_code_roots_list->find(nm);
+  if (idx >= 0) {
+    _strong_code_roots_list->remove_at(idx);
+  }
+  // Check that there were no duplicates
+  guarantee(_strong_code_roots_list->find(nm) < 0, "duplicate entry found");
+}
+
+class NMethodMigrationOopClosure : public OopClosure {
+  G1CollectedHeap* _g1h;
+  HeapRegion* _from;
+  nmethod* _nm;
+
+  uint _num_self_forwarded;
+
+  template <class T> void do_oop_work(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+      if (_from->is_in(obj)) {
+        // Reference still points into the source region.
+        // Since roots are immediately evacuated this means that
+        // we must have self forwarded the object
+        assert(obj->is_forwarded(),
+               err_msg("code roots should be immediately evacuated. "
+                       "Ref: "PTR_FORMAT", "
+                       "Obj: "PTR_FORMAT", "
+                       "Region: "HR_FORMAT,
+                       p, (void*) obj, HR_FORMAT_PARAMS(_from)));
+        assert(obj->forwardee() == obj,
+               err_msg("not self forwarded? obj = "PTR_FORMAT, (void*)obj));
+
+        // The object has been self forwarded.
+        // Note: if this happens during an initial mark pause, there is
+        // no need to explicitly mark the object. It will be marked
+        // during the regular evacuation failure handling code.
+        _num_self_forwarded++;
+      } else {
+        // The reference points into a promotion or to-space region
+        HeapRegion* to = _g1h->heap_region_containing(obj);
+        to->rem_set()->add_strong_code_root(_nm);
+      }
+    }
+  }
+
+public:
+  NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm):
+    _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {}
+
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(oop* p)       { do_oop_work(p); }
+
+  uint retain() { return _num_self_forwarded > 0; }
+};
+
+void HeapRegionRemSet::migrate_strong_code_roots() {
+  assert(hr()->in_collection_set(), "only collection set regions");
+  assert(!hr()->isHumongous(), "not humongous regions");
+
+  ResourceMark rm;
+
+  // List of code blobs to retain for this region
+  GrowableArray<nmethod*> to_be_retained(10);
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  while (_strong_code_roots_list->is_nonempty()) {
+    nmethod *nm = _strong_code_roots_list->pop();
+    if (nm != NULL) {
+      NMethodMigrationOopClosure oop_cl(g1h, hr(), nm);
+      nm->oops_do(&oop_cl);
+      if (oop_cl.retain()) {
+        to_be_retained.push(nm);
+      }
+    }
+  }
+
+  // Now push any code roots we need to retain
+  assert(to_be_retained.is_empty() || hr()->evacuation_failed(),
+         "Retained nmethod list must be empty or "
+         "evacuation of this region failed");
+
+  while (to_be_retained.is_nonempty()) {
+    nmethod* nm = to_be_retained.pop();
+    assert(nm != NULL, "sanity");
+    add_strong_code_root(nm);
+  }
+}
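Design note: draining the whole list before re-adding the retained
entries means the loop never mutates the array it is iterating over, and
a region that evacuated cleanly ends the pause with an empty list, which
is exactly what the assert on the retained list checks.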
+
+void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
+  for (int i = 0; i < _strong_code_roots_list->length(); i += 1) {
+    nmethod* nm = _strong_code_roots_list->at(i);
+    blk->do_code_blob(nm);
+  }
+}
+
+size_t HeapRegionRemSet::strong_code_roots_mem_size() {
+  return sizeof(GrowableArray<nmethod*>) +
+         _strong_code_roots_list->max_length() * sizeof(nmethod*);
+}
+
 //-------------------- Iteration --------------------
 
 HeapRegionRemSetIterator:: HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) :
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -37,6 +37,7 @@
 class HeapRegionRemSetIterator;
 class PerRegionTable;
 class SparsePRT;
+class nmethod;
 
 // Essentially a wrapper around SparsePRTCleanupTask. See
 // sparsePRT.hpp for more details.
@@ -191,6 +192,10 @@
   G1BlockOffsetSharedArray* _bosa;
   G1BlockOffsetSharedArray* bosa() const { return _bosa; }
 
+  // A list of code blobs (nmethods) whose code contains pointers into
+  // the region that owns this RSet.
+  GrowableArray<nmethod*>* _strong_code_roots_list;
+
   OtherRegionsTable _other_regions;
 
   enum ParIterState { Unclaimed, Claimed, Complete };
@@ -282,11 +287,13 @@
   }
 
   // The actual # of bytes this hr_remset takes up.
+  // Note: this also includes the strong code root set.
   size_t mem_size() {
     return _other_regions.mem_size()
       // This correction is necessary because the above includes the second
       // part.
-      + sizeof(this) - sizeof(OtherRegionsTable);
+      + (sizeof(this) - sizeof(OtherRegionsTable))
+      + strong_code_roots_mem_size();
   }
 
   // Returns the memory occupancy of all static data structures associated
@@ -304,6 +311,37 @@
   bool contains_reference(OopOrNarrowOopStar from) const {
     return _other_regions.contains_reference(from);
   }
+
+  // Routines for managing the list of code roots that point into
+  // the heap region that owns this RSet.
+  void add_strong_code_root(nmethod* nm);
+  void remove_strong_code_root(nmethod* nm);
+
+  // During a collection, migrate the successfully evacuated strong
+  // code roots that pointed into the region that owns this RSet
+  // to the RSets of the new regions that they now point into.
+  // Unsuccessfully evacuated code roots are not migrated.
+  void migrate_strong_code_roots();
+
+  // Applies blk->do_code_blob() to each of the entries in
+  // the strong code roots list
+  void strong_code_roots_do(CodeBlobClosure* blk) const;
+
+  // Returns the number of elements in the strong code roots list
+  int strong_code_roots_list_length() {
+    return _strong_code_roots_list->length();
+  }
+
+  // Returns true if the strong code roots contains the given
+  // nmethod.
+  bool strong_code_roots_list_contains(nmethod* nm) {
+    return _strong_code_roots_list->contains(nm);
+  }
+
+  // Returns the amount of memory, in bytes, currently
+  // consumed by the strong code roots.
+  size_t strong_code_roots_mem_size();
+
   void print() const;
 
   // Called during a stop-world phase to perform any deferred cleanups.
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -70,9 +70,6 @@
   guarantee(target_pause_time_ms > 0.0,
             err_msg("target_pause_time_ms = %1.6lf should be positive",
                     target_pause_time_ms));
-  guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause,
-            "we can only request an allocation if the GC cause is for "
-            "an incremental GC pause");
   _gc_cause = gc_cause;
 }
 
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -927,11 +927,9 @@
                                    workers->active_workers(),
                                    Threads::number_of_non_daemon_threads());
   workers->set_active_workers(active_workers);
-  _next_gen = gch->next_gen(this);
-  assert(_next_gen != NULL,
-    "This must be the youngest gen, and not the only gen");
   assert(gch->n_gens() == 2,
          "Par collection currently only works with single older gen.");
+  _next_gen = gch->next_gen(this);
   // Do we have to avoid promotion_undo?
   if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
     set_avoid_promotion_undo(true);
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
 #include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "memory/defNewGeneration.hpp"
+#include "memory/padded.hpp"
 #include "utilities/taskqueue.hpp"
 
 class ChunkArray;
--- a/src/share/vm/gc_implementation/parNew/parOopClosures.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/parNew/parOopClosures.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PAROOPCLOSURES_HPP
 
 #include "memory/genOopClosures.hpp"
+#include "memory/padded.hpp"
 
 // Closures for ParNewGeneration
 
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -216,6 +216,7 @@
   young_gen()->update_counters();
   old_gen()->update_counters();
   MetaspaceCounters::update_performance_counters();
+  CompressedClassSpaceCounters::update_performance_counters();
 }
 
 size_t ParallelScavengeHeap::capacity() const {
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -29,14 +29,16 @@
 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/mutableSpace.hpp"
+#include "memory/allocation.inline.hpp"
 #include "memory/memRegion.hpp"
+#include "memory/padded.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.psgc.inline.hpp"
 
-PSPromotionManager**         PSPromotionManager::_manager_array = NULL;
-OopStarTaskQueueSet*         PSPromotionManager::_stack_array_depth = NULL;
-PSOldGen*                    PSPromotionManager::_old_gen = NULL;
-MutableSpace*                PSPromotionManager::_young_space = NULL;
+PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
+OopStarTaskQueueSet*           PSPromotionManager::_stack_array_depth = NULL;
+PSOldGen*                      PSPromotionManager::_old_gen = NULL;
+MutableSpace*                  PSPromotionManager::_young_space = NULL;
 
 void PSPromotionManager::initialize() {
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@@ -45,8 +47,10 @@
   _old_gen = heap->old_gen();
   _young_space = heap->young_gen()->to_space();
 
+  // To prevent false sharing, we pad the PSPromotionManagers
+  // and make sure that the first instance starts on a cache line boundary.
   assert(_manager_array == NULL, "Attempt to initialize twice");
-  _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1, mtGC);
+  _manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(ParallelGCThreads + 1);
   guarantee(_manager_array != NULL, "Could not initialize promotion manager");
 
   _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
@@ -54,26 +58,21 @@
 
   // Create and register the PSPromotionManager(s) for the worker threads.
   for(uint i=0; i<ParallelGCThreads; i++) {
-    _manager_array[i] = new PSPromotionManager();
-    guarantee(_manager_array[i] != NULL, "Could not create PSPromotionManager");
-    stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth());
+    stack_array_depth()->register_queue(i, _manager_array[i].claimed_stack_depth());
   }
-
   // The VMThread gets its own PSPromotionManager, which is not available
   // for work stealing.
-  _manager_array[ParallelGCThreads] = new PSPromotionManager();
-  guarantee(_manager_array[ParallelGCThreads] != NULL, "Could not create PSPromotionManager");
 }
 
 PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) {
   assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
   assert(_manager_array != NULL, "Sanity");
-  return _manager_array[index];
+  return &_manager_array[index];
 }
 
 PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
   assert(_manager_array != NULL, "Sanity");
-  return _manager_array[ParallelGCThreads];
+  return &_manager_array[ParallelGCThreads];
 }
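In portable C++ the padding idea looks roughly like the sketch below;
alignas and the 64-byte line size are assumptions standing in for
HotSpot's PaddedEnd/PaddedArray machinery:

    // Sketch only; not the HotSpot implementation.
    struct alignas(64) PaddedManager {  // 64 = assumed cache line size
      size_t pushes;                    // hot, per-worker mutable state
    };
    // One slot per GC worker plus one for the VM thread, as above.
    static PaddedManager managers[16 + 1];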
 
 void PSPromotionManager::pre_scavenge() {
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -29,6 +29,8 @@
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "memory/allocation.hpp"
+#include "memory/padded.hpp"
+#include "utilities/globalDefinitions.hpp"
 #include "utilities/taskqueue.hpp"
 
 //
@@ -51,14 +53,14 @@
 class PSOldGen;
 class ParCompactionManager;
 
-class PSPromotionManager : public CHeapObj<mtGC> {
+class PSPromotionManager VALUE_OBJ_CLASS_SPEC {
   friend class PSScavenge;
   friend class PSRefProcTaskExecutor;
  private:
-  static PSPromotionManager**         _manager_array;
-  static OopStarTaskQueueSet*         _stack_array_depth;
-  static PSOldGen*                    _old_gen;
-  static MutableSpace*                _young_space;
+  static PaddedEnd<PSPromotionManager>* _manager_array;
+  static OopStarTaskQueueSet*           _stack_array_depth;
+  static PSOldGen*                      _old_gen;
+  static MutableSpace*                  _young_space;
 
 #if TASKQUEUE_STATS
   size_t                              _masked_pushes;
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -33,7 +33,7 @@
 inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
   assert(_manager_array != NULL, "access of NULL manager_array");
   assert(index >= 0 && index <= (int)ParallelGCThreads, "out of range manager_array access");
-  return _manager_array[index];
+  return &_manager_array[index];
 }
 
 template <class T>
--- a/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -32,6 +32,7 @@
 #if INCLUDE_SERVICES
 
 void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, jlong timestamp) {
+#if INCLUDE_TRACE
   assert(Tracing::is_event_enabled(EventObjectCountAfterGC::eventId),
          "Only call this method if the event is enabled");
 
@@ -42,6 +43,7 @@
   event.set_totalSize(entry->words() * BytesPerWord);
   event.set_endtime(timestamp);
   event.commit();
+#endif // INCLUDE_TRACE
 }
 
 bool ObjectCountEventSender::should_send_event() {
--- a/src/share/vm/gc_interface/collectedHeap.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_interface/collectedHeap.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -118,6 +118,14 @@
   }
 }
 
+void CollectedHeap::register_nmethod(nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
+}
+
+void CollectedHeap::unregister_nmethod(nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
+}
+
 void CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
   const GCHeapSummary& heap_summary = create_heap_summary();
   const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -49,6 +49,7 @@
 class Thread;
 class ThreadClosure;
 class VirtualSpaceSummary;
+class nmethod;
 
 class GCMessage : public FormatBuffer<1024> {
  public:
@@ -603,6 +604,11 @@
   void print_heap_before_gc();
   void print_heap_after_gc();
 
+  // Registering and unregistering an nmethod (compiled code) with the heap.
+  // Override with specific mechanism for each specialized heap type.
+  virtual void register_nmethod(nmethod* nm);
+  virtual void unregister_nmethod(nmethod* nm);
+
   void trace_heap_before_gc(GCTracer* gc_tracer);
   void trace_heap_after_gc(GCTracer* gc_tracer);
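
The default implementations above only assert the locking discipline; a collector that needs to treat compiled code as a source of roots overrides both hooks. A hypothetical override, for illustration only (the subclass and the root set it maintains are not part of this change):

    class NmethodTrackingHeap : public CollectedHeap {
     public:
      virtual void register_nmethod(nmethod* nm) {
        assert_locked_or_safepoint(CodeCache_lock);
        // e.g. add nm to a remembered set so its embedded oops are
        // visited during (partial) collections
      }
      virtual void unregister_nmethod(nmethod* nm) {
        assert_locked_or_safepoint(CodeCache_lock);
        // e.g. remove nm from that set when the nmethod is flushed
      }
    };
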
 
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -1209,3 +1209,26 @@
                        size_of_arguments * Interpreter::stackElementSize);
 IRT_END
 #endif
+
+#if INCLUDE_JVMTI
+// This is support for the JVMTI PopFrame interface.
+// Make sure the call site is an invokestatic of a polymorphic intrinsic that takes a
+// member_name argument, and return that member name as a vm_result so that it can be
+// reloaded into the list of invokestatic parameters.
+// The dmh argument is a reference to a DirectMethodHandle that has a member name field.
+IRT_ENTRY(void, InterpreterRuntime::member_name_arg_or_null(JavaThread* thread, address dmh,
+                                                            Method* method, address bcp))
+  Bytecodes::Code code = Bytecodes::code_at(method, bcp);
+  if (code != Bytecodes::_invokestatic) {
+    return;
+  }
+  ConstantPool* cpool = method->constants();
+  int cp_index = Bytes::get_native_u2(bcp + 1) + ConstantPool::CPCACHE_INDEX_TAG;
+  Symbol* cname = cpool->klass_name_at(cpool->klass_ref_index_at(cp_index));
+  Symbol* mname = cpool->name_ref_at(cp_index);
+
+  if (MethodHandles::has_member_arg(cname, mname)) {
+    oop member_name = java_lang_invoke_DirectMethodHandle::member((oop)dmh);
+    thread->set_vm_result(member_name);
+  }
+IRT_END
+#endif // INCLUDE_JVMTI
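
A hedged note on the index arithmetic in member_name_arg_or_null: after bytecode rewriting, the two bytes following the invokestatic opcode hold a constant-pool-cache index in native byte order, which is why the code uses get_native_u2 rather than a big-endian read. Illustrative layout (opcode value per the JVM specification):

    // bcp[0]    == 0xb8                // Bytecodes::_invokestatic
    // bcp[1..2] == u2 cp-cache index   // native byte order after rewriting
    //
    // ConstantPool::CPCACHE_INDEX_TAG is added so that the constant pool
    // accessors (klass_ref_index_at, name_ref_at) treat the value as a
    // cache index rather than a raw constant pool index.
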
--- a/src/share/vm/interpreter/interpreterRuntime.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/interpreter/interpreterRuntime.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -95,6 +95,9 @@
   static void    create_exception(JavaThread* thread, char* name, char* message);
   static void    create_klass_exception(JavaThread* thread, char* name, oopDesc* obj);
   static address exception_handler_for_exception(JavaThread* thread, oopDesc* exception);
+#if INCLUDE_JVMTI
+  static void    member_name_arg_or_null(JavaThread* thread, address dmh, Method* m, address bcp);
+#endif
   static void    throw_pending_exception(JavaThread* thread);
 
   // Statics & fields
--- a/src/share/vm/memory/allocation.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/memory/allocation.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -669,7 +669,7 @@
   NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1)
 
 #define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\
-  (type*) AllocateHeap(size * sizeof(type), memflags, pc, allocfail)
+  (type*) AllocateHeap((size) * sizeof(type), memflags, pc, allocfail)
 
 #define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
   (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))
@@ -678,16 +678,16 @@
   (type*) (AllocateHeap((size) * sizeof(type), memflags))
 
 #define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\
-  NEW_C_HEAP_ARRAY3(type, size, memflags, pc, AllocFailStrategy::RETURN_NULL)
+  NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)
 
 #define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
-  NEW_C_HEAP_ARRAY3(type, size, memflags, (address)0, AllocFailStrategy::RETURN_NULL)
+  NEW_C_HEAP_ARRAY3(type, (size), memflags, (address)0, AllocFailStrategy::RETURN_NULL)
 
 #define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
-  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags))
+  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))
 
 #define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\
-   (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))
+  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))
 
 #define FREE_C_HEAP_ARRAY(type, old, memflags) \
   FreeHeap((char*)(old), memflags)
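
The added parentheses around size and old are the standard macro-hygiene fix: without them, an argument containing a lower-precedence operator expands incorrectly. A hypothetical expansion, not code from this patch:

    // #define N(type, size) (type*) AllocateHeap(size * sizeof(type), mtInternal)
    // N(int, n + 1)   // expands to AllocateHeap(n + 1 * sizeof(int), mtInternal)
    //                 // i.e. n + sizeof(int), not (n + 1) * sizeof(int)
    //
    // With (size) * sizeof(type), the whole argument expression binds as a unit.
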
--- a/src/share/vm/memory/cardTableRS.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/memory/cardTableRS.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -310,46 +310,31 @@
   _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this);
 }
 
-void CardTableRS::clear_into_younger(Generation* gen) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  // Generations younger than gen have been evacuated. We can clear
-  // card table entries for gen (we know that it has no pointers
-  // to younger gens) and for those below. The card tables for
-  // the youngest gen need never be cleared.
+void CardTableRS::clear_into_younger(Generation* old_gen) {
+  assert(old_gen->level() == 1, "Should only be called for the old generation");
+  // The card tables for the youngest gen need never be cleared.
   // There's a bit of subtlety in the clear() and invalidate()
   // methods that we exploit here and in invalidate_or_clear()
   // below to avoid missing cards at the fringes. If clear() or
   // invalidate() are changed in the future, this code should
   // be revisited. 20040107.ysr
-  Generation* g = gen;
-  for(Generation* prev_gen = gch->prev_gen(g);
-      prev_gen != NULL;
-      g = prev_gen, prev_gen = gch->prev_gen(g)) {
-    MemRegion to_be_cleared_mr = g->prev_used_region();
-    clear(to_be_cleared_mr);
-  }
+  clear(old_gen->prev_used_region());
 }
 
-void CardTableRS::invalidate_or_clear(Generation* gen, bool younger) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  // For each generation gen (and younger)
-  // invalidate the cards for the currently occupied part
-  // of that generation and clear the cards for the
+void CardTableRS::invalidate_or_clear(Generation* old_gen) {
+  assert(old_gen->level() == 1, "Should only be called for the old generation");
+  // Invalidate the cards for the currently occupied part of
+  // the old generation and clear the cards for the
   // unoccupied part of the generation (if any, making use
   // of that generation's prev_used_region to determine that
   // region). No need to do anything for the youngest
   // generation. Also see note#20040107.ysr above.
-  Generation* g = gen;
-  for(Generation* prev_gen = gch->prev_gen(g); prev_gen != NULL;
-      g = prev_gen, prev_gen = gch->prev_gen(g))  {
-    MemRegion used_mr = g->used_region();
-    MemRegion to_be_cleared_mr = g->prev_used_region().minus(used_mr);
-    if (!to_be_cleared_mr.is_empty()) {
-      clear(to_be_cleared_mr);
-    }
-    invalidate(used_mr);
-    if (!younger) break;
+  MemRegion used_mr = old_gen->used_region();
+  MemRegion to_be_cleared_mr = old_gen->prev_used_region().minus(used_mr);
+  if (!to_be_cleared_mr.is_empty()) {
+    clear(to_be_cleared_mr);
   }
+  invalidate(used_mr);
 }
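
A worked picture of the two regions above, assuming the old generation shrank during the collection (addresses hypothetical):

    //  bottom                    top               prev_top
    //  |-------------------------|-----------------|
    //  <-------- used_mr -------->
    //                            <-to_be_cleared_mr->
    //
    // used_mr may now contain arbitrary old-to-young pointers, so its cards
    // are invalidated; the vacated tail [top, prev_top) holds no objects, so
    // its cards are simply cleared.
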
 
 
--- a/src/share/vm/memory/cardTableRS.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/memory/cardTableRS.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -142,12 +142,12 @@
   void verify_aligned_region_empty(MemRegion mr);
 
   void clear(MemRegion mr) { _ct_bs->clear(mr); }
-  void clear_into_younger(Generation* gen);
+  void clear_into_younger(Generation* old_gen);
 
   void invalidate(MemRegion mr, bool whole_heap = false) {
     _ct_bs->invalidate(mr, whole_heap);
   }
-  void invalidate_or_clear(Generation* gen, bool younger);
+  void invalidate_or_clear(Generation* old_gen);
 
   static uintx ct_max_alignment_constraint() {
     return CardTableModRefBS::ct_max_alignment_constraint();
--- a/src/share/vm/memory/collectorPolicy.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/memory/collectorPolicy.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -193,6 +193,8 @@
       alignment = lcm(os::large_page_size(), alignment);
   }
 
+  assert(alignment >= min_alignment(), "Must be");
+
   return alignment;
 }
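
The new assert documents an invariant of the lcm computation; quick arithmetic with assumed sizes (64 KB min_alignment(), 2 MB large pages):

    // lcm(2 MB large-page size, 64 KB) == 2 MB
    //
    // The lcm of the large-page size and the incoming alignment is by
    // definition a multiple of that alignment, so the result can never
    // drop below min_alignment() -- which is what the assert checks.
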
 
--- a/src/share/vm/memory/defNewGeneration.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/memory/defNewGeneration.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -567,8 +567,6 @@
   gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 
   _next_gen = gch->next_gen(this);
-  assert(_next_gen != NULL,
-    "This must be the youngest gen, and not the only gen");
 
   // If the next generation is too full to accommodate promotion
   // from this generation, pass on collection; let the next generation
@@ -901,8 +899,6 @@
   if (_next_gen == NULL) {
     GenCollectedHeap* gch = GenCollectedHeap::heap();
     _next_gen = gch->next_gen(this);
-    assert(_next_gen != NULL,
-           "This must be the youngest gen, and not the only gen");
   }
   return _next_gen->promotion_attempt_is_safe(used());
 }
--- a/src/share/vm/memory/filemap.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/memory/filemap.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -362,15 +362,12 @@
 ReservedSpace FileMapInfo::reserve_shared_memory() {
   struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[0];
   char* requested_addr = si->_base;
-  size_t alignment = os::vm_allocation_granularity();
 
-  size_t size = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
-                              SharedMiscDataSize + SharedMiscCodeSize,
-                              alignment);
+  size_t size = FileMapInfo::shared_spaces_size();
 
   // Reserve the space first, then map otherwise map will go right over some
   // other reserved memory (like the code cache).
-  ReservedSpace rs(size, alignment, false, requested_addr);
+  ReservedSpace rs(size, os::vm_allocation_granularity(), false, requested_addr);
   if (!rs.is_reserved()) {
     fail_continue(err_msg("Unable to reserve shared space at required address " INTPTR_FORMAT, requested_addr));
     return rs;
@@ -559,3 +556,19 @@
                         si->_base, si->_base + si->_used);
   }
 }
+
+// Unmap mapped regions of shared space.
+void FileMapInfo::stop_sharing_and_unmap(const char* msg) {
+  FileMapInfo *map_info = FileMapInfo::current_info();
+  if (map_info) {
+    map_info->fail_continue(msg);
+    for (int i = 0; i < MetaspaceShared::n_regions; i++) {
+      if (map_info->_header._space[i]._base != NULL) {
+        map_info->unmap_region(i);
+        map_info->_header._space[i]._base = NULL;
+      }
+    }
+  } else if (DumpSharedSpaces) {
+    fail_stop(msg, NULL);
+  }
+}
--- a/src/share/vm/memory/filemap.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/memory/filemap.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -150,6 +150,15 @@
   // Return true if given address is in the mapped shared space.
   bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
   void print_shared_spaces() NOT_CDS_RETURN;
+
+  static size_t shared_spaces_size() {
+    return align_size_up(SharedReadOnlySize + SharedReadWriteSize +
+                         SharedMiscDataSize + SharedMiscCodeSize,
+                         os::vm_allocation_granularity());
+  }
+
+  // Stop CDS sharing and unmap CDS regions.
+  static void stop_sharing_and_unmap(const char* msg);
 };
 
 #endif // SHARE_VM_MEMORY_FILEMAP_HPP
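
shared_spaces_size() rounds the sum of the four shared-region sizes up to the allocation granularity. Worked arithmetic with assumed values:

    // sum of region sizes = 50,000,000 bytes          (hypothetical)
    // granularity         = 65,536 bytes (64 KB)      (hypothetical)
    //
    // align_size_up(50,000,000, 65,536)
    //   = ((50,000,000 + 65,535) / 65,536) * 65,536   (integer division)
    //   = 763 * 65,536
    //   = 50,003,968
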
--- a/src/share/vm/memory/genCollectedHeap.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -95,13 +95,13 @@
   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
 
   // The heap must be at least as aligned as generations.
-  size_t alignment = Generation::GenGrain;
+  size_t gen_alignment = Generation::GenGrain;
 
   _gen_specs = gen_policy()->generations();
 
   // Make sure the sizes are all aligned.
   for (i = 0; i < _n_gens; i++) {
-    _gen_specs[i]->align(alignment);
+    _gen_specs[i]->align(gen_alignment);
   }
 
   // Allocate space for the heap.
@@ -109,9 +109,11 @@
   char* heap_address;
   size_t total_reserved = 0;
   int n_covered_regions = 0;
-  ReservedSpace heap_rs(0);
+  ReservedSpace heap_rs;
 
-  heap_address = allocate(alignment, &total_reserved,
+  size_t heap_alignment = collector_policy()->max_alignment();
+
+  heap_address = allocate(heap_alignment, &total_reserved,
                           &n_covered_regions, &heap_rs);
 
   if (!heap_rs.is_reserved()) {
@@ -168,6 +170,8 @@
   const size_t pageSize = UseLargePages ?
       os::large_page_size() : os::vm_page_size();
 
+  assert(alignment % pageSize == 0, "Must be");
+
   for (int i = 0; i < _n_gens; i++) {
     total_reserved += _gen_specs[i]->max_size();
     if (total_reserved < _gen_specs[i]->max_size()) {
@@ -175,24 +179,17 @@
     }
     n_covered_regions += _gen_specs[i]->n_covered_regions();
   }
-  assert(total_reserved % pageSize == 0,
-         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
-                 SIZE_FORMAT, total_reserved, pageSize));
+  assert(total_reserved % alignment == 0,
+         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
+                 SIZE_FORMAT, total_reserved, alignment));
 
   // Needed until the cardtable is fixed to have the right number
   // of covered regions.
   n_covered_regions += 2;
 
-  if (UseLargePages) {
-    assert(total_reserved != 0, "total_reserved cannot be 0");
-    total_reserved = round_to(total_reserved, os::large_page_size());
-    if (total_reserved < os::large_page_size()) {
-      vm_exit_during_initialization(overflow_msg);
-    }
-  }
+  *_total_reserved = total_reserved;
+  *_n_covered_regions = n_covered_regions;
 
-      *_total_reserved = total_reserved;
-      *_n_covered_regions = n_covered_regions;
   *heap_rs = Universe::reserve_heap(total_reserved, alignment);
   return heap_rs->base();
 }
@@ -1070,13 +1067,13 @@
 
 
 void GenCollectedHeap::prepare_for_compaction() {
-  Generation* scanning_gen = _gens[_n_gens-1];
+  guarantee(_n_gens == 2, "Wrong number of generations");
+  Generation* old_gen = _gens[1];
   // Start by compacting into same gen.
-  CompactPoint cp(scanning_gen, NULL, NULL);
-  while (scanning_gen != NULL) {
-    scanning_gen->prepare_for_compaction(&cp);
-    scanning_gen = prev_gen(scanning_gen);
-  }
+  CompactPoint cp(old_gen, NULL, NULL);
+  old_gen->prepare_for_compaction(&cp);
+  Generation* young_gen = _gens[0];
+  young_gen->prepare_for_compaction(&cp);
 }
 
 GCStats* GenCollectedHeap::gc_stats(int level) const {
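
The single CompactPoint threads through both prepare_for_compaction calls, which fixes the destination order for the two-generation heap; a reading of the code above, not new behaviour:

    // CompactPoint cp(old_gen, NULL, NULL);     // destination starts in old gen
    // old_gen->prepare_for_compaction(&cp);     // old objects slide toward the
    //                                           // bottom of the old generation
    // young_gen->prepare_for_compaction(&cp);   // survivors are then assigned
    //                                           // addresses after them, so the
    //                                           // full GC typically promotes
    //                                           // them into the old generation
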
@@ -1211,6 +1208,7 @@
   }
 
   MetaspaceCounters::update_performance_counters();
+  CompressedClassSpaceCounters::update_performance_counters();
 
   always_do_update_barrier = UseConcMarkSweepGC;
 };
@@ -1245,27 +1243,14 @@
   generation_iterate(&ep_cl, false);
 }
 
-oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
+oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                               oop obj,
                                               size_t obj_size) {
+  guarantee(old_gen->level() == 1, "We only get here with an old generation");
   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
   HeapWord* result = NULL;
 
-  // First give each higher generation a chance to allocate the promoted object.
-  Generation* allocator = next_gen(gen);
-  if (allocator != NULL) {
-    do {
-      result = allocator->allocate(obj_size, false);
-    } while (result == NULL && (allocator = next_gen(allocator)) != NULL);
-  }
-
-  if (result == NULL) {
-    // Then give gen and higher generations a chance to expand and allocate the
-    // object.
-    do {
-      result = gen->expand_and_allocate(obj_size, false);
-    } while (result == NULL && (gen = next_gen(gen)) != NULL);
-  }
+  result = old_gen->expand_and_allocate(obj_size, false);
 
   if (result != NULL) {
     Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
--- a/src/share/vm/memory/genCollectedHeap.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -368,25 +368,23 @@
   // collection.
   virtual bool is_maximal_no_gc() const;
 
-  // Return the generation before "gen", or else NULL.
+  // Return the generation before "gen".
   Generation* prev_gen(Generation* gen) const {
     int l = gen->level();
-    if (l == 0) return NULL;
-    else return _gens[l-1];
+    guarantee(l > 0, "Out of bounds");
+    return _gens[l-1];
   }
 
-  // Return the generation after "gen", or else NULL.
+  // Return the generation after "gen".
   Generation* next_gen(Generation* gen) const {
     int l = gen->level() + 1;
-    if (l == _n_gens) return NULL;
-    else return _gens[l];
+    guarantee(l < _n_gens, "Out of bounds");
+    return _gens[l];
   }
 
   Generation* get_gen(int i) const {
-    if (i >= 0 && i < _n_gens)
-      return _gens[i];
-    else
-      return NULL;
+    guarantee(i >= 0 && i < _n_gens, "Out of bounds");
+    return _gens[i];
   }
 
   int n_gens() const {
@@ -485,9 +483,9 @@
 
   // Promotion of obj into gen failed.  Try to promote obj to higher
   // gens in ascending order; return the new location of obj if successful.
-  // Otherwise, try expand-and-allocate for obj in each generation starting at
-  // gen; return the new location of obj if successful.  Otherwise, return NULL.
-  oop handle_failed_promotion(Generation* gen,
+  // Otherwise, try expand-and-allocate for obj in the old
+  // generation; return the new location of obj if successful.  Otherwise, return NULL.
+  oop handle_failed_promotion(Generation* old_gen,
                               oop obj,
                               size_t obj_size);
 
--- a/src/share/vm/memory/genMarkSweep.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/memory/genMarkSweep.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -52,8 +52,8 @@
 #include "utilities/copy.hpp"
 #include "utilities/events.hpp"
 
-void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
-  bool clear_all_softrefs) {
+void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool clear_all_softrefs) {
+  guarantee(level == 1, "We always collect both old and young.");
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
@@ -84,11 +84,6 @@
   // Capture heap size before collection for printing.
   size_t gch_prev_used = gch->used();
 
-  // Some of the card table updates below assume that the perm gen is
-  // also being collected.
-  assert(level == gch->n_gens() - 1,
-         "All generations are being collected, ergo perm gen too.");
-
   // Capture used regions for each generation that will be
   // subject to collection, so that card table adjustments can
   // be made intelligently (see clear / invalidate further below).
@@ -126,17 +121,15 @@
     all_empty = all_empty && gch->get_gen(i)->used() == 0;
   }
   GenRemSet* rs = gch->rem_set();
+  Generation* old_gen = gch->get_gen(level);
   // Clear/invalidate below make use of the "prev_used_regions" saved earlier.
   if (all_empty) {
     // We've evacuated all generations below us.
-    Generation* g = gch->get_gen(level);
-    rs->clear_into_younger(g);
+    rs->clear_into_younger(old_gen);
   } else {
     // Invalidate the cards corresponding to the currently used
-    // region and clear those corresponding to the evacuated region
-    // of all generations just collected (i.e. level and younger).
-    rs->invalidate_or_clear(gch->get_gen(level),
-                            true /* younger */);
+    // region and clear those corresponding to the evacuated region.
+    rs->invalidate_or_clear(old_gen);
   }
 
   Threads::gc_epilogue();
--- a/src/share/vm/memory/genRemSet.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/memory/genRemSet.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -135,7 +135,7 @@
   // younger than gen from generations gen and older.
   // The parameter clear_perm indicates if the perm_gen's
   // remembered set should also be processed/cleared.
-  virtual void clear_into_younger(Generation* gen) = 0;
+  virtual void clear_into_younger(Generation* old_gen) = 0;
 
   // Informs the RS that refs in the given "mr" may have changed
   // arbitrarily, and therefore may contain old-to-young pointers.
@@ -146,11 +146,8 @@
 
   // Informs the RS that refs in this generation
   // may have changed arbitrarily, and therefore may contain
-  // old-to-young pointers in arbitrary locations. The parameter
-  // younger indicates if the same should be done for younger generations
-  // as well. The parameter perm indicates if the same should be done for
-  // perm gen as well.
-  virtual void invalidate_or_clear(Generation* gen, bool younger) = 0;
+  // old-to-young pointers in arbitrary locations.
+  virtual void invalidate_or_clear(Generation* old_gen) = 0;
 };
 
 #endif // SHARE_VM_MEMORY_GENREMSET_HPP
--- a/src/share/vm/memory/heap.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/memory/heap.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -118,9 +118,12 @@
   _number_of_committed_segments = size_to_segments(_memory.committed_size());
   _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
   assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
+  const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
+  const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
+  const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);
 
   // reserve space for _segmap
-  if (!_segmap.initialize(align_to_page_size(_number_of_reserved_segments), align_to_page_size(_number_of_committed_segments))) {
+  if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
     return false;
   }
 
--- a/src/share/vm/memory/iterator.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/memory/iterator.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -64,7 +64,7 @@
 }
 
 void CodeBlobToOopClosure::do_newly_marked_nmethod(nmethod* nm) {
-  nm->oops_do(_cl, /*do_strong_roots_only=*/ true);
+  nm->oops_do(_cl, /*allow_zombie=*/ false);
 }
 
 void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
--- a/src/share/vm/memory/metaspace.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/memory/metaspace.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -35,6 +35,7 @@
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "runtime/globals.hpp"
+#include "runtime/java.hpp"
 #include "runtime/mutex.hpp"
 #include "runtime/orderAccess.hpp"
 #include "services/memTracker.hpp"
@@ -54,6 +55,8 @@
 
 MetaWord* last_allocated = 0;
 
+size_t Metaspace::_class_metaspace_size;
+
 // Used in declarations in SpaceManager and ChunkManager
 enum ChunkIndex {
   ZeroIndex = 0,
@@ -261,10 +264,6 @@
   // count of chunks contained in this VirtualSpace
   uintx _container_count;
 
-  // Convenience functions for logical bottom and end
-  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
-  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
-
   // Convenience functions to access the _virtual_space
   char* low()  const { return virtual_space()->low(); }
   char* high() const { return virtual_space()->high(); }
@@ -284,6 +283,10 @@
   VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
   ~VirtualSpaceNode();
 
+  // Convenience functions for logical bottom and end
+  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
+  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
+
   // address of next available space in _virtual_space;
   // Accessors
   VirtualSpaceNode* next() { return _next; }
@@ -342,7 +345,7 @@
 };
 
   // byte_size is the size of the associated virtualspace.
-VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0), _container_count(0) {
+VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
   // align up to vm allocation granularity
   byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
 
@@ -1313,7 +1316,8 @@
 
   // Class virtual space should always be expanded.  Call GC for the other
   // metadata virtual space.
-  if (vsl == Metaspace::class_space_list()) return true;
+  if (Metaspace::using_class_space() &&
+      (vsl == Metaspace::class_space_list())) return true;
 
   // If this is part of an allocation after a GC, expand
   // unconditionally.
@@ -2257,7 +2261,7 @@
   size_t raw_word_size = get_raw_word_size(word_size);
   size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
   assert(raw_word_size >= min_size,
-    err_msg("Should not deallocate dark matter " SIZE_FORMAT, word_size));
+         err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
   block_freelists()->return_block(p, raw_word_size);
 }
 
@@ -2374,7 +2378,7 @@
   if (result == NULL) {
     result = grow_and_allocate(word_size);
   }
-  if (result > 0) {
+  if (result != 0) {
     inc_used_metrics(word_size);
     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
            "Head of the list is being allocated");
@@ -2476,15 +2480,13 @@
 size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
 size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
 
+size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
+  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
+  return list == NULL ? 0 : list->free_bytes();
+}
+
 size_t MetaspaceAux::free_bytes() {
-  size_t result = 0;
-  if (Metaspace::class_space_list() != NULL) {
-    result = result + Metaspace::class_space_list()->free_bytes();
-  }
-  if (Metaspace::space_list() != NULL) {
-    result = result + Metaspace::space_list()->free_bytes();
-  }
-  return result;
+  return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
 }
 
 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
@@ -2549,6 +2551,9 @@
 }
 
 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
+  if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
+    return 0;
+  }
   // Don't count the space in the freelists.  That space will be
   // added to the capacity calculation as needed.
   size_t capacity = 0;
@@ -2563,18 +2568,18 @@
 }
 
 size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
-  size_t reserved = (mdtype == Metaspace::ClassType) ?
-                       Metaspace::class_space_list()->virtual_space_total() :
-                       Metaspace::space_list()->virtual_space_total();
-  return reserved * BytesPerWord;
+  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
+  return list == NULL ? 0 : list->virtual_space_total();
 }
 
 size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
 
 size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
-  ChunkManager* chunk = (mdtype == Metaspace::ClassType) ?
-                            Metaspace::class_space_list()->chunk_manager() :
-                            Metaspace::space_list()->chunk_manager();
+  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
+  if (list == NULL) {
+    return 0;
+  }
+  ChunkManager* chunk = list->chunk_manager();
   chunk->slow_verify();
   return chunk->free_chunks_total();
 }
@@ -2615,7 +2620,6 @@
 
 // This is printed when PrintGCDetails
 void MetaspaceAux::print_on(outputStream* out) {
-  Metaspace::MetadataType ct = Metaspace::ClassType;
   Metaspace::MetadataType nct = Metaspace::NonClassType;
 
   out->print_cr(" Metaspace total "
@@ -2629,12 +2633,15 @@
                 allocated_capacity_bytes(nct)/K,
                 allocated_used_bytes(nct)/K,
                 reserved_in_bytes(nct)/K);
-  out->print_cr("  class space    "
-                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-                " reserved " SIZE_FORMAT "K",
-                allocated_capacity_bytes(ct)/K,
-                allocated_used_bytes(ct)/K,
-                reserved_in_bytes(ct)/K);
+  if (Metaspace::using_class_space()) {
+    Metaspace::MetadataType ct = Metaspace::ClassType;
+    out->print_cr("  class space    "
+                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
+                  " reserved " SIZE_FORMAT "K",
+                  allocated_capacity_bytes(ct)/K,
+                  allocated_used_bytes(ct)/K,
+                  reserved_in_bytes(ct)/K);
+  }
 }
 
 // Print information for class space and data space separately.
@@ -2659,13 +2666,37 @@
   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
 }
 
-// Print total fragmentation for class and data metaspaces separately
+// Print total fragmentation for class metaspaces
+void MetaspaceAux::print_class_waste(outputStream* out) {
+  assert(Metaspace::using_class_space(), "class metaspace not used");
+  size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
+  size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
+  ClassLoaderDataGraphMetaspaceIterator iter;
+  while (iter.repeat()) {
+    Metaspace* msp = iter.get_next();
+    if (msp != NULL) {
+      cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
+      cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
+      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
+      cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
+      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
+      cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
+      cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
+    }
+  }
+  out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
+                SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
+                SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
+                "large count " SIZE_FORMAT,
+                cls_specialized_count, cls_specialized_waste,
+                cls_small_count, cls_small_waste,
+                cls_medium_count, cls_medium_waste, cls_humongous_count);
+}
+
+// Print total fragmentation for data and class metaspaces separately
 void MetaspaceAux::print_waste(outputStream* out) {
-
   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
-  size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
-  size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
 
   ClassLoaderDataGraphMetaspaceIterator iter;
   while (iter.repeat()) {
@@ -2678,14 +2709,6 @@
       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
-
-      cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
-      cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
-      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
-      cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
-      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
-      cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
-      cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
     }
   }
   out->print_cr("Total fragmentation waste (words) doesn't count free space");
@@ -2695,13 +2718,9 @@
                         "large count " SIZE_FORMAT,
              specialized_count, specialized_waste, small_count,
              small_waste, medium_count, medium_waste, humongous_count);
-  out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
-                           SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
-                           SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
-                           "large count " SIZE_FORMAT,
-             cls_specialized_count, cls_specialized_waste,
-             cls_small_count, cls_small_waste,
-             cls_medium_count, cls_medium_waste, cls_humongous_count);
+  if (Metaspace::using_class_space()) {
+    print_class_waste(out);
+  }
 }
 
 // Dump global metaspace things from the end of ClassLoaderDataGraph
@@ -2714,7 +2733,9 @@
 
 void MetaspaceAux::verify_free_chunks() {
   Metaspace::space_list()->chunk_manager()->verify();
-  Metaspace::class_space_list()->chunk_manager()->verify();
+  if (Metaspace::using_class_space()) {
+    Metaspace::class_space_list()->chunk_manager()->verify();
+  }
 }
 
 void MetaspaceAux::verify_capacity() {
@@ -2776,7 +2797,9 @@
 
 Metaspace::~Metaspace() {
   delete _vsm;
-  delete _class_vsm;
+  if (using_class_space()) {
+    delete _class_vsm;
+  }
 }
 
 VirtualSpaceList* Metaspace::_space_list = NULL;
@@ -2784,9 +2807,123 @@
 
 #define VIRTUALSPACEMULTIPLIER 2
 
+#ifdef _LP64
+void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
+  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
+  // narrow_klass_base is the lower of the metaspace base and the cds base
+  // (if cds is enabled).  The narrow_klass_shift depends on the distance
+  // between the lower base and higher address.
+  address lower_base;
+  address higher_address;
+  if (UseSharedSpaces) {
+    higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
+                          (address)(metaspace_base + class_metaspace_size()));
+    lower_base = MIN2(metaspace_base, cds_base);
+  } else {
+    higher_address = metaspace_base + class_metaspace_size();
+    lower_base = metaspace_base;
+  }
+  Universe::set_narrow_klass_base(lower_base);
+  if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) {
+    Universe::set_narrow_klass_shift(0);
+  } else {
+    assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
+    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
+  }
+}
+
+// Return TRUE if the specified metaspace_base and cds_base are close enough
+// to work with compressed klass pointers.
+bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
+  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
+  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
+  address lower_base = MIN2((address)metaspace_base, cds_base);
+  address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
+                                (address)(metaspace_base + class_metaspace_size()));
+  return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
+}
+
+// Try to allocate the metaspace at the requested addr.
+void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
+  assert(using_class_space(), "called improperly");
+  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
+  assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
+         "Metaspace size is too big");
+
+  ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
+                                             os::vm_allocation_granularity(),
+                                             false, requested_addr, 0);
+  if (!metaspace_rs.is_reserved()) {
+    if (UseSharedSpaces) {
+      // Keep trying to allocate the metaspace, increasing the requested_addr
+      // by 1GB each time, until we reach an address that will no longer allow
+      // use of CDS with compressed klass pointers.
+      char *addr = requested_addr;
+      while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
+             can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
+        addr = addr + 1*G;
+        metaspace_rs = ReservedSpace(class_metaspace_size(),
+                                     os::vm_allocation_granularity(), false, addr, 0);
+      }
+    }
+
+    // If no successful allocation then try to allocate the space anywhere.  If
+    // that fails then OOM doom.  At this point we cannot try allocating the
+    // metaspace as if UseCompressedKlassPointers is off because too much
+    // initialization has happened that depends on UseCompressedKlassPointers.
+    // So, UseCompressedKlassPointers cannot be turned off at this point.
+    if (!metaspace_rs.is_reserved()) {
+      metaspace_rs = ReservedSpace(class_metaspace_size(),
+                                   os::vm_allocation_granularity(), false);
+      if (!metaspace_rs.is_reserved()) {
+        vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
+                                              class_metaspace_size()));
+      }
+    }
+  }
+
+  // If we got here then the metaspace got allocated.
+  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
+
+  // Verify that we can use shared spaces.  Otherwise, turn off CDS.
+  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
+    FileMapInfo::stop_sharing_and_unmap(
+        "Could not allocate metaspace at a compatible address");
+  }
+
+  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
+                                  UseSharedSpaces ? (address)cds_base : 0);
+
+  initialize_class_space(metaspace_rs);
+
+  if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
+    gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
+                            Universe::narrow_klass_base(), Universe::narrow_klass_shift());
+    gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
+                           class_metaspace_size(), metaspace_rs.base(), requested_addr);
+  }
+}
+
+// For UseCompressedKlassPointers the class space is reserved above the top of
+// the Java heap.  The argument passed in is at the base of the compressed space.
+void Metaspace::initialize_class_space(ReservedSpace rs) {
+  // The reserved space size may be bigger because of alignment, esp with UseLargePages
+  assert(rs.size() >= ClassMetaspaceSize,
+         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
+  assert(using_class_space(), "Must be using class space");
+  _class_space_list = new VirtualSpaceList(rs);
+}
+
+#endif
+
 void Metaspace::global_initialize() {
   // Initialize the alignment for shared spaces.
   int max_alignment = os::vm_page_size();
+  size_t cds_total = 0;
+
+  set_class_metaspace_size(align_size_up(ClassMetaspaceSize,
+                                         os::vm_allocation_granularity()));
+
   MetaspaceShared::set_max_alignment(max_alignment);
 
   if (DumpSharedSpaces) {
@@ -2798,15 +2935,31 @@
     // Initialize with the sum of the shared space sizes.  The read-only
     // and read write metaspace chunks will be allocated out of this and the
     // remainder is the misc code and data chunks.
-    size_t total = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
-                                 SharedMiscDataSize + SharedMiscCodeSize,
-                                 os::vm_allocation_granularity());
-    size_t word_size = total/wordSize;
-    _space_list = new VirtualSpaceList(word_size);
+    cds_total = FileMapInfo::shared_spaces_size();
+    _space_list = new VirtualSpaceList(cds_total/wordSize);
+
+#ifdef _LP64
+    // Set the compressed klass pointer base so that decoding of these pointers works
+    // properly when creating the shared archive.
+    assert(UseCompressedOops && UseCompressedKlassPointers,
+      "UseCompressedOops and UseCompressedKlassPointers must be set");
+    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
+    if (TraceMetavirtualspaceAllocation && Verbose) {
+      gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
+                             _space_list->current_virtual_space()->bottom());
+    }
+
+    // Set the shift to zero.
+    assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
+           "CDS region is too large");
+    Universe::set_narrow_klass_shift(0);
+#endif
+
   } else {
     // If using shared space, open the file that contains the shared space
     // and map in the memory before initializing the rest of metaspace (so
     // the addresses don't conflict)
+    address cds_address = NULL;
     if (UseSharedSpaces) {
       FileMapInfo* mapinfo = new FileMapInfo();
       memset(mapinfo, 0, sizeof(FileMapInfo));
@@ -2821,8 +2974,22 @@
         assert(!mapinfo->is_open() && !UseSharedSpaces,
                "archive file not closed or shared spaces not disabled.");
       }
+      cds_total = FileMapInfo::shared_spaces_size();
+      cds_address = (address)mapinfo->region_base(0);
     }
 
+#ifdef _LP64
+    // If UseCompressedKlassPointers is set then allocate the metaspace area
+    // above the heap and above the CDS area (if it exists).
+    if (using_class_space()) {
+      if (UseSharedSpaces) {
+        allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);
+      } else {
+        allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
+      }
+    }
+#endif
+
     // Initialize these before initializing the VirtualSpaceList
     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
@@ -2840,39 +3007,28 @@
   }
 }
 
-// For UseCompressedKlassPointers the class space is reserved as a piece of the
-// Java heap because the compression algorithm is the same for each.  The
-// argument passed in is at the top of the compressed space
-void Metaspace::initialize_class_space(ReservedSpace rs) {
-  // The reserved space size may be bigger because of alignment, esp with UseLargePages
-  assert(rs.size() >= ClassMetaspaceSize,
-         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
-  _class_space_list = new VirtualSpaceList(rs);
-}
-
-void Metaspace::initialize(Mutex* lock,
-                           MetaspaceType type) {
+void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
 
   assert(space_list() != NULL,
     "Metadata VirtualSpaceList has not been initialized");
 
-  _vsm = new SpaceManager(Metaspace::NonClassType, lock, space_list());
+  _vsm = new SpaceManager(NonClassType, lock, space_list());
   if (_vsm == NULL) {
     return;
   }
   size_t word_size;
   size_t class_word_size;
-  vsm()->get_initial_chunk_sizes(type,
-                                 &word_size,
-                                 &class_word_size);
-
-  assert(class_space_list() != NULL,
-    "Class VirtualSpaceList has not been initialized");
-
-  // Allocate SpaceManager for classes.
-  _class_vsm = new SpaceManager(Metaspace::ClassType, lock, class_space_list());
-  if (_class_vsm == NULL) {
-    return;
+  vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
+
+  if (using_class_space()) {
+    assert(class_space_list() != NULL,
+      "Class VirtualSpaceList has not been initialized");
+
+    // Allocate SpaceManager for classes.
+    _class_vsm = new SpaceManager(ClassType, lock, class_space_list());
+    if (_class_vsm == NULL) {
+      return;
+    }
   }
 
   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
@@ -2888,11 +3044,13 @@
   }
 
   // Allocate chunk for class metadata objects
-  Metachunk* class_chunk =
-     class_space_list()->get_initialization_chunk(class_word_size,
-                                                  class_vsm()->medium_chunk_bunch());
-  if (class_chunk != NULL) {
-    class_vsm()->add_chunk(class_chunk, true);
+  if (using_class_space()) {
+    Metachunk* class_chunk =
+       class_space_list()->get_initialization_chunk(class_word_size,
+                                                    class_vsm()->medium_chunk_bunch());
+    if (class_chunk != NULL) {
+      class_vsm()->add_chunk(class_chunk, true);
+    }
   }
 
   _alloc_record_head = NULL;
@@ -2906,7 +3064,8 @@
 
 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
   // DumpSharedSpaces doesn't use class metadata area (yet)
-  if (mdtype == ClassType && !DumpSharedSpaces) {
+  // Also, don't use class_vsm() unless UseCompressedKlassPointers is true.
+  if (mdtype == ClassType && using_class_space()) {
     return  class_vsm()->allocate(word_size);
   } else {
     return  vsm()->allocate(word_size);
@@ -2937,14 +3096,19 @@
 }
 
 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
-  // return vsm()->allocated_used_words();
-  return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() :
-                               vsm()->sum_used_in_chunks_in_use();  // includes overhead!
+  if (mdtype == ClassType) {
+    return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
+  } else {
+    return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
+  }
 }
 
 size_t Metaspace::free_words(MetadataType mdtype) const {
-  return mdtype == ClassType ? class_vsm()->sum_free_in_chunks_in_use() :
-                               vsm()->sum_free_in_chunks_in_use();
+  if (mdtype == ClassType) {
+    return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
+  } else {
+    return vsm()->sum_free_in_chunks_in_use();
+  }
 }
 
 // Space capacity in the Metaspace.  It includes
@@ -2953,8 +3117,11 @@
 // in the space available in the dictionary which
 // is already counted in some chunk.
 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
-  return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() :
-                               vsm()->sum_capacity_in_chunks_in_use();
+  if (mdtype == ClassType) {
+    return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
+  } else {
+    return vsm()->sum_capacity_in_chunks_in_use();
+  }
 }
 
 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
@@ -2977,8 +3144,8 @@
 #endif
       return;
     }
-    if (is_class) {
-       class_vsm()->deallocate(ptr, word_size);
+    if (is_class && using_class_space()) {
+      class_vsm()->deallocate(ptr, word_size);
     } else {
       vsm()->deallocate(ptr, word_size);
     }
@@ -2992,7 +3159,7 @@
 #endif
       return;
     }
-    if (is_class) {
+    if (is_class && using_class_space()) {
       class_vsm()->deallocate(ptr, word_size);
     } else {
       vsm()->deallocate(ptr, word_size);
@@ -3101,14 +3268,18 @@
   MutexLockerEx cl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);
   space_list()->purge();
-  class_space_list()->purge();
+  if (using_class_space()) {
+    class_space_list()->purge();
+  }
 }
 
 void Metaspace::print_on(outputStream* out) const {
   // Print both class virtual space counts and metaspace.
   if (Verbose) {
-      vsm()->print_on(out);
+    vsm()->print_on(out);
+    if (using_class_space()) {
       class_vsm()->print_on(out);
+    }
   }
 }
 
@@ -3122,17 +3293,21 @@
   // be needed.  Note, locking this can cause inversion problems with the
   // caller in MetaspaceObj::is_metadata() function.
   return space_list()->contains(ptr) ||
-         class_space_list()->contains(ptr);
+         (using_class_space() && class_space_list()->contains(ptr));
 }
 
 void Metaspace::verify() {
   vsm()->verify();
-  class_vsm()->verify();
+  if (using_class_space()) {
+    class_vsm()->verify();
+  }
 }
 
 void Metaspace::dump(outputStream* const out) const {
   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
   vsm()->dump(out);
-  out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
-  class_vsm()->dump(out);
+  if (using_class_space()) {
+    out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
+    class_vsm()->dump(out);
+  }
 }
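
A walk-through of the base/shift decision in set_narrow_klass_base_and_shift, with an assumed metaspace base and the usual constants (LogKlassAlignmentInBytes == 3, max_juint just under 4 GB):

    // lower_base     = metaspace base (MIN2 with the CDS base when sharing)
    // higher_address = lower_base + class_metaspace_size()   (no-CDS case)
    //
    // if (higher_address - lower_base < 4 GB)
    //     shift = 0;   // a narrow klass pointer is a plain 32-bit offset
    //                  // from lower_base
    // else
    //     shift = LogKlassAlignmentInBytes;  // offsets count 8-byte units,
    //                                        // reaching up to 32 GB; asserted
    //                                        // impossible with CDS, since the
    //                                        // archive was written with shift 0
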
--- a/src/share/vm/memory/metaspace.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/memory/metaspace.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -105,6 +105,16 @@
   // Align up the word size to the allocation word size
   static size_t align_word_size_up(size_t);
 
+  // Aligned size of the metaspace.
+  static size_t _class_metaspace_size;
+
+  static size_t class_metaspace_size() {
+    return _class_metaspace_size;
+  }
+  static void set_class_metaspace_size(size_t metaspace_size) {
+    _class_metaspace_size = metaspace_size;
+  }
+
   static size_t _first_chunk_word_size;
   static size_t _first_class_chunk_word_size;
 
@@ -126,11 +136,26 @@
 
   static VirtualSpaceList* space_list()       { return _space_list; }
   static VirtualSpaceList* class_space_list() { return _class_space_list; }
+  static VirtualSpaceList* get_space_list(MetadataType mdtype) {
+    assert(mdtype != MetadataTypeCount, "MetadataTypeCount can't be used as mdtype");
+    return mdtype == ClassType ? class_space_list() : space_list();
+  }
 
   // This is used by DumpSharedSpaces only, where only _vsm is used. So we will
   // maintain a single list for now.
   void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
 
+#ifdef _LP64
+  static void set_narrow_klass_base_and_shift(address metaspace_base, address cds_base);
+
+  // Returns true if can use CDS with metaspace allocated as specified address.
+  static bool can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base);
+
+  static void allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base);
+
+  static void initialize_class_space(ReservedSpace rs);
+#endif
+
   class AllocRecord : public CHeapObj<mtClass> {
   public:
     AllocRecord(address ptr, MetaspaceObj::Type type, int byte_size)
@@ -151,7 +176,6 @@
 
   // Initialize globals for Metaspace
   static void global_initialize();
-  static void initialize_class_space(ReservedSpace rs);
 
   static size_t first_chunk_word_size() { return _first_chunk_word_size; }
   static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
@@ -172,8 +196,6 @@
   MetaWord* expand_and_allocate(size_t size,
                                 MetadataType mdtype);
 
-  static bool is_initialized() { return _class_space_list != NULL; }
-
   static bool contains(const void *ptr);
   void dump(outputStream* const out) const;
 
@@ -190,11 +212,16 @@
   };
 
   void iterate(AllocRecordClosure *closure);
+
+  // Returns true only if UseCompressedKlassPointers is true and DumpSharedSpaces is false.
+  static bool using_class_space() {
+    return NOT_LP64(false) LP64_ONLY(UseCompressedKlassPointers && !DumpSharedSpaces);
+  }
+
 };
 
 class MetaspaceAux : AllStatic {
   static size_t free_chunks_total(Metaspace::MetadataType mdtype);
-  static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);
 
  public:
   // Statistics for class space and data space in metaspace.
@@ -238,13 +265,15 @@
   // Used by MetaspaceCounters
   static size_t free_chunks_total();
   static size_t free_chunks_total_in_bytes();
+  static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);
 
   static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) {
     return _allocated_capacity_words[mdtype];
   }
   static size_t allocated_capacity_words() {
-    return _allocated_capacity_words[Metaspace::ClassType] +
-           _allocated_capacity_words[Metaspace::NonClassType];
+    return _allocated_capacity_words[Metaspace::NonClassType] +
+           (Metaspace::using_class_space() ?
+           _allocated_capacity_words[Metaspace::ClassType] : 0);
   }
   static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) {
     return allocated_capacity_words(mdtype) * BytesPerWord;
@@ -257,8 +286,9 @@
     return _allocated_used_words[mdtype];
   }
   static size_t allocated_used_words() {
-    return _allocated_used_words[Metaspace::ClassType] +
-           _allocated_used_words[Metaspace::NonClassType];
+    return _allocated_used_words[Metaspace::NonClassType] +
+           (Metaspace::using_class_space() ?
+           _allocated_used_words[Metaspace::ClassType] : 0);
   }
   static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) {
     return allocated_used_words(mdtype) * BytesPerWord;
@@ -268,6 +298,7 @@
   }
 
   static size_t free_bytes();
+  static size_t free_bytes(Metaspace::MetadataType mdtype);
 
   // Total capacity in all Metaspaces
   static size_t capacity_bytes_slow() {
@@ -300,6 +331,7 @@
   static void print_on(outputStream * out);
   static void print_on(outputStream * out, Metaspace::MetadataType mdtype);
 
+  static void print_class_waste(outputStream* out);
   static void print_waste(outputStream* out);
   static void dump(outputStream* out);
   static void verify_free_chunks();
--- a/src/share/vm/memory/metaspaceCounters.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/memory/metaspaceCounters.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -25,11 +25,47 @@
 #include "precompiled.hpp"
 #include "memory/metaspaceCounters.hpp"
 #include "memory/resourceArea.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/perfData.hpp"
 #include "utilities/exceptions.hpp"
 
-MetaspaceCounters* MetaspaceCounters::_metaspace_counters = NULL;
+class MetaspacePerfCounters: public CHeapObj<mtInternal> {
+  friend class VMStructs;
+  PerfVariable*      _capacity;
+  PerfVariable*      _used;
+  PerfVariable*      _max_capacity;
 
-size_t MetaspaceCounters::calc_total_capacity() {
+  PerfVariable* create_variable(const char *ns, const char *name, size_t value, TRAPS) {
+    const char *path = PerfDataManager::counter_name(ns, name);
+    return PerfDataManager::create_variable(SUN_GC, path, PerfData::U_Bytes, value, THREAD);
+  }
+
+  void create_constant(const char *ns, const char *name, size_t value, TRAPS) {
+    const char *path = PerfDataManager::counter_name(ns, name);
+    PerfDataManager::create_constant(SUN_GC, path, PerfData::U_Bytes, value, THREAD);
+  }
+
+ public:
+  MetaspacePerfCounters(const char* ns, size_t min_capacity, size_t curr_capacity, size_t max_capacity, size_t used) {
+    EXCEPTION_MARK;
+    ResourceMark rm;
+
+    create_constant(ns, "minCapacity", min_capacity, THREAD);
+    _capacity = create_variable(ns, "capacity", curr_capacity, THREAD);
+    _max_capacity = create_variable(ns, "maxCapacity", max_capacity, THREAD);
+    _used = create_variable(ns, "used", used, THREAD);
+  }
+
+  void update(size_t capacity, size_t max_capacity, size_t used) {
+    _capacity->set_value(capacity);
+    _max_capacity->set_value(max_capacity);
+    _used->set_value(used);
+  }
+};
+
+MetaspacePerfCounters* MetaspaceCounters::_perf_counters = NULL;
+
+size_t MetaspaceCounters::calculate_capacity() {
   // The total capacity is the sum of
   //   1) capacity of Metachunks in use by all Metaspaces
   //   2) unused space at the end of each Metachunk
@@ -39,95 +75,65 @@
   return total_capacity;
 }
 
-MetaspaceCounters::MetaspaceCounters() :
-    _capacity(NULL),
-    _used(NULL),
-    _max_capacity(NULL) {
+void MetaspaceCounters::initialize_performance_counters() {
   if (UsePerfData) {
+    assert(_perf_counters == NULL, "Should only be initialized once");
+
     size_t min_capacity = MetaspaceAux::min_chunk_size();
+    size_t capacity = calculate_capacity();
     size_t max_capacity = MetaspaceAux::reserved_in_bytes();
-    size_t curr_capacity = calc_total_capacity();
     size_t used = MetaspaceAux::allocated_used_bytes();
 
-    initialize(min_capacity, max_capacity, curr_capacity, used);
-  }
-}
-
-static PerfVariable* create_ms_variable(const char *ns,
-                                        const char *name,
-                                        size_t value,
-                                        TRAPS) {
-  const char *path = PerfDataManager::counter_name(ns, name);
-  PerfVariable *result =
-      PerfDataManager::create_variable(SUN_GC, path, PerfData::U_Bytes, value,
-                                       CHECK_NULL);
-  return result;
-}
-
-static void create_ms_constant(const char *ns,
-                               const char *name,
-                               size_t value,
-                               TRAPS) {
-  const char *path = PerfDataManager::counter_name(ns, name);
-  PerfDataManager::create_constant(SUN_GC, path, PerfData::U_Bytes, value, CHECK);
-}
-
-void MetaspaceCounters::initialize(size_t min_capacity,
-                                   size_t max_capacity,
-                                   size_t curr_capacity,
-                                   size_t used) {
-
-  if (UsePerfData) {
-    EXCEPTION_MARK;
-    ResourceMark rm;
-
-    const char *ms = "metaspace";
-
-    create_ms_constant(ms, "minCapacity", min_capacity, CHECK);
-    _max_capacity = create_ms_variable(ms, "maxCapacity", max_capacity, CHECK);
-    _capacity = create_ms_variable(ms, "capacity", curr_capacity, CHECK);
-    _used = create_ms_variable(ms, "used", used, CHECK);
-  }
-}
-
-void MetaspaceCounters::update_capacity() {
-  assert(UsePerfData, "Should not be called unless being used");
-  size_t total_capacity = calc_total_capacity();
-  _capacity->set_value(total_capacity);
-}
-
-void MetaspaceCounters::update_used() {
-  assert(UsePerfData, "Should not be called unless being used");
-  size_t used_in_bytes = MetaspaceAux::allocated_used_bytes();
-  _used->set_value(used_in_bytes);
-}
-
-void MetaspaceCounters::update_max_capacity() {
-  assert(UsePerfData, "Should not be called unless being used");
-  assert(_max_capacity != NULL, "Should be initialized");
-  size_t reserved_in_bytes = MetaspaceAux::reserved_in_bytes();
-  _max_capacity->set_value(reserved_in_bytes);
-}
-
-void MetaspaceCounters::update_all() {
-  if (UsePerfData) {
-    update_used();
-    update_capacity();
-    update_max_capacity();
-  }
-}
-
-void MetaspaceCounters::initialize_performance_counters() {
-  if (UsePerfData) {
-    assert(_metaspace_counters == NULL, "Should only be initialized once");
-    _metaspace_counters = new MetaspaceCounters();
+    _perf_counters = new MetaspacePerfCounters("metaspace", min_capacity, capacity, max_capacity, used);
   }
 }
 
 void MetaspaceCounters::update_performance_counters() {
   if (UsePerfData) {
-    assert(_metaspace_counters != NULL, "Should be initialized");
-    _metaspace_counters->update_all();
+    assert(_perf_counters != NULL, "Should be initialized");
+
+    size_t capacity = calculate_capacity();
+    size_t max_capacity = MetaspaceAux::reserved_in_bytes();
+    size_t used = MetaspaceAux::allocated_used_bytes();
+
+    _perf_counters->update(capacity, max_capacity, used);
   }
 }
 
+MetaspacePerfCounters* CompressedClassSpaceCounters::_perf_counters = NULL;
+
+size_t CompressedClassSpaceCounters::calculate_capacity() {
+  return MetaspaceAux::allocated_capacity_bytes(_class_type) +
+         MetaspaceAux::free_bytes(_class_type) +
+         MetaspaceAux::free_chunks_total_in_bytes(_class_type);
+}
+
+void CompressedClassSpaceCounters::update_performance_counters() {
+  if (UsePerfData && UseCompressedKlassPointers) {
+    assert(_perf_counters != NULL, "Should be initialized");
+
+    size_t capacity = calculate_capacity();
+    size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
+    size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
+
+    _perf_counters->update(capacity, max_capacity, used);
+  }
+}
+
+void CompressedClassSpaceCounters::initialize_performance_counters() {
+  if (UsePerfData) {
+    assert(_perf_counters == NULL, "Should only be initialized once");
+    const char* ns = "compressedclassspace";
+
+    if (UseCompressedKlassPointers) {
+      size_t min_capacity = MetaspaceAux::min_chunk_size();
+      size_t capacity = calculate_capacity();
+      size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
+      size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
+
+      _perf_counters = new MetaspacePerfCounters(ns, min_capacity, capacity, max_capacity, used);
+    } else {
+      _perf_counters = new MetaspacePerfCounters(ns, 0, 0, 0, 0);
+    }
+  }
+}
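
The refactoring above trades the old per-counter update methods for a wrapper that is fully initialized in its constructor and mutated only through update(). A compilable sketch of that shape, with a mock standing in for the real perf-data machinery in runtime/perfData.hpp:

#include <cstddef>

// Mock of a perf-data variable; not the real PerfVariable.
struct MockPerfVariable {
  size_t value;
  void set_value(size_t v) { value = v; }
};

// Same shape as MetaspacePerfCounters: constants and variables are created up
// front, and all later changes are funneled through a single update() call.
class CounterGroup {
  MockPerfVariable _capacity, _used, _max_capacity;
 public:
  CounterGroup(size_t curr_capacity, size_t max_capacity, size_t used) {
    _capacity.set_value(curr_capacity);
    _max_capacity.set_value(max_capacity);
    _used.set_value(used);
  }
  void update(size_t capacity, size_t max_capacity, size_t used) {
    _capacity.set_value(capacity);
    _max_capacity.set_value(max_capacity);
    _used.set_value(used);
  }
};

int main() {
  CounterGroup g(64, 1024, 32);  // initialize once at bootstrap
  g.update(128, 1024, 96);       // refresh after each collection
  return 0;
}

With the namespaces used above, the exported counters should surface as sun.gc.metaspace.{minCapacity,capacity,maxCapacity,used} and the matching sun.gc.compressedclassspace.* set, since create_variable() registers the counter_name() path under SUN_GC.
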
--- a/src/share/vm/memory/metaspaceCounters.hpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/memory/metaspaceCounters.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -25,31 +25,27 @@
 #ifndef SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
 #define SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
 
-#include "runtime/perfData.hpp"
+#include "memory/metaspace.hpp"
 
-class MetaspaceCounters: public CHeapObj<mtClass> {
-  friend class VMStructs;
-  PerfVariable*      _capacity;
-  PerfVariable*      _used;
-  PerfVariable*      _max_capacity;
-  static MetaspaceCounters* _metaspace_counters;
-  void initialize(size_t min_capacity,
-                  size_t max_capacity,
-                  size_t curr_capacity,
-                  size_t used);
-  size_t calc_total_capacity();
+class MetaspacePerfCounters;
+
+class MetaspaceCounters: public AllStatic {
+  static MetaspacePerfCounters* _perf_counters;
+  static size_t calculate_capacity();
+
  public:
-  MetaspaceCounters();
-  ~MetaspaceCounters();
-
-  void update_capacity();
-  void update_used();
-  void update_max_capacity();
-
-  void update_all();
-
   static void initialize_performance_counters();
   static void update_performance_counters();
+};
 
+class CompressedClassSpaceCounters: public AllStatic {
+  static MetaspacePerfCounters* _perf_counters;
+  static size_t calculate_capacity();
+  static const Metaspace::MetadataType _class_type = Metaspace::ClassType;
+
+ public:
+  static void initialize_performance_counters();
+  static void update_performance_counters();
 };
+
 #endif // SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
--- a/src/share/vm/memory/metaspaceShared.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/memory/metaspaceShared.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -52,7 +52,6 @@
   int tag = 0;
   soc->do_tag(--tag);
 
-  assert(!UseCompressedOops, "UseCompressedOops doesn't work with shared archive");
   // Verify the sizes of various metadata in the system.
   soc->do_tag(sizeof(Method));
   soc->do_tag(sizeof(ConstMethod));
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/memory/padded.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_PADDED_HPP
+#define SHARE_VM_MEMORY_PADDED_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// Bytes needed to pad type to avoid cache-line sharing; alignment should be the
+// expected cache line size (a power of two).  The first addend avoids sharing
+// when the start address is not a multiple of alignment; the second maintains
+// alignment of starting addresses that happen to be a multiple.
+#define PADDING_SIZE(type, alignment)                           \
+  ((alignment) + align_size_up_(sizeof(type), alignment))
+
+// Templates to create a subclass padded to avoid cache line sharing.  These are
+// effective only when applied to derived-most (leaf) classes.
+
+// When no args are passed to the base ctor.
+template <class T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class Padded : public T {
+ private:
+  char _pad_buf_[PADDING_SIZE(T, alignment)];
+};
+
+// When either 0 or 1 args may be passed to the base ctor.
+template <class T, typename Arg1T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class Padded01 : public T {
+ public:
+  Padded01(): T() { }
+  Padded01(Arg1T arg1): T(arg1) { }
+ private:
+  char _pad_buf_[PADDING_SIZE(T, alignment)];
+};
+
+// Super class of PaddedEnd when pad_size != 0.
+template <class T, size_t pad_size>
+class PaddedEndImpl : public T {
+ private:
+  char _pad_buf[pad_size];
+};
+
+// Super class of PaddedEnd when pad_size == 0.
+template <class T>
+class PaddedEndImpl<T, /*pad_size*/ 0> : public T {
+  // No padding.
+};
+
+#define PADDED_END_SIZE(type, alignment) (align_size_up_(sizeof(type), alignment) - sizeof(type))
+
+// A more memory-conservative implementation of Padded. The subclass adds only
+// the minimal amount of padding needed to make the size of the object
+// aligned. This helps reduce false sharing if the start address is a
+// multiple of the alignment.
+template <class T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class PaddedEnd : public PaddedEndImpl<T, PADDED_END_SIZE(T, alignment)> {
+  // C++ doesn't allow zero-length arrays, so the padding is put in a
+  // superclass that is specialized for the pad_size == 0 case.
+};
+
+// Helper class to create an array of PaddedEnd<T> objects. Each element
+// starts at a multiple of alignment, and each element's size is aligned
+// to alignment.
+template <class T, MEMFLAGS flags, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class PaddedArray {
+ public:
+  // Creates an aligned padded array.
+  // The memory can't be deleted since the raw memory chunk is not returned.
+  static PaddedEnd<T>* create_unfreeable(uint length);
+};
+
+#endif // SHARE_VM_MEMORY_PADDED_HPP
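
The PaddedEnd trick, an empty superclass specialization when the pad would be zero bytes, can be reproduced and size-checked in standard C++. The sketch below mirrors the templates above but uses C++11 constexpr and static_assert in place of HotSpot's macros; all names are local to the sketch:

#include <cstddef>

const size_t kCacheLine = 64;  // stand-in for DEFAULT_CACHE_LINE_SIZE

constexpr size_t align_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

// Padding lives in a superclass so the pad == 0 case can be specialized away;
// C++ forbids zero-length arrays, exactly as the comment above notes.
template <class T, size_t pad>
struct PaddedEndImpl : public T { char _pad[pad]; };

template <class T>
struct PaddedEndImpl<T, 0> : public T { };

template <class T, size_t alignment = kCacheLine>
struct PaddedEnd
  : public PaddedEndImpl<T, align_up(sizeof(T), alignment) - sizeof(T)> { };

struct Small { char c; };      // 1 byte: needs 63 bytes of tail padding
struct Exact { char c[64]; };  // already a full line: hits the pad == 0 case

static_assert(sizeof(PaddedEnd<Small>) == 64, "padded up to one cache line");
static_assert(sizeof(PaddedEnd<Exact>) == 64, "no padding added when already aligned");

int main() { return 0; }
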
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/memory/padded.inline.hpp	Thu Sep 05 11:04:39 2013 -0700
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "memory/allocation.inline.hpp"
+#include "memory/padded.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// Creates an aligned padded array.
+// The memory can't be deleted since the raw memory chunk is not returned.
+template <class T, MEMFLAGS flags, size_t alignment>
+PaddedEnd<T>* PaddedArray<T, flags, alignment>::create_unfreeable(uint length) {
+  // Check that the PaddedEnd class works as intended.
+  STATIC_ASSERT(is_size_aligned_(sizeof(PaddedEnd<T>), alignment));
+
+  // Allocate a chunk of memory large enough to allow for some alignment.
+  void* chunk = AllocateHeap(length * sizeof(PaddedEnd<T, alignment>) + alignment, flags);
+
+  // Make the initial alignment.
+  PaddedEnd<T>* aligned_padded_array = (PaddedEnd<T>*)align_pointer_up(chunk, alignment);
+
+  // Call the default constructor for each element.
+  for (uint i = 0; i < length; i++) {
+    ::new (&aligned_padded_array[i]) T();
+  }
+
+  return aligned_padded_array;
+}
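
The allocation strategy here — over-allocate by one alignment's worth of bytes, round the start pointer up, then placement-new each element — works the same way in plain C++. A hedged sketch with malloc standing in for AllocateHeap (and, as above, the raw chunk is never returned, so the array is unfreeable):

#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <new>

struct Counter {
  size_t value;
  Counter() : value(0) {}
};

// Round p up to the next multiple of alignment (a power of two), in the
// spirit of HotSpot's align_pointer_up().
static void* align_ptr_up(void* p, size_t alignment) {
  return (void*)(((uintptr_t)p + alignment - 1) & ~(uintptr_t)(alignment - 1));
}

static Counter* create_unfreeable(unsigned length, size_t alignment) {
  // The extra alignment bytes guarantee an aligned start exists inside the
  // chunk; the raw pointer is dropped, so the memory can never be freed.
  void* chunk = malloc(length * sizeof(Counter) + alignment);
  Counter* aligned = (Counter*)align_ptr_up(chunk, alignment);
  for (unsigned i = 0; i < length; i++) {
    ::new (&aligned[i]) Counter();  // default-construct each element in place
  }
  return aligned;
}

int main() {
  Counter* a = create_unfreeable(4, 64);
  assert(((uintptr_t)a & 63) == 0);  // array start is line-aligned
  assert(a[3].value == 0);
  return 0;
}

In the real template the element type is a PaddedEnd<T>, so sizeof is itself a multiple of the alignment and every element, not just the first, starts on its own cache line; the STATIC_ASSERT above enforces exactly that.
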
--- a/src/share/vm/memory/universe.cpp	Thu Aug 22 09:39:54 2013 -0700
+++ b/src/share/vm/memory/universe.cpp	Thu Sep 05 11:04:39 2013 -0700
@@ -105,10 +105,9 @@
 Array<Klass*>* Universe::_the_array_interfaces_array = NULL;
 oop Universe::_the_null_string                        = NULL;
 oop Universe::_the_min_jint_string                   = NULL;
-LatestMethodOopCache* Universe::_finalizer_register_cache = NULL;
-LatestMethodOopCache* Universe::_loader_addClass_cache    = NULL;
-LatestMethodOopCache* Universe::_pd_implies_cache         = NULL;
-ActiveMethodOopsCache* Universe::_reflect_invoke_cache    = NULL;
+LatestMethodCache* Universe::_finalizer_register_cache = NULL;
+LatestMethodCache* Universe::_loader_addClass_cache    = NULL;
+LatestMethodCache* Universe::_pd_implies_cache         = NULL;
 oop Universe::_out_of_memory_error_java_heap          = NULL;
 oop Universe::_out_of_memory_error_metaspace          = NULL;
 oop Universe::_out_of_memory_error_class_metaspace    = NULL;
@@ -146,8 +145,6 @@
 NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
 address Universe::_narrow_ptrs_base;
 
-size_t          Universe::_class_metaspace_size;
-
 void Universe::basic_type_classes_do(void f(Klass*)) {
   f(boolArrayKlassObj());
   f(byteArrayKlassObj());
@@ -225,7 +222,6 @@
   f->do_ptr((void**)&_the_empty_klass_array);
   _finalizer_register_cache->serialize(f);
   _loader_addClass_cache->serialize(f);
-  _reflect_invoke_cache->serialize(f);
   _pd_implies_cache->serialize(f);
 }
 
@@ -643,16 +639,17 @@
     return status;
   }
 
+  Metaspace::global_initialize();
+
   // Create memory for metadata.  Must be after initializing heap for
   // DumpSharedSpaces.
   ClassLoaderData::init_null_class_loader_data();
 
   // We have a heap so create the Method* caches before
   // Metaspace::initialize_shared_spaces() tries to populate them.
-  Universe::_finalizer_register_cache = new LatestMethodOopCache();
-  Universe::_loader_addClass_cache    = new LatestMethodOopCache();
-  Universe::_pd_implies_cache         = new LatestMethodOopCache();
-  Universe::_reflect_invoke_cache     = new ActiveMethodOopsCache();
+  Universe::_finalizer_register_cache = new LatestMethodCache();
+  Universe::_loader_addClass_cache    = new LatestMethodCache();
+  Universe::_pd_implies_cache         = new LatestMethodCache();
 
   if (UseSharedSpaces) {
     // Read the data structures supporting the shared spaces (shared
@@ -684,25 +681,27 @@
 // 32Gb
 // OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
 
-char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
+char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
+  assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
+  assert(is_size_aligned((size_t)NarrowOopHeapMax, alignment), "Must be");
+  assert(is_size_aligned(heap_size, alignment), "Must be");
+
+  uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);
+
   size_t base = 0;
 #ifdef _LP64
   if (UseCompressedOops) {
     assert(mode == UnscaledNarrowOop  ||
            mode == ZeroBasedNarrowOop ||
            mode == HeapBasedNarrowOop, "mode is invalid");
-    const size_t total_size = heap_size + HeapBaseMinAddress;
+    const size_t total_size = heap_size + heap_base_min_address_aligned;
     // Return specified base for the first request.
     if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
-      base = HeapBaseMinAddress;
+      base = heap_base_min_address_aligned;
 
-    // If the total size and the metaspace size are small enough to allow
-    // UnscaledNarrowOop then just use UnscaledNarrowOop.
-    } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop) &&
-        (!UseCompressedKlassPointers ||
-          (((OopEncodingHeapMax - heap_size) + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax))) {
-      // We don't need to check the metaspace size here because it is always smaller
-      // than total_size.
+    // If the total size is small enough to allow UnscaledNarrowOop then
+    // just use UnscaledNarrowOop.
+    } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
       if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) &&
           (Universe::narrow_oop_shift() == 0)) {
         // Use 32-bits oops without encoding and
@@ -719,13 +718,6 @@
           base = (OopEncodingHeapMax - heap_size);
         }
       }
-
-    // See if ZeroBaseNarrowOop encoding will work for a heap based at
-    // (KlassEncodingMetaspaceMax - class_metaspace_size()).
-    } else if (UseCompressedKlassPointers && (mode != HeapBasedNarrowOop) &&
-        (Universe::class_metaspace_size() + HeapBaseMinAddress <= KlassEncodingMetaspaceMax) &&
-        (KlassEncodingMetaspaceMax + heap_size - Universe::class_metaspace_size() <= OopEncodingHeapMax)) {
-      base = (KlassEncodingMetaspaceMax - Universe::class_metaspace_size());
     } else {
       // UnscaledNarrowOop encoding didn't work, no base was found for
       // ZeroBasedOops, or HeapBasedNarrowOop encoding was requested.
       // So we can't reserve below 32Gb.
@@ -735,8 +727,7 @@
     // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
     // used in ReservedHeapSpace() constructors.
     // The final values will be set in initialize_heap() below.
-    if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax) &&
-        (!UseCompressedKlassPointers || (base + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax)) {
+    if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax)) {
       // Use zero based compressed oops
       Universe::set_narrow_oop_base(NULL);
       // Don't need guard page for implicit checks in indexed
@@ -757,6 +748,8 @@
     }
   }
 #endif
+
+  assert(is_ptr_aligned((char*)base, alignment), "Must be");
   return (char*)base; // also return NULL (don't care) for 32-bit VM
 }
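
With the class-metaspace terms gone, the base selection reduces to arithmetic on two limits: roughly 4Gb for unscaled (shift 0) narrow oops and 32Gb once the 3-bit object-alignment shift is applied. A deliberately simplified sketch of that decision (it ignores HeapBaseMinAddress, the requested mode, and alignment, all of which the real code also honors):

#include <cstdint>
#include <cstdio>

const uint64_t G = 1024ull * 1024 * 1024;
const uint64_t kNarrowOopMax   = 4 * G;   // 32-bit oop, shift == 0
const uint64_t kOopEncodingMax = 32 * G;  // 32-bit oop << 3 (8-byte alignment)

// Place the heap so its top lands on the boundary of the cheapest encoding
// that still fits, as preferred_heap_base() now does.
uint64_t pick_base(uint64_t heap_size) {
  if (heap_size <= kNarrowOopMax) {
    return kNarrowOopMax - heap_size;    // unscaled: raw 32-bit pointers
  } else if (heap_size <= kOopEncodingMax) {
    return kOopEncodingMax - heap_size;  // zero-based: shift, no base register
  } else {
    return 0;                            // heap-based: let the OS place it
  }
}

int main() {
  printf(" 2G heap -> base at %2lluG\n", (unsigned long long)(pick_base(2 * G) / G));   //  2
  printf("20G heap -> base at %2lluG\n", (unsigned long long)(pick_base(20 * G) / G));  // 12
  printf("40G heap -> base at %2lluG\n", (unsigned long long)(pick_base(40 * G) / G));  //  0
  return 0;
}
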
 
@@ -819,9 +812,7 @@
       tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
                  Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
     }
-    if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) ||
-        (UseCompressedKlassPointers &&
-        ((uint64_t)Universe::heap()->base() + Universe::class_metaspace_size() > KlassEncodingMetaspaceMax))) {
+    if ((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) {
       // Can't reserve heap below 32Gb.
       // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
@@ -857,20 +848,16 @@
         }
       }
     }
+
     if (verbose) {
       tty->cr();
       tty->cr();
     }
-    if (UseCompressedKlassPointers) {
-      Universe::set_narrow_klass_base(Universe::narrow_oop_base());
-      Universe::set_narrow_klass_shift(MIN2(Universe::narrow_oop_shift(), LogKlassAlignmentInBytes));
-    }
     Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
   }
-  // Universe::narrow_oop_base() is one page below the metaspace
-  // base. The actual metaspace base depends on alignment constraints
-  // so we don't know its exact location here.
-  assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() - os::vm_page_size() - ClassMetaspaceSize) ||
+  // Universe::narrow_oop_base() is one page below the heap.
+  assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
+         os::vm_page_size()) ||
          Universe::narrow_oop_base() == NULL, "invalid value");
   assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
          Universe::narrow_oop_shift() == 0, "invalid value");
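
The assert depends on the compressed-oop decode arithmetic: with the base one page below the heap, a narrow value of 0 decodes into the protected page rather than a live object, which is what keeps null checks implicit. The encode/decode pair, standalone and with illustrative addresses:

#include <cassert>
#include <cstdint>

const unsigned kShift = 3;  // LogMinObjAlignmentInBytes for 8-byte alignment

// Heap-based compressed-oop arithmetic: a 64-bit address is expressed as
// base + (32-bit value << shift).
uint32_t encode(uint64_t addr, uint64_t base) {
  return (uint32_t)((addr - base) >> kShift);
}
uint64_t decode(uint32_t narrow, uint64_t base) {
  return base + ((uint64_t)narrow << kShift);
}

int main() {
  const uint64_t page      = 4096;
  const uint64_t heap_base = 0x800000000ull;   // illustrative heap start
  const uint64_t base      = heap_base - page; // one guard page below the heap

  uint64_t obj = heap_base + 1024;             // an 8-byte-aligned heap address
  assert(decode(encode(obj, base), base) == obj);  // round-trips exactly
  assert(decode(0, base) == base);  // narrow 0 hits the guard page, not the heap
  return 0;
}
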
@@ -890,35 +877,36 @@
 
 // Reserve the Java heap, which is now the same for all GCs.
 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
-  // Add in the class metaspace area so the classes in the headers can
-  // be compressed the same as instances.
-  // Need to round class space size up because it's below the heap and
-  // the actual alignment depends on its size.
-  Universe::set_class_metaspace_size(align_size_up(ClassMetaspaceSize, alignment));
-  size_t total_reserved = align_size_up(heap_size + Universe::class_metaspace_size(), alignment);
+  size_t total_reserved = align_size_up(heap_size, alignment);
   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
       "heap size is too big for compressed oops");
-  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
 
-  ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);
+  bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
+  assert(!UseLargePages
+      || UseParallelOldGC
+      || use_large_pages, "Wrong alignment to use large pages");
+
+  char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
+
+  ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr);
 
   if (UseCompressedOops) {
     if (addr != NULL && !total_rs.is_reserved()) {
       // Failed to reserve at specified address - the requested memory
       // region is taken already, for example, by 'java' launcher.
       // Try again to reserve the heap higher.
-      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
+      addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);
 
       ReservedHeapSpace total_rs0(total_reserved, alignment,
-                                  UseLargePages, addr);
+                                  use_large_pages, addr);
 
       if (addr != NULL && !total_rs0.is_reserved()) {
         // Failed to reserve at specified address again - give up.
-        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
+        addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
         assert(addr == NULL, "");
 
         ReservedHeapSpace total_rs1(total_reserved, alignment,
-                                    UseLargePages, addr);
+                                    use_large_pages, addr);
         total_rs = total_rs1;
       } else {
         total_rs = total_rs0;
@@ -931,28 +919,17 @@
     return total_rs;
   }
 
-  // Split the reserved space into main Java heap and a space for
-  // classes so that they can be compressed using the same algorithm
-  // as compressed oops. If compress oops and compress klass ptrs are
-  // used we need the meta space first: if the alignment used for
-  // compressed oops is greater than the one used for compressed klass
-  // ptrs, a metadata space on top of the heap could become
-  // unreachable.
-  ReservedSpace class_rs = total_rs.first_part(Universe::class_metaspace_size());
-  ReservedSpace heap_rs = total_rs.last_part(Universe::class_metaspace_size(), alignment);
-  Metaspace::initialize_class_space(class_rs);
-
   if (UseCompressedOops) {
     // Universe::initialize_heap() will reset this to NULL if unscaled
     // or zero-based narrow oops are actually used.
     address base = (address)(total_rs.base() - os::vm_page_size());
     Universe::set_narrow_oop_base(base);
   }
-  return heap_rs;
+  return total_rs;
 }
 
 
-// It's the caller's repsonsibility to ensure glitch-freedom
+// It's the caller's responsibility to ensure glitch-freedom
 // (if required).
 void Universe::update_heap_info_at_gc() {
   _heap_capacity_at_last_gc = heap()->capacity();
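
Two behavioral changes land in reserve_heap() above: large pages are requested only when the chosen alignment is itself a multiple of os::large_page_size(), and a failed reservation at the preferred address now cascades through the weaker narrow-oop modes. The cascade, reduced to a compilable sketch (try_reserve and the per-mode bases are invented for illustration):

#include <cstdint>
#include <cstdio>

enum Mode { Unscaled, ZeroBased, HeapBased };

const uint64_t G = 1024ull * 1024 * 1024;

// Invented for the sketch: pretend every address below 4G is already taken.
bool try_reserve(uint64_t base) { return base == 0 || base >= 4 * G; }

uint64_t preferred_base(Mode m) {  // illustrative per-mode placements
  switch (m) {
    case Unscaled:  return 2 * G;   // below the 4G unscaled limit
    case ZeroBased: return 12 * G;  // below the 32G zero-based limit
    default:        return 0;       // no placement constraint (OS chooses)
  }
}

// Same shape as the patched reserve_heap(): each failure at a preferred
// address falls through to the next, more expensive encoding mode.
uint64_t reserve_heap_sketch() {
  uint64_t base = preferred_base(Unscaled);
  if (base != 0 && !try_reserve(base)) {
    base = preferred_base(ZeroBased);
    if (base != 0 && !try_reserve(base)) {
      base = preferred_base(HeapBased);  // NULL request: reserve anywhere
    }
  }
  return base;
}

int main() {
  printf("reserved with base at %lluG\n",
         (unsigned long long)(reserve_heap_sketch() / G));  // 12: zero-based won
  return 0;
}
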
@@ -1093,35 +1070,21 @@
                                   vmSymbols::register_method_name(),
                                   vmSymbols::register_method_signature());
   if (m == NULL || !m->is_static()) {
-    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
-      "java.lang.ref.Finalizer.register", false);
+    tty->print_cr("Unable to link/verify Finalizer.register method");
+    return false; // initialization failed (cannot throw exception yet)
   }
   Universe::_finalizer_register_cache->init(
-    SystemDictionary::Finalizer_klass(), m, CHECK_false);
-
-  // Resolve on first use and initialize class.
-  // Note: No race-condition here, since a resolve will always return the same result
-
-  // Setup method for security checks
-  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_reflect_Method(), true, CHECK_false);
-  k_h = instanceKlassHandle(THREAD, k);
-  k_h->link_class(CHECK_false);
-  m = k_h->find_method(vmSymbols::invoke_name(), vmSymbols::object_object_array_object_signature());
-  if (m == NULL || m->is_static()) {
-    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
-      "java.lang.reflect.Method.invoke", false);
-  }
-  Universe::_reflect_invoke_cache->init(k_h(), m, CHECK_false);
+    SystemDictionary::Finalizer_klass(), m);
 
   // Setup method for registering loaded classes in class loader vector
   InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
   m = InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
   if (m == NULL || m->is_static()) {
-    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
-      "java.lang.ClassLoader.addClass", false);
+    tty->print_cr("Unable to link/verify ClassLoader.addClass method");
+    return false; // initialization failed (cannot throw exception yet)
   }
   Universe::_loader_addClass_cache->init(
-    SystemDictionary::ClassLoader_klass(), m, CHECK_false);
+    SystemDictionary::ClassLoader_klass(), m);
 
   // Setup method for checking protection domain
   InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->link_class(CHECK_false);
@@ -1137,7 +1100,7 @@
       return false; // initialization failed
     }
     Universe::_pd_implies_cache->init(
-      SystemDictionary::ProtectionDomain_klass(), m, CHECK_false);;
+      SystemDictionary::ProtectionDomain_klass(), m);
   }
 
   // The following initializes converter functions for serialization in
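
The switch from THROW_MSG_ to print-and-return-false in this hunk reflects a bootstrap constraint: at this point the exception machinery is not yet usable, so a missing Finalizer.register or ClassLoader.addClass has to be reported on tty and propagated as a boolean. The pattern, in a hypothetical standalone form:

#include <cstdio>

// Hypothetical stand-in for a method lookup during early VM bootstrap.
static bool find_method(const char* name) {
  return name[0] != '\0';  // pretend lookup succeeds for non-empty names
}

// Early-init error handling: report and return false, never throw, because
// exception support is not yet initialized.
static bool post_init_sketch() {
  const char* required[] = { "Finalizer.register", "ClassLoader.addClass" };
  for (const char* m : required) {
    if (!find_method(m)) {
      fprintf(stderr, "Unable to link/verify %s method\n", m);
      return false;  // initialization failed (cannot throw an exception yet)
    }
  }
  return true;
}

int main() { return post_init_sketch() ? 0 : 1; }
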
@@ -1157,6 +1120,8 @@
 
   // Initialize performance counters for metaspaces
   MetaspaceCounters::initialize_performance_counters();
+  CompressedClassSpaceCounters::initialize_performance_counters();
+
   MemoryService::add_metaspace_memory_pools();
 
   GC_locker::unlock();  // allow gc after bootstrapping
@@ -1460,7 +1425,7 @@
 }
 
 
-void CommonMethodOopCache::init(Klass* k, Method* m, TRAPS) {
+void LatestMethodCache::init(Klass* k, Method* m) {
   if (!UseSharedSpaces) {
     _klass = k;
   }
@@ -1476,88 +1441,7 @@
 }
 
 
-ActiveMethodOopsCache::~ActiveMethodOopsCache() {
-  if (_prev_methods != NULL) {
-    delete _prev_methods;
-    _prev_methods = NULL;
-  }
-}
-
-
-void ActiveMethodOopsCache::add_previous_version(Method* method) {
-  assert(Thread::current()->is_VM_thread(),
-    "only VMThread can add previous versions");
-
-  // Only append the previous method if it is executing on the stack.
-  if (method->on_stack()) {
-
-    if (_prev_methods == NULL) {
-      // This is the first previous version so make some space.
-      // Start with 2 elements under the assumption that the class
-      // won't be redefined much.
-      _prev_methods = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Method*>(2, true);
-    }
-
-    // RC_TRACE macro has an embedded ResourceMark
-    RC_TRACE(0x00000100,
-      ("add: %s(%s): adding prev version ref for cached method @%d",
-        method->name()->as_C_string(), method->signature()->as_C_string(),
-        _prev_methods->length()));
-
-    _prev_methods->append(method);
-  }
-