changeset 8610:da05ab7fde79

Merge
author kvn
date Thu, 25 Jun 2015 09:48:50 -0700
parents 0b76cb81b165 4ffba31b9baa
children cfcdcb9f73d0
files src/cpu/ppc/vm/vm_version_ppc.cpp src/share/vm/opto/c2_globals.hpp src/share/vm/runtime/globals.hpp src/share/vm/runtime/vmStructs.cpp
diffstat 256 files changed, 9596 insertions(+), 2620 deletions(-)
--- a/agent/make/Makefile	Wed Jun 24 09:13:12 2015 +0200
+++ b/agent/make/Makefile	Thu Jun 25 09:48:50 2015 -0700
@@ -58,6 +58,7 @@
 sun.jvm.hotspot.debugger.dummy \
 sun.jvm.hotspot.debugger.linux \
 sun.jvm.hotspot.debugger.linux.amd64 \
+sun.jvm.hotspot.debugger.linux.aarch64 \
 sun.jvm.hotspot.debugger.linux.ppc64 \
 sun.jvm.hotspot.debugger.linux.x86 \
 sun.jvm.hotspot.debugger.posix \
@@ -65,6 +66,7 @@
 sun.jvm.hotspot.debugger.ppc64 \
 sun.jvm.hotspot.debugger.proc \
 sun.jvm.hotspot.debugger.proc.amd64 \
+sun.jvm.hotspot.debugger.proc.aarch64 \
 sun.jvm.hotspot.debugger.proc.ppc64 \
 sun.jvm.hotspot.debugger.proc.sparc \
 sun.jvm.hotspot.debugger.proc.x86 \
@@ -91,11 +93,13 @@
 sun.jvm.hotspot.prims \
 sun.jvm.hotspot.runtime \
 sun.jvm.hotspot.runtime.amd64 \
+sun.jvm.hotspot.runtime.aarch64 \
 sun.jvm.hotspot.runtime.bsd \
 sun.jvm.hotspot.runtime.bsd_amd64 \
 sun.jvm.hotspot.runtime.bsd_x86 \
 sun.jvm.hotspot.runtime.linux \
 sun.jvm.hotspot.runtime.linux_amd64 \
+sun.jvm.hotspot.runtime.linux_aarch64 \
 sun.jvm.hotspot.runtime.linux_ppc64 \
 sun.jvm.hotspot.runtime.linux_sparc \
 sun.jvm.hotspot.runtime.linux_x86 \
@@ -149,16 +153,19 @@
 sun/jvm/hotspot/debugger/linux/*.java \
 sun/jvm/hotspot/debugger/linux/ppc64/*.java \
 sun/jvm/hotspot/debugger/linux/x86/*.java \
+sun/jvm/hotspot/debugger/linux/aarch64/*.java \
 sun/jvm/hotspot/debugger/posix/*.java \
 sun/jvm/hotspot/debugger/posix/elf/*.java \
 sun/jvm/hotspot/debugger/ppc64/*.java \
 sun/jvm/hotspot/debugger/proc/*.java \
 sun/jvm/hotspot/debugger/proc/amd64/*.java \
+sun/jvm/hotspot/debugger/proc/aarch64/*.java \
 sun/jvm/hotspot/debugger/proc/ppc64/*.java \
 sun/jvm/hotspot/debugger/proc/sparc/*.java \
 sun/jvm/hotspot/debugger/proc/x86/*.java \
 sun/jvm/hotspot/debugger/remote/*.java \
 sun/jvm/hotspot/debugger/remote/amd64/*.java \
+sun/jvm/hotspot/debugger/remote/aarch64/*.java \
 sun/jvm/hotspot/debugger/remote/ppc64/*.java \
 sun/jvm/hotspot/debugger/remote/sparc/*.java \
 sun/jvm/hotspot/debugger/remote/x86/*.java \
@@ -178,11 +185,13 @@
 sun/jvm/hotspot/prims/*.java \
 sun/jvm/hotspot/runtime/*.java \
 sun/jvm/hotspot/runtime/amd64/*.java \
+sun/jvm/hotspot/runtime/aarch64/*.java \
 sun/jvm/hotspot/runtime/bsd/*.java \
 sun/jvm/hotspot/runtime/bsd_amd64/*.java \
 sun/jvm/hotspot/runtime/bsd_x86/*.java \
 sun/jvm/hotspot/runtime/linux/*.java \
 sun/jvm/hotspot/runtime/linux_amd64/*.java \
+sun/jvm/hotspot/runtime/linux_aarch64/*.java \
 sun/jvm/hotspot/runtime/linux_ppc64/*.java \
 sun/jvm/hotspot/runtime/linux_sparc/*.java \
 sun/jvm/hotspot/runtime/linux_x86/*.java \
--- a/agent/src/os/linux/LinuxDebuggerLocal.c	Wed Jun 24 09:13:12 2015 +0200
+++ b/agent/src/os/linux/LinuxDebuggerLocal.c	Thu Jun 25 09:48:50 2015 -0700
@@ -53,6 +53,10 @@
 #include "sun_jvm_hotspot_debugger_ppc64_PPC64ThreadContext.h"
 #endif
 
+#ifdef aarch64
+#include "sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext.h"
+#endif
+
 static jfieldID p_ps_prochandle_ID = 0;
 static jfieldID threadList_ID = 0;
 static jfieldID loadObjectList_ID = 0;
@@ -368,7 +372,7 @@
 #define NPRGREG sun_jvm_hotspot_debugger_amd64_AMD64ThreadContext_NPRGREG
 #endif
 #ifdef aarch64
-#define NPRGREG 32
+#define NPRGREG sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext_NPRGREG
 #endif
 #if defined(sparc) || defined(sparcv9)
 #define NPRGREG sun_jvm_hotspot_debugger_sparc_SPARCThreadContext_NPRGREG
@@ -473,6 +477,13 @@
 
 #define REG_INDEX(reg) sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext_##reg
 
+  {
+    int i;
+    for (i = 0; i < 31; i++)
+      regs[i] = gregs.regs[i];
+    regs[REG_INDEX(SP)] = gregs.sp;
+    regs[REG_INDEX(PC)] = gregs.pc;
+  }
 #endif /* aarch64 */
 
 #ifdef ppc64
--- a/agent/src/os/linux/Makefile	Wed Jun 24 09:13:12 2015 +0200
+++ b/agent/src/os/linux/Makefile	Thu Jun 25 09:48:50 2015 -0700
@@ -53,14 +53,15 @@
         $(JAVAH) -jni -classpath ../../../build/classes -d $(ARCH) \
 		sun.jvm.hotspot.debugger.x86.X86ThreadContext \
 		sun.jvm.hotspot.debugger.sparc.SPARCThreadContext \
-		sun.jvm.hotspot.debugger.amd64.AMD64ThreadContext 
+		sun.jvm.hotspot.debugger.amd64.AMD64ThreadContext \
+		sun.jvm.hotspot.debugger.aarch64.AARCH64ThreadContext 
         $(GCC) $(CFLAGS) $< -o $@
 
 $(ARCH)/sadis.o:  ../../share/native/sadis.c
         $(JAVAH) -jni -classpath ../../../build/classes -d $(ARCH) \
                 sun.jvm.hotspot.asm.Disassembler
         $(GCC) $(CFLAGS) $< -o $@
- 
+
 $(ARCH)/%.o: %.c
         $(GCC) $(CFLAGS) $< -o $@
 
--- a/agent/src/os/linux/libproc.h	Wed Jun 24 09:13:12 2015 +0200
+++ b/agent/src/os/linux/libproc.h	Thu Jun 25 09:48:50 2015 -0700
@@ -72,6 +72,7 @@
 #define user_regs_struct  pt_regs
 #endif
 #if defined(aarch64)
+#include <asm/ptrace.h>
 #define user_regs_struct user_pt_regs
 #endif
 
--- a/agent/src/os/linux/proc_service.h	Wed Jun 24 09:13:12 2015 +0200
+++ b/agent/src/os/linux/proc_service.h	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
 
 // Linux does not have the proc service library, though it does provide the
 // thread_db library which can be used to manipulate threads without having
-// to know the details of LinuxThreads or NPTL
+// to know the details of NPTL
 
 // copied from Solaris "proc_service.h"
 typedef enum {
--- a/agent/src/share/classes/sun/jvm/hotspot/HSDB.java	Wed Jun 24 09:13:12 2015 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/HSDB.java	Thu Jun 25 09:48:50 2015 -0700
@@ -983,19 +983,15 @@
                                                      curFrame.getFP(),
                                                      anno));
             } else {
-              if (VM.getVM().getCPU().equals("x86") || VM.getVM().getCPU().equals("amd64")) {
-                // For C2, which has null frame pointers on x86/amd64
-                CodeBlob cb = VM.getVM().getCodeCache().findBlob(curFrame.getPC());
-                Address sp = curFrame.getSP();
-                if (Assert.ASSERTS_ENABLED) {
-                  Assert.that(cb.getFrameSize() > 0, "CodeBlob must have non-zero frame size");
-                }
-                annoPanel.addAnnotation(new Annotation(sp,
-                                                       sp.addOffsetTo(cb.getFrameSize()),
-                                                       anno));
-              } else {
-                Assert.that(VM.getVM().getCPU().equals("ia64"), "only ia64 should reach here");
+              // For C2, which has null frame pointers on x86/amd64/aarch64
+              CodeBlob cb = VM.getVM().getCodeCache().findBlob(curFrame.getPC());
+              Address sp = curFrame.getSP();
+              if (Assert.ASSERTS_ENABLED) {
+                Assert.that(cb.getFrameSize() > 0, "CodeBlob must have non-zero frame size");
               }
+              annoPanel.addAnnotation(new Annotation(sp,
+                                                     sp.addOffsetTo(cb.getFrameSize()),
+                                                     anno));
             }
 
             // Add interpreter frame annotations
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/aarch64/AARCH64ThreadContext.java	Thu Jun 25 09:48:50 2015 -0700
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Red Hat Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.debugger.aarch64;
+
+import java.lang.annotation.Native;
+
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.cdbg.*;
+
+/** Specifies the thread context on aarch64 platforms; only a sub-portion
+ * of the context is guaranteed to be present on all operating
+ * systems. */
+
+public abstract class AARCH64ThreadContext implements ThreadContext {
+    // Taken from /usr/include/asm/sigcontext.h on Linux/AARCH64.
+
+    // NOTE: the indices for the various registers must be maintained as
+    // listed across various operating systems. However, only a small
+    // subset of the registers' values are guaranteed to be present (and
+    // must be present for the SA's stack walking to work)
+
+    // One instance of the Native annotation is enough to trigger header generation
+    // for this file.
+    @Native
+    public static final int R0 = 0;
+    public static final int R1 = 1;
+    public static final int R2 = 2;
+    public static final int R3 = 3;
+    public static final int R4 = 4;
+    public static final int R5 = 5;
+    public static final int R6 = 6;
+    public static final int R7 = 7;
+    public static final int R8 = 8;
+    public static final int R9 = 9;
+    public static final int R10 = 10;
+    public static final int R11 = 11;
+    public static final int R12 = 12;
+    public static final int R13 = 13;
+    public static final int R14 = 14;
+    public static final int R15 = 15;
+    public static final int R16 = 16;
+    public static final int R17 = 17;
+    public static final int R18 = 18;
+    public static final int R19 = 19;
+    public static final int R20 = 20;
+    public static final int R21 = 21;
+    public static final int R22 = 22;
+    public static final int R23 = 23;
+    public static final int R24 = 24;
+    public static final int R25 = 25;
+    public static final int R26 = 26;
+    public static final int R27 = 27;
+    public static final int R28 = 28;
+    public static final int FP = 29;
+    public static final int LR = 30;
+    public static final int SP = 31;
+    public static final int PC = 32;
+
+    public static final int NPRGREG = 33;
+
+    private long[] data;
+
+    public AARCH64ThreadContext() {
+        data = new long[NPRGREG];
+    }
+
+    public int getNumRegisters() {
+        return NPRGREG;
+    }
+
+    public String getRegisterName(int index) {
+        switch (index) {
+        case LR: return "lr";
+        case SP: return "sp";
+        case PC: return "pc";
+        default:
+            return "r" + index;
+        }
+    }
+
+    public void setRegister(int index, long value) {
+        data[index] = value;
+    }
+
+    public long getRegister(int index) {
+        return data[index];
+    }
+
+    public CFrame getTopFrame(Debugger dbg) {
+        return null;
+    }
+
+    /** This can't be implemented in this class since we would have to
+     * tie the implementation to, for example, the debugging system */
+    public abstract void setRegisterAsAddress(int index, Address value);
+
+    /** This can't be implemented in this class since we would have to
+     * tie the implementation to, for example, the debugging system */
+    public abstract Address getRegisterAsAddress(int index);
+}
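
As a rough illustration of how the index constants above are meant to be consumed (hedged sketch only; the "debugger" handle and "lwpId" below are placeholders for the Proc/Linux debugger plumbing added elsewhere in this changeset): a platform subclass is filled register-by-register from the native register set and then queried through the ThreadContext interface.

  // Hedged sketch, mirroring ProcAARCH64Thread.getContext() added later in this change.
  long[] regs = debugger.getThreadIntegerRegisterSet(lwpId);      // native register set
  AARCH64ThreadContext context = new ProcAARCH64ThreadContext(debugger);
  for (int i = 0; i < AARCH64ThreadContext.NPRGREG; i++) {
      context.setRegister(i, regs[i]);
  }
  Address pc = context.getRegisterAsAddress(AARCH64ThreadContext.PC);
  Address fp = context.getRegisterAsAddress(AARCH64ThreadContext.FP);
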
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java	Wed Jun 24 09:13:12 2015 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Red Hat Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,12 +32,14 @@
 import sun.jvm.hotspot.debugger.cdbg.*;
 import sun.jvm.hotspot.debugger.x86.*;
 import sun.jvm.hotspot.debugger.amd64.*;
+import sun.jvm.hotspot.debugger.aarch64.*;
 import sun.jvm.hotspot.debugger.sparc.*;
 import sun.jvm.hotspot.debugger.ppc64.*;
 import sun.jvm.hotspot.debugger.linux.x86.*;
 import sun.jvm.hotspot.debugger.linux.amd64.*;
 import sun.jvm.hotspot.debugger.linux.sparc.*;
 import sun.jvm.hotspot.debugger.linux.ppc64.*;
+import sun.jvm.hotspot.debugger.linux.aarch64.*;
 import sun.jvm.hotspot.utilities.*;
 
 class LinuxCDebugger implements CDebugger {
@@ -106,6 +109,13 @@
         Address pc  = context.getRegisterAsAddress(PPC64ThreadContext.PC);
         if (pc == null) return null;
         return new LinuxPPC64CFrame(dbg, sp, pc, LinuxDebuggerLocal.getAddressSize());
+    } else if (cpu.equals("aarch64")) {
+       AARCH64ThreadContext context = (AARCH64ThreadContext) thread.getContext();
+       Address fp = context.getRegisterAsAddress(AARCH64ThreadContext.FP);
+       if (fp == null) return null;
+       Address pc  = context.getRegisterAsAddress(AARCH64ThreadContext.PC);
+       if (pc == null) return null;
+       return new LinuxAARCH64CFrame(dbg, fp, pc);
      } else {
        // Runtime exception thrown by LinuxThreadContextFactory if unknown cpu
        ThreadContext context = (ThreadContext) thread.getContext();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/aarch64/LinuxAARCH64CFrame.java	Thu Jun 25 09:48:50 2015 -0700
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Red Hat Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.debugger.linux.aarch64;
+
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.aarch64.*;
+import sun.jvm.hotspot.debugger.linux.*;
+import sun.jvm.hotspot.debugger.cdbg.*;
+import sun.jvm.hotspot.debugger.cdbg.basic.*;
+
+final public class LinuxAARCH64CFrame extends BasicCFrame {
+   public LinuxAARCH64CFrame(LinuxDebugger dbg, Address fp, Address pc) {
+      super(dbg.getCDebugger());
+      this.fp = fp;
+      this.pc = pc;
+      this.dbg = dbg;
+   }
+
+   // override base class impl to avoid ELF parsing
+   public ClosestSymbol closestSymbolToPC() {
+      // try native lookup in debugger.
+      return dbg.lookup(dbg.getAddressValue(pc()));
+   }
+
+   public Address pc() {
+      return pc;
+   }
+
+   public Address localVariableBase() {
+      return fp;
+   }
+
+   public CFrame sender(ThreadProxy thread) {
+      AARCH64ThreadContext context = (AARCH64ThreadContext) thread.getContext();
+      Address rsp = context.getRegisterAsAddress(AARCH64ThreadContext.SP);
+
+      if ((fp == null) || fp.lessThan(rsp)) {
+        return null;
+      }
+
+      // Check alignment of fp
+      if (dbg.getAddressValue(fp) % (2 * ADDRESS_SIZE) != 0) {
+        return null;
+      }
+
+      Address nextFP = fp.getAddressAt(0 * ADDRESS_SIZE);
+      if (nextFP == null || nextFP.lessThanOrEqual(fp)) {
+        return null;
+      }
+      Address nextPC  = fp.getAddressAt(1 * ADDRESS_SIZE);
+      if (nextPC == null) {
+        return null;
+      }
+      return new LinuxAARCH64CFrame(dbg, nextFP, nextPC);
+   }
+
+   // package/class internals only
+   private static final int ADDRESS_SIZE = 8;
+   private Address pc;
+   private Address sp;
+   private Address fp;
+   private LinuxDebugger dbg;
+}
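
A hedged usage sketch (variable names are illustrative, not part of this changeset): once the modified LinuxCDebugger.topFrameForThread() hands back the topmost LinuxAARCH64CFrame, a native stack trace falls out of repeatedly asking each frame for its sender, which above follows the saved fp/lr pair stored at the frame pointer.

  // Illustrative only; "dbg" is the LinuxDebugger session, "thread" its ThreadProxy.
  CFrame frame = dbg.getCDebugger().topFrameForThread(thread);
  while (frame != null) {
      System.out.println(frame.closestSymbolToPC());   // nearest native symbol, may be null
      frame = frame.sender(thread);                     // stops when fp is null or misaligned
  }
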
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/aarch64/LinuxAARCH64ThreadContext.java	Thu Jun 25 09:48:50 2015 -0700
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Red Hat Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.debugger.linux.aarch64;
+
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.aarch64.*;
+import sun.jvm.hotspot.debugger.linux.*;
+
+public class LinuxAARCH64ThreadContext extends AARCH64ThreadContext {
+  private LinuxDebugger debugger;
+
+  public LinuxAARCH64ThreadContext(LinuxDebugger debugger) {
+    super();
+    this.debugger = debugger;
+  }
+
+  public void setRegisterAsAddress(int index, Address value) {
+    setRegister(index, debugger.getAddressValue(value));
+  }
+
+  public Address getRegisterAsAddress(int index) {
+    return debugger.newAddress(getRegister(index));
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebuggerLocal.java	Wed Jun 24 09:13:12 2015 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebuggerLocal.java	Thu Jun 25 09:48:50 2015 -0700
@@ -31,11 +31,13 @@
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.debugger.cdbg.*;
 import sun.jvm.hotspot.debugger.proc.amd64.*;
+import sun.jvm.hotspot.debugger.proc.aarch64.*;
 import sun.jvm.hotspot.debugger.proc.sparc.*;
 import sun.jvm.hotspot.debugger.proc.ppc64.*;
 import sun.jvm.hotspot.debugger.proc.x86.*;
 import sun.jvm.hotspot.debugger.ppc64.*;
 import sun.jvm.hotspot.debugger.amd64.*;
+import sun.jvm.hotspot.debugger.aarch64.*;
 import sun.jvm.hotspot.debugger.sparc.*;
 import sun.jvm.hotspot.debugger.x86.*;
 import sun.jvm.hotspot.utilities.*;
@@ -88,6 +90,10 @@
             threadFactory = new ProcAMD64ThreadFactory(this);
             pcRegIndex = AMD64ThreadContext.RIP;
             fpRegIndex = AMD64ThreadContext.RBP;
+        } else if (cpu.equals("aarch64")) {
+            threadFactory = new ProcAARCH64ThreadFactory(this);
+            pcRegIndex = AARCH64ThreadContext.PC;
+            fpRegIndex = AARCH64ThreadContext.FP;
         } else if (cpu.equals("ppc64")) {
             threadFactory = new ProcPPC64ThreadFactory(this);
             pcRegIndex = PPC64ThreadContext.PC;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/aarch64/ProcAARCH64Thread.java	Thu Jun 25 09:48:50 2015 -0700
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Red Hat Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.debugger.proc.aarch64;
+
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.aarch64.*;
+import sun.jvm.hotspot.debugger.proc.*;
+import sun.jvm.hotspot.utilities.*;
+
+public class ProcAARCH64Thread implements ThreadProxy {
+    private ProcDebugger debugger;
+    private int         id;
+
+    public ProcAARCH64Thread(ProcDebugger debugger, Address addr) {
+        this.debugger = debugger;
+
+        // FIXME: the size here should be configurable. However, making it
+        // so would produce a dependency on the "types" package from the
+        // debugger package, which is not desired.
+        this.id       = (int) addr.getCIntegerAt(0, 4, true);
+    }
+
+    public ProcAARCH64Thread(ProcDebugger debugger, long id) {
+        this.debugger = debugger;
+        this.id = (int) id;
+    }
+
+    public ThreadContext getContext() throws IllegalThreadStateException {
+        ProcAARCH64ThreadContext context = new ProcAARCH64ThreadContext(debugger);
+        long[] regs = debugger.getThreadIntegerRegisterSet(id);
+        if (Assert.ASSERTS_ENABLED) {
+            Assert.that(regs.length == AARCH64ThreadContext.NPRGREG, "size mismatch");
+        }
+        for (int i = 0; i < regs.length; i++) {
+            context.setRegister(i, regs[i]);
+        }
+        return context;
+    }
+
+    public boolean canSetContext() throws DebuggerException {
+        return false;
+    }
+
+    public void setContext(ThreadContext context)
+    throws IllegalThreadStateException, DebuggerException {
+        throw new DebuggerException("Unimplemented");
+    }
+
+    public String toString() {
+        return "t@" + id;
+    }
+
+    public boolean equals(Object obj) {
+        if ((obj == null) || !(obj instanceof ProcAARCH64Thread)) {
+            return false;
+        }
+
+        return (((ProcAARCH64Thread) obj).id == id);
+    }
+
+    public int hashCode() {
+        return id;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/aarch64/ProcAARCH64ThreadContext.java	Thu Jun 25 09:48:50 2015 -0700
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Red Hat Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.debugger.proc.aarch64;
+
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.aarch64.*;
+import sun.jvm.hotspot.debugger.proc.*;
+
+public class ProcAARCH64ThreadContext extends AARCH64ThreadContext {
+    private ProcDebugger debugger;
+
+    public ProcAARCH64ThreadContext(ProcDebugger debugger) {
+        super();
+        this.debugger = debugger;
+    }
+
+    public void setRegisterAsAddress(int index, Address value) {
+        setRegister(index, debugger.getAddressValue(value));
+    }
+
+    public Address getRegisterAsAddress(int index) {
+        return debugger.newAddress(getRegister(index));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/aarch64/ProcAARCH64ThreadFactory.java	Thu Jun 25 09:48:50 2015 -0700
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Red Hat Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.debugger.proc.aarch64;
+
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.proc.*;
+
+public class ProcAARCH64ThreadFactory implements ProcThreadFactory {
+    private ProcDebugger debugger;
+
+    public ProcAARCH64ThreadFactory(ProcDebugger debugger) {
+        this.debugger = debugger;
+    }
+
+    public ThreadProxy createThreadWrapper(Address threadIdentifierAddr) {
+        return new ProcAARCH64Thread(debugger, threadIdentifierAddr);
+    }
+
+    public ThreadProxy createThreadWrapper(long id) {
+        return new ProcAARCH64Thread(debugger, id);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/aarch64/RemoteAARCH64Thread.java	Thu Jun 25 09:48:50 2015 -0700
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Red Hat Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.debugger.remote.aarch64;
+
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.aarch64.*;
+import sun.jvm.hotspot.debugger.remote.*;
+import sun.jvm.hotspot.utilities.*;
+
+public class RemoteAARCH64Thread extends RemoteThread  {
+  public RemoteAARCH64Thread(RemoteDebuggerClient debugger, Address addr) {
+     super(debugger, addr);
+  }
+
+  public RemoteAARCH64Thread(RemoteDebuggerClient debugger, long id) {
+     super(debugger, id);
+  }
+
+  public ThreadContext getContext() throws IllegalThreadStateException {
+    RemoteAARCH64ThreadContext context = new RemoteAARCH64ThreadContext(debugger);
+    long[] regs = (addr != null)? debugger.getThreadIntegerRegisterSet(addr) :
+                                  debugger.getThreadIntegerRegisterSet(id);
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(regs.length == AARCH64ThreadContext.NPRGREG, "size of register set must match");
+    }
+    for (int i = 0; i < regs.length; i++) {
+      context.setRegister(i, regs[i]);
+    }
+    return context;
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/aarch64/RemoteAARCH64ThreadContext.java	Thu Jun 25 09:48:50 2015 -0700
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Red Hat Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.debugger.remote.aarch64;
+
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.aarch64.*;
+import sun.jvm.hotspot.debugger.remote.*;
+
+public class RemoteAARCH64ThreadContext extends AARCH64ThreadContext {
+  private RemoteDebuggerClient debugger;
+
+  public RemoteAARCH64ThreadContext(RemoteDebuggerClient debugger) {
+    super();
+    this.debugger = debugger;
+  }
+
+  public void setRegisterAsAddress(int index, Address value) {
+    setRegister(index, debugger.getAddressValue(value));
+  }
+
+  public Address getRegisterAsAddress(int index) {
+    return debugger.newAddress(getRegister(index));
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/aarch64/RemoteAARCH64ThreadFactory.java	Thu Jun 25 09:48:50 2015 -0700
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Red Hat Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.debugger.remote.aarch64;
+
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.remote.*;
+
+public class RemoteAARCH64ThreadFactory implements RemoteThreadFactory {
+  private RemoteDebuggerClient debugger;
+
+  public RemoteAARCH64ThreadFactory(RemoteDebuggerClient debugger) {
+    this.debugger = debugger;
+  }
+
+  public ThreadProxy createThreadWrapper(Address threadIdentifierAddr) {
+    return new RemoteAARCH64Thread(debugger, threadIdentifierAddr);
+  }
+
+  public ThreadProxy createThreadWrapper(long id) {
+    return new RemoteAARCH64Thread(debugger, id);
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/gc/shared/Generation.java	Wed Jun 24 09:13:12 2015 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc/shared/Generation.java	Thu Jun 25 09:48:50 2015 -0700
@@ -49,7 +49,6 @@
 public abstract class Generation extends VMObject {
   private static long          reservedFieldOffset;
   private static long          virtualSpaceFieldOffset;
-  private static CIntegerField levelField;
   protected static final int  K = 1024;
   // Fields for class StatRecord
   private static Field         statRecordField;
@@ -75,7 +74,6 @@
 
     reservedFieldOffset     = type.getField("_reserved").getOffset();
     virtualSpaceFieldOffset = type.getField("_virtual_space").getOffset();
-    levelField              = type.getCIntegerField("_level");
     // StatRecord
     statRecordField         = type.getField("_stat_record");
     type                    = db.lookupType("Generation::StatRecord");
@@ -130,14 +128,6 @@
      }
   }
 
-  public GenerationSpec spec() {
-    return ((GenCollectedHeap) VM.getVM().getUniverse().heap()).spec(level());
-  }
-
-  public int level() {
-    return (int) levelField.getValue(addr);
-  }
-
   public int invocations() {
     return getStatRecord().getInvocations();
   }
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/Frame.java	Wed Jun 24 09:13:12 2015 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/Frame.java	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -357,12 +357,6 @@
   // FIXME: avoiding implementing this for now if possible
   //  public void interpreter_frame_set_monitor_end(BasicObjectLock* value);
   //  public void interpreter_frame_verify_monitor(BasicObjectLock* value) const;
-  //
-  // Tells whether the current interpreter_frame frame pointer
-  // corresponds to the old compiled/deoptimized fp
-  // The receiver used to be a top level frame
-  // public boolean interpreter_frame_equals_unpacked_fp(intptr_t* fp);
-
   //--------------------------------------------------------------------------------
   // Method and constant pool cache:
   //
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/Threads.java	Wed Jun 24 09:13:12 2015 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/Threads.java	Thu Jun 25 09:48:50 2015 -0700
@@ -35,6 +35,7 @@
 import sun.jvm.hotspot.runtime.win32_x86.Win32X86JavaThreadPDAccess;
 import sun.jvm.hotspot.runtime.linux_x86.LinuxX86JavaThreadPDAccess;
 import sun.jvm.hotspot.runtime.linux_amd64.LinuxAMD64JavaThreadPDAccess;
+import sun.jvm.hotspot.runtime.linux_aarch64.LinuxAARCH64JavaThreadPDAccess;
 import sun.jvm.hotspot.runtime.linux_ppc64.LinuxPPC64JavaThreadPDAccess;
 import sun.jvm.hotspot.runtime.linux_sparc.LinuxSPARCJavaThreadPDAccess;
 import sun.jvm.hotspot.runtime.bsd_x86.BsdX86JavaThreadPDAccess;
@@ -91,6 +92,8 @@
                 access = new LinuxSPARCJavaThreadPDAccess();
             } else if (cpu.equals("ppc64")) {
                 access = new LinuxPPC64JavaThreadPDAccess();
+            } else if (cpu.equals("aarch64")) {
+                access = new LinuxAARCH64JavaThreadPDAccess();
             } else {
               try {
                 access = (JavaThreadPDAccess)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/aarch64/AARCH64CurrentFrameGuess.java	Thu Jun 25 09:48:50 2015 -0700
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Red Hat Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.runtime.aarch64;
+
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.aarch64.*;
+import sun.jvm.hotspot.code.*;
+import sun.jvm.hotspot.interpreter.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.runtime.aarch64.*;
+
+/** <P> Should be able to be used on all aarch64 platforms we support
+    (Linux/aarch64) to implement JavaThread's "currentFrameGuess()"
+    functionality. Input is an AARCH64ThreadContext; output is SP, FP,
+    and PC for an AARCH64Frame. Instantiation of the AARCH64Frame is
+    left to the caller, since we may need to subclass AARCH64Frame to
+    support signal handler frames on Unix platforms. </P>
+
+    <P> Algorithm is to walk up the stack within a given range (say,
+    512K at most) looking for a plausible PC and SP for a Java frame,
+    also considering those coming in from the context. If we find a PC
+    that belongs to the VM (i.e., in generated code like the
+    interpreter or CodeCache) then we try to find an associated FP.
+    We repeat this until we either find a complete frame or run out of
+    stack to look at. </P> */
+
+public class AARCH64CurrentFrameGuess {
+  private AARCH64ThreadContext context;
+  private JavaThread       thread;
+  private Address          spFound;
+  private Address          fpFound;
+  private Address          pcFound;
+
+  private static final boolean DEBUG = System.getProperty("sun.jvm.hotspot.runtime.aarch64.AARCH64Frame.DEBUG")
+                                       != null;
+
+  public AARCH64CurrentFrameGuess(AARCH64ThreadContext context,
+                              JavaThread thread) {
+    this.context = context;
+    this.thread  = thread;
+  }
+
+  /** Returns false if not able to find a frame within a reasonable range. */
+  public boolean run(long regionInBytesToSearch) {
+    Address sp  = context.getRegisterAsAddress(AARCH64ThreadContext.SP);
+    Address pc  = context.getRegisterAsAddress(AARCH64ThreadContext.PC);
+    Address fp  = context.getRegisterAsAddress(AARCH64ThreadContext.FP);
+    if (sp == null) {
+      // Bail out if no last java frame either
+      if (thread.getLastJavaSP() != null) {
+        setValues(thread.getLastJavaSP(), thread.getLastJavaFP(), null);
+        return true;
+      }
+      return false;
+    }
+    Address end = sp.addOffsetTo(regionInBytesToSearch);
+    VM vm       = VM.getVM();
+
+    setValues(null, null, null); // Assume we're not going to find anything
+
+    if (vm.isJavaPCDbg(pc)) {
+      if (vm.isClientCompiler()) {
+        // If the topmost frame is a Java frame, we are (pretty much)
+        // guaranteed to have a viable FP. We should be more robust
+        // than this (we have the potential for losing entire threads'
+        // stack traces) but need to see how much work we really have
+        // to do here. Searching the stack for an (SP, FP) pair is
+        // hard since it's easy to misinterpret inter-frame stack
+        // pointers as base-of-frame pointers; we also don't know the
+        // sizes of C1 frames (not registered in the nmethod) so can't
+        // derive them from SP.
+
+        setValues(sp, fp, pc);
+        return true;
+      } else {
+        if (vm.getInterpreter().contains(pc)) {
+          if (DEBUG) {
+            System.out.println("CurrentFrameGuess: choosing interpreter frame: sp = " +
+                               sp + ", fp = " + fp + ", pc = " + pc);
+          }
+          setValues(sp, fp, pc);
+          return true;
+        }
+
+        // For the server compiler, FP is not guaranteed to be valid
+        // for compiled code. In addition, an earlier attempt at a
+        // non-searching algorithm (see below) failed because the
+        // stack pointer from the thread context was pointing
+        // (considerably) beyond the ostensible end of the stack, into
+        // garbage; walking from the topmost frame back caused a crash.
+        //
+        // This algorithm takes the current PC as a given and tries to
+        // find the correct corresponding SP by walking up the stack
+        // and repeatedly performing stackwalks (very inefficient).
+        //
+        // FIXME: there is something wrong with stackwalking across
+        // adapter frames...this is likely to be the root cause of the
+        // failure with the simpler algorithm below.
+
+        for (long offset = 0;
+             offset < regionInBytesToSearch;
+             offset += vm.getAddressSize()) {
+          try {
+            Address curSP = sp.addOffsetTo(offset);
+            Frame frame = new AARCH64Frame(curSP, null, pc);
+            RegisterMap map = thread.newRegisterMap(false);
+            while (frame != null) {
+              if (frame.isEntryFrame() && frame.entryFrameIsFirst()) {
+                // We were able to traverse all the way to the
+                // bottommost Java frame.
+                // This sp looks good. Keep it.
+                if (DEBUG) {
+                  System.out.println("CurrentFrameGuess: Choosing sp = " + curSP + ", pc = " + pc);
+                }
+                setValues(curSP, null, pc);
+                return true;
+              }
+              frame = frame.sender(map);
+            }
+          } catch (Exception e) {
+            if (DEBUG) {
+              System.out.println("CurrentFrameGuess: Exception " + e + " at offset " + offset);
+            }
+            // Bad SP. Try another.
+          }
+        }
+
+        // We were not able to find a plausible SP to go with this PC.
+        // Bail out.
+        return false;
+
+        /*
+        // Original algorithm which does not work because SP was
+        // pointing beyond where it should have:
+
+        // For the server compiler, FP is not guaranteed to be valid
+        // for compiled code. We see whether the PC is in the
+        // interpreter and take care of that, otherwise we run code
+        // (unfortunately) duplicated from AARCH64Frame.senderForCompiledFrame.
+
+        CodeCache cc = vm.getCodeCache();
+        if (cc.contains(pc)) {
+          CodeBlob cb = cc.findBlob(pc);
+
+          // See if we can derive a frame pointer from SP and PC
+          // NOTE: This is the code duplicated from AARCH64Frame
+          Address saved_fp = null;
+          int llink_offset = cb.getLinkOffset();
+          if (llink_offset >= 0) {
+            // Restore base-pointer, since next frame might be an interpreter frame.
+            Address fp_addr = sp.addOffsetTo(VM.getVM().getAddressSize() * llink_offset);
+            saved_fp = fp_addr.getAddressAt(0);
+          }
+
+          setValues(sp, saved_fp, pc);
+          return true;
+        }
+        */
+      }
+    } else {
+      // If the current program counter was not known to us as a Java
+      // PC, we currently assume that we are in the run-time system
+      // and attempt to look to thread-local storage for saved SP and
+      // FP. Note that if these are null (because we were, in fact,
+      // in Java code, i.e., vtable stubs or similar, and the SA
+      // didn't have enough insight into the target VM to understand
+      // that) then we are going to lose the entire stack trace for
+      // the thread, which is sub-optimal. FIXME.
+
+      if (DEBUG) {
+        System.out.println("CurrentFrameGuess: choosing last Java frame: sp = " +
+                           thread.getLastJavaSP() + ", fp = " + thread.getLastJavaFP());
+      }
+      if (thread.getLastJavaSP() == null) {
+        return false; // No known Java frames on stack
+      }
+
+      // The runtime has a nasty habit of not saving fp in the frame
+      // anchor, leaving us to grovel about in the stack to find a
+      // plausible address.  Fortunately, this only happens in
+      // compiled code; there we always have a valid PC, and we always
+      // push LR and FP onto the stack as a pair, with FP at the lower
+      // address.
+      pc = thread.getLastJavaPC();
+      fp = thread.getLastJavaFP();
+      sp = thread.getLastJavaSP();
+
+      if (fp == null) {
+        CodeCache cc = vm.getCodeCache();
+        if (cc.contains(pc)) {
+          CodeBlob cb = cc.findBlob(pc);
+          if (DEBUG) {
+            System.out.println("FP is null.  Found blob frame size " + cb.getFrameSize());
+          }
+          // See if we can derive a frame pointer from SP and PC
+          long link_offset = cb.getFrameSize() - 2 * VM.getVM().getAddressSize();
+          if (link_offset >= 0) {
+            fp = sp.addOffsetTo(link_offset);
+          }
+        }
+      }
+
+      setValues(sp, fp, null);
+
+      return true;
+    }
+  }
+
+  public Address getSP() { return spFound; }
+  public Address getFP() { return fpFound; }
+  /** May be null if getting values from thread-local storage; take
+      care to call the correct AARCH64Frame constructor to recover this if
+      necessary */
+  public Address getPC() { return pcFound; }
+
+  private void setValues(Address sp, Address fp, Address pc) {
+    spFound = sp;
+    fpFound = fp;
+    pcFound = pc;
+  }
+}
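
A hedged sketch of how this guesser is typically driven from the platform's JavaThreadPDAccess implementation (the method shape, variable names, and the 512K search bound are assumptions here, mirroring the pattern used on other architectures; AARCH64Frame is added just below):

  // Illustrative only: recovering a current frame for a JavaThread.
  AARCH64ThreadContext context = (AARCH64ThreadContext) threadProxy.getContext();
  AARCH64CurrentFrameGuess guesser = new AARCH64CurrentFrameGuess(context, thread);
  if (!guesser.run(512 * 1024)) {        // search at most 512K of stack, per the comment above
      return null;                       // no plausible Java frame found
  }
  if (guesser.getPC() == null) {
      // Values came from the frame anchor; this constructor reads the PC off the stack.
      return new AARCH64Frame(guesser.getSP(), guesser.getFP());
  } else {
      return new AARCH64Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
  }
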
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/aarch64/AARCH64Frame.java	Thu Jun 25 09:48:50 2015 -0700
@@ -0,0 +1,555 @@
+/*
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Red Hat Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.runtime.aarch64;
+
+import java.util.*;
+import sun.jvm.hotspot.code.*;
+import sun.jvm.hotspot.compiler.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+
+/** Specialization of and implementation of abstract methods of the
+    Frame class for the aarch64 family of CPUs. */
+
+public class AARCH64Frame extends Frame {
+  private static final boolean DEBUG;
+  static {
+    DEBUG = System.getProperty("sun.jvm.hotspot.runtime.aarch64.AARCH64Frame.DEBUG") != null;
+  }
+
+  // All frames
+  private static final int LINK_OFFSET                =  0;
+  private static final int RETURN_ADDR_OFFSET         =  1;
+  private static final int SENDER_SP_OFFSET           =  2;
+
+  // Interpreter frames
+  private static final int INTERPRETER_FRAME_MIRROR_OFFSET    =  2; // for native calls only
+  private static final int INTERPRETER_FRAME_SENDER_SP_OFFSET = -1;
+  private static final int INTERPRETER_FRAME_LAST_SP_OFFSET   = INTERPRETER_FRAME_SENDER_SP_OFFSET - 1;
+  private static final int INTERPRETER_FRAME_METHOD_OFFSET    = INTERPRETER_FRAME_LAST_SP_OFFSET - 1;
+  private static       int INTERPRETER_FRAME_MDX_OFFSET;         // Non-core builds only
+  private static       int INTERPRETER_FRAME_CACHE_OFFSET;
+  private static       int INTERPRETER_FRAME_LOCALS_OFFSET;
+  private static       int INTERPRETER_FRAME_BCX_OFFSET;
+  private static       int INTERPRETER_FRAME_INITIAL_SP_OFFSET;
+  private static       int INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET;
+  private static       int INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET;
+
+  // Entry frames
+  private static       int ENTRY_FRAME_CALL_WRAPPER_OFFSET = -8;
+
+  // Native frames
+  private static final int NATIVE_FRAME_INITIAL_PARAM_OFFSET =  2;
+
+  private static VMReg fp = new VMReg(29);
+
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) {
+    INTERPRETER_FRAME_MDX_OFFSET                  = INTERPRETER_FRAME_METHOD_OFFSET - 1;
+    INTERPRETER_FRAME_CACHE_OFFSET                = INTERPRETER_FRAME_MDX_OFFSET - 1;
+    INTERPRETER_FRAME_LOCALS_OFFSET               = INTERPRETER_FRAME_CACHE_OFFSET - 1;
+    INTERPRETER_FRAME_BCX_OFFSET                  = INTERPRETER_FRAME_LOCALS_OFFSET - 1;
+    INTERPRETER_FRAME_INITIAL_SP_OFFSET           = INTERPRETER_FRAME_BCX_OFFSET - 1;
+    INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET    = INTERPRETER_FRAME_INITIAL_SP_OFFSET;
+    INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET = INTERPRETER_FRAME_INITIAL_SP_OFFSET;
+  }
+
+
+  // an additional field beyond sp and pc:
+  Address raw_fp; // frame pointer
+  private Address raw_unextendedSP;
+
+  private AARCH64Frame() {
+  }
+
+  private void adjustForDeopt() {
+    if ( pc != null) {
+      // Look for a deopt pc and if it is deopted convert to original pc
+      CodeBlob cb = VM.getVM().getCodeCache().findBlob(pc);
+      if (cb != null && cb.isJavaMethod()) {
+        NMethod nm = (NMethod) cb;
+        if (pc.equals(nm.deoptHandlerBegin())) {
+          if (Assert.ASSERTS_ENABLED) {
+            Assert.that(this.getUnextendedSP() != null, "null SP in Java frame");
+          }
+          // adjust pc if frame is deoptimized.
+          pc = this.getUnextendedSP().getAddressAt(nm.origPCOffset());
+          deoptimized = true;
+        }
+      }
+    }
+  }
+
+  public AARCH64Frame(Address raw_sp, Address raw_fp, Address pc) {
+    this.raw_sp = raw_sp;
+    this.raw_unextendedSP = raw_sp;
+    this.raw_fp = raw_fp;
+    this.pc = pc;
+    adjustUnextendedSP();
+
+    // Frame must be fully constructed before this call
+    adjustForDeopt();
+
+    if (DEBUG) {
+      System.out.println("AARCH64Frame(sp, fp, pc): " + this);
+      dumpStack();
+    }
+  }
+
+  public AARCH64Frame(Address raw_sp, Address raw_fp) {
+    this.raw_sp = raw_sp;
+    this.raw_unextendedSP = raw_sp;
+    this.raw_fp = raw_fp;
+    this.pc = raw_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
+    adjustUnextendedSP();
+
+    // Frame must be fully constructed before this call
+    adjustForDeopt();
+
+    if (DEBUG) {
+      System.out.println("AARCH64Frame(sp, fp): " + this);
+      dumpStack();
+    }
+  }
+
+  public AARCH64Frame(Address raw_sp, Address raw_unextendedSp, Address raw_fp, Address pc) {
+    this.raw_sp = raw_sp;
+    this.raw_unextendedSP = raw_unextendedSp;
+    this.raw_fp = raw_fp;
+    this.pc = pc;
+    adjustUnextendedSP();
+
+    // Frame must be fully constructed before this call
+    adjustForDeopt();
+
+    if (DEBUG) {
+      System.out.println("AARCH64Frame(sp, unextendedSP, fp, pc): " + this);
+      dumpStack();
+    }
+
+  }
+
+  public Object clone() {
+    AARCH64Frame frame = new AARCH64Frame();
+    frame.raw_sp = raw_sp;
+    frame.raw_unextendedSP = raw_unextendedSP;
+    frame.raw_fp = raw_fp;
+    frame.pc = pc;
+    frame.deoptimized = deoptimized;
+    return frame;
+  }
+
+  public boolean equals(Object arg) {
+    if (arg == null) {
+      return false;
+    }
+
+    if (!(arg instanceof AARCH64Frame)) {
+      return false;
+    }
+
+    AARCH64Frame other = (AARCH64Frame) arg;
+
+    return (AddressOps.equal(getSP(), other.getSP()) &&
+            AddressOps.equal(getUnextendedSP(), other.getUnextendedSP()) &&
+            AddressOps.equal(getFP(), other.getFP()) &&
+            AddressOps.equal(getPC(), other.getPC()));
+  }
+
+  public int hashCode() {
+    if (raw_sp == null) {
+      return 0;
+    }
+
+    return raw_sp.hashCode();
+  }
+
+  public String toString() {
+    return "sp: " + (getSP() == null? "null" : getSP().toString()) +
+         ", unextendedSP: " + (getUnextendedSP() == null? "null" : getUnextendedSP().toString()) +
+         ", fp: " + (getFP() == null? "null" : getFP().toString()) +
+         ", pc: " + (pc == null? "null" : pc.toString());
+  }
+
+  // accessors for the instance variables
+  public Address getFP() { return raw_fp; }
+  public Address getSP() { return raw_sp; }
+  public Address getID() { return raw_sp; }
+
+  // FIXME: not implemented yet
+  public boolean isSignalHandlerFrameDbg() { return false; }
+  public int     getSignalNumberDbg()      { return 0;     }
+  public String  getSignalNameDbg()        { return null;  }
+
+  public boolean isInterpretedFrameValid() {
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(isInterpretedFrame(), "Not an interpreted frame");
+    }
+
+    // These are reasonable sanity checks
+    if (getFP() == null || getFP().andWithMask(0x3) != null) {
+      return false;
+    }
+
+    if (getSP() == null || getSP().andWithMask(0x3) != null) {
+      return false;
+    }
+
+    if (getFP().addOffsetTo(INTERPRETER_FRAME_INITIAL_SP_OFFSET * VM.getVM().getAddressSize()).lessThan(getSP())) {
+      return false;
+    }
+
+    // These are hacks to keep us out of trouble.
+    // The problem with these is that they mask other problems
+    if (getFP().lessThanOrEqual(getSP())) {
+      // this attempts to deal with unsigned comparison above
+      return false;
+    }
+
+    if (getFP().minus(getSP()) > 4096 * VM.getVM().getAddressSize()) {
+      // stack frames shouldn't be large.
+      return false;
+    }
+
+    return true;
+  }
+
+  // FIXME: not applicable in current system
+  //  void    patch_pc(Thread* thread, address pc);
+
+  public Frame sender(RegisterMap regMap, CodeBlob cb) {
+    AARCH64RegisterMap map = (AARCH64RegisterMap) regMap;
+
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(map != null, "map must be set");
+    }
+
+    // Default is we don't have to follow them. The sender_for_xxx will
+    // update it accordingly.
+    map.setIncludeArgumentOops(false);
+
+    if (isEntryFrame())       return senderForEntryFrame(map);
+    if (isInterpretedFrame()) return senderForInterpreterFrame(map);
+
+    if(cb == null) {
+      cb = VM.getVM().getCodeCache().findBlob(getPC());
+    } else {
+      if (Assert.ASSERTS_ENABLED) {
+        Assert.that(cb.equals(VM.getVM().getCodeCache().findBlob(getPC())), "Must be the same");
+      }
+    }
+
+    if (cb != null) {
+      return senderForCompiledFrame(map, cb);
+    }
+
+    // Must be native-compiled frame, i.e. the marshaling code for native
+    // methods that exists in the core system.
+    return new AARCH64Frame(getSenderSP(), getLink(), getSenderPC());
+  }
+
+  private Frame senderForEntryFrame(AARCH64RegisterMap map) {
+    if (DEBUG) {
+      System.out.println("senderForEntryFrame");
+    }
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(map != null, "map must be set");
+    }
+    // Java frame called from C; skip all C frames and return top C
+    // frame of that chunk as the sender
+    AARCH64JavaCallWrapper jcw = (AARCH64JavaCallWrapper) getEntryFrameCallWrapper();
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(!entryFrameIsFirst(), "next Java fp must be non zero");
+      Assert.that(jcw.getLastJavaSP().greaterThan(getSP()), "must be above this frame on stack");
+    }
+    AARCH64Frame fr;
+    if (jcw.getLastJavaPC() != null) {
+      fr = new AARCH64Frame(jcw.getLastJavaSP(), jcw.getLastJavaFP(), jcw.getLastJavaPC());
+    } else {
+      fr = new AARCH64Frame(jcw.getLastJavaSP(), jcw.getLastJavaFP());
+    }
+    map.clear();
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(map.getIncludeArgumentOops(), "should be set by clear");
+    }
+    return fr;
+  }
+
+  //------------------------------------------------------------------------------
+  // frame::adjust_unextended_sp
+  private void adjustUnextendedSP() {
+    // If we are returning to a compiled MethodHandle call site, the
+    // saved_fp will in fact be a saved value of the unextended SP.  The
+    // simplest way to tell whether we are returning to such a call site
+    // is as follows:
+
+    CodeBlob cb = cb();
+    NMethod senderNm = (cb == null) ? null : cb.asNMethodOrNull();
+    if (senderNm != null) {
+      // If the sender PC is a deoptimization point, get the original
+      // PC.  For MethodHandle call site the unextended_sp is stored in
+      // saved_fp.
+      if (senderNm.isDeoptMhEntry(getPC())) {
+        // DEBUG_ONLY(verifyDeoptMhOriginalPc(senderNm, getFP()));
+        raw_unextendedSP = getFP();
+      }
+      else if (senderNm.isDeoptEntry(getPC())) {
+        // DEBUG_ONLY(verifyDeoptOriginalPc(senderNm, raw_unextendedSp));
+      }
+      else if (senderNm.isMethodHandleReturn(getPC())) {
+        raw_unextendedSP = getFP();
+      }
+    }
+  }
+
+  private Frame senderForInterpreterFrame(AARCH64RegisterMap map) {
+    if (DEBUG) {
+      System.out.println("senderForInterpreterFrame");
+    }
+    Address unextendedSP = addressOfStackSlot(INTERPRETER_FRAME_SENDER_SP_OFFSET).getAddressAt(0);
+    Address sp = addressOfStackSlot(SENDER_SP_OFFSET);
+    // We do not need to update the callee-save register mapping because above
+    // us is either another interpreter frame or a converter-frame, but never
+    // directly a compiled frame.
+    // 11/24/04 SFG. With the removal of adapter frames this is no longer true.
+    // However, c2 no longer uses callee-saved registers for Java calls, so there
+    // are no callee registers to find.
+
+    if (map.getUpdateMap())
+      updateMapWithSavedLink(map, addressOfStackSlot(LINK_OFFSET));
+
+    return new AARCH64Frame(sp, unextendedSP, getLink(), getSenderPC());
+  }
+
+  private void updateMapWithSavedLink(RegisterMap map, Address savedFPAddr) {
+    map.setLocation(fp, savedFPAddr);
+  }
+
+  private Frame senderForCompiledFrame(AARCH64RegisterMap map, CodeBlob cb) {
+    if (DEBUG) {
+      System.out.println("senderForCompiledFrame");
+    }
+
+    //
+    // NOTE: some of this code is (unfortunately) duplicated in AARCH64CurrentFrameGuess
+    //
+
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(map != null, "map must be set");
+    }
+
+    // frame owned by optimizing compiler
+    if (Assert.ASSERTS_ENABLED) {
+        Assert.that(cb.getFrameSize() >= 0, "must have non-negative frame size");
+    }
+    Address senderSP = getUnextendedSP().addOffsetTo(cb.getFrameSize());
+
+    // The return address is always the word on the stack just below the sender SP
+    Address senderPC = senderSP.getAddressAt(-1 * VM.getVM().getAddressSize());
+
+    // This is the saved value of FP which may or may not really be an FP.
+    // It is only an FP if the sender is an interpreter frame.
+    Address savedFPAddr = senderSP.addOffsetTo(- SENDER_SP_OFFSET * VM.getVM().getAddressSize());
+
+    if (map.getUpdateMap()) {
+      // Tell GC to use argument oopmaps for some runtime stubs that need it.
+      // For C1, the runtime stub might not have oop maps, so set this flag
+      // outside of update_register_map.
+      map.setIncludeArgumentOops(cb.callerMustGCArguments());
+
+      if (cb.getOopMaps() != null) {
+        ImmutableOopMapSet.updateRegisterMap(this, cb, map, true);
+      }
+
+      // Since the prolog does the save and restore of FP, there is no oopmap
+      // for it, so we must fill in its location as if there were an oopmap entry,
+      // since if our caller was compiled code there could be live JVM state in it.
+      updateMapWithSavedLink(map, savedFPAddr);
+    }
+
+    return new AARCH64Frame(senderSP, savedFPAddr.getAddressAt(0), senderPC);
+  }
+
+  protected boolean hasSenderPD() {
+    return true;
+  }
+
+  public long frameSize() {
+    return (getSenderSP().minus(getSP()) / VM.getVM().getAddressSize());
+  }
+
+  public Address getLink() {
+    try {
+      if (DEBUG) {
+        System.out.println("Reading link at " + addressOfStackSlot(LINK_OFFSET)
+                           + " = " + addressOfStackSlot(LINK_OFFSET).getAddressAt(0));
+      }
+      return addressOfStackSlot(LINK_OFFSET).getAddressAt(0);
+    } catch (Exception e) {
+      if (DEBUG)
+        System.out.println("Returning null");
+      return null;
+    }
+  }
+
+  // FIXME: not implementable yet
+  //inline void      frame::set_link(intptr_t* addr)  { *(intptr_t **)addr_at(link_offset) = addr; }
+
+  public Address getUnextendedSP() { return raw_unextendedSP; }
+
+  // Return address:
+  public Address getSenderPCAddr() { return addressOfStackSlot(RETURN_ADDR_OFFSET); }
+  public Address getSenderPC()     { return getSenderPCAddr().getAddressAt(0);      }
+
+  // Return the address of the native param at the given zero-origin index.
+  public Address getNativeParamAddr(int idx) {
+    return addressOfStackSlot(NATIVE_FRAME_INITIAL_PARAM_OFFSET + idx);
+  }
+
+  public Address getSenderSP()     { return addressOfStackSlot(SENDER_SP_OFFSET); }
+
+  public Address addressOfInterpreterFrameLocals() {
+    return addressOfStackSlot(INTERPRETER_FRAME_LOCALS_OFFSET);
+  }
+
+  private Address addressOfInterpreterFrameBCX() {
+    return addressOfStackSlot(INTERPRETER_FRAME_BCX_OFFSET);
+  }
+
+  public int getInterpreterFrameBCI() {
+    // FIXME: this is not atomic with respect to GC and is unsuitable
+    // for use in a non-debugging, or reflective, system. Need to
+    // figure out how to express this.
+    Address bcp = addressOfInterpreterFrameBCX().getAddressAt(0);
+    Address methodHandle = addressOfInterpreterFrameMethod().getAddressAt(0);
+    Method method = (Method)Metadata.instantiateWrapperFor(methodHandle);
+    return bcpToBci(bcp, method);
+  }
+
+  public Address addressOfInterpreterFrameMDX() {
+    return addressOfStackSlot(INTERPRETER_FRAME_MDX_OFFSET);
+  }
+
+  // FIXME
+  //inline int frame::interpreter_frame_monitor_size() {
+  //  return BasicObjectLock::size();
+  //}
+
+  // expression stack
+  // (the max_stack arguments are used by the GC; see class FrameClosure)
+
+  public Address addressOfInterpreterFrameExpressionStack() {
+    Address monitorEnd = interpreterFrameMonitorEnd().address();
+    return monitorEnd.addOffsetTo(-1 * VM.getVM().getAddressSize());
+  }
+
+  public int getInterpreterFrameExpressionStackDirection() { return -1; }
+
+  // top of expression stack
+  public Address addressOfInterpreterFrameTOS() {
+    return getSP();
+  }
+
+  /** Expression stack from top down */
+  public Address addressOfInterpreterFrameTOSAt(int slot) {
+    return addressOfInterpreterFrameTOS().addOffsetTo(slot * VM.getVM().getAddressSize());
+  }
+
+  public Address getInterpreterFrameSenderSP() {
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(isInterpretedFrame(), "interpreted frame expected");
+    }
+    return addressOfStackSlot(INTERPRETER_FRAME_SENDER_SP_OFFSET).getAddressAt(0);
+  }
+
+  // Monitors
+  public BasicObjectLock interpreterFrameMonitorBegin() {
+    return new BasicObjectLock(addressOfStackSlot(INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET));
+  }
+
+  public BasicObjectLock interpreterFrameMonitorEnd() {
+    Address result = addressOfStackSlot(INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET).getAddressAt(0);
+    if (Assert.ASSERTS_ENABLED) {
+      // make sure the pointer points inside the frame
+      Assert.that(AddressOps.gt(getFP(), result), "result must be < frame pointer");
+      Assert.that(AddressOps.lte(getSP(), result), "result must be >= stack pointer");
+    }
+    return new BasicObjectLock(result);
+  }
+
+  public int interpreterFrameMonitorSize() {
+    return BasicObjectLock.size();
+  }
+
+  // Method
+  public Address addressOfInterpreterFrameMethod() {
+    return addressOfStackSlot(INTERPRETER_FRAME_METHOD_OFFSET);
+  }
+
+  // Constant pool cache
+  public Address addressOfInterpreterFrameCPCache() {
+    return addressOfStackSlot(INTERPRETER_FRAME_CACHE_OFFSET);
+  }
+
+  // Entry frames
+  public JavaCallWrapper getEntryFrameCallWrapper() {
+    return new AARCH64JavaCallWrapper(addressOfStackSlot(ENTRY_FRAME_CALL_WRAPPER_OFFSET).getAddressAt(0));
+  }
+
+  protected Address addressOfSavedOopResult() {
+    // offset is 2 for compiler2 and 3 for compiler1
+    return getSP().addOffsetTo((VM.getVM().isClientCompiler() ? 2 : 3) *
+                               VM.getVM().getAddressSize());
+  }
+
+  protected Address addressOfSavedReceiver() {
+    return getSP().addOffsetTo(-4 * VM.getVM().getAddressSize());
+  }
+
+  private void dumpStack() {
+    for (Address addr = getSP().addOffsetTo(-4 * VM.getVM().getAddressSize());
+         AddressOps.lt(addr, getSP());
+         addr = addr.addOffsetTo(VM.getVM().getAddressSize())) {
+      System.out.println(addr + ": " + addr.getAddressAt(0));
+    }
+    System.out.println("-----------------------");
+    for (Address addr = getSP();
+         AddressOps.lte(addr, getSP().addOffsetTo(20 * VM.getVM().getAddressSize()));
+         addr = addr.addOffsetTo(VM.getVM().getAddressSize())) {
+      System.out.println(addr + ": " + addr.getAddressAt(0));
+    }
+  }
+}
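The compiled-frame sender computation above reduces to a few steps of address arithmetic: the caller's SP is the callee's unextended SP plus the CodeBlob's frame size, the return PC sits in the word just below that SP, and the saved FP sits SENDER_SP_OFFSET words below it. A minimal illustrative sketch with plain longs (the word size, frame size and addresses are made-up values; the frame size is assumed to be in bytes as returned by getFrameSize(), and SENDER_SP_OFFSET is assumed to be 2 as on other HotSpot ports):

    // Illustrative only: mirrors the arithmetic in senderForCompiledFrame().
    long wordSize          = 8;                    // assumed 64-bit address size
    long frameSizeBytes    = 96;                   // made-up CodeBlob frame size in bytes
    int  SENDER_SP_OFFSET  = 2;                    // assumption: matches the frame layout constants
    long unextendedSP      = 0x0000007f0000f000L;  // made-up callee unextended SP
    long senderSP          = unextendedSP + frameSizeBytes;
    long senderPCSlot      = senderSP - wordSize;                    // holds the return address
    long savedFPSlot       = senderSP - SENDER_SP_OFFSET * wordSize; // saved FP (or unextended SP at MH sites)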
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/aarch64/AARCH64JavaCallWrapper.java	Thu Jun 25 09:48:50 2015 -0700
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Red Hat Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.runtime.aarch64;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.runtime.*;
+
+public class AARCH64JavaCallWrapper extends JavaCallWrapper {
+  private static AddressField lastJavaFPField;
+
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) {
+    Type type = db.lookupType("JavaFrameAnchor");
+
+    lastJavaFPField  = type.getAddressField("_last_Java_fp");
+  }
+
+  public AARCH64JavaCallWrapper(Address addr) {
+    super(addr);
+  }
+
+  public Address getLastJavaFP() {
+    return lastJavaFPField.getValue(addr.addOffsetTo(anchorField.getOffset()));
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/aarch64/AARCH64RegisterMap.java	Thu Jun 25 09:48:50 2015 -0700
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Red Hat Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.runtime.aarch64;
+
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+
+public class AARCH64RegisterMap extends RegisterMap {
+
+  /** This is the only public constructor */
+  public AARCH64RegisterMap(JavaThread thread, boolean updateMap) {
+    super(thread, updateMap);
+  }
+
+  protected AARCH64RegisterMap(RegisterMap map) {
+    super(map);
+  }
+
+  public Object clone() {
+    AARCH64RegisterMap retval = new AARCH64RegisterMap(this);
+    return retval;
+  }
+
+  // no PD state to clear or copy:
+  protected void clearPD() {}
+  protected void initializePD() {}
+  protected void initializeFromPD(RegisterMap map) {}
+  protected Address getLocationPD(VMReg reg) { return null; }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/linux_aarch64/LinuxAARCH64JavaThreadPDAccess.java	Thu Jun 25 09:48:50 2015 -0700
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Red Hat Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.runtime.linux_aarch64;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.aarch64.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.runtime.aarch64.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+
+public class LinuxAARCH64JavaThreadPDAccess implements JavaThreadPDAccess {
+  private static AddressField  lastJavaFPField;
+  private static AddressField  osThreadField;
+
+  // Field from OSThread
+  private static CIntegerField osThreadThreadIDField;
+
+  // This is currently unneeded but is being kept in case we change
+  // the currentFrameGuess algorithm
+  private static final long GUESS_SCAN_RANGE = 128 * 1024;
+
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) {
+    Type type = db.lookupType("JavaThread");
+    osThreadField           = type.getAddressField("_osthread");
+
+    Type anchorType = db.lookupType("JavaFrameAnchor");
+    lastJavaFPField         = anchorType.getAddressField("_last_Java_fp");
+
+    Type osThreadType = db.lookupType("OSThread");
+    osThreadThreadIDField   = osThreadType.getCIntegerField("_thread_id");
+  }
+
+  public Address getLastJavaFP(Address addr) {
+    return lastJavaFPField.getValue(addr.addOffsetTo(sun.jvm.hotspot.runtime.JavaThread.getAnchorField().getOffset()));
+  }
+
+  public Address getLastJavaPC(Address addr) {
+    return null;
+  }
+
+  public Address getBaseOfStackPointer(Address addr) {
+    return null;
+  }
+
+  public Frame getLastFramePD(JavaThread thread, Address addr) {
+    Address fp = thread.getLastJavaFP();
+    if (fp == null) {
+      return null; // no information
+    }
+    return new AARCH64Frame(thread.getLastJavaSP(), fp);
+  }
+
+  public RegisterMap newRegisterMap(JavaThread thread, boolean updateMap) {
+    return new AARCH64RegisterMap(thread, updateMap);
+  }
+
+  public Frame getCurrentFrameGuess(JavaThread thread, Address addr) {
+    ThreadProxy t = getThreadProxy(addr);
+    AARCH64ThreadContext context = (AARCH64ThreadContext) t.getContext();
+    AARCH64CurrentFrameGuess guesser = new AARCH64CurrentFrameGuess(context, thread);
+    if (!guesser.run(GUESS_SCAN_RANGE)) {
+      return null;
+    }
+    if (guesser.getPC() == null) {
+      return new AARCH64Frame(guesser.getSP(), guesser.getFP());
+    } else {
+      return new AARCH64Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
+    }
+  }
+
+  public void printThreadIDOn(Address addr, PrintStream tty) {
+    tty.print(getThreadProxy(addr));
+  }
+
+  public void printInfoOn(Address threadAddr, PrintStream tty) {
+    tty.print("Thread id: ");
+    printThreadIDOn(threadAddr, tty);
+//    tty.println("\nPostJavaState: " + getPostJavaState(threadAddr));
+  }
+
+  public Address getLastSP(Address addr) {
+    ThreadProxy t = getThreadProxy(addr);
+    AARCH64ThreadContext context = (AARCH64ThreadContext) t.getContext();
+    return context.getRegisterAsAddress(AARCH64ThreadContext.SP);
+  }
+
+  public ThreadProxy getThreadProxy(Address addr) {
+    // Addr is the address of the JavaThread.
+    // Fetch the OSThread (for now and for simplicity, not making a
+    // separate "OSThread" class in this package)
+    Address osThreadAddr = osThreadField.getValue(addr);
+    // Get the address of the _thread_id from the OSThread
+    Address threadIdAddr = osThreadAddr.addOffsetTo(osThreadThreadIDField.getOffset());
+
+    JVMDebugger debugger = VM.getVM().getDebugger();
+    return debugger.getThreadForIdentifierAddress(threadIdAddr);
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/AltPlatformInfo.java	Wed Jun 24 09:13:12 2015 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/AltPlatformInfo.java	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,10 @@
 package sun.jvm.hotspot.utilities;
 
 public interface AltPlatformInfo {
+
   // Additional cpu types can be tested via this interface
+  public boolean knownCPU(String cpu);
 
-  public boolean knownCPU(String cpu);
-}
\ No newline at end of file
+  // Mangle a cpu name if necessary
+  public String getCPU(String cpu);
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java	Wed Jun 24 09:13:12 2015 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,27 +52,54 @@
     }
   }
 
-  /* Returns "sparc" for SPARC based platforms and "x86" for x86 based
-     platforms. Otherwise returns the value of os.arch.  If the value
-     is not recognized as supported, an exception is thrown instead. */
+  public static boolean knownCPU(String cpu) {
+    final String[] KNOWN =
+        new String[] {"i386", "x86", "x86_64", "amd64", "sparc", "sparcv9", "ppc64", "aarch64"};
+
+    for (String s : KNOWN) {
+      if (s.equals(cpu))
+        return true;
+    }
+
+    return false;
+  }
+
+  /* Returns "sparc" for SPARC based platforms "x86" for x86 based
+     platforms and x86_64 for 64bit x86 based platform. Otherwise
+     returns the value of os.arch. If the value is not recognized as supported,
+     an exception is thrown instead. */
+
   public static String getCPU() throws UnsupportedPlatformException {
     String cpu = System.getProperty("os.arch");
-    if (cpu.equals("i386") || cpu.equals("x86")) {
+
+    // Let any additional CPU mangling fire first
+    try {
+      Class pic = Class.forName("sun.jvm.hotspot.utilities.PlatformInfoClosed");
+      AltPlatformInfo api = (AltPlatformInfo) pic.newInstance();
+      if (api.knownCPU(cpu)) {
+        return api.getCPU(cpu);
+      }
+    } catch (Exception e) {
+       // Ignored
+    }
+
+    // Check that CPU is supported
+    if (!knownCPU(cpu)) {
+       throw new UnsupportedPlatformException("CPU type " + cpu + " not yet supported");
+    }
+
+    // Tweaks
+    if (cpu.equals("i386"))
       return "x86";
-    } else if (cpu.equals("sparc") || cpu.equals("sparcv9")) {
+
+    if (cpu.equals("sparcv9"))
       return "sparc";
-    } else if (cpu.equals("ia64") || cpu.equals("amd64") || cpu.equals("x86_64") || cpu.equals("ppc64") || cpu.equals("aarch64")) {
-      return cpu;
-    } else {
-      try {
-        Class pic = Class.forName("sun.jvm.hotspot.utilities.PlatformInfoClosed");
-        AltPlatformInfo api = (AltPlatformInfo)pic.newInstance();
-        if (api.knownCPU(cpu)) {
-          return cpu;
-        }
-      } catch (Exception e) {}
-      throw new UnsupportedPlatformException("CPU type " + cpu + " not yet supported");
-    }
+
+    if (cpu.equals("x86_64"))
+      return "amd64";
+
+    return cpu;
+
   }
 
   // this main is invoked from Makefile to make platform specific agent Makefile(s).
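The rewritten getCPU() above first lets an optional PlatformInfoClosed/AltPlatformInfo implementation remap the name, then rejects anything outside the known-CPU list, and finally canonicalises a few aliases. The resulting mapping, sketched as a hypothetical stand-alone helper (the real logic lives inline in getCPU()):

    // Sketch of the name canonicalisation performed by PlatformInfo.getCPU().
    static String canonicalCpuName(String cpu) {
      if (cpu.equals("i386"))    return "x86";    // 32-bit x86 alias
      if (cpu.equals("sparcv9")) return "sparc";  // 64-bit SPARC alias
      if (cpu.equals("x86_64"))  return "amd64";  // 64-bit x86 alias
      return cpu;                                 // amd64, sparc, ppc64, aarch64, ... pass through
    }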
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerLocation.java	Wed Jun 24 09:13:12 2015 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerLocation.java	Thu Jun 25 09:48:50 2015 -0700
@@ -84,11 +84,11 @@
   }
 
   public boolean isInNewGen() {
-    return ((gen != null) && (gen.level() == 0));
+    return ((gen != null) && (gen == ((GenCollectedHeap)heap).getGen(0)));
   }
 
   public boolean isInOldGen() {
-    return ((gen != null) && (gen.level() == 1));
+    return ((gen != null) && (gen == ((GenCollectedHeap)heap).getGen(1)));
   }
 
   public boolean inOtherGen() {
@@ -207,8 +207,6 @@
           tty.print("In new generation ");
         } else if (isInOldGen()) {
           tty.print("In old generation ");
-        } else if (gen != null) {
-          tty.print("In Generation " + getGeneration().level());
         } else {
           tty.print("In unknown section of Java heap");
         }
--- a/make/bsd/makefiles/dtrace.make	Wed Jun 24 09:13:12 2015 +0200
+++ b/make/bsd/makefiles/dtrace.make	Thu Jun 25 09:48:50 2015 -0700
@@ -263,14 +263,19 @@
 $(DtraceOutDir):
 	mkdir $(DtraceOutDir)
 
+# When building using a devkit, dtrace cannot find the correct preprocessor, so
+# we run it explicitly before running dtrace.
 $(DtraceOutDir)/hotspot.h: $(DTRACE_COMMON_SRCDIR)/hotspot.d | $(DtraceOutDir)
-	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hotspot.d
+	$(QUIETLY) $(CC) -E $(DTRACE_OPTS) -I. -x c $(DTRACE_COMMON_SRCDIR)/hotspot.d > $(DtraceOutDir)/hotspot.d
+	$(QUIETLY) $(DTRACE_PROG) -h -o $@ -s $(DtraceOutDir)/hotspot.d
 
 $(DtraceOutDir)/hotspot_jni.h: $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d | $(DtraceOutDir)
-	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d
+	$(QUIETLY) $(CC) -E $(DTRACE_OPTS) -I. -x c $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d > $(DtraceOutDir)/hotspot_jni.d
+	$(QUIETLY) $(DTRACE_PROG) -h -o $@ -s $(DtraceOutDir)/hotspot_jni.d
 
 $(DtraceOutDir)/hs_private.h: $(DTRACE_COMMON_SRCDIR)/hs_private.d | $(DtraceOutDir)
-	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hs_private.d
+	$(QUIETLY) $(CC) -E $(DTRACE_OPTS) -I. -x c $(DTRACE_COMMON_SRCDIR)/hs_private.d > $(DtraceOutDir)/hs_private.d
+	$(QUIETLY) $(DTRACE_PROG) -h -o $@ -s $(DtraceOutDir)/hs_private.d
 
 dtrace_gen_headers: $(DtraceOutDir)/hotspot.h $(DtraceOutDir)/hotspot_jni.h $(DtraceOutDir)/hs_private.h 
 
--- a/make/bsd/makefiles/universal.gmk	Wed Jun 24 09:13:12 2015 +0200
+++ b/make/bsd/makefiles/universal.gmk	Thu Jun 25 09:48:50 2015 -0700
@@ -56,13 +56,14 @@
 universalize: $(UNIVERSAL_LIPO_LIST) $(UNIVERSAL_COPY_LIST)
 	$(RM) -r $(EXPORT_PATH)/lib/{i386,amd64}
 
+LIPO ?= lipo
 
 # Package built libraries in a universal binary
 $(UNIVERSAL_LIPO_LIST):
 	BUILT_LIPO_FILES="`find $(EXPORT_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_LIB_DIR)/,,$@) 2>/dev/null`" || test $$? = "1"; \
 	if [ -n "$${BUILT_LIPO_FILES}" ]; then \
 	  $(MKDIR) -p $(shell dirname $@); \
-	  lipo -create -output $@ $${BUILT_LIPO_FILES}; \
+	  $(LIPO) -create -output $@ $${BUILT_LIPO_FILES}; \
 	fi
 
 
--- a/make/sa.files	Wed Jun 24 09:13:12 2015 +0200
+++ b/make/sa.files	Thu Jun 25 09:48:50 2015 -0700
@@ -44,6 +44,7 @@
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/compiler/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/amd64/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/aarch64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/bsd/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/bsd/amd64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/bsd/x86/*.java \
@@ -55,6 +56,7 @@
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/amd64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/ia64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/ppc64/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/aarch64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/x86/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/sparc/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/posix/*.java \
@@ -63,6 +65,7 @@
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/amd64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/ppc64/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/aarch64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/sparc/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/x86/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/*.java \
@@ -70,6 +73,7 @@
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/ppc64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/sparc/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/x86/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/aarch64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/sparc/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/win32/coff/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/*.java \
@@ -92,11 +96,13 @@
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/prims/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/amd64/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/aarch64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/bsd/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/bsd_amd64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/bsd_x86/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_amd64/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_aarch64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_x86/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_sparc/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_ppc64/*.java \
--- a/src/cpu/aarch64/vm/frame_aarch64.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/cpu/aarch64/vm/frame_aarch64.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -526,16 +526,6 @@
   return frame(sender_sp(), link(), sender_pc());
 }
 
-bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
-  assert(is_interpreted_frame(), "must be interpreter frame");
-  Method* method = interpreter_frame_method();
-  // When unpacking an optimized frame the frame pointer is
-  // adjusted with:
-  int diff = (method->max_locals() - method->size_of_parameters()) *
-             Interpreter::stackElementWords;
-  return _fp == (fp - diff);
-}
-
 bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
 // QQQ
 #ifdef CC_INTERP
--- a/src/cpu/aarch64/vm/globals_aarch64.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/cpu/aarch64/vm/globals_aarch64.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -84,7 +84,7 @@
 
 #ifdef BUILTIN_SIM
 #define UseBuiltinSim           true
-#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
+#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \
                                                                         \
   product(bool, NotifySimulator, UseBuiltinSim,                         \
          "tell the AArch64 sim where we are in method code")            \
@@ -112,7 +112,7 @@
 #define NotifySimulator         false
 #define UseSimulatorCache       false
 #define DisableBCCheck          true
-#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
+#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \
                                                                         \
   product(bool, NearCpool, true,                                        \
          "constant pool is close to instructions")                      \
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -2888,41 +2888,40 @@
   cmp(src1, rscratch1);
 }
 
+void MacroAssembler::store_check(Register obj, Address dst) {
+  store_check(obj);
+}
+
 void MacroAssembler::store_check(Register obj) {
   // Does a store check for the oop in register obj. The content of
   // register obj is destroyed afterwards.
-  store_check_part_1(obj);
-  store_check_part_2(obj);
-}
-
-void MacroAssembler::store_check(Register obj, Address dst) {
-  store_check(obj);
-}
-
-
-// split the store check operation so that other instructions can be scheduled inbetween
-void MacroAssembler::store_check_part_1(Register obj) {
+
   BarrierSet* bs = Universe::heap()->barrier_set();
   assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
+
+  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
+  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+
   lsr(obj, obj, CardTableModRefBS::card_shift);
-}
-
-void MacroAssembler::store_check_part_2(Register obj) {
-  BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
-  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
-  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
-
-  // The calculation for byte_map_base is as follows:
-  // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
-  // So this essentially converts an address to a displacement and
-  // it will never need to be relocated.
-
-  // FIXME: It's not likely that disp will fit into an offset so we
-  // don't bother to check, but it could save an instruction.
-  intptr_t disp = (intptr_t) ct->byte_map_base;
-  mov(rscratch1, disp);
-  strb(zr, Address(obj, rscratch1));
+
+  assert(CardTableModRefBS::dirty_card_val() == 0, "must be");
+
+  {
+    ExternalAddress cardtable((address) ct->byte_map_base);
+    unsigned long offset;
+    adrp(rscratch1, cardtable, offset);
+    assert(offset == 0, "byte_map_base is misaligned");
+  }
+
+  if (UseCondCardMark) {
+    Label L_already_dirty;
+    ldrb(rscratch2,  Address(obj, rscratch1));
+    cbz(rscratch2, L_already_dirty);
+    strb(zr, Address(obj, rscratch1));
+    bind(L_already_dirty);
+  } else {
+    strb(zr, Address(obj, rscratch1));
+  }
 }
 
 void MacroAssembler::load_klass(Register dst, Register src) {
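The new store_check folds the old two-part barrier into a single routine and, when UseCondCardMark is set, only dirties the card if it is not already dirty (the x86 change further down does the same and additionally issues a StoreLoad membar under CMS). A minimal sketch of that barrier written in Java for consistency with the agent code above; the array-based card table, the int index truncation and the method name are illustrative only, since HotSpot emits this logic as inline assembly:

    // Illustrative conditional card-mark barrier (not the real emitted code).
    static void storeCheck(byte[] cardTable, int cardShift, long oopAddr, boolean useCondCardMark) {
      final byte DIRTY = 0;                       // dirty_card_val() == 0
      int card = (int) (oopAddr >>> cardShift);   // card index covering the store (truncated for illustration)
      if (useCondCardMark) {
        if (cardTable[card] != DIRTY) {           // skip the store if the card is already dirty
          cardTable[card] = DIRTY;
        }
      } else {
        cardTable[card] = DIRTY;                  // unconditional dirtying
      }
    }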
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -756,10 +756,6 @@
 
 #endif // INCLUDE_ALL_GCS
 
-  // split store_check(Register obj) to enhance instruction interleaving
-  void store_check_part_1(Register obj);
-  void store_check_part_2(Register obj);
-
   // oop manipulations
   void load_klass(Register dst, Register src);
   void store_klass(Register dst, Register src);
--- a/src/cpu/ppc/vm/globals_ppc.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/cpu/ppc/vm/globals_ppc.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -63,7 +63,7 @@
 define_pd_global(uintx, TypeProfileLevel, 111);
 
 // Platform dependent flag handling: flags only defined on this platform.
-#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)  \
+#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint)  \
                                                                             \
   /* Load poll address from thread. This is used to implement per-thread */ \
   /* safepoints on platforms != IA64. */                                    \
--- a/src/cpu/ppc/vm/vm_version_ppc.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/cpu/ppc/vm/vm_version_ppc.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -510,7 +510,8 @@
 
 void VM_Version::determine_features() {
 #if defined(ABI_ELFv2)
-  const int code_size = (num_features+1+2*7)*BytesPerInstWord; // TODO(asmundak): calculation is incorrect.
+  // 1 InstWord per call for the blr instruction.
+  const int code_size = (num_features+1+2*1)*BytesPerInstWord;
 #else
   // 7 InstWords for each call (function descriptor + blr instruction).
   const int code_size = (num_features+1+2*7)*BytesPerInstWord;
@@ -545,7 +546,8 @@
   a->popcntw(R7, R5);                          // code[6]  -> popcntw
   a->fcfids(F3, F4);                           // code[7]  -> fcfids
   a->vand(VR0, VR0, VR0);                      // code[8]  -> vand
-  a->lqarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[9]  -> lqarx_m
+  // arg0 of lqarx must be an even register, (arg1 + arg2) must be a multiple of 16
+  a->lqarx_unchecked(R6, R3_ARG1, R4_ARG2, 1); // code[9]  -> lqarx_m
   a->vcipher(VR0, VR1, VR2);                   // code[10] -> vcipher
   a->vpmsumb(VR0, VR1, VR2);                   // code[11] -> vpmsumb
   a->tcheck(0);                                // code[12] -> tcheck
@@ -577,7 +579,8 @@
 
   // Execute code. Illegal instructions will be replaced by 0 in the signal handler.
   VM_Version::_is_determine_features_test_running = true;
-  (*test)((address)mid_of_test_area, (uint64_t)0);
+  // We must align the first argument to 16 bytes because of the lqarx check.
+  (*test)((address)align_size_up((intptr_t)mid_of_test_area, 16), (uint64_t)0);
   VM_Version::_is_determine_features_test_running = false;
 
   // determine which instructions are legal.
@@ -619,12 +622,12 @@
   MacroAssembler* a = new MacroAssembler(&cb);
 
   // Emit code.
-  uint64_t (*get_dscr)() = (uint64_t(*)())(void *)a->emit_fd();
+  uint64_t (*get_dscr)() = (uint64_t(*)())(void *)a->function_entry();
   uint32_t *code = (uint32_t *)a->pc();
   a->mfdscr(R3);
   a->blr();
 
-  void (*set_dscr)(long) = (void(*)(long))(void *)a->emit_fd();
+  void (*set_dscr)(long) = (void(*)(long))(void *)a->function_entry();
   a->mtdscr(R3);
   a->blr();
 
--- a/src/cpu/sparc/vm/frame_sparc.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/cpu/sparc/vm/frame_sparc.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -599,12 +599,6 @@
   return next_younger_sp_or_null(valid_sp, sp) != NULL;
 }
 
-
-bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
-  assert(is_interpreted_frame(), "must be interpreter frame");
-  return this->fp() == fp;
-}
-
 bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
 #ifdef CC_INTERP
   // Is there anything to do?
--- a/src/cpu/sparc/vm/globals_sparc.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/cpu/sparc/vm/globals_sparc.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -81,7 +81,7 @@
 
 define_pd_global(uintx, TypeProfileLevel, 111);
 
-#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
+#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \
                                                                             \
   product(intx, UseVIS, 99,                                                 \
           "Highest supported VIS instructions set on Sparc")                \
--- a/src/cpu/x86/vm/frame_x86.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/cpu/x86/vm/frame_x86.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -524,17 +524,6 @@
   return frame(sender_sp(), link(), sender_pc());
 }
 
-
-bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
-  assert(is_interpreted_frame(), "must be interpreter frame");
-  Method* method = interpreter_frame_method();
-  // When unpacking an optimized frame the frame pointer is
-  // adjusted with:
-  int diff = (method->max_locals() - method->size_of_parameters()) *
-             Interpreter::stackElementWords;
-  return _fp == (fp - diff);
-}
-
 bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
 // QQQ
 #ifdef CC_INTERP
--- a/src/cpu/x86/vm/globals_x86.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/cpu/x86/vm/globals_x86.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -84,7 +84,7 @@
 
 define_pd_global(bool, PreserveFramePointer, false);
 
-#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
+#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \
                                                                             \
   develop(bool, IEEEPrecision, true,                                        \
           "Enables IEEE precision (for INTEL only)")                        \
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -4260,31 +4260,24 @@
 //////////////////////////////////////////////////////////////////////////////////
 
 
+void MacroAssembler::store_check(Register obj, Address dst) {
+  store_check(obj);
+}
+
 void MacroAssembler::store_check(Register obj) {
   // Does a store check for the oop in register obj. The content of
   // register obj is destroyed afterwards.
-  store_check_part_1(obj);
-  store_check_part_2(obj);
-}
-
-void MacroAssembler::store_check(Register obj, Address dst) {
-  store_check(obj);
-}
-
-
-// split the store check operation so that other instructions can be scheduled inbetween
-void MacroAssembler::store_check_part_1(Register obj) {
+
   BarrierSet* bs = Universe::heap()->barrier_set();
   assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
-  shrptr(obj, CardTableModRefBS::card_shift);
-}
-
-void MacroAssembler::store_check_part_2(Register obj) {
-  BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
+
   CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
 
+  shrptr(obj, CardTableModRefBS::card_shift);
+
+  Address card_addr;
+
   // The calculation for byte_map_base is as follows:
   // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
   // So this essentially converts an address to a displacement and it will
@@ -4292,8 +4285,7 @@
   // large for a 32bit displacement.
   intptr_t disp = (intptr_t) ct->byte_map_base;
   if (is_simm32(disp)) {
-    Address cardtable(noreg, obj, Address::times_1, disp);
-    movb(cardtable, 0);
+    card_addr = Address(noreg, obj, Address::times_1, disp);
   } else {
     // By doing it as an ExternalAddress 'disp' could be converted to a rip-relative
     // displacement and done in a single instruction given favorable mapping and a
@@ -4301,7 +4293,21 @@
     // entry and that entry is not properly handled by the relocation code.
     AddressLiteral cardtable((address)ct->byte_map_base, relocInfo::none);
     Address index(noreg, obj, Address::times_1);
-    movb(as_Address(ArrayAddress(cardtable, index)), 0);
+    card_addr = as_Address(ArrayAddress(cardtable, index));
+  }
+
+  int dirty = CardTableModRefBS::dirty_card_val();
+  if (UseCondCardMark) {
+    Label L_already_dirty;
+    if (UseConcMarkSweepGC) {
+      membar(Assembler::StoreLoad);
+    }
+    cmpb(card_addr, dirty);
+    jcc(Assembler::equal, L_already_dirty);
+    movb(card_addr, dirty);
+    bind(L_already_dirty);
+  } else {
+    movb(card_addr, dirty);
   }
 }
 
--- a/src/cpu/x86/vm/macroAssembler_x86.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/cpu/x86/vm/macroAssembler_x86.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -315,10 +315,6 @@
 
 #endif // INCLUDE_ALL_GCS
 
-  // split store_check(Register obj) to enhance instruction interleaving
-  void store_check_part_1(Register obj);
-  void store_check_part_2(Register obj);
-
   // C 'boolean' to Java boolean: x == 0 ? 0 : 1
   void c2bool(Register x);
 
--- a/src/cpu/zero/vm/globals_zero.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/cpu/zero/vm/globals_zero.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -63,7 +63,8 @@
 
 define_pd_global(bool, PreserveFramePointer, false);
 
-#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)  \
+#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint)  \
+                                                                            \
   product(bool, UseFastEmptyMethods, true,                                  \
           "Use fast method entry code for empty methods")                   \
                                                                             \
--- a/src/os/aix/vm/decoder_aix.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os/aix/vm/decoder_aix.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2013 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -38,8 +38,8 @@
 
   virtual bool demangle(const char* symbol, char* buf, int buflen) { return false; } // demangled by getFuncName
 
-  virtual bool decode(address addr, char* buf, int buflen, int* offset, const char* modulepath) {
-    return (::getFuncName((codeptr_t)addr, buf, buflen, offset, 0, 0, 0) == 0);
+  virtual bool decode(address addr, char* buf, int buflen, int* offset, const char* modulepath, bool demangle) {
+    return (::getFuncName((codeptr_t)addr, buf, buflen, offset, 0, 0, 0, demangle) == 0);
   }
   virtual bool decode(address addr, char *buf, int buflen, int* offset, const void *base) {
     ShouldNotReachHere();
--- a/src/os/aix/vm/globals_aix.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os/aix/vm/globals_aix.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -29,7 +29,7 @@
 //
 // Defines Aix specific flags. They are not available on other platforms.
 //
-#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
+#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, range, constraint) \
                                                                                     \
   /* Use 64K pages for virtual memory (shmat). */                                   \
   product(bool, Use64KPages, true,                                                  \
--- a/src/os/aix/vm/os_aix.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os/aix/vm/os_aix.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1439,7 +1439,8 @@
 }
 
 bool os::dll_address_to_function_name(address addr, char *buf,
-                                      int buflen, int *offset) {
+                                      int buflen, int *offset,
+                                      bool demangle) {
   if (offset) {
     *offset = -1;
   }
@@ -1454,7 +1455,7 @@
   }
 
   // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
-  return Decoder::decode(addr, buf, buflen, offset);
+  return Decoder::decode(addr, buf, buflen, offset, demangle);
 }
 
 static int getModuleName(codeptr_t pc,                    // [in] program counter
@@ -1653,7 +1654,7 @@
   }
 }
 
-void os::pd_print_cpu_info(outputStream* st) {
+void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
   // cpu
   st->print("CPU:");
   st->print("total %d", os::processor_count());
@@ -3761,10 +3762,6 @@
   return fetcher.result();
 }
 
-// Not neede on Aix.
-// int os::Aix::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) {
-// }
-
 ////////////////////////////////////////////////////////////////////////////////
 // debug support
 
--- a/src/os/aix/vm/porting_aix.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os/aix/vm/porting_aix.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -114,7 +114,8 @@
     int* p_displacement,             // [out] optional: displacement (-1 if not available)
     const struct tbtable** p_tb,     // [out] optional: ptr to traceback table to get further
                                      //                 information (NULL if not available)
-    char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
+    char* p_errmsg, size_t errmsglen,// [out] optional: user provided buffer for error messages
+    bool demangle                    // [in] whether to demangle the name
   ) {
   struct tbtable* tb = 0;
   unsigned int searchcount = 0;
@@ -216,15 +217,17 @@
       p_name[0] = '\0';
 
       // If it is a C++ name, try and demangle it using the Demangle interface (see demangle.h).
-      char* rest;
-      Name* const name = Demangle(buf, rest);
-      if (name) {
-        const char* const demangled_name = name->Text();
-        if (demangled_name) {
-          strncpy(p_name, demangled_name, namelen-1);
-          p_name[namelen-1] = '\0';
+      if (demangle) {
+        char* rest;
+        Name* const name = Demangle(buf, rest);
+        if (name) {
+          const char* const demangled_name = name->Text();
+          if (demangled_name) {
+            strncpy(p_name, demangled_name, namelen-1);
+            p_name[namelen-1] = '\0';
+          }
+          delete name;
         }
-        delete name;
       }
 
       // Fallback: if demangling did not work, just provide the unmangled name.
@@ -325,7 +328,7 @@
       int displacement = 0;
 
       if (getFuncName((codeptr_t) p, funcname, sizeof(funcname), &displacement,
-                      NULL, NULL, 0) == 0) {
+                      NULL, NULL, 0, true /* demangle */) == 0) {
         if (funcname[0] != '\0') {
           const char* const interned = dladdr_fixed_strings.intern(funcname);
           info->dli_sname = interned;
--- a/src/os/aix/vm/porting_aix.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os/aix/vm/porting_aix.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -87,7 +87,8 @@
       char* p_name, size_t namelen,    // [out] optional: user provided buffer for the function name
       int* p_displacement,             // [out] optional: displacement
       const struct tbtable** p_tb,     // [out] optional: ptr to traceback table to get further information
-      char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
+      char* p_errmsg, size_t errmsglen,// [out] optional: user provided buffer for error messages
+      bool demangle = true             // [in] whether to demangle the name
     );
 
 // -------------------------------------------------------------------------
--- a/src/os/bsd/vm/decoder_machO.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os/bsd/vm/decoder_machO.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@
   virtual bool decode(address pc, char* buf, int buflen, int* offset,
                       const void* base);
   virtual bool decode(address pc, char* buf, int buflen, int* offset,
-                      const char* module_path = NULL) {
+                      const char* module_path, bool demangle) {
     ShouldNotReachHere();
     return false;
   }
--- a/src/os/bsd/vm/globals_bsd.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os/bsd/vm/globals_bsd.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,19 +28,20 @@
 //
 // Defines Bsd specific flags. They are not available on other platforms.
 //
-#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
-  product(bool, UseOprofile, false,                                     \
-        "enable support for Oprofile profiler")                         \
-                                                                        \
-  product(bool, UseBsdPosixThreadCPUClocks, true,                     \
-          "enable fast Bsd Posix clocks where available")             \
-/*  NB: The default value of UseBsdPosixThreadCPUClocks may be        \
-    overridden in Arguments::parse_each_vm_init_arg.  */                \
-                                                                        \
-  product(bool, UseHugeTLBFS, false,                                    \
-          "Use MAP_HUGETLB for large pages")                            \
-                                                                        \
-  product(bool, UseSHM, false,                                          \
+#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, range, constraint) \
+                                                                                \
+  product(bool, UseOprofile, false,                                             \
+        "enable support for Oprofile profiler")                                 \
+                                                                                \
+  /*  NB: The default value of UseBsdPosixThreadCPUClocks may be  */            \
+  /*  overridden in Arguments::parse_each_vm_init_arg.            */            \
+  product(bool, UseBsdPosixThreadCPUClocks, true,                               \
+          "enable fast Bsd Posix clocks where available")                       \
+                                                                                \
+  product(bool, UseHugeTLBFS, false,                                            \
+          "Use MAP_HUGETLB for large pages")                                    \
+                                                                                \
+  product(bool, UseSHM, false,                                                  \
           "Use SYSV shared memory for large pages")
 
 //
--- a/src/os/bsd/vm/os_bsd.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os/bsd/vm/os_bsd.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -637,11 +637,6 @@
 //////////////////////////////////////////////////////////////////////////////
 // create new thread
 
-// check if it's safe to start a new thread
-static bool _thread_safety_check(Thread* thread) {
-  return true;
-}
-
 #ifdef __APPLE__
 // library handle for calling objc_registerThreadWithCollector()
 // without static linking to the libobjc library
@@ -681,15 +676,6 @@
   OSThread* osthread = thread->osthread();
   Monitor* sync = osthread->startThread_lock();
 
-  // non floating stack BsdThreads needs extra check, see above
-  if (!_thread_safety_check(thread)) {
-    // notify parent thread
-    MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
-    osthread->set_state(ZOMBIE);
-    sync->notify_all();
-    return NULL;
-  }
-
   osthread->set_thread_id(os::Bsd::gettid());
 
 #ifdef __APPLE__
@@ -1339,7 +1325,8 @@
 #define MACH_MAXSYMLEN 256
 
 bool os::dll_address_to_function_name(address addr, char *buf,
-                                      int buflen, int *offset) {
+                                      int buflen, int *offset,
+                                      bool demangle) {
   // buf is not optional, but offset is optional
   assert(buf != NULL, "sanity check");
 
@@ -1349,7 +1336,7 @@
   if (dladdr((void*)addr, &dlinfo) != 0) {
     // see if we have a matching symbol
     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
-      if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
+      if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
       }
       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
@@ -1358,15 +1345,16 @@
     // no matching symbol so try for just file info
     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
-                          buf, buflen, offset, dlinfo.dli_fname)) {
+                          buf, buflen, offset, dlinfo.dli_fname, demangle)) {
         return true;
       }
     }
 
     // Handle non-dynamic manually:
     if (dlinfo.dli_fbase != NULL &&
-        Decoder::decode(addr, localbuf, MACH_MAXSYMLEN, offset, dlinfo.dli_fbase)) {
-      if (!Decoder::demangle(localbuf, buf, buflen)) {
+        Decoder::decode(addr, localbuf, MACH_MAXSYMLEN, offset,
+                        dlinfo.dli_fbase)) {
+      if (!(demangle && Decoder::demangle(localbuf, buf, buflen))) {
         jio_snprintf(buf, buflen, "%s", localbuf);
       }
       return true;
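
The new demangle parameter threaded through os::dll_address_to_function_name() and Decoder::decode() implements one pattern throughout these hunks: demangle only when the caller asks for it, and fall back to the raw dli_sname otherwise. A minimal stand-alone sketch of that pattern, assuming abi::__cxa_demangle in place of HotSpot's Decoder (illustrative only, not the patched code):

    #include <cxxabi.h>
    #include <cstdio>
    #include <cstdlib>

    // Sketch only: demangle 'raw' into 'buf' when requested; otherwise (or on
    // failure) copy the raw symbol name unchanged.
    static void symbol_to_buf(const char* raw, char* buf, int buflen, bool demangle) {
      char* pretty = NULL;
      if (demangle) {
        int status = 0;
        pretty = abi::__cxa_demangle(raw, NULL, NULL, &status);
      }
      snprintf(buf, (size_t)buflen, "%s", pretty != NULL ? pretty : raw);
      free(pretty);  // free(NULL) is a no-op
    }
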
@@ -1706,7 +1694,7 @@
   os::Posix::print_load_average(st);
 }
 
-void os::pd_print_cpu_info(outputStream* st) {
+void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
   // Nothing to do for now.
 }
 
@@ -2276,8 +2264,6 @@
   return os::uncommit_memory(addr, size);
 }
 
-static address _highest_vm_reserved_address = NULL;
-
 // If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
 // at 'requested_addr'. If there are existing memory mappings at the same
 // location, however, they will be overwritten. If 'fixed' is false,
@@ -2300,23 +2286,9 @@
   addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
                        flags, -1, 0);
 
-  if (addr != MAP_FAILED) {
-    // anon_mmap() should only get called during VM initialization,
-    // don't need lock (actually we can skip locking even it can be called
-    // from multiple threads, because _highest_vm_reserved_address is just a
-    // hint about the upper limit of non-stack memory regions.)
-    if ((address)addr + bytes > _highest_vm_reserved_address) {
-      _highest_vm_reserved_address = (address)addr + bytes;
-    }
-  }
-
   return addr == MAP_FAILED ? NULL : addr;
 }
 
-// Don't update _highest_vm_reserved_address, because there might be memory
-// regions above addr + size. If so, releasing a memory region only creates
-// a hole in the address space, it doesn't help prevent heap-stack collision.
-//
 static int anon_munmap(char * addr, size_t size) {
   return ::munmap(addr, size) == 0;
 }
@@ -2490,15 +2462,7 @@
   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
 
   // Repeatedly allocate blocks until the block is allocated at the
-  // right spot. Give up after max_tries. Note that reserve_memory() will
-  // automatically update _highest_vm_reserved_address if the call is
-  // successful. The variable tracks the highest memory address every reserved
-  // by JVM. It is used to detect heap-stack collision if running with
-  // fixed-stack BsdThreads. Because here we may attempt to reserve more
-  // space than needed, it could confuse the collision detecting code. To
-  // solve the problem, save current _highest_vm_reserved_address and
-  // calculate the correct value before return.
-  address old_highest = _highest_vm_reserved_address;
+  // right spot.
 
   // Bsd mmap allows caller to pass an address as hint; give it a try first,
   // if kernel honors the hint then we can return immediately.
@@ -2552,10 +2516,8 @@
   }
 
   if (i < max_tries) {
-    _highest_vm_reserved_address = MAX2(old_highest, (address)requested_addr + bytes);
     return requested_addr;
   } else {
-    _highest_vm_reserved_address = old_highest;
     return NULL;
   }
 }
@@ -3715,12 +3677,6 @@
   return fetcher.result();
 }
 
-int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond,
-                                 pthread_mutex_t *_mutex,
-                                 const struct timespec *_abstime) {
-  return pthread_cond_timedwait(_cond, _mutex, _abstime);
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // debug support
 
@@ -4286,7 +4242,7 @@
   // In that case, we should propagate the notify to another waiter.
 
   while (_Event < 0) {
-    status = os::Bsd::safe_cond_timedwait(_cond, _mutex, &abst);
+    status = pthread_cond_timedwait(_cond, _mutex, &abst);
     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
       pthread_cond_destroy(_cond);
       pthread_cond_init(_cond, NULL);
@@ -4492,7 +4448,7 @@
   if (time == 0) {
     status = pthread_cond_wait(_cond, _mutex);
   } else {
-    status = os::Bsd::safe_cond_timedwait(_cond, _mutex, &absTime);
+    status = pthread_cond_timedwait(_cond, _mutex, &absTime);
     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
       pthread_cond_destroy(_cond);
       pthread_cond_init(_cond, NULL);
--- a/src/os/bsd/vm/os_bsd.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os/bsd/vm/os_bsd.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -30,9 +30,6 @@
 // Information about the protection of the page at address '0' on this os.
 static bool zero_page_read_protected() { return true; }
 
-// pthread_getattr_np comes with BsdThreads-0.9-7 on RedHat 7.1
-typedef int (*pthread_getattr_func_type)(pthread_t, pthread_attr_t *);
-
 #ifdef __APPLE__
 // Mac OS X doesn't support clock_gettime. Stub out the type, it is
 // unused
@@ -145,9 +142,6 @@
 
   // none present
 
-  // BsdThreads work-around for 6292965
-  static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);
-
  private:
   typedef int (*sched_getcpu_func_t)(void);
   typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
--- a/src/os/linux/vm/globals_linux.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os/linux/vm/globals_linux.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,14 +28,15 @@
 //
 // Defines Linux specific flags. They are not available on other platforms.
 //
-#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
+#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, range, constraint) \
+                                                                        \
   product(bool, UseOprofile, false,                                     \
         "enable support for Oprofile profiler")                         \
                                                                         \
+  /*  NB: The default value of UseLinuxPosixThreadCPUClocks may be   */ \
+  /* overridden in Arguments::parse_each_vm_init_arg.                */ \
   product(bool, UseLinuxPosixThreadCPUClocks, true,                     \
           "enable fast Linux Posix clocks where available")             \
-/*  NB: The default value of UseLinuxPosixThreadCPUClocks may be        \
-    overridden in Arguments::parse_each_vm_init_arg.  */                \
                                                                         \
   product(bool, UseHugeTLBFS, false,                                    \
           "Use MAP_HUGETLB for large pages")                            \
--- a/src/os/linux/vm/os_linux.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os/linux/vm/os_linux.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -135,8 +135,6 @@
 pthread_t os::Linux::_main_thread;
 int os::Linux::_page_size = -1;
 const int os::Linux::_vm_default_page_size = (8 * K);
-bool os::Linux::_is_floating_stack = false;
-bool os::Linux::_is_NPTL = false;
 bool os::Linux::_supports_fast_thread_cpu_time = false;
 const char * os::Linux::_glibc_version = NULL;
 const char * os::Linux::_libpthread_version = NULL;
@@ -150,8 +148,6 @@
 static sigset_t check_signal_done;
 static bool check_signals = true;
 
-static pid_t _initial_pid = 0;
-
 // Signal number used to suspend/resume a thread
 
 // do not use any signal number less than SIGSEGV, see 4355769
@@ -223,18 +219,10 @@
 //
 // Returns the kernel thread id of the currently running thread. Kernel
 // thread id is used to access /proc.
-//
-// (Note that getpid() on LinuxThreads returns kernel thread id too; but
-// on NPTL, it returns the same pid for all threads, as required by POSIX.)
-//
 pid_t os::Linux::gettid() {
   int rslt = syscall(SYS_gettid);
-  if (rslt == -1) {
-    // old kernel, no NPTL support
-    return getpid();
-  } else {
-    return (pid_t)rslt;
-  }
+  assert(rslt != -1, "must be."); // old linuxthreads implementation?
+  return (pid_t)rslt;
 }
 
 // Most versions of linux have a bug where the number of processors are
@@ -508,68 +496,48 @@
 // detecting pthread library
 
 void os::Linux::libpthread_init() {
-  // Save glibc and pthread version strings. Note that _CS_GNU_LIBC_VERSION
-  // and _CS_GNU_LIBPTHREAD_VERSION are supported in glibc >= 2.3.2. Use a
-  // generic name for earlier versions.
-  // Define macros here so we can build HotSpot on old systems.
-#ifndef _CS_GNU_LIBC_VERSION
-  #define _CS_GNU_LIBC_VERSION 2
+  // Save glibc and pthread version strings.
+#if !defined(_CS_GNU_LIBC_VERSION) || \
+    !defined(_CS_GNU_LIBPTHREAD_VERSION)
+  #error "glibc too old (< 2.3.2)"
 #endif
-#ifndef _CS_GNU_LIBPTHREAD_VERSION
-  #define _CS_GNU_LIBPTHREAD_VERSION 3
-#endif
 
   size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
-  if (n > 0) {
-    char *str = (char *)malloc(n, mtInternal);
-    confstr(_CS_GNU_LIBC_VERSION, str, n);
-    os::Linux::set_glibc_version(str);
-  } else {
-    // _CS_GNU_LIBC_VERSION is not supported, try gnu_get_libc_version()
-    static char _gnu_libc_version[32];
-    jio_snprintf(_gnu_libc_version, sizeof(_gnu_libc_version),
-                 "glibc %s %s", gnu_get_libc_version(), gnu_get_libc_release());
-    os::Linux::set_glibc_version(_gnu_libc_version);
-  }
+  assert(n > 0, "cannot retrieve glibc version");
+  char *str = (char *)malloc(n, mtInternal);
+  confstr(_CS_GNU_LIBC_VERSION, str, n);
+  os::Linux::set_glibc_version(str);
 
   n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
-  if (n > 0) {
-    char *str = (char *)malloc(n, mtInternal);
-    confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
-    // Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells
-    // us "NPTL-0.29" even we are running with LinuxThreads. Check if this
-    // is the case. LinuxThreads has a hard limit on max number of threads.
-    // So sysconf(_SC_THREAD_THREADS_MAX) will return a positive value.
-    // On the other hand, NPTL does not have such a limit, sysconf()
-    // will return -1 and errno is not changed. Check if it is really NPTL.
-    if (strcmp(os::Linux::glibc_version(), "glibc 2.3.2") == 0 &&
-        strstr(str, "NPTL") &&
-        sysconf(_SC_THREAD_THREADS_MAX) > 0) {
-      free(str);
-      os::Linux::set_libpthread_version("linuxthreads");
-    } else {
-      os::Linux::set_libpthread_version(str);
-    }
-  } else {
-    // glibc before 2.3.2 only has LinuxThreads.
-    os::Linux::set_libpthread_version("linuxthreads");
-  }
-
-  if (strstr(libpthread_version(), "NPTL")) {
-    os::Linux::set_is_NPTL();
-  } else {
-    os::Linux::set_is_LinuxThreads();
-  }
-
-  // LinuxThreads have two flavors: floating-stack mode, which allows variable
-  // stack size; and fixed-stack mode. NPTL is always floating-stack.
-  if (os::Linux::is_NPTL() || os::Linux::supports_variable_stack_size()) {
-    os::Linux::set_is_floating_stack();
-  }
+  assert(n > 0, "cannot retrieve pthread version");
+  str = (char *)malloc(n, mtInternal);
+  confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
+  os::Linux::set_libpthread_version(str);
 }
 
 /////////////////////////////////////////////////////////////////////////////
-// thread stack
+// thread stack expansion
+
+// os::Linux::manually_expand_stack() takes care of expanding the thread
+// stack. Note that this is normally not needed: pthread allocates thread
+// stacks with mmap() without MAP_NORESERVE, so the stack is already
+// committed and there is no need to expand it manually.
+//
+// Manually expanding the stack was historically needed on LinuxThreads
+// thread stacks, which were allocated with mmap(MAP_GROWSDOWN). Nowadays
+// it is kept to deal with very rare corner cases:
+//
+// For one, a user may run the VM on their own threading implementation
+// whose stacks are - like the old LinuxThreads - allocated with
+// mmap(MAP_GROWSDOWN).
+//
+// Also, this code may be needed if the VM is running on the primordial
+// thread. Normally we avoid running on the primordial thread; however,
+// a user may still invoke the VM on it.
+//
+// The following historical comment describes the details of running
+// on a thread stack allocated with mmap(MAP_GROWSDOWN):
+
 
 // Force Linux kernel to expand current thread stack. If "bottom" is close
 // to the stack guard, caller should block all signals.
@@ -593,10 +561,7 @@
 //   stack overflow detection.
 //
 //   Newer version of LinuxThreads (since glibc-2.2, or, RH-7.x) and NPTL do
-//   not use this flag. However, the stack of initial thread is not created
-//   by pthread, it is still MAP_GROWSDOWN. Also it's possible (though
-//   unlikely) that user code can create a thread with MAP_GROWSDOWN stack
-//   and then attach the thread to JVM.
+//   not use MAP_GROWSDOWN.
 //
 // To get around the problem and allow stack banging on Linux, we need to
 // manually expand thread stack after receiving the SIGSEGV.
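
To make the retained mechanism concrete, here is a minimal sketch (not the HotSpot implementation, which additionally aligns the target address to a page boundary and expects the caller to block signals) of what manually expanding a MAP_GROWSDOWN stack down to a given bottom address looks like:

    #include <alloca.h>
    #include <stddef.h>

    // Sketch only: grow the current thread's MAP_GROWSDOWN stack down to
    // 'bottom' by alloca'ing the gap and touching it, so the kernel extends
    // the mapping before the VM bangs pages below the current stack pointer.
    static void expand_stack_to(char* bottom) {
      char* sp = (char*)&bottom;       // approximates the current stack pointer
      if (sp > bottom) {
        volatile char* p = (volatile char*) alloca((size_t)(sp - bottom));
        p[0] = '\0';                   // touch the lowest byte of the new range
      }
    }
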
@@ -671,45 +636,6 @@
 //////////////////////////////////////////////////////////////////////////////
 // create new thread
 
-static address highest_vm_reserved_address();
-
-// check if it's safe to start a new thread
-static bool _thread_safety_check(Thread* thread) {
-  if (os::Linux::is_LinuxThreads() && !os::Linux::is_floating_stack()) {
-    // Fixed stack LinuxThreads (SuSE Linux/x86, and some versions of Redhat)
-    //   Heap is mmap'ed at lower end of memory space. Thread stacks are
-    //   allocated (MAP_FIXED) from high address space. Every thread stack
-    //   occupies a fixed size slot (usually 2Mbytes, but user can change
-    //   it to other values if they rebuild LinuxThreads).
-    //
-    // Problem with MAP_FIXED is that mmap() can still succeed even part of
-    // the memory region has already been mmap'ed. That means if we have too
-    // many threads and/or very large heap, eventually thread stack will
-    // collide with heap.
-    //
-    // Here we try to prevent heap/stack collision by comparing current
-    // stack bottom with the highest address that has been mmap'ed by JVM
-    // plus a safety margin for memory maps created by native code.
-    //
-    // This feature can be disabled by setting ThreadSafetyMargin to 0
-    //
-    if (ThreadSafetyMargin > 0) {
-      address stack_bottom = os::current_stack_base() - os::current_stack_size();
-
-      // not safe if our stack extends below the safety margin
-      return stack_bottom - ThreadSafetyMargin >= highest_vm_reserved_address();
-    } else {
-      return true;
-    }
-  } else {
-    // Floating stack LinuxThreads or NPTL:
-    //   Unlike fixed stack LinuxThreads, thread stacks are not MAP_FIXED. When
-    //   there's not enough space left, pthread_create() will fail. If we come
-    //   here, that means enough space has been reserved for stack.
-    return true;
-  }
-}
-
 // Thread start routine for all newly created threads
 static void *java_start(Thread *thread) {
   // Try to randomize the cache line index of hot stack frames.
@@ -726,15 +652,6 @@
   OSThread* osthread = thread->osthread();
   Monitor* sync = osthread->startThread_lock();
 
-  // non floating stack LinuxThreads needs extra check, see above
-  if (!_thread_safety_check(thread)) {
-    // notify parent thread
-    MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
-    osthread->set_state(ZOMBIE);
-    sync->notify_all();
-    return NULL;
-  }
-
   // thread_id is kernel thread id (similar to Solaris LWP id)
   osthread->set_thread_id(os::Linux::gettid());
 
@@ -833,12 +750,6 @@
   ThreadState state;
 
   {
-    // Serialize thread creation if we are running with fixed stack LinuxThreads
-    bool lock = os::Linux::is_LinuxThreads() && !os::Linux::is_floating_stack();
-    if (lock) {
-      os::Linux::createThread_lock()->lock_without_safepoint_check();
-    }
-
     pthread_t tid;
     int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
 
@@ -851,7 +762,6 @@
       // Need to clean up stuff we've allocated so far
       thread->set_osthread(NULL);
       delete osthread;
-      if (lock) os::Linux::createThread_lock()->unlock();
       return false;
     }
 
@@ -866,10 +776,6 @@
         sync_with_child->wait(Mutex::_no_safepoint_check_flag);
       }
     }
-
-    if (lock) {
-      os::Linux::createThread_lock()->unlock();
-    }
   }
 
   // Aborted due to thread limit being reached
@@ -1497,7 +1403,6 @@
 
 // Die immediately, no exit hook, no abort hook, no cleanup.
 void os::die() {
-  // _exit() on LinuxThreads only kills current thread
   ::abort();
 }
 
@@ -1520,24 +1425,7 @@
 
 intx os::current_thread_id() { return (intx)pthread_self(); }
 int os::current_process_id() {
-
-  // Under the old linux thread library, linux gives each thread
-  // its own process id. Because of this each thread will return
-  // a different pid if this method were to return the result
-  // of getpid(2). Linux provides no api that returns the pid
-  // of the launcher thread for the vm. This implementation
-  // returns a unique pid, the pid of the launcher thread
-  // that starts the vm 'process'.
-
-  // Under the NPTL, getpid() returns the same pid as the
-  // launcher thread rather than a unique pid per thread.
-  // Use gettid() if you want the old pre NPTL behaviour.
-
-  // if you are looking for the result of a call to getpid() that
-  // returns a unique pid for the calling thread, then look at the
-  // OSThread::thread_id() method in osThread_linux.hpp file
-
-  return (int)(_initial_pid ? _initial_pid : getpid());
+  return ::getpid();
 }
 
 // DLL functions
@@ -1623,7 +1511,8 @@
 }
 
 bool os::dll_address_to_function_name(address addr, char *buf,
-                                      int buflen, int *offset) {
+                                      int buflen, int *offset,
+                                      bool demangle) {
   // buf is not optional, but offset is optional
   assert(buf != NULL, "sanity check");
 
@@ -1632,7 +1521,7 @@
   if (dladdr((void*)addr, &dlinfo) != 0) {
     // see if we have a matching symbol
     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
-      if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
+      if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
       }
       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
@@ -1641,7 +1530,7 @@
     // no matching symbol so try for just file info
     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
-                          buf, buflen, offset, dlinfo.dli_fname)) {
+                          buf, buflen, offset, dlinfo.dli_fname, demangle)) {
         return true;
       }
     }
@@ -2183,9 +2072,6 @@
   st->print("libc:");
   st->print("%s ", os::Linux::glibc_version());
   st->print("%s ", os::Linux::libpthread_version());
-  if (os::Linux::is_LinuxThreads()) {
-    st->print("(%s stack)", os::Linux::is_floating_stack() ? "floating" : "fixed");
-  }
   st->cr();
 }
 
@@ -2215,12 +2101,52 @@
   st->cr();
 }
 
-void os::pd_print_cpu_info(outputStream* st) {
-  st->print("\n/proc/cpuinfo:\n");
-  if (!_print_ascii_file("/proc/cpuinfo", st)) {
-    st->print("  <Not Available>");
-  }
-  st->cr();
+// Print the first "model name" line and the first "flags" line
+// that we find and nothing more. We assume "model name" comes
+// before "flags" so if we find a second "model name", then the
+// "flags" field is considered missing.
+static bool print_model_name_and_flags(outputStream* st, char* buf, size_t buflen) {
+#if defined(IA32) || defined(AMD64)
+  // Other platforms have less repetitive cpuinfo files
+  FILE *fp = fopen("/proc/cpuinfo", "r");
+  if (fp) {
+    // Assume model name comes before flags
+    bool model_name_printed = false;
+    while (!feof(fp)) {
+      if (fgets(buf, buflen, fp)) {
+        if (strstr(buf, "model name") != NULL) {
+          if (!model_name_printed) {
+            st->print_raw("\nCPU Model and flags from /proc/cpuinfo:\n");
+            st->print_raw(buf);
+            model_name_printed = true;
+          } else {
+            // model name printed but not flags?  Odd, just return
+            fclose(fp);
+            return true;
+          }
+        }
+        // print the flags line too
+        if (strstr(buf, "flags") != NULL) {
+          st->print_raw(buf);
+          fclose(fp);
+          return true;
+        }
+      }
+    }
+    fclose(fp);
+  }
+#endif // x86 platforms
+  return false;
+}
+
+void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
+  // Only print the model name if the platform provides this as a summary
+  if (!print_model_name_and_flags(st, buf, buflen)) {
+    st->print("\n/proc/cpuinfo:\n");
+    if (!_print_ascii_file("/proc/cpuinfo", st)) {
+      st->print_cr("  <Not Available>");
+    }
+  }
 }
 
 void os::print_siginfo(outputStream* st, void* siginfo) {
@@ -3044,8 +2970,6 @@
   return os::uncommit_memory(addr, size);
 }
 
-static address _highest_vm_reserved_address = NULL;
-
 // If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
 // at 'requested_addr'. If there are existing memory mappings at the same
 // location, however, they will be overwritten. If 'fixed' is false,
@@ -3068,23 +2992,9 @@
   addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
                        flags, -1, 0);
 
-  if (addr != MAP_FAILED) {
-    // anon_mmap() should only get called during VM initialization,
-    // don't need lock (actually we can skip locking even it can be called
-    // from multiple threads, because _highest_vm_reserved_address is just a
-    // hint about the upper limit of non-stack memory regions.)
-    if ((address)addr + bytes > _highest_vm_reserved_address) {
-      _highest_vm_reserved_address = (address)addr + bytes;
-    }
-  }
-
   return addr == MAP_FAILED ? NULL : addr;
 }
 
-// Don't update _highest_vm_reserved_address, because there might be memory
-// regions above addr + size. If so, releasing a memory region only creates
-// a hole in the address space, it doesn't help prevent heap-stack collision.
-//
 static int anon_munmap(char * addr, size_t size) {
   return ::munmap(addr, size) == 0;
 }
@@ -3098,10 +3008,6 @@
   return anon_munmap(addr, size);
 }
 
-static address highest_vm_reserved_address() {
-  return _highest_vm_reserved_address;
-}
-
 static bool linux_mprotect(char* addr, size_t size, int prot) {
   // Linux wants the mprotect address argument to be page aligned.
   char* bottom = (char*)align_size_down((intptr_t)addr, os::Linux::page_size());
@@ -3718,15 +3624,7 @@
   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
 
   // Repeatedly allocate blocks until the block is allocated at the
-  // right spot. Give up after max_tries. Note that reserve_memory() will
-  // automatically update _highest_vm_reserved_address if the call is
-  // successful. The variable tracks the highest memory address every reserved
-  // by JVM. It is used to detect heap-stack collision if running with
-  // fixed-stack LinuxThreads. Because here we may attempt to reserve more
-  // space than needed, it could confuse the collision detecting code. To
-  // solve the problem, save current _highest_vm_reserved_address and
-  // calculate the correct value before return.
-  address old_highest = _highest_vm_reserved_address;
+  // right spot.
 
   // Linux mmap allows caller to pass an address as hint; give it a try first,
   // if kernel honors the hint then we can return immediately.
@@ -3780,10 +3678,8 @@
   }
 
   if (i < max_tries) {
-    _highest_vm_reserved_address = MAX2(old_highest, (address)requested_addr + bytes);
     return requested_addr;
   } else {
-    _highest_vm_reserved_address = old_highest;
     return NULL;
   }
 }
@@ -4627,16 +4523,6 @@
   char dummy;   // used to get a guess on initial stack address
 //  first_hrtime = gethrtime();
 
-  // With LinuxThreads the JavaMain thread pid (primordial thread)
-  // is different than the pid of the java launcher thread.
-  // So, on Linux, the launcher thread pid is passed to the VM
-  // via the sun.java.launcher.pid property.
-  // Use this property instead of getpid() if it was correctly passed.
-  // See bug 6351349.
-  pid_t java_launcher_pid = (pid_t) Arguments::sun_java_launcher_pid();
-
-  _initial_pid = (java_launcher_pid > 0) ? java_launcher_pid : getpid();
-
   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
 
   init_random(1234567);
@@ -4769,9 +4655,8 @@
 
   Linux::libpthread_init();
   if (PrintMiscellaneous && (Verbose || WizardMode)) {
-    tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
-                  Linux::glibc_version(), Linux::libpthread_version(),
-                  Linux::is_floating_stack() ? "floating stack" : "fixed stack");
+    tty->print_cr("[HotSpot is running with %s, %s]\n",
+                  Linux::glibc_version(), Linux::libpthread_version());
   }
 
   if (UseNUMA) {
@@ -4946,22 +4831,6 @@
   return fetcher.result();
 }
 
-int os::Linux::safe_cond_timedwait(pthread_cond_t *_cond,
-                                   pthread_mutex_t *_mutex,
-                                   const struct timespec *_abstime) {
-  if (is_NPTL()) {
-    return pthread_cond_timedwait(_cond, _mutex, _abstime);
-  } else {
-    // 6292965: LinuxThreads pthread_cond_timedwait() resets FPU control
-    // word back to default 64bit precision if condvar is signaled. Java
-    // wants 53bit precision.  Save and restore current value.
-    int fpu = get_fpu_control_word();
-    int status = pthread_cond_timedwait(_cond, _mutex, _abstime);
-    set_fpu_control_word(fpu);
-    return status;
-  }
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // debug support
 
@@ -5585,7 +5454,7 @@
   // In that case, we should propagate the notify to another waiter.
 
   while (_Event < 0) {
-    status = os::Linux::safe_cond_timedwait(_cond, _mutex, &abst);
+    status = pthread_cond_timedwait(_cond, _mutex, &abst);
     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
       pthread_cond_destroy(_cond);
       pthread_cond_init(_cond, os::Linux::condAttr());
@@ -5813,7 +5682,7 @@
     status = pthread_cond_wait(&_cond[_cur_index], _mutex);
   } else {
     _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
-    status = os::Linux::safe_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
+    status = pthread_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
       pthread_cond_destroy(&_cond[_cur_index]);
       pthread_cond_init(&_cond[_cur_index], isAbsolute ? NULL : os::Linux::condAttr());
--- a/src/os/linux/vm/os_linux.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os/linux/vm/os_linux.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,9 +27,6 @@
 
 // Linux_OS defines the interface to Linux operating systems
 
-// pthread_getattr_np comes with LinuxThreads-0.9-7 on RedHat 7.1
-typedef int (*pthread_getattr_func_type)(pthread_t, pthread_attr_t *);
-
 // Information about the protection of the page at address '0' on this os.
 static bool zero_page_read_protected() { return true; }
 
@@ -63,8 +60,6 @@
   static const char *_glibc_version;
   static const char *_libpthread_version;
 
-  static bool _is_floating_stack;
-  static bool _is_NPTL;
   static bool _supports_fast_thread_cpu_time;
 
   static GrowableArray<int>* _cpu_to_node;
@@ -90,10 +85,6 @@
 
   static bool supports_variable_stack_size();
 
-  static void set_is_NPTL()                   { _is_NPTL = true;  }
-  static void set_is_LinuxThreads()           { _is_NPTL = false; }
-  static void set_is_floating_stack()         { _is_floating_stack = true; }
-
   static void rebuild_cpu_to_node_map();
   static GrowableArray<int>* cpu_to_node()    { return _cpu_to_node; }
 
@@ -178,14 +169,6 @@
   static const char *glibc_version()          { return _glibc_version; }
   static const char *libpthread_version()     { return _libpthread_version; }
 
-  // NPTL or LinuxThreads?
-  static bool is_LinuxThreads()               { return !_is_NPTL; }
-  static bool is_NPTL()                       { return _is_NPTL;  }
-
-  // NPTL is always floating stack. LinuxThreads could be using floating
-  // stack or fixed stack.
-  static bool is_floating_stack()             { return _is_floating_stack; }
-
   static void libpthread_init();
   static bool libnuma_init();
   static void* libnuma_dlsym(void* handle, const char* name);
@@ -234,9 +217,6 @@
 
   // none present
 
-  // LinuxThreads work-around for 6292965
-  static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);
-
  private:
   typedef int (*sched_getcpu_func_t)(void);
   typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
--- a/src/os/solaris/vm/globals_solaris.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os/solaris/vm/globals_solaris.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
 //
 // Defines Solaris specific flags. They are not available on other platforms.
 //
-#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
+#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, range, constraint) \
                                                                                \
   product(bool, UseExtendedFileIO, true,                                       \
           "Enable workaround for limitations of stdio FILE structure")
--- a/src/os/solaris/vm/os_solaris.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os/solaris/vm/os_solaris.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1627,7 +1627,8 @@
 static dladdr1_func_type dladdr1_func = NULL;
 
 bool os::dll_address_to_function_name(address addr, char *buf,
-                                      int buflen, int * offset) {
+                                      int buflen, int * offset,
+                                      bool demangle) {
   // buf is not optional, but offset is optional
   assert(buf != NULL, "sanity check");
 
@@ -1655,7 +1656,7 @@
       if (dlinfo.dli_saddr != NULL &&
           (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
         if (dlinfo.dli_sname != NULL) {
-          if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
+          if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
             jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
           }
           if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
@@ -1665,7 +1666,7 @@
       // no matching symbol so try for just file info
       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
-                            buf, buflen, offset, dlinfo.dli_fname)) {
+                            buf, buflen, offset, dlinfo.dli_fname, demangle)) {
           return true;
         }
       }
@@ -1679,7 +1680,7 @@
   if (dladdr((void *)addr, &dlinfo) != 0) {
     // see if we have a matching symbol
     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
-      if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
+      if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
         jio_snprintf(buf, buflen, dlinfo.dli_sname);
       }
       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
@@ -1688,7 +1689,7 @@
     // no matching symbol so try for just file info
     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
-                          buf, buflen, offset, dlinfo.dli_fname)) {
+                          buf, buflen, offset, dlinfo.dli_fname, demangle)) {
         return true;
       }
     }
@@ -1996,7 +1997,7 @@
   return status;
 }
 
-void os::pd_print_cpu_info(outputStream* st) {
+void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
   // Nothing to do for now.
 }
 
--- a/src/os/windows/vm/decoder_windows.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os/windows/vm/decoder_windows.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -162,7 +162,7 @@
      // current function and comparing the result
      address addr = (address)Decoder::demangle;
      char buf[MAX_PATH];
-     if (decode(addr, buf, sizeof(buf), NULL)) {
+     if (decode(addr, buf, sizeof(buf), NULL, NULL, true /* demangle */)) {
        _can_decode_in_vm = !strcmp(buf, "Decoder::demangle");
      }
   }
@@ -187,7 +187,7 @@
 }
 
 
-bool WindowsDecoder::decode(address addr, char *buf, int buflen, int* offset, const char* modulepath)  {
+bool WindowsDecoder::decode(address addr, char *buf, int buflen, int* offset, const char* modulepath, bool demangle_name)  {
   if (_pfnSymGetSymFromAddr64 != NULL) {
     PIMAGEHLP_SYMBOL64 pSymbol;
     char symbolInfo[MAX_PATH + sizeof(IMAGEHLP_SYMBOL64)];
@@ -197,7 +197,7 @@
     DWORD64 displacement;
     if (_pfnSymGetSymFromAddr64(::GetCurrentProcess(), (DWORD64)addr, &displacement, pSymbol)) {
       if (buf != NULL) {
-        if (demangle(pSymbol->Name, buf, buflen)) {
+        if (!(demangle_name && demangle(pSymbol->Name, buf, buflen))) {
           jio_snprintf(buf, buflen, "%s", pSymbol->Name);
         }
       }
--- a/src/os/windows/vm/decoder_windows.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os/windows/vm/decoder_windows.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@
 
   bool can_decode_C_frame_in_vm() const;
   bool demangle(const char* symbol, char *buf, int buflen);
-  bool decode(address addr, char *buf, int buflen, int* offset, const char* modulepath = NULL);
+  bool decode(address addr, char *buf, int buflen, int* offset, const char* modulepath, bool demangle);
   bool decode(address addr, char *buf, int buflen, int* offset, const void* base) {
     ShouldNotReachHere();
     return false;
--- a/src/os/windows/vm/globals_windows.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os/windows/vm/globals_windows.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,8 +28,7 @@
 //
 // Defines Windows specific flags. They are not available on other platforms.
 //
-#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd,       \
-                         diagnostic, notproduct)                         \
+#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, range, constraint) \
                                                                          \
   product(bool, UseUTCFileTimestamp, true,                               \
           "Adjust the timestamp returned from stat() to be UTC")
--- a/src/os/windows/vm/os_windows.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os/windows/vm/os_windows.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1369,11 +1369,12 @@
 }
 
 bool os::dll_address_to_function_name(address addr, char *buf,
-                                      int buflen, int *offset) {
+                                      int buflen, int *offset,
+                                      bool demangle) {
   // buf is not optional, but offset is optional
   assert(buf != NULL, "sanity check");
 
-  if (Decoder::decode(addr, buf, buflen, offset)) {
+  if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
     return true;
   }
   if (offset != NULL)  *offset  = -1;
@@ -1732,7 +1733,7 @@
   st->cr();
 }
 
-void os::pd_print_cpu_info(outputStream* st) {
+void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
   // Nothing to do for now.
 }
 
--- a/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -10,7 +10,7 @@
  * This code is distributed in the hope that it will be useful, but WITHOUT
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file hat
+ * version 2 for more details (a copy is included in the LICENSE file that
  * accompanied this code).
  *
  * You should have received a copy of the GNU General Public License version
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -619,53 +619,14 @@
 
 #ifdef AMD64
 size_t os::Linux::min_stack_allowed  = 64 * K;
-
-// amd64: pthread on amd64 is always in floating stack mode
-bool os::Linux::supports_variable_stack_size() {  return true; }
 #else
 size_t os::Linux::min_stack_allowed  =  (48 DEBUG_ONLY(+4))*K;
+#endif // AMD64
 
-#ifdef __GNUC__
-#define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;})
-#endif
-
-// Test if pthread library can support variable thread stack size. LinuxThreads
-// in fixed stack mode allocates 2M fixed slot for each thread. LinuxThreads
-// in floating stack mode and NPTL support variable stack size.
+// Test if pthread library can support variable thread stack size.
 bool os::Linux::supports_variable_stack_size() {
-  if (os::Linux::is_NPTL()) {
-     // NPTL, yes
-     return true;
-
-  } else {
-    // Note: We can't control default stack size when creating a thread.
-    // If we use non-default stack size (pthread_attr_setstacksize), both
-    // floating stack and non-floating stack LinuxThreads will return the
-    // same value. This makes it impossible to implement this function by
-    // detecting thread stack size directly.
-    //
-    // An alternative approach is to check %gs. Fixed-stack LinuxThreads
-    // do not use %gs, so its value is 0. Floating-stack LinuxThreads use
-    // %gs (either as LDT selector or GDT selector, depending on kernel)
-    // to access thread specific data.
-    //
-    // Note that %gs is a reserved glibc register since early 2001, so
-    // applications are not allowed to change its value (Ulrich Drepper from
-    // Redhat confirmed that all known offenders have been modified to use
-    // either %fs or TSD). In the worst case scenario, when VM is embedded in
-    // a native application that plays with %gs, we might see non-zero %gs
-    // even LinuxThreads is running in fixed stack mode. As the result, we'll
-    // return true and skip _thread_safety_check(), so we may not be able to
-    // detect stack-heap collisions. But otherwise it's harmless.
-    //
-#ifdef __GNUC__
-    return (GET_GS() != 0);
-#else
-    return false;
-#endif
-  }
+  return true;
 }
-#endif // AMD64
 
 // return default stack size for thr_type
 size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
--- a/src/os_cpu/linux_x86/vm/threadLS_linux_x86.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/os_cpu/linux_x86/vm/threadLS_linux_x86.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,8 +40,7 @@
 // actual memory pages are committed on demand.
 //
 // If an application creates and destroys a lot of threads, usually the
-// stack space freed by a thread will soon get reused by new thread
-// (this is especially true in NPTL or LinuxThreads in fixed-stack mode).
+// stack space freed by a thread will soon get reused by a new thread.
 // No memory page in _sp_map is wasted.
 //
 // However, it's still possible that we might end up populating &
--- a/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java	Thu Jun 25 09:48:50 2015 -0700
@@ -363,9 +363,6 @@
 
         // Set /On option
         addAttr(rv, "Optimization", opt);
-        // Set /FR option.
-        addAttr(rv, "BrowseInformation", "true");
-        addAttr(rv, "BrowseInformationFile", "$(IntDir)");
         // Set /MD option.
         addAttr(rv, "RuntimeLibrary", "MultiThreadedDLL");
         // Set /Oy- option
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1619,6 +1619,9 @@
   LIR_Opr dirty = LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val());
   if (UseCondCardMark) {
     LIR_Opr cur_value = new_register(T_INT);
+    if (UseConcMarkSweepGC) {
+      __ membar_storeload();
+    }
     __ move(card_addr, cur_value);
 
     LabelObj* L_already_dirty = new LabelObj();
--- a/src/share/vm/c1/c1_globals.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/c1/c1_globals.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,4 +25,4 @@
 #include "precompiled.hpp"
 #include "c1/c1_globals.hpp"
 
-C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
+C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG, IGNORE_RANGE, IGNORE_CONSTRAINT)
--- a/src/share/vm/c1/c1_globals.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/c1/c1_globals.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@
 //
 // Defines all global flags used by the client compiler.
 //
-#define C1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
+#define C1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, range, constraint) \
                                                                             \
   /* Printing */                                                            \
   notproduct(bool, PrintC1Statistics, false,                                \
@@ -148,6 +148,7 @@
                                                                             \
   product(intx, ValueMapInitialSize, 11,                                    \
           "Initial size of a value map")                                    \
+          range(1, NOT_LP64(1*K) LP64_ONLY(32*K))                           \
                                                                             \
   product(intx, ValueMapMaxLoopSize, 8,                                     \
           "maximum size of a loop optimized by global value numbering")     \
@@ -191,6 +192,7 @@
                                                                             \
   develop(intx, NestedInliningSizeRatio, 90,                                \
           "Percentage of prev. allowed inline size in recursive inlining")  \
+          range(0, 100)                                                     \
                                                                             \
   notproduct(bool, PrintIRWithLIR, false,                                   \
           "Print IR instructions with generated LIR")                       \
@@ -338,10 +340,15 @@
   diagnostic(bool, C1PatchInvokeDynamic, true,                              \
              "Patch invokedynamic appendix not known at compile time")      \
                                                                             \
-
-
 // Read default values for c1 globals
 
-C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
+C1_FLAGS(DECLARE_DEVELOPER_FLAG, \
+         DECLARE_PD_DEVELOPER_FLAG, \
+         DECLARE_PRODUCT_FLAG, \
+         DECLARE_PD_PRODUCT_FLAG, \
+         DECLARE_DIAGNOSTIC_FLAG, \
+         DECLARE_NOTPRODUCT_FLAG, \
+         IGNORE_RANGE, \
+         IGNORE_CONSTRAINT)
 
 #endif // SHARE_VM_C1_C1_GLOBALS_HPP
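
The extra range and constraint parameters added to RUNTIME_OS_FLAGS and C1_FLAGS follow the usual X-macro pattern: each expansion site supplies its own meaning for them, and sites that do not care pass no-op expansions such as IGNORE_RANGE / IGNORE_CONSTRAINT. A toy sketch of the idea, with made-up names rather than the HotSpot flags machinery:

    // Toy flag table: one flag plus an optional range annotation.
    #define TOY_FLAGS(product, range)                                        \
      product(int, ValueMapInitialSize, 11, "Initial size of a value map")   \
      range(1, 32 * 1024)

    // One client declares variables and ignores ranges ...
    #define DECLARE_PRODUCT(type, name, value, doc) type name = value;
    #define IGNORE_RANGE(lo, hi)
    TOY_FLAGS(DECLARE_PRODUCT, IGNORE_RANGE)  // expands to: int ValueMapInitialSize = 11;

    // ... another client could instead expand 'range' into a bounds check.
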
--- a/src/share/vm/classfile/classFileParser.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/classfile/classFileParser.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -949,8 +949,7 @@
         assert(runtime_visible_annotations != NULL, "null visible annotations");
         parse_annotations(runtime_visible_annotations,
                           runtime_visible_annotations_length,
-                          parsed_annotations,
-                          CHECK);
+                          parsed_annotations);
         cfs->skip_u1(runtime_visible_annotations_length, CHECK);
       } else if (attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
         if (runtime_invisible_annotations_exists) {
@@ -1643,7 +1642,6 @@
     index = skip_annotation(buffer, limit, index);
     break;
   default:
-    assert(false, "annotation tag");
     return limit;  //  bad tag byte
   }
   return index;
@@ -1651,8 +1649,7 @@
 
 // Sift through annotations, looking for those significant to the VM:
 void ClassFileParser::parse_annotations(u1* buffer, int limit,
-                                        ClassFileParser::AnnotationCollector* coll,
-                                        TRAPS) {
+                                        ClassFileParser::AnnotationCollector* coll) {
   // annotations := do(nann:u2) {annotation}
   int index = 0;
   if ((index += 2) >= limit)  return;  // read nann
@@ -2286,8 +2283,7 @@
         runtime_visible_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_annotations != NULL, "null visible annotations");
         parse_annotations(runtime_visible_annotations,
-            runtime_visible_annotations_length, &parsed_annotations,
-            CHECK_(nullHandle));
+            runtime_visible_annotations_length, &parsed_annotations);
         cfs->skip_u1(runtime_visible_annotations_length, CHECK_(nullHandle));
       } else if (method_attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
         if (runtime_invisible_annotations_exists) {
@@ -2951,8 +2947,7 @@
         assert(runtime_visible_annotations != NULL, "null visible annotations");
         parse_annotations(runtime_visible_annotations,
                           runtime_visible_annotations_length,
-                          parsed_annotations,
-                          CHECK);
+                          parsed_annotations);
         cfs->skip_u1(runtime_visible_annotations_length, CHECK);
       } else if (tag == vmSymbols::tag_runtime_invisible_annotations()) {
         if (runtime_invisible_annotations_exists) {
--- a/src/share/vm/classfile/classFileParser.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/classfile/classFileParser.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -295,8 +295,7 @@
   int skip_annotation_value(u1* buffer, int limit, int index);
   void parse_annotations(u1* buffer, int limit,
                          /* Results (currently, only one result is supported): */
-                         AnnotationCollector* result,
-                         TRAPS);
+                         AnnotationCollector* result);
 
   // Final setup
   unsigned int compute_oop_map_count(instanceKlassHandle super,
--- a/src/share/vm/classfile/compactHashtable.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/classfile/compactHashtable.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/javaClasses.hpp"
 #include "memory/metaspaceShared.hpp"
+#include "prims/jvm.h"
 #include "utilities/numberSeq.hpp"
 #include <sys/stat.h>
 
@@ -32,11 +33,11 @@
 //
 // The compact hash table writer implementations
 //
-CompactHashtableWriter::CompactHashtableWriter(const char* table_name,
+CompactHashtableWriter::CompactHashtableWriter(int table_type,
                                                int num_entries,
                                                CompactHashtableStats* stats) {
   assert(DumpSharedSpaces, "dump-time only");
-  _table_name = table_name;
+  _type = table_type;
   _num_entries = num_entries;
   _num_buckets = number_of_buckets(_num_entries);
   _buckets = NEW_C_HEAP_ARRAY(Entry*, _num_buckets, mtSymbol);
@@ -99,7 +100,7 @@
                                           NumberSeq* summary) {
   int index;
   juint* compact_table = p;
-  // Find the start of the buckets, skip the compact_bucket_infos table
+  // Compute the start of the buckets, accounting for the compact_bucket_infos table
   // and the table end offset.
   juint offset = _num_buckets + 1;
   *first_bucket = compact_table + offset;
@@ -130,10 +131,17 @@
 // Write the compact table's entries
 juint* CompactHashtableWriter::dump_buckets(juint* compact_table, juint* p,
                                             NumberSeq* summary) {
-  uintx base_address = uintx(MetaspaceShared::shared_rs()->base());
-  uintx max_delta    = uintx(MetaspaceShared::shared_rs()->size());
-  assert(max_delta <= 0x7fffffff, "range check");
+  uintx base_address = 0;
+  uintx max_delta = 0;
   int num_compact_buckets = 0;
+  if (_type == CompactHashtable<Symbol*, char>::_symbol_table) {
+    base_address = uintx(MetaspaceShared::shared_rs()->base());
+    max_delta    = uintx(MetaspaceShared::shared_rs()->size());
+    assert(max_delta <= 0x7fffffff, "range check");
+  } else {
+    assert((_type == CompactHashtable<oop, char>::_string_table), "unknown table");
+    assert(UseCompressedOops, "UseCompressedOops is required");
+  }
 
   assert(p != NULL, "sanity");
   for (int index = 0; index < _num_buckets; index++) {
@@ -148,12 +156,16 @@
     for (Entry* tent = _buckets[index]; tent;
          tent = tent->next()) {
       if (bucket_type == REGULAR_BUCKET_TYPE) {
-        *p++ = juint(tent->hash()); // write symbol hash
+        *p++ = juint(tent->hash()); // write entry hash
       }
-      uintx deltax = uintx(tent->value()) - base_address;
-      assert(deltax < max_delta, "range check");
-      juint delta = juint(deltax);
-      *p++ = delta; // write symbol offset
+      if (_type == CompactHashtable<Symbol*, char>::_symbol_table) {
+        uintx deltax = uintx(tent->value()) - base_address;
+        assert(deltax < max_delta, "range check");
+        juint delta = juint(deltax);
+        *p++ = delta; // write entry offset
+      } else {
+        *p++ = oopDesc::encode_heap_oop(tent->string());
+      }
       count ++;
     }
     assert(count == _bucket_sizes[index], "sanity");
@@ -174,6 +186,10 @@
 
   uintx base_address = uintx(MetaspaceShared::shared_rs()->base());
 
+  // Now write the following at the beginning of the table:
+  //      base_address (uintx)
+  //      num_entries  (juint)
+  //      num_buckets  (juint)
   *p++ = high(base_address);
   *p++ = low (base_address); // base address
   *p++ = _num_entries;  // number of entries in the table
@@ -191,7 +207,8 @@
     if (_num_entries > 0) {
       avg_cost = double(_required_bytes)/double(_num_entries);
     }
-    tty->print_cr("Shared %s table stats -------- base: " PTR_FORMAT, _table_name, (intptr_t)base_address);
+    tty->print_cr("Shared %s table stats -------- base: " PTR_FORMAT,
+                  table_name(), (intptr_t)base_address);
     tty->print_cr("Number of entries       : %9d", _num_entries);
     tty->print_cr("Total bytes used        : %9d", (int)((*top) - old_top));
     tty->print_cr("Average bytes per entry : %9.3f", avg_cost);
@@ -202,12 +219,24 @@
   }
 }
 
+const char* CompactHashtableWriter::table_name() {
+  switch (_type) {
+  case CompactHashtable<Symbol*, char>::_symbol_table: return "symbol";
+  case CompactHashtable<oop, char>::_string_table: return "string";
+  default:
+    ;
+  }
+  return "unknown";
+}
+
 /////////////////////////////////////////////////////////////
 //
 // The CompactHashtable implementation
 //
-template <class T, class N> const char* CompactHashtable<T, N>::init(const char* buffer) {
+template <class T, class N> const char* CompactHashtable<T, N>::init(
+                           CompactHashtableType type, const char* buffer) {
   assert(!DumpSharedSpaces, "run-time only");
+  _type = type;
   juint*p = (juint*)buffer;
   juint upper = *p++;
   juint lower = *p++;
@@ -245,8 +274,34 @@
   }
 }
 
+template <class T, class N> void CompactHashtable<T, N>::oops_do(OopClosure* f) {
+  assert(!DumpSharedSpaces, "run-time only");
+  assert(_type == _string_table || _bucket_count == 0, "sanity");
+  for (juint i = 0; i < _bucket_count; i ++) {
+    juint bucket_info = _buckets[i];
+    juint bucket_offset = BUCKET_OFFSET(bucket_info);
+    int   bucket_type = BUCKET_TYPE(bucket_info);
+    juint* bucket = _buckets + bucket_offset;
+    juint* bucket_end = _buckets;
+
+    narrowOop o;
+    if (bucket_type == COMPACT_BUCKET_TYPE) {
+      o = (narrowOop)bucket[0];
+      f->do_oop(&o);
+    } else {
+      bucket_end += BUCKET_OFFSET(_buckets[i + 1]);
+      while (bucket < bucket_end) {
+        o = (narrowOop)bucket[1];
+        f->do_oop(&o);
+        bucket += 2;
+      }
+    }
+  }
+}
+
 // Explicitly instantiate these types
 template class CompactHashtable<Symbol*, char>;
+template class CompactHashtable<oop, char>;
 
 #ifndef O_BINARY       // if defined (Win32) use binary files.
 #define O_BINARY 0     // otherwise do nothing.
@@ -273,6 +328,8 @@
   _p = _base;
   _end = _base + st.st_size;
   _filename = filename;
+  _prefix_type = Unknown;
+  _line_no = 1;
 }
 
 HashtableTextDump::~HashtableTextDump() {
@@ -286,9 +343,11 @@
   vm_exit_during_initialization(err, msg);
 }
 
-void HashtableTextDump::corrupted(const char *p) {
-  char info[60];
-  sprintf(info, "corrupted at pos %d", (int)(p - _base));
+void HashtableTextDump::corrupted(const char *p, const char* msg) {
+  char info[100];
+  jio_snprintf(info, sizeof(info),
+               "%s. Corrupted at line %d (file pos %d)",
+               msg == NULL ? "Unexpected input" : msg, _line_no, (int)(p - _base));
   quit(info, _filename);
 }
 
@@ -298,8 +357,9 @@
   } else if (_p[0] == '\n') {
     _p += 1;
   } else {
-    corrupted(_p);
+    corrupted(_p, "Unexpected character");
   }
+  _line_no ++;
   return true;
 }
 
@@ -328,26 +388,60 @@
   skip_newline();
 }
 
+void HashtableTextDump::scan_prefix_type() {
+  _p ++;
+  if (strncmp(_p, "SECTION: String", 15) == 0) {
+    _p += 15;
+    _prefix_type = StringPrefix;
+  } else if (strncmp(_p, "SECTION: Symbol", 15) == 0) {
+    _p += 15;
+    _prefix_type = SymbolPrefix;
+  } else {
+    _prefix_type = Unknown;
+  }
+  skip_newline();
+}
 
-int HashtableTextDump::scan_prefix() {
+int HashtableTextDump::scan_prefix(int* utf8_length) {
+  if (*_p == '@') {
+    scan_prefix_type();
+  }
+
+  switch (_prefix_type) {
+  case SymbolPrefix:
+    *utf8_length = scan_symbol_prefix(); break;
+  case StringPrefix:
+    *utf8_length = scan_string_prefix(); break;
+  default:
+    tty->print_cr("Shared input data type: Unknown.");
+    corrupted(_p, "Unknown data type");
+  }
+
+  return _prefix_type;
+}
+
+int HashtableTextDump::scan_string_prefix() {
   // Expect /[0-9]+: /
-  int utf8_length = get_num(':');
+  int utf8_length;
+  if (!get_num(':', &utf8_length)) {
+    corrupted(_p, "Wrong prefix format for string");
+  }
   if (*_p != ' ') {
-    corrupted(_p);
+    corrupted(_p, "Wrong prefix format for string");
   }
   _p++;
   return utf8_length;
 }
 
-int HashtableTextDump::scan_prefix2() {
+int HashtableTextDump::scan_symbol_prefix() {
   // Expect /[0-9]+ (-|)[0-9]+: /
-  int utf8_length = get_num(' ');
-  if (*_p == '-') {
-    _p++;
+  int utf8_length;
+  if (!get_num(' ', &utf8_length)) {
+    corrupted(_p, "Wrong prefix format for symbol");
+  }
+  if (*_p == '-') {
+    _p++;
   }
-  (void)get_num(':');
+  int ref_num;
+  (void)get_num(':', &ref_num);
   if (*_p != ' ') {
-    corrupted(_p);
+    corrupted(_p, "Wrong prefix format for symbol");
   }
   _p++;
   return utf8_length;
@@ -408,7 +502,7 @@
       case 'r':  *to++ = '\r'; break;
       case '\\': *to++ = '\\'; break;
       default:
-        ShouldNotReachHere();
+        corrupted(_p, "Unsupported character");
       }
     }
   }
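
The HashtableTextDump changes above read a shared-archive text dump in which each entry line carries a "<utf8-length>: " prefix and sections are introduced by "@SECTION: String" or "@SECTION: Symbol" marker lines. Below is a minimal standalone sketch of that prefix scan; it is illustrative only and not the HotSpot parser, and the helper name is made up for the example.

    #include <cctype>
    #include <cstdio>

    // Parse the /[0-9]+: / prefix of a dump line. On success, advance *p past the
    // prefix and store the decoded length in *len; on failure leave *p unchanged.
    static bool parse_string_prefix(const char** p, int* len) {
      const char* s = *p;
      if (!std::isdigit((unsigned char)*s)) {
        return false;                         // must start with a digit
      }
      int n = 0;
      while (std::isdigit((unsigned char)*s)) {
        n = n * 10 + (*s - '0');
        s++;
      }
      if (s[0] != ':' || s[1] != ' ') {
        return false;                         // expect ": " after the number
      }
      *p = s + 2;
      *len = n;
      return true;
    }

    int main() {
      const char* line = "5: hello";
      int len = 0;
      if (parse_string_prefix(&line, &len)) {
        std::printf("utf8 length = %d, payload = \"%s\"\n", len, line);
      }
      return 0;
    }
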
--- a/src/share/vm/classfile/compactHashtable.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/classfile/compactHashtable.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -28,6 +28,7 @@
 #include "classfile/stringTable.hpp"
 #include "classfile/symbolTable.hpp"
 #include "memory/allocation.inline.hpp"
+#include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
 #include "services/diagnosticCommand.hpp"
 #include "utilities/hashtable.hpp"
@@ -49,7 +50,7 @@
 // the compact table to the shared archive.
 //
 // At dump time, the CompactHashtableWriter obtains all entries from the
-// symbol table and adds them to a new temporary hash table. The hash
+// symbol/string table and adds them to a new temporary hash table. The hash
 // table size (number of buckets) is calculated using
 // '(num_entries + bucket_size - 1) / bucket_size'. The default bucket
 // size is 4 and can be changed by -XX:SharedSymbolTableBucketSize option.
@@ -57,14 +58,14 @@
 // faster lookup. It also has relatively small number of empty buckets and
 // good distribution of the entries.
 //
-// We use a simple hash function (symbol_hash % num_bucket) for the table.
+// We use a simple hash function (hash % num_buckets) for the table.
 // The new table is compacted when written out. Please see comments
 // above the CompactHashtable class for the table layout detail. The bucket
 // offsets are written to the archive as part of the compact table. The
 // bucket offset is encoded in the low 30-bit (0-29) and the bucket type
 // (regular or compact) are encoded in bit[31, 30]. For buckets with more
-// than one entry, both symbol hash and symbol offset are written to the
-// table. For buckets with only one entry, only the symbol offset is written
+// than one entry, both hash and entry offset are written to the
+// table. For buckets with only one entry, only the entry offset is written
 // to the table and the buckets are tagged as compact in their type bits.
 // Buckets without entry are skipped from the table. Their offsets are
 // still written out for faster lookup.
@@ -78,6 +79,7 @@
 
   public:
     Entry(unsigned int hash, Symbol *symbol) : _next(NULL), _hash(hash), _literal(symbol) {}
+    Entry(unsigned int hash, oop string)     : _next(NULL), _hash(hash), _literal(string) {}
 
     void *value() {
       return _literal;
@@ -85,6 +87,9 @@
     Symbol *symbol() {
       return (Symbol*)_literal;
     }
+    oop string() {
+      return (oop)_literal;
+    }
     unsigned int hash() {
       return _hash;
     }
@@ -95,7 +100,7 @@
 private:
   static int number_of_buckets(int num_entries);
 
-  const char* _table_name;
+  int _type;
   int _num_entries;
   int _num_buckets;
   juint* _bucket_sizes;
@@ -105,7 +110,7 @@
 
 public:
   // This is called at dump-time only
-  CompactHashtableWriter(const char* table_name, int num_entries, CompactHashtableStats* stats);
+  CompactHashtableWriter(int table_type, int num_entries, CompactHashtableStats* stats);
   ~CompactHashtableWriter();
 
   int get_required_bytes() {
@@ -116,6 +121,10 @@
     add(hash, new Entry(hash, symbol));
   }
 
+  void add(unsigned int hash, oop string) {
+    add(hash, new Entry(hash, string));
+  }
+
 private:
   void add(unsigned int hash, Entry* entry);
   juint* dump_table(juint* p, juint** first_bucket, NumberSeq* summary);
@@ -123,6 +132,7 @@
 
 public:
   void dump(char** top, char* end);
+  const char* table_name();
 };
 
 #define REGULAR_BUCKET_TYPE       0
@@ -136,23 +146,23 @@
 
 /////////////////////////////////////////////////////////////////////////////
 //
-// CompactHashtable is used to stored the CDS archive's symbol table. Used
+// CompactHashtable is used to store the CDS archive's symbol/string table. Used
 // at runtime only to access the compact table from the archive.
 //
 // Because these tables are read-only (no entries can be added/deleted) at run-time
 // and tend to have large number of entries, we try to minimize the footprint
 // cost per entry.
 //
-// Layout of compact symbol table in the shared archive:
+// Layout of compact table in the shared archive:
 //
 //   uintx base_address;
-//   juint num_symbols;
+//   juint num_entries;
 //   juint num_buckets;
 //   juint bucket_infos[num_buckets+1]; // bit[31,30]: type; bit[29-0]: offset
 //   juint table[]
 //
 // -----------------------------------
-// | base_address  | num_symbols     |
+// | base_address  | num_entries     |
 // |---------------------------------|
 // | num_buckets   | bucket_info0    |
 // |---------------------------------|
@@ -177,9 +187,13 @@
 // compact buckets have '01' in their highest 2-bit, and regular buckets have
 // '00' in their highest 2-bit.
 //
-// For normal buckets, each symbol's entry is 8 bytes in the table[]:
-//   juint hash;    /* symbol hash */
-//   juint offset;  /* Symbol* sym = (Symbol*)(base_address + offset) */
+// For normal buckets, each entry is 8 bytes in the table[]:
+//   juint hash;    /* symbol/string hash */
+//   union {
+//     juint offset;  /* Symbol* sym = (Symbol*)(base_address + offset) */
+//     narrowOop str; /* String narrowOop encoding */
+//   }
+//
 //
 // For compact buckets, each entry has only the 4-byte 'offset' in the table[].
 //
@@ -189,19 +203,41 @@
 //
 template <class T, class N> class CompactHashtable VALUE_OBJ_CLASS_SPEC {
   friend class VMStructs;
+
+ public:
+  enum CompactHashtableType {
+    _symbol_table = 0,
+    _string_table = 1
+  };
+
+private:
+  CompactHashtableType _type;
   uintx  _base_address;
   juint  _entry_count;
   juint  _bucket_count;
   juint  _table_end_offset;
   juint* _buckets;
 
-  inline bool equals(T entry, const char* name, int len) {
-    if (entry->equals(name, len)) {
-      assert(entry->refcount() == -1, "must be shared");
-      return true;
-    } else {
-      return false;
+  inline Symbol* lookup_entry(CompactHashtable<Symbol*, char>* const t,
+                              juint* addr, const char* name, int len) {
+    Symbol* sym = (Symbol*)((void*)(_base_address + *addr));
+    if (sym->equals(name, len)) {
+      assert(sym->refcount() == -1, "must be shared");
+      return sym;
     }
+
+    return NULL;
+  }
+
+  inline oop lookup_entry(CompactHashtable<oop, char>* const t,
+                          juint* addr, const char* name, int len) {
+    narrowOop obj = (narrowOop)(*addr);
+    oop string = oopDesc::decode_heap_oop(obj);
+    if (java_lang_String::equals(string, (jchar*)name, len)) {
+      return string;
+    }
+
+    return NULL;
   }
 
 public:
@@ -211,7 +247,14 @@
     _table_end_offset = 0;
     _buckets = 0;
   }
-  const char* init(const char *buffer);
+  const char* init(CompactHashtableType type, const char *buffer);
+
+  void reset() {
+    _entry_count = 0;
+    _bucket_count = 0;
+    _table_end_offset = 0;
+    _buckets = 0;
+  }
 
   // Lookup an entry from the compact table
   inline T lookup(const N* name, unsigned int hash, int len) {
@@ -225,23 +268,22 @@
       juint* bucket_end = _buckets;
 
       if (bucket_type == COMPACT_BUCKET_TYPE) {
-        // the compact bucket has one entry with symbol offset only
-        T entry = (T)((void*)(_base_address + bucket[0]));
-        if (equals(entry, name, len)) {
-          return entry;
+        // the compact bucket has one entry with entry offset only
+        T res = lookup_entry(this, &bucket[0], name, len);
+        if (res != NULL) {
+          return res;
         }
       } else {
         // This is a regular bucket, which has more than one
-        // entries. Each entry is a pair of symbol (hash, offset).
+        // entries. Each entry is a (hash, offset) pair.
         // Seek until the end of the bucket.
         bucket_end += BUCKET_OFFSET(_buckets[index + 1]);
         while (bucket < bucket_end) {
           unsigned int h = (unsigned int)(bucket[0]);
           if (h == hash) {
-            juint offset = bucket[1];
-            T entry = (T)((void*)(_base_address + offset));
-            if (equals(entry, name, len)) {
-              return entry;
+            T res = lookup_entry(this, &bucket[1], name, len);
+            if (res != NULL) {
+              return res;
             }
           }
           bucket += 2;
@@ -253,12 +295,15 @@
 
   // iterate over symbols
   void symbols_do(SymbolClosure *cl);
+
+  // iterate over strings
+  void oops_do(OopClosure* f);
 };
 
 ////////////////////////////////////////////////////////////////////////
 //
 // Read/Write the contents of a hashtable textual dump (created by
-// SymbolTable::dump).
+// SymbolTable::dump and StringTable::dump).
 // Because the dump file may be big (hundred of MB in extreme cases),
 // we use mmap for fast access when reading it.
 //
@@ -269,21 +314,29 @@
   const char* _end;
   const char* _filename;
   size_t      _size;
+  int         _prefix_type;
+  int         _line_no;
 public:
   HashtableTextDump(const char* filename);
   ~HashtableTextDump();
 
+  enum {
+    SymbolPrefix = 1 << 0,
+    StringPrefix = 1 << 1,
+    Unknown = 1 << 2
+  };
+
   void quit(const char* err, const char* msg);
 
   inline int remain() {
     return (int)(_end - _p);
   }
 
-  void corrupted(const char *p);
+  void corrupted(const char *p, const char *msg);
 
   inline void corrupted_if(bool cond) {
     if (cond) {
-      corrupted(_p);
+      corrupted(_p, NULL);
     }
   }
 
@@ -292,7 +345,7 @@
   void skip_past(char c);
   void check_version(const char* ver);
 
-  inline int get_num(char delim) {
+  inline bool get_num(char delim, int *utf8_length) {
     const char* p   = _p;
     const char* end = _end;
     int num = 0;
@@ -303,18 +356,22 @@
         num = num * 10 + (c - '0');
       } else if (c == delim) {
         _p = p;
-        return num;
+        *utf8_length = num;
+        return true;
       } else {
-        corrupted(p-1);
+        // Not [0-9], not 'delim'
+        return false;
       }
     }
-    corrupted(_end);
+    corrupted(_end, "Incorrect format");
     ShouldNotReachHere();
-    return 0;
+    return false;
   }
 
-  int scan_prefix();
-  int scan_prefix2();
+  void scan_prefix_type();
+  int scan_prefix(int* utf8_length);
+  int scan_string_prefix();
+  int scan_symbol_prefix();
 
   jchar unescape(const char* from, const char* end, int count);
   void get_utf8(char* utf8_buffer, int utf8_length);
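
As a hedged illustration of the bucket_info encoding described in the layout comment above (bucket offset in bits 0-29, bucket type in bits 31-30, with compact buckets tagged '01' and regular buckets '00'), here is a small standalone sketch; the helper names are invented for the example.

    #include <cstdint>
    #include <cstdio>

    static const uint32_t REGULAR_BUCKET_TYPE = 0;  // highest 2 bits == 00
    static const uint32_t COMPACT_BUCKET_TYPE = 1;  // highest 2 bits == 01

    // Pack/unpack a bucket_info word: type in bits 31-30, offset in bits 29-0.
    static uint32_t make_bucket_info(uint32_t offset, uint32_t type) {
      return (type << 30) | (offset & 0x3FFFFFFFu);
    }
    static uint32_t bucket_offset(uint32_t info) { return info & 0x3FFFFFFFu; }
    static uint32_t bucket_type(uint32_t info)   { return info >> 30; }

    int main() {
      uint32_t info = make_bucket_info(12, COMPACT_BUCKET_TYPE);
      // prints: type=1 offset=12
      std::printf("type=%u offset=%u\n", bucket_type(info), bucket_offset(info));
      return 0;
    }
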
--- a/src/share/vm/classfile/javaClasses.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/classfile/javaClasses.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -118,6 +118,10 @@
     return hash_offset;
   }
 
+  static void set_value_raw(oop string, typeArrayOop buffer) {
+    assert(initialized, "Must be initialized");
+    string->obj_field_put_raw(value_offset, buffer);
+  }
   static void set_value(oop string, typeArrayOop buffer) {
     assert(initialized && (value_offset > 0), "Must be initialized");
     string->obj_field_put(value_offset, (oop)buffer);
@@ -210,6 +214,7 @@
   // Debugging
   static void print(oop java_string, outputStream* st);
   friend class JavaClasses;
+  friend class StringTable;
 };
 
 
--- a/src/share/vm/classfile/stringTable.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/classfile/stringTable.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -38,6 +38,7 @@
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CollectedHeap.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #endif
@@ -87,19 +88,28 @@
 
 // --------------------------------------------------------------------------
 StringTable* StringTable::_the_table = NULL;
-
+bool StringTable::_ignore_shared_strings = false;
 bool StringTable::_needs_rehashing = false;
 
 volatile int StringTable::_parallel_claimed_idx = 0;
 
+CompactHashtable<oop, char> StringTable::_shared_table;
+
 // Pick hashing algorithm
 unsigned int StringTable::hash_string(const jchar* s, int len) {
   return use_alternate_hashcode() ? AltHashing::murmur3_32(seed(), s, len) :
                                     java_lang_String::hash_code(s, len);
 }
 
-oop StringTable::lookup(int index, jchar* name,
-                        int len, unsigned int hash) {
+oop StringTable::lookup_shared(jchar* name, int len) {
+  // java_lang_String::hash_code() was used to compute hash values in the shared table. Don't
+  // use the hash value from StringTable::hash_string() as it might use an alternate hashing algorithm.
+  return _shared_table.lookup((const char*)name,
+                              java_lang_String::hash_code(name, len), len);
+}
+
+oop StringTable::lookup_in_main_table(int index, jchar* name,
+                                int len, unsigned int hash) {
   int count = 0;
   for (HashtableEntry<oop, mtSymbol>* l = bucket(index); l != NULL; l = l->next()) {
     count++;
@@ -140,7 +150,8 @@
   // Since look-up was done lock-free, we need to check if another
   // thread beat us in the race to insert the symbol.
 
-  oop test = lookup(index, name, len, hashValue); // calls lookup(u1*, int)
+  // No need to lookup the shared table from here since the caller (intern()) already did
+  oop test = lookup_in_main_table(index, name, len, hashValue); // calls lookup(u1*, int)
   if (test != NULL) {
     // Entry already added
     return test;
@@ -172,9 +183,14 @@
 }
 
 oop StringTable::lookup(jchar* name, int len) {
+  oop string = lookup_shared(name, len);
+  if (string != NULL) {
+    return string;
+  }
+
   unsigned int hash = hash_string(name, len);
   int index = the_table()->hash_to_index(hash);
-  oop string = the_table()->lookup(index, name, len, hash);
+  string = the_table()->lookup_in_main_table(index, name, len, hash);
 
   ensure_string_alive(string);
 
@@ -184,9 +200,14 @@
 
 oop StringTable::intern(Handle string_or_null, jchar* name,
                         int len, TRAPS) {
+  oop found_string = lookup_shared(name, len);
+  if (found_string != NULL) {
+    return found_string;
+  }
+
   unsigned int hashValue = hash_string(name, len);
   int index = the_table()->hash_to_index(hashValue);
-  oop found_string = the_table()->lookup(index, name, len, hashValue);
+  found_string = the_table()->lookup_in_main_table(index, name, len, hashValue);
 
   // Found
   if (found_string != NULL) {
@@ -611,3 +632,131 @@
     return 0;
   }
 }
+
+// Sharing
+bool StringTable::copy_shared_string(GrowableArray<MemRegion> *string_space,
+                                     CompactHashtableWriter* ch_table) {
+#if INCLUDE_CDS && INCLUDE_ALL_GCS && defined(_LP64) && !defined(_WINDOWS)
+  assert(UseG1GC, "Only support G1 GC");
+  assert(UseCompressedOops && UseCompressedClassPointers,
+         "Only support UseCompressedOops and UseCompressedClassPointers enabled");
+
+  Thread* THREAD = Thread::current();
+  G1CollectedHeap::heap()->begin_archive_alloc_range();
+  for (int i = 0; i < the_table()->table_size(); ++i) {
+    HashtableEntry<oop, mtSymbol>* bucket = the_table()->bucket(i);
+    for ( ; bucket != NULL; bucket = bucket->next()) {
+      oop s = bucket->literal();
+      unsigned int hash = java_lang_String::hash_code(s);
+      if (hash == 0) {
+        continue;
+      }
+
+      // allocate the new 'value' array first
+      typeArrayOop v = java_lang_String::value(s);
+      int v_len = v->size();
+      typeArrayOop new_v;
+      if (G1CollectedHeap::heap()->is_archive_alloc_too_large(v_len)) {
+        continue; // skip the current String. The 'value' array is too large to handle
+      } else {
+        new_v = (typeArrayOop)G1CollectedHeap::heap()->archive_mem_allocate(v_len);
+        if (new_v == NULL) {
+          return false; // allocation failed
+        }
+      }
+      // now allocate the new String object
+      int s_len = s->size();
+      oop new_s = (oop)G1CollectedHeap::heap()->archive_mem_allocate(s_len);
+      if (new_s == NULL) {
+        return false;
+      }
+
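+      // Compute the identity hashes now so they are set in the object headers before copying.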
+      s->identity_hash();
+      v->identity_hash();
+
+      // copy the objects' data
+      Copy::aligned_disjoint_words((HeapWord*)s, (HeapWord*)new_s, s_len);
+      Copy::aligned_disjoint_words((HeapWord*)v, (HeapWord*)new_v, v_len);
+
+      // adjust the pointer to the 'value' field in the new String oop. Also pre-compute and set the
+      // 'hash' field. That avoids writes to the shared strings at runtime by the deduplication process.
+      java_lang_String::set_value_raw(new_s, new_v);
+      if (java_lang_String::hash(new_s) == 0) {
+        java_lang_String::set_hash(new_s, hash);
+      }
+
+      // add to the compact table
+      ch_table->add(hash, new_s);
+    }
+  }
+
+  G1CollectedHeap::heap()->end_archive_alloc_range(string_space, os::vm_allocation_granularity());
+  assert(string_space->length() <= 2, "sanity");
+#endif
+  return true;
+}
+
+bool StringTable::copy_compact_table(char** top, char *end, GrowableArray<MemRegion> *string_space,
+                                     size_t* space_size) {
+#if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
+  if (!(UseG1GC && UseCompressedOops && UseCompressedClassPointers)) {
+    if (PrintSharedSpaces) {
+      tty->print_cr("Shared strings are excluded from the archive as UseG1GC, "
+                    "UseCompressedOops and UseCompressedClassPointers are required.");
+    }
+    return true;
+  }
+
+  CompactHashtableWriter ch_table(CompactHashtable<oop, char>::_string_table,
+                                  the_table()->number_of_entries(),
+                                  &MetaspaceShared::stats()->string);
+
+  // Copy the interned strings into the "string space" within the java heap
+  if (!copy_shared_string(string_space, &ch_table)) {
+    return false;
+  }
+
+  for (int i = 0; i < string_space->length(); i++) {
+    *space_size += string_space->at(i).byte_size();
+  }
+
+  // Now dump the compact table
+  if (*top + ch_table.get_required_bytes() > end) {
+    // not enough space left
+    return false;
+  }
+  ch_table.dump(top, end);
+  *top = (char*)align_pointer_up(*top, sizeof(void*));
+
+#endif
+  return true;
+}
+
+void StringTable::shared_oops_do(OopClosure* f) {
+#if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
+  _shared_table.oops_do(f);
+#endif
+}
+
+const char* StringTable::init_shared_table(FileMapInfo *mapinfo, char *buffer) {
+#if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
+  if (mapinfo->space_capacity(MetaspaceShared::first_string) == 0) {
+    // no shared string data
+    return buffer;
+  }
+
+  // initialize the shared table
+  juint *p = (juint*)buffer;
+  const char* end = _shared_table.init(
+          CompactHashtable<oop, char>::_string_table, (char*)p);
+  const char* aligned_end = (const char*)align_pointer_up(end, sizeof(void*));
+
+  if (_ignore_shared_strings) {
+    _shared_table.reset();
+  }
+
+  return aligned_end;
+#endif
+
+  return buffer;
+}
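
lookup_shared() above deliberately uses java_lang_String::hash_code() rather than the (possibly alternate) StringTable hash, because the shared table was built with that hash at dump time. A minimal sketch of that hash follows, assuming it is the standard Java String.hashCode() recurrence h = 31*h + c over UTF-16 code units; this is an illustration, not HotSpot code.

    #include <cstdio>

    // h = 31*h + s[i], computed over the UTF-16 code units of the string.
    static unsigned int java_string_hash(const char16_t* s, int len) {
      unsigned int h = 0;
      for (int i = 0; i < len; i++) {
        h = 31 * h + (unsigned int)s[i];
      }
      return h;
    }

    int main() {
      const char16_t abc[] = u"abc";
      std::printf("%u\n", java_string_hash(abc, 3)); // 96354, same as "abc".hashCode() in Java
      return 0;
    }
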
--- a/src/share/vm/classfile/stringTable.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/classfile/stringTable.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,10 @@
 #include "memory/allocation.inline.hpp"
 #include "utilities/hashtable.hpp"
 
+template <class T, class N> class CompactHashtable;
+class CompactHashtableWriter;
+class FileMapInfo;
+
 class StringTable : public RehashableHashtable<oop, mtSymbol> {
   friend class VMStructs;
   friend class Symbol;
@@ -36,6 +40,10 @@
   // The string table
   static StringTable* _the_table;
 
+  // Shared string table
+  static CompactHashtable<oop, char> _shared_table;
+  static bool _ignore_shared_strings;
+
   // Set if one bucket is out of balance due to hash algorithm deficiency
   static bool _needs_rehashing;
 
@@ -46,7 +54,8 @@
   oop basic_add(int index, Handle string_or_null, jchar* name, int len,
                 unsigned int hashValue, TRAPS);
 
-  oop lookup(int index, jchar* chars, int length, unsigned int hashValue);
+  oop lookup_in_main_table(int index, jchar* chars, int length, unsigned int hashValue);
+  static oop lookup_shared(jchar* name, int len);
 
   // Apply the give oop closure to the entries to the buckets
   // in the range [start_idx, end_idx).
@@ -141,12 +150,14 @@
   static int verify_and_compare_entries();
 
   // Sharing
-  static void copy_buckets(char** top, char*end) {
-    the_table()->Hashtable<oop, mtSymbol>::copy_buckets(top, end);
-  }
-  static void copy_table(char** top, char*end) {
-    the_table()->Hashtable<oop, mtSymbol>::copy_table(top, end);
-  }
+  static void ignore_shared_strings(bool v) { _ignore_shared_strings = v; }
+  static bool shared_string_ignored()       { return _ignore_shared_strings; }
+  static void shared_oops_do(OopClosure* f);
+  static bool copy_shared_string(GrowableArray<MemRegion> *string_space,
+                                 CompactHashtableWriter* ch_table);
+  static bool copy_compact_table(char** top, char* end, GrowableArray<MemRegion> *string_space,
+                                 size_t* space_size);
+  static const char* init_shared_table(FileMapInfo *mapinfo, char* buffer);
   static void reverse() {
     the_table()->Hashtable<oop, mtSymbol>::reverse();
   }
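
The new sharing API makes the string table effectively two-level: the read-only shared table mapped from the archive is consulted first, and only on a miss does lookup/intern fall through to the regular table. A standalone sketch of that lookup order with simplified, illustrative types (std::string keys stand in for interned strings):

    #include <cstdio>
    #include <string>
    #include <unordered_map>

    // Read-only archived table (conceptually _shared_table) in front of the normal
    // mutable table; lookups always try the shared table first.
    struct TwoLevelTable {
      std::unordered_map<std::string, int> shared;  // never modified at run time
      std::unordered_map<std::string, int> main;    // interning adds entries here

      const int* lookup(const std::string& key) const {
        auto it = shared.find(key);
        if (it != shared.end()) {
          return &it->second;
        }
        it = main.find(key);
        return it != main.end() ? &it->second : nullptr;
      }
    };

    int main() {
      TwoLevelTable t;
      t.shared["archived"] = 1;   // came from the shared archive
      t.main["runtime"] = 2;      // interned at run time
      std::printf("%d %d\n", *t.lookup("archived"), *t.lookup("runtime"));
      return 0;
    }
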
--- a/src/share/vm/classfile/symbolTable.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/classfile/symbolTable.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -539,7 +539,8 @@
 
 bool SymbolTable::copy_compact_table(char** top, char*end) {
 #if INCLUDE_CDS
-  CompactHashtableWriter ch_table("symbol", the_table()->number_of_entries(),
+  CompactHashtableWriter ch_table(CompactHashtable<Symbol*, char>::_symbol_table,
+                                  the_table()->number_of_entries(),
                                   &MetaspaceShared::stats()->symbol);
   if (*top + ch_table.get_required_bytes() > end) {
     // not enough space left
@@ -556,7 +557,6 @@
     }
   }
 
-  char* old_top = *top;
   ch_table.dump(top, end);
 
   *top = (char*)align_pointer_up(*top, sizeof(void*));
@@ -565,7 +565,8 @@
 }
 
 const char* SymbolTable::init_shared_table(const char* buffer) {
-  const char* end = _shared_table.init(buffer);
+  const char* end = _shared_table.init(
+          CompactHashtable<Symbol*, char>::_symbol_table, buffer);
   return (const char*)align_pointer_up(end, sizeof(void*));
 }
 
--- a/src/share/vm/code/codeCache.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/code/codeCache.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -190,7 +190,12 @@
   static void set_needs_cache_clean(bool v)           { _needs_cache_clean = v;    }
   static void clear_inline_caches();                  // clear all inline caches
 
-  // Returns the CodeBlobType for nmethods of the given compilation level
+  // Returns the CodeBlobType for the given nmethod
+  static int get_code_blob_type(nmethod* nm) {
+    return get_code_heap(nm)->code_blob_type();
+  }
+
+  // Returns the CodeBlobType for the given compilation level
   static int get_code_blob_type(int comp_level) {
     if (comp_level == CompLevel_none ||
         comp_level == CompLevel_simple ||
@@ -287,7 +292,7 @@
       // Iterate over all CodeBlobs
       _code_blob_type = CodeBlobType::All;
     } else if (nm != NULL) {
-      _code_blob_type = CodeCache::get_code_blob_type(nm->comp_level());
+      _code_blob_type = CodeCache::get_code_blob_type(nm);
     } else {
       // Only iterate over method code heaps, starting with non-profiled
       _code_blob_type = CodeBlobType::MethodNonProfiled;
--- a/src/share/vm/code/nmethod.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/code/nmethod.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1421,7 +1421,7 @@
   Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
   if (PrintMethodFlushing) {
     tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
-        _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(_comp_level))/1024);
+        _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
   }
 
   // We need to deallocate any ExceptionCache data.
--- a/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -190,10 +190,10 @@
 };
 
 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
-     ReservedSpace rs, size_t initial_byte_size, int level,
+     ReservedSpace rs, size_t initial_byte_size,
      CardTableRS* ct, bool use_adaptive_freelists,
      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
-  CardGeneration(rs, initial_byte_size, level, ct),
+  CardGeneration(rs, initial_byte_size, ct),
   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
   _did_compact(false)
 {
@@ -285,9 +285,9 @@
     _ref_processor =
       new ReferenceProcessor(_span,                               // span
                              (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
-                             (int) ParallelGCThreads,             // mt processing degree
+                             ParallelGCThreads,                   // mt processing degree
                              _cmsGen->refs_discovery_is_mt(),     // mt discovery
-                             (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
+                             MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                              _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
                              &_is_alive_closure);                 // closure for liveness info
     // Initialize the _ref_processor field of CMSGen
@@ -562,7 +562,7 @@
   // are not shared with parallel scavenge (ParNew).
   {
     uint i;
-    uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
+    uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);
 
     if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
          || ParallelRefProcEnabled)
@@ -682,12 +682,17 @@
 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   if (PrintGCDetails) {
+    // The generation level concept has been removed; keep the existing log format,
+    // though this message could print "old" instead of the hard-coded level 1.
+    assert(gch->is_old_gen(this),
+           "The CMS generation should be the old generation");
+    uint level = 1;
     if (Verbose) {
-      gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
-        level(), short_name(), s, used(), capacity());
+      gclog_or_tty->print("[%u %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
+        level, short_name(), s, used(), capacity());
     } else {
-      gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
-        level(), short_name(), s, used() / K, capacity() / K);
+      gclog_or_tty->print("[%u %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
+        level, short_name(), s, used() / K, capacity() / K);
     }
   }
   if (Verbose) {
@@ -797,27 +802,22 @@
       gclog_or_tty->print_cr("\nFrom compute_new_size: ");
       gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
       gclog_or_tty->print_cr("  Desired free fraction %f",
-        desired_free_percentage);
+              desired_free_percentage);
       gclog_or_tty->print_cr("  Maximum free fraction %f",
-        maximum_free_percentage);
+              maximum_free_percentage);
       gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
       gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
-        desired_capacity/1000);
-      int prev_level = level() - 1;
-      if (prev_level >= 0) {
-        size_t prev_size = 0;
-        GenCollectedHeap* gch = GenCollectedHeap::heap();
-        Generation* prev_gen = gch->young_gen();
-        prev_size = prev_gen->capacity();
-          gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
-                                 prev_size/1000);
-      }
+              desired_capacity/1000);
+      GenCollectedHeap* gch = GenCollectedHeap::heap();
+      assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
+      size_t young_size = gch->young_gen()->capacity();
+      gclog_or_tty->print_cr("  Young gen size " SIZE_FORMAT, young_size / 1000);
       gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
-        unsafe_max_alloc_nogc()/1000);
+              unsafe_max_alloc_nogc()/1000);
       gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
-        contiguous_available()/1000);
+              contiguous_available()/1000);
       gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
-        expand_bytes);
+              expand_bytes);
     }
     // safe if expansion fails
     expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
@@ -1650,8 +1650,7 @@
                                             _intra_sweep_estimate.padded_average());
   }
 
-  GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
-    ref_processor(), clear_all_soft_refs);
+  GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
   #ifdef ASSERT
     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
     size_t free_size = cms_space->free();
@@ -2432,7 +2431,7 @@
     StrongRootsScope srs(1);
 
     gch->gen_process_roots(&srs,
-                           _cmsGen->level(),
+                           GenCollectedHeap::OldGen,
                            true,   // younger gens are roots
                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
                            should_unload_classes(),
@@ -2504,7 +2503,7 @@
     StrongRootsScope srs(1);
 
     gch->gen_process_roots(&srs,
-                           _cmsGen->level(),
+                           GenCollectedHeap::OldGen,
                            true,   // younger gens are roots
                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
                            should_unload_classes(),
@@ -3031,7 +3030,7 @@
       StrongRootsScope srs(1);
 
       gch->gen_process_roots(&srs,
-                             _cmsGen->level(),
+                             GenCollectedHeap::OldGen,
                              true,   // younger gens are roots
                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
                              should_unload_classes(),
@@ -4282,15 +4281,12 @@
       FlagSetting fl(gch->_is_gc_active, false);
       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
         PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
-      int level = _cmsGen->level() - 1;
-      if (level >= 0) {
-        gch->do_collection(true,        // full (i.e. force, see below)
-                           false,       // !clear_all_soft_refs
-                           0,           // size
-                           false,       // is_tlab
-                           level        // max_level
-                          );
-      }
+      gch->do_collection(true,                      // full (i.e. force, see below)
+                         false,                     // !clear_all_soft_refs
+                         0,                         // size
+                         false,                     // is_tlab
+                         GenCollectedHeap::YoungGen // type
+        );
     }
     FreelistLocker x(this);
     MutexLockerEx y(bitMapLock(),
@@ -4464,7 +4460,7 @@
   CLDToOopClosure cld_closure(&par_mri_cl, true);
 
   gch->gen_process_roots(_strong_roots_scope,
-                         _collector->_cmsGen->level(),
+                         GenCollectedHeap::OldGen,
                          false,     // yg was scanned above
                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                          _collector->should_unload_classes(),
@@ -4603,7 +4599,7 @@
   _timer.reset();
   _timer.start();
   gch->gen_process_roots(_strong_roots_scope,
-                         _collector->_cmsGen->level(),
+                         GenCollectedHeap::OldGen,
                          false,     // yg was scanned above
                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                          _collector->should_unload_classes(),
@@ -5184,7 +5180,7 @@
     StrongRootsScope srs(1);
 
     gch->gen_process_roots(&srs,
-                           _cmsGen->level(),
+                           GenCollectedHeap::OldGen,
                            true,  // younger gens as roots
                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
                            should_unload_classes(),
@@ -5322,8 +5318,8 @@
    _bit_map(bit_map),
    _work_queue(work_queue),
    _mark_and_push(collector, span, bit_map, work_queue),
-   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
-                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
+   _low_water_mark(MIN2((work_queue->max_elems()/4),
+                        ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
 { }
 
 // . see if we can share work_queues with ParNew? XXX
@@ -5648,11 +5644,12 @@
   return _cmsSpace->find_chunk_at_end();
 }
 
-void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
+void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
                                                     bool full) {
-  // The next lower level has been collected.  Gather any statistics
+  // If the young generation has been collected, gather any statistics
   // that are of interest at this point.
-  if (!full && (current_level + 1) == level()) {
+  bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
+  if (!full && current_is_young) {
     // Gather statistics on the young generation collection.
     collector()->stats().record_gc0_end(used());
   }
@@ -6251,8 +6248,8 @@
   _span(span),
   _bit_map(bit_map),
   _work_queue(work_queue),
-  _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
-                       (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
+  _low_water_mark(MIN2((work_queue->max_elems()/4),
+                       ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
   _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
 {
   _ref_processor = rp;
--- a/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1063,7 +1063,7 @@
   void shrink_free_list_by(size_t bytes);
 
   // Update statistics for GC
-  virtual void update_gc_stats(int level, bool full);
+  virtual void update_gc_stats(Generation* current_generation, bool full);
 
   // Maximum available space in the generation (including uncommitted)
   // space.
@@ -1079,7 +1079,7 @@
 
  public:
   ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
-                                int level, CardTableRS* ct,
+                                CardTableRS* ct,
                                 bool use_adaptive_freelists,
                                 FreeBlockDictionary<FreeChunk>::DictionaryChoice);
 
--- a/src/share/vm/gc/cms/parCardTableModRefBS.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/cms/parCardTableModRefBS.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -42,7 +42,7 @@
                                                              uint n_threads) {
   assert(n_threads > 0, "expected n_threads > 0");
   assert(n_threads <= ParallelGCThreads,
-         err_msg("n_threads: %u > ParallelGCThreads: " UINTX_FORMAT, n_threads, ParallelGCThreads));
+         err_msg("n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads));
 
   // Make sure the LNC array is valid for the space.
   jbyte**   lowest_non_clean;
--- a/src/share/vm/gc/cms/parNewGeneration.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/cms/parNewGeneration.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -62,25 +62,25 @@
 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 #endif
 ParScanThreadState::ParScanThreadState(Space* to_space_,
-                                       ParNewGeneration* gen_,
+                                       ParNewGeneration* young_gen_,
                                        Generation* old_gen_,
                                        int thread_num_,
                                        ObjToScanQueueSet* work_queue_set_,
                                        Stack<oop, mtGC>* overflow_stacks_,
                                        size_t desired_plab_sz_,
                                        ParallelTaskTerminator& term_) :
-  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
+  _to_space(to_space_), _old_gen(old_gen_), _young_gen(young_gen_), _thread_num(thread_num_),
   _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
   _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
   _ageTable(false), // false ==> not the global age table, no perf data.
   _to_space_alloc_buffer(desired_plab_sz_),
-  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
-  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
-  _older_gen_closure(gen_, this),
+  _to_space_closure(young_gen_, this), _old_gen_closure(young_gen_, this),
+  _to_space_root_closure(young_gen_, this), _old_gen_root_closure(young_gen_, this),
+  _older_gen_closure(young_gen_, this),
   _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
-                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
+                      &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
                       work_queue_set_, &term_),
-  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
+  _is_alive_closure(young_gen_), _scan_weak_ref_closure(young_gen_, this),
   _keep_alive_closure(&_scan_weak_ref_closure),
   _strong_roots_time(0.0), _term_time(0.0)
 {
@@ -481,7 +481,6 @@
                                ParScanThreadState* par_scan_state) :
   OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
 {
-  assert(_g->level() == 0, "Optimized for youngest generation");
   _boundary = _g->reserved().end();
 }
 
@@ -566,11 +565,11 @@
   par_scan_state()->end_term_time();
 }
 
-ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen,
+ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen, Generation* old_gen,
                              HeapWord* young_old_boundary, ParScanThreadStateSet* state_set,
                              StrongRootsScope* strong_roots_scope) :
     AbstractGangTask("ParNewGeneration collection"),
-    _gen(gen), _old_gen(old_gen),
+    _young_gen(young_gen), _old_gen(old_gen),
     _young_old_boundary(young_old_boundary),
     _state_set(state_set),
     _strong_roots_scope(strong_roots_scope)
@@ -596,7 +595,7 @@
 
   par_scan_state.start_strong_roots();
   gch->gen_process_roots(_strong_roots_scope,
-                         _gen->level(),
+                         GenCollectedHeap::YoungGen,
                          true,  // Process younger gens, if any,
                                 // as strong roots.
                          GenCollectedHeap::SO_ScavengeCodeCache,
@@ -616,8 +615,8 @@
 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 #endif
 ParNewGeneration::
-ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
-  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
+ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
+  : DefNewGeneration(rs, initial_byte_size, "PCopy"),
   _overflow_list(NULL),
   _is_alive_closure(this),
   _plab_stats(YoungPLABSize, PLABWeight)
@@ -752,7 +751,7 @@
 private:
   virtual void work(uint worker_id);
 private:
-  ParNewGeneration&      _gen;
+  ParNewGeneration&      _young_gen;
   ProcessTask&           _task;
   Generation&            _old_gen;
   HeapWord*              _young_old_boundary;
@@ -760,12 +759,12 @@
 };
 
 ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
-                                               ParNewGeneration& gen,
+                                               ParNewGeneration& young_gen,
                                                Generation& old_gen,
                                                HeapWord* young_old_boundary,
                                                ParScanThreadStateSet& state_set)
   : AbstractGangTask("ParNewGeneration parallel reference processing"),
-    _gen(gen),
+    _young_gen(young_gen),
     _task(task),
     _old_gen(old_gen),
     _young_old_boundary(young_old_boundary),
@@ -806,12 +805,12 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   FlexibleWorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
-  _state_set.reset(workers->active_workers(), _generation.promotion_failed());
-  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
-                                 _generation.reserved().end(), _state_set);
+  _state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
+  ParNewRefProcTaskProxy rp_task(task, _young_gen, _old_gen,
+                                 _young_gen.reserved().end(), _state_set);
   workers->run_task(&rp_task);
   _state_set.reset(0 /* bad value in debug if not reset */,
-                   _generation.promotion_failed());
+                   _young_gen.promotion_failed());
 }
 
 void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
@@ -835,10 +834,10 @@
   ScanClosure(g, gc_barrier) {}
 
 EvacuateFollowersClosureGeneral::
-EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
+EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
                                 OopsInGenClosure* cur,
                                 OopsInGenClosure* older) :
-  _gch(gch), _level(level),
+  _gch(gch),
   _scan_cur_or_nonheap(cur), _scan_older(older)
 {}
 
@@ -846,10 +845,10 @@
   do {
     // Beware: this call will lead to closure applications via virtual
     // calls.
-    _gch->oop_since_save_marks_iterate(_level,
+    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen,
                                        _scan_cur_or_nonheap,
                                        _scan_older);
-  } while (!_gch->no_allocs_since_save_marks(_level));
+  } while (!_gch->no_allocs_since_save_marks(true /* include_young */));
 }
 
 
@@ -972,14 +971,14 @@
   ScanClosure               scan_without_gc_barrier(this, false);
   ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
   set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
-  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
+  EvacuateFollowersClosureGeneral evacuate_followers(gch,
     &scan_without_gc_barrier, &scan_with_gc_barrier);
   rp->setup_policy(clear_all_soft_refs);
   // Can  the mt_degree be set later (at run_task() time would be best)?
   rp->set_active_mt_degree(active_workers);
   ReferenceProcessorStats stats;
   if (rp->processing_is_mt()) {
-    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
+    ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
     stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                               &evacuate_followers, &task_executor,
                                               _gc_timer, _gc_tracer.gc_id());
@@ -1045,7 +1044,7 @@
 
   rp->set_enqueuing_is_done(true);
   if (rp->processing_is_mt()) {
-    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
+    ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
     rp->enqueue_discovered_references(&task_executor);
   } else {
     rp->enqueue_discovered_references(NULL);
@@ -1349,7 +1348,7 @@
   oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
   // Trim off a prefix of at most objsFromOverflow items
   Thread* tid = Thread::current();
-  size_t spin_count = (size_t)ParallelGCThreads;
+  size_t spin_count = ParallelGCThreads;
   size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
   for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
     // someone grabbed it before we did ...
@@ -1466,9 +1465,9 @@
     _ref_processor =
       new ReferenceProcessor(_reserved,                  // span
                              ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
-                             (uint) ParallelGCThreads,   // mt processing degree
+                             ParallelGCThreads,          // mt processing degree
                              refs_discovery_is_mt(),     // mt discovery
-                             (uint) ParallelGCThreads,   // mt discovery degree
+                             ParallelGCThreads,          // mt discovery degree
                              refs_discovery_is_atomic(), // atomic_discovery
                              NULL);                      // is_alive_non_header
   }
--- a/src/share/vm/gc/cms/parNewGeneration.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/cms/parNewGeneration.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -234,14 +234,14 @@
 
 class ParNewGenTask: public AbstractGangTask {
  private:
-  ParNewGeneration*            _gen;
+  ParNewGeneration*            _young_gen;
   Generation*                  _old_gen;
   HeapWord*                    _young_old_boundary;
   class ParScanThreadStateSet* _state_set;
   StrongRootsScope*            _strong_roots_scope;
 
 public:
-  ParNewGenTask(ParNewGeneration*      gen,
+  ParNewGenTask(ParNewGeneration*      young_gen,
                 Generation*            old_gen,
                 HeapWord*              young_old_boundary,
                 ParScanThreadStateSet* state_set,
@@ -264,11 +264,10 @@
 class EvacuateFollowersClosureGeneral: public VoidClosure {
  private:
   GenCollectedHeap* _gch;
-  int               _level;
   OopsInGenClosure* _scan_cur_or_nonheap;
   OopsInGenClosure* _scan_older;
  public:
-  EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
+  EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
                                   OopsInGenClosure* cur,
                                   OopsInGenClosure* older);
   virtual void do_void();
@@ -288,12 +287,14 @@
 // Implements AbstractRefProcTaskExecutor for ParNew.
 class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  private:
-  ParNewGeneration&      _generation;
+  ParNewGeneration&      _young_gen;
+  Generation&            _old_gen;
   ParScanThreadStateSet& _state_set;
  public:
-  ParNewRefProcTaskExecutor(ParNewGeneration& generation,
+  ParNewRefProcTaskExecutor(ParNewGeneration& young_gen,
+                            Generation& old_gen,
                             ParScanThreadStateSet& state_set)
-    : _generation(generation), _state_set(state_set)
+    : _young_gen(young_gen), _old_gen(old_gen), _state_set(state_set)
   { }
 
   // Executes a task using worker threads.
@@ -353,7 +354,7 @@
   void set_survivor_overflow(bool v) { _survivor_overflow = v; }
 
  public:
-  ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);
+  ParNewGeneration(ReservedSpace rs, size_t initial_byte_size);
 
   ~ParNewGeneration() {
     for (uint i = 0; i < ParallelGCThreads; i++)
--- a/src/share/vm/gc/cms/parOopClosures.inline.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/cms/parOopClosures.inline.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -72,7 +72,7 @@
                                         bool root_scan) {
   assert((!GenCollectedHeap::heap()->is_in_reserved(p) ||
           generation()->is_in_reserved(p))
-         && (generation()->level() == 0 || gc_barrier),
+         && (GenCollectedHeap::heap()->is_young_gen(generation()) || gc_barrier),
          "The gen must be right, and we must be doing the barrier "
          "in older generations.");
   T heap_oop = oopDesc::load_heap_oop(p);
--- a/src/share/vm/gc/cms/vmCMSOperations.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/cms/vmCMSOperations.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -198,8 +198,7 @@
     assert(SafepointSynchronize::is_at_safepoint(),
       "We can only be executing this arm of if at a safepoint");
     GCCauseSetter gccs(gch, _gc_cause);
-    gch->do_full_collection(gch->must_clear_all_soft_refs(),
-                            0 /* collect only youngest gen */);
+    gch->do_full_collection(gch->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
   } // Else no need for a foreground young gc
   assert((_gc_count_before < gch->total_collections()) ||
          (GC_locker::is_active() /* gc may have been skipped */
--- a/src/share/vm/gc/g1/collectionSetChooser.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/collectionSetChooser.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -107,7 +107,8 @@
     HeapRegion *curr = regions_at(index++);
     guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
     guarantee(!curr->is_young(), "should not be young!");
-    guarantee(!curr->is_humongous(), "should not be humongous!");
+    guarantee(!curr->is_pinned(),
+              err_msg("Pinned region should not be in collection set (index %u)", curr->hrm_index()));
     if (prev != NULL) {
       guarantee(order_regions(prev, curr) != 1,
                 err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
@@ -149,8 +150,8 @@
 
 
 void CollectionSetChooser::add_region(HeapRegion* hr) {
-  assert(!hr->is_humongous(),
-         "Humongous regions shouldn't be added to the collection set");
+  assert(!hr->is_pinned(),
+         err_msg("Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index()));
   assert(!hr->is_young(), "should not be young!");
   _regions.append(hr);
   _length++;
--- a/src/share/vm/gc/g1/collectionSetChooser.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/collectionSetChooser.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -103,13 +103,12 @@
   void sort_regions();
 
   // Determine whether to add the given region to the CSet chooser or
-  // not. Currently, we skip humongous regions (we never add them to
-  // the CSet, we only reclaim them during cleanup) and regions whose
-  // live bytes are over the threshold.
+  // not. Currently, we skip pinned regions and regions whose live
+  // bytes are over the threshold. Humongous regions may be reclaimed during cleanup.
   bool should_add(HeapRegion* hr) {
     assert(hr->is_marked(), "pre-condition");
     assert(!hr->is_young(), "should never consider young regions");
-    return !hr->is_humongous() &&
+    return !hr->is_pinned() &&
             hr->live_bytes() < _region_live_threshold_bytes;
   }
 
--- a/src/share/vm/gc/g1/concurrentMark.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/concurrentMark.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -30,6 +30,7 @@
 #include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ErgoVerbose.hpp"
 #include "gc/g1/g1Log.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
@@ -177,7 +178,7 @@
       // will have them as guarantees at the beginning / end of the bitmap
       // clearing to get some checking in the product.
       assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
-      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
+      assert(!_may_yield || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
     }
 
     return false;
@@ -518,7 +519,7 @@
   _markStack(this),
   // _finger set in set_non_marking_state
 
-  _max_worker_id((uint)ParallelGCThreads),
+  _max_worker_id(ParallelGCThreads),
   // _active_tasks set in set_non_marking_state
   // _tasks set inside the constructor
   _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
@@ -579,8 +580,8 @@
   _root_regions.init(_g1h, this);
 
   if (ConcGCThreads > ParallelGCThreads) {
-    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
-            "than ParallelGCThreads (" UINTX_FORMAT ").",
+    warning("Can't have more ConcGCThreads (%u) "
+            "than ParallelGCThreads (%u).",
             ConcGCThreads, ParallelGCThreads);
     return;
   }
@@ -604,20 +605,20 @@
     double sleep_factor =
                        (1.0 - marking_task_overhead) / marking_task_overhead;
 
-    FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
+    FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
     _sleep_factor             = sleep_factor;
     _marking_task_overhead    = marking_task_overhead;
   } else {
     // Calculate the number of parallel marking threads by scaling
     // the number of parallel GC threads.
-    uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
-    FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
+    uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
+    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
     _sleep_factor             = 0.0;
     _marking_task_overhead    = 1.0;
   }
 
   assert(ConcGCThreads > 0, "Should have been set");
-  _parallel_marking_threads = (uint) ConcGCThreads;
+  _parallel_marking_threads = ConcGCThreads;
   _max_parallel_marking_threads = _parallel_marking_threads;
 
   if (parallel_marking_threads() > 1) {
@@ -830,7 +831,7 @@
   // marking bitmap and getting it ready for the next cycle. During
   // this time no other cycle can start. So, let's make sure that this
   // is the case.
-  guarantee(!g1h->mark_in_progress(), "invariant");
+  guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
 
   ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
   ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
@@ -844,7 +845,7 @@
 
   // Repeat the asserts from above.
   guarantee(cmThread()->during_cycle(), "invariant");
-  guarantee(!g1h->mark_in_progress(), "invariant");
+  guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
 }
 
 class CheckBitmapClearHRClosure : public HeapRegionClosure {
@@ -1178,6 +1179,8 @@
 };
 
 void ConcurrentMark::scanRootRegions() {
+  double scan_start = os::elapsedTime();
+
   // Start of concurrent marking.
   ClassLoaderDataGraph::clear_claimed_marks();
 
@@ -1185,6 +1188,11 @@
   // at least one root region to scan. So, if it's false, we
   // should not attempt to do any further work.
   if (root_regions()->scan_in_progress()) {
+    if (G1Log::fine()) {
+      gclog_or_tty->gclog_stamp(concurrent_gc_id());
+      gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
+    }
+
     _parallel_marking_threads = calc_parallel_marking_threads();
     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
            "Maximum number of marking threads exceeded");
@@ -1194,6 +1202,11 @@
     _parallel_workers->set_active_workers(active_workers);
     _parallel_workers->run_task(&task);
 
+    if (G1Log::fine()) {
+      gclog_or_tty->gclog_stamp(concurrent_gc_id());
+      gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start);
+    }
+
     // It's possible that has_aborted() is true here without actually
     // aborting the survivor scan earlier. This is OK as it's
     // mainly used for sanity checking.
@@ -1254,7 +1267,7 @@
 
   // If a full collection has happened, we shouldn't do this.
   if (has_aborted()) {
-    g1h->set_marking_complete(); // So bitmap clearing isn't confused
+    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
     return;
   }
 
@@ -1783,7 +1796,7 @@
   const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
 
   bool doHeapRegion(HeapRegion *hr) {
-    if (hr->is_continues_humongous()) {
+    if (hr->is_continues_humongous() || hr->is_archive()) {
       return false;
     }
     // We use a claim value of zero here because all regions
@@ -1888,7 +1901,7 @@
 
   // If a full collection has happened, we shouldn't do this.
   if (has_aborted()) {
-    g1h->set_marking_complete(); // So bitmap clearing isn't confused
+    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
     return;
   }
 
@@ -1934,7 +1947,7 @@
   }
 
   size_t start_used_bytes = g1h->used();
-  g1h->set_marking_complete();
+  g1h->collector_state()->set_mark_in_progress(false);
 
   double count_end = os::elapsedTime();
   double this_final_counting_time = (count_end - start);
@@ -2756,7 +2769,7 @@
 
 void ConcurrentMark::verify_no_cset_oops() {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
-  if (!G1CollectedHeap::heap()->mark_in_progress()) {
+  if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
     return;
   }
 
@@ -2992,6 +3005,11 @@
 
 // abandon current marking iteration due to a Full GC
 void ConcurrentMark::abort() {
+  if (!cmThread()->during_cycle() || _has_aborted) {
+    // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
+    return;
+  }
+
   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
   // concurrent bitmap clearing.
   _nextMarkBitMap->clearAll();
@@ -3009,12 +3027,8 @@
   }
   _first_overflow_barrier_sync.abort();
   _second_overflow_barrier_sync.abort();
-  const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
-  if (!gc_id.is_undefined()) {
-    // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
-    // to detect that it was aborted. Only keep track of the first GC id that we aborted.
-    _aborted_gc_id = gc_id;
-   }
+  _aborted_gc_id = _g1h->gc_tracer_cm()->gc_id();
+  assert(!_aborted_gc_id.is_undefined(), "ConcurrentMark::abort() executed more than once?");
   _has_aborted = true;
 
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
--- a/src/share/vm/gc/g1/concurrentMarkThread.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/concurrentMarkThread.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -78,7 +78,19 @@
   }
 };
 
-
+// We want to avoid mixing the logging from the concurrent thread with the
+// logging from an STW GC. So, if necessary, join the STS to ensure that the
+// logging is done either before or after the STW logging.
+void ConcurrentMarkThread::cm_log(bool doit, bool join_sts, const char* fmt, ...) {
+  if (doit) {
+    SuspendibleThreadSetJoiner sts_joiner(join_sts);
+    va_list args;
+    va_start(args, fmt);
+    gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
+    gclog_or_tty->vprint_cr(fmt, args);
+    va_end(args);
+  }
+}
 
 void ConcurrentMarkThread::run() {
   initialize_in_thread();
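
For reference, a minimal standalone sketch of the pattern cm_log() uses above: a printf-style variadic wrapper that forwards its arguments through a va_list, with an optional scoped guard so the message is emitted either before or after any STW work. ScopedJoin below merely stands in for SuspendibleThreadSetJoiner, and std::vprintf for gclog_or_tty->vprint_cr; the real declaration also carries ATTRIBUTE_PRINTF.

    #include <cstdarg>
    #include <cstdio>

    struct ScopedJoin {                      // stand-in for SuspendibleThreadSetJoiner
      explicit ScopedJoin(bool active) { /* join the STS here if active */ }
      ~ScopedJoin()                    { /* and leave it here */ }
    };

    static void cm_log_sketch(bool doit, bool join_sts, const char* fmt, ...) {
      if (!doit) {
        return;
      }
      ScopedJoin guard(join_sts);            // serializes against STW logging
      va_list args;
      va_start(args, fmt);
      std::vprintf(fmt, args);               // the real code uses gclog_or_tty->vprint_cr
      va_end(args);
      std::printf("\n");
    }
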
@@ -110,28 +122,12 @@
       // without the root regions have been scanned which would be a
       // correctness issue.
 
-      double scan_start = os::elapsedTime();
       if (!cm()->has_aborted()) {
-        if (G1Log::fine()) {
-          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
-          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
-        }
-
         _cm->scanRootRegions();
-
-        double scan_end = os::elapsedTime();
-        if (G1Log::fine()) {
-          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
-          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]",
-                                 scan_end - scan_start);
-        }
       }
 
       double mark_start_sec = os::elapsedTime();
-      if (G1Log::fine()) {
-        gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
-        gclog_or_tty->print_cr("[GC concurrent-mark-start]");
-      }
+      cm_log(G1Log::fine(), true, "[GC concurrent-mark-start]");
 
       int iter = 0;
       do {
@@ -151,25 +147,15 @@
             os::sleep(current_thread, sleep_time_ms, false);
           }
 
-          if (G1Log::fine()) {
-            gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
-            gclog_or_tty->print_cr("[GC concurrent-mark-end, %1.7lf secs]",
-                                      mark_end_sec - mark_start_sec);
-          }
+          cm_log(G1Log::fine(), true, "[GC concurrent-mark-end, %1.7lf secs]", mark_end_sec - mark_start_sec);
 
           CMCheckpointRootsFinalClosure final_cl(_cm);
           VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */);
           VMThread::execute(&op);
         }
         if (cm()->restart_for_overflow()) {
-          if (G1TraceMarkStackOverflow) {
-            gclog_or_tty->print_cr("Restarting conc marking because of MS overflow "
-                                   "in remark (restart #%d).", iter);
-          }
-          if (G1Log::fine()) {
-            gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
-            gclog_or_tty->print_cr("[GC concurrent-mark-restart-for-overflow]");
-          }
+          cm_log(G1TraceMarkStackOverflow, true, "Restarting conc marking because of MS overflow in remark (restart #%d).", iter);
+          cm_log(G1Log::fine(), true, "[GC concurrent-mark-restart-for-overflow]");
         }
       } while (cm()->restart_for_overflow());
 
@@ -194,7 +180,7 @@
         // We don't want to update the marking status if a GC pause
         // is already underway.
         SuspendibleThreadSetJoiner sts_join;
-        g1h->set_marking_complete();
+        g1h->collector_state()->set_mark_in_progress(false);
       }
 
       // Check if cleanup set the free_regions_coming flag. If it
@@ -209,10 +195,7 @@
         // reclaimed by cleanup.
 
         double cleanup_start_sec = os::elapsedTime();
-        if (G1Log::fine()) {
-          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
-          gclog_or_tty->print_cr("[GC concurrent-cleanup-start]");
-        }
+        cm_log(G1Log::fine(), true, "[GC concurrent-cleanup-start]");
 
         // Now do the concurrent cleanup operation.
         _cm->completeCleanup();
@@ -229,11 +212,7 @@
         g1h->reset_free_regions_coming();
 
         double cleanup_end_sec = os::elapsedTime();
-        if (G1Log::fine()) {
-          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
-          gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf secs]",
-                                 cleanup_end_sec - cleanup_start_sec);
-        }
+        cm_log(G1Log::fine(), true, "[GC concurrent-cleanup-end, %1.7lf secs]", cleanup_end_sec - cleanup_start_sec);
       }
       guarantee(cm()->cleanup_list_is_empty(),
                 "at this point there should be no regions on the cleanup list");
@@ -266,13 +245,8 @@
         SuspendibleThreadSetJoiner sts_join;
         if (!cm()->has_aborted()) {
           g1_policy->record_concurrent_mark_cleanup_completed();
-        }
-      }
-
-      if (cm()->has_aborted()) {
-        if (G1Log::fine()) {
-          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
-          gclog_or_tty->print_cr("[GC concurrent-mark-abort]");
+        } else {
+          cm_log(G1Log::fine(), false, "[GC concurrent-mark-abort]");
         }
       }
 
--- a/src/share/vm/gc/g1/concurrentMarkThread.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/concurrentMarkThread.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -40,6 +40,7 @@
   double _vtime_accum;  // Accumulated virtual time.
 
   double _vtime_mark_accum;
+  void cm_log(bool doit, bool join_sts, const char* fmt, ...) ATTRIBUTE_PRINTF(4, 5);
 
  public:
   virtual void run();
--- a/src/share/vm/gc/g1/g1Allocator.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1Allocator.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -26,6 +26,7 @@
 #include "gc/g1/g1Allocator.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1MarkSweep.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
 
@@ -44,6 +45,8 @@
                                             HeapRegion** retained_old) {
   HeapRegion* retained_region = *retained_old;
   *retained_old = NULL;
+  assert(retained_region == NULL || !retained_region->is_archive(),
+         err_msg("Archive region should not be alloc region (index %u)", retained_region->hrm_index()));
 
   // We will discard the current GC alloc region if:
   // a) it's in the collection set (it can happen!),
@@ -65,7 +68,7 @@
     // we allocate to in the region sets. We'll re-add it later, when
     // it's retired again.
     _g1h->_old_set.remove(retained_region);
-    bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
+    bool during_im = _g1h->collector_state()->during_initial_mark_pause();
     retained_region->note_start_of_copying(during_im);
     old->set(retained_region);
     _g1h->_hr_printer.reuse(retained_region);
@@ -168,3 +171,153 @@
     }
   }
 }
+
+G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h) {
+  // Create the archive allocator, and also enable archive object checking
+  // in mark-sweep, since we will be creating archive regions.
+  G1ArchiveAllocator* result =  new G1ArchiveAllocator(g1h);
+  G1MarkSweep::enable_archive_object_check();
+  return result;
+}
+
+bool G1ArchiveAllocator::alloc_new_region() {
+  // Allocate the highest free region in the reserved heap,
+  // and add it to our list of allocated regions. It is marked
+  // archive and added to the old set.
+  HeapRegion* hr = _g1h->alloc_highest_free_region();
+  if (hr == NULL) {
+    return false;
+  }
+  assert(hr->is_empty(), err_msg("expected empty region (index %u)", hr->hrm_index()));
+  hr->set_archive();
+  _g1h->_old_set.add(hr);
+  _g1h->_hr_printer.alloc(hr, G1HRPrinter::Archive);
+  _allocated_regions.append(hr);
+  _allocation_region = hr;
+
+  // Set up _bottom and _max to begin allocating in the lowest
+  // min_region_size'd chunk of the allocated G1 region.
+  _bottom = hr->bottom();
+  _max = _bottom + HeapRegion::min_region_size_in_words();
+
+  // Tell mark-sweep that objects in this region are not to be marked.
+  G1MarkSweep::mark_range_archive(MemRegion(_bottom, HeapRegion::GrainWords));
+
+  // Since we've modified the old set, call update_sizes.
+  _g1h->g1mm()->update_sizes();
+  return true;
+}
+
+HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
+  assert(word_size != 0, "size must not be zero");
+  if (_allocation_region == NULL) {
+    if (!alloc_new_region()) {
+      return NULL;
+    }
+  }
+  HeapWord* old_top = _allocation_region->top();
+  assert(_bottom >= _allocation_region->bottom(),
+         err_msg("inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
+                 p2i(_bottom), p2i(_allocation_region->bottom())));
+  assert(_max <= _allocation_region->end(),
+         err_msg("inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
+                 p2i(_max), p2i(_allocation_region->end())));
+  assert(_bottom <= old_top && old_top <= _max,
+         err_msg("inconsistent allocation state: expected "
+                 PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
+                 p2i(_bottom), p2i(old_top), p2i(_max)));
+
+  // Allocate the next word_size words in the current allocation chunk.
+  // If allocation would cross the _max boundary, insert a filler and begin
+  // at the base of the next min_region_size'd chunk. Also advance to the next
+  // chunk if we don't yet cross the boundary, but the remainder would be too
+  // small to fill.
+  HeapWord* new_top = old_top + word_size;
+  size_t remainder = pointer_delta(_max, new_top);
+  if ((new_top > _max) ||
+      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
+    if (old_top != _max) {
+      size_t fill_size = pointer_delta(_max, old_top);
+      CollectedHeap::fill_with_object(old_top, fill_size);
+      _summary_bytes_used += fill_size * HeapWordSize;
+    }
+    _allocation_region->set_top(_max);
+    old_top = _bottom = _max;
+
+    // Check if we've just used up the last min_region_size'd chunk
+    // in the current region, and if so, allocate a new one.
+    if (_bottom != _allocation_region->end()) {
+      _max = _bottom + HeapRegion::min_region_size_in_words();
+    } else {
+      if (!alloc_new_region()) {
+        return NULL;
+      }
+      old_top = _allocation_region->bottom();
+    }
+  }
+  _allocation_region->set_top(old_top + word_size);
+  _summary_bytes_used += word_size * HeapWordSize;
+
+  return old_top;
+}
+
+void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
+                                          size_t end_alignment_in_bytes) {
+  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
+          err_msg("alignment " SIZE_FORMAT " too large", end_alignment_in_bytes));
+  assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize),
+         err_msg("alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize));
+
+  // If we've allocated nothing, simply return.
+  if (_allocation_region == NULL) {
+    return;
+  }
+
+  // If an end alignment was requested, insert filler objects.
+  if (end_alignment_in_bytes != 0) {
+    HeapWord* currtop = _allocation_region->top();
+    HeapWord* newtop = (HeapWord*)align_pointer_up(currtop, end_alignment_in_bytes);
+    size_t fill_size = pointer_delta(newtop, currtop);
+    if (fill_size != 0) {
+      if (fill_size < CollectedHeap::min_fill_size()) {
+        // If the required fill is smaller than we can represent,
+        // bump up to the next aligned address. We know we won't exceed the current
+        // region boundary because the max supported alignment is smaller than the min
+        // region size, and because the allocation code never leaves space smaller than
+        // the min_fill_size at the top of the current allocation region.
+        newtop = (HeapWord*)align_pointer_up(currtop + CollectedHeap::min_fill_size(),
+                                             end_alignment_in_bytes);
+        fill_size = pointer_delta(newtop, currtop);
+      }
+      HeapWord* fill = archive_mem_allocate(fill_size);
+      CollectedHeap::fill_with_objects(fill, fill_size);
+    }
+  }
+
+  // Loop through the allocated regions, and create MemRegions summarizing
+  // the allocated address range, combining contiguous ranges. Add the
+  // MemRegions to the GrowableArray provided by the caller.
+  int index = _allocated_regions.length() - 1;
+  assert(_allocated_regions.at(index) == _allocation_region,
+         err_msg("expected region %u at end of array, found %u",
+                 _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index()));
+  HeapWord* base_address = _allocation_region->bottom();
+  HeapWord* top = base_address;
+
+  while (index >= 0) {
+    HeapRegion* next = _allocated_regions.at(index);
+    HeapWord* new_base = next->bottom();
+    HeapWord* new_top = next->top();
+    if (new_base != top) {
+      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
+      base_address = new_base;
+    }
+    top = new_top;
+    index = index - 1;
+  }
+
+  assert(top != base_address, err_msg("zero-sized range, address " PTR_FORMAT, p2i(base_address)));
+  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
+  _allocated_regions.clear();
+  _allocation_region = NULL;
+};
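
The allocation loop above never lets an object straddle a min_region_size'd chunk, and it pads out any tail too small to hold a filler object, so the archived data stays parsable even if it is later mapped into a heap with a different region size. A minimal sketch of that boundary test, with all sizes in words (helper name and signature are illustrative only):

    // Returns true when an allocation of word_size must move to the next chunk.
    static bool crosses_chunk_boundary(size_t used_in_chunk, size_t word_size,
                                       size_t chunk_words, size_t min_fill_words) {
      size_t new_top = used_in_chunk + word_size;
      if (new_top > chunk_words) {
        return true;                                         // would straddle the chunk boundary
      }
      size_t remainder = chunk_words - new_top;
      return remainder != 0 && remainder < min_fill_words;   // tail too small to fill later
    }
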
--- a/src/share/vm/gc/g1/g1Allocator.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1Allocator.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -227,7 +227,7 @@
                           size_t word_sz,
                           AllocationContext_t context) {
     G1PLAB* buffer = alloc_buffer(dest, context);
-    if (_survivor_alignment_bytes == 0) {
+    if (_survivor_alignment_bytes == 0 || !dest.is_young()) {
       return buffer->allocate(word_sz);
     } else {
       return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
@@ -269,4 +269,72 @@
   virtual void waste(size_t& wasted, size_t& undo_wasted);
 };
 
+// G1ArchiveAllocator is used to allocate memory in archive
+// regions. Such regions are not modifiable by GC, being neither
+// scavenged nor compacted, nor even marked in the object header.
+// They can contain no pointers to non-archive heap regions.
+class G1ArchiveAllocator : public CHeapObj<mtGC> {
+
+protected:
+  G1CollectedHeap* _g1h;
+
+  // The current allocation region
+  HeapRegion* _allocation_region;
+
+  // Regions allocated for the current archive range.
+  GrowableArray<HeapRegion*> _allocated_regions;
+
+  // The number of bytes used in the current range.
+  size_t _summary_bytes_used;
+
+  // Current allocation window within the current region.
+  HeapWord* _bottom;
+  HeapWord* _top;
+  HeapWord* _max;
+
+  // Allocate a new region for this archive allocator.
+  // Allocation is from the top of the reserved heap downward.
+  bool alloc_new_region();
+
+public:
+  G1ArchiveAllocator(G1CollectedHeap* g1h) :
+    _g1h(g1h),
+    _allocation_region(NULL),
+    _allocated_regions((ResourceObj::set_allocation_type((address) &_allocated_regions,
+                                                         ResourceObj::C_HEAP),
+                        2), true /* C_Heap */),
+    _summary_bytes_used(0),
+    _bottom(NULL),
+    _top(NULL),
+    _max(NULL) { }
+
+  virtual ~G1ArchiveAllocator() {
+    assert(_allocation_region == NULL, "_allocation_region not NULL");
+  }
+
+  static G1ArchiveAllocator* create_allocator(G1CollectedHeap* g1h);
+
+  // Allocate memory for an individual object.
+  HeapWord* archive_mem_allocate(size_t word_size);
+
+  // Return the memory ranges used in the current archive, after
+  // aligning to the requested alignment.
+  void complete_archive(GrowableArray<MemRegion>* ranges,
+                        size_t end_alignment_in_bytes);
+
+  // The number of bytes allocated by this allocator.
+  size_t used() {
+    return _summary_bytes_used;
+  }
+
+  // Clear the count of bytes allocated in prior G1 regions. This
+  // must be done when recalculate_used() is used to reset the counter
+  // for the generic allocator, since it counts bytes in all G1
+  // regions, including those still associated with this allocator.
+  void clear_used() {
+    _summary_bytes_used = 0;
+  }
+
+};
+
 #endif // SHARE_VM_GC_G1_G1ALLOCATOR_HPP
--- a/src/share/vm/gc/g1/g1BiasedArray.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1BiasedArray.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_G1_G1BIASEDARRAY_HPP
 
 #include "memory/allocation.hpp"
+#include "memory/memRegion.hpp"
 #include "utilities/debug.hpp"
 
 // Implements the common base functionality for arrays that contain provisions
@@ -128,6 +129,14 @@
     return biased_base()[biased_index];
   }
 
+  // Return the index of the element of the given array that covers the given
+  // word in the heap.
+  idx_t get_index_by_address(HeapWord* value) const {
+    idx_t biased_index = ((uintptr_t)value) >> this->shift_by();
+    this->verify_biased_index(biased_index);
+    return biased_index - _bias;
+  }
+
   // Set the value of the array entry that corresponds to the given array.
   void set_by_address(HeapWord * address, T value) {
     idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
@@ -135,6 +144,18 @@
     biased_base()[biased_index] = value;
   }
 
+  // Set the value of all array entries that correspond to addresses
+  // in the specified MemRegion.
+  void set_by_address(MemRegion range, T value) {
+    idx_t biased_start = ((uintptr_t)range.start()) >> this->shift_by();
+    idx_t biased_last = ((uintptr_t)range.last()) >> this->shift_by();
+    this->verify_biased_index(biased_start);
+    this->verify_biased_index(biased_last);
+    for (idx_t i = biased_start; i <= biased_last; i++) {
+      biased_base()[i] = value;
+    }
+  }
+
 protected:
   // Returns the address of the element the given address maps to
   T* address_mapped_to(HeapWord* address) {
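
get_index_by_address() and the MemRegion variant of set_by_address() rely on the same biased-index arithmetic as the existing accessors: shift the address down by the per-element coverage and subtract the bias derived from the heap base. A small self-contained sketch of that mapping (names and the int payload are illustrative):

    #include <cstddef>
    #include <cstdint>

    struct BiasedIndexSketch {
      uintptr_t heap_base;   // lowest address covered by the array
      unsigned  shift;       // log2 of the bytes covered per element

      size_t bias() const { return (size_t)(heap_base >> shift); }
      size_t index_for(uintptr_t addr) const { return (size_t)(addr >> shift) - bias(); }

      // All addresses in [start, last] map to indices [index_for(start), index_for(last)].
      void set_range(uintptr_t start, uintptr_t last, int* base, int value) const {
        for (size_t i = index_for(start); i <= index_for(last); i++) {
          base[i] = value;
        }
      }
    };
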
--- a/src/share/vm/gc/g1/g1CollectedHeap.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -34,6 +34,7 @@
 #include "gc/g1/g1AllocRegion.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ErgoVerbose.hpp"
 #include "gc/g1/g1EvacFailure.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
@@ -404,7 +405,7 @@
 // can move in an incremental collection.
 bool G1CollectedHeap::is_scavengable(const void* p) {
   HeapRegion* hr = heap_region_containing(p);
-  return !hr->is_humongous();
+  return !hr->is_pinned();
 }
 
 // Private methods.
@@ -907,6 +908,207 @@
   return NULL;
 }
 
+void G1CollectedHeap::begin_archive_alloc_range() {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+  if (_archive_allocator == NULL) {
+    _archive_allocator = G1ArchiveAllocator::create_allocator(this);
+  }
+}
+
+bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
+  // Allocations in archive regions cannot be of a size that would be considered
+  // humongous even for a minimum-sized region, because G1 region sizes/boundaries
+  // may be different at archive-restore time.
+  return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
+}
+
+HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert(_archive_allocator != NULL, "_archive_allocator not initialized");
+  if (is_archive_alloc_too_large(word_size)) {
+    return NULL;
+  }
+  return _archive_allocator->archive_mem_allocate(word_size);
+}
+
+void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
+                                              size_t end_alignment_in_bytes) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert(_archive_allocator != NULL, "_archive_allocator not initialized");
+
+  // Call complete_archive to do the real work, filling in the MemRegion
+  // array with the archive regions.
+  _archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
+  delete _archive_allocator;
+  _archive_allocator = NULL;
+}
+
+bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
+  assert(ranges != NULL, "MemRegion array NULL");
+  assert(count != 0, "No MemRegions provided");
+  MemRegion reserved = _hrm.reserved();
+  for (size_t i = 0; i < count; i++) {
+    if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
+  assert(ranges != NULL, "MemRegion array NULL");
+  assert(count != 0, "No MemRegions provided");
+  MutexLockerEx x(Heap_lock);
+
+  MemRegion reserved = _hrm.reserved();
+  HeapWord* prev_last_addr = NULL;
+  HeapRegion* prev_last_region = NULL;
+
+  // Temporarily disable pretouching of heap pages. This interface is used
+  // when mmap'ing archived heap data in, so pre-touching is wasted.
+  FlagSetting fs(AlwaysPreTouch, false);
+
+  // Enable archive object checking in G1MarkSweep. We have to let it know
+  // about each archive range, so that objects in those ranges aren't marked.
+  G1MarkSweep::enable_archive_object_check();
+
+  // For each specified MemRegion range, allocate the corresponding G1
+  // regions and mark them as archive regions. We expect the ranges in
+  // ascending starting address order, without overlap.
+  for (size_t i = 0; i < count; i++) {
+    MemRegion curr_range = ranges[i];
+    HeapWord* start_address = curr_range.start();
+    size_t word_size = curr_range.word_size();
+    HeapWord* last_address = curr_range.last();
+    size_t commits = 0;
+
+    guarantee(reserved.contains(start_address) && reserved.contains(last_address),
+              err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
+              p2i(start_address), p2i(last_address)));
+    guarantee(start_address > prev_last_addr,
+              err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
+              p2i(start_address), p2i(prev_last_addr)));
+    prev_last_addr = last_address;
+
+    // Check for ranges that start in the same G1 region in which the previous
+    // range ended, and adjust the start address so we don't try to allocate
+    // the same region again. If the current range is entirely within that
+    // region, skip it, just adjusting the recorded top.
+    HeapRegion* start_region = _hrm.addr_to_region(start_address);
+    if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
+      start_address = start_region->end();
+      if (start_address > last_address) {
+        _allocator->increase_used(word_size * HeapWordSize);
+        start_region->set_top(last_address + 1);
+        continue;
+      }
+      start_region->set_top(start_address);
+      curr_range = MemRegion(start_address, last_address + 1);
+      start_region = _hrm.addr_to_region(start_address);
+    }
+
+    // Perform the actual region allocation, exiting if it fails.
+    // Then note how much new space we have allocated.
+    if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
+      return false;
+    }
+    _allocator->increase_used(word_size * HeapWordSize);
+    if (commits != 0) {
+      ergo_verbose1(ErgoHeapSizing,
+                    "attempt heap expansion",
+                    ergo_format_reason("allocate archive regions")
+                    ergo_format_byte("total size"),
+                    HeapRegion::GrainWords * HeapWordSize * commits);
+    }
+
+    // Mark each G1 region touched by the range as archive, add it to the old set,
+    // and set the allocation context and top.
+    HeapRegion* curr_region = _hrm.addr_to_region(start_address);
+    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+    prev_last_region = last_region;
+
+    while (curr_region != NULL) {
+      assert(curr_region->is_empty() && !curr_region->is_pinned(),
+             err_msg("Region already in use (index %u)", curr_region->hrm_index()));
+      _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
+      curr_region->set_allocation_context(AllocationContext::system());
+      curr_region->set_archive();
+      _old_set.add(curr_region);
+      if (curr_region != last_region) {
+        curr_region->set_top(curr_region->end());
+        curr_region = _hrm.next_region_in_heap(curr_region);
+      } else {
+        curr_region->set_top(last_address + 1);
+        curr_region = NULL;
+      }
+    }
+
+    // Notify mark-sweep of the archive range.
+    G1MarkSweep::mark_range_archive(curr_range);
+  }
+  return true;
+}
+
+void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
+  assert(ranges != NULL, "MemRegion array NULL");
+  assert(count != 0, "No MemRegions provided");
+  MemRegion reserved = _hrm.reserved();
+  HeapWord *prev_last_addr = NULL;
+  HeapRegion* prev_last_region = NULL;
+
+  // For each MemRegion, create filler objects, if needed, in the G1 regions
+  // that contain the address range. The address range actually within the
+  // MemRegion will not be modified. That is assumed to have been initialized
+  // elsewhere, probably via an mmap of archived heap data.
+  MutexLockerEx x(Heap_lock);
+  for (size_t i = 0; i < count; i++) {
+    HeapWord* start_address = ranges[i].start();
+    HeapWord* last_address = ranges[i].last();
+
+    assert(reserved.contains(start_address) && reserved.contains(last_address),
+           err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
+                   p2i(start_address), p2i(last_address)));
+    assert(start_address > prev_last_addr,
+           err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
+                   p2i(start_address), p2i(prev_last_addr)));
+
+    HeapRegion* start_region = _hrm.addr_to_region(start_address);
+    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+    HeapWord* bottom_address = start_region->bottom();
+
+    // Check for a range beginning in the same region in which the
+    // previous one ended.
+    if (start_region == prev_last_region) {
+      bottom_address = prev_last_addr + 1;
+    }
+
+    // Verify that the regions were all marked as archive regions by
+    // alloc_archive_regions.
+    HeapRegion* curr_region = start_region;
+    while (curr_region != NULL) {
+      guarantee(curr_region->is_archive(),
+                err_msg("Expected archive region at index %u", curr_region->hrm_index()));
+      if (curr_region != last_region) {
+        curr_region = _hrm.next_region_in_heap(curr_region);
+      } else {
+        curr_region = NULL;
+      }
+    }
+
+    prev_last_addr = last_address;
+    prev_last_region = last_region;
+
+    // Fill the memory below the allocated range with dummy object(s),
+    // if the region bottom does not match the range start, or if the previous
+    // range ended within the same G1 region, and there is a gap.
+    if (start_address != bottom_address) {
+      size_t fill_size = pointer_delta(start_address, bottom_address);
+      G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
+      _allocator->increase_used(fill_size * HeapWordSize);
+    }
+  }
+}
+
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                         uint* gc_count_before_ret,
                                                         uint* gclocker_retry_count_ret) {
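
Taken together, check_archive_addresses(), alloc_archive_regions() and fill_archive_regions() above are meant to be driven in that order at JVM init time, with the caller mmap'ing the archived data in between. A hedged usage fragment (the range values and the g1h variable are placeholders, not from this changeset):

    MemRegion ranges[1] = { MemRegion(archive_base, archive_word_size) };  // assumed inputs
    if (g1h->check_archive_addresses(ranges, 1) &&   // 1. ranges lie inside the reserved heap
        g1h->alloc_archive_regions(ranges, 1)) {     // 2. commit regions, mark them archive
      // 3. caller maps the archived heap data into 'ranges' here
      g1h->fill_archive_regions(ranges, 1);          // 4. pad region bottoms so they stay parsable
    }
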
@@ -1039,7 +1241,7 @@
   } else {
     HeapWord* result = humongous_obj_allocate(word_size, context);
     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
-      g1_policy()->set_initiate_conc_mark_if_possible();
+      collector_state()->set_initiate_conc_mark_if_possible(true);
     }
     return result;
   }
@@ -1131,6 +1333,8 @@
       }
     } else if (hr->is_continues_humongous()) {
       _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
+    } else if (hr->is_archive()) {
+      _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
     } else if (hr->is_old()) {
       _hr_printer->post_compaction(hr, G1HRPrinter::Old);
     } else {
@@ -1250,7 +1454,7 @@
       g1_policy()->stop_incremental_cset_building();
 
       tear_down_region_sets(false /* free_list_only */);
-      g1_policy()->set_gcs_are_young(true);
+      collector_state()->set_gcs_are_young(true);
 
       // See the comments in g1CollectedHeap.hpp and
       // G1CollectedHeap::ref_processing_init() about
@@ -1714,16 +1918,15 @@
   _ref_processor_stw(NULL),
   _bot_shared(NULL),
   _evac_failure_scan_stack(NULL),
-  _mark_in_progress(false),
   _cg1r(NULL),
   _g1mm(NULL),
   _refine_cte_cl(NULL),
-  _full_collection(false),
   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
   _humongous_reclaim_candidates(),
   _has_humongous_reclaim_candidates(false),
+  _archive_allocator(NULL),
   _free_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
@@ -1733,7 +1936,6 @@
   _surviving_young_words(NULL),
   _old_marking_cycles_started(0),
   _old_marking_cycles_completed(0),
-  _concurrent_cycle_started(false),
   _heap_summary_sent(false),
   _in_cset_fast_test(),
   _dirty_cards_region_list(NULL),
@@ -1750,9 +1952,13 @@
   _workers->initialize_workers();
 
   _allocator = G1Allocator::create_allocator(this);
-  _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
-
-  int n_queues = (int)ParallelGCThreads;
+  _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
+
+  // Override the default _filler_array_max_size so that no humongous filler
+  // objects are created.
+  _filler_array_max_size = _humongous_object_threshold_in_words;
+
+  uint n_queues = ParallelGCThreads;
   _task_queues = new RefToScanQueueSet(n_queues);
 
   uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
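
The humongous threshold is now computed through humongous_threshold_for() instead of being hard-coded; judging by the line it replaces, that helper is assumed to keep the usual half-a-region rule. Capping _filler_array_max_size at the same value keeps fill_with_objects() from ever creating a humongous filler object. A one-line sketch of the assumed helper:

    static size_t humongous_threshold_for_sketch(size_t region_size_in_words) {
      return region_size_in_words / 2;   // assumption: threshold stays at half the region size
    }
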
@@ -1762,7 +1968,7 @@
   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
 
-  for (int i = 0; i < n_queues; i++) {
+  for (uint i = 0; i < n_queues; i++) {
     RefToScanQueue* q = new RefToScanQueue();
     q->initialize();
     _task_queues->register_queue(i, q);
@@ -2064,11 +2270,11 @@
     new ReferenceProcessor(mr,    // span
                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
                                 // mt processing
-                           (uint) ParallelGCThreads,
+                           ParallelGCThreads,
                                 // degree of mt processing
                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
                                 // mt discovery
-                           (uint) MAX2(ParallelGCThreads, ConcGCThreads),
+                           MAX2(ParallelGCThreads, ConcGCThreads),
                                 // degree of mt discovery
                            false,
                                 // Reference discovery is not atomic
@@ -2081,11 +2287,11 @@
     new ReferenceProcessor(mr,    // span
                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
                                 // mt processing
-                           (uint) ParallelGCThreads,
+                           ParallelGCThreads,
                                 // degree of mt processing
                            (ParallelGCThreads > 1),
                                 // mt discovery
-                           (uint) ParallelGCThreads,
+                           ParallelGCThreads,
                                 // degree of mt discovery
                            true,
                                 // Reference discovery is atomic
@@ -2165,7 +2371,11 @@
 
 // Computes the sum of the storage used by the various regions.
 size_t G1CollectedHeap::used() const {
-  return _allocator->used();
+  size_t result = _allocator->used();
+  if (_archive_allocator != NULL) {
+    result += _archive_allocator->used();
+  }
+  return result;
 }
 
 size_t G1CollectedHeap::used_unlocked() const {
@@ -2288,7 +2498,7 @@
 }
 
 void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
-  _concurrent_cycle_started = true;
+  collector_state()->set_concurrent_cycle_started(true);
   _gc_timer_cm->register_gc_start(start_time);
 
   _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
@@ -2296,7 +2506,7 @@
 }
 
 void G1CollectedHeap::register_concurrent_cycle_end() {
-  if (_concurrent_cycle_started) {
+  if (collector_state()->concurrent_cycle_started()) {
     if (_cm->has_aborted()) {
       _gc_tracer_cm->report_concurrent_mode_failure();
     }
@@ -2305,13 +2515,13 @@
     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
 
     // Clear state variables to prepare for the next concurrent cycle.
-    _concurrent_cycle_started = false;
+    collector_state()->set_concurrent_cycle_started(false);
     _heap_summary_sent = false;
   }
 }
 
 void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
-  if (_concurrent_cycle_started) {
+  if (collector_state()->concurrent_cycle_started()) {
     // This function can be called when:
     //  the cleanup pause is run
     //  the concurrent cycle is aborted before the cleanup pause.
@@ -2325,22 +2535,6 @@
   }
 }
 
-G1YCType G1CollectedHeap::yc_type() {
-  bool is_young = g1_policy()->gcs_are_young();
-  bool is_initial_mark = g1_policy()->during_initial_mark_pause();
-  bool is_during_mark = mark_in_progress();
-
-  if (is_initial_mark) {
-    return InitialMark;
-  } else if (is_during_mark) {
-    return DuringMark;
-  } else if (is_young) {
-    return Normal;
-  } else {
-    return Mixed;
-  }
-}
-
 void G1CollectedHeap::collect(GCCause::Cause cause) {
   assert_heap_not_locked();
 
@@ -2594,7 +2788,7 @@
 
 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
   HeapRegion* result = _hrm.next_region_in_heap(from);
-  while (result != NULL && result->is_humongous()) {
+  while (result != NULL && result->is_pinned()) {
     result = _hrm.next_region_in_heap(result);
   }
   return result;
@@ -2902,6 +3096,31 @@
   size_t live_bytes() { return _live_bytes; }
 };
 
+class VerifyArchiveOopClosure: public OopClosure {
+public:
+  VerifyArchiveOopClosure(HeapRegion *hr) { }
+  void do_oop(narrowOop *p) { do_oop_work(p); }
+  void do_oop(      oop *p) { do_oop_work(p); }
+
+  template <class T> void do_oop_work(T *p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
+    guarantee(obj == NULL || G1MarkSweep::in_archive_range(obj),
+              err_msg("Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
+                      p2i(p), p2i(obj)));
+  }
+};
+
+class VerifyArchiveRegionClosure: public ObjectClosure {
+public:
+  VerifyArchiveRegionClosure(HeapRegion *hr) { }
+  // Verify that all object pointers are to archive regions.
+  void do_object(oop o) {
+    VerifyArchiveOopClosure checkOop(NULL);
+    assert(o != NULL, "Should not be here for NULL oops");
+    o->oop_iterate_no_header(&checkOop);
+  }
+};
+
 class VerifyRegionClosure: public HeapRegionClosure {
 private:
   bool             _par;
@@ -2921,6 +3140,13 @@
   }
 
   bool doHeapRegion(HeapRegion* r) {
+    // For archive regions, verify there are no heap pointers to
+    // non-pinned regions. For all others, verify liveness info.
+    if (r->is_archive()) {
+      VerifyArchiveRegionClosure verify_oop_pointers(r);
+      r->object_iterate(&verify_oop_pointers);
+      return true;
+    }
     if (!r->is_continues_humongous()) {
       bool failures = false;
       r->verify(_vo, &failures);
@@ -3105,7 +3331,7 @@
   switch (vo) {
   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
-  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
+  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked() && !hr->is_archive();
   default:                            ShouldNotReachHere();
   }
   return false; // keep some compilers happy
@@ -3116,7 +3342,10 @@
   switch (vo) {
   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
-  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
+  case VerifyOption_G1UseMarkWord: {
+    HeapRegion* hr = _hrm.addr_to_region((HeapWord*)obj);
+    return !obj->is_gc_marked() && !hr->is_archive();
+  }
   default:                            ShouldNotReachHere();
   }
   return false; // keep some compilers happy
@@ -3149,7 +3378,7 @@
   st->cr();
   st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
                "HS=humongous(starts), HC=humongous(continues), "
-               "CS=collection set, F=free, TS=gc time stamp, "
+               "CS=collection set, F=free, A=archive, TS=gc time stamp, "
                "PTAMS=previous top-at-mark-start, "
                "NTAMS=next top-at-mark-start)");
   PrintRegionClosure blk(st);
@@ -3251,6 +3480,28 @@
 }
 #endif // PRODUCT
 
+G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
+  YoungList* young_list = heap()->young_list();
+
+  size_t eden_used_bytes = young_list->eden_used_bytes();
+  size_t survivor_used_bytes = young_list->survivor_used_bytes();
+
+  size_t eden_capacity_bytes =
+    (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
+
+  VirtualSpaceSummary heap_summary = create_heap_space_summary();
+  return G1HeapSummary(heap_summary, used(), eden_used_bytes, eden_capacity_bytes, survivor_used_bytes);
+}
+
+void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
+  const G1HeapSummary& heap_summary = create_g1_heap_summary();
+  gc_tracer->report_gc_heap_summary(when, heap_summary);
+
+  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
+  gc_tracer->report_metaspace_summary(when, metaspace_summary);
+}
+
+
 G1CollectedHeap* G1CollectedHeap::heap() {
   CollectedHeap* heap = Universe::heap();
   assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
@@ -3587,8 +3838,8 @@
   gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
 
   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
-    .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
-    .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
+    .append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
+    .append(collector_state()->during_initial_mark_pause() ? " (initial-mark)" : "");
 
   gclog_or_tty->print("[%s", (const char*)gc_cause_str);
 }
@@ -3616,6 +3867,21 @@
   gclog_or_tty->flush();
 }
 
+void G1CollectedHeap::wait_for_root_region_scanning() {
+  double scan_wait_start = os::elapsedTime();
+  // We have to wait until the CM threads finish scanning the
+  // root regions as it's the only way to ensure that all the
+  // objects on them have been correctly scanned before we start
+  // moving them during the GC.
+  bool waited = _cm->root_regions()->wait_until_scan_finished();
+  double wait_time_ms = 0.0;
+  if (waited) {
+    double scan_wait_end = os::elapsedTime();
+    wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
+  }
+  g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
+}
+
 bool
 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   assert_at_safepoint(true /* should_be_vm_thread */);
@@ -3632,6 +3898,8 @@
   SvcGCMarker sgcm(SvcGCMarker::MINOR);
   ResourceMark rm;
 
+  wait_for_root_region_scanning();
+
   G1Log::update_level();
   print_heap_before_gc();
   trace_heap_before_gc(_gc_tracer_stw);
@@ -3645,29 +3913,29 @@
   g1_policy()->decide_on_conc_mark_initiation();
 
   // We do not allow initial-mark to be piggy-backed on a mixed GC.
-  assert(!g1_policy()->during_initial_mark_pause() ||
-          g1_policy()->gcs_are_young(), "sanity");
+  assert(!collector_state()->during_initial_mark_pause() ||
+          collector_state()->gcs_are_young(), "sanity");
 
   // We also do not allow mixed GCs during marking.
-  assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
+  assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
 
   // Record whether this pause is an initial mark. When the current
   // thread has completed its logging output and it's safe to signal
   // the CM thread, the flag's value in the policy has been reset.
-  bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
+  bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
 
   // Inner scope for scope based logging, timers, and stats collection
   {
     EvacuationInfo evacuation_info;
 
-    if (g1_policy()->during_initial_mark_pause()) {
+    if (collector_state()->during_initial_mark_pause()) {
       // We are about to start a marking cycle, so we increment the
       // full collection counter.
       increment_old_marking_cycles_started();
       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
     }
 
-    _gc_tracer_stw->report_yc_type(yc_type());
+    _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
 
     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
 
@@ -3677,7 +3945,7 @@
     workers()->set_active_workers(active_workers);
 
     double pause_start_sec = os::elapsedTime();
-    g1_policy()->phase_times()->note_gc_start(active_workers, mark_in_progress());
+    g1_policy()->phase_times()->note_gc_start(active_workers, collector_state()->mark_in_progress());
     log_gc_header();
 
     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
@@ -3753,25 +4021,12 @@
 
         g1_policy()->record_collection_pause_start(sample_start_time_sec);
 
-        double scan_wait_start = os::elapsedTime();
-        // We have to wait until the CM threads finish scanning the
-        // root regions as it's the only way to ensure that all the
-        // objects on them have been correctly scanned before we start
-        // moving them during the GC.
-        bool waited = _cm->root_regions()->wait_until_scan_finished();
-        double wait_time_ms = 0.0;
-        if (waited) {
-          double scan_wait_end = os::elapsedTime();
-          wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
-        }
-        g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
-
 #if YOUNG_LIST_VERBOSE
         gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
         _young_list->print();
 #endif // YOUNG_LIST_VERBOSE
 
-        if (g1_policy()->during_initial_mark_pause()) {
+        if (collector_state()->during_initial_mark_pause()) {
           concurrent_mark()->checkpointRootsInitialPre();
         }
 
@@ -3848,6 +4103,9 @@
 
         if (evacuation_failed()) {
           _allocator->set_used(recalculate_used());
+          if (_archive_allocator != NULL) {
+            _archive_allocator->clear_used();
+          }
           for (uint i = 0; i < ParallelGCThreads; i++) {
             if (_evacuation_failed_info_array[i].has_failed()) {
               _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
@@ -3859,12 +4117,12 @@
           _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
         }
 
-        if (g1_policy()->during_initial_mark_pause()) {
+        if (collector_state()->during_initial_mark_pause()) {
           // We have to do this before we notify the CM threads that
           // they can start working to make sure that all the
           // appropriate initialization is done on the CM object.
           concurrent_mark()->checkpointRootsInitialPost();
-          set_marking_started();
+          collector_state()->set_mark_in_progress(true);
           // Note that we don't actually trigger the CM thread at
           // this point. We do that later when we're sure that
           // the current thread has completed its logging output.
@@ -4343,7 +4601,7 @@
 
       pss.set_evac_failure_closure(&evac_failure_cl);
 
-      bool only_young = _g1h->g1_policy()->gcs_are_young();
+      bool only_young = _g1h->collector_state()->gcs_are_young();
 
       // Non-IM young GC.
       G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, &pss, rp);
@@ -4369,7 +4627,7 @@
 
       bool trace_metadata = false;
 
-      if (_g1h->g1_policy()->during_initial_mark_pause()) {
+      if (_g1h->collector_state()->during_initial_mark_pause()) {
         // We also need to mark copied objects.
         strong_root_cl = &scan_mark_root_cl;
         strong_cld_cl  = &scan_mark_cld_cl;
@@ -5021,7 +5279,7 @@
 
     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
 
-    if (_g1h->g1_policy()->during_initial_mark_pause()) {
+    if (_g1h->collector_state()->during_initial_mark_pause()) {
       // We also need to mark copied objects.
       copy_non_heap_cl = &copy_mark_non_heap_cl;
     }
@@ -5122,7 +5380,7 @@
 
     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
 
-    if (_g1h->g1_policy()->during_initial_mark_pause()) {
+    if (_g1h->collector_state()->during_initial_mark_pause()) {
       // We also need to mark copied objects.
       copy_non_heap_cl = &copy_mark_non_heap_cl;
     }
@@ -5234,7 +5492,7 @@
 
   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
 
-  if (g1_policy()->during_initial_mark_pause()) {
+  if (collector_state()->during_initial_mark_pause()) {
     // We also need to mark copied objects.
     copy_non_heap_cl = &copy_mark_non_heap_cl;
   }
@@ -5342,7 +5600,7 @@
     G1RootProcessor root_processor(this, n_workers);
     G1ParTask g1_par_task(this, _task_queues, &root_processor, n_workers);
     // InitialMark needs claim bits to keep track of the marked-through CLDs.
-    if (g1_policy()->during_initial_mark_pause()) {
+    if (collector_state()->during_initial_mark_pause()) {
       ClassLoaderDataGraph::clear_claimed_marks();
     }
 
@@ -5598,7 +5856,7 @@
   // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
   // we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
   // if we happen to be in that state.
-  if (mark_in_progress() || !_cmThread->in_progress()) {
+  if (collector_state()->mark_in_progress() || !_cmThread->in_progress()) {
     res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
   }
   if (!res_p || !res_n) {
@@ -6169,13 +6427,18 @@
       assert(!r->is_young(), "we should not come across young regions");
 
       if (r->is_humongous()) {
-        // We ignore humongous regions, we left the humongous set unchanged
+        // We ignore humongous regions. We left the humongous set unchanged.
       } else {
         // Objects that were compacted would have ended up on regions
-        // that were previously old or free.
+        // that were previously old or free.  Archive regions (which are
+        // old) will not have been touched.
         assert(r->is_free() || r->is_old(), "invariant");
-        // We now consider them old, so register as such.
-        r->set_old();
+        // We now consider them old, so register as such. Leave
+        // archive regions set that way, however, while still adding
+        // them to the old set.
+        if (!r->is_archive()) {
+          r->set_old();
+        }
         _old_set->add(r);
       }
       _total_used += r->used();
@@ -6201,6 +6464,9 @@
 
   if (!free_list_only) {
     _allocator->set_used(cl.total_used());
+    if (_archive_allocator != NULL) {
+      _archive_allocator->clear_used();
+    }
   }
   assert(_allocator->used_unlocked() == recalculate_used(),
          err_msg("inconsistent _allocator->used_unlocked(), "
@@ -6279,7 +6545,7 @@
         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
         check_bitmaps("Old Region Allocation", new_alloc_region);
       }
-      bool during_im = g1_policy()->during_initial_mark_pause();
+      bool during_im = collector_state()->during_initial_mark_pause();
       new_alloc_region->note_start_of_copying(during_im);
       return new_alloc_region;
     }
@@ -6290,7 +6556,7 @@
 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
                                              size_t allocated_bytes,
                                              InCSetState dest) {
-  bool during_im = g1_policy()->during_initial_mark_pause();
+  bool during_im = collector_state()->during_initial_mark_pause();
   alloc_region->note_end_of_copying(during_im);
   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
   if (dest.is_young()) {
@@ -6301,6 +6567,25 @@
   _hr_printer.retire(alloc_region);
 }
 
+HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
+  bool expanded = false;
+  uint index = _hrm.find_highest_free(&expanded);
+
+  if (index != G1_NO_HRM_INDEX) {
+    if (expanded) {
+      ergo_verbose1(ErgoHeapSizing,
+                    "attempt heap expansion",
+                    ergo_format_reason("requested address range outside heap bounds")
+                    ergo_format_byte("region size"),
+                    HeapRegion::GrainWords * HeapWordSize);
+    }
+    _hrm.allocate_free_regions_starting_at(index, 1);
+    return region_at(index);
+  }
+  return NULL;
+}
+
+
 // Heap region set verification
 
 class VerifyRegionListsClosure : public HeapRegionClosure {
@@ -6337,6 +6622,9 @@
       assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
       _old_count.increment(1u, hr->capacity());
     } else {
+      // There are no other valid region types. Check for one invalid
+      // one we can identify: pinned without old or humongous set.
+      assert(!hr->is_pinned(), err_msg("Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index()));
       ShouldNotReachHere();
     }
     return false;
--- a/src/share/vm/gc/g1/g1CollectedHeap.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1CollectedHeap.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -31,6 +31,7 @@
 #include "gc/g1/g1AllocationContext.hpp"
 #include "gc/g1/g1Allocator.hpp"
 #include "gc/g1/g1BiasedArray.hpp"
+#include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1HRPrinter.hpp"
 #include "gc/g1/g1InCSetState.hpp"
 #include "gc/g1/g1MonitoringSupport.hpp"
@@ -187,6 +188,7 @@
   friend class SurvivorGCAllocRegion;
   friend class OldGCAllocRegion;
   friend class G1Allocator;
+  friend class G1ArchiveAllocator;
 
   // Closures used in implementation.
   friend class G1ParScanThreadState;
@@ -249,6 +251,9 @@
   // Class that handles the different kinds of allocations.
   G1Allocator* _allocator;
 
+  // Class that handles archive allocation ranges.
+  G1ArchiveAllocator* _archive_allocator;
+
   // Statistics for each allocation context
   AllocationContextStats _allocation_context_stats;
 
@@ -328,6 +333,9 @@
   // (d) cause == _g1_humongous_allocation
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 
+  // indicates whether we are in young or mixed GC mode
+  G1CollectorState _collector_state;
+
   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
   // concurrent cycles) we have started.
   volatile uint _old_marking_cycles_started;
@@ -336,7 +344,6 @@
   // concurrent cycles) we have completed.
   volatile uint _old_marking_cycles_completed;
 
-  bool _concurrent_cycle_started;
   bool _heap_summary_sent;
 
   // This is a non-product method that is helpful for testing. It is
@@ -367,6 +374,8 @@
   void log_gc_header();
   void log_gc_footer(double pause_time_sec);
 
+  void trace_heap(GCWhen::Type when, const GCTracer* tracer);
+
   // These are macros so that, if the assert fires, we get the correct
   // line number, file, etc.
 
@@ -571,6 +580,10 @@
   void retire_gc_alloc_region(HeapRegion* alloc_region,
                               size_t allocated_bytes, InCSetState dest);
 
+  // Allocate the highest free region in the reserved heap. This will commit
+  // regions as necessary.
+  HeapRegion* alloc_highest_free_region();
+
   // - if explicit_gc is true, the GC is for a System.gc() or a heap
   //   inspection request and should collect the entire heap
   // - if clear_all_soft_refs is true, all soft references should be
@@ -701,8 +714,6 @@
   void register_concurrent_cycle_end();
   void trace_heap_after_concurrent_cycle();
 
-  G1YCType yc_type();
-
   G1HRPrinter* hr_printer() { return &_hr_printer; }
 
   // Frees a non-humongous region by initializing its contents and
@@ -728,6 +739,44 @@
   void free_humongous_region(HeapRegion* hr,
                              FreeRegionList* free_list,
                              bool par);
+
+  // Facility for allocating in 'archive' regions in high heap memory and
+  // recording the allocated ranges. These should all be called from the
+  // VM thread at safepoints, without the heap lock held. They can be used
+  // to create and archive a set of heap regions which can be mapped at the
+  // same fixed addresses in a subsequent JVM invocation.
+  void begin_archive_alloc_range();
+
+  // Check if the requested size would be too large for an archive allocation.
+  bool is_archive_alloc_too_large(size_t word_size);
+
+  // Allocate memory of the requested size from the archive region. This will
+  // return NULL if the size is too large or if no memory is available. It
+  // does not trigger a garbage collection.
+  HeapWord* archive_mem_allocate(size_t word_size);
+
+  // Optionally aligns the end address and returns the allocated ranges in
+  // an array of MemRegions in order of ascending addresses.
+  void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
+                               size_t end_alignment_in_bytes = 0);
+
+  // Facility for allocating a fixed range within the heap and marking
+  // the containing regions as 'archive'. For use at JVM init time, when the
+  // caller may mmap archived heap data at the specified range(s).
+  // Verify that the MemRegions specified in the argument array are within the
+  // reserved heap.
+  bool check_archive_addresses(MemRegion* range, size_t count);
+
+  // Commit the appropriate G1 regions containing the specified MemRegions
+  // and mark them as 'archive' regions. The regions in the array must be
+  // non-overlapping and in order of ascending address.
+  bool alloc_archive_regions(MemRegion* range, size_t count);
+
+  // Insert any required filler objects in the G1 regions around the specified
+  // ranges to make the regions parseable. This must be called after
+  // alloc_archive_regions, and after class loading has occurred.
+  void fill_archive_regions(MemRegion* range, size_t count);
+
 protected:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
@@ -756,6 +805,8 @@
                                 bool*          succeeded,
                                 GCCause::Cause gc_cause);
 
+  void wait_for_root_region_scanning();
+
   // The guts of the incremental collection pause, executed by the vm
   // thread. It returns false if it is unable to do the collection due
   // to the GC locker being active, true otherwise
@@ -791,7 +842,6 @@
   // The concurrent marker (and the thread it runs in.)
   ConcurrentMark* _cm;
   ConcurrentMarkThread* _cmThread;
-  bool _mark_in_progress;
 
   // The concurrent refiner.
   ConcurrentG1Refine* _cg1r;
@@ -1019,6 +1069,8 @@
     return CollectedHeap::G1CollectedHeap;
   }
 
+  G1CollectorState* collector_state() { return &_collector_state; }
+
   // The current policy object for the collector.
   G1CollectorPolicy* g1_policy() const { return _g1_policy; }
 
@@ -1391,6 +1443,11 @@
     return word_size > _humongous_object_threshold_in_words;
   }
 
+  // Returns the humongous threshold for a specific region size
+  static size_t humongous_threshold_for(size_t region_size) {
+    return (region_size / 2);
+  }
+
   // Update mod union table with the set of dirty cards.
   void updateModUnion();
 
@@ -1399,17 +1456,6 @@
   // bits.
   void markModUnionRange(MemRegion mr);
 
-  // Records the fact that a marking phase is no longer in progress.
-  void set_marking_complete() {
-    _mark_in_progress = false;
-  }
-  void set_marking_started() {
-    _mark_in_progress = true;
-  }
-  bool mark_in_progress() {
-    return _mark_in_progress;
-  }
-
   // Print the maximum heap capacity.
   virtual size_t max_capacity() const;
 
@@ -1448,21 +1494,23 @@
 
   // Determine if an object is dead, given the object and also
   // the region to which the object belongs. An object is dead
-  // iff a) it was not allocated since the last mark and b) it
-  // is not marked.
+  // iff a) it was not allocated since the last mark, b) it
+  // is not marked, and c) it is not in an archive region.
   bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
     return
       !hr->obj_allocated_since_prev_marking(obj) &&
-      !isMarkedPrev(obj);
+      !isMarkedPrev(obj) &&
+      !hr->is_archive();
   }
 
   // This function returns true when an object has been
   // around since the previous marking and hasn't yet
-  // been marked during this marking.
+  // been marked during this marking, and is not in an archive region.
   bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
     return
       !hr->obj_allocated_since_next_marking(obj) &&
-      !isMarkedNext(obj);
+      !isMarkedNext(obj) &&
+      !hr->is_archive();
   }
 
   // Determine if an object is dead, given only the object itself.
@@ -1522,14 +1570,6 @@
   void redirty_logged_cards();
   // Verification
 
-  // The following is just to alert the verification code
-  // that a full collection has occurred and that the
-  // remembered sets are no longer up to date.
-  bool _full_collection;
-  void set_full_collection() { _full_collection = true;}
-  void clear_full_collection() {_full_collection = false;}
-  bool full_collection() {return _full_collection;}
-
   // Perform any cleanup actions necessary before allowing a verification.
   virtual void prepare_for_verify();
 
@@ -1565,6 +1605,8 @@
   bool is_obj_dead_cond(const oop obj,
                         const VerifyOption vo) const;
 
+  G1HeapSummary create_g1_heap_summary();
+
   // Printing
 
   virtual void print_on(outputStream* st) const;
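
A rough sketch, not taken from the patch, of how the dump-time side of the archive facility declared above might be driven, with the restore-time sequence shown as a comment. It uses only the member functions added in this hunk but depends on VM-internal types (G1CollectedHeap, MemRegion, GrowableArray, HeapWord) and a hypothetical copy_archived_objects() helper, so it is illustrative rather than compilable on its own.

// Dump time: run on the VM thread at a safepoint, as the comments above require.
static void dump_archive_ranges(GrowableArray<MemRegion>* ranges, size_t word_size) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  g1h->begin_archive_alloc_range();
  if (!g1h->is_archive_alloc_too_large(word_size)) {
    HeapWord* p = g1h->archive_mem_allocate(word_size);  // NULL if no space; never triggers a GC
    if (p != NULL) {
      copy_archived_objects(p, word_size);                // hypothetical helper, not in the patch
    }
  }
  g1h->end_archive_alloc_range(ranges, 0 /* end_alignment_in_bytes */);
}

// Restore time, during JVM init, with the recorded ranges read back from the archive:
//   if (g1h->check_archive_addresses(range_array, count) &&
//       g1h->alloc_archive_regions(range_array, count)) {
//     ... mmap the archived heap data into the committed ranges ...
//     g1h->fill_archive_regions(range_array, count);     // after class loading
//   }
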
--- a/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -29,6 +29,7 @@
 #include "gc/g1/g1AllocRegion.inline.hpp"
 #include "gc/g1/g1CollectedHeap.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegionManager.inline.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
@@ -288,9 +289,9 @@
     _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);
 
     // Now check if G1EvacuationFailureALot is enabled for the current GC type.
-    const bool gcs_are_young = g1_policy()->gcs_are_young();
-    const bool during_im = g1_policy()->during_initial_mark_pause();
-    const bool during_marking = mark_in_progress();
+    const bool gcs_are_young = collector_state()->gcs_are_young();
+    const bool during_im = collector_state()->during_initial_mark_pause();
+    const bool during_marking = collector_state()->mark_in_progress();
 
     _evacuation_failure_alot_for_current_gc &=
       evacuation_failure_alot_for_gc_type(gcs_are_young,
--- a/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -107,22 +107,11 @@
 
   _pause_time_target_ms((double) MaxGCPauseMillis),
 
-  _gcs_are_young(true),
-
-  _during_marking(false),
-  _in_marking_window(false),
-  _in_marking_window_im(false),
-
   _recent_prev_end_times_for_all_gcs_sec(
                                 new TruncatedSeq(NumPrevPausesForHeuristics)),
 
   _recent_avg_pause_time_ratio(0.0),
 
-  _initiate_conc_mark_if_possible(false),
-  _during_initial_mark_pause(false),
-  _last_young_gc(false),
-  _last_gc_was_young(false),
-
   _eden_used_bytes_before_gc(0),
   _survivor_used_bytes_before_gc(0),
   _heap_used_bytes_before_gc(0),
@@ -334,6 +323,8 @@
   }
 }
 
+G1CollectorState* G1CollectorPolicy::collector_state() { return _g1->collector_state(); }
+
 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
         _min_desired_young_length(0), _max_desired_young_length(0) {
   if (FLAG_IS_CMDLINE(NewRatio)) {
@@ -552,7 +543,7 @@
 
   uint young_list_target_length = 0;
   if (adaptive_young_list_length()) {
-    if (gcs_are_young()) {
+    if (collector_state()->gcs_are_young()) {
       young_list_target_length =
                         calculate_young_list_target_length(rs_lengths,
                                                            base_min_length,
@@ -594,7 +585,7 @@
                                                      uint desired_min_length,
                                                      uint desired_max_length) {
   assert(adaptive_young_list_length(), "pre-condition");
-  assert(gcs_are_young(), "only call this for young GCs");
+  assert(collector_state()->gcs_are_young(), "only call this for young GCs");
 
   // In case some edge-condition makes the desired max length too small...
   if (desired_max_length <= desired_min_length) {
@@ -697,7 +688,7 @@
   for (HeapRegion * r = _recorded_survivor_head;
        r != NULL && r != _recorded_survivor_tail->get_next_young_region();
        r = r->get_next_young_region()) {
-    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
+    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
   }
   return survivor_regions_evac_time;
 }
@@ -782,7 +773,7 @@
   _full_collection_start_sec = os::elapsedTime();
   record_heap_size_info_at_start(true /* full */);
   // Release the future to-space so that it is available for compaction into.
-  _g1->set_full_collection();
+  collector_state()->set_full_collection(true);
 }
 
 void G1CollectorPolicy::record_full_collection_end() {
@@ -796,16 +787,16 @@
 
   update_recent_gc_times(end_sec, full_gc_time_ms);
 
-  _g1->clear_full_collection();
+  collector_state()->set_full_collection(false);
 
   // "Nuke" the heuristics that control the young/mixed GC
   // transitions and make sure we start with young GCs after the Full GC.
-  set_gcs_are_young(true);
-  _last_young_gc = false;
-  clear_initiate_conc_mark_if_possible();
-  clear_during_initial_mark_pause();
-  _in_marking_window = false;
-  _in_marking_window_im = false;
+  collector_state()->set_gcs_are_young(true);
+  collector_state()->set_last_young_gc(false);
+  collector_state()->set_initiate_conc_mark_if_possible(false);
+  collector_state()->set_during_initial_mark_pause(false);
+  collector_state()->set_in_marking_window(false);
+  collector_state()->set_in_marking_window_im(false);
 
   _short_lived_surv_rate_group->start_adding_regions();
   // also call this on any additional surv rate groups
@@ -845,7 +836,7 @@
   _collection_set_bytes_used_before = 0;
   _bytes_copied_during_gc = 0;
 
-  _last_gc_was_young = false;
+  collector_state()->set_last_gc_was_young(false);
 
   // do that for any other surv rate groups
   _short_lived_surv_rate_group->stop_adding_regions();
@@ -856,15 +847,15 @@
 
 void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                    mark_init_elapsed_time_ms) {
-  _during_marking = true;
-  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
-  clear_during_initial_mark_pause();
+  collector_state()->set_during_marking(true);
+  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
+  collector_state()->set_during_initial_mark_pause(false);
   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 }
 
 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
   _mark_remark_start_sec = os::elapsedTime();
-  _during_marking = false;
+  collector_state()->set_during_marking(false);
 }
 
 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
@@ -882,8 +873,8 @@
 }
 
 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
-  _last_young_gc = true;
-  _in_marking_window = false;
+  collector_state()->set_last_young_gc(true);
+  collector_state()->set_in_marking_window(false);
 }
 
 void G1CollectorPolicy::record_concurrent_pause() {
@@ -904,7 +895,7 @@
   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
 
   if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
-    if (gcs_are_young() && !_last_young_gc) {
+    if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
       ergo_verbose5(ErgoConcCycles,
         "request concurrent cycle initiation",
         ergo_format_reason("occupancy higher than threshold")
@@ -959,14 +950,14 @@
   }
 #endif // PRODUCT
 
-  last_pause_included_initial_mark = during_initial_mark_pause();
+  last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
   if (last_pause_included_initial_mark) {
     record_concurrent_mark_init_end(0.0);
   } else if (need_to_start_conc_mark("end of GC")) {
     // Note: this might have already been set, if during the last
     // pause we decided to start a cycle but at the beginning of
     // this pause we decided to postpone it. That's OK.
-    set_initiate_conc_mark_if_possible();
+    collector_state()->set_initiate_conc_mark_if_possible(true);
   }
 
   _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
@@ -1028,37 +1019,37 @@
     }
   }
 
-  bool new_in_marking_window = _in_marking_window;
+  bool new_in_marking_window = collector_state()->in_marking_window();
   bool new_in_marking_window_im = false;
   if (last_pause_included_initial_mark) {
     new_in_marking_window = true;
     new_in_marking_window_im = true;
   }
 
-  if (_last_young_gc) {
+  if (collector_state()->last_young_gc()) {
     // This is supposed to be the "last young GC" before we start
     // doing mixed GCs. Here we decide whether to start mixed GCs or not.
 
     if (!last_pause_included_initial_mark) {
       if (next_gc_should_be_mixed("start mixed GCs",
                                   "do not start mixed GCs")) {
-        set_gcs_are_young(false);
+        collector_state()->set_gcs_are_young(false);
       }
     } else {
       ergo_verbose0(ErgoMixedGCs,
                     "do not start mixed GCs",
                     ergo_format_reason("concurrent cycle is about to start"));
     }
-    _last_young_gc = false;
+    collector_state()->set_last_young_gc(false);
   }
 
-  if (!_last_gc_was_young) {
+  if (!collector_state()->last_gc_was_young()) {
     // This is a mixed GC. Here we decide whether to continue doing
     // mixed GCs or not.
 
     if (!next_gc_should_be_mixed("continue mixed GCs",
                                  "do not continue mixed GCs")) {
-      set_gcs_are_young(true);
+      collector_state()->set_gcs_are_young(true);
     }
   }
 
@@ -1077,7 +1068,7 @@
     double cost_per_entry_ms = 0.0;
     if (cards_scanned > 10) {
       cost_per_entry_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
-      if (_last_gc_was_young) {
+      if (collector_state()->last_gc_was_young()) {
         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
       } else {
         _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
@@ -1087,7 +1078,7 @@
     if (_max_rs_lengths > 0) {
       double cards_per_entry_ratio =
         (double) cards_scanned / (double) _max_rs_lengths;
-      if (_last_gc_was_young) {
+      if (collector_state()->last_gc_was_young()) {
         _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
       } else {
         _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
@@ -1119,7 +1110,7 @@
 
     if (copied_bytes > 0) {
       cost_per_byte_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
-      if (_in_marking_window) {
+      if (collector_state()->in_marking_window()) {
         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
       } else {
         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
@@ -1162,8 +1153,8 @@
     _rs_lengths_seq->add((double) _max_rs_lengths);
   }
 
-  _in_marking_window = new_in_marking_window;
-  _in_marking_window_im = new_in_marking_window_im;
+  collector_state()->set_in_marking_window(new_in_marking_window);
+  collector_state()->set_in_marking_window_im(new_in_marking_window_im);
   _free_regions_at_end_of_collection = _g1->num_free_regions();
   update_young_list_target_length();
 
@@ -1301,7 +1292,7 @@
 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
   size_t rs_length = predict_rs_length_diff();
   size_t card_num;
-  if (gcs_are_young()) {
+  if (collector_state()->gcs_are_young()) {
     card_num = predict_young_card_num(rs_length);
   } else {
     card_num = predict_non_young_card_num(rs_length);
@@ -1467,7 +1458,7 @@
                   ergo_format_reason("requested by GC cause")
                   ergo_format_str("GC cause"),
                   GCCause::to_string(gc_cause));
-    set_initiate_conc_mark_if_possible();
+    collector_state()->set_initiate_conc_mark_if_possible(true);
     return true;
   } else {
     ergo_verbose1(ErgoConcCycles,
@@ -1484,13 +1475,13 @@
   // We are about to decide on whether this pause will be an
   // initial-mark pause.
 
-  // First, during_initial_mark_pause() should not be already set. We
+  // First, collector_state()->during_initial_mark_pause() should not be already set. We
   // will set it here if we have to. However, it should be cleared by
   // the end of the pause (it's only set for the duration of an
   // initial-mark pause).
-  assert(!during_initial_mark_pause(), "pre-condition");
+  assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
 
-  if (initiate_conc_mark_if_possible()) {
+  if (collector_state()->initiate_conc_mark_if_possible()) {
     // We had noticed on a previous pause that the heap occupancy has
     // gone over the initiating threshold and we should start a
     // concurrent marking cycle. So we might initiate one.
@@ -1501,10 +1492,10 @@
       // it has completed the last one. So we can go ahead and
       // initiate a new cycle.
 
-      set_during_initial_mark_pause();
+      collector_state()->set_during_initial_mark_pause(true);
       // We do not allow mixed GCs during marking.
-      if (!gcs_are_young()) {
-        set_gcs_are_young(true);
+      if (!collector_state()->gcs_are_young()) {
+        collector_state()->set_gcs_are_young(true);
         ergo_verbose0(ErgoMixedGCs,
                       "end mixed GCs",
                       ergo_format_reason("concurrent cycle is about to start"));
@@ -1512,7 +1503,7 @@
 
       // And we can now clear initiate_conc_mark_if_possible() as
       // we've already acted on it.
-      clear_initiate_conc_mark_if_possible();
+      collector_state()->set_initiate_conc_mark_if_possible(false);
 
       ergo_verbose0(ErgoConcCycles,
                   "initiate concurrent cycle",
@@ -1686,7 +1677,7 @@
   // retiring the current allocation region) or a concurrent
   // refine thread (RSet sampling).
 
-  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
+  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
   size_t used_bytes = hr->used();
   _inc_cset_recorded_rs_lengths += rs_length;
   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
@@ -1721,7 +1712,7 @@
   _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
 
   double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
-  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
+  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
   double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
   _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
 
@@ -1914,9 +1905,9 @@
                 ergo_format_ms("target pause time"),
                 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
 
-  _last_gc_was_young = gcs_are_young() ? true : false;
+  collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
 
-  if (_last_gc_was_young) {
+  if (collector_state()->last_gc_was_young()) {
     _trace_young_gen_time_data.increment_young_collection_count();
   } else {
     _trace_young_gen_time_data.increment_mixed_collection_count();
@@ -1967,7 +1958,7 @@
   // Set the start of the non-young choice time.
   double non_young_start_time_sec = young_end_time_sec;
 
-  if (!gcs_are_young()) {
+  if (!collector_state()->gcs_are_young()) {
     CollectionSetChooser* cset_chooser = _collectionSetChooser;
     cset_chooser->verify();
     const uint min_old_cset_length = calc_min_old_cset_length();
@@ -2013,7 +2004,7 @@
         break;
       }
 
-      double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
+      double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
       if (check_time_remaining) {
         if (predicted_time_ms > time_remaining_ms) {
           // Too expensive for the current CSet.
--- a/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -27,6 +27,7 @@
 
 #include "gc/g1/collectionSetChooser.hpp"
 #include "gc/g1/g1Allocator.hpp"
+#include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1MMUTracker.hpp"
 #include "gc/shared/collectorPolicy.hpp"
 
@@ -193,9 +194,6 @@
 
   double _stop_world_start;
 
-  // indicates whether we are in young or mixed GC mode
-  bool _gcs_are_young;
-
   uint _young_list_target_length;
   uint _young_list_fixed_length;
 
@@ -203,12 +201,6 @@
   // locker is active. This should be >= _young_list_target_length;
   uint _young_list_max_length;
 
-  bool _last_gc_was_young;
-
-  bool _during_marking;
-  bool _in_marking_window;
-  bool _in_marking_window_im;
-
   SurvRateGroup* _short_lived_surv_rate_group;
   SurvRateGroup* _survivor_surv_rate_group;
   // add here any more surv rate groups
@@ -218,10 +210,6 @@
   double _reserve_factor;
   uint   _reserve_regions;
 
-  bool during_marking() {
-    return _during_marking;
-  }
-
   enum PredictionConstants {
     TruncatedSeqLength = 10
   };
@@ -363,7 +351,7 @@
   }
 
   double predict_rs_scan_time_ms(size_t card_num) {
-    if (gcs_are_young()) {
+    if (collector_state()->gcs_are_young()) {
       return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
     } else {
       return predict_mixed_rs_scan_time_ms(card_num);
@@ -390,7 +378,7 @@
   }
 
   double predict_object_copy_time_ms(size_t bytes_to_copy) {
-    if (_in_marking_window && !_in_marking_window_im) {
+    if (collector_state()->during_concurrent_mark()) {
       return predict_object_copy_time_ms_during_cm(bytes_to_copy);
     } else {
       return (double) bytes_to_copy *
@@ -428,7 +416,7 @@
   double predict_survivor_regions_evac_time();
 
   void cset_regions_freed() {
-    bool propagate = _last_gc_was_young && !_in_marking_window;
+    bool propagate = collector_state()->should_propagate();
     _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
     _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
     // also call it on any more surv rate groups
@@ -552,33 +540,6 @@
     return _recent_avg_pause_time_ratio;
   }
 
-  // At the end of a pause we check the heap occupancy and we decide
-  // whether we will start a marking cycle during the next pause. If
-  // we decide that we want to do that, we will set this parameter to
-  // true. So, this parameter will stay true between the end of a
-  // pause and the beginning of a subsequent pause (not necessarily
-  // the next one, see the comments on the next field) when we decide
-  // that we will indeed start a marking cycle and do the initial-mark
-  // work.
-  volatile bool _initiate_conc_mark_if_possible;
-
-  // If initiate_conc_mark_if_possible() is set at the beginning of a
-  // pause, it is a suggestion that the pause should start a marking
-  // cycle by doing the initial-mark work. However, it is possible
-  // that the concurrent marking thread is still finishing up the
-  // previous marking cycle (e.g., clearing the next marking
-  // bitmap). If that is the case we cannot start a new cycle and
-  // we'll have to wait for the concurrent marking thread to finish
-  // what it is doing. In this case we will postpone the marking cycle
-  // initiation decision for the next pause. When we eventually decide
-  // to start a cycle, we will set _during_initial_mark_pause which
-  // will stay true until the end of the initial-mark pause and it's
-  // the condition that indicates that a pause is doing the
-  // initial-mark work.
-  volatile bool _during_initial_mark_pause;
-
-  bool _last_young_gc;
-
   // This set of variables tracks the collector efficiency, in order to
   // determine whether we should initiate a new marking.
   double _cur_mark_stop_world_time_ms;
@@ -647,6 +608,8 @@
     return CollectorPolicy::G1CollectorPolicyKind;
   }
 
+  G1CollectorState* collector_state();
+
   G1GCPhaseTimes* phase_times() const { return _phase_times; }
 
   // Check the current value of the young list RSet lengths and
@@ -786,14 +749,6 @@
   void print_collection_set(HeapRegion* list_head, outputStream* st);
 #endif // !PRODUCT
 
-  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible;  }
-  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true;  }
-  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
-
-  bool during_initial_mark_pause()      { return _during_initial_mark_pause;  }
-  void set_during_initial_mark_pause()  { _during_initial_mark_pause = true;  }
-  void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
-
   // This sets the initiate_conc_mark_if_possible() flag to start a
   // new cycle, as long as we are not already in one. It's best if it
   // is called during a safepoint when the test whether a cycle is in
@@ -837,13 +792,6 @@
     return _young_list_max_length;
   }
 
-  bool gcs_are_young() {
-    return _gcs_are_young;
-  }
-  void set_gcs_are_young(bool gcs_are_young) {
-    _gcs_are_young = gcs_are_young;
-  }
-
   bool adaptive_young_list_length() {
     return _young_gen_sizer->adaptive_young_list_length();
   }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/g1/g1CollectorState.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1COLLECTORSTATE_HPP
+#define SHARE_VM_GC_G1_G1COLLECTORSTATE_HPP
+
+#include "utilities/globalDefinitions.hpp"
+#include "gc/g1/g1YCTypes.hpp"
+
+// Various state variables that indicate
+// the phase of the G1 collection.
+class G1CollectorState VALUE_OBJ_CLASS_SPEC {
+  // Indicates whether we are in "full young" or "mixed" GC mode.
+  bool _gcs_are_young;
+  // Was the last GC "young"?
+  bool _last_gc_was_young;
+  // Is this the "last young GC" before we start doing mixed GCs?
+  // Set after a concurrent mark has completed.
+  bool _last_young_gc;
+
+  // If initiate_conc_mark_if_possible() is set at the beginning of a
+  // pause, it is a suggestion that the pause should start a marking
+  // cycle by doing the initial-mark work. However, it is possible
+  // that the concurrent marking thread is still finishing up the
+  // previous marking cycle (e.g., clearing the next marking
+  // bitmap). If that is the case we cannot start a new cycle and
+  // we'll have to wait for the concurrent marking thread to finish
+  // what it is doing. In this case we will postpone the marking cycle
+  // initiation decision for the next pause. When we eventually decide
+  // to start a cycle, we will set _during_initial_mark_pause which
+  // will stay true until the end of the initial-mark pause and it's
+  // the condition that indicates that a pause is doing the
+  // initial-mark work.
+  volatile bool _during_initial_mark_pause;
+
+  // At the end of a pause we check the heap occupancy and we decide
+  // whether we will start a marking cycle during the next pause. If
+  // we decide that we want to do that, we will set this parameter to
+  // true. So, this parameter will stay true between the end of a
+  // pause and the beginning of a subsequent pause (not necessarily
+  // the next one, see the comments on the next field) when we decide
+  // that we will indeed start a marking cycle and do the initial-mark
+  // work.
+  volatile bool _initiate_conc_mark_if_possible;
+
+  // NOTE: if some of these are synonyms for others,
+  // the redundant fields should be eliminated. XXX
+  bool _during_marking;
+  bool _mark_in_progress;
+  bool _in_marking_window;
+  bool _in_marking_window_im;
+
+  bool _concurrent_cycle_started;
+  bool _full_collection;
+
+  public:
+    G1CollectorState() :
+      _gcs_are_young(true),
+      _last_gc_was_young(false),
+      _last_young_gc(false),
+
+      _during_initial_mark_pause(false),
+      _initiate_conc_mark_if_possible(false),
+
+      _during_marking(false),
+      _mark_in_progress(false),
+      _in_marking_window(false),
+      _in_marking_window_im(false),
+      _concurrent_cycle_started(false),
+      _full_collection(false) {}
+
+  // Setters
+  void set_gcs_are_young(bool v) { _gcs_are_young = v; }
+  void set_last_gc_was_young(bool v) { _last_gc_was_young = v; }
+  void set_last_young_gc(bool v) { _last_young_gc = v; }
+  void set_during_initial_mark_pause(bool v) { _during_initial_mark_pause = v; }
+  void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; }
+  void set_during_marking(bool v) { _during_marking = v; }
+  void set_mark_in_progress(bool v) { _mark_in_progress = v; }
+  void set_in_marking_window(bool v) { _in_marking_window = v; }
+  void set_in_marking_window_im(bool v) { _in_marking_window_im = v; }
+  void set_concurrent_cycle_started(bool v) { _concurrent_cycle_started = v; }
+  void set_full_collection(bool v) { _full_collection = v; }
+
+  // Getters
+  bool gcs_are_young() { return _gcs_are_young; }
+  bool last_gc_was_young() { return _last_gc_was_young; }
+  bool last_young_gc() { return _last_young_gc; }
+  bool during_initial_mark_pause() { return _during_initial_mark_pause; }
+  bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
+  bool during_marking() { return _during_marking; }
+  bool mark_in_progress() { return _mark_in_progress; }
+  bool in_marking_window() { return _in_marking_window; }
+  bool in_marking_window_im() { return _in_marking_window_im; }
+  bool concurrent_cycle_started() { return _concurrent_cycle_started; }
+  bool full_collection() { return _full_collection; }
+
+  // Composite booleans (clients worry about flickering)
+  bool during_concurrent_mark() {
+    return (_in_marking_window && !_in_marking_window_im);
+  }
+
+  bool should_propagate() { // XXX should have a more suitable state name or abstraction for this
+    return (_last_young_gc && !_in_marking_window);
+  }
+
+  G1YCType yc_type() {
+    if (during_initial_mark_pause()) {
+      return InitialMark;
+    } else if (mark_in_progress()) {
+      return DuringMark;
+    } else if (gcs_are_young()) {
+      return Normal;
+    } else {
+      return Mixed;
+    }
+  }
+};
+
+#endif /* SHARE_VM_GC_G1_G1COLLECTORSTATE_HPP */
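
A small standalone model (plain C++, not VM code) of how the flags collected in G1CollectorState combine into a young-collection type, mirroring the priority order in yc_type() above: an initial-mark pause wins, then an in-progress mark, then young versus mixed. The struct and enum below are simplified stand-ins, not the HotSpot types.

#include <cstdio>

enum YCType { Normal, InitialMark, DuringMark, Mixed };

struct CollectorStateModel {
  bool gcs_are_young            = true;
  bool during_initial_mark_pause = false;
  bool mark_in_progress          = false;

  // Same decision order as G1CollectorState::yc_type() above.
  YCType yc_type() const {
    if (during_initial_mark_pause) return InitialMark;
    if (mark_in_progress)          return DuringMark;
    return gcs_are_young ? Normal : Mixed;
  }
};

int main() {
  CollectorStateModel s;
  s.during_initial_mark_pause = true;   // a pause that also starts marking
  std::printf("%d\n", s.yc_type());     // 1 (InitialMark)

  s.during_initial_mark_pause = false;
  s.mark_in_progress = true;
  s.gcs_are_young = false;
  std::printf("%d\n", s.yc_type());     // 2 (DuringMark), even though GCs are mixed
  return 0;
}
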
--- a/src/share/vm/gc/g1/g1EvacFailure.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1EvacFailure.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -26,6 +26,7 @@
 #include "gc/g1/concurrentMark.inline.hpp"
 #include "gc/g1/dirtyCardQueue.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1EvacFailure.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
 #include "gc/g1/g1_globals.hpp"
@@ -186,10 +187,10 @@
   }
 
   bool doHeapRegion(HeapRegion *hr) {
-    bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
-    bool during_conc_mark = _g1h->mark_in_progress();
+    bool during_initial_mark = _g1h->collector_state()->during_initial_mark_pause();
+    bool during_conc_mark = _g1h->collector_state()->mark_in_progress();
 
-    assert(!hr->is_humongous(), "sanity");
+    assert(!hr->is_pinned(), err_msg("Unexpected pinned region at index %u", hr->hrm_index()));
     assert(hr->in_collection_set(), "bad CS");
 
     if (_hrclaimer->claim_region(hr->hrm_index())) {
--- a/src/share/vm/gc/g1/g1HRPrinter.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1HRPrinter.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -54,6 +54,7 @@
     case SingleHumongous:    return "SingleH";
     case StartsHumongous:    return "StartsH";
     case ContinuesHumongous: return "ContinuesH";
+    case Archive:            return "Archive";
     default:                 ShouldNotReachHere();
   }
   // trying to keep the Windows compiler happy
--- a/src/share/vm/gc/g1/g1HRPrinter.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1HRPrinter.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -52,7 +52,8 @@
     Old,
     SingleHumongous,
     StartsHumongous,
-    ContinuesHumongous
+    ContinuesHumongous,
+    Archive
   } RegionType;
 
   typedef enum {
--- a/src/share/vm/gc/g1/g1MarkSweep.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1MarkSweep.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -57,6 +57,9 @@
 
 class HeapRegion;
 
+bool G1MarkSweep::_archive_check_enabled = false;
+G1ArchiveRegionMap G1MarkSweep::_archive_region_map;
+
 void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                       bool clear_all_softrefs) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
@@ -212,7 +215,7 @@
         // point all the oops to the new location
         MarkSweep::adjust_pointers(obj);
       }
-    } else {
+    } else if (!r->is_pinned()) {
       // This really ought to be "as_CompactibleSpace"...
       r->adjust_pointers();
     }
@@ -275,7 +278,7 @@
         }
         hr->reset_during_compaction();
       }
-    } else {
+    } else if (!hr->is_pinned()) {
       hr->compact();
     }
     return false;
@@ -298,6 +301,26 @@
 
 }
 
+void G1MarkSweep::enable_archive_object_check() {
+  assert(!_archive_check_enabled, "archive range check already enabled");
+  _archive_check_enabled = true;
+  size_t length = Universe::heap()->max_capacity();
+  _archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
+                                 (HeapWord*)Universe::heap()->base() + length,
+                                 HeapRegion::GrainBytes);
+}
+
+void G1MarkSweep::mark_range_archive(MemRegion range) {
+  assert(_archive_check_enabled, "archive range check not enabled");
+  _archive_region_map.set_by_address(range, true);
+}
+
+bool G1MarkSweep::in_archive_range(oop object) {
+  // This is the out-of-line part of the is_archive_object test, done separately
+  // to avoid additional performance impact when the check is not enabled.
+  return _archive_region_map.get_by_address((HeapWord*)object);
+}
+
 void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   g1h->heap_region_iterate(blk);
@@ -357,7 +380,7 @@
     } else {
       assert(hr->is_continues_humongous(), "Invalid humongous.");
     }
-  } else {
+  } else if (!hr->is_pinned()) {
     prepare_for_compaction(hr, hr->end());
   }
   return false;
--- a/src/share/vm/gc/g1/g1MarkSweep.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1MarkSweep.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -44,6 +44,7 @@
 //
 // Class unloading will only occur when a full gc is invoked.
 class G1PrepareCompactClosure;
+class G1ArchiveRegionMap;
 
 class G1MarkSweep : AllStatic {
  public:
@@ -54,7 +55,22 @@
   static STWGCTimer* gc_timer() { return GenMarkSweep::_gc_timer; }
   static SerialOldTracer* gc_tracer() { return GenMarkSweep::_gc_tracer; }
 
+  // Create the _archive_region_map which is used to identify archive objects.
+  static void enable_archive_object_check();
+
+  // Mark the regions containing the specified address range as archive regions.
+  static void mark_range_archive(MemRegion range);
+
+  // Check if an object is in an archive region using the _archive_region_map.
+  static bool in_archive_range(oop object);
+
+  // Check if archive object checking is enabled, to avoid calling in_archive_range
+  // unnecessarily.
+  static bool archive_check_enabled() { return G1MarkSweep::_archive_check_enabled; }
+
  private:
+  static bool _archive_check_enabled;
+  static G1ArchiveRegionMap  _archive_region_map;
 
   // Mark live objects
   static void mark_sweep_phase1(bool& marked_for_deopt,
@@ -93,4 +109,12 @@
   bool doHeapRegion(HeapRegion* hr);
 };
 
+// G1ArchiveRegionMap is a boolean array used to mark G1 regions as
+// archive regions.  This allows a quick check for whether an object
+// should not be marked because it is in an archive region.
+class G1ArchiveRegionMap : public G1BiasedMappedArray<bool> {
+protected:
+  bool default_value() const { return false; }
+};
+
 #endif // SHARE_VM_GC_G1_G1MARKSWEEP_HPP
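
G1ArchiveRegionMap above is a G1BiasedMappedArray<bool>: one flag per G1 region, indexed by address, so in_archive_range() is a constant-time lookup. Below is a minimal standalone model of that biased lookup in plain C++ (not VM code); the class name, base address, and region size are invented for illustration.

#include <cstdint>
#include <cstdio>
#include <vector>

class RegionFlagMap {
  uintptr_t _base;
  size_t    _region_size;
  std::vector<bool> _flags;   // default value is false, like default_value() above

  size_t index_for(uintptr_t addr) const { return (addr - _base) / _region_size; }

 public:
  RegionFlagMap(uintptr_t base, size_t length, size_t region_size)
    : _base(base), _region_size(region_size), _flags(length / region_size, false) {}

  // Mark every region overlapping [start, end) -- the set_by_address() idea.
  void set_by_address(uintptr_t start, uintptr_t end, bool v) {
    for (size_t i = index_for(start); i <= index_for(end - 1); i++) {
      _flags[i] = v;
    }
  }
  // O(1) per-object check -- the get_by_address() idea behind in_archive_range().
  bool get_by_address(uintptr_t addr) const { return _flags[index_for(addr)]; }
};

int main() {
  const uintptr_t heap_base   = 0x100000;   // invented values for illustration
  const size_t    region_size = 0x1000;     // stand-in for HeapRegion::GrainBytes
  RegionFlagMap map(heap_base, 16 * region_size, region_size);
  map.set_by_address(heap_base + 3 * region_size,
                     heap_base + 5 * region_size, true);   // regions 3..4 are "archive"
  std::printf("%d %d\n",
              (int)map.get_by_address(heap_base + 3 * region_size + 8),   // 1
              (int)map.get_by_address(heap_base + 7 * region_size));      // 0
  return 0;
}
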
--- a/src/share/vm/gc/g1/g1OopClosures.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1OopClosures.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -51,7 +51,7 @@
   _worker_id = par_scan_state->queue_num();
 
   assert(_worker_id < ParallelGCThreads,
-         err_msg("The given worker id %u must be less than the number of threads " UINTX_FORMAT, _worker_id, ParallelGCThreads));
+         err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, ParallelGCThreads));
 }
 
 // Generate G1 specialized oop_oop_iterate functions.
--- a/src/share/vm/gc/g1/g1RemSet.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1RemSet.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -627,7 +627,7 @@
 void G1RemSet::prepare_for_verify() {
   if (G1HRRSFlushLogBuffersOnVerify &&
       (VerifyBeforeGC || VerifyAfterGC)
-      &&  (!_g1->full_collection() || G1VerifyRSetsDuringFullGC)) {
+      &&  (!_g1->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC)) {
     cleanupHRRS();
     _g1->set_refine_cte_cl_concurrency(false);
     if (SafepointSynchronize::is_at_safepoint()) {
--- a/src/share/vm/gc/g1/g1RootProcessor.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1RootProcessor.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -30,6 +30,7 @@
 #include "gc/g1/bufferingOopClosure.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/g1RemSet.inline.hpp"
 #include "gc/g1/g1RootProcessor.hpp"
@@ -199,7 +200,7 @@
   // as implicitly live).
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SATBFiltering, worker_i);
-    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->mark_in_progress()) {
+    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->collector_state()->mark_in_progress()) {
       JavaThread::satb_mark_queue_set().filter_thread_buffers();
     }
   }
--- a/src/share/vm/gc/g1/g1StringDedupThread.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1StringDedupThread.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -23,12 +23,14 @@
  */
 
 #include "precompiled.hpp"
+#include "classfile/stringTable.hpp"
 #include "gc/g1/g1Log.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1StringDedupQueue.hpp"
 #include "gc/g1/g1StringDedupTable.hpp"
 #include "gc/g1/g1StringDedupThread.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
+#include "oops/oop.inline.hpp"
 #include "runtime/atomic.inline.hpp"
 
 G1StringDedupThread* G1StringDedupThread::_thread = NULL;
@@ -55,11 +57,36 @@
   return _thread;
 }
 
+class G1StringDedupSharedClosure: public OopClosure {
+ private:
+  G1StringDedupStat& _stat;
+
+ public:
+  G1StringDedupSharedClosure(G1StringDedupStat& stat) : _stat(stat) {}
+
+  virtual void do_oop(oop* p) { ShouldNotReachHere(); }
+  virtual void do_oop(narrowOop* p) {
+    oop java_string = oopDesc::load_decode_heap_oop(p);
+    G1StringDedupTable::deduplicate(java_string, _stat);
+  }
+};
+
+// The CDS archive does not include the string deduplication table. Only the string
+// table is saved in the archive. The shared strings from the CDS archive need to be
+// added to the string deduplication table before deduplication occurs. That is
+// done at the beginning of the G1StringDedupThread (see G1StringDedupThread::run()
+// below).
+void G1StringDedupThread::deduplicate_shared_strings(G1StringDedupStat& stat) {
+  G1StringDedupSharedClosure sharedStringDedup(stat);
+  StringTable::shared_oops_do(&sharedStringDedup);
+}
+
 void G1StringDedupThread::run() {
   G1StringDedupStat total_stat;
 
   initialize_in_thread();
   wait_for_universe_init();
+  deduplicate_shared_strings(total_stat);
 
   // Main loop
   for (;;) {
--- a/src/share/vm/gc/g1/g1StringDedupThread.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1StringDedupThread.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -52,6 +52,8 @@
   static G1StringDedupThread* thread();
 
   virtual void run();
+
+  void deduplicate_shared_strings(G1StringDedupStat& stat);
 };
 
 #endif // SHARE_VM_GC_G1_G1STRINGDEDUPTHREAD_HPP
--- a/src/share/vm/gc/g1/g1_globals.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1_globals.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -25,8 +25,14 @@
 #include "precompiled.hpp"
 #include "gc/g1/g1_globals.hpp"
 
-G1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, \
-         MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG,     \
-         MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_EXPERIMENTAL_FLAG, \
+G1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, \
+         MATERIALIZE_PD_DEVELOPER_FLAG, \
+         MATERIALIZE_PRODUCT_FLAG, \
+         MATERIALIZE_PD_PRODUCT_FLAG,     \
+         MATERIALIZE_DIAGNOSTIC_FLAG, \
+         MATERIALIZE_EXPERIMENTAL_FLAG, \
          MATERIALIZE_NOTPRODUCT_FLAG,  \
-         MATERIALIZE_MANAGEABLE_FLAG, MATERIALIZE_PRODUCT_RW_FLAG)
+         MATERIALIZE_MANAGEABLE_FLAG, \
+         MATERIALIZE_PRODUCT_RW_FLAG, \
+         IGNORE_RANGE, \
+         IGNORE_CONSTRAINT)
--- a/src/share/vm/gc/g1/g1_globals.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/g1_globals.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -30,16 +30,19 @@
 // Defines all globals flags used by the garbage-first compiler.
 //
 
-#define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw) \
+#define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw, range, constraint) \
                                                                             \
   product(uintx, G1ConfidencePercent, 50,                                   \
           "Confidence level for MMU/pause predictions")                     \
+          range(0, 100)                                                     \
                                                                             \
   develop(intx, G1MarkingOverheadPercent, 0,                                \
           "Overhead of concurrent marking")                                 \
+          range(0, 100)                                                     \
                                                                             \
   develop(intx, G1MarkingVerboseLevel, 0,                                   \
           "Level (0-4) of verboseness of the marking code")                 \
+          range(0, 4)                                                       \
                                                                             \
   develop(bool, G1TraceMarkStackOverflow, false,                            \
           "If true, extra debugging code for CM restart for ovflw.")        \
@@ -68,10 +71,12 @@
   product(double, G1ConcMarkStepDurationMillis, 10.0,                       \
           "Target duration of individual concurrent marking steps "         \
           "in milliseconds.")                                               \
+          range(1.0, (double)max_uintx)                                     \
                                                                             \
   product(intx, G1RefProcDrainInterval, 10,                                 \
           "The number of discovered reference objects to process before "   \
           "draining concurrent marking work queues.")                       \
+          range(1, max_intx)                                                \
                                                                             \
   experimental(bool, G1UseConcMarkReferenceProcessing, true,                \
           "If true, enable reference discovery during concurrent "          \
@@ -89,9 +94,11 @@
           "the percentage of retained entries is over this threshold "      \
           "the buffer will be enqueued for processing. A value of 0 "       \
           "specifies that mutator threads should not do such filtering.")   \
+          range(0, 100)                                                     \
                                                                             \
   experimental(intx, G1ExpandByPercentOfAvailable, 20,                      \
           "When expanding, % of uncommitted space to claim.")               \
+          range(0, 100)                                                     \
                                                                             \
   develop(bool, G1RSBarrierRegionFilter, true,                              \
           "If true, generate region filtering code in RS barrier")          \
@@ -138,9 +145,11 @@
                                                                             \
   product(size_t, G1ConcRSLogCacheSize, 10,                                 \
           "Log base 2 of the length of conc RS hot-card cache.")            \
+          range(0, 27)                                                      \
                                                                             \
   product(uintx, G1ConcRSHotCardLimit, 4,                                   \
           "The threshold that defines (>=) a hot card.")                    \
+          range(0, max_jubyte)                                              \
                                                                             \
   develop(intx, G1RSetRegionEntriesBase, 256,                               \
           "Max number of regions in a fine-grain table per MB.")            \
@@ -183,6 +192,7 @@
   product(uintx, G1ReservePercent, 10,                                      \
           "It determines the minimum reserve we should have in the heap "   \
           "to minimize the probability of promotion failure.")              \
+          range(0, 100)                                                     \
                                                                             \
   diagnostic(bool, G1PrintHeapRegions, false,                               \
           "If set G1 will print information on which regions are being "    \
@@ -238,22 +248,27 @@
           "The number of times we'll force an overflow during "             \
           "concurrent marking")                                             \
                                                                             \
+  experimental(uintx, G1MaxNewSizePercent, 60,                              \
+          "Percentage (0-100) of the heap size to use as default "          \
+          " maximum young gen size.")                                       \
+          range(0, 100)                                                     \
+          constraint(G1MaxNewSizePercentConstraintFunc)                     \
+                                                                            \
   experimental(uintx, G1NewSizePercent, 5,                                  \
           "Percentage (0-100) of the heap size to use as default "          \
           "minimum young gen size.")                                        \
-                                                                            \
-  experimental(uintx, G1MaxNewSizePercent, 60,                              \
-          "Percentage (0-100) of the heap size to use as default "          \
-          " maximum young gen size.")                                       \
+          constraint(G1NewSizePercentConstraintFunc)                        \
                                                                             \
   experimental(uintx, G1MixedGCLiveThresholdPercent, 85,                    \
           "Threshold for regions to be considered for inclusion in the "    \
           "collection set of mixed GCs. "                                   \
           "Regions with live bytes exceeding this will not be collected.")  \
+          range(0, 100)                                                     \
                                                                             \
   product(uintx, G1HeapWastePercent, 5,                                     \
           "Amount of space, expressed as a percentage of the heap size, "   \
           "that G1 is willing not to collect to avoid expensive GCs.")      \
+          range(0, 100)                                                     \
                                                                             \
   product(uintx, G1MixedGCCountTarget, 8,                                   \
           "The target number of mixed GCs after a marking cycle.")          \
@@ -272,6 +287,7 @@
   experimental(uintx, G1OldCSetRegionThresholdPercent, 10,                  \
           "An upper bound for the number of old CSet regions expressed "    \
           "as a percentage of the heap size.")                              \
+          range(0, 100)                                                     \
                                                                             \
   experimental(ccstr, G1LogLevel, NULL,                                     \
           "Log level for G1 logging: fine, finer, finest")                  \
@@ -314,6 +330,16 @@
   develop(bool, G1VerifyBitmaps, false,                                     \
           "Verifies the consistency of the marking bitmaps")
 
-G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
+G1_FLAGS(DECLARE_DEVELOPER_FLAG, \
+         DECLARE_PD_DEVELOPER_FLAG, \
+         DECLARE_PRODUCT_FLAG, \
+         DECLARE_PD_PRODUCT_FLAG, \
+         DECLARE_DIAGNOSTIC_FLAG, \
+         DECLARE_EXPERIMENTAL_FLAG, \
+         DECLARE_NOTPRODUCT_FLAG, \
+         DECLARE_MANAGEABLE_FLAG, \
+         DECLARE_PRODUCT_RW_FLAG, \
+         IGNORE_RANGE, \
+         IGNORE_CONSTRAINT)
 
 #endif // SHARE_VM_GC_G1_G1_GLOBALS_HPP
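
The extra range/constraint parameters threaded through G1_FLAGS follow the usual X-macro pattern: each flag entry may be followed by range()/constraint() entries, and callers that only want the declarations or definitions pass macros that expand to nothing, which is what IGNORE_RANGE and IGNORE_CONSTRAINT are assumed to do here. A toy standalone version of the materializing pass, with invented flag names:

#include <cstdio>

// Flag list: each entry may be followed by a range() entry, just like the
// product(...)/range(...) pairs in G1_FLAGS above.
#define MY_FLAGS(product, range)                               \
  product(int, DemoConfidencePercent, 50, "demo confidence")   \
          range(0, 100)                                        \
  product(int, DemoVerboseLevel, 2, "demo verbosity level")    \
          range(0, 4)

// Materializing pass: define the variables, ignore the range entries.
#define MATERIALIZE_FLAG(type, name, value, doc) type name = value;
#define IGNORE_RANGE(min, max)   /* expands to nothing, mirroring the assumption above */

MY_FLAGS(MATERIALIZE_FLAG, IGNORE_RANGE)

int main() {
  std::printf("DemoConfidencePercent=%d DemoVerboseLevel=%d\n",
              DemoConfidencePercent, DemoVerboseLevel);
  return 0;
}
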
--- a/src/share/vm/gc/g1/heapRegion.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/heapRegion.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -103,6 +103,10 @@
   return HeapRegionBounds::max_size();
 }
 
+size_t HeapRegion::min_region_size_in_words() {
+  return HeapRegionBounds::min_size() >> LogHeapWordSize;
+}
+
 void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
   size_t region_size = G1HeapRegionSize;
   if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
@@ -711,12 +715,12 @@
         _n_failures++;
       }
 
-      if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
+      if (!_g1h->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC) {
         HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
         HeapRegion* to   = _g1h->heap_region_containing(obj);
         if (from != NULL && to != NULL &&
             from != to &&
-            !to->is_humongous()) {
+            !to->is_pinned()) {
           jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
           jbyte cv_field = *_bs->byte_for_const(p);
           const jbyte dirty = CardTableModRefBS::dirty_card_val();
--- a/src/share/vm/gc/g1/heapRegion.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/heapRegion.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -331,6 +331,7 @@
   }
 
   static size_t max_region_size();
+  static size_t min_region_size_in_words();
 
   // It sets up the heap region size (GrainBytes / GrainWords), as
   // well as other related fields that are based on the heap region
@@ -417,6 +418,15 @@
 
   bool is_old() const { return _type.is_old(); }
 
+  // A pinned region contains objects which are not moved by garbage collections.
+  // Humongous regions and archive regions are pinned.
+  bool is_pinned() const { return _type.is_pinned(); }
+
+  // An archive region is a pinned region, also tagged as old, which
+  // should not be marked during mark/sweep. This allows the address
+  // space to be shared by JVM instances.
+  bool is_archive() const { return _type.is_archive(); }
+
   // For a humongous region, region in which it starts.
   HeapRegion* humongous_start_region() const {
     return _humongous_start_region;
@@ -670,6 +680,8 @@
 
   void set_old() { _type.set_old(); }
 
+  void set_archive() { _type.set_archive(); }
+
   // Determine if an object has been allocated since the last
   // mark performed by the collector. This returns true iff the object
   // is within the unmarked area of the region.
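
The hunk above only declares is_pinned() and is_archive() on HeapRegion; the underlying tag encoding lives in heapRegionType.hpp, which falls outside this excerpt. The sketch below is one plausible standalone model of the type-level predicates, consistent with the comments above (pinned means humongous or archive, and archive also counts as old); the enum and struct are assumptions, not the real implementation.

#include <cstdio>

enum Tag { FreeTag, EdenTag, SurvTag, StartsHumongousTag, ContinuesHumongousTag, OldTag, ArchiveTag };

struct RegionTypeModel {
  Tag _tag;
  bool is_humongous() const { return _tag == StartsHumongousTag || _tag == ContinuesHumongousTag; }
  bool is_archive()   const { return _tag == ArchiveTag; }
  bool is_old()       const { return _tag == OldTag || _tag == ArchiveTag; }  // archive is also tagged old
  bool is_pinned()    const { return is_humongous() || is_archive(); }        // never moved by GC
};

int main() {
  RegionTypeModel r{ArchiveTag};
  std::printf("pinned=%d old=%d\n", (int)r.is_pinned(), (int)r.is_old());  // 1 1
  return 0;
}
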
--- a/src/share/vm/gc/g1/heapRegionManager.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/heapRegionManager.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -278,6 +278,55 @@
   return num_regions;
 }
 
+uint HeapRegionManager::find_highest_free(bool* expanded) {
+  // Loop downwards from the highest region index, looking for an
+  // entry which is either free or not yet committed.  If not yet
+  // committed, expand_at that index.
+  uint curr = max_length() - 1;
+  while (true) {
+    HeapRegion *hr = _regions.get_by_index(curr);
+    if (hr == NULL) {
+      uint res = expand_at(curr, 1);
+      if (res == 1) {
+        *expanded = true;
+        return curr;
+      }
+    } else {
+      if (hr->is_free()) {
+        *expanded = false;
+        return curr;
+      }
+    }
+    if (curr == 0) {
+      return G1_NO_HRM_INDEX;
+    }
+    curr--;
+  }
+}
+
+bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count) {
+  size_t commits = 0;
+  uint start_index = (uint)_regions.get_index_by_address(range.start());
+  uint last_index = (uint)_regions.get_index_by_address(range.last());
+
+  // Ensure that each G1 region in the range is free, returning false if not.
+  // Commit those that are not yet available, and keep count.
+  for (uint curr_index = start_index; curr_index <= last_index; curr_index++) {
+    if (!is_available(curr_index)) {
+      commits++;
+      expand_at(curr_index, 1);
+    }
+    HeapRegion* curr_region  = _regions.get_by_index(curr_index);
+    if (!curr_region->is_free()) {
+      return false;
+    }
+  }
+
+  allocate_free_regions_starting_at(start_index, (last_index - start_index) + 1);
+  *commit_count = commits;
+  return true;
+}
+
 void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const {
   const uint start_index = hrclaimer->start_region_for_worker(worker_id);
 
--- a/src/share/vm/gc/g1/heapRegionManager.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/heapRegionManager.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -221,6 +221,16 @@
 
   HeapRegion* next_region_in_heap(const HeapRegion* r) const;
 
+  // Find the highest free or uncommitted region in the reserved heap,
+  // and if uncommitted, commit it. If none are available, return G1_NO_HRM_INDEX.
+  // Set the 'expanded' boolean true if a new region was committed.
+  uint find_highest_free(bool* expanded);
+
+  // Allocate the regions that contain the address range specified, committing the
+  // regions if necessary. Return false if any of the regions is already committed
+  // and not free, and return the number of regions newly committed in commit_count.
+  bool allocate_containing_regions(MemRegion range, size_t* commit_count);
+
   // Apply blk->doHeapRegion() on all committed regions in address order,
   // terminating the iteration early if doHeapRegion() returns true.
   void iterate(HeapRegionClosure* blk) const;
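
[Editor's note] To illustrate the downward scan documented above, here is a small self-contained C++ sketch of the same search over a toy region table. RegionState, find_highest_free_toy() and NO_INDEX are illustrative stand-ins for the HeapRegionManager types and G1_NO_HRM_INDEX, not HotSpot code.

#include <cstdint>
#include <cstdio>
#include <vector>

// Toy stand-in for the commit/free state of each G1 region.
enum class RegionState { Uncommitted, Free, Used };

static const uint32_t NO_INDEX = UINT32_MAX;  // plays the role of G1_NO_HRM_INDEX

// Walk from the highest region index downwards. Return the first region
// that is free, or commit (and return) the first uncommitted one.
// '*expanded' reports whether a new region had to be committed.
// Assumes at least one region, as max_length() does in the real code.
static uint32_t find_highest_free_toy(std::vector<RegionState>& regions, bool* expanded) {
  uint32_t curr = (uint32_t)regions.size() - 1;
  while (true) {
    if (regions[curr] == RegionState::Uncommitted) {
      regions[curr] = RegionState::Free;   // models a successful expand_at(curr, 1)
      *expanded = true;
      return curr;
    }
    if (regions[curr] == RegionState::Free) {
      *expanded = false;
      return curr;
    }
    if (curr == 0) {
      return NO_INDEX;                     // nothing free or committable
    }
    curr--;
  }
}

int main() {
  std::vector<RegionState> regions = {
    RegionState::Used, RegionState::Free, RegionState::Used, RegionState::Uncommitted
  };
  bool expanded = false;
  uint32_t idx = find_highest_free_toy(regions, &expanded);
  // The uncommitted region at the top of the table wins and gets committed.
  std::printf("index %u, expanded %d\n", (unsigned)idx, expanded);  // index 3, expanded 1
  return 0;
}
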
--- a/src/share/vm/gc/g1/heapRegionRemSet.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/heapRegionRemSet.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -817,7 +817,7 @@
 // This can be done by either mutator threads together with the
 // concurrent refinement threads or GC threads.
 uint HeapRegionRemSet::num_par_rem_sets() {
-  return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), (uint)ParallelGCThreads);
+  return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
 }
 
 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
--- a/src/share/vm/gc/g1/heapRegionSet.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/heapRegionSet.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -42,7 +42,8 @@
   assert(hr->is_humongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrm_index(), name()));
   assert(hr->is_free() == regions_free(), err_msg("Wrong free state for region %u and set %s", hr->hrm_index(), name()));
   assert(!hr->is_free() || hr->is_empty(), err_msg("Free region %u is not empty for set %s", hr->hrm_index(), name()));
-  assert(!hr->is_empty() || hr->is_free(), err_msg("Empty region %u is not free for set %s", hr->hrm_index(), name()));
+  assert(!hr->is_empty() || hr->is_free() || hr->is_archive(),
+         err_msg("Empty region %u is not free or archive for set %s", hr->hrm_index(), name()));
   assert(hr->rem_set()->verify_ready_for_par_iteration(), err_msg("Wrong iteration state %u", hr->hrm_index()));
 }
 #endif
--- a/src/share/vm/gc/g1/heapRegionType.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/heapRegionType.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -33,6 +33,7 @@
     case StartsHumongousTag:
     case ContinuesHumongousTag:
     case OldTag:
+    case ArchiveTag:
       return true;
   }
   return false;
@@ -47,6 +48,7 @@
     case StartsHumongousTag:    return "HUMS";
     case ContinuesHumongousTag: return "HUMC";
     case OldTag:                return "OLD";
+    case ArchiveTag:            return "ARC";
   }
   ShouldNotReachHere();
   // keep some compilers happy
@@ -62,6 +64,7 @@
     case StartsHumongousTag:    return "HS";
     case ContinuesHumongousTag: return "HC";
     case OldTag:                return "O";
+    case ArchiveTag:            return "A";
   }
   ShouldNotReachHere();
   // keep some compilers happy
--- a/src/share/vm/gc/g1/heapRegionType.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/g1/heapRegionType.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -44,15 +44,18 @@
   //
   // 0000 0 [ 0] Free
   //
-  // 0001 0      Young Mask
+  // 0001 0 [ 2] Young Mask
   // 0001 0 [ 2] Eden
   // 0001 1 [ 3] Survivor
   //
-  // 0010 0      Humongous Mask
-  // 0010 0 [ 4] Starts Humongous
-  // 0010 1 [ 5] Continues Humongous
+  // 0010 0 [ 4] Humongous Mask
+  // 0100 0 [ 8] Pinned Mask
+  // 0110 0 [12] Starts Humongous
+  // 0110 1 [13] Continues Humongous
   //
-  // 01000 [ 8] Old
+  // 1000 0 [16] Old Mask
+  //
+  // 1100 0 [24] Archive
   typedef enum {
     FreeTag               = 0,
 
@@ -61,10 +64,14 @@
     SurvTag               = YoungMask + 1,
 
     HumongousMask         = 4,
-    StartsHumongousTag    = HumongousMask,
-    ContinuesHumongousTag = HumongousMask + 1,
+    PinnedMask            = 8,
+    StartsHumongousTag    = HumongousMask | PinnedMask,
+    ContinuesHumongousTag = (HumongousMask | PinnedMask) + 1,
 
-    OldTag                = 8
+    OldMask               = 16,
+    OldTag                = OldMask,
+
+    ArchiveTag            = PinnedMask | OldMask
   } Tag;
 
   volatile Tag _tag;
@@ -108,7 +115,13 @@
   bool is_starts_humongous()    const { return get() == StartsHumongousTag;    }
   bool is_continues_humongous() const { return get() == ContinuesHumongousTag; }
 
-  bool is_old() const { return get() == OldTag; }
+  bool is_archive() const { return get() == ArchiveTag; }
+
+  // Regions that are is_old() may or may not also be pinned.
+  bool is_old() const { return (get() & OldMask) != 0; }
+
+  // Regions that are is_pinned() are either archive or humongous.
+  bool is_pinned() const { return (get() & PinnedMask) != 0; }
 
   // Setters
 
@@ -123,6 +136,8 @@
 
   void set_old() { set(OldTag); }
 
+  void set_archive() { set_from(ArchiveTag, FreeTag); }
+
   // Misc
 
   const char* get_str() const;
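
[Editor's note] For reference, a minimal standalone C++ sketch of the tag arithmetic introduced above. The enum values are copied from the hunk and from the bit-layout comment; the helpers is_pinned_tag()/is_old_tag() and the main() harness are illustrative only and not HotSpot API.

#include <cassert>

// Tag values copied from the heapRegionType.hpp hunk above.
enum Tag {
  FreeTag               = 0,
  YoungMask             = 2,
  EdenTag               = YoungMask,
  SurvTag               = YoungMask + 1,
  HumongousMask         = 4,
  PinnedMask            = 8,
  StartsHumongousTag    = HumongousMask | PinnedMask,        // 12
  ContinuesHumongousTag = (HumongousMask | PinnedMask) + 1,  // 13
  OldMask               = 16,
  OldTag                = OldMask,                           // 16
  ArchiveTag            = PinnedMask | OldMask               // 24
};

// Illustrative mask predicates mirroring HeapRegionType::is_pinned()/is_old().
static bool is_pinned_tag(Tag t) { return (t & PinnedMask) != 0; }
static bool is_old_tag(Tag t)    { return (t & OldMask)    != 0; }

int main() {
  // The numeric values match the bit-layout comment in the hunk.
  assert(StartsHumongousTag    == 12);
  assert(ContinuesHumongousTag == 13);
  assert(ArchiveTag            == 24);
  // Humongous and archive regions are pinned; plain old regions are not.
  assert(is_pinned_tag(StartsHumongousTag) && is_pinned_tag(ContinuesHumongousTag));
  assert(is_pinned_tag(ArchiveTag) && is_old_tag(ArchiveTag));
  assert(is_old_tag(OldTag) && !is_pinned_tag(OldTag));
  return 0;
}
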
--- a/src/share/vm/gc/parallel/gcTaskManager.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/parallel/gcTaskManager.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -484,12 +484,12 @@
 
   assert(!all_workers_active() || active_workers() == ParallelGCThreads,
          err_msg("all_workers_active() is  incorrect: "
-                 "active %d  ParallelGCThreads " UINTX_FORMAT, active_workers(),
+                 "active %d  ParallelGCThreads %u", active_workers(),
                  ParallelGCThreads));
   if (TraceDynamicGCThreads) {
     gclog_or_tty->print_cr("GCTaskManager::set_active_gang(): "
                            "all_workers_active()  %d  workers %d  "
-                           "active  %d  ParallelGCThreads " UINTX_FORMAT,
+                           "active  %d  ParallelGCThreads %u",
                            all_workers_active(), workers(),  active_workers(),
                            ParallelGCThreads);
   }
--- a/src/share/vm/gc/parallel/pcTasks.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/parallel/pcTasks.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -255,7 +255,7 @@
     which_stack_index = which;
     assert(manager->active_workers() == ParallelGCThreads,
            err_msg("all_workers_active has been incorrectly set: "
-                   " active %d  ParallelGCThreads " UINTX_FORMAT, manager->active_workers(),
+                   " active %d  ParallelGCThreads %u", manager->active_workers(),
                    ParallelGCThreads));
   } else {
     which_stack_index = ParCompactionManager::pop_recycled_stack_index();
@@ -334,7 +334,7 @@
     which_stack_index = which;
     assert(manager->active_workers() == ParallelGCThreads,
            err_msg("all_workers_active has been incorrectly set: "
-                   " active %d  ParallelGCThreads " UINTX_FORMAT, manager->active_workers(),
+                   " active %d  ParallelGCThreads %u", manager->active_workers(),
                    ParallelGCThreads));
   } else {
     which_stack_index = stack_index();
--- a/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -1304,7 +1304,7 @@
     size_t survived_guess = survived + promoted;
     _avg_survived->sample(survived_guess);
   }
-  avg_promoted()->sample(promoted + _avg_pretenured->padded_average());
+  avg_promoted()->sample(promoted);
 
   if (PrintAdaptiveSizePolicy) {
     gclog_or_tty->print_cr(
--- a/src/share/vm/gc/parallel/psCompactionManager.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/parallel/psCompactionManager.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -170,8 +170,8 @@
 }
 
 ParCompactionManager*
-ParCompactionManager::gc_thread_compaction_manager(int index) {
-  assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
+ParCompactionManager::gc_thread_compaction_manager(uint index) {
+  assert(index < ParallelGCThreads, "index out of range");
   assert(_manager_array != NULL, "Sanity");
   return _manager_array[index];
 }
--- a/src/share/vm/gc/parallel/psCompactionManager.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/parallel/psCompactionManager.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -133,7 +133,7 @@
   RegionTaskQueue* region_stack()                { return _region_stack; }
   void set_region_stack(RegionTaskQueue* v)       { _region_stack = v; }
 
-  inline static ParCompactionManager* manager_array(int index);
+  inline static ParCompactionManager* manager_array(uint index);
 
   inline static RegionTaskQueue* region_list(int index) {
     return _region_list[index];
@@ -177,7 +177,7 @@
   void follow_class_loader(ClassLoaderData* klass);
 
   // Access function for compaction managers
-  static ParCompactionManager* gc_thread_compaction_manager(int index);
+  static ParCompactionManager* gc_thread_compaction_manager(uint index);
 
   static bool steal(int queue_num, int* seed, oop& t);
   static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t);
@@ -229,10 +229,9 @@
   };
 };
 
-inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
+inline ParCompactionManager* ParCompactionManager::manager_array(uint index) {
   assert(_manager_array != NULL, "access of NULL manager_array");
-  assert(index >= 0 && index <= (int)ParallelGCThreads,
-    "out of range manager_array access");
+  assert(index <= ParallelGCThreads, "out of range manager_array access");
   return _manager_array[index];
 }
 
--- a/src/share/vm/gc/parallel/psOldGen.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/parallel/psOldGen.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -199,7 +199,7 @@
   // Allocations in the old generation need to be reported
   if (res != NULL) {
     ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-    heap->size_policy()->tenured_allocation(word_size);
+    heap->size_policy()->tenured_allocation(word_size * HeapWordSize);
   }
 
   return res;
--- a/src/share/vm/gc/parallel/psParallelCompact.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/parallel/psParallelCompact.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -832,10 +832,10 @@
   _ref_processor =
     new ReferenceProcessor(mr,            // span
                            ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
-                           (uint) ParallelGCThreads, // mt processing degree
-                           true,          // mt discovery
-                           (uint) ParallelGCThreads, // mt discovery degree
-                           true,          // atomic_discovery
+                           ParallelGCThreads, // mt processing degree
+                           true,              // mt discovery
+                           ParallelGCThreads, // mt discovery degree
+                           true,              // atomic_discovery
                            &_is_alive_closure); // non-header is alive closure
   _counters = new CollectorCounters("PSParallelCompact", 1);
 
--- a/src/share/vm/gc/parallel/psPromotionManager.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/parallel/psPromotionManager.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -75,8 +75,8 @@
   return PSScavenge::should_scavenge(p, check_to_space);
 }
 
-PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) {
-  assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
+PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(uint index) {
+  assert(index < ParallelGCThreads, "index out of range");
   assert(_manager_array != NULL, "Sanity");
   return &_manager_array[index];
 }
--- a/src/share/vm/gc/parallel/psPromotionManager.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/parallel/psPromotionManager.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -90,7 +90,7 @@
   static PSOldGen* old_gen()         { return _old_gen; }
   static MutableSpace* young_space() { return _young_space; }
 
-  inline static PSPromotionManager* manager_array(int index);
+  inline static PSPromotionManager* manager_array(uint index);
   template <class T> inline void claim_or_forward_internal_depth(T* p);
 
   // On the task queues we push reference locations as well as
@@ -154,7 +154,7 @@
   static void pre_scavenge();
   static bool post_scavenge(YoungGCTracer& gc_tracer);
 
-  static PSPromotionManager* gc_thread_promotion_manager(int index);
+  static PSPromotionManager* gc_thread_promotion_manager(uint index);
   static PSPromotionManager* vm_thread_promotion_manager();
 
   static bool steal_depth(int queue_num, int* seed, StarTask& t);
--- a/src/share/vm/gc/parallel/psPromotionManager.inline.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/parallel/psPromotionManager.inline.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -33,9 +33,9 @@
 #include "gc/shared/taskqueue.inline.hpp"
 #include "oops/oop.inline.hpp"
 
-inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
+inline PSPromotionManager* PSPromotionManager::manager_array(uint index) {
   assert(_manager_array != NULL, "access of NULL manager_array");
-  assert(index >= 0 && index <= (int)ParallelGCThreads, "out of range manager_array access");
+  assert(index <= ParallelGCThreads, "out of range manager_array access");
   return &_manager_array[index];
 }
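
[Editor's note] The asymmetric bounds above ('<' in gc_thread_promotion_manager(), '<=' in manager_array()) reflect the extra slot the manager array keeps past the last GC worker, which is what vm_thread_promotion_manager() hands out. A hedged C++ sketch of that layout; ToyManager, manager_at() and kParallelGCThreads are illustrative names, not HotSpot code.

#include <cassert>
#include <vector>

// Toy stand-in for a per-thread promotion/compaction manager.
struct ToyManager { unsigned id; };

static const unsigned kParallelGCThreads = 4;   // assumption for the sketch

// One manager per GC worker plus one trailing slot for the VM thread.
static std::vector<ToyManager> manager_array(kParallelGCThreads + 1);

// Raw accessor: the VM-thread slot is legal, so index may equal kParallelGCThreads.
static ToyManager* manager_at(unsigned index) {
  assert(index <= kParallelGCThreads && "out of range manager_array access");
  return &manager_array[index];
}

// GC-thread accessor: only worker slots are legal here.
static ToyManager* gc_thread_manager(unsigned index) {
  assert(index < kParallelGCThreads && "index out of range");
  return manager_at(index);
}

// VM-thread accessor: always the last slot.
static ToyManager* vm_thread_manager() {
  return manager_at(kParallelGCThreads);
}

int main() {
  // With unsigned indices, the old 'index >= 0' half of the assert is unnecessary.
  gc_thread_manager(0)->id = 0;
  vm_thread_manager()->id = kParallelGCThreads;
  assert(vm_thread_manager() != gc_thread_manager(kParallelGCThreads - 1));
  return 0;
}
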
 
--- a/src/share/vm/gc/parallel/psScavenge.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/parallel/psScavenge.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -845,9 +845,9 @@
   _ref_processor =
     new ReferenceProcessor(mr,                         // span
                            ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
-                           (uint) ParallelGCThreads,   // mt processing degree
+                           ParallelGCThreads,          // mt processing degree
                            true,                       // mt discovery
-                           (uint) ParallelGCThreads,   // mt discovery degree
+                           ParallelGCThreads,          // mt discovery degree
                            true,                       // atomic_discovery
                            NULL);                      // header provides liveness info
 
--- a/src/share/vm/gc/serial/defNewGeneration.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/serial/defNewGeneration.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -58,11 +58,13 @@
 
 // Methods of protected closure types.
 
-DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
-  assert(g->level() == 0, "Optimized for youngest gen.");
+DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) {
+  assert(_young_gen->kind() == Generation::ParNew ||
+         _young_gen->kind() == Generation::DefNew, "Expected the young generation here");
 }
+
 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
-  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
+  return (HeapWord*)p >= _young_gen->reserved().end() || p->is_forwarded();
 }
 
 DefNewGeneration::KeepAliveClosure::
@@ -85,39 +87,38 @@
 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
 
 DefNewGeneration::EvacuateFollowersClosure::
-EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
-                         ScanClosure* cur, ScanClosure* older) :
-  _gch(gch), _level(level),
-  _scan_cur_or_nonheap(cur), _scan_older(older)
+EvacuateFollowersClosure(GenCollectedHeap* gch,
+                         ScanClosure* cur,
+                         ScanClosure* older) :
+  _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
 {}
 
 void DefNewGeneration::EvacuateFollowersClosure::do_void() {
   do {
-    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
-                                       _scan_older);
-  } while (!_gch->no_allocs_since_save_marks(_level));
+    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
+  } while (!_gch->no_allocs_since_save_marks(GenCollectedHeap::YoungGen));
 }
 
 DefNewGeneration::FastEvacuateFollowersClosure::
-FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
-                             DefNewGeneration* gen,
-                             FastScanClosure* cur, FastScanClosure* older) :
-  _gch(gch), _level(level), _gen(gen),
-  _scan_cur_or_nonheap(cur), _scan_older(older)
-{}
+FastEvacuateFollowersClosure(GenCollectedHeap* gch,
+                             FastScanClosure* cur,
+                             FastScanClosure* older) :
+  _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
+{
+  assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
+  _gen = (DefNewGeneration*)_gch->young_gen();
+}
 
 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
   do {
-    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
-                                       _scan_older);
-  } while (!_gch->no_allocs_since_save_marks(_level));
+    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
+  } while (!_gch->no_allocs_since_save_marks(GenCollectedHeap::YoungGen));
   guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
 }
 
 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
     OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 {
-  assert(_g->level() == 0, "Optimized for youngest generation");
   _boundary = _g->reserved().end();
 }
 
@@ -127,7 +128,6 @@
 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
     OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 {
-  assert(_g->level() == 0, "Optimized for youngest generation");
   _boundary = _g->reserved().end();
 }
 
@@ -168,7 +168,6 @@
 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
   _g(g)
 {
-  assert(_g->level() == 0, "Optimized for youngest generation");
   _boundary = _g->reserved().end();
 }
 
@@ -186,9 +185,8 @@
 
 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                    size_t initial_size,
-                                   int level,
                                    const char* policy)
-  : Generation(rs, initial_size, level),
+  : Generation(rs, initial_size),
     _promo_failure_drain_in_progress(false),
     _should_allocate_from_space(false)
 {
@@ -372,22 +370,18 @@
   return success;
 }
 
-
 void DefNewGeneration::compute_new_size() {
-  // This is called after a gc that includes the following generation
-  // (which is required to exist.)  So from-space will normally be empty.
+  // This is called after a GC that includes the old generation, so from-space
+  // will normally be empty.
   // Note that we check both spaces, since if scavenge failed they revert roles.
-  // If not we bail out (otherwise we would have to relocate the objects)
+  // If not, we bail out (otherwise we would have to relocate the objects).
   if (!from()->is_empty() || !to()->is_empty()) {
     return;
   }
 
-  int next_level = level() + 1;
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(next_level == 1, "DefNewGeneration must be a young gen");
 
-  Generation* old_gen = gch->old_gen();
-  size_t old_size = old_gen->capacity();
+  size_t old_size = gch->old_gen()->capacity();
   size_t new_size_before = _virtual_space.committed_size();
   size_t min_new_size = spec()->init_size();
   size_t max_new_size = reserved().byte_size();
@@ -603,7 +597,7 @@
 
   gch->rem_set()->prepare_for_younger_refs_iterate(false);
 
-  assert(gch->no_allocs_since_save_marks(0),
+  assert(gch->no_allocs_since_save_marks(GenCollectedHeap::YoungGen),
          "save marks have not been newly set.");
 
   // Not very pretty.
@@ -619,11 +613,11 @@
                                            false);
 
   set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
-  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
+  FastEvacuateFollowersClosure evacuate_followers(gch,
                                                   &fsc_with_no_gc_barrier,
                                                   &fsc_with_gc_barrier);
 
-  assert(gch->no_allocs_since_save_marks(0),
+  assert(gch->no_allocs_since_save_marks(GenCollectedHeap::YoungGen),
          "save marks have not been newly set.");
 
   {
@@ -633,7 +627,7 @@
     StrongRootsScope srs(0);
 
     gch->gen_process_roots(&srs,
-                           _level,
+                           GenCollectedHeap::YoungGen,
                            true,  // Process younger gens, if any,
                                   // as strong roots.
                            GenCollectedHeap::SO_ScavengeCodeCache,
@@ -870,8 +864,10 @@
 
 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
-  if (requestor == this || _promotion_failed) return;
-  assert(requestor->level() > level(), "DefNewGeneration must be youngest");
+  if (requestor == this || _promotion_failed) {
+    return;
+  }
+  assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation");
 
   /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
   if (to_space->top() > to_space->bottom()) {
--- a/src/share/vm/gc/serial/defNewGeneration.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/serial/defNewGeneration.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -154,9 +154,9 @@
 
  public:  // was "protected" but caused compile error on win32
   class IsAliveClosure: public BoolObjectClosure {
-    Generation* _g;
+    Generation* _young_gen;
   public:
-    IsAliveClosure(Generation* g);
+    IsAliveClosure(Generation* young_gen);
     bool do_object_b(oop p);
   };
 
@@ -183,31 +183,28 @@
 
   class EvacuateFollowersClosure: public VoidClosure {
     GenCollectedHeap* _gch;
-    int _level;
     ScanClosure* _scan_cur_or_nonheap;
     ScanClosure* _scan_older;
   public:
-    EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
+    EvacuateFollowersClosure(GenCollectedHeap* gch,
                              ScanClosure* cur, ScanClosure* older);
     void do_void();
   };
 
   class FastEvacuateFollowersClosure: public VoidClosure {
     GenCollectedHeap* _gch;
-    int _level;
     DefNewGeneration* _gen;
     FastScanClosure* _scan_cur_or_nonheap;
     FastScanClosure* _scan_older;
   public:
-    FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
-                                 DefNewGeneration* gen,
+    FastEvacuateFollowersClosure(GenCollectedHeap* gch,
                                  FastScanClosure* cur,
                                  FastScanClosure* older);
     void do_void();
   };
 
  public:
-  DefNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
+  DefNewGeneration(ReservedSpace rs, size_t initial_byte_size,
                    const char* policy="Copy");
 
   virtual void ref_processor_init();
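
[Editor's note] The constructor and closure changes above all follow one pattern: the integer 'level' is gone, and callers either name the generation explicitly (GenCollectedHeap::YoungGen / OldGen) or ask the heap whether a given Generation* is the young or the old generation (is_young_gen() / is_old_gen()). A minimal C++ sketch of that style of check; the Toy* types are stand-ins, not HotSpot classes.

#include <cassert>

// Toy generation and heap types; only the identity checks matter here.
struct ToyGeneration { const char* name; };

class ToyGenCollectedHeap {
 public:
  // Named generations replace the old numeric levels (young == 0, old == 1).
  enum GenerationType { YoungGen, OldGen };

  ToyGeneration* young_gen() { return &_young; }
  ToyGeneration* old_gen()   { return &_old; }

  // Identity queries replace 'gen->level() == 0 / 1' style checks.
  bool is_young_gen(const ToyGeneration* g) const { return g == &_young; }
  bool is_old_gen(const ToyGeneration* g)   const { return g == &_old; }

 private:
  ToyGeneration _young = { "young" };
  ToyGeneration _old   = { "old" };
};

int main() {
  ToyGenCollectedHeap heap;
  // e.g. what contribute_scratch() now asserts: the requestor must be the old gen.
  ToyGeneration* requestor = heap.old_gen();
  assert(heap.is_old_gen(requestor) && !heap.is_young_gen(requestor));

  // The named enum also replaces the old 'max_level' ints in do_collection() calls.
  ToyGenCollectedHeap::GenerationType max_generation = ToyGenCollectedHeap::OldGen;
  (void)max_generation;
  return 0;
}
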
--- a/src/share/vm/gc/serial/genMarkSweep.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/serial/genMarkSweep.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -36,6 +36,7 @@
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
+#include "gc/shared/generation.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "gc/shared/modRefBarrierSet.hpp"
 #include "gc/shared/referencePolicy.hpp"
@@ -53,8 +54,7 @@
 #include "utilities/events.hpp"
 #include "utilities/stack.inline.hpp"
 
-void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool clear_all_softrefs) {
-  guarantee(level == 1, "We always collect both old and young.");
+void GenMarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_softrefs) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
@@ -87,11 +87,11 @@
   // Capture used regions for each generation that will be
   // subject to collection, so that card table adjustments can
   // be made intelligently (see clear / invalidate further below).
-  gch->save_used_regions(level);
+  gch->save_used_regions();
 
   allocate_stacks();
 
-  mark_sweep_phase1(level, clear_all_softrefs);
+  mark_sweep_phase1(clear_all_softrefs);
 
   mark_sweep_phase2();
 
@@ -99,7 +99,7 @@
   COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
   COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
 
-  mark_sweep_phase3(level);
+  mark_sweep_phase3();
 
   mark_sweep_phase4();
 
@@ -184,8 +184,7 @@
   _objarray_stack.clear(true);
 }
 
-void GenMarkSweep::mark_sweep_phase1(int level,
-                                     bool clear_all_softrefs) {
+void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
   GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
 
@@ -195,7 +194,6 @@
   // use OopsInGenClosure constructor which takes a generation,
   // as the Universe has not been created when the static constructors
   // are run.
-  assert(level == 1, "We don't use mark-sweep on young generations");
   follow_root_closure.set_orig_generation(gch->old_gen());
 
   // Need new claim bits before marking starts.
@@ -205,10 +203,10 @@
     StrongRootsScope srs(1);
 
     gch->gen_process_roots(&srs,
-                           level,
+                           GenCollectedHeap::OldGen,
                            false, // Younger gens are not roots.
                            GenCollectedHeap::SO_None,
-                           GenCollectedHeap::StrongRootsOnly,
+                           ClassUnloading,
                            &follow_root_closure,
                            &follow_root_closure,
                            &follow_cld_closure);
@@ -273,7 +271,7 @@
   }
 };
 
-void GenMarkSweep::mark_sweep_phase3(int level) {
+void GenMarkSweep::mark_sweep_phase3() {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
   // Adjust the pointers to reflect the new locations
@@ -286,14 +284,13 @@
   // use OopsInGenClosure constructor which takes a generation,
   // as the Universe has not been created when the static constructors
   // are run.
-  assert(level == 1, "We don't use mark-sweep on young generations.");
   adjust_pointer_closure.set_orig_generation(gch->old_gen());
 
   {
     StrongRootsScope srs(1);
 
     gch->gen_process_roots(&srs,
-                           level,
+                           GenCollectedHeap::OldGen,
                            false, // Younger gens are not roots.
                            GenCollectedHeap::SO_AllCodeCache,
                            GenCollectedHeap::StrongAndWeakRoots,
--- a/src/share/vm/gc/serial/genMarkSweep.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/serial/genMarkSweep.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -31,17 +31,16 @@
   friend class VM_MarkSweep;
   friend class G1MarkSweep;
  public:
-  static void invoke_at_safepoint(int level, ReferenceProcessor* rp,
-                                  bool clear_all_softrefs);
+  static void invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_softrefs);
 
  private:
 
   // Mark live objects
-  static void mark_sweep_phase1(int level, bool clear_all_softrefs);
+  static void mark_sweep_phase1(bool clear_all_softrefs);
   // Calculate new addresses
   static void mark_sweep_phase2();
   // Update pointers
-  static void mark_sweep_phase3(int level);
+  static void mark_sweep_phase3();
   // Move objects to new positions
   static void mark_sweep_phase4();
 
--- a/src/share/vm/gc/serial/markSweep.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/serial/markSweep.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -313,7 +313,7 @@
 
 MarkSweep::IsAliveClosure   MarkSweep::is_alive;
 
-bool MarkSweep::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); }
+bool MarkSweep::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked() || is_archive_object(p); }
 
 MarkSweep::KeepAliveClosure MarkSweep::keep_alive;
 
--- a/src/share/vm/gc/serial/markSweep.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/serial/markSweep.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -147,6 +147,9 @@
   // Reference Processing
   static ReferenceProcessor* const ref_processor() { return _ref_processor; }
 
+  // Archive Object handling
+  static inline bool is_archive_object(oop object);
+
   static STWGCTimer* gc_timer() { return _gc_timer; }
   static SerialOldTracer* gc_tracer() { return _gc_tracer; }
 
--- a/src/share/vm/gc/serial/markSweep.inline.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/serial/markSweep.inline.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -37,6 +37,7 @@
 #include "utilities/stack.inline.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc/g1/g1StringDedup.hpp"
+#include "gc/g1/g1MarkSweep.hpp"
 #endif // INCLUDE_ALL_GCS
 
 inline void MarkSweep::mark_object(oop obj) {
@@ -57,6 +58,15 @@
   }
 }
 
+inline bool MarkSweep::is_archive_object(oop object) {
+#if INCLUDE_ALL_GCS
+  return (G1MarkSweep::archive_check_enabled() &&
+          G1MarkSweep::in_archive_range(object));
+#else
+  return false;
+#endif
+}
+
 inline void MarkSweep::follow_klass(Klass* klass) {
   oop op = klass->klass_holder();
   MarkSweep::mark_and_push(&op);
@@ -74,7 +84,8 @@
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (!obj->mark()->is_marked()) {
+    if (!obj->mark()->is_marked() &&
+        !is_archive_object(obj)) {
       mark_object(obj);
       follow_object(obj);
     }
@@ -87,7 +98,8 @@
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (!obj->mark()->is_marked()) {
+    if (!obj->mark()->is_marked() &&
+        !is_archive_object(obj)) {
       mark_object(obj);
       _marking_stack.push(obj);
     }
@@ -111,15 +123,18 @@
     assert(Universe::heap()->is_in(obj), "should be in heap");
 
     oop new_obj = oop(obj->mark()->decode_pointer());
-    assert(new_obj != NULL ||                         // is forwarding ptr?
+    assert(is_archive_object(obj) ||                  // no forwarding of archive objects
+           new_obj != NULL ||                         // is forwarding ptr?
            obj->mark() == markOopDesc::prototype() || // not gc marked?
            (UseBiasedLocking && obj->mark()->has_bias_pattern()),
-                                                      // not gc marked?
+           // not gc marked?
            "should be forwarded");
     if (new_obj != NULL) {
-      assert(Universe::heap()->is_in_reserved(new_obj),
-             "should be in object space");
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      if (!is_archive_object(obj)) {
+        assert(Universe::heap()->is_in_reserved(new_obj),
+              "should be in object space");
+        oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      }
     }
   }
 }
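
[Editor's note] The is_archive_object() guards above all implement one rule: objects inside the archive range are treated as live but are never marked, pushed, or forwarded. A self-contained C++ toy sketch of that rule; the address-range check stands in for G1MarkSweep::in_archive_range(), and ToyObj, ArchiveRange, mark_and_push_toy() and is_alive_toy() are illustrative names only.

#include <cassert>
#include <cstdint>
#include <vector>

// Toy object model: an address plus a mark bit.
struct ToyObj {
  uintptr_t addr;
  bool marked = false;
};

// Stand-in for the archive range check the hunk delegates to G1MarkSweep.
struct ArchiveRange {
  uintptr_t lo, hi;
  bool contains(const ToyObj* o) const { return o->addr >= lo && o->addr < hi; }
};

// mark_and_push-style helper: archive objects are considered live
// without being marked or pushed onto the marking stack.
static void mark_and_push_toy(ToyObj* obj, const ArchiveRange& archive,
                              std::vector<ToyObj*>& marking_stack) {
  if (obj->marked || archive.contains(obj)) {
    return;                 // already marked, or pinned in the archive
  }
  obj->marked = true;
  marking_stack.push_back(obj);
}

// is_alive-style helper matching the MarkSweep::IsAliveClosure change.
static bool is_alive_toy(const ToyObj* obj, const ArchiveRange& archive) {
  return obj->marked || archive.contains(obj);
}

int main() {
  ArchiveRange archive = { 0x1000, 0x2000 };
  ToyObj in_archive = { 0x1800 };
  ToyObj on_heap    = { 0x4000 };
  std::vector<ToyObj*> stack;

  mark_and_push_toy(&in_archive, archive, stack);
  mark_and_push_toy(&on_heap, archive, stack);

  assert(!in_archive.marked && is_alive_toy(&in_archive, archive));  // live but unmarked
  assert(on_heap.marked && stack.size() == 1);                       // normal marking path
  return 0;
}
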
--- a/src/share/vm/gc/serial/tenuredGeneration.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/serial/tenuredGeneration.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -41,9 +41,9 @@
 #endif
 
 TenuredGeneration::TenuredGeneration(ReservedSpace rs,
-                                     size_t initial_byte_size, int level,
+                                     size_t initial_byte_size,
                                      GenRemSet* remset) :
-  CardGeneration(rs, initial_byte_size, level, remset)
+  CardGeneration(rs, initial_byte_size, remset)
 {
   HeapWord* bottom = (HeapWord*) _virtual_space.low();
   HeapWord* end    = (HeapWord*) _virtual_space.high();
@@ -134,11 +134,12 @@
          " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
 }
 
-void TenuredGeneration::update_gc_stats(int current_level,
+void TenuredGeneration::update_gc_stats(Generation* current_generation,
                                         bool full) {
-  // If the next lower level(s) has been collected, gather any statistics
+  // If the young generation has been collected, gather any statistics
   // that are of interest at this point.
-  if (!full && (current_level + 1) == level()) {
+  bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
+  if (!full && current_is_young) {
     // Calculate size of data promoted from the younger generations
     // before doing the collection.
     size_t used_before_gc = used();
@@ -192,7 +193,7 @@
   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
 
-  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
+  GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
 
   gc_timer->register_gc_end();
 
--- a/src/share/vm/gc/serial/tenuredGeneration.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/serial/tenuredGeneration.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -55,8 +55,9 @@
 
   void assert_correct_size_change_locking();
  public:
-  TenuredGeneration(ReservedSpace rs, size_t initial_byte_size,
-                               int level, GenRemSet* remset);
+  TenuredGeneration(ReservedSpace rs,
+                    size_t initial_byte_size,
+                    GenRemSet* remset);
 
   Generation::Name kind() { return Generation::MarkSweepCompact; }
 
@@ -120,7 +121,7 @@
 
   // Statistics
 
-  virtual void update_gc_stats(int level, bool full);
+  virtual void update_gc_stats(Generation* current_generation, bool full);
 
   virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes) const;
 
--- a/src/share/vm/gc/shared/cardGeneration.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/shared/cardGeneration.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -35,10 +35,10 @@
 #include "memory/memRegion.hpp"
 #include "runtime/java.hpp"
 
-CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
-                               int level,
+CardGeneration::CardGeneration(ReservedSpace rs,
+                               size_t initial_byte_size,
                                GenRemSet* remset) :
-  Generation(rs, initial_byte_size, level), _rs(remset),
+  Generation(rs, initial_byte_size), _rs(remset),
   _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
   _used_at_prologue()
 {
--- a/src/share/vm/gc/shared/cardGeneration.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/shared/cardGeneration.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -52,8 +52,7 @@
   size_t _capacity_at_prologue;
   size_t _used_at_prologue;
 
-  CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
-                 GenRemSet* remset);
+  CardGeneration(ReservedSpace rs, size_t initial_byte_size, GenRemSet* remset);
 
   virtual void assert_correct_size_change_locking() = 0;
 
--- a/src/share/vm/gc/shared/cardTableRS.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/shared/cardTableRS.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -104,7 +104,9 @@
 void CardTableRS::younger_refs_iterate(Generation* g,
                                        OopsInGenClosure* blk,
                                        uint n_threads) {
-  _last_cur_val_in_gen[g->level()+1] = cur_youngergen_card_val();
+  // The indexing in this array is slightly odd. We want to access
+  // the old generation record here, which is at index 2.
+  _last_cur_val_in_gen[2] = cur_youngergen_card_val();
   g->younger_refs_iterate(blk, n_threads);
 }
 
@@ -300,7 +302,8 @@
 }
 
 void CardTableRS::clear_into_younger(Generation* old_gen) {
-  assert(old_gen->level() == 1, "Should only be called for the old generation");
+  assert(GenCollectedHeap::heap()->is_old_gen(old_gen),
+         "Should only be called for the old generation");
   // The card tables for the youngest gen need never be cleared.
   // There's a bit of subtlety in the clear() and invalidate()
   // methods that we exploit here and in invalidate_or_clear()
@@ -311,7 +314,8 @@
 }
 
 void CardTableRS::invalidate_or_clear(Generation* old_gen) {
-  assert(old_gen->level() == 1, "Should only be called for the old generation");
+  assert(GenCollectedHeap::heap()->is_old_gen(old_gen),
+         "Should only be called for the old generation");
   // Invalidate the cards for the currently occupied part of
   // the old generation and clear the cards for the
   // unoccupied part of the generation (if any, making use
@@ -377,7 +381,9 @@
   VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {}
   void do_generation(Generation* gen) {
     // Skip the youngest generation.
-    if (gen->level() == 0) return;
+    if (GenCollectedHeap::heap()->is_young_gen(gen)) {
+      return;
+    }
     // Normally, we're interested in pointers to younger generations.
     VerifyCTSpaceClosure blk(_ct, gen->reserved().start());
     gen->space_iterate(&blk, true);
--- a/src/share/vm/gc/shared/cardTableRS.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/shared/cardTableRS.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -76,9 +76,8 @@
 
   // An array that contains, for each generation, the card table value last
   // used as the current value for a younger_refs_do iteration of that
-  // portion of the table.  (The perm gen is index 0; other gens are at
-  // their level plus 1.  They youngest gen is in the table, but will
-  // always have the value "clean_card".)
+  // portion of the table. The perm gen is index 0. The young gen is index 1,
+  // but will always have the value "clean_card". The old gen is index 2.
   jbyte* _last_cur_val_in_gen;
 
   jbyte _cur_youngergen_card_val;
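
[Editor's note] The fixed indices described above (perm gen 0, young gen 1, old gen 2) replace the old 'level + 1' computation. A tiny C++ sketch of the mapping written out; the index names are purely illustrative.

#include <cassert>

// Illustrative names for the fixed slots in _last_cur_val_in_gen, per the comment above.
enum CardValIndex {
  PermGenIndex  = 0,
  YoungGenIndex = 1,  // present, but always holds "clean_card"
  OldGenIndex   = 2   // the slot younger_refs_iterate() actually updates
};

int main() {
  signed char last_cur_val_in_gen[3] = { 0, 0, 0 };
  const signed char cur_youngergen_card_val = 16;  // arbitrary value for the sketch

  // Mirrors: _last_cur_val_in_gen[2] = cur_youngergen_card_val();
  last_cur_val_in_gen[OldGenIndex] = cur_youngergen_card_val;

  assert(last_cur_val_in_gen[YoungGenIndex] == 0);  // young slot stays "clean"
  assert(last_cur_val_in_gen[OldGenIndex] == 16);
  return 0;
}
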
--- a/src/share/vm/gc/shared/collectedHeap.hpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/shared/collectedHeap.hpp	Thu Jun 25 09:48:50 2015 -0700
@@ -88,9 +88,6 @@
   static int       _fire_out_of_memory_count;
 #endif
 
-  // Used for filler objects (static, but initialized in ctor).
-  static size_t _filler_array_max_size;
-
   GCHeapLog* _gc_heap_log;
 
   // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
@@ -102,6 +99,9 @@
   BarrierSet* _barrier_set;
   bool _is_gc_active;
 
+  // Used for filler objects (static, but initialized in ctor).
+  static size_t _filler_array_max_size;
+
   unsigned int _total_collections;          // ... started
   unsigned int _total_full_collections;     // ... started
   NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
--- a/src/share/vm/gc/shared/collectorPolicy.cpp	Wed Jun 24 09:13:12 2015 +0200
+++ b/src/share/vm/gc/shared/collectorPolicy.cpp	Thu Jun 25 09:48:50 2015 -0700
@@ -746,11 +746,11 @@
     return result;   // Could be null if we are out of space.
   } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
     // Do an incremental collection.
-    gch->do_collection(false            /* full */,
-                       false            /* clear_all_soft_refs */,
-                       size             /* size */,
-                       is_tlab          /* is_tlab */,
-                       number_of_generations() - 1 /* max_level */);
+    gch->do_collection(false,                     // full
+                       false,                     // clear_all_soft_refs
+                       size,                      // size
+                       is_tlab,                   // is_tlab
+                       GenCollectedHeap::OldGen); // max_generation
   } else {
     if (Verbose && PrintGCDetails) {
       gclog_or_tty->print(" :: Trying full because partial may fail :: ");
@@ -759,11 +759,11 @@
     // for the original code and why this has been simplified
     // with from-space allocation criteria modified and
     // such allocation moved out of the safepoint path.
-    gch->do_collection(true             /* full */,
-                       false            /* clear_all_soft_refs */,
-                       size             /* size */,
-                       is_tlab          /* is_tlab */,
-                       number_of_generations() - 1 /* max_level */);
+    gch->do_collection(true,                      // full
+                       false,                     // clear_all_soft_refs
+                       size,                      // size
+                       is_tlab,                   // is_tlab
+                       GenCollectedHeap::OldGen); // max_generation
   }
 
   result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);