changeset 5435:d840f02d03b4

Merge
author chegar
date Mon, 15 Jul 2013 11:07:03 +0100
parents 5c599c419c1d 1a3390aa8326
children 7ec210434b3c
files src/os_cpu/linux_sparc/vm/assembler_linux_sparc.cpp src/os_cpu/solaris_sparc/vm/assembler_solaris_sparc.cpp
diffstat 248 files changed, 5696 insertions(+), 2993 deletions(-)
--- a/.hgtags	Thu Jul 11 12:59:03 2013 -0400
+++ b/.hgtags	Mon Jul 15 11:07:03 2013 +0100
@@ -351,3 +351,10 @@
 3c78a14da19d26d6937af5f98b97e2a21c653b04 hs25-b36
 1beed1f6f9edefe47ba8ed1355fbd3e7606b8288 jdk8-b94
 69689078dff8b21e6df30870464f5d736eebdf72 hs25-b37
+5d65c078cd0ac455aa5e58a09844c7acce54b487 jdk8-b95
+2cc5a9d1ba66dfdff578918b393c727bd9450210 hs25-b38
+e6a4b8c71fa6f225bd989a34de2d0d0a656a8be8 jdk8-b96
+2b9380b0bf0b649f40704735773e8956c2d88ba0 hs25-b39
+d197d377ab2e016d024e8c86cb06a57bd7eae590 jdk8-b97
+c9dd82da51ed34a28f7c6b3245163ee962e94572 hs25-b40
+30b5b75c42ac5174b640fbef8aa87527668e8400 jdk8-b98
--- a/agent/src/share/classes/sun/jvm/hotspot/CLHSDB.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/CLHSDB.java	Mon Jul 15 11:07:03 2013 +0100
@@ -31,13 +31,19 @@
 import java.util.*;
 
 public class CLHSDB {
+
+    public CLHSDB(JVMDebugger d) {
+        jvmDebugger = d;
+    }
+
     public static void main(String[] args) {
         new CLHSDB(args).run();
     }
 
-    private void run() {
-        // At this point, if pidText != null we are supposed to attach to it.
-        // Else, if execPath != null, it is the path of a jdk/bin/java
+    public void run() {
+        // If jvmDebugger is already set, we have been given a JVMDebugger.
+        // Otherwise, if pidText != null we are supposed to attach to it.
+        // Finally, if execPath != null, it is the path of a jdk/bin/java
         // and coreFilename is the pathname of a core file we are
         // supposed to attach to.
 
@@ -49,7 +55,9 @@
                 }
             });
 
-        if (pidText != null) {
+        if (jvmDebugger != null) {
+            attachDebugger(jvmDebugger);
+        } else if (pidText != null) {
             attachDebugger(pidText);
         } else if (execPath != null) {
             attachDebugger(execPath, coreFilename);
@@ -96,6 +104,7 @@
     // Internals only below this point
     //
     private HotSpotAgent agent;
+    private JVMDebugger jvmDebugger;
     private boolean      attached;
     // These had to be made data members because they are referenced in inner classes.
     private String pidText;
@@ -120,7 +129,7 @@
         case (1):
             if (args[0].equals("help") || args[0].equals("-help")) {
                 doUsage();
-                System.exit(0);
+                return;
             }
             // If all numbers, it is a PID to attach to
             // Else, it is a pathname to a .../bin/java for a core file.
@@ -142,10 +151,15 @@
         default:
             System.out.println("HSDB Error: Too many options specified");
             doUsage();
-            System.exit(1);
+            return;
         }
     }
 
+    private void attachDebugger(JVMDebugger d) {
+        agent.attach(d);
+        attached = true;
+     }
+
     /** NOTE we are in a different thread here than either the main
         thread or the Swing/AWT event handler thread, so we must be very
         careful when creating or removing widgets */
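
The CLHSDB changes above add a JVMDebugger constructor and make run() public, so the command-line debugger can be driven from a process that already holds an attached debugger instead of always attaching itself. A minimal sketch of that embedded use, assuming existingDebugger is a JVMDebugger the host tool has already attached (the variable name is illustrative only):

    import sun.jvm.hotspot.CLHSDB;
    import sun.jvm.hotspot.debugger.JVMDebugger;

    class EmbeddedCLHSDB {
        // existingDebugger is assumed to be a debugger the host tool has already
        // attached to a process or core file.
        static void launch(JVMDebugger existingDebugger) {
            CLHSDB clhsdb = new CLHSDB(existingDebugger); // constructor added by this change
            clhsdb.run();                                 // run() is now public and skips re-attaching
        }
    }
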
--- a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Mon Jul 15 11:07:03 2013 +0100
@@ -101,6 +101,9 @@
 import sun.jvm.hotspot.utilities.soql.JSJavaScriptEngine;
 
 public class CommandProcessor {
+
+    volatile boolean quit;
+
     public abstract static class DebuggerInterface {
         public abstract HotSpotAgent getAgent();
         public abstract boolean isAttached();
@@ -1135,7 +1138,7 @@
                     usage();
                 } else {
                     debugger.detach();
-                    System.exit(0);
+                    quit = true;
                 }
             }
         },
@@ -1714,7 +1717,7 @@
                         }
                         protected void quit() {
                             debugger.detach();
-                            System.exit(0);
+                            quit = true;
                         }
                         protected BufferedReader getInputReader() {
                             return in;
@@ -1781,7 +1784,7 @@
 
     public void run(boolean prompt) {
         // Process interactive commands.
-        while (true) {
+        while (!quit) {
             if (prompt) printPrompt();
             String ln = null;
             try {
--- a/agent/src/share/classes/sun/jvm/hotspot/HSDB.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/HSDB.java	Mon Jul 15 11:07:03 2013 +0100
@@ -59,8 +59,11 @@
   // Internals only below this point
   //
   private HotSpotAgent agent;
+  private JVMDebugger jvmDebugger;
   private JDesktopPane desktop;
   private boolean      attached;
+  private boolean      argError;
+  private JFrame frame;
   /** List <JMenuItem> */
   private java.util.List attachMenuItems;
   /** List <JMenuItem> */
@@ -85,6 +88,11 @@
     System.out.println("           path-to-corefile:        Debug this corefile.  The default is 'core'");
     System.out.println("        If no arguments are specified, you can select what to do from the GUI.\n");
     HotSpotAgent.showUsage();
+    argError = true;
+  }
+
+  public HSDB(JVMDebugger d) {
+    jvmDebugger = d;
   }
 
   private HSDB(String[] args) {
@@ -95,7 +103,6 @@
     case (1):
       if (args[0].equals("help") || args[0].equals("-help")) {
         doUsage();
-        System.exit(0);
       }
       // If all numbers, it is a PID to attach to
       // Else, it is a pathname to a .../bin/java for a core file.
@@ -117,24 +124,29 @@
     default:
       System.out.println("HSDB Error: Too many options specified");
       doUsage();
-      System.exit(1);
     }
   }
 
-  private void run() {
-    // At this point, if pidText != null we are supposed to attach to it.
-    // Else, if execPath != null, it is the path of a jdk/bin/java
-    // and coreFilename is the pathname of a core file we are
-    // supposed to attach to.
+  // close this tool without calling System.exit
+  protected void closeUI() {
+      workerThread.shutdown();
+      frame.dispose();
+  }
+
+  public void run() {
+    // Don't start the UI if there were bad arguments.
+    if (argError) {
+        return;
+    }
 
     agent = new HotSpotAgent();
     workerThread = new WorkerThread();
     attachMenuItems = new java.util.ArrayList();
     detachMenuItems = new java.util.ArrayList();
 
-    JFrame frame = new JFrame("HSDB - HotSpot Debugger");
+    frame = new JFrame("HSDB - HotSpot Debugger");
     frame.setSize(800, 600);
-    frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
+    frame.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE);
 
     JMenuBar menuBar = new JMenuBar();
 
@@ -197,7 +209,7 @@
     item = createMenuItem("Exit",
                             new ActionListener() {
                                 public void actionPerformed(ActionEvent e) {
-                                  System.exit(0);
+                                  closeUI();
                                 }
                               });
     item.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_X, ActionEvent.ALT_MASK));
@@ -406,7 +418,15 @@
         }
       });
 
-    if (pidText != null) {
+    // If jvmDebugger is already set, we have been given a JVMDebugger.
+    // Otherwise, if pidText != null we are supposed to attach to it.
+    // Finally, if execPath != null, it is the path of a jdk/bin/java
+    // and coreFilename is the pathname of a core file we are
+    // supposed to attach to.
+
+    if (jvmDebugger != null) {
+      attach(jvmDebugger);
+    } else if (pidText != null) {
       attach(pidText);
     } else if (execPath != null) {
       attach(execPath, coreFilename);
@@ -1113,6 +1133,12 @@
       });
   }
 
+  // Attach to existing JVMDebugger, which should be already attached to a core/process.
+  private void attach(JVMDebugger d) {
+    attached = true;
+    showThreadsDialog();
+  }
+
   /** NOTE we are in a different thread here than either the main
       thread or the Swing/AWT event handler thread, so we must be very
       careful when creating or removing widgets */
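
The HSDB changes mirror the CLHSDB ones: a public JVMDebugger constructor, a closeUI() path that disposes the frame rather than calling System.exit, and DISPOSE_ON_CLOSE on the main window, so the GUI can be hosted inside another tool. A sketch under the same assumption that d is an already-attached JVMDebugger:

    import sun.jvm.hotspot.HSDB;
    import sun.jvm.hotspot.debugger.JVMDebugger;

    class EmbeddedHSDB {
        static void launch(JVMDebugger d) {
            HSDB hsdb = new HSDB(d); // public constructor added by this change
            hsdb.run();              // run() is now public; closing the window no longer exits the VM
        }
    }
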
--- a/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java	Mon Jul 15 11:07:03 2013 +0100
@@ -25,6 +25,8 @@
 package sun.jvm.hotspot;
 
 import java.rmi.RemoteException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
 
 import sun.jvm.hotspot.debugger.Debugger;
 import sun.jvm.hotspot.debugger.DebuggerException;
@@ -63,7 +65,6 @@
 
     private String os;
     private String cpu;
-    private String fileSep;
 
     // The system can work in several ways:
     //  - Attaching to local process
@@ -155,6 +156,14 @@
         go();
     }
 
+    /** This uses a JVMDebugger that is already attached to the core or process */
+    public synchronized void attach(JVMDebugger d)
+    throws DebuggerException {
+        debugger = d;
+        isServer = false;
+        go();
+    }
+
     /** This attaches to a "debug server" on a remote machine; this
       remote server has already attached to a process or opened a
       core file and is waiting for RMI calls on the Debugger object to
@@ -303,28 +312,37 @@
             // server, but not client attaching to server)
             //
 
-            try {
-                os  = PlatformInfo.getOS();
-                cpu = PlatformInfo.getCPU();
-            }
-            catch (UnsupportedPlatformException e) {
-                throw new DebuggerException(e);
-            }
-            fileSep = System.getProperty("file.separator");
+            // Handle existing or alternate JVMDebugger:
+            // these will set os, cpu independently of our PlatformInfo implementation.
+            String alternateDebugger = System.getProperty("sa.altDebugger");
+            if (debugger != null) {
+                setupDebuggerExisting();
 
-            if (os.equals("solaris")) {
-                setupDebuggerSolaris();
-            } else if (os.equals("win32")) {
-                setupDebuggerWin32();
-            } else if (os.equals("linux")) {
-                setupDebuggerLinux();
-            } else if (os.equals("bsd")) {
-                setupDebuggerBsd();
-            } else if (os.equals("darwin")) {
-                setupDebuggerDarwin();
+            } else if (alternateDebugger != null) {
+                setupDebuggerAlternate(alternateDebugger);
+
             } else {
-                // Add support for more operating systems here
-                throw new DebuggerException("Operating system " + os + " not yet supported");
+                // Otherwise, os, cpu are those of our current platform:
+                try {
+                    os  = PlatformInfo.getOS();
+                    cpu = PlatformInfo.getCPU();
+                } catch (UnsupportedPlatformException e) {
+                   throw new DebuggerException(e);
+                }
+                if (os.equals("solaris")) {
+                    setupDebuggerSolaris();
+                } else if (os.equals("win32")) {
+                    setupDebuggerWin32();
+                } else if (os.equals("linux")) {
+                    setupDebuggerLinux();
+                } else if (os.equals("bsd")) {
+                    setupDebuggerBsd();
+                } else if (os.equals("darwin")) {
+                    setupDebuggerDarwin();
+                } else {
+                    // Add support for more operating systems here
+                    throw new DebuggerException("Operating system " + os + " not yet supported");
+                }
             }
 
             if (isServer) {
@@ -423,6 +441,41 @@
     // OS-specific debugger setup/connect routines
     //
 
+    // Use the existing JVMDebugger, as passed to our constructor.
+    // Retrieve os and cpu from that debugger, not the current platform.
+    private void setupDebuggerExisting() {
+
+        os = debugger.getOS();
+        cpu = debugger.getCPU();
+        setupJVMLibNames(os);
+        machDesc = debugger.getMachineDescription();
+    }
+
+    // Given a classname, load an alternate implementation of JVMDebugger.
+    private void setupDebuggerAlternate(String alternateName) {
+
+        try {
+            Class c = Class.forName(alternateName);
+            Constructor cons = c.getConstructor();
+            debugger = (JVMDebugger) cons.newInstance();
+            attachDebugger();
+            setupDebuggerExisting();
+
+        } catch (ClassNotFoundException cnfe) {
+            throw new DebuggerException("Cannot find alternate SA Debugger: '" + alternateName + "'");
+        } catch (NoSuchMethodException nsme) {
+            throw new DebuggerException("Alternate SA Debugger: '" + alternateName + "' has missing constructor.");
+        } catch (InstantiationException ie) {
+            throw new DebuggerException("Alternate SA Debugger: '" + alternateName + "' fails to initialise: ", ie);
+        } catch (IllegalAccessException iae) {
+            throw new DebuggerException("Alternate SA Debugger: '" + alternateName + "' fails to initialise: ", iae);
+        } catch (InvocationTargetException iae) {
+            throw new DebuggerException("Alternate SA Debugger: '" + alternateName + "' fails to initialise: ", iae);
+        }
+
+        System.err.println("Loaded alternate HotSpot SA Debugger: " + alternateName);
+    }
+
     //
     // Solaris
     //
@@ -466,6 +519,11 @@
         debugger = new RemoteDebuggerClient(remote);
         machDesc = ((RemoteDebuggerClient) debugger).getMachineDescription();
         os = debugger.getOS();
+        setupJVMLibNames(os);
+        cpu = debugger.getCPU();
+    }
+
+    private void setupJVMLibNames(String os) {
         if (os.equals("solaris")) {
             setupJVMLibNamesSolaris();
         } else if (os.equals("win32")) {
@@ -479,8 +537,6 @@
         } else {
             throw new RuntimeException("Unknown OS type");
         }
-
-        cpu = debugger.getCPU();
     }
 
     private void setupJVMLibNamesSolaris() {
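
HotSpotAgent now accepts a pre-attached JVMDebugger directly and can also load an alternate debugger implementation named by the sa.altDebugger system property. A minimal sketch of the direct-attach path, assuming existingDebugger is already attached (illustrative name):

    import sun.jvm.hotspot.HotSpotAgent;
    import sun.jvm.hotspot.debugger.JVMDebugger;

    class AttachWithExistingDebugger {
        static HotSpotAgent attach(JVMDebugger existingDebugger) {
            HotSpotAgent agent = new HotSpotAgent();
            agent.attach(existingDebugger); // new attach(JVMDebugger) overload; os/cpu come from the debugger
            return agent;
        }
    }

The alternate path instead instantiates the class named by -Dsa.altDebugger=<classname> through its no-argument constructor, attaches it, and then configures os/cpu from it, as setupDebuggerAlternate above shows.
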
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxAddress.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxAddress.java	Mon Jul 15 11:07:03 2013 +0100
@@ -26,11 +26,11 @@
 
 import sun.jvm.hotspot.debugger.*;
 
-class LinuxAddress implements Address {
+public class LinuxAddress implements Address {
     protected LinuxDebugger debugger;
     protected long addr;
 
-    LinuxAddress(LinuxDebugger debugger, long addr) {
+    public LinuxAddress(LinuxDebugger debugger, long addr) {
         this.debugger = debugger;
         this.addr = addr;
     }
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxOopHandle.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxOopHandle.java	Mon Jul 15 11:07:03 2013 +0100
@@ -26,8 +26,8 @@
 
 import sun.jvm.hotspot.debugger.*;
 
-class LinuxOopHandle extends LinuxAddress implements OopHandle {
-  LinuxOopHandle(LinuxDebugger debugger, long addr) {
+public class LinuxOopHandle extends LinuxAddress implements OopHandle {
+  public LinuxOopHandle(LinuxDebugger debugger, long addr) {
     super(debugger, addr);
   }
 
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Mon Jul 15 11:07:03 2013 +0100
@@ -246,7 +246,7 @@
      }
   }
 
-  private static final boolean disableDerivedPrinterTableCheck;
+  private static final boolean disableDerivedPointerTableCheck;
   private static final Properties saProps;
 
   static {
@@ -256,12 +256,12 @@
        url = VM.class.getClassLoader().getResource("sa.properties");
        saProps.load(new BufferedInputStream(url.openStream()));
      } catch (Exception e) {
-       throw new RuntimeException("Unable to load properties  " +
+       System.err.println("Unable to load properties  " +
                                   (url == null ? "null" : url.toString()) +
                                   ": " + e.getMessage());
      }
 
-     disableDerivedPrinterTableCheck = System.getProperty("sun.jvm.hotspot.runtime.VM.disableDerivedPointerTableCheck") != null;
+     disableDerivedPointerTableCheck = System.getProperty("sun.jvm.hotspot.runtime.VM.disableDerivedPointerTableCheck") != null;
   }
 
   private VM(TypeDataBase db, JVMDebugger debugger, boolean isBigEndian) {
@@ -371,7 +371,8 @@
   /** This is used by the debugging system */
   public static void initialize(TypeDataBase db, JVMDebugger debugger) {
     if (soleInstance != null) {
-      throw new RuntimeException("Attempt to initialize VM twice");
+      // Using multiple SA Tool classes in the same process creates a call here.
+      return;
     }
     soleInstance = new VM(db, debugger, debugger.getMachineDescription().isBigEndian());
 
@@ -683,7 +684,7 @@
 
   /** Returns true if C2 derived pointer table should be used, false otherwise */
   public boolean useDerivedPointerTable() {
-    return !disableDerivedPrinterTableCheck;
+    return !disableDerivedPointerTableCheck;
   }
 
   /** Returns the code cache; should not be used if is core build */
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/ClassLoaderStats.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/ClassLoaderStats.java	Mon Jul 15 11:07:03 2013 +0100
@@ -41,6 +41,14 @@
 public class ClassLoaderStats extends Tool {
    boolean verbose = true;
 
+   public ClassLoaderStats() {
+      super();
+   }
+
+   public ClassLoaderStats(JVMDebugger d) {
+      super(d);
+   }
+
    public static void main(String[] args) {
       ClassLoaderStats cls = new ClassLoaderStats();
       cls.start(args);
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/FinalizerInfo.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/FinalizerInfo.java	Mon Jul 15 11:07:03 2013 +0100
@@ -24,6 +24,7 @@
 
 package sun.jvm.hotspot.tools;
 
+import sun.jvm.hotspot.debugger.JVMDebugger;
 import sun.jvm.hotspot.tools.*;
 
 import sun.jvm.hotspot.oops.*;
@@ -42,6 +43,15 @@
  * summary of these objects in the form of a histogram.
  */
 public class FinalizerInfo extends Tool {
+
+    public FinalizerInfo() {
+        super();
+    }
+
+    public FinalizerInfo(JVMDebugger d) {
+        super(d);
+    }
+
     public static void main(String[] args) {
         FinalizerInfo finfo = new FinalizerInfo();
         finfo.start(args);
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/FlagDumper.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/FlagDumper.java	Mon Jul 15 11:07:03 2013 +0100
@@ -25,10 +25,19 @@
 package sun.jvm.hotspot.tools;
 
 import java.io.PrintStream;
+import sun.jvm.hotspot.debugger.JVMDebugger;
 import sun.jvm.hotspot.runtime.*;
 
 public class FlagDumper extends Tool {
 
+    public FlagDumper() {
+        super();
+    }
+
+    public FlagDumper(JVMDebugger d) {
+        super(d);
+    }
+
    public void run() {
       VM.Flag[] flags = VM.getVM().getCommandLineFlags();
       PrintStream out = System.out;
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapDumper.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapDumper.java	Mon Jul 15 11:07:03 2013 +0100
@@ -25,6 +25,7 @@
 package sun.jvm.hotspot.tools;
 
 import sun.jvm.hotspot.utilities.HeapHprofBinWriter;
+import sun.jvm.hotspot.debugger.JVMDebugger;
 import java.io.IOException;
 
 /*
@@ -42,6 +43,11 @@
         this.dumpFile = dumpFile;
     }
 
+    public HeapDumper(String dumpFile, JVMDebugger d) {
+        super(d);
+        this.dumpFile = dumpFile;
+    }
+
     protected void printFlagsUsage() {
         System.out.println("    <no option>\tto dump heap to " +
             DEFAULT_DUMP_FILE);
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Mon Jul 15 11:07:03 2013 +0100
@@ -29,12 +29,21 @@
 import sun.jvm.hotspot.gc_implementation.g1.*;
 import sun.jvm.hotspot.gc_implementation.parallelScavenge.*;
 import sun.jvm.hotspot.gc_implementation.shared.*;
+import sun.jvm.hotspot.debugger.JVMDebugger;
 import sun.jvm.hotspot.memory.*;
 import sun.jvm.hotspot.oops.*;
 import sun.jvm.hotspot.runtime.*;
 
 public class HeapSummary extends Tool {
 
+   public HeapSummary() {
+      super();
+   }
+
+   public HeapSummary(JVMDebugger d) {
+      super(d);
+   }
+
    public static void main(String[] args) {
       HeapSummary hs = new HeapSummary();
       hs.start(args);
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/JInfo.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JInfo.java	Mon Jul 15 11:07:03 2013 +0100
@@ -25,12 +25,21 @@
 package sun.jvm.hotspot.tools;
 
 import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.debugger.JVMDebugger;
 
 public class JInfo extends Tool {
+    public JInfo() {
+        super();
+    }
+
     public JInfo(int m) {
         mode = m;
     }
 
+    public JInfo(JVMDebugger d) {
+        super(d);
+    }
+
     protected boolean needsJavaPrefix() {
         return false;
     }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java	Mon Jul 15 11:07:03 2013 +0100
@@ -25,6 +25,7 @@
 package sun.jvm.hotspot.tools;
 
 import java.io.*;
+import sun.jvm.hotspot.debugger.JVMDebugger;
 import sun.jvm.hotspot.utilities.*;
 
 public class JMap extends Tool {
@@ -36,6 +37,10 @@
         this(MODE_PMAP);
     }
 
+    public JMap(JVMDebugger d) {
+        super(d);
+    }
+
     protected boolean needsJavaPrefix() {
         return false;
     }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/JSnap.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JSnap.java	Mon Jul 15 11:07:03 2013 +0100
@@ -25,9 +25,19 @@
 package sun.jvm.hotspot.tools;
 
 import java.io.*;
+import sun.jvm.hotspot.debugger.JVMDebugger;
 import sun.jvm.hotspot.runtime.*;
 
 public class JSnap extends Tool {
+
+    public JSnap() {
+        super();
+    }
+
+    public JSnap(JVMDebugger d) {
+        super(d);
+    }
+
     public void run() {
         final PrintStream out = System.out;
         if (PerfMemory.initialized()) {
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/JStack.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JStack.java	Mon Jul 15 11:07:03 2013 +0100
@@ -24,6 +24,8 @@
 
 package sun.jvm.hotspot.tools;
 
+import sun.jvm.hotspot.debugger.JVMDebugger;
+
 public class JStack extends Tool {
     public JStack(boolean mixedMode, boolean concurrentLocks) {
         this.mixedMode = mixedMode;
@@ -34,6 +36,10 @@
         this(true, true);
     }
 
+    public JStack(JVMDebugger d) {
+        super(d);
+    }
+
     protected boolean needsJavaPrefix() {
         return false;
     }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/ObjectHistogram.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/ObjectHistogram.java	Mon Jul 15 11:07:03 2013 +0100
@@ -33,6 +33,14 @@
     an object histogram from a remote or crashed VM. */
 public class ObjectHistogram extends Tool {
 
+    public ObjectHistogram() {
+       super();
+    }
+
+    public ObjectHistogram(JVMDebugger d) {
+       super(d);
+    }
+
    public void run() {
       run(System.out, System.err);
    }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/PMap.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/PMap.java	Mon Jul 15 11:07:03 2013 +0100
@@ -31,6 +31,15 @@
 import sun.jvm.hotspot.runtime.*;
 
 public class PMap extends Tool {
+
+   public PMap() {
+       super();
+   }
+
+   public PMap(JVMDebugger d) {
+       super(d);
+   }
+
    public void run() {
       run(System.out);
    }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/PStack.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/PStack.java	Mon Jul 15 11:07:03 2013 +0100
@@ -45,6 +45,10 @@
       this(true, true);
    }
 
+   public PStack(JVMDebugger d) {
+      super(d);
+   }
+
    public void run() {
       run(System.out);
    }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/StackTrace.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/StackTrace.java	Mon Jul 15 11:07:03 2013 +0100
@@ -45,6 +45,16 @@
         run(System.out);
     }
 
+    public StackTrace(JVMDebugger d) {
+        super(d);
+    }
+
+    public StackTrace(JVMDebugger d, boolean v, boolean concurrentLocks) {
+        super(d);
+        this.verbose = v;
+        this.concurrentLocks = concurrentLocks;
+    }
+
     public void run(java.io.PrintStream tty) {
         // Ready to go with the database...
         try {
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/SysPropsDumper.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/SysPropsDumper.java	Mon Jul 15 11:07:03 2013 +0100
@@ -27,10 +27,19 @@
 import java.io.PrintStream;
 import java.util.*;
 
+import sun.jvm.hotspot.debugger.JVMDebugger;
 import sun.jvm.hotspot.runtime.*;
 
 public class SysPropsDumper extends Tool {
 
+   public SysPropsDumper() {
+      super();
+   }
+
+   public SysPropsDumper(JVMDebugger d) {
+      super(d);
+   }
+
    public void run() {
       Properties sysProps = VM.getVM().getSystemProperties();
       PrintStream out = System.out;
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/Tool.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/Tool.java	Mon Jul 15 11:07:03 2013 +0100
@@ -35,6 +35,7 @@
 
 public abstract class Tool implements Runnable {
    private HotSpotAgent agent;
+   private JVMDebugger jvmDebugger;
    private int debugeeType;
 
    // debugeeType is one of constants below
@@ -42,6 +43,13 @@
    protected static final int DEBUGEE_CORE   = 1;
    protected static final int DEBUGEE_REMOTE = 2;
 
+   public Tool() {
+   }
+
+   public Tool(JVMDebugger d) {
+      jvmDebugger = d;
+   }
+
    public String getName() {
       return getClass().getName();
    }
@@ -90,7 +98,6 @@
 
    protected void usage() {
       printUsage();
-      System.exit(1);
    }
 
    /*
@@ -106,13 +113,13 @@
    protected void stop() {
       if (agent != null) {
          agent.detach();
-         System.exit(0);
       }
    }
 
    protected void start(String[] args) {
       if ((args.length < 1) || (args.length > 2)) {
          usage();
+         return;
       }
 
       // Attempt to handle -h or -help or some invalid flag
@@ -185,13 +192,31 @@
         }
         if (e.getMessage() != null) {
           err.print(e.getMessage());
+          e.printStackTrace();
         }
         err.println();
-        System.exit(1);
+        return;
       }
 
       err.println("Debugger attached successfully.");
+      startInternal();
+   }
 
+   // When using an existing JVMDebugger.
+   public void start() {
+
+      if (jvmDebugger == null) {
+         throw new RuntimeException("Tool.start() called with no JVMDebugger set.");
+      }
+      agent = new HotSpotAgent();
+      agent.attach(jvmDebugger);
+      startInternal();
+   }
+
+   // Remains of the start mechanism, common to both start methods.
+   private void startInternal() {
+
+      PrintStream err = System.err;
       VM vm = VM.getVM();
       if (vm.isCore()) {
         err.println("Core build detected.");
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java	Mon Jul 15 11:07:03 2013 +0100
@@ -25,6 +25,7 @@
 package sun.jvm.hotspot.tools.jcore;
 
 import java.io.*;
+import java.lang.reflect.Constructor;
 import java.util.jar.JarOutputStream;
 import java.util.jar.JarEntry;
 import java.util.jar.Manifest;
@@ -38,6 +39,16 @@
     private ClassFilter classFilter;
     private String      outputDirectory;
     private JarOutputStream jarStream;
+    private String      pkgList;
+
+    public ClassDump() {
+        super();
+    }
+
+    public ClassDump(JVMDebugger d, String pkgList) {
+        super(d);
+        this.pkgList = pkgList;
+    }
 
     public void setClassFilter(ClassFilter cf) {
         classFilter = cf;
@@ -63,6 +74,25 @@
     public void run() {
         // Ready to go with the database...
         try {
+            // The name of the filter always comes from a System property.
+            // If we have a pkgList, pass it, otherwise let the filter read
+            // its own System property for the list of classes.
+            String filterClassName = System.getProperty("sun.jvm.hotspot.tools.jcore.filter",
+                                                        "sun.jvm.hotspot.tools.jcore.PackageNameFilter");
+            try {
+                Class filterClass = Class.forName(filterClassName);
+                if (pkgList == null) {
+                    classFilter = (ClassFilter) filterClass.newInstance();
+                } else {
+                    Constructor con = filterClass.getConstructor(String.class);
+                    classFilter = (ClassFilter) con.newInstance(pkgList);
+                }
+            } catch(Exception exp) {
+                System.err.println("Warning: Can not create class filter!");
+            }
+
+            String outputDirectory = System.getProperty("sun.jvm.hotspot.tools.jcore.outputDir", ".");
+            setOutputDirectory(outputDirectory);
 
             // walk through the system dictionary
             SystemDictionary dict = VM.getVM().getSystemDictionary();
@@ -139,26 +169,8 @@
     }
 
     public static void main(String[] args) {
-        // load class filters
-        ClassFilter classFilter = null;
-        String filterClassName = System.getProperty("sun.jvm.hotspot.tools.jcore.filter");
-        if (filterClassName != null) {
-            try {
-                Class filterClass = Class.forName(filterClassName);
-                classFilter = (ClassFilter) filterClass.newInstance();
-            } catch(Exception exp) {
-                System.err.println("Warning: Can not create class filter!");
-            }
-        }
-
-        String outputDirectory = System.getProperty("sun.jvm.hotspot.tools.jcore.outputDir");
-        if (outputDirectory == null)
-            outputDirectory = ".";
-
 
         ClassDump cd = new ClassDump();
-        cd.setClassFilter(classFilter);
-        cd.setOutputDirectory(outputDirectory);
         cd.start(args);
         cd.stop();
     }
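
ClassDump now builds its class filter and output directory inside run(), reading the sun.jvm.hotspot.tools.jcore.filter and sun.jvm.hotspot.tools.jcore.outputDir system properties (defaulting to PackageNameFilter and the current directory), and it gains a constructor taking a JVMDebugger plus a package list for that filter. A sketch of the in-process use, assuming d is an attached JVMDebugger; the output path and package list are illustrative:

    import sun.jvm.hotspot.debugger.JVMDebugger;
    import sun.jvm.hotspot.tools.jcore.ClassDump;

    class InProcessClassDump {
        static void dump(JVMDebugger d) {
            System.setProperty("sun.jvm.hotspot.tools.jcore.outputDir", "/tmp/dumped-classes"); // illustrative path
            ClassDump cd = new ClassDump(d, "java.lang,java.util"); // pkgList handed to the name filter
            cd.start();                                             // no-argument start() from Tool
        }
    }
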
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/soql/JSDB.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/soql/JSDB.java	Mon Jul 15 11:07:03 2013 +0100
@@ -24,12 +24,22 @@
 
 package sun.jvm.hotspot.tools.soql;
 
+import sun.jvm.hotspot.debugger.JVMDebugger;
 import sun.jvm.hotspot.tools.*;
 import sun.jvm.hotspot.utilities.*;
 import sun.jvm.hotspot.utilities.soql.*;
 
 /** This is command line JavaScript debugger console */
 public class JSDB extends Tool {
+
+    public JSDB() {
+        super();
+    }
+
+    public JSDB(JVMDebugger d) {
+        super(d);
+    }
+
     public static void main(String[] args) {
         JSDB jsdb = new JSDB();
         jsdb.start(args);
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/soql/SOQL.java	Thu Jul 11 12:59:03 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/soql/SOQL.java	Mon Jul 15 11:07:03 2013 +0100
@@ -44,6 +44,14 @@
       soql.stop();
    }
 
+   public SOQL() {
+      super();
+   }
+
+   public SOQL(JVMDebugger d) {
+      super(d);
+   }
+
    protected SOQLEngine soqlEngine;
    protected BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
    protected PrintStream out       = System.out;
--- a/make/bsd/makefiles/build_vm_def.sh	Thu Jul 11 12:59:03 2013 -0400
+++ b/make/bsd/makefiles/build_vm_def.sh	Mon Jul 15 11:07:03 2013 +0100
@@ -7,6 +7,6 @@
 NM=nm
 fi
 
-$NM --defined-only $* | awk '
-   { if ($3 ~ /^_ZTV/ || $3 ~ /^gHotSpotVM/) print "\t" $3 ";" }
+$NM -Uj $* | awk '
+   { if ($3 ~ /^_ZTV/ || $3 ~ /^gHotSpotVM/) print "\t" $3 }
    '
--- a/make/bsd/makefiles/gcc.make	Thu Jul 11 12:59:03 2013 -0400
+++ b/make/bsd/makefiles/gcc.make	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -368,8 +368,8 @@
   # Standard linker flags
   LFLAGS +=
 
-  # Darwin doesn't use ELF and doesn't support version scripts
-  LDNOMAP = true
+  # The apple linker has its own variant of mapfiles/version-scripts
+  MAPFLAG = -Xlinker -exported_symbols_list -Xlinker FILENAME
 
   # Use $(SONAMEFLAG:SONAME=soname) to specify the intrinsic name of a shared obj
   SONAMEFLAG =
--- a/make/bsd/makefiles/mapfile-vers-debug	Thu Jul 11 12:59:03 2013 -0400
+++ b/make/bsd/makefiles/mapfile-vers-debug	Mon Jul 15 11:07:03 2013 +0100
@@ -1,7 +1,3 @@
-#
-# @(#)mapfile-vers-debug        1.18 07/10/25 16:47:35
-#
-
 #
 # Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -23,273 +19,244 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
-#  
 #
+#
+# Only used for OSX/Darwin builds
 
 # Define public interface.
+                # _JNI
+                _JNI_CreateJavaVM
+                _JNI_GetCreatedJavaVMs
+                _JNI_GetDefaultJavaVMInitArgs
 
-SUNWprivate_1.1 {
-        global:
-                # JNI
-                JNI_CreateJavaVM;
-                JNI_GetCreatedJavaVMs;
-                JNI_GetDefaultJavaVMInitArgs;
+                # _JVM
+                _JVM_Accept
+                _JVM_ActiveProcessorCount
+                _JVM_AllocateNewArray
+                _JVM_AllocateNewObject
+                _JVM_ArrayCopy
+                _JVM_AssertionStatusDirectives
+                _JVM_Available
+                _JVM_Bind
+                _JVM_ClassDepth
+                _JVM_ClassLoaderDepth
+                _JVM_Clone
+                _JVM_Close
+                _JVM_CX8Field
+                _JVM_CompileClass
+                _JVM_CompileClasses
+                _JVM_CompilerCommand
+                _JVM_Connect
+                _JVM_ConstantPoolGetClassAt
+                _JVM_ConstantPoolGetClassAtIfLoaded
+                _JVM_ConstantPoolGetDoubleAt
+                _JVM_ConstantPoolGetFieldAt
+                _JVM_ConstantPoolGetFieldAtIfLoaded
+                _JVM_ConstantPoolGetFloatAt
+                _JVM_ConstantPoolGetIntAt
+                _JVM_ConstantPoolGetLongAt
+                _JVM_ConstantPoolGetMethodAt
+                _JVM_ConstantPoolGetMethodAtIfLoaded
+                _JVM_ConstantPoolGetMemberRefInfoAt
+                _JVM_ConstantPoolGetSize
+                _JVM_ConstantPoolGetStringAt
+                _JVM_ConstantPoolGetUTF8At
+                _JVM_CountStackFrames
+                _JVM_CurrentClassLoader
+                _JVM_CurrentLoadedClass
+                _JVM_CurrentThread
+                _JVM_CurrentTimeMillis
+                _JVM_DefineClass
+                _JVM_DefineClassWithSource
+                _JVM_DefineClassWithSourceCond
+                _JVM_DesiredAssertionStatus
+                _JVM_DisableCompiler
+                _JVM_DoPrivileged
+                _JVM_DTraceGetVersion
+                _JVM_DTraceActivate
+                _JVM_DTraceIsProbeEnabled
+                _JVM_DTraceIsSupported
+                _JVM_DTraceDispose
+                _JVM_DumpAllStacks
+                _JVM_DumpThreads
+                _JVM_EnableCompiler
+                _JVM_Exit
+                _JVM_FillInStackTrace
+                _JVM_FindClassFromClass
+                _JVM_FindClassFromClassLoader
+                _JVM_FindClassFromBootLoader
+                _JVM_FindLibraryEntry
+                _JVM_FindLoadedClass
+                _JVM_FindPrimitiveClass
+                _JVM_FindSignal
+                _JVM_FreeMemory
+                _JVM_GC
+                _JVM_GetAllThreads
+                _JVM_GetArrayElement
+                _JVM_GetArrayLength
+                _JVM_GetCPClassNameUTF
+                _JVM_GetCPFieldClassNameUTF
+                _JVM_GetCPFieldModifiers
+                _JVM_GetCPFieldNameUTF
+                _JVM_GetCPFieldSignatureUTF
+                _JVM_GetCPMethodClassNameUTF
+                _JVM_GetCPMethodModifiers
+                _JVM_GetCPMethodNameUTF
+                _JVM_GetCPMethodSignatureUTF
+                _JVM_GetCallerClass
+                _JVM_GetClassAccessFlags
+                _JVM_GetClassAnnotations
+                _JVM_GetClassCPEntriesCount
+                _JVM_GetClassCPTypes
+                _JVM_GetClassConstantPool
+                _JVM_GetClassContext
+                _JVM_GetClassDeclaredConstructors
+                _JVM_GetClassDeclaredFields
+                _JVM_GetClassDeclaredMethods
+                _JVM_GetClassFieldsCount
+                _JVM_GetClassInterfaces
+                _JVM_GetClassLoader
+                _JVM_GetClassMethodsCount
+                _JVM_GetClassModifiers
+                _JVM_GetClassName
+                _JVM_GetClassNameUTF
+                _JVM_GetClassSignature
+                _JVM_GetClassSigners
+                _JVM_GetClassTypeAnnotations
+                _JVM_GetComponentType
+                _JVM_GetDeclaredClasses
+                _JVM_GetDeclaringClass
+                _JVM_GetEnclosingMethodInfo
+                _JVM_GetFieldAnnotations
+                _JVM_GetFieldIxModifiers
+                _JVM_GetFieldTypeAnnotations
+                _JVM_GetHostName
+                _JVM_GetInheritedAccessControlContext
+                _JVM_GetInterfaceVersion
+                _JVM_GetLastErrorString
+                _JVM_GetManagement
+                _JVM_GetMethodAnnotations
+                _JVM_GetMethodDefaultAnnotationValue
+                _JVM_GetMethodIxArgsSize
+                _JVM_GetMethodIxByteCode
+                _JVM_GetMethodIxByteCodeLength
+                _JVM_GetMethodIxExceptionIndexes
+                _JVM_GetMethodIxExceptionTableEntry
+                _JVM_GetMethodIxExceptionTableLength
+                _JVM_GetMethodIxExceptionsCount
+                _JVM_GetMethodIxLocalsCount
+                _JVM_GetMethodIxMaxStack
+                _JVM_GetMethodIxModifiers
+                _JVM_GetMethodIxNameUTF
+                _JVM_GetMethodIxSignatureUTF
+                _JVM_GetMethodParameterAnnotations
+                _JVM_GetMethodParameters
+                _JVM_GetMethodTypeAnnotations
+                _JVM_GetPrimitiveArrayElement
+                _JVM_GetProtectionDomain
+                _JVM_GetSockName
+                _JVM_GetSockOpt
+                _JVM_GetStackAccessControlContext
+                _JVM_GetStackTraceDepth
+                _JVM_GetStackTraceElement
+                _JVM_GetSystemPackage
+                _JVM_GetSystemPackages
+                _JVM_GetThreadStateNames
+                _JVM_GetThreadStateValues
+                _JVM_GetVersionInfo
+                _JVM_Halt
+                _JVM_HoldsLock
+                _JVM_IHashCode
+                _JVM_InitAgentProperties
+                _JVM_InitProperties
+                _JVM_InitializeCompiler
+                _JVM_InitializeSocketLibrary
+                _JVM_InternString
+                _JVM_Interrupt
+                _JVM_InvokeMethod
+                _JVM_IsArrayClass
+                _JVM_IsConstructorIx
+                _JVM_IsInterface
+                _JVM_IsInterrupted
+                _JVM_IsNaN
+                _JVM_IsPrimitiveClass
+                _JVM_IsSameClassPackage
+                _JVM_IsSilentCompiler
+                _JVM_IsSupportedJNIVersion
+                _JVM_IsThreadAlive
+                _JVM_IsVMGeneratedMethodIx
+                _JVM_LatestUserDefinedLoader
+                _JVM_Listen
+                _JVM_LoadClass0
+                _JVM_LoadLibrary
+                _JVM_Lseek
+                _JVM_MaxObjectInspectionAge
+                _JVM_MaxMemory
+                _JVM_MonitorNotify
+                _JVM_MonitorNotifyAll
+                _JVM_MonitorWait
+                _JVM_NanoTime
+                _JVM_NativePath
+                _JVM_NewArray
+                _JVM_NewInstanceFromConstructor
+                _JVM_NewMultiArray
+                _JVM_OnExit
+                _JVM_Open
+                _JVM_RaiseSignal
+                _JVM_RawMonitorCreate
+                _JVM_RawMonitorDestroy
+                _JVM_RawMonitorEnter
+                _JVM_RawMonitorExit
+                _JVM_Read
+                _JVM_Recv
+                _JVM_RecvFrom
+                _JVM_RegisterSignal
+                _JVM_ReleaseUTF
+                _JVM_ResolveClass
+                _JVM_ResumeThread
+                _JVM_Send
+                _JVM_SendTo
+                _JVM_SetArrayElement
+                _JVM_SetClassSigners
+                _JVM_SetLength
+                _JVM_SetNativeThreadName
+                _JVM_SetPrimitiveArrayElement
+                _JVM_SetProtectionDomain
+                _JVM_SetSockOpt
+                _JVM_SetThreadPriority
+                _JVM_Sleep
+                _JVM_Socket
+                _JVM_SocketAvailable
+                _JVM_SocketClose
+                _JVM_SocketShutdown
+                _JVM_StartThread
+                _JVM_StopThread
+                _JVM_SuspendThread
+                _JVM_SupportsCX8
+                _JVM_Sync
+                _JVM_Timeout
+                _JVM_TotalMemory
+                _JVM_TraceInstructions
+                _JVM_TraceMethodCalls
+                _JVM_UnloadLibrary
+                _JVM_Write
+                _JVM_Yield
+                _JVM_handle_bsd_signal
 
-                # JVM
-                JVM_Accept;
-                JVM_ActiveProcessorCount;
-                JVM_AllocateNewArray;
-                JVM_AllocateNewObject;
-                JVM_ArrayCopy;
-                JVM_AssertionStatusDirectives;
-                JVM_Available;
-                JVM_Bind;
-                JVM_ClassDepth;
-                JVM_ClassLoaderDepth;
-                JVM_Clone;
-                JVM_Close;
-                JVM_CX8Field;
-                JVM_CompileClass;
-                JVM_CompileClasses;
-                JVM_CompilerCommand;
-                JVM_Connect;
-                JVM_ConstantPoolGetClassAt;
-                JVM_ConstantPoolGetClassAtIfLoaded;
-                JVM_ConstantPoolGetDoubleAt;
-                JVM_ConstantPoolGetFieldAt;
-                JVM_ConstantPoolGetFieldAtIfLoaded;
-                JVM_ConstantPoolGetFloatAt;
-                JVM_ConstantPoolGetIntAt;
-                JVM_ConstantPoolGetLongAt;
-                JVM_ConstantPoolGetMethodAt;
-                JVM_ConstantPoolGetMethodAtIfLoaded;
-                JVM_ConstantPoolGetMemberRefInfoAt;
-                JVM_ConstantPoolGetSize;
-                JVM_ConstantPoolGetStringAt;
-                JVM_ConstantPoolGetUTF8At;
-                JVM_CountStackFrames;
-                JVM_CurrentClassLoader;
-                JVM_CurrentLoadedClass;
-                JVM_CurrentThread;
-                JVM_CurrentTimeMillis;
-                JVM_DefineClass;
-                JVM_DefineClassWithSource;
-                JVM_DefineClassWithSourceCond;
-                JVM_DesiredAssertionStatus;
-                JVM_DisableCompiler;
-                JVM_DoPrivileged;
-                JVM_DTraceGetVersion;
-                JVM_DTraceActivate;
-                JVM_DTraceIsProbeEnabled;
-                JVM_DTraceIsSupported;
-                JVM_DTraceDispose;
-                JVM_DumpAllStacks;
-                JVM_DumpThreads;
-                JVM_EnableCompiler;
-                JVM_Exit;
-                JVM_FillInStackTrace;
-                JVM_FindClassFromClass;
-                JVM_FindClassFromClassLoader;
-                JVM_FindClassFromBootLoader;
-                JVM_FindLibraryEntry;
-                JVM_FindLoadedClass;
-                JVM_FindPrimitiveClass;
-                JVM_FindSignal;
-                JVM_FreeMemory;
-                JVM_GC;
-                JVM_GetAllThreads;
-                JVM_GetArrayElement;
-                JVM_GetArrayLength;
-                JVM_GetCPClassNameUTF;
-                JVM_GetCPFieldClassNameUTF;
-                JVM_GetCPFieldModifiers;
-                JVM_GetCPFieldNameUTF;
-                JVM_GetCPFieldSignatureUTF;
-                JVM_GetCPMethodClassNameUTF;
-                JVM_GetCPMethodModifiers;
-                JVM_GetCPMethodNameUTF;
-                JVM_GetCPMethodSignatureUTF;
-                JVM_GetCallerClass;
-                JVM_GetClassAccessFlags;
-                JVM_GetClassAnnotations;
-                JVM_GetClassCPEntriesCount;
-                JVM_GetClassCPTypes;
-                JVM_GetClassConstantPool;
-                JVM_GetClassContext;
-                JVM_GetClassDeclaredConstructors;
-                JVM_GetClassDeclaredFields;
-                JVM_GetClassDeclaredMethods;
-                JVM_GetClassFieldsCount;
-                JVM_GetClassInterfaces;
-                JVM_GetClassLoader;
-                JVM_GetClassMethodsCount;
-                JVM_GetClassModifiers;
-                JVM_GetClassName;
-                JVM_GetClassNameUTF;
-                        JVM_GetClassSignature;
-                JVM_GetClassSigners;
-                JVM_GetClassTypeAnnotations;
-                JVM_GetComponentType;
-                JVM_GetDeclaredClasses;
-                JVM_GetDeclaringClass;
-                JVM_GetEnclosingMethodInfo;
-                JVM_GetFieldAnnotations;
-                JVM_GetFieldIxModifiers;
-                JVM_GetFieldTypeAnnotations;
-                JVM_GetHostName;
-                JVM_GetInheritedAccessControlContext;
-                JVM_GetInterfaceVersion;
-                JVM_GetLastErrorString;
-                JVM_GetManagement;
-                JVM_GetMethodAnnotations;
-                JVM_GetMethodDefaultAnnotationValue;
-                JVM_GetMethodIxArgsSize;
-                JVM_GetMethodIxByteCode;
-                JVM_GetMethodIxByteCodeLength;
-                JVM_GetMethodIxExceptionIndexes;
-                JVM_GetMethodIxExceptionTableEntry;
-                JVM_GetMethodIxExceptionTableLength;
-                JVM_GetMethodIxExceptionsCount;
-                JVM_GetMethodIxLocalsCount;
-                JVM_GetMethodIxMaxStack;
-                JVM_GetMethodIxModifiers;
-                JVM_GetMethodIxNameUTF;
-                JVM_GetMethodIxSignatureUTF;
-                JVM_GetMethodParameterAnnotations;
-                JVM_GetMethodParameters;
-                JVM_GetMethodTypeAnnotations;
-                JVM_GetPrimitiveArrayElement;
-                JVM_GetProtectionDomain;
-                JVM_GetSockName;
-                JVM_GetSockOpt;
-                JVM_GetStackAccessControlContext;
-                JVM_GetStackTraceDepth;
-                JVM_GetStackTraceElement;
-                JVM_GetSystemPackage;
-                JVM_GetSystemPackages;
-                JVM_GetThreadStateNames;
-                JVM_GetThreadStateValues;
-                JVM_GetVersionInfo;
-                JVM_Halt;
-                JVM_HoldsLock;
-                JVM_IHashCode;
-                JVM_InitAgentProperties;
-                JVM_InitProperties;
-                JVM_InitializeCompiler;
-                JVM_InitializeSocketLibrary;
-                JVM_InternString;
-                JVM_Interrupt;
-                JVM_InvokeMethod;
-                JVM_IsArrayClass;
-                JVM_IsConstructorIx;
-                JVM_IsInterface;
-                JVM_IsInterrupted;
-                JVM_IsNaN;
-                JVM_IsPrimitiveClass;
-                JVM_IsSameClassPackage;
-                JVM_IsSilentCompiler;
-                JVM_IsSupportedJNIVersion;
-                JVM_IsThreadAlive;
-                JVM_IsVMGeneratedMethodIx;
-                JVM_LatestUserDefinedLoader;
-                JVM_Listen;
-                JVM_LoadClass0;
-                JVM_LoadLibrary;
-                JVM_Lseek;
-                JVM_MaxObjectInspectionAge;
-                JVM_MaxMemory;
-                JVM_MonitorNotify;
-                JVM_MonitorNotifyAll;
-                JVM_MonitorWait;
-                JVM_NanoTime;
-                JVM_NativePath;
-                JVM_NewArray;
-                JVM_NewInstanceFromConstructor;
-                JVM_NewMultiArray;
-                JVM_OnExit;
-                JVM_Open;
-                JVM_RaiseSignal;
-                JVM_RawMonitorCreate;
-                JVM_RawMonitorDestroy;
-                JVM_RawMonitorEnter;
-                JVM_RawMonitorExit;
-                JVM_Read;
-                JVM_Recv;
-                JVM_RecvFrom;
-                JVM_RegisterSignal;
-                JVM_ReleaseUTF;
-                JVM_ResolveClass;
-                JVM_ResumeThread;
-                JVM_Send;
-                JVM_SendTo;
-                JVM_SetArrayElement;
-                JVM_SetClassSigners;
-                JVM_SetLength;
-                JVM_SetPrimitiveArrayElement;
-                JVM_SetProtectionDomain;
-                JVM_SetSockOpt;
-                JVM_SetThreadPriority;
-                JVM_Sleep;
-                JVM_Socket;
-                JVM_SocketAvailable;
-                JVM_SocketClose;
-                JVM_SocketShutdown;
-                JVM_StartThread;
-                JVM_StopThread;
-                JVM_SuspendThread;
-                JVM_SupportsCX8;
-                JVM_Sync;
-                JVM_Timeout;
-                JVM_TotalMemory;
-                JVM_TraceInstructions;
-                JVM_TraceMethodCalls;
-                JVM_UnloadLibrary;
-                JVM_Write;
-                JVM_Yield;
-                JVM_handle_bsd_signal;
-
-                # Old reflection routines
-                # These do not need to be present in the product build in JDK 1.4
-                # but their code has not been removed yet because there will not
-                # be a substantial code savings until JVM_InvokeMethod and
-                # JVM_NewInstanceFromConstructor can also be removed; see
-                # reflectionCompat.hpp.
-                JVM_GetClassConstructor;
-                JVM_GetClassConstructors;
-                JVM_GetClassField;
-                JVM_GetClassFields;
-                JVM_GetClassMethod;
-                JVM_GetClassMethods;
-                JVM_GetField;
-                JVM_GetPrimitiveField;
-                JVM_NewInstance;
-                JVM_SetField;
-                JVM_SetPrimitiveField;
-
-                # debug JVM
-                JVM_AccessVMBooleanFlag;
-                JVM_AccessVMIntFlag;
-                JVM_VMBreakPoint;
+                # debug _JVM
+                _JVM_AccessVMBooleanFlag
+                _JVM_AccessVMIntFlag
+                _JVM_VMBreakPoint
 
                 # miscellaneous functions
-                jio_fprintf;
-                jio_printf;
-                jio_snprintf;
-                jio_vfprintf;
-                jio_vsnprintf;
-                fork1;
-                numa_warn;
-                numa_error;
-
-                # Needed because there is no JVM interface for this.
-                sysThreadAvailableStackWithSlack;
+                _jio_fprintf
+                _jio_printf
+                _jio_snprintf
+                _jio_vfprintf
+                _jio_vsnprintf
 
                 # This is for Forte Analyzer profiling support.
-                AsyncGetCallTrace;
+                _AsyncGetCallTrace
 
                 # INSERT VTABLE SYMBOLS HERE
 
-        local:
-                *;
-};
-
--- a/make/bsd/makefiles/mapfile-vers-product	Thu Jul 11 12:59:03 2013 -0400
+++ b/make/bsd/makefiles/mapfile-vers-product	Mon Jul 15 11:07:03 2013 +0100
@@ -1,7 +1,3 @@
-#
-# @(#)mapfile-vers-product	1.19 08/02/12 10:56:37
-#
-
 #
 # Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -23,268 +19,239 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
-#  
 #
+#
+# Only used for OSX/Darwin builds
 
 # Define public interface.
+                # _JNI
+                _JNI_CreateJavaVM
+                _JNI_GetCreatedJavaVMs
+                _JNI_GetDefaultJavaVMInitArgs
 
-SUNWprivate_1.1 {
-        global:
-                # JNI
-                JNI_CreateJavaVM;
-                JNI_GetCreatedJavaVMs;
-                JNI_GetDefaultJavaVMInitArgs;
-
-                # JVM
-                JVM_Accept;
-                JVM_ActiveProcessorCount;
-                JVM_AllocateNewArray;
-                JVM_AllocateNewObject;
-                JVM_ArrayCopy;
-                JVM_AssertionStatusDirectives;
-                JVM_Available;
-                JVM_Bind;
-                JVM_ClassDepth;
-                JVM_ClassLoaderDepth;
-                JVM_Clone;
-                JVM_Close;
-                JVM_CX8Field;
-                JVM_CompileClass;
-                JVM_CompileClasses;
-                JVM_CompilerCommand;
-                JVM_Connect;
-                JVM_ConstantPoolGetClassAt;
-                JVM_ConstantPoolGetClassAtIfLoaded;
-                JVM_ConstantPoolGetDoubleAt;
-                JVM_ConstantPoolGetFieldAt;
-                JVM_ConstantPoolGetFieldAtIfLoaded;
-                JVM_ConstantPoolGetFloatAt;
-                JVM_ConstantPoolGetIntAt;
-                JVM_ConstantPoolGetLongAt;
-                JVM_ConstantPoolGetMethodAt;
-                JVM_ConstantPoolGetMethodAtIfLoaded;
-                JVM_ConstantPoolGetMemberRefInfoAt;
-                JVM_ConstantPoolGetSize;
-                JVM_ConstantPoolGetStringAt;
-                JVM_ConstantPoolGetUTF8At;
-                JVM_CountStackFrames;
-                JVM_CurrentClassLoader;
-                JVM_CurrentLoadedClass;
-                JVM_CurrentThread;
-                JVM_CurrentTimeMillis;
-                JVM_DefineClass;
-                JVM_DefineClassWithSource;
-                JVM_DefineClassWithSourceCond;
-                JVM_DesiredAssertionStatus;
-                JVM_DisableCompiler;
-                JVM_DoPrivileged;
-                JVM_DTraceGetVersion;
-                JVM_DTraceActivate;
-                JVM_DTraceIsProbeEnabled;
-                JVM_DTraceIsSupported;
-                JVM_DTraceDispose;
-                JVM_DumpAllStacks;
-                JVM_DumpThreads;
-                JVM_EnableCompiler;
-                JVM_Exit;
-                JVM_FillInStackTrace;
-                JVM_FindClassFromClass;
-                JVM_FindClassFromClassLoader;
-                JVM_FindClassFromBootLoader;
-                JVM_FindLibraryEntry;
-                JVM_FindLoadedClass;
-                JVM_FindPrimitiveClass;
-                JVM_FindSignal;
-                JVM_FreeMemory;
-                JVM_GC;
-                JVM_GetAllThreads;
-                JVM_GetArrayElement;
-                JVM_GetArrayLength;
-                JVM_GetCPClassNameUTF;
-                JVM_GetCPFieldClassNameUTF;
-                JVM_GetCPFieldModifiers;
-                JVM_GetCPFieldNameUTF;
-                JVM_GetCPFieldSignatureUTF;
-                JVM_GetCPMethodClassNameUTF;
-                JVM_GetCPMethodModifiers;
-                JVM_GetCPMethodNameUTF;
-                JVM_GetCPMethodSignatureUTF;
-                JVM_GetCallerClass;
-                JVM_GetClassAccessFlags;
-                JVM_GetClassAnnotations;
-                JVM_GetClassCPEntriesCount;
-                JVM_GetClassCPTypes;
-                JVM_GetClassConstantPool;
-                JVM_GetClassContext;
-                JVM_GetClassDeclaredConstructors;
-                JVM_GetClassDeclaredFields;
-                JVM_GetClassDeclaredMethods;
-                JVM_GetClassFieldsCount;
-                JVM_GetClassInterfaces;
-                JVM_GetClassLoader;
-                JVM_GetClassMethodsCount;
-                JVM_GetClassModifiers;
-                JVM_GetClassName;
-                JVM_GetClassNameUTF;
-                JVM_GetClassSignature;
-                JVM_GetClassSigners;
-                JVM_GetClassTypeAnnotations;
-                JVM_GetComponentType;
-                JVM_GetDeclaredClasses;
-                JVM_GetDeclaringClass;
-                JVM_GetEnclosingMethodInfo;
-                JVM_GetFieldAnnotations;
-                JVM_GetFieldIxModifiers;
-                JVM_GetFieldTypeAnnotations;
-                JVM_GetHostName;
-                JVM_GetInheritedAccessControlContext;
-                JVM_GetInterfaceVersion;
-                JVM_GetLastErrorString;
-                JVM_GetManagement;
-                JVM_GetMethodAnnotations;
-                JVM_GetMethodDefaultAnnotationValue;
-                JVM_GetMethodIxArgsSize;
-                JVM_GetMethodIxByteCode;
-                JVM_GetMethodIxByteCodeLength;
-                JVM_GetMethodIxExceptionIndexes;
-                JVM_GetMethodIxExceptionTableEntry;
-                JVM_GetMethodIxExceptionTableLength;
-                JVM_GetMethodIxExceptionsCount;
-                JVM_GetMethodIxLocalsCount;
-                JVM_GetMethodIxMaxStack;
-                JVM_GetMethodIxModifiers;
-                JVM_GetMethodIxNameUTF;
-                JVM_GetMethodIxSignatureUTF;
-                JVM_GetMethodParameterAnnotations;
-                JVM_GetMethodParameters;
-                JVM_GetMethodTypeAnnotations;
-                JVM_GetPrimitiveArrayElement;
-                JVM_GetProtectionDomain;
-                JVM_GetSockName;
-                JVM_GetSockOpt;
-                JVM_GetStackAccessControlContext;
-                JVM_GetStackTraceDepth;
-                JVM_GetStackTraceElement;
-                JVM_GetSystemPackage;
-                JVM_GetSystemPackages;
-                JVM_GetThreadStateNames;
-                JVM_GetThreadStateValues;
-                JVM_GetVersionInfo;
-                JVM_Halt;
-                JVM_HoldsLock;
-                JVM_IHashCode;
-                JVM_InitAgentProperties;
-                JVM_InitProperties;
-                JVM_InitializeCompiler;
-                JVM_InitializeSocketLibrary;
-                JVM_InternString;
-                JVM_Interrupt;
-                JVM_InvokeMethod;
-                JVM_IsArrayClass;
-                JVM_IsConstructorIx;
-                JVM_IsInterface;
-                JVM_IsInterrupted;
-                JVM_IsNaN;
-                JVM_IsPrimitiveClass;
-                JVM_IsSameClassPackage;
-                JVM_IsSilentCompiler;
-                JVM_IsSupportedJNIVersion;
-                JVM_IsThreadAlive;
-                JVM_IsVMGeneratedMethodIx;
-                JVM_LatestUserDefinedLoader;
-                JVM_Listen;
-                JVM_LoadClass0;
-                JVM_LoadLibrary;
-                JVM_Lseek;
-                JVM_MaxObjectInspectionAge;
-                JVM_MaxMemory;
-                JVM_MonitorNotify;
-                JVM_MonitorNotifyAll;
-                JVM_MonitorWait;
-                JVM_NanoTime;
-                JVM_NativePath;
-                JVM_NewArray;
-                JVM_NewInstanceFromConstructor;
-                JVM_NewMultiArray;
-                JVM_OnExit;
-                JVM_Open;
-                JVM_RaiseSignal;
-                JVM_RawMonitorCreate;
-                JVM_RawMonitorDestroy;
-                JVM_RawMonitorEnter;
-                JVM_RawMonitorExit;
-                JVM_Read;
-                JVM_Recv;
-                JVM_RecvFrom;
-                JVM_RegisterSignal;
-                JVM_ReleaseUTF;
-                JVM_ResolveClass;
-                JVM_ResumeThread;
-                JVM_Send;
-                JVM_SendTo;
-                JVM_SetArrayElement;
-                JVM_SetClassSigners;
-                JVM_SetLength;
-                JVM_SetPrimitiveArrayElement;
-                JVM_SetProtectionDomain;
-                JVM_SetSockOpt;
-                JVM_SetThreadPriority;
-                JVM_Sleep;
-                JVM_Socket;
-                JVM_SocketAvailable;
-                JVM_SocketClose;
-                JVM_SocketShutdown;
-                JVM_StartThread;
-                JVM_StopThread;
-                JVM_SuspendThread;
-                JVM_SupportsCX8;
-                JVM_Sync;
-                JVM_Timeout;
-                JVM_TotalMemory;
-                JVM_TraceInstructions;
-                JVM_TraceMethodCalls;
-                JVM_UnloadLibrary;
-                JVM_Write;
-                JVM_Yield;
-                JVM_handle_bsd_signal;
-
-                # Old reflection routines
-                # These do not need to be present in the product build in JDK 1.4
-                # but their code has not been removed yet because there will not
-                # be a substantial code savings until JVM_InvokeMethod and
-                # JVM_NewInstanceFromConstructor can also be removed; see
-                # reflectionCompat.hpp.
-                JVM_GetClassConstructor;
-                JVM_GetClassConstructors;
-                JVM_GetClassField;
-                JVM_GetClassFields;
-                JVM_GetClassMethod;
-                JVM_GetClassMethods;
-                JVM_GetField;
-                JVM_GetPrimitiveField;
-                JVM_NewInstance;
-                JVM_SetField;
-                JVM_SetPrimitiveField;
+                # _JVM
+                _JVM_Accept
+                _JVM_ActiveProcessorCount
+                _JVM_AllocateNewArray
+                _JVM_AllocateNewObject
+                _JVM_ArrayCopy
+                _JVM_AssertionStatusDirectives
+                _JVM_Available
+                _JVM_Bind
+                _JVM_ClassDepth
+                _JVM_ClassLoaderDepth
+                _JVM_Clone
+                _JVM_Close
+                _JVM_CX8Field
+                _JVM_CompileClass
+                _JVM_CompileClasses
+                _JVM_CompilerCommand
+                _JVM_Connect
+                _JVM_ConstantPoolGetClassAt
+                _JVM_ConstantPoolGetClassAtIfLoaded
+                _JVM_ConstantPoolGetDoubleAt
+                _JVM_ConstantPoolGetFieldAt
+                _JVM_ConstantPoolGetFieldAtIfLoaded
+                _JVM_ConstantPoolGetFloatAt
+                _JVM_ConstantPoolGetIntAt
+                _JVM_ConstantPoolGetLongAt
+                _JVM_ConstantPoolGetMethodAt
+                _JVM_ConstantPoolGetMethodAtIfLoaded
+                _JVM_ConstantPoolGetMemberRefInfoAt
+                _JVM_ConstantPoolGetSize
+                _JVM_ConstantPoolGetStringAt
+                _JVM_ConstantPoolGetUTF8At
+                _JVM_CountStackFrames
+                _JVM_CurrentClassLoader
+                _JVM_CurrentLoadedClass
+                _JVM_CurrentThread
+                _JVM_CurrentTimeMillis
+                _JVM_DefineClass
+                _JVM_DefineClassWithSource
+                _JVM_DefineClassWithSourceCond
+                _JVM_DesiredAssertionStatus
+                _JVM_DisableCompiler
+                _JVM_DoPrivileged
+                _JVM_DTraceGetVersion
+                _JVM_DTraceActivate
+                _JVM_DTraceIsProbeEnabled
+                _JVM_DTraceIsSupported
+                _JVM_DTraceDispose
+                _JVM_DumpAllStacks
+                _JVM_DumpThreads
+                _JVM_EnableCompiler
+                _JVM_Exit
+                _JVM_FillInStackTrace
+                _JVM_FindClassFromClass
+                _JVM_FindClassFromClassLoader
+                _JVM_FindClassFromBootLoader
+                _JVM_FindLibraryEntry
+                _JVM_FindLoadedClass
+                _JVM_FindPrimitiveClass
+                _JVM_FindSignal
+                _JVM_FreeMemory
+                _JVM_GC
+                _JVM_GetAllThreads
+                _JVM_GetArrayElement
+                _JVM_GetArrayLength
+                _JVM_GetCPClassNameUTF
+                _JVM_GetCPFieldClassNameUTF
+                _JVM_GetCPFieldModifiers
+                _JVM_GetCPFieldNameUTF
+                _JVM_GetCPFieldSignatureUTF
+                _JVM_GetCPMethodClassNameUTF
+                _JVM_GetCPMethodModifiers
+                _JVM_GetCPMethodNameUTF
+                _JVM_GetCPMethodSignatureUTF
+                _JVM_GetCallerClass
+                _JVM_GetClassAccessFlags
+                _JVM_GetClassAnnotations
+                _JVM_GetClassCPEntriesCount
+                _JVM_GetClassCPTypes
+                _JVM_GetClassConstantPool
+                _JVM_GetClassContext
+                _JVM_GetClassDeclaredConstructors
+                _JVM_GetClassDeclaredFields
+                _JVM_GetClassDeclaredMethods
+                _JVM_GetClassFieldsCount
+                _JVM_GetClassInterfaces
+                _JVM_GetClassLoader
+                _JVM_GetClassMethodsCount
+                _JVM_GetClassModifiers
+                _JVM_GetClassName
+                _JVM_GetClassNameUTF
+                _JVM_GetClassSignature
+                _JVM_GetClassSigners
+                _JVM_GetClassTypeAnnotations
+                _JVM_GetComponentType
+                _JVM_GetDeclaredClasses
+                _JVM_GetDeclaringClass
+                _JVM_GetEnclosingMethodInfo
+                _JVM_GetFieldAnnotations
+                _JVM_GetFieldIxModifiers
+                _JVM_GetFieldTypeAnnotations
+                _JVM_GetHostName
+                _JVM_GetInheritedAccessControlContext
+                _JVM_GetInterfaceVersion
+                _JVM_GetLastErrorString
+                _JVM_GetManagement
+                _JVM_GetMethodAnnotations
+                _JVM_GetMethodDefaultAnnotationValue
+                _JVM_GetMethodIxArgsSize
+                _JVM_GetMethodIxByteCode
+                _JVM_GetMethodIxByteCodeLength
+                _JVM_GetMethodIxExceptionIndexes
+                _JVM_GetMethodIxExceptionTableEntry
+                _JVM_GetMethodIxExceptionTableLength
+                _JVM_GetMethodIxExceptionsCount
+                _JVM_GetMethodIxLocalsCount
+                _JVM_GetMethodIxMaxStack
+                _JVM_GetMethodIxModifiers
+                _JVM_GetMethodIxNameUTF
+                _JVM_GetMethodIxSignatureUTF
+                _JVM_GetMethodParameterAnnotations
+                _JVM_GetMethodParameters
+                _JVM_GetMethodTypeAnnotations
+                _JVM_GetPrimitiveArrayElement
+                _JVM_GetProtectionDomain
+                _JVM_GetSockName
+                _JVM_GetSockOpt
+                _JVM_GetStackAccessControlContext
+                _JVM_GetStackTraceDepth
+                _JVM_GetStackTraceElement
+                _JVM_GetSystemPackage
+                _JVM_GetSystemPackages
+                _JVM_GetThreadStateNames
+                _JVM_GetThreadStateValues
+                _JVM_GetVersionInfo
+                _JVM_Halt
+                _JVM_HoldsLock
+                _JVM_IHashCode
+                _JVM_InitAgentProperties
+                _JVM_InitProperties
+                _JVM_InitializeCompiler
+                _JVM_InitializeSocketLibrary
+                _JVM_InternString
+                _JVM_Interrupt
+                _JVM_InvokeMethod
+                _JVM_IsArrayClass
+                _JVM_IsConstructorIx
+                _JVM_IsInterface
+                _JVM_IsInterrupted
+                _JVM_IsNaN
+                _JVM_IsPrimitiveClass
+                _JVM_IsSameClassPackage
+                _JVM_IsSilentCompiler
+                _JVM_IsSupportedJNIVersion
+                _JVM_IsThreadAlive
+                _JVM_IsVMGeneratedMethodIx
+                _JVM_LatestUserDefinedLoader
+                _JVM_Listen
+                _JVM_LoadClass0
+                _JVM_LoadLibrary
+                _JVM_Lseek
+                _JVM_MaxObjectInspectionAge
+                _JVM_MaxMemory
+                _JVM_MonitorNotify
+                _JVM_MonitorNotifyAll
+                _JVM_MonitorWait
+                _JVM_NanoTime
+                _JVM_NativePath
+                _JVM_NewArray
+                _JVM_NewInstanceFromConstructor
+                _JVM_NewMultiArray
+                _JVM_OnExit
+                _JVM_Open
+                _JVM_RaiseSignal
+                _JVM_RawMonitorCreate
+                _JVM_RawMonitorDestroy
+                _JVM_RawMonitorEnter
+                _JVM_RawMonitorExit
+                _JVM_Read
+                _JVM_Recv
+                _JVM_RecvFrom
+                _JVM_RegisterSignal
+                _JVM_ReleaseUTF
+                _JVM_ResolveClass
+                _JVM_ResumeThread
+                _JVM_Send
+                _JVM_SendTo
+                _JVM_SetArrayElement
+                _JVM_SetClassSigners
+                _JVM_SetLength
+                _JVM_SetNativeThreadName
+                _JVM_SetPrimitiveArrayElement
+                _JVM_SetProtectionDomain
+                _JVM_SetSockOpt
+                _JVM_SetThreadPriority
+                _JVM_Sleep
+                _JVM_Socket
+                _JVM_SocketAvailable
+                _JVM_SocketClose
+                _JVM_SocketShutdown
+                _JVM_StartThread
+                _JVM_StopThread
+                _JVM_SuspendThread
+                _JVM_SupportsCX8
+                _JVM_Sync
+                _JVM_Timeout
+                _JVM_TotalMemory
+                _JVM_TraceInstructions
+                _JVM_TraceMethodCalls
+                _JVM_UnloadLibrary
+                _JVM_Write
+                _JVM_Yield
+                _JVM_handle_bsd_signal
 
                 # miscellaneous functions
-                jio_fprintf;
-                jio_printf;
-                jio_snprintf;
-                jio_vfprintf;
-                jio_vsnprintf;
-                fork1;
-                numa_warn;
-                numa_error;
-
-                # Needed because there is no JVM interface for this.
-                sysThreadAvailableStackWithSlack;
+                _jio_fprintf
+                _jio_printf
+                _jio_snprintf
+                _jio_vfprintf
+                _jio_vsnprintf
 
                 # This is for Forte Analyzer profiling support.
-                AsyncGetCallTrace;
+                _AsyncGetCallTrace
 
-		# INSERT VTABLE SYMBOLS HERE
+                # INSERT VTABLE SYMBOLS HERE
 
-        local:
-                *;
-};
-
--- a/make/excludeSrc.make	Thu Jul 11 12:59:03 2013 -0400
+++ b/make/excludeSrc.make	Mon Jul 15 11:07:03 2013 +0100
@@ -112,3 +112,5 @@
 endif
 
 -include $(HS_ALT_MAKE)/excludeSrc.make
+
+.PHONY: $(HS_ALT_MAKE)/excludeSrc.make
--- a/make/hotspot_version	Thu Jul 11 12:59:03 2013 -0400
+++ b/make/hotspot_version	Mon Jul 15 11:07:03 2013 +0100
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=37
+HS_BUILD_NUMBER=40
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/make/linux/makefiles/gcc.make	Thu Jul 11 12:59:03 2013 -0400
+++ b/make/linux/makefiles/gcc.make	Mon Jul 15 11:07:03 2013 +0100
@@ -214,7 +214,7 @@
   WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
 endif
 
-WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function
+WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value
 
 ifeq ($(USE_CLANG),)
   # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
@@ -350,9 +350,9 @@
   ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
       ifeq ($(USE_CLANG), true)
         # Clang doesn't understand -gstabs
-        OPT_CFLAGS += -g
+        DEBUG_CFLAGS += -g
       else
-        OPT_CFLAGS += -gstabs
+        DEBUG_CFLAGS += -gstabs
       endif
   endif
   
@@ -365,9 +365,9 @@
     ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
       ifeq ($(USE_CLANG), true)
         # Clang doesn't understand -gstabs
-        OPT_CFLAGS += -g
+        FASTDEBUG_CFLAGS += -g
       else
-        OPT_CFLAGS += -gstabs
+        FASTDEBUG_CFLAGS += -gstabs
       endif
     endif
   
--- a/make/linux/makefiles/vm.make	Thu Jul 11 12:59:03 2013 -0400
+++ b/make/linux/makefiles/vm.make	Mon Jul 15 11:07:03 2013 +0100
@@ -107,6 +107,10 @@
 # File specific flags
 CXXFLAGS += $(CXXFLAGS/BYFILE)
 
+# Large File Support
+ifneq ($(LP64), 1)
+CXXFLAGS/ostream.o += -D_FILE_OFFSET_BITS=64
+endif # ifneq ($(LP64), 1)
 
 # CFLAGS_WARN holds compiler options to suppress/enable warnings.
 CFLAGS += $(CFLAGS_WARN/BYFILE)
--- a/make/solaris/makefiles/vm.make	Thu Jul 11 12:59:03 2013 -0400
+++ b/make/solaris/makefiles/vm.make	Mon Jul 15 11:07:03 2013 +0100
@@ -95,6 +95,10 @@
 # File specific flags
 CXXFLAGS += $(CXXFLAGS/BYFILE)
 
+# Large File Support
+ifneq ($(LP64), 1)
+CXXFLAGS/ostream.o += -D_FILE_OFFSET_BITS=64
+endif # ifneq ($(LP64), 1)
 
 # CFLAGS_WARN holds compiler options to suppress/enable warnings.
 CFLAGS += $(CFLAGS_WARN)
--- a/src/cpu/sparc/vm/assembler_sparc.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -57,7 +57,6 @@
     fbp_op2   = 5,
     br_op2    = 2,
     bp_op2    = 1,
-    cb_op2    = 7, // V8
     sethi_op2 = 4
   };
 
@@ -145,7 +144,6 @@
     ldsh_op3     = 0x0a,
     ldx_op3      = 0x0b,
 
-    ldstub_op3   = 0x0d,
     stx_op3      = 0x0e,
     swap_op3     = 0x0f,
 
@@ -163,15 +161,6 @@
 
     prefetch_op3 = 0x2d,
 
-
-    ldc_op3      = 0x30,
-    ldcsr_op3    = 0x31,
-    lddc_op3     = 0x33,
-    stc_op3      = 0x34,
-    stcsr_op3    = 0x35,
-    stdcq_op3    = 0x36,
-    stdc_op3     = 0x37,
-
     casa_op3     = 0x3c,
     casxa_op3    = 0x3e,
 
@@ -574,17 +563,11 @@
   static void vis3_only() { assert( VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); }
 
   // instruction only in v9
-  static void v9_only() { assert( VM_Version::v9_instructions_work(), "This instruction only works on SPARC V9"); }
-
-  // instruction only in v8
-  static void v8_only() { assert( VM_Version::v8_instructions_work(), "This instruction only works on SPARC V8"); }
+  static void v9_only() { } // do nothing
 
   // instruction deprecated in v9
   static void v9_dep()  { } // do nothing for now
 
-  // some float instructions only exist for single prec. on v8
-  static void v8_s_only(FloatRegisterImpl::Width w)  { if (w != FloatRegisterImpl::S)  v9_only(); }
-
   // v8 has no CC field
   static void v8_no_cc(CC cc)  { if (cc)  v9_only(); }
 
@@ -730,11 +713,6 @@
   inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
   inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
 
-  // pp 121 (V8)
-
-  inline void cb( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
-  inline void cb( Condition c, bool a, Label& L );
-
   // pp 149
 
   inline void call( address d,  relocInfo::relocType rt = relocInfo::runtime_call_type );
@@ -775,8 +753,8 @@
 
   // pp 157
 
-  void fcmp(  FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc);  emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
-  void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc);  emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }
+  void fcmp(  FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
+  void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }
 
   // pp 159
 
@@ -794,21 +772,11 @@
 
   // pp 162
 
-  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w);  emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }
+  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }
 
-  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w);  emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }
+  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }
 
-  // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fnegs is the only instruction available
-  // on v8 to do negation of single, double and quad precision floats.
-
-  void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(sd, w)); else emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) |  opf(0x05) | fs2(sd, w)); }
-
-  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w);  emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }
-
-  // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fabss is the only instruction available
-  // on v8 to do abs operation on single/double/quad precision floats.
-
-  void fabs( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(sd, w)); else emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x09) | fs2(sd, w)); }
+  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }
 
   // pp 163
 
@@ -839,11 +807,6 @@
   void impdep1( int id1, int const19a ) { v9_only();  emit_int32( op(arith_op) | fcn(id1) | op3(impdep1_op3) | u_field(const19a, 18, 0)); }
   void impdep2( int id1, int const19a ) { v9_only();  emit_int32( op(arith_op) | fcn(id1) | op3(impdep2_op3) | u_field(const19a, 18, 0)); }
 
-  // pp 149 (v8)
-
-  void cpop1( int opc, int cr1, int cr2, int crd ) { v8_only();  emit_int32( op(arith_op) | fcn(crd) | op3(impdep1_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }
-  void cpop2( int opc, int cr1, int cr2, int crd ) { v8_only();  emit_int32( op(arith_op) | fcn(crd) | op3(impdep2_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }
-
   // pp 170
 
   void jmpl( Register s1, Register s2, Register d );
@@ -860,16 +823,6 @@
   inline void ldxfsr( Register s1, Register s2 );
   inline void ldxfsr( Register s1, int simm13a);
 
-  // pp 94 (v8)
-
-  inline void ldc(   Register s1, Register s2, int crd );
-  inline void ldc(   Register s1, int simm13a, int crd);
-  inline void lddc(  Register s1, Register s2, int crd );
-  inline void lddc(  Register s1, int simm13a, int crd);
-  inline void ldcsr( Register s1, Register s2, int crd );
-  inline void ldcsr( Register s1, int simm13a, int crd);
-
-
   // 173
 
   void ldfa(  FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d ) { v9_only();  emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
@@ -910,18 +863,6 @@
   void lduwa(  Register s1, int simm13a,         Register d ) {             emit_int32( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   void ldxa(   Register s1, Register s2, int ia, Register d ) { v9_only();  emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3  | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
   void ldxa(   Register s1, int simm13a,         Register d ) { v9_only();  emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3  | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-  void ldda(   Register s1, Register s2, int ia, Register d ) { v9_dep();   emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3  | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-  void ldda(   Register s1, int simm13a,         Register d ) { v9_dep();   emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3  | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-
-  // pp 179
-
-  inline void ldstub(  Register s1, Register s2, Register d );
-  inline void ldstub(  Register s1, int simm13a, Register d);
-
-  // pp 180
-
-  void ldstuba( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-  void ldstuba( Register s1, int simm13a,         Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
 
   // pp 181
 
@@ -992,11 +933,6 @@
   void smulcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
   void smulcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
 
-  // pp 199
-
-  void mulscc(   Register s1, Register s2, Register d ) { v9_dep();  emit_int32( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | rs2(s2) ); }
-  void mulscc(   Register s1, int simm13a, Register d ) { v9_dep();  emit_int32( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-
   // pp 201
 
   void nop() { emit_int32( op(branch_op) | op2(sethi_op2) ); }
@@ -1116,17 +1052,6 @@
   void stda(  Register d, Register s1, Register s2, int ia ) {             emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
   void stda(  Register d, Register s1, int simm13a         ) {             emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
 
-  // pp 97 (v8)
-
-  inline void stc(   int crd, Register s1, Register s2 );
-  inline void stc(   int crd, Register s1, int simm13a);
-  inline void stdc(  int crd, Register s1, Register s2 );
-  inline void stdc(  int crd, Register s1, int simm13a);
-  inline void stcsr( int crd, Register s1, Register s2 );
-  inline void stcsr( int crd, Register s1, int simm13a);
-  inline void stdcq( int crd, Register s1, Register s2 );
-  inline void stdcq( int crd, Register s1, int simm13a);
-
   // pp 230
 
   void sub(    Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3              ) | rs1(s1) | rs2(s2) ); }
@@ -1153,20 +1078,16 @@
 
   void taddcc(    Register s1, Register s2, Register d ) {            emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3  ) | rs1(s1) | rs2(s2) ); }
   void taddcc(    Register s1, int simm13a, Register d ) {            emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-  void taddcctv(  Register s1, Register s2, Register d ) { v9_dep();  emit_int32( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | rs2(s2) ); }
-  void taddcctv(  Register s1, int simm13a, Register d ) { v9_dep();  emit_int32( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
 
   // pp 235
 
   void tsubcc(    Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3  ) | rs1(s1) | rs2(s2) ); }
   void tsubcc(    Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-  void tsubcctv(  Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | rs2(s2) ); }
-  void tsubcctv(  Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
 
   // pp 237
 
-  void trap( Condition c, CC cc, Register s1, Register s2 ) { v8_no_cc(cc);  emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
-  void trap( Condition c, CC cc, Register s1, int trapa   ) { v8_no_cc(cc);  emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
+  void trap( Condition c, CC cc, Register s1, Register s2 ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
+  void trap( Condition c, CC cc, Register s1, int trapa   ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
   // simple uncond. trap
   void trap( int trapa ) { trap( always, icc, G0, trapa ); }
 
--- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -63,9 +63,6 @@
 inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only();  cti();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt);  has_delay_slot(); }
 inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { fbp(c, a, cc, p, target(L)); }
 
-inline void Assembler::cb( Condition c, bool a, address d, relocInfo::relocType rt ) { v8_only();  cti();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(cb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt);  has_delay_slot(); }
-inline void Assembler::cb( Condition c, bool a, Label& L ) { cb(c, a, target(L)); }
-
 inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep();  cti();   emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt);  has_delay_slot(); }
 inline void Assembler::br( Condition c, bool a, Label& L ) { br(c, a, target(L)); }
 
@@ -88,18 +85,9 @@
 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }
 
-inline void Assembler::ldfsr(  Register s1, Register s2) { v9_dep();   emit_int32( op(ldst_op) |             op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldfsr(  Register s1, int simm13a) { v9_dep();   emit_data( op(ldst_op) |             op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only();  emit_int32( op(ldst_op) | rd(G1)    | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldxfsr( Register s1, int simm13a) { v9_only();  emit_data( op(ldst_op) | rd(G1)    | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
-inline void Assembler::ldc(   Register s1, Register s2, int crd) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(ldc_op3  ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldc(   Register s1, int simm13a, int crd) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(ldc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::lddc(  Register s1, Register s2, int crd) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::lddc(  Register s1, int simm13a, int crd) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::ldcsr( Register s1, Register s2, int crd) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldcsr( Register s1, int simm13a, int crd) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
 inline void Assembler::ldsb(  Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldsb(  Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
@@ -119,9 +107,6 @@
 inline void Assembler::ldd(   Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldd(   Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
-inline void Assembler::ldstub(  Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldstub(  Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
 inline void Assembler::rett( Register s1, Register s2                         ) { cti();  emit_int32( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2));  has_delay_slot(); }
 inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { cti();  emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt);  has_delay_slot(); }
 
@@ -132,8 +117,6 @@
 inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
-inline void Assembler::stfsr(  Register s1, Register s2) { v9_dep();   emit_int32( op(ldst_op) |             op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stfsr(  Register s1, int simm13a) { v9_dep();   emit_data( op(ldst_op) |             op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 inline void Assembler::stxfsr( Register s1, Register s2) { v9_only();  emit_int32( op(ldst_op) | rd(G1)    | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::stxfsr( Register s1, int simm13a) { v9_only();  emit_data( op(ldst_op) | rd(G1)    | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
@@ -152,17 +135,6 @@
 inline void Assembler::std(  Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::std(  Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
-// v8 p 99
-
-inline void Assembler::stc(    int crd, Register s1, Register s2) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stc(    int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::stdc(   int crd, Register s1, Register s2) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stdc(   int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::stcsr(  int crd, Register s1, Register s2) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stcsr(  int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::stdcq(  int crd, Register s1, Register s2) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stdcq(  int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
 // pp 231
 
 inline void Assembler::swap(    Register s1, Register s2, Register d) { v9_dep();  emit_int32( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -597,13 +597,6 @@
 
   __ sra(Rdividend, 31, Rscratch);
   __ wry(Rscratch);
-  if (!VM_Version::v9_instructions_work()) {
-    // v9 doesn't require these nops
-    __ nop();
-    __ nop();
-    __ nop();
-    __ nop();
-  }
 
   add_debug_info_for_div0_here(op->info());
 
@@ -652,10 +645,6 @@
       case lir_cond_lessEqual:     acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual   : Assembler::f_lessOrEqual);    break;
       case lir_cond_greaterEqual:  acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
       default :                         ShouldNotReachHere();
-    };
-
-    if (!VM_Version::v9_instructions_work()) {
-      __ nop();
     }
     __ fb( acond, false, Assembler::pn, *(op->label()));
   } else {
@@ -725,9 +714,6 @@
       Label L;
       // result must be 0 if value is NaN; test by comparing value to itself
       __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
-      if (!VM_Version::v9_instructions_work()) {
-        __ nop();
-      }
       __ fb(Assembler::f_unordered, true, Assembler::pn, L);
       __ delayed()->st(G0, addr); // annuled if contents of rsrc is not NaN
       __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
@@ -1909,7 +1895,7 @@
       switch (code) {
         case lir_add:  __ add  (lreg, rreg, res); break;
         case lir_sub:  __ sub  (lreg, rreg, res); break;
-        case lir_mul:  __ mult (lreg, rreg, res); break;
+        case lir_mul:  __ mulx (lreg, rreg, res); break;
         default: ShouldNotReachHere();
       }
     }
@@ -1924,7 +1910,7 @@
       switch (code) {
         case lir_add:  __ add  (lreg, simm13, res); break;
         case lir_sub:  __ sub  (lreg, simm13, res); break;
-        case lir_mul:  __ mult (lreg, simm13, res); break;
+        case lir_mul:  __ mulx (lreg, simm13, res); break;
         default: ShouldNotReachHere();
       }
     } else {
@@ -1936,7 +1922,7 @@
       switch (code) {
         case lir_add:  __ add  (lreg, (int)con, res); break;
         case lir_sub:  __ sub  (lreg, (int)con, res); break;
-        case lir_mul:  __ mult (lreg, (int)con, res); break;
+        case lir_mul:  __ mulx (lreg, (int)con, res); break;
         default: ShouldNotReachHere();
       }
     }
@@ -2960,6 +2946,9 @@
   }
 }
 
+void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
+  fatal("CRC32 intrinsic is not implemented on this platform");
+}
 
 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
   Register obj = op->obj_opr()->as_register();
@@ -3234,48 +3223,26 @@
     Register base = mem_addr->base()->as_register();
     if (src->is_register() && dest->is_address()) {
       // G4 is high half, G5 is low half
-      if (VM_Version::v9_instructions_work()) {
-        // clear the top bits of G5, and scale up G4
-        __ srl (src->as_register_lo(),  0, G5);
-        __ sllx(src->as_register_hi(), 32, G4);
-        // combine the two halves into the 64 bits of G4
-        __ or3(G4, G5, G4);
-        null_check_offset = __ offset();
-        if (idx == noreg) {
-          __ stx(G4, base, disp);
-        } else {
-          __ stx(G4, base, idx);
-        }
+      // clear the top bits of G5, and scale up G4
+      __ srl (src->as_register_lo(),  0, G5);
+      __ sllx(src->as_register_hi(), 32, G4);
+      // combine the two halves into the 64 bits of G4
+      __ or3(G4, G5, G4);
+      null_check_offset = __ offset();
+      if (idx == noreg) {
+        __ stx(G4, base, disp);
       } else {
-        __ mov (src->as_register_hi(), G4);
-        __ mov (src->as_register_lo(), G5);
-        null_check_offset = __ offset();
-        if (idx == noreg) {
-          __ std(G4, base, disp);
-        } else {
-          __ std(G4, base, idx);
-        }
+        __ stx(G4, base, idx);
       }
     } else if (src->is_address() && dest->is_register()) {
       null_check_offset = __ offset();
-      if (VM_Version::v9_instructions_work()) {
-        if (idx == noreg) {
-          __ ldx(base, disp, G5);
-        } else {
-          __ ldx(base, idx, G5);
-        }
-        __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
-        __ mov (G5, dest->as_register_lo());     // copy low half into lo
+      if (idx == noreg) {
+        __ ldx(base, disp, G5);
       } else {
-        if (idx == noreg) {
-          __ ldd(base, disp, G4);
-        } else {
-          __ ldd(base, idx, G4);
-        }
-        // G4 is high half, G5 is low half
-        __ mov (G4, dest->as_register_hi());
-        __ mov (G5, dest->as_register_lo());
+        __ ldx(base, idx, G5);
       }
+      __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
+      __ mov (G5, dest->as_register_lo());     // copy low half into lo
     } else {
       Unimplemented();
     }
--- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -784,6 +784,10 @@
   set_no_result(x);
 }
 
+void LIRGenerator::do_update_CRC32(Intrinsic* x) {
+  fatal("CRC32 intrinsic is not implemented on this platform");
+}
+
 // _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
 // _i2b, _i2c, _i2s
 void LIRGenerator::do_Convert(Convert* x) {
--- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -108,7 +108,7 @@
 
   // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
   assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-  casx_under_lock(mark_addr.base(), Rmark, Rscratch, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+  cas_ptr(mark_addr.base(), Rmark, Rscratch);
   // if compare/exchange succeeded we found an unlocked object and we now have locked it
   // hence we are done
   cmp(Rmark, Rscratch);
@@ -149,7 +149,7 @@
 
   // Check if it is still a light weight lock, this is true if we see
   // the stack address of the basicLock in the markOop of the object
-  casx_under_lock(mark_addr.base(), Rbox, Rmark, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+  cas_ptr(mark_addr.base(), Rbox, Rmark);
   cmp(Rbox, Rmark);
 
   brx(Assembler::notEqual, false, Assembler::pn, slow_case);
@@ -276,7 +276,7 @@
     sub(var_size_in_bytes, hdr_size_in_bytes, t2); // compute size of body
     initialize_body(t1, t2);
 #ifndef _LP64
-  } else if (VM_Version::v9_instructions_work() && con_size_in_bytes < threshold * 2) {
+  } else if (con_size_in_bytes < threshold * 2) {
     // on v9 we can do double word stores to fill twice as much space.
     assert(hdr_size_in_bytes % 8 == 0, "double word aligned");
     assert(con_size_in_bytes % 8 == 0, "double word aligned");
--- a/src/cpu/sparc/vm/c1_globals_sparc.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/c1_globals_sparc.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -49,8 +49,9 @@
 define_pd_global(bool, ResizeTLAB,                   true );
 define_pd_global(intx, ReservedCodeCacheSize,        32*M );
 define_pd_global(intx, CodeCacheExpansionSize,       32*K );
-define_pd_global(uintx,CodeCacheMinBlockLength,      1);
-define_pd_global(uintx,MetaspaceSize,                     12*M );
+define_pd_global(uintx, CodeCacheMinBlockLength,     1);
+define_pd_global(uintx, CodeCacheMinimumUseSpace,    400*K);
+define_pd_global(uintx, MetaspaceSize,               12*M );
 define_pd_global(bool, NeverActAsServerClassMachine, true );
 define_pd_global(intx, NewSizeThreadIncrease,        16*K );
 define_pd_global(uint64_t,MaxRAM,                    1ULL*G);
--- a/src/cpu/sparc/vm/c2_globals_sparc.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/c2_globals_sparc.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -86,7 +86,8 @@
 // Ergonomics related flags
 define_pd_global(uint64_t,MaxRAM,                    4ULL*G);
 #endif
-define_pd_global(uintx,CodeCacheMinBlockLength,      4);
+define_pd_global(uintx, CodeCacheMinBlockLength,     4);
+define_pd_global(uintx, CodeCacheMinimumUseSpace,    400*K);
 
 // Heap related flags
 define_pd_global(uintx,MetaspaceSize,    ScaleForWordSize(16*M));
--- a/src/cpu/sparc/vm/c2_init_sparc.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/c2_init_sparc.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -30,5 +30,4 @@
 
 void Compile::pd_compiler2_init() {
   guarantee(CodeEntryAlignment >= InteriorEntryAlignment, "" );
-  guarantee( VM_Version::v9_instructions_work(), "Server compiler does not run on V8 systems" );
 }
--- a/src/cpu/sparc/vm/disassembler_sparc.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/disassembler_sparc.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -30,8 +30,7 @@
   }
 
   static const char* pd_cpu_opts() {
-    return (VM_Version::v9_instructions_work()?
-            (VM_Version::v8_instructions_work()? "" : "v9only") : "v8only");
+    return "v9only";
   }
 
 #endif // CPU_SPARC_VM_DISASSEMBLER_SPARC_HPP
--- a/src/cpu/sparc/vm/frame_sparc.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/frame_sparc.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -257,11 +257,6 @@
       return false;
     }
 
-    // Could be a zombie method
-    if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
-      return false;
-    }
-
     // It should be safe to construct the sender though it might not be valid
 
     frame sender(_SENDER_SP, younger_sp, adjusted_stack);
@@ -680,7 +675,7 @@
 
   // validate ConstantPoolCache*
   ConstantPoolCache* cp = *interpreter_frame_cache_addr();
-  if (cp == NULL || !cp->is_metadata()) return false;
+  if (cp == NULL || !cp->is_metaspace_object()) return false;
 
   // validate locals
 
--- a/src/cpu/sparc/vm/globals_sparc.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/globals_sparc.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -110,8 +110,5 @@
                                                                             \
   product(uintx,  ArraycopyDstPrefetchDistance, 0,                          \
           "Distance to prefetch destination array in arracopy")             \
-                                                                            \
-  develop(intx, V8AtomicOperationUnderLockSpinCount,    50,                 \
-          "Number of times to spin wait on a v8 atomic operation lock")     \
 
 #endif // CPU_SPARC_VM_GLOBALS_SPARC_HPP
--- a/src/cpu/sparc/vm/interp_masm_sparc.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1210,8 +1210,7 @@
     st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
     // compare and exchange object_addr, markOop | 1, stack address of basicLock
     assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-    casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
-      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+    cas_ptr(mark_addr.base(), mark_reg, temp_reg);
 
     // if the compare and exchange succeeded we are done (we saw an unlocked object)
     cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);
@@ -1291,8 +1290,7 @@
     // we expect to see the stack address of the basicLock in case the
     // lock is still a light weight lock (lock_reg)
     assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-    casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg,
-      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+    cas_ptr(mark_addr.base(), lock_reg, displaced_header_reg);
     cmp(lock_reg, displaced_header_reg);
     brx(Assembler::equal, true, Assembler::pn, done);
     delayed()->st_ptr(G0, lockobj_addr);  // free entry
--- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -118,7 +118,6 @@
       case bp_op2:     m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
       case fb_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
       case br_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
-      case cb_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
       case bpr_op2: {
         if (is_cbcond(inst)) {
           m = wdisp10(word_aligned_ones, 0);
@@ -149,7 +148,6 @@
       case bp_op2:     r = inv_wdisp(  inst, pos, 19);  break;
       case fb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
       case br_op2:     r = inv_wdisp(  inst, pos, 22);  break;
-      case cb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
       case bpr_op2: {
         if (is_cbcond(inst)) {
           r = inv_wdisp10(inst, pos);
@@ -325,12 +323,6 @@
   trap(ST_RESERVED_FOR_USER_0);
 }
 
-// flush windows (except current) using flushw instruction if avail.
-void MacroAssembler::flush_windows() {
-  if (VM_Version::v9_instructions_work())  flushw();
-  else                                     flush_windows_trap();
-}
-
 // Write serialization page so VM thread can do a pseudo remote membar
 // We use the current thread pointer to calculate a thread specific
 // offset to write to within the page. This minimizes bus traffic
@@ -358,88 +350,6 @@
   Unimplemented();
 }
 
-void MacroAssembler::mult(Register s1, Register s2, Register d) {
-  if(VM_Version::v9_instructions_work()) {
-    mulx (s1, s2, d);
-  } else {
-    smul (s1, s2, d);
-  }
-}
-
-void MacroAssembler::mult(Register s1, int simm13a, Register d) {
-  if(VM_Version::v9_instructions_work()) {
-    mulx (s1, simm13a, d);
-  } else {
-    smul (s1, simm13a, d);
-  }
-}
-
-
-#ifdef ASSERT
-void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
-  const Register s1 = G3_scratch;
-  const Register s2 = G4_scratch;
-  Label get_psr_test;
-  // Get the condition codes the V8 way.
-  read_ccr_trap(s1);
-  mov(ccr_save, s2);
-  // This is a test of V8 which has icc but not xcc
-  // so mask off the xcc bits
-  and3(s2, 0xf, s2);
-  // Compare condition codes from the V8 and V9 ways.
-  subcc(s2, s1, G0);
-  br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
-  delayed()->breakpoint_trap();
-  bind(get_psr_test);
-}
-
-void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
-  const Register s1 = G3_scratch;
-  const Register s2 = G4_scratch;
-  Label set_psr_test;
-  // Write out the saved condition codes the V8 way
-  write_ccr_trap(ccr_save, s1, s2);
-  // Read back the condition codes using the V9 instruction
-  rdccr(s1);
-  mov(ccr_save, s2);
-  // This is a test of V8 which has icc but not xcc
-  // so mask off the xcc bits
-  and3(s2, 0xf, s2);
-  and3(s1, 0xf, s1);
-  // Compare the V8 way with the V9 way.
-  subcc(s2, s1, G0);
-  br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
-  delayed()->breakpoint_trap();
-  bind(set_psr_test);
-}
-#else
-#define read_ccr_v8_assert(x)
-#define write_ccr_v8_assert(x)
-#endif // ASSERT
-
-void MacroAssembler::read_ccr(Register ccr_save) {
-  if (VM_Version::v9_instructions_work()) {
-    rdccr(ccr_save);
-    // Test code sequence used on V8.  Do not move above rdccr.
-    read_ccr_v8_assert(ccr_save);
-  } else {
-    read_ccr_trap(ccr_save);
-  }
-}
-
-void MacroAssembler::write_ccr(Register ccr_save) {
-  if (VM_Version::v9_instructions_work()) {
-    // Test code sequence used on V8.  Do not move below wrccr.
-    write_ccr_v8_assert(ccr_save);
-    wrccr(ccr_save);
-  } else {
-    const Register temp_reg1 = G3_scratch;
-    const Register temp_reg2 = G4_scratch;
-    write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
-  }
-}
-
-
 // Calls to C land
 
 #ifdef ASSERT
@@ -465,8 +375,9 @@
 #ifdef ASSERT
   AddressLiteral last_get_thread_addrlit(&last_get_thread);
   set(last_get_thread_addrlit, L3);
-  inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
-  st_ptr(L4, L3, 0);
+  rdpc(L4);
+  inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
+  st_ptr(L4, L3, 0);
 #endif
   call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
   delayed()->nop();
@@ -1251,12 +1161,6 @@
   while (offset() % modulus != 0) nop();
 }
 
-
-void MacroAssembler::safepoint() {
-  relocate(breakpoint_Relocation::spec(breakpoint_Relocation::safepoint));
-}
-
-
 void RegistersForDebugging::print(outputStream* s) {
   FlagSetting fs(Debugging, true);
   int j;
@@ -1327,7 +1231,7 @@
 
 void RegistersForDebugging::save_registers(MacroAssembler* a) {
   a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
-  a->flush_windows();
+  a->flushw();
   int i;
   for (i = 0; i < 8; ++i) {
     a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, i_offset(i));
@@ -1338,7 +1242,7 @@
   for (i = 0;  i < 32; ++i) {
     a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
   }
-  for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
+  for (i = 0; i < 64; i += 2) {
     a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
   }
 }
@@ -1350,7 +1254,7 @@
   for (int j = 0; j < 32; ++j) {
     a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
   }
-  for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) {
+  for (int k = 0; k < 64; k += 2) {
     a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
   }
 }
@@ -1465,8 +1369,6 @@
 // the high bits of the O-regs if they contain Long values.  Acts as a 'leaf'
 // call.
 void MacroAssembler::verify_oop_subroutine() {
-  assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" );
-
   // Leaf call; no frame.
   Label succeed, fail, null_or_fail;
 
@@ -1870,26 +1772,17 @@
   // And the equals case for the high part does not need testing,
   // since that triplet is reached only after finding the high halves differ.
 
-  if (VM_Version::v9_instructions_work()) {
-    mov(-1, Rresult);
-    ba(done);  delayed()-> movcc(greater, false, icc,  1, Rresult);
-  } else {
-    br(less,    true, pt, done); delayed()-> set(-1, Rresult);
-    br(greater, true, pt, done); delayed()-> set( 1, Rresult);
-  }
-
-  bind( check_low_parts );
-
-  if (VM_Version::v9_instructions_work()) {
-    mov(                               -1, Rresult);
-    movcc(equal,           false, icc,  0, Rresult);
-    movcc(greaterUnsigned, false, icc,  1, Rresult);
-  } else {
-    set(-1, Rresult);
-    br(equal,           true, pt, done); delayed()->set( 0, Rresult);
-    br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult);
-  }
-  bind( done );
+  mov(-1, Rresult);
+  ba(done);
+  delayed()->movcc(greater, false, icc,  1, Rresult);
+
+  bind(check_low_parts);
+
+  mov(                               -1, Rresult);
+  movcc(equal,           false, icc,  0, Rresult);
+  movcc(greaterUnsigned, false, icc,  1, Rresult);
+
+  bind(done);
 }
 
 void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
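
Note on the hunk above: with the V8 path gone, the long compare is emitted branch-free. Rresult is seeded with -1 and then conditionally overwritten by movcc, and the low halves are only reached after the high halves compare equal. A minimal C++ sketch of the -1/0/+1 contract the sequence computes (illustrative only, not part of the changeset; it assumes the signed-high/unsigned-low split visible in the movcc conditions):

    #include <cstdint>

    // Sketch of the long-compare contract: high halves compared as signed ints,
    // low halves as unsigned ints, result is -1/0/+1; the generated code needs
    // no branches because movcc conditionally overwrites the seeded -1.
    static int lcmp2int_sketch(int64_t x, int64_t y) {
      int32_t  xhi = (int32_t)(x >> 32), yhi = (int32_t)(y >> 32);
      uint32_t xlo = (uint32_t)x,        ylo = (uint32_t)y;
      if (xhi != yhi) {                  // high halves differ
        int r = -1;                      // mov(-1, Rresult)
        if (xhi > yhi) r = 1;            // movcc(greater, ...)
        return r;
      }
      int r = -1;                        // check_low_parts: mov(-1, Rresult)
      if (xlo == ylo) r = 0;             // movcc(equal, ...)
      if (xlo >  ylo) r = 1;             // movcc(greaterUnsigned, ...)
      return r;
    }
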
@@ -2117,119 +2010,24 @@
 void MacroAssembler::float_cmp( bool is_float, int unordered_result,
                                 FloatRegister Fa, FloatRegister Fb,
                                 Register Rresult) {
-
-  fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb);
-
-  Condition lt = unordered_result == -1 ? f_unorderedOrLess    : f_less;
-  Condition eq =                          f_equal;
-  Condition gt = unordered_result ==  1 ? f_unorderedOrGreater : f_greater;
-
-  if (VM_Version::v9_instructions_work()) {
-
-    mov(-1, Rresult);
-    movcc(eq, true, fcc0, 0, Rresult);
-    movcc(gt, true, fcc0, 1, Rresult);
-
+  if (is_float) {
+    fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb);
   } else {
-    Label done;
-
-    set( -1, Rresult );
-    //fb(lt, true, pn, done); delayed()->set( -1, Rresult );
-    fb( eq, true, pn, done);  delayed()->set(  0, Rresult );
-    fb( gt, true, pn, done);  delayed()->set(  1, Rresult );
-
-    bind (done);
+    fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb);
+  }
+
+  if (unordered_result == 1) {
+    mov(                                    -1, Rresult);
+    movcc(f_equal,              true, fcc0,  0, Rresult);
+    movcc(f_unorderedOrGreater, true, fcc0,  1, Rresult);
+  } else {
+    mov(                                    -1, Rresult);
+    movcc(f_equal,              true, fcc0,  0, Rresult);
+    movcc(f_greater,            true, fcc0,  1, Rresult);
   }
 }
 
 
-void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
-{
-  if (VM_Version::v9_instructions_work()) {
-    Assembler::fneg(w, s, d);
-  } else {
-    if (w == FloatRegisterImpl::S) {
-      Assembler::fneg(w, s, d);
-    } else if (w == FloatRegisterImpl::D) {
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
-        ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
-
-      Assembler::fneg(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-    } else {
-      assert(w == FloatRegisterImpl::Q, "Invalid float register width");
-
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
-        ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
-
-      Assembler::fneg(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
-    }
-  }
-}
-
-void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
-{
-  if (VM_Version::v9_instructions_work()) {
-    Assembler::fmov(w, s, d);
-  } else {
-    if (w == FloatRegisterImpl::S) {
-      Assembler::fmov(w, s, d);
-    } else if (w == FloatRegisterImpl::D) {
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
-        ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
-
-      Assembler::fmov(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-    } else {
-      assert(w == FloatRegisterImpl::Q, "Invalid float register width");
-
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
-        ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
-
-      Assembler::fmov(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
-    }
-  }
-}
-
-void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
-{
-  if (VM_Version::v9_instructions_work()) {
-    Assembler::fabs(w, s, d);
-  } else {
-    if (w == FloatRegisterImpl::S) {
-      Assembler::fabs(w, s, d);
-    } else if (w == FloatRegisterImpl::D) {
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
-        ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
-
-      Assembler::fabs(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-    } else {
-      assert(w == FloatRegisterImpl::Q, "Invalid float register width");
-
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
-       ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
-
-      Assembler::fabs(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
-    }
-  }
-}
-
 void MacroAssembler::save_all_globals_into_locals() {
   mov(G1,L1);
   mov(G2,L2);
@@ -2250,135 +2048,6 @@
   mov(L7,G7);
 }
 
-// Use for 64 bit operation.
-void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
-{
-  // store ptr_reg as the new top value
-#ifdef _LP64
-  casx(top_ptr_reg, top_reg, ptr_reg);
-#else
-  cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm);
-#endif // _LP64
-}
-
-// [RGV] This routine does not handle 64 bit operations.
-//       use casx_under_lock() or casx directly!!!
-void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
-{
-  // store ptr_reg as the new top value
-  if (VM_Version::v9_instructions_work()) {
-    cas(top_ptr_reg, top_reg, ptr_reg);
-  } else {
-
-    // If the register is not an out nor global, it is not visible
-    // after the save.  Allocate a register for it, save its
-    // value in the register save area (the save may not flush
-    // registers to the save area).
-
-    Register top_ptr_reg_after_save;
-    Register top_reg_after_save;
-    Register ptr_reg_after_save;
-
-    if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) {
-      top_ptr_reg_after_save = top_ptr_reg->after_save();
-    } else {
-      Address reg_save_addr = top_ptr_reg->address_in_saved_window();
-      top_ptr_reg_after_save = L0;
-      st(top_ptr_reg, reg_save_addr);
-    }
-
-    if (top_reg->is_out() || top_reg->is_global()) {
-      top_reg_after_save = top_reg->after_save();
-    } else {
-      Address reg_save_addr = top_reg->address_in_saved_window();
-      top_reg_after_save = L1;
-      st(top_reg, reg_save_addr);
-    }
-
-    if (ptr_reg->is_out() || ptr_reg->is_global()) {
-      ptr_reg_after_save = ptr_reg->after_save();
-    } else {
-      Address reg_save_addr = ptr_reg->address_in_saved_window();
-      ptr_reg_after_save = L2;
-      st(ptr_reg, reg_save_addr);
-    }
-
-    const Register& lock_reg = L3;
-    const Register& lock_ptr_reg = L4;
-    const Register& value_reg = L5;
-    const Register& yield_reg = L6;
-    const Register& yieldall_reg = L7;
-
-    save_frame();
-
-    if (top_ptr_reg_after_save == L0) {
-      ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save);
-    }
-
-    if (top_reg_after_save == L1) {
-      ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save);
-    }
-
-    if (ptr_reg_after_save == L2) {
-      ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save);
-    }
-
-    Label(retry_get_lock);
-    Label(not_same);
-    Label(dont_yield);
-
-    assert(lock_addr, "lock_address should be non null for v8");
-    set((intptr_t)lock_addr, lock_ptr_reg);
-    // Initialize yield counter
-    mov(G0,yield_reg);
-    mov(G0, yieldall_reg);
-    set(StubRoutines::Sparc::locked, lock_reg);
-
-    bind(retry_get_lock);
-    cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dont_yield);
-
-    if(use_call_vm) {
-      Untested("Need to verify global reg consistancy");
-      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg);
-    } else {
-      // Save the regs and make space for a C call
-      save(SP, -96, SP);
-      save_all_globals_into_locals();
-      call(CAST_FROM_FN_PTR(address,os::yield_all));
-      delayed()->mov(yieldall_reg, O0);
-      restore_globals_from_locals();
-      restore();
-    }
-
-    // reset the counter
-    mov(G0,yield_reg);
-    add(yieldall_reg, 1, yieldall_reg);
-
-    bind(dont_yield);
-    // try to get lock
-    Assembler::swap(lock_ptr_reg, 0, lock_reg);
-
-    // did we get the lock?
-    cmp(lock_reg, StubRoutines::Sparc::unlocked);
-    br(Assembler::notEqual, true, Assembler::pn, retry_get_lock);
-    delayed()->add(yield_reg,1,yield_reg);
-
-    // yes, got lock.  do we have the same top?
-    ld(top_ptr_reg_after_save, 0, value_reg);
-    cmp_and_br_short(value_reg, top_reg_after_save, Assembler::notEqual, Assembler::pn, not_same);
-
-    // yes, same top.
-    st(ptr_reg_after_save, top_ptr_reg_after_save, 0);
-    membar(Assembler::StoreStore);
-
-    bind(not_same);
-    mov(value_reg, ptr_reg_after_save);
-    st(lock_reg, lock_ptr_reg, 0); // unlock
-
-    restore();
-  }
-}
-
 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                       Register tmp,
                                                       int offset) {
@@ -2970,7 +2639,7 @@
                   markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
                   mark_reg);
   or3(G2_thread, mark_reg, temp_reg);
-  casn(mark_addr.base(), mark_reg, temp_reg);
+  cas_ptr(mark_addr.base(), mark_reg, temp_reg);
   // If the biasing toward our thread failed, this means that
   // another thread succeeded in biasing it toward itself and we
   // need to revoke that bias. The revocation will occur in the
@@ -2998,7 +2667,7 @@
   load_klass(obj_reg, temp_reg);
   ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
   or3(G2_thread, temp_reg, temp_reg);
-  casn(mark_addr.base(), mark_reg, temp_reg);
+  cas_ptr(mark_addr.base(), mark_reg, temp_reg);
   // If the biasing toward our thread failed, this means that
   // another thread succeeded in biasing it toward itself and we
   // need to revoke that bias. The revocation will occur in the
@@ -3027,7 +2696,7 @@
   // bits in this situation. Should attempt to preserve them.
   load_klass(obj_reg, temp_reg);
   ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
-  casn(mark_addr.base(), mark_reg, temp_reg);
+  cas_ptr(mark_addr.base(), mark_reg, temp_reg);
   // Fall through to the normal CAS-based lock, because no matter what
   // the result of the above CAS, some thread must have succeeded in
   // removing the bias bit from the object's header.
@@ -3058,15 +2727,6 @@
 }
 
 
-// CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by
-// Solaris/SPARC's "as".  Another apt name would be cas_ptr()
-
-void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) {
-  casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
-}
-
-
-
 // compiler_lock_object() and compiler_unlock_object() are direct transliterations
 // of i486.ad fast_lock() and fast_unlock().  See those methods for detailed comments.
 // The code could be tightened up considerably.
@@ -3129,8 +2789,7 @@
 
      // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
      assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-     casx_under_lock(mark_addr.base(), Rmark, Rscratch,
-        (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+     cas_ptr(mark_addr.base(), Rmark, Rscratch);
 
      // if compare/exchange succeeded we found an unlocked object and we now have locked it
      // hence we are done
@@ -3176,7 +2835,7 @@
       mov(Rbox,  Rscratch);
       or3(Rmark, markOopDesc::unlocked_value, Rmark);
       assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-      casn(mark_addr.base(), Rmark, Rscratch);
+      cas_ptr(mark_addr.base(), Rmark, Rscratch);
       cmp(Rmark, Rscratch);
       brx(Assembler::equal, false, Assembler::pt, done);
       delayed()->sub(Rscratch, SP, Rscratch);
@@ -3207,7 +2866,7 @@
       // Invariant: if we acquire the lock then _recursions should be 0.
       add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
       mov(G2_thread, Rscratch);
-      casn(Rmark, G0, Rscratch);
+      cas_ptr(Rmark, G0, Rscratch);
       cmp(Rscratch, G0);
       // Intentional fall-through into done
    } else {
@@ -3240,7 +2899,7 @@
       mov(0, Rscratch);
       or3(Rmark, markOopDesc::unlocked_value, Rmark);
       assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-      casn(mark_addr.base(), Rmark, Rscratch);
+      cas_ptr(mark_addr.base(), Rmark, Rscratch);
 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
       cmp(Rscratch, Rmark);
       brx(Assembler::notZero, false, Assembler::pn, Recursive);
@@ -3266,7 +2925,7 @@
       // the fast-path stack-lock code from the interpreter and always passed
       // control to the "slow" operators in synchronizer.cpp.
 
-      // RScratch contains the fetched obj->mark value from the failed CASN.
+      // RScratch contains the fetched obj->mark value from the failed CAS.
 #ifdef _LP64
       sub(Rscratch, STACK_BIAS, Rscratch);
 #endif
@@ -3300,7 +2959,7 @@
       // Invariant: if we acquire the lock then _recursions should be 0.
       add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
       mov(G2_thread, Rscratch);
-      casn(Rmark, G0, Rscratch);
+      cas_ptr(Rmark, G0, Rscratch);
       cmp(Rscratch, G0);
       // ST box->displaced_header = NonZero.
       // Any non-zero value suffices:
@@ -3336,8 +2995,7 @@
      // Check if it is still a light weight lock, this is is true if we see
      // the stack address of the basicLock in the markOop of the object
      assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-     casx_under_lock(mark_addr.base(), Rbox, Rmark,
-       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+     cas_ptr(mark_addr.base(), Rbox, Rmark);
      ba(done);
      delayed()->cmp(Rbox, Rmark);
      bind(done);
@@ -3398,7 +3056,7 @@
       delayed()->andcc(G0, G0, G0);
       add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
       mov(G2_thread, Rscratch);
-      casn(Rmark, G0, Rscratch);
+      cas_ptr(Rmark, G0, Rscratch);
       // invert icc.zf and goto done
       br_notnull(Rscratch, false, Assembler::pt, done);
       delayed()->cmp(G0, G0);
@@ -3440,7 +3098,7 @@
    // A prototype implementation showed excellent results, although
    // the scavenger and timeout code was rather involved.
 
-   casn(mark_addr.base(), Rbox, Rscratch);
+   cas_ptr(mark_addr.base(), Rbox, Rscratch);
    cmp(Rbox, Rscratch);
    // Intentional fall through into done ...
 
@@ -3540,7 +3198,8 @@
 
   if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
     // No allocation in the shared eden.
-    ba_short(slow_case);
+    ba(slow_case);
+    delayed()->nop();
   } else {
     // get eden boundaries
     // note: we need both top & top_addr!
@@ -3583,7 +3242,7 @@
     // Compare obj with the value at top_addr; if still equal, swap the value of
     // end with the value at top_addr. If not equal, read the value at top_addr
     // into end.
-    casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+    cas_ptr(top_addr, obj, end);
     // if someone beat us on the allocation, try again, otherwise continue
     cmp(obj, end);
     brx(Assembler::notEqual, false, Assembler::pn, retry);
--- a/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -963,7 +963,7 @@
   inline void sub(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
 
   using Assembler::swap;
-  inline void swap(Address& a, Register d, int offset = 0);
+  inline void swap(const Address& a, Register d, int offset = 0);
 
   // address pseudos: make these names unlike instruction names to avoid confusion
   inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
@@ -1056,13 +1056,6 @@
 
   void breakpoint_trap();
   void breakpoint_trap(Condition c, CC cc);
-  void flush_windows_trap();
-  void clean_windows_trap();
-  void get_psr_trap();
-  void set_psr_trap();
-
-  // V8/V9 flush_windows
-  void flush_windows();
 
   // Support for serializing memory accesses between threads
   void serialize_memory(Register thread, Register tmp1, Register tmp2);
@@ -1071,14 +1064,6 @@
   void enter();
   void leave();
 
-  // V8/V9 integer multiply
-  void mult(Register s1, Register s2, Register d);
-  void mult(Register s1, int simm13a, Register d);
-
-  // V8/V9 read and write of condition codes.
-  void read_ccr(Register d);
-  void write_ccr(Register s);
-
   // Manipulation of C++ bools
   // These are idioms to flag the need for care with accessing bools but on
   // this platform we assume byte size
@@ -1162,21 +1147,6 @@
   // check_and_forward_exception to handle exceptions when it is safe
   void check_and_forward_exception(Register scratch_reg);
 
- private:
-  // For V8
-  void read_ccr_trap(Register ccr_save);
-  void write_ccr_trap(Register ccr_save1, Register scratch1, Register scratch2);
-
-#ifdef ASSERT
-  // For V8 debugging.  Uses V8 instruction sequence and checks
-  // result with V9 insturctions rdccr and wrccr.
-  // Uses Gscatch and Gscatch2
-  void read_ccr_v8_assert(Register ccr_save);
-  void write_ccr_v8_assert(Register ccr_save);
-#endif // ASSERT
-
- public:
-
   // Write to card table for - register is destroyed afterwards.
   void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);
 
@@ -1314,20 +1284,9 @@
                   FloatRegister Fa, FloatRegister Fb,
                   Register Rresult);
 
-  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
-  void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { Assembler::fneg(w, sd); }
-  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
-  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
-
   void save_all_globals_into_locals();
   void restore_globals_from_locals();
 
-  void casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
-    address lock_addr=0, bool use_call_vm=false);
-  void cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
-    address lock_addr=0, bool use_call_vm=false);
-  void casn (Register addr_reg, Register cmp_reg, Register set_reg) ;
-
   // These set the icc condition code to equal if the lock succeeded
   // and notEqual if it failed and requires a slow case
   void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,
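
Note: the casn/cas_under_lock/casx_under_lock trio removed here is replaced throughout by a single cas_ptr, and the removed _LP64 branch in the .cpp (casx on 64-bit, cas otherwise) suggests cas_ptr is simply the pointer-width compare-and-swap that leaves the prior memory contents in the destination register. A small self-contained sketch of that semantic (std::atomic stands in for the SPARC cas/casx instructions; the name cas_ptr_sketch is made up for illustration):

    #include <atomic>
    #include <cstdint>

    // Pointer-width CAS sketch: compare *mem with 'expected'; if equal, store
    // 'desired'; either way return what was in memory, the way cas/casx leave
    // the old value in rd. On _LP64 builds this corresponds to the 64-bit casx.
    static intptr_t cas_ptr_sketch(std::atomic<intptr_t>* mem,
                                   intptr_t expected, intptr_t desired) {
      intptr_t old = expected;
      mem->compare_exchange_strong(old, desired);
      return old;
    }
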
--- a/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -229,10 +229,7 @@
 // Use the right branch for the platform
 
 inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
-  if (VM_Version::v9_instructions_work())
-    Assembler::bp(c, a, icc, p, d, rt);
-  else
-    Assembler::br(c, a, d, rt);
+  Assembler::bp(c, a, icc, p, d, rt);
 }
 
 inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
@@ -268,10 +265,7 @@
 }
 
 inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
-  if (VM_Version::v9_instructions_work())
-    fbp(c, a, fcc0, p, d, rt);
-  else
-    Assembler::fb(c, a, d, rt);
+  fbp(c, a, fcc0, p, d, rt);
 }
 
 inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
@@ -334,7 +328,6 @@
 
 // prefetch instruction
 inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
-  if (VM_Version::v9_instructions_work())
-    Assembler::bp( never, true, xcc, pt, d, rt );
+  Assembler::bp( never, true, xcc, pt, d, rt );
 }
 inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
@@ -344,15 +338,7 @@
 // returns delta from gotten pc to addr after
 inline int MacroAssembler::get_pc( Register d ) {
   int x = offset();
-  if (VM_Version::v9_instructions_work())
-    rdpc(d);
-  else {
-    Label lbl;
-    Assembler::call(lbl, relocInfo::none);  // No relocation as this is call to pc+0x8
-    if (d == O7)  delayed()->nop();
-    else          delayed()->mov(O7, d);
-    bind(lbl);
-  }
+  rdpc(d);
   return offset() - x;
 }
 
@@ -646,41 +632,26 @@
 // returns if membar generates anything, obviously this code should mirror
 // membar below.
 inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
-  if( !os::is_MP() ) return false;  // Not needed on single CPU
-  if( VM_Version::v9_instructions_work() ) {
-    const Membar_mask_bits effective_mask =
-        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
-    return (effective_mask != 0);
-  } else {
-    return true;
-  }
+  if (!os::is_MP())
+    return false;  // Not needed on single CPU
+  const Membar_mask_bits effective_mask =
+      Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
+  return (effective_mask != 0);
 }
 
 inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
   // Uniprocessors do not need memory barriers
-  if (!os::is_MP()) return;
+  if (!os::is_MP())
+    return;
   // Weakened for current Sparcs and TSO.  See the v9 manual, sections 8.4.3,
   // 8.4.4.3, a.31 and a.50.
-  if( VM_Version::v9_instructions_work() ) {
-    // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
-    // of the mmask subfield of const7a that does anything that isn't done
-    // implicitly is StoreLoad.
-    const Membar_mask_bits effective_mask =
-        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
-    if ( effective_mask != 0 ) {
-      Assembler::membar( effective_mask );
-    }
-  } else {
-    // stbar is the closest there is on v8.  Equivalent to membar(StoreStore).  We
-    // do not issue the stbar because to my knowledge all v8 machines implement TSO,
-    // which guarantees that all stores behave as if an stbar were issued just after
-    // each one of them.  On these machines, stbar ought to be a nop.  There doesn't
-    // appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
-    // it can't be specified by stbar, nor have I come up with a way to simulate it.
-    //
-    // Addendum.  Dave says that ldstub guarantees a write buffer flush to coherent
-    // space.  Put one here to be on the safe side.
-    Assembler::ldstub(SP, 0, G0);
+  // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
+  // of the mmask subfield of const7a that does anything that isn't done
+  // implicitly is StoreLoad.
+  const Membar_mask_bits effective_mask =
+      Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
+  if (effective_mask != 0) {
+    Assembler::membar(effective_mask);
   }
 }
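
Note on the membar change above: under TSO only a requested StoreLoad ordering needs an actual membar instruction; the other three mmask bits are stripped before deciding whether to emit anything. A compilable sketch of that masking, using the bit numbering the old comment spells out (bits 3, 2 and 0 are the implicit ones; the enum values here are for illustration):

    // Mask reduction sketch: only StoreLoad survives under TSO.
    enum Membar_mask_bits_sketch {
      LoadLoad   = 1 << 0,   // bit 0: implicit under TSO
      StoreLoad  = 1 << 1,   // bit 1: the only bit that still needs a membar
      LoadStore  = 1 << 2,   // bit 2: implicit under TSO
      StoreStore = 1 << 3    // bit 3: implicit under TSO
    };

    static bool membar_has_effect_sketch(int const7a) {
      int effective = const7a & ~(LoadLoad | LoadStore | StoreStore);
      return effective != 0;   // emit a membar only if StoreLoad was asked for
    }
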
 
@@ -748,7 +719,7 @@
   if (offset != 0)       sub(d,  offset,                    d);
 }
 
-inline void MacroAssembler::swap(Address& a, Register d, int offset) {
+inline void MacroAssembler::swap(const Address& a, Register d, int offset) {
   relocate(a.rspec(offset));
   if (a.has_index()) { assert(offset == 0, ""); swap(a.base(), a.index(), d        ); }
   else               {                          swap(a.base(), a.disp() + offset, d); }
--- a/src/cpu/sparc/vm/nativeInst_sparc.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/nativeInst_sparc.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -162,7 +162,7 @@
    int i1 = ((int*)code_buffer)[1];
    int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
    assert(inv_op(*contention_addr) == Assembler::arith_op ||
-          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+          *contention_addr == nop_instruction(),
           "must not interfere with original call");
    // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
    n_call->set_long_at(1*BytesPerInstWord, i1);
@@ -181,7 +181,7 @@
    // Make sure the first-patched instruction, which may co-exist
    // briefly with the call, will do something harmless.
    assert(inv_op(*contention_addr) == Assembler::arith_op ||
-          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+          *contention_addr == nop_instruction(),
           "must not interfere with original call");
 }
 
@@ -933,11 +933,7 @@
   int code_size = 1 * BytesPerInstWord;
   CodeBuffer cb(verified_entry, code_size + 1);
   MacroAssembler* a = new MacroAssembler(&cb);
-  if (VM_Version::v9_instructions_work()) {
-    a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
-  } else {
-    a->lduw(G0, 0, O7); // "ld" must agree with code in the signal handler
-  }
+  a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
   ICache::invalidate_range(verified_entry, code_size);
 }
 
@@ -1024,7 +1020,7 @@
    int i1 = ((int*)code_buffer)[1];
    int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
    assert(inv_op(*contention_addr) == Assembler::arith_op ||
-          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+          *contention_addr == nop_instruction(),
           "must not interfere with original call");
    // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
    h_jump->set_long_at(1*BytesPerInstWord, i1);
@@ -1043,6 +1039,6 @@
    // Make sure the first-patched instruction, which may co-exist
    // briefly with the call, will do something harmless.
    assert(inv_op(*contention_addr) == Assembler::arith_op ||
-          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+          *contention_addr == nop_instruction(),
           "must not interfere with original call");
 }
--- a/src/cpu/sparc/vm/nativeInst_sparc.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/nativeInst_sparc.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -70,8 +70,7 @@
   bool is_zombie() {
     int x = long_at(0);
     return is_op3(x,
-                  VM_Version::v9_instructions_work() ?
-                    Assembler::ldsw_op3 : Assembler::lduw_op3,
+                  Assembler::ldsw_op3,
                   Assembler::ldst_op)
         && Assembler::inv_rs1(x) == G0
         && Assembler::inv_rd(x) == O7;
--- a/src/cpu/sparc/vm/register_sparc.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/register_sparc.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -249,12 +249,10 @@
 
       case D:
         assert(c < 64  &&  (c & 1) == 0, "bad double float register");
-        assert(c < 32 || VM_Version::v9_instructions_work(), "V9 float work only on V9 platform");
         return (c & 0x1e) | ((c & 0x20) >> 5);
 
       case Q:
         assert(c < 64  &&  (c & 3) == 0, "bad quad float register");
-        assert(c < 32 || VM_Version::v9_instructions_work(), "V9 float work only on V9 platform");
         return (c & 0x1c) | ((c & 0x20) >> 5);
     }
     ShouldNotReachHere();
--- a/src/cpu/sparc/vm/relocInfo_sparc.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/relocInfo_sparc.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -193,36 +193,6 @@
   return *(address*)addr();
 }
 
-
-int Relocation::pd_breakpoint_size() {
-  // minimum breakpoint size, in short words
-  return NativeIllegalInstruction::instruction_size / sizeof(short);
-}
-
-void Relocation::pd_swap_in_breakpoint(address x, short* instrs, int instrlen) {
-  Untested("pd_swap_in_breakpoint");
-  // %%% probably do not need a general instrlen; just use the trap size
-  if (instrs != NULL) {
-    assert(instrlen * sizeof(short) == NativeIllegalInstruction::instruction_size, "enough instrlen in reloc. data");
-    for (int i = 0; i < instrlen; i++) {
-      instrs[i] = ((short*)x)[i];
-    }
-  }
-  NativeIllegalInstruction::insert(x);
-}
-
-
-void Relocation::pd_swap_out_breakpoint(address x, short* instrs, int instrlen) {
-  Untested("pd_swap_out_breakpoint");
-  assert(instrlen * sizeof(short) == sizeof(int), "enough buf");
-  union { int l; short s[1]; } u;
-  for (int i = 0; i < instrlen; i++) {
-    u.s[i] = instrs[i];
-  }
-  NativeInstruction* ni = nativeInstruction_at(x);
-  ni->set_long_at(0, u.l);
-}
-
 void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
 }
 
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -2459,7 +2459,7 @@
 
   // Finally just about ready to make the JNI call
 
-  __ flush_windows();
+  __ flushw();
   if (inner_frame_created) {
     __ restore();
   } else {
--- a/src/cpu/sparc/vm/sparc.ad	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/sparc.ad	Mon Jul 15 11:07:03 2013 +0100
@@ -2778,10 +2778,7 @@
     Register Rold = reg_to_register_object($old$$reg);
     Register Rnew = reg_to_register_object($new$$reg);
 
-    // casx_under_lock picks 1 of 3 encodings:
-    // For 32-bit pointers you get a 32-bit CAS
-    // For 64-bit pointers you get a 64-bit CASX
-    __ casn(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
+    __ cas_ptr(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
     __ cmp( Rold, Rnew );
   %}
 
@@ -3067,7 +3064,7 @@
     AddressLiteral last_rethrow_addrlit(&last_rethrow);
     __ sethi(last_rethrow_addrlit, L1);
     Address addr(L1, last_rethrow_addrlit.low10());
-    __ get_pc(L2);
+    __ rdpc(L2);
     __ inc(L2, 3 * BytesPerInstWord);  // skip this & 2 more insns to point at jump_to
     __ st_ptr(L2, addr);
     __ restore();
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -566,7 +566,7 @@
     StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
     address start = __ pc();
 
-    __ flush_windows();
+    __ flushw();
     __ retl(false);
     __ delayed()->add( FP, STACK_BIAS, O0 );
     // The returned value must be a stack pointer whose register save area
@@ -575,67 +575,9 @@
     return start;
   }
 
-  // Helper functions for v8 atomic operations.
-  //
-  void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) {
-    if (mark_oop_reg == noreg) {
-      address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr();
-      __ set((intptr_t)lock_ptr, lock_ptr_reg);
-    } else {
-      assert(scratch_reg != noreg, "just checking");
-      address lock_ptr = (address)StubRoutines::Sparc::_v8_oop_lock_cache;
-      __ set((intptr_t)lock_ptr, lock_ptr_reg);
-      __ and3(mark_oop_reg, StubRoutines::Sparc::v8_oop_lock_mask_in_place, scratch_reg);
-      __ add(lock_ptr_reg, scratch_reg, lock_ptr_reg);
-    }
-  }
-
-  void generate_v8_lock_prologue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
-
-    get_v8_oop_lock_ptr(lock_ptr_reg, mark_oop_reg, scratch_reg);
-    __ set(StubRoutines::Sparc::locked, lock_reg);
-    // Initialize yield counter
-    __ mov(G0,yield_reg);
-
-    __ BIND(retry);
-    __ cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dontyield);
-
-    // This code can only be called from inside the VM, this
-    // stub is only invoked from Atomic::add().  We do not
-    // want to use call_VM, because _last_java_sp and such
-    // must already be set.
-    //
-    // Save the regs and make space for a C call
-    __ save(SP, -96, SP);
-    __ save_all_globals_into_locals();
-    BLOCK_COMMENT("call os::naked_sleep");
-    __ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
-    __ delayed()->nop();
-    __ restore_globals_from_locals();
-    __ restore();
-    // reset the counter
-    __ mov(G0,yield_reg);
-
-    __ BIND(dontyield);
-
-    // try to get lock
-    __ swap(lock_ptr_reg, 0, lock_reg);
-
-    // did we get the lock?
-    __ cmp(lock_reg, StubRoutines::Sparc::unlocked);
-    __ br(Assembler::notEqual, true, Assembler::pn, retry);
-    __ delayed()->add(yield_reg,1,yield_reg);
-
-    // yes, got lock. do the operation here.
-  }
-
-  void generate_v8_lock_epilogue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
-    __ st(lock_reg, lock_ptr_reg, 0); // unlock
-  }
-
   // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
   //
-  // Arguments :
+  // Arguments:
   //
   //      exchange_value: O0
   //      dest:           O1
@@ -656,33 +598,14 @@
       __ mov(O0, O3);       // scratch copy of exchange value
       __ ld(O1, 0, O2);     // observe the previous value
       // try to replace O2 with O3
-      __ cas_under_lock(O1, O2, O3,
-      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
+      __ cas(O1, O2, O3);
       __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
 
       __ retl(false);
       __ delayed()->mov(O2, O0);  // report previous value to caller
-
     } else {
-      if (VM_Version::v9_instructions_work()) {
-        __ retl(false);
-        __ delayed()->swap(O1, 0, O0);
-      } else {
-        const Register& lock_reg = O2;
-        const Register& lock_ptr_reg = O3;
-        const Register& yield_reg = O4;
-
-        Label retry;
-        Label dontyield;
-
-        generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-        // got the lock, do the swap
-        __ swap(O1, 0, O0);
-
-        generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-        __ retl(false);
-        __ delayed()->nop();
-      }
+      __ retl(false);
+      __ delayed()->swap(O1, 0, O0);
     }
 
     return start;
@@ -691,7 +614,7 @@
 
   // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
   //
-  // Arguments :
+  // Arguments:
   //
   //      exchange_value: O0
   //      dest:           O1
@@ -701,15 +624,12 @@
   //
   //     O0: the value previously stored in dest
   //
-  // Overwrites (v8): O3,O4,O5
-  //
   address generate_atomic_cmpxchg() {
     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
     address start = __ pc();
 
     // cmpxchg(dest, compare_value, exchange_value)
-    __ cas_under_lock(O1, O2, O0,
-      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
+    __ cas(O1, O2, O0);
     __ retl(false);
     __ delayed()->nop();
 
@@ -718,7 +638,7 @@
 
   // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
   //
-  // Arguments :
+  // Arguments:
   //
   //      exchange_value: O1:O0
   //      dest:           O2
@@ -728,17 +648,12 @@
   //
   //     O1:O0: the value previously stored in dest
   //
-  // This only works on V9, on V8 we don't generate any
-  // code and just return NULL.
-  //
   // Overwrites: G1,G2,G3
   //
   address generate_atomic_cmpxchg_long() {
     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
     address start = __ pc();
 
-    if (!VM_Version::supports_cx8())
-        return NULL;;
     __ sllx(O0, 32, O0);
     __ srl(O1, 0, O1);
     __ or3(O0,O1,O0);      // O0 holds 64-bit value from compare_value
@@ -756,7 +671,7 @@
 
   // Support for jint Atomic::add(jint add_value, volatile jint* dest).
   //
-  // Arguments :
+  // Arguments:
   //
   //      add_value: O0   (e.g., +1 or -1)
   //      dest:      O1
@@ -765,47 +680,22 @@
   //
   //     O0: the new value stored in dest
   //
-  // Overwrites (v9): O3
-  // Overwrites (v8): O3,O4,O5
+  // Overwrites: O3
   //
   address generate_atomic_add() {
     StubCodeMark mark(this, "StubRoutines", "atomic_add");
     address start = __ pc();
     __ BIND(_atomic_add_stub);
 
-    if (VM_Version::v9_instructions_work()) {
-      Label(retry);
-      __ BIND(retry);
-
-      __ lduw(O1, 0, O2);
-      __ add(O0, O2, O3);
-      __ cas(O1, O2, O3);
-      __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
-      __ retl(false);
-      __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
-    } else {
-      const Register& lock_reg = O2;
-      const Register& lock_ptr_reg = O3;
-      const Register& value_reg = O4;
-      const Register& yield_reg = O5;
-
-      Label(retry);
-      Label(dontyield);
-
-      generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-      // got lock, do the increment
-      __ ld(O1, 0, value_reg);
-      __ add(O0, value_reg, value_reg);
-      __ st(value_reg, O1, 0);
-
-      // %%% only for RMO and PSO
-      __ membar(Assembler::StoreStore);
-
-      generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-
-      __ retl(false);
-      __ delayed()->mov(value_reg, O0);
-    }
+    Label(retry);
+    __ BIND(retry);
+
+    __ lduw(O1, 0, O2);
+    __ add(O0, O2, O3);
+    __ cas(O1, O2, O3);
+    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
+    __ retl(false);
+    __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
 
     return start;
   }
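
Note: with the V8 lock-based fallback gone, generate_atomic_add() is a plain lduw / add / cas retry loop. The same shape in portable C++, with std::atomic standing in for the cas instruction (sketch only, not part of the changeset):

    #include <atomic>
    #include <cstdint>

    // Retry until our CAS wins, then return the updated value, mirroring
    // lduw(O1,0,O2); add(O0,O2,O3); cas(O1,O2,O3); branch back if we lost;
    // the final add sits in the delay slot of the return.
    static int32_t atomic_add_sketch(std::atomic<int32_t>* dest, int32_t add_value) {
      int32_t observed = dest->load();
      while (!dest->compare_exchange_weak(observed, observed + add_value)) {
        // lost the race; 'observed' was refreshed with the current value, retry
      }
      return observed + add_value;
    }
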
@@ -841,7 +731,7 @@
     __ mov(G3, L3);
     __ mov(G4, L4);
     __ mov(G5, L5);
-    for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
+    for (i = 0; i < 64; i += 2) {
       __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
     }
 
@@ -855,7 +745,7 @@
     __ mov(L3, G3);
     __ mov(L4, G4);
     __ mov(L5, G5);
-    for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
+    for (i = 0; i < 64; i += 2) {
       __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
     }
 
--- a/src/cpu/sparc/vm/stubRoutines_sparc.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/stubRoutines_sparc.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -52,7 +52,3 @@
 address StubRoutines::Sparc::_flush_callers_register_windows_entry = CAST_FROM_FN_PTR(address, bootstrap_flush_windows);
 
 address StubRoutines::Sparc::_partial_subtype_check = NULL;
-
-int StubRoutines::Sparc::_atomic_memory_operation_lock = StubRoutines::Sparc::unlocked;
-
-int StubRoutines::Sparc::_v8_oop_lock_cache[StubRoutines::Sparc::nof_v8_oop_lock_cache_entries];
--- a/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -47,46 +47,14 @@
 class Sparc {
  friend class StubGenerator;
 
- public:
-  enum { nof_instance_allocators = 10 };
-
-  // allocator lock values
-  enum {
-    unlocked = 0,
-    locked   = 1
-  };
-
-  enum {
-    v8_oop_lock_ignore_bits = 2,
-    v8_oop_lock_bits = 4,
-    nof_v8_oop_lock_cache_entries = 1 << (v8_oop_lock_bits+v8_oop_lock_ignore_bits),
-    v8_oop_lock_mask = right_n_bits(v8_oop_lock_bits),
-    v8_oop_lock_mask_in_place = v8_oop_lock_mask << v8_oop_lock_ignore_bits
-  };
-
-  static int _v8_oop_lock_cache[nof_v8_oop_lock_cache_entries];
-
  private:
   static address _test_stop_entry;
   static address _stop_subroutine_entry;
   static address _flush_callers_register_windows_entry;
 
-  static int _atomic_memory_operation_lock;
-
   static address _partial_subtype_check;
 
  public:
-  // %%% global lock for everyone who needs to use atomic_compare_and_exchange
-  // %%% or atomic_increment -- should probably use more locks for more
-  // %%% scalability-- for instance one for each eden space or group of
-
-  // address of the lock for atomic_compare_and_exchange
-  static int* atomic_memory_operation_lock_addr() { return &_atomic_memory_operation_lock; }
-
-  // accessor and mutator for _atomic_memory_operation_lock
-  static int atomic_memory_operation_lock() { return _atomic_memory_operation_lock; }
-  static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; }
-
   // test assembler stop routine by setting registers
   static void (*test_stop_entry()) ()                     { return CAST_TO_FN_PTR(void (*)(void), _test_stop_entry); }
 
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1054,7 +1054,7 @@
   // flush the windows now. We don't care about the current (protection) frame
   // only the outer frames
 
-  __ flush_windows();
+  __ flushw();
 
   // mark windows as flushed
   Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1338,14 +1338,13 @@
 
 void TemplateTable::fneg() {
   transition(ftos, ftos);
-  __ fneg(FloatRegisterImpl::S, Ftos_f);
+  __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
 }
 
 
 void TemplateTable::dneg() {
   transition(dtos, dtos);
-  // v8 has fnegd if source and dest are the same
-  __ fneg(FloatRegisterImpl::D, Ftos_f);
+  __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
 }
 
 
@@ -1470,19 +1469,10 @@
     __ st_long(Otos_l, __ d_tmp);
     __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);
 
-    if (VM_Version::v9_instructions_work()) {
-      if (bytecode() == Bytecodes::_l2f) {
-        __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
-      } else {
-        __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
-      }
+    if (bytecode() == Bytecodes::_l2f) {
+      __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
     } else {
-      __ call_VM_leaf(
-        Lscratch,
-        bytecode() == Bytecodes::_l2f
-          ? CAST_FROM_FN_PTR(address, SharedRuntime::l2f)
-          : CAST_FROM_FN_PTR(address, SharedRuntime::l2d)
-      );
+      __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
     }
     break;
 
@@ -1490,11 +1480,6 @@
       Label isNaN;
       // result must be 0 if value is NaN; test by comparing value to itself
       __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
-      // According to the v8 manual, you have to have a non-fp instruction
-      // between fcmp and fb.
-      if (!VM_Version::v9_instructions_work()) {
-        __ nop();
-      }
       __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
       __ delayed()->clr(Otos_i);                                     // NaN
       __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
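
Note on the _f2i path above: the only V8 workaround removed is the nop between fcmp and fb; the NaN handling itself stays. The value is compared with itself, and the unordered case forces a zero result. A sketch of that contract (truncation shown as a plain cast; overflow clamping is outside this sketch):

    #include <cstdint>

    // NaN never compares equal to itself, so the self-comparison picks out the
    // unordered case and yields 0; otherwise convert as usual.
    static int32_t f2i_sketch(float f) {
      if (f != f) {          // fcmp Ftos_f, Ftos_f; fb(f_unordered, ...)
        return 0;            // delayed()->clr(Otos_i)
      }
      return (int32_t)f;     // ftoi(...); in-range values only in this sketch
    }
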
@@ -1537,16 +1522,7 @@
     break;
 
     case Bytecodes::_d2f:
-    if (VM_Version::v9_instructions_work()) {
       __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
-    }
-    else {
-      // must uncache tos
-      __ push_d();
-      __ pop_i(O0);
-      __ pop_i(O1);
-      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f));
-    }
     break;
 
     default: ShouldNotReachHere();
@@ -1956,17 +1932,8 @@
     __ ld( Rarray, Rscratch, Rscratch );
     // (Rscratch is already in the native byte-ordering.)
     __ cmp( Rkey, Rscratch );
-    if ( VM_Version::v9_instructions_work() ) {
-      __ movcc( Assembler::less,         false, Assembler::icc, Rh, Rj );  // j = h if (key <  array[h].fast_match())
-      __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri );  // i = h if (key >= array[h].fast_match())
-    }
-    else {
-      Label end_of_if;
-      __ br( Assembler::less, true, Assembler::pt, end_of_if );
-      __ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh
-      __ mov( Rh, Ri );            // else i = h
-      __ bind(end_of_if);          // }
-    }
+    __ movcc( Assembler::less,         false, Assembler::icc, Rh, Rj );  // j = h if (key <  array[h].fast_match())
+    __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri );  // i = h if (key >= array[h].fast_match())
 
     // while (i+1 < j)
     __ bind( entry );
@@ -3418,9 +3385,7 @@
     // has been allocated.
     __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
 
-    __ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue,
-      VM_Version::v9_instructions_work() ? NULL :
-      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+    __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);
 
     // if someone beat us on the allocation, try again, otherwise continue
     __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
@@ -3701,14 +3666,7 @@
 
     __ verify_oop(O4);          // verify each monitor's oop
     __ tst(O4); // is this entry unused?
-    if (VM_Version::v9_instructions_work())
-      __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
-    else {
-      Label L;
-      __ br( Assembler::zero, true, Assembler::pn, L );
-      __ delayed()->mov(O3, O1); // rememeber this one if match
-      __ bind(L);
-    }
+    __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
 
     __ cmp(O4, O0); // check if current entry is for same object
     __ brx( Assembler::equal, false, Assembler::pn, exit );
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/vm_version_sparc.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -75,23 +75,14 @@
     FLAG_SET_DEFAULT(AllocatePrefetchStyle, 1);
   }
 
-  if (has_v9()) {
-    assert(ArraycopySrcPrefetchDistance < 4096, "invalid value");
-    if (ArraycopySrcPrefetchDistance >= 4096)
-      ArraycopySrcPrefetchDistance = 4064;
-    assert(ArraycopyDstPrefetchDistance < 4096, "invalid value");
-    if (ArraycopyDstPrefetchDistance >= 4096)
-      ArraycopyDstPrefetchDistance = 4064;
-  } else {
-    if (ArraycopySrcPrefetchDistance > 0) {
-      warning("prefetch instructions are not available on this CPU");
-      FLAG_SET_DEFAULT(ArraycopySrcPrefetchDistance, 0);
-    }
-    if (ArraycopyDstPrefetchDistance > 0) {
-      warning("prefetch instructions are not available on this CPU");
-      FLAG_SET_DEFAULT(ArraycopyDstPrefetchDistance, 0);
-    }
-  }
+  guarantee(VM_Version::has_v9(), "only SPARC v9 is supported");
+
+  assert(ArraycopySrcPrefetchDistance < 4096, "invalid value");
+  if (ArraycopySrcPrefetchDistance >= 4096)
+    ArraycopySrcPrefetchDistance = 4064;
+  assert(ArraycopyDstPrefetchDistance < 4096, "invalid value");
+  if (ArraycopyDstPrefetchDistance >= 4096)
+    ArraycopyDstPrefetchDistance = 4064;
 
   UseSSE = 0; // Only on x86 and x64
 
--- a/src/cpu/sparc/vm/vm_version_sparc.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/sparc/vm/vm_version_sparc.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -177,10 +177,6 @@
     return AllocatePrefetchDistance > 0 ? AllocatePrefetchStyle : 0;
   }
 
-  // Legacy
-  static bool v8_instructions_work() { return has_v8() && !has_v9(); }
-  static bool v9_instructions_work() { return has_v9(); }
-
   // Assembler testing
   static void allow_all();
   static void revert();
--- a/src/cpu/x86/vm/assembler_x86.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1673,6 +1673,11 @@
   emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66);
 }
 
+void Assembler::movdqa(XMMRegister dst, Address src) {
+  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66);
+}
+
 void Assembler::movdqu(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
@@ -2286,6 +2291,38 @@
   emit_int8(imm8);
 }
 
+void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
+  assert(VM_Version::supports_sse4_1(), "");
+  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, false);
+  emit_int8(0x16);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8(imm8);
+}
+
+void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
+  assert(VM_Version::supports_sse4_1(), "");
+  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, true);
+  emit_int8(0x16);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8(imm8);
+}
+
+void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
+  assert(VM_Version::supports_sse4_1(), "");
+  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, false);
+  emit_int8(0x22);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8(imm8);
+}
+
+void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
+  assert(VM_Version::supports_sse4_1(), "");
+  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, true);
+  emit_int8(0x22);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8(imm8);
+}
+
 void Assembler::pmovzxbw(XMMRegister dst, Address src) {
   assert(VM_Version::supports_sse4_1(), "");
   InstructionMark im(this);
@@ -3691,6 +3728,16 @@
   emit_int8((unsigned char)(0xC0 | encode));
 }
 
+// Carry-Less Multiplication Quadword
+void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
+  assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
+  bool vector256 = false;
+  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
+  emit_int8(0x44);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8((unsigned char)mask);
+}
+
 void Assembler::vzeroupper() {
   assert(VM_Version::supports_avx(), "");
   (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
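
Note: vpclmulqdq added above is the carry-less (GF(2)[x]) multiply used by the CRC32 work elsewhere in this changeset. What the operation computes, reduced to scalars (the immediate's quadword selection is not modeled; sketch only):

    #include <cstdint>

    // Carry-less multiply of two 64-bit polynomials: shift-and-XOR instead of
    // shift-and-add. The full product is at most 127 bits, returned as hi:lo.
    static void clmul64_sketch(uint64_t a, uint64_t b, uint64_t* lo, uint64_t* hi) {
      *lo = 0;
      *hi = 0;
      for (int i = 0; i < 64; i++) {
        if ((b >> i) & 1) {
          *lo ^= a << i;                      // low 64 bits of a * x^i
          if (i != 0) *hi ^= a >> (64 - i);   // bits carried past bit 63
        }
      }
    }
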
--- a/src/cpu/x86/vm/assembler_x86.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/assembler_x86.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1266,6 +1266,7 @@
 
   // Move Aligned Double Quadword
   void movdqa(XMMRegister dst, XMMRegister src);
+  void movdqa(XMMRegister dst, Address src);
 
   // Move Unaligned Double Quadword
   void movdqu(Address     dst, XMMRegister src);
@@ -1404,6 +1405,14 @@
   void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
   void pcmpestri(XMMRegister xmm1, Address src, int imm8);
 
+  // SSE 4.1 extract
+  void pextrd(Register dst, XMMRegister src, int imm8);
+  void pextrq(Register dst, XMMRegister src, int imm8);
+
+  // SSE 4.1 insert
+  void pinsrd(XMMRegister dst, Register src, int imm8);
+  void pinsrq(XMMRegister dst, Register src, int imm8);
+
   // SSE4.1 packed move
   void pmovzxbw(XMMRegister dst, XMMRegister src);
   void pmovzxbw(XMMRegister dst, Address src);
@@ -1764,6 +1773,9 @@
   // duplicate 4-bytes integer data from src into 8 locations in dest
   void vpbroadcastd(XMMRegister dst, XMMRegister src);
 
+  // Carry-Less Multiplication Quadword
+  void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
+
   // AVX instruction which is used to clear upper 128 bits of YMM registers and
   // to avoid transaction penalty between AVX and SSE states. There is no
   // penalty if legacy SSE instructions are encoded using VEX prefix because
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3512,6 +3512,22 @@
   __ bind(*stub->continuation());
 }
 
+void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
+  assert(op->crc()->is_single_cpu(),  "crc must be register");
+  assert(op->val()->is_single_cpu(),  "byte value must be register");
+  assert(op->result_opr()->is_single_cpu(), "result must be register");
+  Register crc = op->crc()->as_register();
+  Register val = op->val()->as_register();
+  Register res = op->result_opr()->as_register();
+
+  assert_different_registers(val, crc, res);
+
+  __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
+  __ notl(crc); // ~crc
+  __ update_byte_crc32(crc, val, res);
+  __ notl(crc); // ~crc
+  __ mov(res, crc);
+}
 
 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
   Register obj = op->obj_opr()->as_register();  // may not be an oop
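
Note on emit_updatecrc32 above: the byte update is the classic reflected table-driven CRC-32 step, with the crc complemented on the way in and out (java.util.zip.CRC32 keeps the un-complemented value between calls). A sketch of the equivalent scalar logic; the polynomial constant 0xEDB88320 is the usual zlib/CRC-32 value and is an assumption here, it does not appear in the patch:

    #include <cstdint>

    static uint32_t crc_table_sketch[256];

    // Build the 256-entry table that StubRoutines::crc_table_addr() points at
    // in spirit (reflected CRC-32 polynomial 0xEDB88320, assumed as noted above).
    static void init_crc_table_sketch() {
      for (uint32_t i = 0; i < 256; i++) {
        uint32_t c = i;
        for (int k = 0; k < 8; k++)
          c = (c & 1) ? 0xEDB88320u ^ (c >> 1) : (c >> 1);
        crc_table_sketch[i] = c;
      }
    }

    // One byte step, matching the notl / update_byte_crc32 / notl framing.
    static uint32_t update_crc32_sketch(uint32_t crc, uint8_t val) {
      crc = ~crc;                                               // notl(crc)
      crc = crc_table_sketch[(crc ^ val) & 0xFF] ^ (crc >> 8);  // table lookup step
      return ~crc;                                              // notl(crc)
    }
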
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -932,6 +932,81 @@
   __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
 }
 
+void LIRGenerator::do_update_CRC32(Intrinsic* x) {
+  assert(UseCRC32Intrinsics, "need AVX and LCMUL instructions support");
+  // Make all state_for calls early since they can emit code
+  LIR_Opr result = rlock_result(x);
+  int flags = 0;
+  switch (x->id()) {
+    case vmIntrinsics::_updateCRC32: {
+      LIRItem crc(x->argument_at(0), this);
+      LIRItem val(x->argument_at(1), this);
+      crc.load_item();
+      val.load_item();
+      __ update_crc32(crc.result(), val.result(), result);
+      break;
+    }
+    case vmIntrinsics::_updateBytesCRC32:
+    case vmIntrinsics::_updateByteBufferCRC32: {
+      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);
+
+      LIRItem crc(x->argument_at(0), this);
+      LIRItem buf(x->argument_at(1), this);
+      LIRItem off(x->argument_at(2), this);
+      LIRItem len(x->argument_at(3), this);
+      buf.load_item();
+      off.load_nonconstant();
+
+      LIR_Opr index = off.result();
+      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
+      if(off.result()->is_constant()) {
+        index = LIR_OprFact::illegalOpr;
+       offset += off.result()->as_jint();
+      }
+      LIR_Opr base_op = buf.result();
+
+#ifndef _LP64
+      if (!is_updateBytes) { // long b raw address
+         base_op = new_register(T_INT);
+         __ convert(Bytecodes::_l2i, buf.result(), base_op);
+      }
+#else
+      if (index->is_valid()) {
+        LIR_Opr tmp = new_register(T_LONG);
+        __ convert(Bytecodes::_i2l, index, tmp);
+        index = tmp;
+      }
+#endif
+
+      LIR_Address* a = new LIR_Address(base_op,
+                                       index,
+                                       LIR_Address::times_1,
+                                       offset,
+                                       T_BYTE);
+      BasicTypeList signature(3);
+      signature.append(T_INT);
+      signature.append(T_ADDRESS);
+      signature.append(T_INT);
+      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
+      const LIR_Opr result_reg = result_register_for(x->type());
+
+      LIR_Opr addr = new_pointer_register();
+      __ leal(LIR_OprFact::address(a), addr);
+
+      crc.load_item_force(cc->at(0));
+      __ move(addr, cc->at(1));
+      len.load_item_force(cc->at(2));
+
+      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
+      __ move(result_reg, result);
+
+      break;
+    }
+    default: {
+      ShouldNotReachHere();
+    }
+  }
+}
 
 // _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
 // _i2b, _i2c, _i2s
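
The address arithmetic in do_update_CRC32 above reduces to the following sketch: for updateBytes the source lies inside a Java byte[], so the array header size is added to the base, while for updateByteBuffer the long argument already is a raw address. The helper name crc32_source_address and the parameter header_size are illustrative; header_size stands for arrayOopDesc::base_offset_in_bytes(T_BYTE).

    #include <stdint.h>

    // Sketch of the buffer address passed to the runtime leaf call.
    static const uint8_t* crc32_source_address(const void* base, long off,
                                               bool is_updateBytes,
                                               int header_size) {
      const uint8_t* p = static_cast<const uint8_t*>(base);
      if (is_updateBytes) {
        p += header_size;   // skip the byte[] object header
      }
      return p + off;       // then add the caller-supplied offset
    }
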
--- a/src/cpu/x86/vm/c1_globals_x86.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/c1_globals_x86.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -50,8 +50,9 @@
 define_pd_global(intx, ReservedCodeCacheSize,        32*M );
 define_pd_global(bool, ProfileInterpreter,           false);
 define_pd_global(intx, CodeCacheExpansionSize,       32*K );
-define_pd_global(uintx,CodeCacheMinBlockLength,      1);
-define_pd_global(uintx,MetaspaceSize,                     12*M );
+define_pd_global(uintx, CodeCacheMinBlockLength,     1);
+define_pd_global(uintx, CodeCacheMinimumUseSpace,    400*K);
+define_pd_global(uintx, MetaspaceSize,               12*M );
 define_pd_global(bool, NeverActAsServerClassMachine, true );
 define_pd_global(uint64_t,MaxRAM,                    1ULL*G);
 define_pd_global(bool, CICompileOSR,                 true );
--- a/src/cpu/x86/vm/c2_globals_x86.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/c2_globals_x86.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -85,7 +85,8 @@
 define_pd_global(bool, OptoBundling,                 false);
 
 define_pd_global(intx, ReservedCodeCacheSize,        48*M);
-define_pd_global(uintx,CodeCacheMinBlockLength,      4);
+define_pd_global(uintx, CodeCacheMinBlockLength,     4);
+define_pd_global(uintx, CodeCacheMinimumUseSpace,    400*K);
 
 // Heap related flags
 define_pd_global(uintx,MetaspaceSize,    ScaleForWordSize(16*M));
--- a/src/cpu/x86/vm/frame_x86.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/frame_x86.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -587,7 +587,7 @@
 
   // validate ConstantPoolCache*
   ConstantPoolCache* cp = *interpreter_frame_cache_addr();
-  if (cp == NULL || !cp->is_metadata()) return false;
+  if (cp == NULL || !cp->is_metaspace_object()) return false;
 
   // validate locals
 
--- a/src/cpu/x86/vm/globals_x86.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/globals_x86.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,7 +55,7 @@
 define_pd_global(intx, InlineFrequencyCount,     100);
 define_pd_global(intx, InlineSmallCode,          1000);
 
-define_pd_global(intx, StackYellowPages, 2);
+define_pd_global(intx, StackYellowPages, NOT_WINDOWS(2) WINDOWS_ONLY(3));
 define_pd_global(intx, StackRedPages, 1);
 #ifdef AMD64
 // Very large C++ stack frames using solaris-amd64 optimized builds
@@ -96,6 +96,9 @@
   product(intx, UseAVX, 99,                                                 \
           "Highest supported AVX instructions set on x86/x64")              \
                                                                             \
+  product(bool, UseCLMUL, false,                                            \
+          "Control whether CLMUL instructions can be used on x86/x64")      \
+                                                                            \
   diagnostic(bool, UseIncDec, true,                                         \
           "Use INC, DEC instructions on x86")                               \
                                                                             \
--- a/src/cpu/x86/vm/interpreterGenerator_x86.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/interpreterGenerator_x86.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,8 @@
   address generate_empty_entry(void);
   address generate_accessor_entry(void);
   address generate_Reference_get_entry();
+  address generate_CRC32_update_entry();
+  address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
   void lock_method(void);
   void generate_stack_overflow_check(void);
 
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2794,6 +2794,15 @@
   }
 }
 
+void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src) {
+  if (reachable(src)) {
+    Assembler::movdqa(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    Assembler::movdqa(dst, Address(rscratch1, 0));
+  }
+}
+
 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
   if (reachable(src)) {
     Assembler::movsd(dst, as_Address(src));
@@ -6388,6 +6397,193 @@
   bind(L_done);
 }
 
+/**
+ * Emits code to update CRC-32 with a byte value according to constants in table
+ *
+ * @param [in,out] crc   Register containing the crc.
+ * @param [in]     val   Register containing the byte to fold into the CRC.
+ * @param [in]     table Register containing the table of crc constants.
+ *
+ * uint32_t crc;
+ * val = crc_table[(val ^ crc) & 0xFF];
+ * crc = val ^ (crc >> 8);
+ *
+ */
+void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
+  xorl(val, crc);
+  andl(val, 0xFF);
+  shrl(crc, 8); // unsigned shift
+  xorl(crc, Address(table, val, Address::times_4, 0));
+}
+
+/**
+ * Fold 128-bit data chunk
+ */
+void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
+  vpclmulhdq(xtmp, xK, xcrc); // [123:64]
+  vpclmulldq(xcrc, xK, xcrc); // [63:0]
+  vpxor(xcrc, xcrc, Address(buf, offset), false /* vector256 */);
+  pxor(xcrc, xtmp);
+}
+
+void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
+  vpclmulhdq(xtmp, xK, xcrc);
+  vpclmulldq(xcrc, xK, xcrc);
+  pxor(xcrc, xbuf);
+  pxor(xcrc, xtmp);
+}
+
+/**
+ * 8-bit folds to compute 32-bit CRC
+ *
+ * uint64_t xcrc;
+ * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8);
+ */
+void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) {
+  movdl(tmp, xcrc);
+  andl(tmp, 0xFF);
+  movdl(xtmp, Address(table, tmp, Address::times_4, 0));
+  psrldq(xcrc, 1); // unsigned shift one byte
+  pxor(xcrc, xtmp);
+}
+
+/**
+ * uint32_t crc;
+ * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
+ */
+void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
+  movl(tmp, crc);
+  andl(tmp, 0xFF);
+  shrl(crc, 8);
+  xorl(crc, Address(table, tmp, Address::times_4, 0));
+}
+
+/**
+ * @param crc   register containing existing CRC (32-bit)
+ * @param buf   register pointing to input byte buffer (byte*)
+ * @param len   register containing number of bytes
+ * @param table register that will contain address of CRC table
+ * @param tmp   scratch register
+ */
+void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) {
+  assert_different_registers(crc, buf, len, table, tmp, rax);
+
+  Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
+  Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
+
+  lea(table, ExternalAddress(StubRoutines::crc_table_addr()));
+  notl(crc); // ~crc
+  cmpl(len, 16);
+  jcc(Assembler::less, L_tail);
+
+  // Align buffer to 16 bytes
+  movl(tmp, buf);
+  andl(tmp, 0xF);
+  jccb(Assembler::zero, L_aligned);
+  subl(tmp,  16);
+  addl(len, tmp);
+
+  align(4);
+  BIND(L_align_loop);
+  movsbl(rax, Address(buf, 0)); // load byte with sign extension
+  update_byte_crc32(crc, rax, table);
+  increment(buf);
+  incrementl(tmp);
+  jccb(Assembler::less, L_align_loop);
+
+  BIND(L_aligned);
+  movl(tmp, len); // save
+  shrl(len, 4);
+  jcc(Assembler::zero, L_tail_restore);
+
+  // Fold crc into first bytes of vector
+  movdqa(xmm1, Address(buf, 0));
+  movdl(rax, xmm1);
+  xorl(crc, rax);
+  pinsrd(xmm1, crc, 0);
+  addptr(buf, 16);
+  subl(len, 4); // len > 0
+  jcc(Assembler::less, L_fold_tail);
+
+  movdqa(xmm2, Address(buf,  0));
+  movdqa(xmm3, Address(buf, 16));
+  movdqa(xmm4, Address(buf, 32));
+  addptr(buf, 48);
+  subl(len, 3);
+  jcc(Assembler::lessEqual, L_fold_512b);
+
+  // Fold total 512 bits of polynomial on each iteration,
+  // 128 bits for each of 4 parallel streams.
+  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32));
+
+  align(32);
+  BIND(L_fold_512b_loop);
+  fold_128bit_crc32(xmm1, xmm0, xmm5, buf,  0);
+  fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
+  fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32);
+  fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48);
+  addptr(buf, 64);
+  subl(len, 4);
+  jcc(Assembler::greater, L_fold_512b_loop);
+
+  // Fold 512 bits to 128 bits.
+  BIND(L_fold_512b);
+  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16));
+  fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2);
+  fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3);
+  fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4);
+
+  // Fold the remaining 128-bit data chunks
+  BIND(L_fold_tail);
+  addl(len, 3);
+  jccb(Assembler::lessEqual, L_fold_128b);
+  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16));
+
+  BIND(L_fold_tail_loop);
+  fold_128bit_crc32(xmm1, xmm0, xmm5, buf,  0);
+  addptr(buf, 16);
+  decrementl(len);
+  jccb(Assembler::greater, L_fold_tail_loop);
+
+  // Fold 128 bits in xmm1 down into 32 bits in crc register.
+  BIND(L_fold_128b);
+  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()));
+  vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
+  vpand(xmm3, xmm0, xmm2, false /* vector256 */);
+  vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
+  psrldq(xmm1, 8);
+  psrldq(xmm2, 4);
+  pxor(xmm0, xmm1);
+  pxor(xmm0, xmm2);
+
+  // 8 8-bit folds to compute 32-bit CRC.
+  for (int j = 0; j < 4; j++) {
+    fold_8bit_crc32(xmm0, table, xmm1, rax);
+  }
+  movdl(crc, xmm0); // mov 32 bits to general register
+  for (int j = 0; j < 4; j++) {
+    fold_8bit_crc32(crc, table, rax);
+  }
+
+  BIND(L_tail_restore);
+  movl(len, tmp); // restore
+  BIND(L_tail);
+  andl(len, 0xf);
+  jccb(Assembler::zero, L_exit);
+
+  // Fold the remaining bytes
+  align(4);
+  BIND(L_tail_loop);
+  movsbl(rax, Address(buf, 0)); // load byte with sign extension
+  update_byte_crc32(crc, rax, table);
+  increment(buf);
+  decrementl(len);
+  jccb(Assembler::greater, L_tail_loop);
+
+  BIND(L_exit);
+  notl(crc); // ~crc
+}
+
 #undef BIND
 #undef BLOCK_COMMENT
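
Independent of the CLMUL folding above, the value kernel_crc32 produces is the ordinary table-driven CRC-32 of the buffer. A minimal scalar reference, assuming the same zlib-style crc_table (the function name crc32_reference is illustrative only):

    #include <stddef.h>
    #include <stdint.h>

    // Scalar reference for what kernel_crc32 computes; the two bit-flips
    // correspond to the notl(crc) at entry and at L_exit above.
    uint32_t crc32_reference(uint32_t crc, const uint8_t* buf, size_t len,
                             const uint32_t* crc_table) {
      crc = ~crc;
      while (len--) {
        crc = crc_table[(crc ^ *buf++) & 0xFF] ^ (crc >> 8);
      }
      return ~crc;
    }
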
 
--- a/src/cpu/x86/vm/macroAssembler_x86.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/macroAssembler_x86.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -899,6 +899,11 @@
   void movdqu(XMMRegister dst, XMMRegister src)   { Assembler::movdqu(dst, src); }
   void movdqu(XMMRegister dst, AddressLiteral src);
 
+  // Move Aligned Double Quadword
+  void movdqa(XMMRegister dst, Address src)       { Assembler::movdqa(dst, src); }
+  void movdqa(XMMRegister dst, XMMRegister src)   { Assembler::movdqa(dst, src); }
+  void movdqa(XMMRegister dst, AddressLiteral src);
+
   void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
   void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
   void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
@@ -1027,6 +1032,16 @@
       Assembler::vinsertf128h(dst, nds, src);
   }
 
+  // Carry-Less Multiplication Quadword
+  void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
+    // 0x00 - multiply lower 64 bits [0:63]
+    Assembler::vpclmulqdq(dst, nds, src, 0x00);
+  }
+  void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
+    // 0x11 - multiply upper 64 bits [64:127]
+    Assembler::vpclmulqdq(dst, nds, src, 0x11);
+  }
+
   // Data
 
   void cmov32( Condition cc, Register dst, Address  src);
@@ -1143,6 +1158,16 @@
                         XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                         XMMRegister tmp4, Register tmp5, Register result);
 
+  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
+  void update_byte_crc32(Register crc, Register val, Register table);
+  void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
+  // Fold 128-bit data chunk
+  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
+  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
+  // Fold 8-bit data
+  void fold_8bit_crc32(Register crc, Register table, Register tmp);
+  void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
+
 #undef VIRTUAL
 
 };
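
As a reminder of what the PCLMULQDQ immediates above select (0x00 for the low quadwords, 0x11 for the high quadwords), a single lane performs a carry-less, i.e. XOR-based, multiply of two 64-bit operands. A plain C++ sketch of that operation (clmul64 is an illustrative name, not part of the source):

    #include <stdint.h>

    // Software model of one carry-less 64x64 -> 128-bit multiply.
    static void clmul64(uint64_t a, uint64_t b, uint64_t* lo, uint64_t* hi) {
      uint64_t l = 0, h = 0;
      for (int i = 0; i < 64; i++) {
        if ((b >> i) & 1) {
          l ^= a << i;                     // low 64 bits of the product
          if (i != 0) h ^= a >> (64 - i);  // bits spilling into the high half
        }
      }
      *lo = l;
      *hi = h;
    }
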
--- a/src/cpu/x86/vm/relocInfo_x86.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/relocInfo_x86.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -177,30 +177,6 @@
   return *pd_address_in_code();
 }
 
-int Relocation::pd_breakpoint_size() {
-  // minimum breakpoint size, in short words
-  return NativeIllegalInstruction::instruction_size / sizeof(short);
-}
-
-void Relocation::pd_swap_in_breakpoint(address x, short* instrs, int instrlen) {
-  Untested("pd_swap_in_breakpoint");
-  if (instrs != NULL) {
-    assert(instrlen * sizeof(short) == NativeIllegalInstruction::instruction_size, "enough instrlen in reloc. data");
-    for (int i = 0; i < instrlen; i++) {
-      instrs[i] = ((short*)x)[i];
-    }
-  }
-  NativeIllegalInstruction::insert(x);
-}
-
-
-void Relocation::pd_swap_out_breakpoint(address x, short* instrs, int instrlen) {
-  Untested("pd_swap_out_breakpoint");
-  assert(NativeIllegalInstruction::instruction_size == sizeof(short), "right address unit for update");
-  NativeInstruction* ni = nativeInstruction_at(x);
-  *(short*)ni->addr_at(0) = instrs[0];
-}
-
 void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
 #ifdef _LP64
   if (!Assembler::is_polling_page_far()) {
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1429,6 +1429,8 @@
   assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
          "possible collision");
 
+  __ block_comment("unpack_array_argument {");
+
   // Pass the length, ptr pair
   Label is_null, done;
   VMRegPair tmp;
@@ -1453,6 +1455,8 @@
   move_ptr(masm, tmp, body_arg);
   move32_64(masm, tmp, length_arg);
   __ bind(done);
+
+  __ block_comment("} unpack_array_argument");
 }
 
 
@@ -2170,27 +2174,34 @@
     }
   }
 
-  // point c_arg at the first arg that is already loaded in case we
-  // need to spill before we call out
-  int c_arg = total_c_args - total_in_args;
+  int c_arg;
 
   // Pre-load a static method's oop into r14.  Used both by locking code and
   // the normal JNI call code.
-  if (method->is_static() && !is_critical_native) {
-
-    //  load oop into a register
-    __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
-
-    // Now handlize the static class mirror it's known not-null.
-    __ movptr(Address(rsp, klass_offset), oop_handle_reg);
-    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
-
-    // Now get the handle
-    __ lea(oop_handle_reg, Address(rsp, klass_offset));
-    // store the klass handle as second argument
-    __ movptr(c_rarg1, oop_handle_reg);
-    // and protect the arg if we must spill
-    c_arg--;
+  if (!is_critical_native) {
+    // point c_arg at the first arg that is already loaded in case we
+    // need to spill before we call out
+    c_arg = total_c_args - total_in_args;
+
+    if (method->is_static()) {
+
+      //  load oop into a register
+      __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
+
+      // Now handlize the static class mirror it's known not-null.
+      __ movptr(Address(rsp, klass_offset), oop_handle_reg);
+      map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
+
+      // Now get the handle
+      __ lea(oop_handle_reg, Address(rsp, klass_offset));
+      // store the klass handle as second argument
+      __ movptr(c_rarg1, oop_handle_reg);
+      // and protect the arg if we must spill
+      c_arg--;
+    }
+  } else {
+    // For JNI critical methods we need to save all registers in save_args.
+    c_arg = 0;
   }
 
   // Change state to native (we save the return address in the thread, since it might not
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -83,7 +83,7 @@
  private:
 
 #ifdef PRODUCT
-#define inc_counter_np(counter) (0)
+#define inc_counter_np(counter) ((void)0)
 #else
   void inc_counter_np_(int& counter) {
     __ incrementl(ExternalAddress((address)&counter));
@@ -2713,6 +2713,59 @@
     return start;
   }
 
+  /**
+   *  Arguments:
+   *
+   * Inputs:
+   *   rsp(4)   - int crc
+   *   rsp(8)   - byte* buf
+   *   rsp(12)  - int length
+   *
+   * Output:
+   *       rax   - int crc result
+   */
+  address generate_updateBytesCRC32() {
+    assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");
+
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");
+
+    address start = __ pc();
+
+    const Register crc   = rdx;  // crc
+    const Register buf   = rsi;  // source java byte array address
+    const Register len   = rcx;  // length
+    const Register table = rdi;  // crc_table address (reuse register)
+    const Register tmp   = rbx;
+    assert_different_registers(crc, buf, len, table, tmp, rax);
+
+    BLOCK_COMMENT("Entry:");
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+    __ push(rsi);
+    __ push(rdi);
+    __ push(rbx);
+
+    Address crc_arg(rbp, 8 + 0);
+    Address buf_arg(rbp, 8 + 4);
+    Address len_arg(rbp, 8 + 8);
+
+    // Load up:
+    __ movl(crc,   crc_arg);
+    __ movptr(buf, buf_arg);
+    __ movl(len,   len_arg);
+
+    __ kernel_crc32(crc, buf, len, table, tmp);
+
+    __ movl(rax, crc);
+    __ pop(rbx);
+    __ pop(rdi);
+    __ pop(rsi);
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+    return start;
+  }
+
 
  public:
   // Information about frame layout at time of blocking runtime call.
@@ -2887,6 +2940,12 @@
 
     // Build this early so it's available for the interpreter
     StubRoutines::_throw_StackOverflowError_entry          = generate_throw_exception("StackOverflowError throw_exception",           CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
+
+    if (UseCRC32Intrinsics) {
+      // set table address before stub generation which uses it
+      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
+      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
+    }
   }
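
The 32-bit stub above is called like a cdecl C function: after the enter() prologue, the incoming arguments sit at [ebp+8] (crc), [ebp+12] (buf) and [ebp+16] (len), which is what the crc_arg/buf_arg/len_arg Addresses encode as rbp + 8 + {0,4,8}, and the result comes back in eax. A hedged sketch of the equivalent C-level prototype (the typedef name is illustrative, not from the source):

    // Illustrative C-level view of the generated 32-bit stub.
    typedef int (*updateBytesCRC32_stub_t)(int crc, const unsigned char* buf,
                                           int len);  // result returned in eax
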
 
 
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -81,7 +81,7 @@
  private:
 
 #ifdef PRODUCT
-#define inc_counter_np(counter) (0)
+#define inc_counter_np(counter) ((void)0)
 #else
   void inc_counter_np_(int& counter) {
     // This can destroy rscratch1 if counter is far from the code cache
@@ -3584,7 +3584,45 @@
     return start;
   }
 
-
+  /**
+   *  Arguments:
+   *
+   * Inputs:
+   *   c_rarg0   - int crc
+   *   c_rarg1   - byte* buf
+   *   c_rarg2   - int length
+   *
+   * Output:
+   *       rax   - int crc result
+   */
+  address generate_updateBytesCRC32() {
+    assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");
+
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");
+
+    address start = __ pc();
+    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
+    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
+    // rscratch1: r10
+    const Register crc   = c_rarg0;  // crc
+    const Register buf   = c_rarg1;  // source java byte array address
+    const Register len   = c_rarg2;  // length
+    const Register table = c_rarg3;  // crc_table address (reuse register)
+    const Register tmp   = r11;
+    assert_different_registers(crc, buf, len, table, tmp, rax);
+
+    BLOCK_COMMENT("Entry:");
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+
+    __ kernel_crc32(crc, buf, len, table, tmp);
+
+    __ movl(rax, crc);
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+    return start;
+  }
 
 #undef __
 #define __ masm->
@@ -3736,6 +3774,11 @@
                                CAST_FROM_FN_PTR(address,
                                                 SharedRuntime::
                                                 throw_StackOverflowError));
+    if (UseCRC32Intrinsics) {
+      // set table address before stub generation which uses it
+      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
+      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
+    }
   }
 
   void generate_all() {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/stubRoutines_x86.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
+
+// Implementation of the platform-specific part of StubRoutines - for
+// a description of how to extend it, see the stubRoutines.hpp file.
+
+address StubRoutines::x86::_verify_mxcsr_entry = NULL;
+address StubRoutines::x86::_key_shuffle_mask_addr = NULL;
+
+uint64_t StubRoutines::x86::_crc_by128_masks[] =
+{
+  /* The fields in this structure are arranged so that they can be
+   * picked up two at a time with 128-bit loads.
+   *
+   * Because of the flipped bit order for this CRC polynomial,
+   * the constant for X**N is left-shifted by 1.  This is because
+   * a 64 x 64 polynomial multiply produces a 127-bit result
+   * but the highest term is always aligned to bit 0 in the container.
+   * Pre-shifting by one fixes this, at the cost of potentially making
+   * the 32-bit constant no longer fit in a 32-bit container (thus the
+   * use of uint64_t, though this is also the size used by the carry-
+   * less multiply instruction).
+   *
+   * In addition, the flipped bit order and highest-term-at-least-bit
+   * multiply changes the constants used.  The 96-bit result will be
+   * aligned to the high-term end of the target 128-bit container,
+   * not the low-term end; that is, instead of a 512-bit or 576-bit fold,
+   * it is a 480 (=512-32) or 544 (=512+64-32) bit fold.
+   *
+   * This causes additional problems in the 128-to-64-bit reduction; see the
+   * code for details.  By storing a mask in the otherwise unused half of
+   * a 128-bit constant, bits can be cleared before multiplication without
+   * storing and reloading.  Note that staying on a 128-bit datapath means
+   * that some data is uselessly stored and some unused data is intersected
+   * with an irrelevant constant.
+   */
+
+  ((uint64_t) 0xffffffffUL),     /* low  of K_M_64    */
+  ((uint64_t) 0xb1e6b092U << 1), /* high of K_M_64    */
+  ((uint64_t) 0xba8ccbe8U << 1), /* low  of K_160_96  */
+  ((uint64_t) 0x6655004fU << 1), /* high of K_160_96  */
+  ((uint64_t) 0xaa2215eaU << 1), /* low  of K_544_480 */
+  ((uint64_t) 0xe3720acbU << 1)  /* high of K_544_480 */
+};
+
+/**
+ *  crc_table[] from jdk/src/share/native/java/util/zip/zlib-1.2.5/crc32.h
+ */
+juint StubRoutines::x86::_crc_table[] =
+{
+    0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL,
+    0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL,
+    0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL,
+    0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL,
+    0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL,
+    0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL,
+    0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL,
+    0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL,
+    0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL,
+    0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL,
+    0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL,
+    0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL,
+    0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL,
+    0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL,
+    0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL,
+    0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL,
+    0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL,
+    0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL,
+    0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL,
+    0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL,
+    0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL,
+    0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL,
+    0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL,
+    0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL,
+    0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL,
+    0x2eb40d81UL, 0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL,
+    0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL,
+    0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL,
+    0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL,
+    0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL,
+    0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL,
+    0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL,
+    0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL,
+    0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL,
+    0x36034af6UL, 0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL,
+    0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL,
+    0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL,
+    0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL,
+    0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL,
+    0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL,
+    0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL,
+    0xe5d5be0dUL, 0x7cdcefb7UL, 0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL,
+    0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL,
+    0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL,
+    0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL,
+    0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL,
+    0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL,
+    0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL,
+    0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL,
+    0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL,
+    0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL,
+    0x2d02ef8dUL
+};
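
The _crc_table contents above are the standard zlib table for the reflected polynomial 0xEDB88320, so they can be regenerated and spot-checked with a small loop like the following sketch (make_crc_table is an illustrative name):

    #include <stdint.h>

    // Regenerate the zlib-style CRC-32 table: entry i is the CRC state
    // reached after processing the single byte i, using the reflected
    // polynomial 0xEDB88320.
    static void make_crc_table(uint32_t table[256]) {
      for (uint32_t i = 0; i < 256; i++) {
        uint32_t c = i;
        for (int k = 0; k < 8; k++) {
          c = (c & 1) ? 0xEDB88320UL ^ (c >> 1) : (c >> 1);
        }
        table[i] = c;   // e.g. table[1] == 0x77073096, as in the data above
      }
    }
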
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/stubRoutines_x86.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_X86_VM_STUBROUTINES_X86_HPP
+#define CPU_X86_VM_STUBROUTINES_X86_HPP
+
+// This file holds the platform specific parts of the StubRoutines
+// definition. See stubRoutines.hpp for a description on how to
+// extend it.
+
+ private:
+  static address _verify_mxcsr_entry;
+  // shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers
+  static address _key_shuffle_mask_addr;
+  // masks and table for CRC32
+  static uint64_t _crc_by128_masks[];
+  static juint    _crc_table[];
+
+ public:
+  static address verify_mxcsr_entry()    { return _verify_mxcsr_entry; }
+  static address key_shuffle_mask_addr() { return _key_shuffle_mask_addr; }
+  static address crc_by128_masks_addr()  { return (address)_crc_by128_masks; }
+
+#endif // CPU_X86_VM_STUBROUTINES_X86_HPP
--- a/src/cpu/x86/vm/stubRoutines_x86_32.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/stubRoutines_x86_32.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,4 @@
 // Implementation of the platform-specific part of StubRoutines - for
 // a description of how to extend it, see the stubRoutines.hpp file.
 
-address StubRoutines::x86::_verify_mxcsr_entry         = NULL;
 address StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = NULL;
-address StubRoutines::x86::_key_shuffle_mask_addr = NULL;
--- a/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,15 +39,12 @@
  friend class VMStructs;
 
  private:
-  static address _verify_mxcsr_entry;
   static address _verify_fpu_cntrl_wrd_entry;
-  // shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers
-  static address _key_shuffle_mask_addr;
 
  public:
-  static address verify_mxcsr_entry()                        { return _verify_mxcsr_entry; }
   static address verify_fpu_cntrl_wrd_entry()                { return _verify_fpu_cntrl_wrd_entry; }
-  static address key_shuffle_mask_addr()                     { return _key_shuffle_mask_addr; }
+
+# include "stubRoutines_x86.hpp"
 
 };
 
--- a/src/cpu/x86/vm/stubRoutines_x86_64.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/stubRoutines_x86_64.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,8 +34,6 @@
 address StubRoutines::x86::_get_previous_fp_entry = NULL;
 address StubRoutines::x86::_get_previous_sp_entry = NULL;
 
-address StubRoutines::x86::_verify_mxcsr_entry = NULL;
-
 address StubRoutines::x86::_f2i_fixup = NULL;
 address StubRoutines::x86::_f2l_fixup = NULL;
 address StubRoutines::x86::_d2i_fixup = NULL;
@@ -45,4 +43,3 @@
 address StubRoutines::x86::_double_sign_mask = NULL;
 address StubRoutines::x86::_double_sign_flip = NULL;
 address StubRoutines::x86::_mxcsr_std = NULL;
-address StubRoutines::x86::_key_shuffle_mask_addr = NULL;
--- a/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,6 @@
  private:
   static address _get_previous_fp_entry;
   static address _get_previous_sp_entry;
-  static address _verify_mxcsr_entry;
 
   static address _f2i_fixup;
   static address _f2l_fixup;
@@ -54,8 +53,6 @@
   static address _double_sign_mask;
   static address _double_sign_flip;
   static address _mxcsr_std;
-  // shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers
-  static address _key_shuffle_mask_addr;
 
  public:
 
@@ -69,11 +66,6 @@
     return _get_previous_sp_entry;
   }
 
-  static address verify_mxcsr_entry()
-  {
-    return _verify_mxcsr_entry;
-  }
-
   static address f2i_fixup()
   {
     return _f2i_fixup;
@@ -119,7 +111,7 @@
     return _mxcsr_std;
   }
 
-  static address key_shuffle_mask_addr()                     { return _key_shuffle_mask_addr; }
+# include "stubRoutines_x86.hpp"
 
 };
 
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -868,6 +868,120 @@
   return generate_accessor_entry();
 }
 
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.update(int crc, int b)
+ */
+address InterpreterGenerator::generate_CRC32_update_entry() {
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    // rbx: Method*
+    // rsi: senderSP must be preserved for slow path, set SP to it on fast path
+    // rdx: scratch
+    // rdi: scratch
+
+    Label slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    ExternalAddress state(SafepointSynchronize::address_of_state());
+    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
+             SafepointSynchronize::_not_synchronized);
+    __ jcc(Assembler::notEqual, slow_path);
+
+    // We don't generate a local frame and don't align the stack because
+    // we call stub code and there is no safepoint on this path.
+
+    // Load parameters
+    const Register crc = rax;  // crc
+    const Register val = rdx;  // source java byte value
+    const Register tbl = rdi;  // scratch
+
+    // Arguments are reversed on java expression stack
+    __ movl(val, Address(rsp,   wordSize)); // byte value
+    __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC
+
+    __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
+    __ notl(crc); // ~crc
+    __ update_byte_crc32(crc, val, tbl);
+    __ notl(crc); // ~crc
+    // result in rax
+
+    // _areturn
+    __ pop(rdi);                // get return address
+    __ mov(rsp, rsi);           // set sp to sender sp
+    __ jmp(rdi);
+
+    // generate a vanilla native entry as the slow path
+    __ bind(slow_path);
+
+    (void) generate_native_entry(false);
+
+    return entry;
+  }
+  return generate_native_entry(false);
+}
+
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
+ *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
+ */
+address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    // rbx: Method*
+    // rsi: senderSP must be preserved for slow path, set SP to it on fast path
+    // rdx: scratch
+    // rdi: scratch
+
+    Label slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    ExternalAddress state(SafepointSynchronize::address_of_state());
+    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
+             SafepointSynchronize::_not_synchronized);
+    __ jcc(Assembler::notEqual, slow_path);
+
+    // We don't generate a local frame and don't align the stack because
+    // we call stub code and there is no safepoint on this path.
+
+    // Load parameters
+    const Register crc = rax;  // crc
+    const Register buf = rdx;  // source java byte array address
+    const Register len = rdi;  // length
+
+    // Arguments are reversed on java expression stack
+    __ movl(len,   Address(rsp,   wordSize)); // Length
+    // Calculate address of start element
+    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
+      __ movptr(buf, Address(rsp, 3*wordSize)); // long buf
+      __ addptr(buf, Address(rsp, 2*wordSize)); // + offset
+      __ movl(crc,   Address(rsp, 5*wordSize)); // Initial CRC
+    } else {
+      __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
+      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
+      __ addptr(buf, Address(rsp, 2*wordSize)); // + offset
+      __ movl(crc,   Address(rsp, 4*wordSize)); // Initial CRC
+    }
+
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
+    // result in rax
+
+    // _areturn
+    __ pop(rdi);                // get return address
+    __ mov(rsp, rsi);           // set sp to sender sp
+    __ jmp(rdi);
+
+    // generate a vanilla native entry as the slow path
+    __ bind(slow_path);
+
+    (void) generate_native_entry(false);
+
+    return entry;
+  }
+  return generate_native_entry(false);
+}
+
 //
 // Interpreter stub for calling a native method. (asm interpreter)
 // This sets up a somewhat different looking stack for calling the native method
@@ -1501,15 +1615,16 @@
   // determine code generation flags
   bool synchronized = false;
   address entry_point = NULL;
+  InterpreterGenerator* ig_this = (InterpreterGenerator*)this;
 
   switch (kind) {
-    case Interpreter::zerolocals             :                                                                             break;
-    case Interpreter::zerolocals_synchronized: synchronized = true;                                                        break;
-    case Interpreter::native                 : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false);  break;
-    case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true);   break;
-    case Interpreter::empty                  : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry();        break;
-    case Interpreter::accessor               : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();     break;
-    case Interpreter::abstract               : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();     break;
+    case Interpreter::zerolocals             :                                                       break;
+    case Interpreter::zerolocals_synchronized: synchronized = true;                                  break;
+    case Interpreter::native                 : entry_point = ig_this->generate_native_entry(false);  break;
+    case Interpreter::native_synchronized    : entry_point = ig_this->generate_native_entry(true);   break;
+    case Interpreter::empty                  : entry_point = ig_this->generate_empty_entry();        break;
+    case Interpreter::accessor               : entry_point = ig_this->generate_accessor_entry();     break;
+    case Interpreter::abstract               : entry_point = ig_this->generate_abstract_entry();     break;
 
     case Interpreter::java_lang_math_sin     : // fall thru
     case Interpreter::java_lang_math_cos     : // fall thru
@@ -1519,9 +1634,15 @@
     case Interpreter::java_lang_math_log10   : // fall thru
     case Interpreter::java_lang_math_sqrt    : // fall thru
     case Interpreter::java_lang_math_pow     : // fall thru
-    case Interpreter::java_lang_math_exp     : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind);     break;
+    case Interpreter::java_lang_math_exp     : entry_point = ig_this->generate_math_entry(kind);      break;
     case Interpreter::java_lang_ref_reference_get
-                                             : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
+                                             : entry_point = ig_this->generate_Reference_get_entry(); break;
+    case Interpreter::java_util_zip_CRC32_update
+                                             : entry_point = ig_this->generate_CRC32_update_entry();  break;
+    case Interpreter::java_util_zip_CRC32_updateBytes
+                                             : // fall thru
+    case Interpreter::java_util_zip_CRC32_updateByteBuffer
+                                             : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
     default:
       fatal(err_msg("unexpected method kind: %d", kind));
       break;
@@ -1529,7 +1650,7 @@
 
   if (entry_point) return entry_point;
 
-  return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
+  return ig_this->generate_normal_entry(synchronized);
 
 }
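
Both CRC32 entries above read their arguments straight off the interpreter's expression stack, with the last Java argument nearest to rsp and a long occupying two slots. A comment-style sketch of the layout the wordSize offsets encode (derived from the loads in the code above):

    // Expression-stack layout assumed by the CRC32 entries:
    //
    //   CRC32.update(int crc, int b)
    //     [rsp + 1*wordSize]  b     (byte value)
    //     [rsp + 2*wordSize]  crc
    //
    //   CRC32.updateBytes(int crc, byte[] b, int off, int len)
    //     [rsp + 1*wordSize]  len
    //     [rsp + 2*wordSize]  off
    //     [rsp + 3*wordSize]  b     (array oop; header size added to reach data)
    //     [rsp + 4*wordSize]  crc
    //
    //   CRC32.updateByteBuffer(int crc, long addr, int off, int len)
    //     [rsp + 1*wordSize]  len
    //     [rsp + 2*wordSize]  off
    //     [rsp + 3*wordSize]  addr  (raw address, two slots)
    //     [rsp + 5*wordSize]  crc
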
 
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -840,6 +840,117 @@
   return generate_accessor_entry();
 }
 
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.update(int crc, int b)
+ */
+address InterpreterGenerator::generate_CRC32_update_entry() {
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    // rbx: Method*
+    // rsi: senderSP must be preserved for slow path, set SP to it on fast path
+    // rdx: scratch
+    // rdi: scratch
+
+    Label slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    ExternalAddress state(SafepointSynchronize::address_of_state());
+    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
+             SafepointSynchronize::_not_synchronized);
+    __ jcc(Assembler::notEqual, slow_path);
+
+    // We don't generate a local frame and don't align the stack because
+    // we call stub code and there is no safepoint on this path.
+
+    // Load parameters
+    const Register crc = rax;  // crc
+    const Register val = rdx;  // source java byte value
+    const Register tbl = rdi;  // scratch
+
+    // Arguments are reversed on java expression stack
+    __ movl(val, Address(rsp,   wordSize)); // byte value
+    __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC
+
+    __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
+    __ notl(crc); // ~crc
+    __ update_byte_crc32(crc, val, tbl);
+    __ notl(crc); // ~crc
+    // result in rax
+
+    // _areturn
+    __ pop(rdi);                // get return address
+    __ mov(rsp, rsi);           // set sp to sender sp
+    __ jmp(rdi);
+
+    // generate a vanilla native entry as the slow path
+    __ bind(slow_path);
+
+    (void) generate_native_entry(false);
+
+    return entry;
+  }
+  return generate_native_entry(false);
+}
+
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
+ *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
+ */
+address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    // rbx: Method*
+    // r13: senderSP must be preserved for slow path, set SP to it on fast path
+
+    Label slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    ExternalAddress state(SafepointSynchronize::address_of_state());
+    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
+             SafepointSynchronize::_not_synchronized);
+    __ jcc(Assembler::notEqual, slow_path);
+
+    // We don't generate a local frame and don't align the stack because
+    // we call stub code and there is no safepoint on this path.
+
+    // Load parameters
+    const Register crc = c_rarg0;  // crc
+    const Register buf = c_rarg1;  // source java byte array address
+    const Register len = c_rarg2;  // length
+
+    // Arguments are reversed on java expression stack
+    __ movl(len,   Address(rsp,   wordSize)); // Length
+    // Calculate address of start element
+    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
+      __ movptr(buf, Address(rsp, 3*wordSize)); // long buf
+      __ addptr(buf, Address(rsp, 2*wordSize)); // + offset
+      __ movl(crc,   Address(rsp, 5*wordSize)); // Initial CRC
+    } else {
+      __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
+      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
+      __ addptr(buf, Address(rsp, 2*wordSize)); // + offset
+      __ movl(crc,   Address(rsp, 4*wordSize)); // Initial CRC
+    }
+
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
+    // result in rax
+
+    // _areturn
+    __ pop(rdi);                // get return address
+    __ mov(rsp, r13);           // set sp to sender sp
+    __ jmp(rdi);
+
+    // generate a vanilla native entry as the slow path
+    __ bind(slow_path);
+
+    (void) generate_native_entry(false);
+
+    return entry;
+  }
+  return generate_native_entry(false);
+}
 
 // Interpreter stub for calling a native method. (asm interpreter)
 // This sets up a somewhat different looking stack for calling the
@@ -1510,15 +1621,16 @@
   // determine code generation flags
   bool synchronized = false;
   address entry_point = NULL;
+  InterpreterGenerator* ig_this = (InterpreterGenerator*)this;
 
   switch (kind) {
-  case Interpreter::zerolocals             :                                                                             break;
-  case Interpreter::zerolocals_synchronized: synchronized = true;                                                        break;
-  case Interpreter::native                 : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
-  case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true);  break;
-  case Interpreter::empty                  : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry();       break;
-  case Interpreter::accessor               : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();    break;
-  case Interpreter::abstract               : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();    break;
+  case Interpreter::zerolocals             :                                                      break;
+  case Interpreter::zerolocals_synchronized: synchronized = true;                                 break;
+  case Interpreter::native                 : entry_point = ig_this->generate_native_entry(false); break;
+  case Interpreter::native_synchronized    : entry_point = ig_this->generate_native_entry(true);  break;
+  case Interpreter::empty                  : entry_point = ig_this->generate_empty_entry();       break;
+  case Interpreter::accessor               : entry_point = ig_this->generate_accessor_entry();    break;
+  case Interpreter::abstract               : entry_point = ig_this->generate_abstract_entry();    break;
 
   case Interpreter::java_lang_math_sin     : // fall thru
   case Interpreter::java_lang_math_cos     : // fall thru
@@ -1528,9 +1640,15 @@
   case Interpreter::java_lang_math_log10   : // fall thru
   case Interpreter::java_lang_math_sqrt    : // fall thru
   case Interpreter::java_lang_math_pow     : // fall thru
-  case Interpreter::java_lang_math_exp     : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind);    break;
+  case Interpreter::java_lang_math_exp     : entry_point = ig_this->generate_math_entry(kind);      break;
   case Interpreter::java_lang_ref_reference_get
-                                           : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
+                                           : entry_point = ig_this->generate_Reference_get_entry(); break;
+  case Interpreter::java_util_zip_CRC32_update
+                                           : entry_point = ig_this->generate_CRC32_update_entry();  break;
+  case Interpreter::java_util_zip_CRC32_updateBytes
+                                           : // fall thru
+  case Interpreter::java_util_zip_CRC32_updateByteBuffer
+                                           : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
   default:
     fatal(err_msg("unexpected method kind: %d", kind));
     break;
@@ -1540,8 +1658,7 @@
     return entry_point;
   }
 
-  return ((InterpreterGenerator*) this)->
-                                generate_normal_entry(synchronized);
+  return ig_this->generate_normal_entry(synchronized);
 }
 
 // These should never be compiled since the interpreter will prefer
--- a/src/cpu/x86/vm/vm_version_x86.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/vm_version_x86.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -446,6 +446,7 @@
                (supports_avx()    ? ", avx" : ""),
                (supports_avx2()   ? ", avx2" : ""),
                (supports_aes()    ? ", aes" : ""),
+               (supports_clmul()    ? ", clmul" : ""),
                (supports_erms()   ? ", erms" : ""),
                (supports_mmx_ext() ? ", mmxext" : ""),
                (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
@@ -489,6 +490,27 @@
     FLAG_SET_DEFAULT(UseAES, false);
   }
 
+  // Use CLMUL instructions if available.
+  if (supports_clmul()) {
+    if (FLAG_IS_DEFAULT(UseCLMUL)) {
+      UseCLMUL = true;
+    }
+  } else if (UseCLMUL) {
+    if (!FLAG_IS_DEFAULT(UseCLMUL))
+      warning("CLMUL instructions not available on this CPU (AVX may also be required)");
+    FLAG_SET_DEFAULT(UseCLMUL, false);
+  }
+
+  if (UseCLMUL && (UseAVX > 0) && (UseSSE > 2)) {
+    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
+      UseCRC32Intrinsics = true;
+    }
+  } else if (UseCRC32Intrinsics) {
+    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
+      warning("CRC32 Intrinsics requires AVX and CLMUL instructions (not available on this CPU)");
+    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
+  }
+
   // The AES intrinsic stubs require AES instruction support (of course)
   // but also require sse3 mode for instructions it use.
   if (UseAES && (UseSSE > 2)) {
--- a/src/cpu/x86/vm/vm_version_x86.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/x86/vm/vm_version_x86.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,8 @@
     uint32_t value;
     struct {
       uint32_t sse3     : 1,
-                        : 2,
+               clmul    : 1,
+                        : 1,
                monitor  : 1,
                         : 1,
                vmx      : 1,
@@ -249,7 +250,8 @@
     CPU_AVX    = (1 << 17),
     CPU_AVX2   = (1 << 18),
     CPU_AES    = (1 << 19),
-    CPU_ERMS   = (1 << 20) // enhanced 'rep movsb/stosb' instructions
+    CPU_ERMS   = (1 << 20), // enhanced 'rep movsb/stosb' instructions
+    CPU_CLMUL  = (1 << 21) // carryless multiply for CRC
   } cpuFeatureFlags;
 
   enum {
@@ -429,6 +431,8 @@
       result |= CPU_AES;
     if (_cpuid_info.sef_cpuid7_ebx.bits.erms != 0)
       result |= CPU_ERMS;
+    if (_cpuid_info.std_cpuid1_ecx.bits.clmul != 0)
+      result |= CPU_CLMUL;
 
     // AMD features.
     if (is_amd()) {
@@ -555,6 +559,7 @@
   static bool supports_tsc()      { return (_cpuFeatures & CPU_TSC)    != 0; }
   static bool supports_aes()      { return (_cpuFeatures & CPU_AES) != 0; }
   static bool supports_erms()     { return (_cpuFeatures & CPU_ERMS) != 0; }
+  static bool supports_clmul()    { return (_cpuFeatures & CPU_CLMUL) != 0; }
 
   // Intel features
   static bool is_intel_family_core() { return is_intel() &&
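
For reference, the new clmul bit sits directly after sse3 in the CPUID leaf-1 ECX layout mirrored by the bitfield above; on x86 that is the PCLMULQDQ (carry-less multiply) feature bit. A stand-alone check of the same bits using GCC/Clang's <cpuid.h> instead of the VM's cpuid plumbing (sketch only):

// Standalone sketch: query CPUID leaf 1 and test the feature bits the
// patch cares about. Uses the GCC/Clang <cpuid.h> helper, not HotSpot code.
#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
    std::printf("CPUID leaf 1 not supported\n");
    return 1;
  }
  bool sse3  = (ecx >> 0)  & 1;   // ECX bit 0  : sse3
  bool clmul = (ecx >> 1)  & 1;   // ECX bit 1  : clmul (PCLMULQDQ, carry-less multiply)
  bool aes   = (ecx >> 25) & 1;   // ECX bit 25 : aes
  bool avx   = (ecx >> 28) & 1;   // ECX bit 28 : avx
  std::printf("sse3=%d clmul=%d aes=%d avx=%d\n", sse3, clmul, aes, avx);
  return 0;
}
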
--- a/src/cpu/zero/vm/relocInfo_zero.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/zero/vm/relocInfo_zero.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -52,22 +52,6 @@
   return (address *) addr();
 }
 
-int Relocation::pd_breakpoint_size() {
-  ShouldNotCallThis();
-}
-
-void Relocation::pd_swap_in_breakpoint(address x,
-                                       short*  instrs,
-                                       int     instrlen) {
-  ShouldNotCallThis();
-}
-
-void Relocation::pd_swap_out_breakpoint(address x,
-                                        short*  instrs,
-                                        int     instrlen) {
-  ShouldNotCallThis();
-}
-
 void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src,
                                                 CodeBuffer*       dst) {
   ShouldNotCallThis();
--- a/src/cpu/zero/vm/shark_globals_zero.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/cpu/zero/vm/shark_globals_zero.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -58,7 +58,9 @@
 define_pd_global(bool,     ProfileInterpreter,           false);
 define_pd_global(intx,     CodeCacheExpansionSize,       32*K );
 define_pd_global(uintx,    CodeCacheMinBlockLength,      1    );
-define_pd_global(uintx,    MetaspaceSize,                     12*M );
+define_pd_global(uintx,    CodeCacheMinimumUseSpace,     200*K);
+
+define_pd_global(uintx,    MetaspaceSize,                12*M );
 define_pd_global(bool,     NeverActAsServerClassMachine, true );
 define_pd_global(uint64_t, MaxRAM,                       1ULL*G);
 define_pd_global(bool,     CICompileOSR,                 true );
--- a/src/os/bsd/dtrace/jvm_dtrace.c	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os/bsd/dtrace/jvm_dtrace.c	Mon Jul 15 11:07:03 2013 +0100
@@ -122,9 +122,7 @@
 }
 
 static int file_close(int fd) {
-    int ret;
-    RESTARTABLE(close(fd), ret);
-    return ret;
+    return close(fd);
 }
 
 static int file_read(int fd, char* buf, int len) {
--- a/src/os/bsd/vm/attachListener_bsd.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os/bsd/vm/attachListener_bsd.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -199,7 +199,7 @@
   ::unlink(initial_path);
   int res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
   if (res == -1) {
-    RESTARTABLE(::close(listener), res);
+    ::close(listener);
     return -1;
   }
 
@@ -217,7 +217,7 @@
     }
   }
   if (res == -1) {
-    RESTARTABLE(::close(listener), res);
+    ::close(listener);
     ::unlink(initial_path);
     return -1;
   }
@@ -345,24 +345,21 @@
     uid_t puid;
     gid_t pgid;
     if (::getpeereid(s, &puid, &pgid) != 0) {
-      int res;
-      RESTARTABLE(::close(s), res);
+      ::close(s);
       continue;
     }
     uid_t euid = geteuid();
     gid_t egid = getegid();
 
     if (puid != euid || pgid != egid) {
-      int res;
-      RESTARTABLE(::close(s), res);
+      ::close(s);
       continue;
     }
 
     // peer credentials look okay, so we read the request
     BsdAttachOperation* op = read_request(s);
     if (op == NULL) {
-      int res;
-      RESTARTABLE(::close(s), res);
+      ::close(s);
       continue;
     } else {
       return op;
@@ -413,7 +410,7 @@
   }
 
   // done
-  RESTARTABLE(::close(this->socket()), rc);
+  ::close(this->socket());
 
   // were we externally suspended while we were waiting?
   thread->check_and_wait_while_suspended();
--- a/src/os/bsd/vm/os_bsd.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os/bsd/vm/os_bsd.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -2074,6 +2074,13 @@
   }
 }
 
+static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
+                                    int err) {
+  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
+          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
+          strerror(err), err);
+}
+
 // NOTE: Bsd kernel does not really reserve the pages for us.
 //       All it does is to check if there are enough free pages
 //       left at the time of mmap(). This could be a potential
@@ -2082,18 +2089,45 @@
   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
 #ifdef __OpenBSD__
   // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
-  return ::mprotect(addr, size, prot) == 0;
+  if (::mprotect(addr, size, prot) == 0) {
+    return true;
+  }
 #else
   uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
                                    MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
-  return res != (uintptr_t) MAP_FAILED;
+  if (res != (uintptr_t) MAP_FAILED) {
+    return true;
+  }
 #endif
+
+  // Warn about any commit errors we see in non-product builds just
+  // in case mmap() doesn't work as described on the man page.
+  NOT_PRODUCT(warn_fail_commit_memory(addr, size, exec, errno);)
+
+  return false;
 }
 
-
 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                        bool exec) {
-  return commit_memory(addr, size, exec);
+  // alignment_hint is ignored on this OS
+  return pd_commit_memory(addr, size, exec);
+}
+
+void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
+                                  const char* mesg) {
+  assert(mesg != NULL, "mesg must be specified");
+  if (!pd_commit_memory(addr, size, exec)) {
+    // add extra info in product mode for vm_exit_out_of_memory():
+    PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
+    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
+  }
+}
+
+void os::pd_commit_memory_or_exit(char* addr, size_t size,
+                                  size_t alignment_hint, bool exec,
+                                  const char* mesg) {
+  // alignment_hint is ignored on this OS
+  pd_commit_memory_or_exit(addr, size, exec, mesg);
 }
 
 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
@@ -2148,7 +2182,7 @@
 }
 
 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
-  return os::commit_memory(addr, size);
+  return os::commit_memory(addr, size, !ExecMem);
 }
 
 // If this is a growable mapping, remove the guard pages entirely by
@@ -2320,21 +2354,20 @@
   }
 
   // The memory is committed
-  address pc = CALLER_PC;
-  MemTracker::record_virtual_memory_reserve((address)addr, bytes, pc);
-  MemTracker::record_virtual_memory_commit((address)addr, bytes, pc);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
 
   return addr;
 }
 
 bool os::release_memory_special(char* base, size_t bytes) {
+  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
   // detaching the SHM segment will also delete it, see reserve_memory_special()
   int rslt = shmdt(base);
   if (rslt == 0) {
-    MemTracker::record_virtual_memory_uncommit((address)base, bytes);
-    MemTracker::record_virtual_memory_release((address)base, bytes);
+    tkr.record((address)base, bytes);
     return true;
   } else {
+    tkr.discard();
     return false;
   }
 
@@ -3512,7 +3545,7 @@
 
   if (!UseMembar) {
     address mem_serialize_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-    guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
+    guarantee( mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
     os::set_memory_serialize_page( mem_serialize_page );
 
 #ifndef PRODUCT
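
The guarantee() fix above depends on a detail that is easy to get wrong: a failed mmap() returns MAP_FAILED, which is (void*)-1, not NULL, and leaves the reason in errno. A minimal stand-alone version of the corrected check (sketch, not VM code):

// Sketch only: allocate one anonymous page and check the result the way
// the patched guarantee() does, against MAP_FAILED rather than NULL.
#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>
#include <cerrno>

int main() {
  size_t page = (size_t)sysconf(_SC_PAGESIZE);
  void* p = ::mmap(NULL, page, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) {                 // NULL is not the failure sentinel for mmap()
    int err = errno;                     // save errno before any other library call
    std::fprintf(stderr, "mmap failed: %s (errno=%d)\n", strerror(err), err);
    return 1;
  }
  ::munmap(p, page);
  return 0;
}
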
--- a/src/os/bsd/vm/os_bsd.inline.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os/bsd/vm/os_bsd.inline.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -178,11 +178,11 @@
 }
 
 inline int os::close(int fd) {
-  RESTARTABLE_RETURN_INT(::close(fd));
+  return ::close(fd);
 }
 
 inline int os::socket_close(int fd) {
-  RESTARTABLE_RETURN_INT(::close(fd));
+  return ::close(fd);
 }
 
 inline int os::socket(int domain, int type, int protocol) {
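
These close() hunks, like the attachListener, perfMemory and dtrace ones, drop the EINTR-retry wrapper. The usual rationale, which this change appears to follow, is that on these platforms the descriptor is released even when close() reports EINTR, so retrying can close a descriptor that another thread has just reused. A sketch of the before/after shape; the RESTARTABLE macro here is a stand-in for the HotSpot one, not a copy of it:

// Illustration only. RESTARTABLE below is a stand-in macro: it retries a
// syscall while it fails with EINTR.
#include <unistd.h>
#include <fcntl.h>
#include <cerrno>

#define RESTARTABLE(_cmd, _result) do {          \
    _result = (_cmd);                            \
  } while (_result == -1 && errno == EINTR)

int close_retrying(int fd) {     // old pattern: retry close() on EINTR
  int ret;
  RESTARTABLE(::close(fd), ret); // risky: the fd may already be closed and reused
  return ret;
}

int close_once(int fd) {         // new pattern: call close() exactly once
  return ::close(fd);
}

int main() {
  int fd = ::open("/dev/null", O_RDONLY);
  if (fd < 0) return 1;
  return close_once(fd) == 0 ? 0 : 1;
}
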
--- a/src/os/bsd/vm/perfMemory_bsd.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os/bsd/vm/perfMemory_bsd.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@
   }
 
   // commit memory
-  if (!os::commit_memory(mapAddress, size)) {
+  if (!os::commit_memory(mapAddress, size, !ExecMem)) {
     if (PrintMiscellaneous && Verbose) {
       warning("Could not commit PerfData memory\n");
     }
@@ -120,7 +120,7 @@
       addr += result;
     }
 
-    RESTARTABLE(::close(fd), result);
+    result = ::close(fd);
     if (PrintMiscellaneous && Verbose) {
       if (result == OS_ERR) {
         warning("Could not close %s: %s\n", destfile, strerror(errno));
@@ -632,7 +632,7 @@
     if (PrintMiscellaneous && Verbose) {
       warning("could not set shared memory file size: %s\n", strerror(errno));
     }
-    RESTARTABLE(::close(fd), result);
+    ::close(fd);
     return -1;
   }
 
@@ -656,7 +656,7 @@
   if (result != -1) {
     return fd;
   } else {
-    RESTARTABLE(::close(fd), result);
+    ::close(fd);
     return -1;
   }
 }
@@ -734,9 +734,7 @@
 
   mapAddress = (char*)::mmap((char*)0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
 
-  // attempt to close the file - restart it if it was interrupted,
-  // but ignore other failures
-  RESTARTABLE(::close(fd), result);
+  result = ::close(fd);
   assert(result != OS_ERR, "could not close file");
 
   if (mapAddress == MAP_FAILED) {
@@ -755,8 +753,7 @@
   (void)::memset((void*) mapAddress, 0, size);
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
-  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
 
   return mapAddress;
 }
@@ -909,7 +906,7 @@
 
   // attempt to close the file - restart if it gets interrupted,
   // but ignore other failures
-  RESTARTABLE(::close(fd), result);
+  result = ::close(fd);
   assert(result != OS_ERR, "could not close file");
 
   if (mapAddress == MAP_FAILED) {
@@ -921,8 +918,7 @@
   }
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
-  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
 
   *addr = mapAddress;
   *sizep = size;
--- a/src/os/linux/vm/attachListener_linux.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os/linux/vm/attachListener_linux.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -199,7 +199,7 @@
   ::unlink(initial_path);
   int res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
   if (res == -1) {
-    RESTARTABLE(::close(listener), res);
+    ::close(listener);
     return -1;
   }
 
@@ -212,7 +212,7 @@
       }
   }
   if (res == -1) {
-    RESTARTABLE(::close(listener), res);
+    ::close(listener);
     ::unlink(initial_path);
     return -1;
   }
@@ -340,24 +340,21 @@
     struct ucred cred_info;
     socklen_t optlen = sizeof(cred_info);
     if (::getsockopt(s, SOL_SOCKET, SO_PEERCRED, (void*)&cred_info, &optlen) == -1) {
-      int res;
-      RESTARTABLE(::close(s), res);
+      ::close(s);
       continue;
     }
     uid_t euid = geteuid();
     gid_t egid = getegid();
 
     if (cred_info.uid != euid || cred_info.gid != egid) {
-      int res;
-      RESTARTABLE(::close(s), res);
+      ::close(s);
       continue;
     }
 
     // peer credentials look okay, so we read the request
     LinuxAttachOperation* op = read_request(s);
     if (op == NULL) {
-      int res;
-      RESTARTABLE(::close(s), res);
+      ::close(s);
       continue;
     } else {
       return op;
@@ -408,7 +405,7 @@
   }
 
   // done
-  RESTARTABLE(::close(this->socket()), rc);
+  ::close(this->socket());
 
   // were we externally suspended while we were waiting?
   thread->check_and_wait_while_suspended();
--- a/src/os/linux/vm/os_linux.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os/linux/vm/os_linux.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -2612,11 +2612,49 @@
   }
 }
 
+static bool recoverable_mmap_error(int err) {
+  // See if the error is one we can let the caller handle. This
+  // list of errno values comes from JBS-6843484. I can't find a
+  // Linux man page that documents this specific set of errno
+  // values, so while this list currently matches Solaris, it may
+  // change as we gain experience with this failure mode.
+  switch (err) {
+  case EBADF:
+  case EINVAL:
+  case ENOTSUP:
+    // let the caller deal with these errors
+    return true;
+
+  default:
+    // Any remaining errors on this OS can cause our reserved mapping
+    // to be lost. That can cause confusion where different data
+    // structures think they have the same memory mapped. The worst
+    // scenario is if both the VM and a library think they have the
+    // same memory mapped.
+    return false;
+  }
+}
+
+static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
+                                    int err) {
+  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
+          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
+          strerror(err), err);
+}
+
+static void warn_fail_commit_memory(char* addr, size_t size,
+                                    size_t alignment_hint, bool exec,
+                                    int err) {
+  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
+          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, size,
+          alignment_hint, exec, strerror(err), err);
+}
+
 // NOTE: Linux kernel does not really reserve the pages for us.
 //       All it does is to check if there are enough free pages
 //       left at the time of mmap(). This could be a potential
 //       problem.
-bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
+int os::Linux::commit_memory_impl(char* addr, size_t size, bool exec) {
   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
   uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
                                    MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
@@ -2624,9 +2662,32 @@
     if (UseNUMAInterleaving) {
       numa_make_global(addr, size);
     }
-    return true;
-  }
-  return false;
+    return 0;
+  }
+
+  int err = errno;  // save errno from mmap() call above
+
+  if (!recoverable_mmap_error(err)) {
+    warn_fail_commit_memory(addr, size, exec, err);
+    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "committing reserved memory.");
+  }
+
+  return err;
+}
+
+bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
+  return os::Linux::commit_memory_impl(addr, size, exec) == 0;
+}
+
+void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
+                                  const char* mesg) {
+  assert(mesg != NULL, "mesg must be specified");
+  int err = os::Linux::commit_memory_impl(addr, size, exec);
+  if (err != 0) {
+    // the caller wants all commit errors to exit with the specified mesg:
+    warn_fail_commit_memory(addr, size, exec, err);
+    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
+  }
 }
 
 // Define MAP_HUGETLB here so we can build HotSpot on old systems.
@@ -2639,8 +2700,9 @@
 #define MADV_HUGEPAGE 14
 #endif
 
-bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
-                       bool exec) {
+int os::Linux::commit_memory_impl(char* addr, size_t size,
+                                  size_t alignment_hint, bool exec) {
+  int err;
   if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
     int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
     uintptr_t res =
@@ -2651,16 +2713,46 @@
       if (UseNUMAInterleaving) {
         numa_make_global(addr, size);
       }
-      return true;
+      return 0;
+    }
+
+    err = errno;  // save errno from mmap() call above
+
+    if (!recoverable_mmap_error(err)) {
+      // However, it is not clear that this loss of our reserved mapping
+      // happens with large pages on Linux or that we cannot recover
+      // from the loss. For now, we just issue a warning and we don't
+      // call vm_exit_out_of_memory(). This issue is being tracked by
+      // JBS-8007074.
+      warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
+//    vm_exit_out_of_memory(size, OOM_MMAP_ERROR,
+//                          "committing reserved memory.");
     }
     // Fall through and try to use small pages
   }
 
-  if (commit_memory(addr, size, exec)) {
+  err = os::Linux::commit_memory_impl(addr, size, exec);
+  if (err == 0) {
     realign_memory(addr, size, alignment_hint);
-    return true;
-  }
-  return false;
+  }
+  return err;
+}
+
+bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
+                          bool exec) {
+  return os::Linux::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
+}
+
+void os::pd_commit_memory_or_exit(char* addr, size_t size,
+                                  size_t alignment_hint, bool exec,
+                                  const char* mesg) {
+  assert(mesg != NULL, "mesg must be specified");
+  int err = os::Linux::commit_memory_impl(addr, size, alignment_hint, exec);
+  if (err != 0) {
+    // the caller wants all commit errors to exit with the specified mesg:
+    warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
+    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
+  }
 }
 
 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
@@ -2678,7 +2770,7 @@
   // small pages on top of the SHM segment. This method always works for small pages, so we
   // allow that in any case.
   if (alignment_hint <= (size_t)os::vm_page_size() || !UseSHM) {
-    commit_memory(addr, bytes, alignment_hint, false);
+    commit_memory(addr, bytes, alignment_hint, !ExecMem);
   }
 }
 
@@ -2931,7 +3023,7 @@
       ::munmap((void*)stack_extent, (uintptr_t)addr - stack_extent);
   }
 
-  return os::commit_memory(addr, size);
+  return os::commit_memory(addr, size, !ExecMem);
 }
 
 // If this is a growable mapping, remove the guard pages entirely by
@@ -3053,7 +3145,7 @@
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
                   -1, 0);
 
-  if (p != (void *) -1) {
+  if (p != MAP_FAILED) {
     // We don't know if this really is a huge page or not.
     FILE *fp = fopen("/proc/self/maps", "r");
     if (fp) {
@@ -3271,22 +3363,21 @@
   }
 
   // The memory is committed
-  address pc = CALLER_PC;
-  MemTracker::record_virtual_memory_reserve((address)addr, bytes, pc);
-  MemTracker::record_virtual_memory_commit((address)addr, bytes, pc);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
 
   return addr;
 }
 
 bool os::release_memory_special(char* base, size_t bytes) {
+  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
   // detaching the SHM segment will also delete it, see reserve_memory_special()
   int rslt = shmdt(base);
   if (rslt == 0) {
-    MemTracker::record_virtual_memory_uncommit((address)base, bytes);
-    MemTracker::record_virtual_memory_release((address)base, bytes);
+    tkr.record((address)base, bytes);
     return true;
   } else {
-   return false;
+    tkr.discard();
+    return false;
   }
 }
 
@@ -4393,7 +4484,7 @@
 
   if (!UseMembar) {
     address mem_serialize_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-    guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
+    guarantee( mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
     os::set_memory_serialize_page( mem_serialize_page );
 
 #ifndef PRODUCT
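
The Linux commit path is reshaped into an errno-returning commit_memory_impl() so that pd_commit_memory() can keep its boolean contract while the new pd_commit_memory_or_exit() can abort with the saved error. A compressed stand-alone sketch of that split (mmap-based, with the same recoverable-errno idea but none of the NUMA or hugepage plumbing):

// Sketch of the commit-memory split: an impl that returns 0 or an errno,
// one wrapper that reports success/failure, and one that exits on failure.
#include <sys/mman.h>
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>

static bool recoverable_mmap_error(int err) {
  switch (err) {
  case EBADF: case EINVAL: case ENOTSUP:
    return true;       // the caller may handle these
  default:
    return false;      // anything else may have destroyed the reserved mapping
  }
}

static int commit_memory_impl(char* addr, size_t size, bool exec) {
  int prot = exec ? PROT_READ | PROT_WRITE | PROT_EXEC : PROT_READ | PROT_WRITE;
  void* res = ::mmap(addr, size, prot,
                     MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
  if (res != MAP_FAILED) return 0;
  int err = errno;     // save errno from the mmap() call above
  if (!recoverable_mmap_error(err)) {
    std::fprintf(stderr, "commit_memory(%p, %zu, %d) failed: %s (errno=%d)\n",
                 (void*)addr, size, exec, strerror(err), err);
    std::abort();      // stand-in for vm_exit_out_of_memory()
  }
  return err;
}

static bool commit_memory(char* addr, size_t size, bool exec) {
  return commit_memory_impl(addr, size, exec) == 0;
}

static void commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  int err = commit_memory_impl(addr, size, exec);
  if (err != 0) {
    std::fprintf(stderr, "%s: %s (errno=%d)\n", mesg, strerror(err), err);
    std::exit(1);      // stand-in for vm_exit_out_of_memory()
  }
}

int main() {
  // Reserve an inaccessible range, then commit its two halves both ways.
  size_t sz = 1 << 20;
  char* base = (char*)::mmap(NULL, sz, PROT_NONE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return 1;
  if (!commit_memory(base, sz / 2, /*exec=*/false)) return 1;        // caller handles failure
  commit_memory_or_exit(base + sz / 2, sz / 2, /*exec=*/false,
                        "committing reserved memory");               // failure is fatal
  base[0] = 42;
  base[sz - 1] = 42;   // both halves are now writable
  ::munmap(base, sz);
  return 0;
}
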
--- a/src/os/linux/vm/os_linux.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os/linux/vm/os_linux.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -76,6 +76,10 @@
   static julong physical_memory() { return _physical_memory; }
   static void initialize_system_info();
 
+  static int commit_memory_impl(char* addr, size_t bytes, bool exec);
+  static int commit_memory_impl(char* addr, size_t bytes,
+                                size_t alignment_hint, bool exec);
+
   static void set_glibc_version(const char *s)      { _glibc_version = s; }
   static void set_libpthread_version(const char *s) { _libpthread_version = s; }
 
--- a/src/os/linux/vm/perfMemory_linux.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os/linux/vm/perfMemory_linux.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@
   }
 
   // commit memory
-  if (!os::commit_memory(mapAddress, size)) {
+  if (!os::commit_memory(mapAddress, size, !ExecMem)) {
     if (PrintMiscellaneous && Verbose) {
       warning("Could not commit PerfData memory\n");
     }
@@ -120,7 +120,7 @@
       addr += result;
     }
 
-    RESTARTABLE(::close(fd), result);
+    result = ::close(fd);
     if (PrintMiscellaneous && Verbose) {
       if (result == OS_ERR) {
         warning("Could not close %s: %s\n", destfile, strerror(errno));
@@ -632,7 +632,7 @@
     if (PrintMiscellaneous && Verbose) {
       warning("could not set shared memory file size: %s\n", strerror(errno));
     }
-    RESTARTABLE(::close(fd), result);
+    ::close(fd);
     return -1;
   }
 
@@ -656,7 +656,7 @@
   if (result != -1) {
     return fd;
   } else {
-    RESTARTABLE(::close(fd), result);
+    ::close(fd);
     return -1;
   }
 }
@@ -734,9 +734,7 @@
 
   mapAddress = (char*)::mmap((char*)0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
 
-  // attempt to close the file - restart it if it was interrupted,
-  // but ignore other failures
-  RESTARTABLE(::close(fd), result);
+  result = ::close(fd);
   assert(result != OS_ERR, "could not close file");
 
   if (mapAddress == MAP_FAILED) {
@@ -755,8 +753,7 @@
   (void)::memset((void*) mapAddress, 0, size);
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
-  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
 
   return mapAddress;
 }
@@ -907,9 +904,7 @@
 
   mapAddress = (char*)::mmap((char*)0, size, mmap_prot, MAP_SHARED, fd, 0);
 
-  // attempt to close the file - restart if it gets interrupted,
-  // but ignore other failures
-  RESTARTABLE(::close(fd), result);
+  result = ::close(fd);
   assert(result != OS_ERR, "could not close file");
 
   if (mapAddress == MAP_FAILED) {
@@ -921,8 +916,7 @@
   }
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
-  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
 
   *addr = mapAddress;
   *sizep = size;
--- a/src/os/solaris/dtrace/jhelper.d	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os/solaris/dtrace/jhelper.d	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -332,12 +332,15 @@
 
   this->nameSymbol = copyin_ptr(this->constantPool +
       this->nameIndex * sizeof (pointer) + SIZE_ConstantPool);
+  /* The symbol is a CPSlot and has lower bit set to indicate metadata */
+  this->nameSymbol &= (~1); /* remove metadata lsb */
 
   this->nameSymbolLength = copyin_uint16(this->nameSymbol +
       OFFSET_Symbol_length);
 
   this->signatureSymbol = copyin_ptr(this->constantPool +
       this->signatureIndex * sizeof (pointer) + SIZE_ConstantPool);
+  this->signatureSymbol &= (~1); /* remove metadata lsb */
 
   this->signatureSymbolLength = copyin_uint16(this->signatureSymbol +
       OFFSET_Symbol_length);
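
The two &= (~1) lines strip a tag bit: the constant-pool slot stores the Symbol pointer with its least-significant bit set to mark it as metadata, and the DTrace helper has to clear that bit before reading through the pointer. The same masking shown in plain C++ (CPSlot itself is a HotSpot-internal type; this only illustrates the bit arithmetic):

// Sketch of low-bit pointer tagging as used by the constant-pool slot.
#include <cstdint>
#include <cassert>

static const uintptr_t kMetadataTag = 1;   // the lowest bit marks a metadata entry

inline uintptr_t tag_metadata(uintptr_t p) { return p | kMetadataTag; }
inline bool      is_metadata(uintptr_t p)  { return (p & kMetadataTag) != 0; }
inline uintptr_t strip_tag(uintptr_t p)    { return p & ~kMetadataTag; }  // same as &= (~1)

int main() {
  static int symbol_stand_in = 0;            // any aligned object leaves bit 0 free
  uintptr_t raw    = (uintptr_t)&symbol_stand_in;
  uintptr_t tagged = tag_metadata(raw);
  assert(is_metadata(tagged));
  assert(strip_tag(tagged) == raw);          // safe to dereference again
  return 0;
}
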
--- a/src/os/solaris/dtrace/jvm_dtrace.c	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os/solaris/dtrace/jvm_dtrace.c	Mon Jul 15 11:07:03 2013 +0100
@@ -122,9 +122,7 @@
 }
 
 static int file_close(int fd) {
-    int ret;
-    RESTARTABLE(close(fd), ret);
-    return ret;
+    return close(fd);
 }
 
 static int file_read(int fd, char* buf, int len) {
--- a/src/os/solaris/vm/attachListener_solaris.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os/solaris/vm/attachListener_solaris.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -392,7 +392,7 @@
     return -1;
   }
   assert(fd >= 0, "bad file descriptor");
-  RESTARTABLE(::close(fd), res);
+  ::close(fd);
 
   // attach the door descriptor to the file
   if ((res = ::fattach(dd, initial_path)) == -1) {
@@ -410,7 +410,7 @@
   // rename file so that clients can attach
   if (dd >= 0) {
     if (::rename(initial_path, door_path) == -1) {
-        RESTARTABLE(::close(dd), res);
+        ::close(dd);
         ::fdetach(initial_path);
         dd = -1;
     }
@@ -549,7 +549,7 @@
     }
 
     // close socket and we're done
-    RESTARTABLE(::close(this->socket()), rc);
+    ::close(this->socket());
 
     // were we externally suspended while we were waiting?
     thread->check_and_wait_while_suspended();
--- a/src/os/solaris/vm/os_solaris.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os/solaris/vm/os_solaris.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -2784,7 +2784,42 @@
   return page_size;
 }
 
-bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
+static bool recoverable_mmap_error(int err) {
+  // See if the error is one we can let the caller handle. This
+  // list of errno values comes from the Solaris mmap(2) man page.
+  switch (err) {
+  case EBADF:
+  case EINVAL:
+  case ENOTSUP:
+    // let the caller deal with these errors
+    return true;
+
+  default:
+    // Any remaining errors on this OS can cause our reserved mapping
+    // to be lost. That can cause confusion where different data
+    // structures think they have the same memory mapped. The worst
+    // scenario is if both the VM and a library think they have the
+    // same memory mapped.
+    return false;
+  }
+}
+
+static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
+                                    int err) {
+  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
+          ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
+          strerror(err), err);
+}
+
+static void warn_fail_commit_memory(char* addr, size_t bytes,
+                                    size_t alignment_hint, bool exec,
+                                    int err) {
+  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
+          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
+          alignment_hint, exec, strerror(err), err);
+}
+
+int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
   size_t size = bytes;
   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
@@ -2792,14 +2827,38 @@
     if (UseNUMAInterleaving) {
       numa_make_global(addr, bytes);
     }
-    return true;
-  }
-  return false;
-}
-
-bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
-                       bool exec) {
-  if (commit_memory(addr, bytes, exec)) {
+    return 0;
+  }
+
+  int err = errno;  // save errno from mmap() call in mmap_chunk()
+
+  if (!recoverable_mmap_error(err)) {
+    warn_fail_commit_memory(addr, bytes, exec, err);
+    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
+  }
+
+  return err;
+}
+
+bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
+  return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
+}
+
+void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
+                                  const char* mesg) {
+  assert(mesg != NULL, "mesg must be specified");
+  int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
+  if (err != 0) {
+    // the caller wants all commit errors to exit with the specified mesg:
+    warn_fail_commit_memory(addr, bytes, exec, err);
+    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
+  }
+}
+
+int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
+                                    size_t alignment_hint, bool exec) {
+  int err = Solaris::commit_memory_impl(addr, bytes, exec);
+  if (err == 0) {
     if (UseMPSS && alignment_hint > (size_t)vm_page_size()) {
       // If the large page size has been set and the VM
       // is using large pages, use the large page size
@@ -2821,9 +2880,25 @@
       // Since this is a hint, ignore any failures.
       (void)Solaris::set_mpss_range(addr, bytes, page_size);
     }
-    return true;
-  }
-  return false;
+  }
+  return err;
+}
+
+bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
+                          bool exec) {
+  return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
+}
+
+void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
+                                  size_t alignment_hint, bool exec,
+                                  const char* mesg) {
+  assert(mesg != NULL, "mesg must be specified");
+  int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
+  if (err != 0) {
+    // the caller wants all commit errors to exit with the specified mesg:
+    warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
+    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
+  }
 }
 
 // Uncommit the pages in a specified region.
@@ -2835,7 +2910,7 @@
 }
 
 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
-  return os::commit_memory(addr, size);
+  return os::commit_memory(addr, size, !ExecMem);
 }
 
 bool os::remove_stack_guard_pages(char* addr, size_t size) {
@@ -3457,22 +3532,21 @@
   }
 
   // The memory is committed
-  address pc = CALLER_PC;
-  MemTracker::record_virtual_memory_reserve((address)retAddr, size, pc);
-  MemTracker::record_virtual_memory_commit((address)retAddr, size, pc);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)retAddr, size, mtNone, CURRENT_PC);
 
   return retAddr;
 }
 
 bool os::release_memory_special(char* base, size_t bytes) {
+  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
   // detaching the SHM segment will also delete it, see reserve_memory_special()
   int rslt = shmdt(base);
   if (rslt == 0) {
-    MemTracker::record_virtual_memory_uncommit((address)base, bytes);
-    MemTracker::record_virtual_memory_release((address)base, bytes);
+    tkr.record((address)base, bytes);
     return true;
   } else {
-   return false;
+    tkr.discard();
+    return false;
   }
 }
 
@@ -6604,11 +6678,11 @@
 }
 
 int os::close(int fd) {
-  RESTARTABLE_RETURN_INT(::close(fd));
+  return ::close(fd);
 }
 
 int os::socket_close(int fd) {
-  RESTARTABLE_RETURN_INT(::close(fd));
+  return ::close(fd);
 }
 
 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
--- a/src/os/solaris/vm/os_solaris.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os/solaris/vm/os_solaris.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -168,6 +168,9 @@
   static int _dev_zero_fd;
   static int get_dev_zero_fd() { return _dev_zero_fd; }
   static void set_dev_zero_fd(int fd) { _dev_zero_fd = fd; }
+  static int commit_memory_impl(char* addr, size_t bytes, bool exec);
+  static int commit_memory_impl(char* addr, size_t bytes,
+                                size_t alignment_hint, bool exec);
   static char* mmap_chunk(char *addr, size_t size, int flags, int prot);
   static char* anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed);
   static bool mpss_sanity_check(bool warn, size_t * page_size);
--- a/src/os/solaris/vm/os_solaris.inline.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os/solaris/vm/os_solaris.inline.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -89,7 +89,7 @@
 
 inline struct dirent* os::readdir(DIR* dirp, dirent* dbuf) {
   assert(dirp != NULL, "just checking");
-#if defined(_LP64) || defined(_GNU_SOURCE)
+#if defined(_LP64) || defined(_GNU_SOURCE) || _FILE_OFFSET_BITS==64
   dirent* p;
   int status;
 
@@ -98,9 +98,9 @@
     return NULL;
   } else
     return p;
-#else  // defined(_LP64) || defined(_GNU_SOURCE)
+#else  // defined(_LP64) || defined(_GNU_SOURCE) || _FILE_OFFSET_BITS==64
   return ::readdir_r(dirp, dbuf);
-#endif // defined(_LP64) || defined(_GNU_SOURCE)
+#endif // defined(_LP64) || defined(_GNU_SOURCE) || _FILE_OFFSET_BITS==64
 }
 
 inline int os::closedir(DIR *dirp) {
--- a/src/os/solaris/vm/perfMemory_solaris.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os/solaris/vm/perfMemory_solaris.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -62,7 +62,7 @@
   }
 
   // commit memory
-  if (!os::commit_memory(mapAddress, size)) {
+  if (!os::commit_memory(mapAddress, size, !ExecMem)) {
     if (PrintMiscellaneous && Verbose) {
       warning("Could not commit PerfData memory\n");
     }
@@ -122,7 +122,7 @@
       addr += result;
     }
 
-    RESTARTABLE(::close(fd), result);
+    result = ::close(fd);
     if (PrintMiscellaneous && Verbose) {
       if (result == OS_ERR) {
         warning("Could not close %s: %s\n", destfile, strerror(errno));
@@ -437,7 +437,7 @@
       addr+=result;
     }
 
-    RESTARTABLE(::close(fd), result);
+    ::close(fd);
 
     // get the user name for the effective user id of the process
     char* user_name = get_user_name(psinfo.pr_euid);
@@ -669,7 +669,7 @@
     if (PrintMiscellaneous && Verbose) {
       warning("could not set shared memory file size: %s\n", strerror(errno));
     }
-    RESTARTABLE(::close(fd), result);
+    ::close(fd);
     return -1;
   }
 
@@ -749,9 +749,7 @@
 
   mapAddress = (char*)::mmap((char*)0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
 
-  // attempt to close the file - restart it if it was interrupted,
-  // but ignore other failures
-  RESTARTABLE(::close(fd), result);
+  result = ::close(fd);
   assert(result != OS_ERR, "could not close file");
 
   if (mapAddress == MAP_FAILED) {
@@ -770,8 +768,7 @@
   (void)::memset((void*) mapAddress, 0, size);
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
-  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
 
   return mapAddress;
 }
@@ -922,9 +919,7 @@
 
   mapAddress = (char*)::mmap((char*)0, size, mmap_prot, MAP_SHARED, fd, 0);
 
-  // attempt to close the file - restart if it gets interrupted,
-  // but ignore other failures
-  RESTARTABLE(::close(fd), result);
+  result = ::close(fd);
   assert(result != OS_ERR, "could not close file");
 
   if (mapAddress == MAP_FAILED) {
@@ -936,8 +931,7 @@
   }
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
-  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
 
   *addr = mapAddress;
   *sizep = size;
--- a/src/os/windows/vm/os_windows.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os/windows/vm/os_windows.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -2524,7 +2524,7 @@
                   addr = (address)((uintptr_t)addr &
                          (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
                   os::commit_memory((char *)addr, thread->stack_base() - addr,
-                                    false );
+                                    !ExecMem);
                   return EXCEPTION_CONTINUE_EXECUTION;
           }
           else
@@ -2875,7 +2875,7 @@
                                 PAGE_READWRITE);
   // If reservation failed, return NULL
   if (p_buf == NULL) return NULL;
-  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
+  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC);
   os::release_memory(p_buf, bytes + chunk_size);
 
   // we still need to round up to a page boundary (in case we are using large pages)
@@ -2941,7 +2941,7 @@
         // need to create a dummy 'reserve' record to match
         // the release.
         MemTracker::record_virtual_memory_reserve((address)p_buf,
-          bytes_to_release, CALLER_PC);
+          bytes_to_release, mtNone, CALLER_PC);
         os::release_memory(p_buf, bytes_to_release);
       }
 #ifdef ASSERT
@@ -2961,9 +2961,10 @@
   // Although the memory is allocated individually, it is returned as one.
   // NMT records it as one block.
   address pc = CALLER_PC;
-  MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, pc);
   if ((flags & MEM_COMMIT) != 0) {
-    MemTracker::record_virtual_memory_commit((address)p_buf, bytes, pc);
+    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc);
+  } else {
+    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc);
   }
 
   // made it this far, success
@@ -3154,8 +3155,7 @@
     char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot);
     if (res != NULL) {
       address pc = CALLER_PC;
-      MemTracker::record_virtual_memory_reserve((address)res, bytes, pc);
-      MemTracker::record_virtual_memory_commit((address)res, bytes, pc);
+      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
     }
 
     return res;
@@ -3164,14 +3164,21 @@
 
 bool os::release_memory_special(char* base, size_t bytes) {
   assert(base != NULL, "Sanity check");
-  // Memory allocated via reserve_memory_special() is committed
-  MemTracker::record_virtual_memory_uncommit((address)base, bytes);
   return release_memory(base, bytes);
 }
 
 void os::print_statistics() {
 }
 
+static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
+  int err = os::get_last_error();
+  char buf[256];
+  size_t buf_len = os::lasterror(buf, sizeof(buf));
+  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
+          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
+          exec, buf_len != 0 ? buf : "<no_error_string>", err);
+}
+
 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
   if (bytes == 0) {
     // Don't bother the OS with noops.
@@ -3186,11 +3193,17 @@
   // is always within a reserve covered by a single VirtualAlloc
   // in that case we can just do a single commit for the requested size
   if (!UseNUMAInterleaving) {
-    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) return false;
+    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
+      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
+      return false;
+    }
     if (exec) {
       DWORD oldprot;
       // Windows doc says to use VirtualProtect to get execute permissions
-      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) return false;
+      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
+        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
+        return false;
+      }
     }
     return true;
   } else {
@@ -3205,12 +3218,20 @@
       MEMORY_BASIC_INFORMATION alloc_info;
       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
-      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, PAGE_READWRITE) == NULL)
+      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
+                       PAGE_READWRITE) == NULL) {
+        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
+                                            exec);)
         return false;
+      }
       if (exec) {
         DWORD oldprot;
-        if (!VirtualProtect(next_alloc_addr, bytes_to_rq, PAGE_EXECUTE_READWRITE, &oldprot))
+        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
+                            PAGE_EXECUTE_READWRITE, &oldprot)) {
+          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
+                                              exec);)
           return false;
+        }
       }
       bytes_remaining -= bytes_to_rq;
       next_alloc_addr += bytes_to_rq;
@@ -3222,7 +3243,24 @@
 
 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                        bool exec) {
-  return commit_memory(addr, size, exec);
+  // alignment_hint is ignored on this OS
+  return pd_commit_memory(addr, size, exec);
+}
+
+void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
+                                  const char* mesg) {
+  assert(mesg != NULL, "mesg must be specified");
+  if (!pd_commit_memory(addr, size, exec)) {
+    warn_fail_commit_memory(addr, size, exec);
+    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
+  }
+}
+
+void os::pd_commit_memory_or_exit(char* addr, size_t size,
+                                  size_t alignment_hint, bool exec,
+                                  const char* mesg) {
+  // alignment_hint is ignored on this OS
+  pd_commit_memory_or_exit(addr, size, exec, mesg);
 }
 
 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
@@ -3240,7 +3278,7 @@
 }
 
 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
-  return os::commit_memory(addr, size);
+  return os::commit_memory(addr, size, !ExecMem);
 }
 
 bool os::remove_stack_guard_pages(char* addr, size_t size) {
@@ -3264,8 +3302,9 @@
 
   // Strangely enough, on Win32 one can change protection only for committed
   // memory; not a big deal anyway, as bytes is less than or equal to 64K
-  if (!is_committed && !commit_memory(addr, bytes, prot == MEM_PROT_RWX)) {
-    fatal("cannot commit protection page");
+  if (!is_committed) {
+    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
+                          "cannot commit protection page");
   }
   // One cannot use os::guard_memory() here, as on Win32 guard pages
   // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
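
On Windows the commit path is VirtualAlloc(MEM_COMMIT) followed, for executable memory, by VirtualProtect, and the new warning reports the code from GetLastError(). A stand-alone sketch of that call sequence and error reporting (Win32 API only, without the NUMA chunking handled in the hunk above):

// Sketch: reserve, commit, and optionally make executable, reporting the
// DOS error code on failure; the same call sequence the patch instruments.
#include <windows.h>
#include <cstdio>

static bool commit(char* addr, size_t bytes, bool exec) {
  if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
    std::fprintf(stderr, "commit(%p, %zu, %d) failed; error=%lu\n",
                 (void*)addr, bytes, exec, GetLastError());
    return false;
  }
  if (exec) {
    DWORD oldprot;
    // Execute permission is added with VirtualProtect, as in the hunk above.
    if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
      std::fprintf(stderr, "VirtualProtect failed; error=%lu\n", GetLastError());
      return false;
    }
  }
  return true;
}

int main() {
  SIZE_T sz = 1 << 16;
  char* base = (char*)VirtualAlloc(NULL, sz, MEM_RESERVE, PAGE_NOACCESS);
  if (base == NULL) return 1;
  if (!commit(base, sz, /*exec=*/false)) return 1;
  base[0] = 42;                        // committed and writable
  VirtualFree(base, 0, MEM_RELEASE);
  return 0;
}
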
--- a/src/os/windows/vm/perfMemory_windows.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os/windows/vm/perfMemory_windows.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,7 +58,7 @@
   }
 
   // commit memory
-  if (!os::commit_memory(mapAddress, size)) {
+  if (!os::commit_memory(mapAddress, size, !ExecMem)) {
     if (PrintMiscellaneous && Verbose) {
       warning("Could not commit PerfData memory\n");
     }
@@ -1498,8 +1498,7 @@
   (void)memset(mapAddress, '\0', size);
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
-  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
 
   return (char*) mapAddress;
 }
@@ -1681,8 +1680,7 @@
   }
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
-  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
 
 
   *addrp = (char*)mapAddress;
@@ -1836,9 +1834,10 @@
     return;
   }
 
+  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
   remove_file_mapping(addr);
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_release((address)addr, bytes);
+  tkr.record((address)addr, bytes);
 }
 
 char* PerfMemory::backing_store_filename() {
--- a/src/os_cpu/linux_sparc/vm/assembler_linux_sparc.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-#include <asm-sparc/traps.h>
-
-void MacroAssembler::read_ccr_trap(Register ccr_save) {
-  // No implementation
-  breakpoint_trap();
-}
-
-void MacroAssembler::write_ccr_trap(Register ccr_save, Register scratch1, Register scratch2) {
-  // No implementation
-  breakpoint_trap();
-}
-
-void MacroAssembler::flush_windows_trap() { trap(SP_TRAP_FWIN); }
-void MacroAssembler::clean_windows_trap() { trap(SP_TRAP_CWIN); }
-
-// Use software breakpoint trap until we figure out how to do this on Linux
-void MacroAssembler::get_psr_trap()       { trap(SP_TRAP_SBPT); }
-void MacroAssembler::set_psr_trap()       { trap(SP_TRAP_SBPT); }
--- a/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -169,7 +169,6 @@
     : "memory");
   return rv;
 #else
-  assert(VM_Version::v9_instructions_work(), "cas only supported on v9");
   volatile jlong_accessor evl, cvl, rv;
   evl.long_value = exchange_value;
   cvl.long_value = compare_value;
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -289,6 +289,16 @@
     }
 #endif // AMD64
 
+#ifndef AMD64
+    // Halt if SI_KERNEL before more crashes get misdiagnosed as Java bugs
+    // This can happen in any running code (currently more frequently in
+    // interpreter code but has been seen in compiled code)
+    if (sig == SIGSEGV && info->si_addr == 0 && info->si_code == SI_KERNEL) {
+      fatal("An irrecoverable SI_KERNEL SIGSEGV has occurred due "
+            "to unstable signal handling in this distribution.");
+    }
+#endif // AMD64
+
     // Handle ALL stack overflow variations here
     if (sig == SIGSEGV) {
       address addr = (address) info->si_addr;
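
The new check reads two fields of the siginfo_t delivered to an SA_SIGINFO handler: si_addr, the faulting address, and si_code, where SI_KERNEL means the signal was raised by the kernel rather than attributed to a specific user-space fault. A minimal sketch of installing such a handler and inspecting those fields (Linux-specific; fprintf in a signal handler is not async-signal-safe and is used here only for illustration):

// Sketch: a SIGSEGV handler installed with SA_SIGINFO so it can inspect
// si_addr and si_code, as the hunk above does.
#include <signal.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>

static void segv_handler(int sig, siginfo_t* info, void* /*ucontext*/) {
  std::fprintf(stderr, "signal %d: si_addr=%p si_code=%d%s\n",
               sig, info->si_addr, info->si_code,
               info->si_code == SI_KERNEL ? " (SI_KERNEL)" : "");
  std::_Exit(1);
}

int main() {
  struct sigaction sa;
  std::memset(&sa, 0, sizeof(sa));
  sa.sa_sigaction = segv_handler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO;
  if (sigaction(SIGSEGV, &sa, NULL) != 0) return 1;

  volatile int* p = (int*)0;   // deliberate null read to trigger SIGSEGV with si_addr == 0
  return *p;
}
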
--- a/src/os_cpu/solaris_sparc/vm/assembler_solaris_sparc.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-#include <sys/trap.h>          // For trap numbers
-#include <v9/sys/psr_compat.h> // For V8 compatibility
-
-void MacroAssembler::read_ccr_trap(Register ccr_save) {
-  // Execute a trap to get the PSR, mask and shift
-  // to get the condition codes.
-  get_psr_trap();
-  nop();
-  set(PSR_ICC, ccr_save);
-  and3(O0, ccr_save, ccr_save);
-  srl(ccr_save, PSR_ICC_SHIFT, ccr_save);
-}
-
-void MacroAssembler::write_ccr_trap(Register ccr_save, Register scratch1, Register scratch2) {
-  // Execute a trap to get the PSR, shift back
-  // the condition codes, mask the condition codes
-  // back into and PSR and trap to write back the
-  // PSR.
-  sll(ccr_save, PSR_ICC_SHIFT, scratch2);
-  get_psr_trap();
-  nop();
-  set(~PSR_ICC, scratch1);
-  and3(O0, scratch1, O0);
-  or3(O0, scratch2, O0);
-  set_psr_trap();
-  nop();
-}
-
-void MacroAssembler::flush_windows_trap() { trap(ST_FLUSH_WINDOWS); }
-void MacroAssembler::clean_windows_trap() { trap(ST_CLEAN_WINDOWS); }
-void MacroAssembler::get_psr_trap()       { trap(ST_GETPSR); }
-void MacroAssembler::set_psr_trap()       { trap(ST_SETPSR); }
--- a/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -60,21 +60,10 @@
 
 #else
 
-extern "C" void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst);
 extern "C" void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst);
 
 inline void Atomic_move_long(volatile jlong* src, volatile jlong* dst) {
-#ifdef COMPILER2
-  // Compiler2 does not support v8, it is used only for v9.
   _Atomic_move_long_v9(src, dst);
-#else
-  // The branch is cheaper then emulated LDD.
-  if (VM_Version::v9_instructions_work()) {
-    _Atomic_move_long_v9(src, dst);
-  } else {
-    _Atomic_move_long_v8(src, dst);
-  }
-#endif
 }
 
 inline jlong Atomic::load(volatile jlong* src) {
@@ -209,7 +198,6 @@
     : "memory");
   return rv;
 #else  //_LP64
-  assert(VM_Version::v9_instructions_work(), "cas only supported on v9");
   volatile jlong_accessor evl, cvl, rv;
   evl.long_value = exchange_value;
   cvl.long_value = compare_value;
@@ -318,7 +306,6 @@
   // Return 64 bit value in %o0
   return _Atomic_cas64((intptr_t)exchange_value, (intptr_t *)dest, (intptr_t)compare_value);
 #else  // _LP64
-  assert (VM_Version::v9_instructions_work(), "only supported on v9");
   // Return 64 bit value in %o0,%o1 by hand
   return _Atomic_casl(exchange_value, dest, compare_value);
 #endif // _LP64
--- a/src/os_cpu/solaris_sparc/vm/solaris_sparc.il	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/os_cpu/solaris_sparc/vm/solaris_sparc.il	Mon Jul 15 11:07:03 2013 +0100
@@ -152,23 +152,6 @@
         .nonvolatile
         .end
 
-  // Support for jlong Atomic::load and Atomic::store on v8.
-  //
-  // void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst)
-  //
-  // Arguments:
-  //      src:  O0
-  //      dest: O1
-  //
-  // Overwrites O2 and O3
-
-        .inline _Atomic_move_long_v8,2
-        .volatile
-        ldd     [%o0], %o2
-        std     %o2, [%o1]
-        .nonvolatile
-        .end
-
   // Support for jlong Atomic::load and Atomic::store on v9.
   //
   // void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst)
--- a/src/share/vm/adlc/formssel.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/adlc/formssel.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -235,6 +235,9 @@
   return false;
 }
 
+bool InstructForm::is_ideal_negD() const {
+  return (_matrule && _matrule->_rChild && strcmp(_matrule->_rChild->_opType, "NegD") == 0);
+}
 
 // Return 'true' if this instruction matches an ideal 'Copy*' node
 int InstructForm::is_ideal_copy() const {
@@ -533,6 +536,12 @@
   if( data_type != Form::none )
     rematerialize = true;
 
+  // Ugly: until a better fix is implemented, disable rematerialization for
+  // NegD nodes because they have proved to be problematic.
+  if (is_ideal_negD()) {
+    return false;
+  }
+
   // Constants
   if( _components.count() == 1 && _components[0]->is(Component::USE_DEF) )
     rematerialize = true;
--- a/src/share/vm/adlc/formssel.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/adlc/formssel.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -147,6 +147,7 @@
   virtual int         is_empty_encoding() const; // _size=0 and/or _insencode empty
   virtual int         is_tls_instruction() const; // tlsLoadP rule or ideal ThreadLocal
   virtual int         is_ideal_copy() const;    // node matches ideal 'Copy*'
+  virtual bool        is_ideal_negD() const;    // node matches ideal 'NegD'
   virtual bool        is_ideal_if()   const;    // node matches ideal 'If'
   virtual bool        is_ideal_fastlock() const; // node matches 'FastLock'
   virtual bool        is_ideal_membar() const;  // node matches ideal 'MemBarXXX'
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3461,6 +3461,14 @@
       preserves_state = true;
       break;
 
+    case vmIntrinsics::_updateCRC32:
+    case vmIntrinsics::_updateBytesCRC32:
+    case vmIntrinsics::_updateByteBufferCRC32:
+      if (!UseCRC32Intrinsics) return false;
+      cantrap = false;
+      preserves_state = true;
+      break;
+
     case vmIntrinsics::_loadFence :
     case vmIntrinsics::_storeFence:
     case vmIntrinsics::_fullFence :
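
The three intrinsic IDs cover java.util.zip.CRC32.update and its byte[]/ByteBuffer bulk variants; with UseCRC32Intrinsics enabled, C1 can recognize them instead of calling out to the library. The checksum involved is the ordinary zlib CRC-32, computed below with zlib itself for reference (this illustrates what the intrinsic accelerates, not the HotSpot stub):

// Sketch: the CRC-32 the intrinsic accelerates is the standard
// zlib/java.util.zip.CRC32 polynomial; computed here with zlib.
// Build with: g++ crc.cpp -lz
#include <zlib.h>
#include <cstdio>
#include <cstring>

int main() {
  const unsigned char data[] = "123456789";
  uLong crc = crc32(0L, Z_NULL, 0);   // initial CRC value
  crc = crc32(crc, data, (uInt)std::strlen((const char*)data));
  std::printf("crc32(\"123456789\") = 0x%08lx\n", (unsigned long)crc);
  // The well-known check value for this input is 0xCBF43926.
  return 0;
}
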
--- a/src/share/vm/c1/c1_IR.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/c1/c1_IR.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -506,7 +506,7 @@
   _loop_map(0, 0),          // initialized later with correct size
   _compilation(c)
 {
-  TRACE_LINEAR_SCAN(2, "***** computing linear-scan block order");
+  TRACE_LINEAR_SCAN(2, tty->print_cr("***** computing linear-scan block order"));
 
   init_visited();
   count_edges(start_block, NULL);
@@ -683,7 +683,7 @@
 }
 
 void ComputeLinearScanOrder::assign_loop_depth(BlockBegin* start_block) {
-  TRACE_LINEAR_SCAN(3, "----- computing loop-depth and weight");
+  TRACE_LINEAR_SCAN(3, tty->print_cr("----- computing loop-depth and weight"));
   init_visited();
 
   assert(_work_list.is_empty(), "work list must be empty before processing");
@@ -868,7 +868,7 @@
 }
 
 void ComputeLinearScanOrder::compute_order(BlockBegin* start_block) {
-  TRACE_LINEAR_SCAN(3, "----- computing final block order");
+  TRACE_LINEAR_SCAN(3, tty->print_cr("----- computing final block order"));
 
   // the start block is always the first block in the linear scan order
   _linear_scan_order = new BlockList(_num_blocks);
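
All three fixes pass a statement, tty->print_cr(...), where a bare string literal was passed before; presumably TRACE_LINEAR_SCAN expands its second argument as code guarded by the trace level, so the string compiled to a no-op expression. A sketch of a macro with that shape, using a stand-in trace level rather than the real c1 flag:

// Sketch of a level-guarded trace macro that expands its argument as a
// statement. With this shape, TRACE(2, "text") silently does nothing,
// while TRACE(2, print_line("text")) prints, mirroring the fix above.
#include <cstdio>

static int TraceLevel = 3;   // stand-in for the real trace-level flag

#define TRACE(level, code)            \
  do {                                \
    if (TraceLevel >= (level)) {      \
      code;                           \
    }                                 \
  } while (0)

static void print_line(const char* msg) { std::printf("%s\n", msg); }

int main() {
  TRACE(2, "***** computing linear-scan block order");               // bare string: no effect
  TRACE(2, print_line("***** computing linear-scan block order"));   // statement: prints
  return 0;
}
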
--- a/src/share/vm/c1/c1_LIR.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/c1/c1_LIR.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -201,23 +201,24 @@
 
 #ifdef ASSERT
   if (!is_pointer() && !is_illegal()) {
+    OprKind kindfield = kind_field(); // Factored out because of compiler bug, see 8002160
     switch (as_BasicType(type_field())) {
     case T_LONG:
-      assert((kind_field() == cpu_register || kind_field() == stack_value) &&
+      assert((kindfield == cpu_register || kindfield == stack_value) &&
              size_field() == double_size, "must match");
       break;
     case T_FLOAT:
       // FP return values can be also in CPU registers on ARM and PPC (softfp ABI)
-      assert((kind_field() == fpu_register || kind_field() == stack_value
-             ARM_ONLY(|| kind_field() == cpu_register)
-             PPC_ONLY(|| kind_field() == cpu_register) ) &&
+      assert((kindfield == fpu_register || kindfield == stack_value
+             ARM_ONLY(|| kindfield == cpu_register)
+             PPC_ONLY(|| kindfield == cpu_register) ) &&
              size_field() == single_size, "must match");
       break;
     case T_DOUBLE:
       // FP return values can be also in CPU registers on ARM and PPC (softfp ABI)
-      assert((kind_field() == fpu_register || kind_field() == stack_value
-             ARM_ONLY(|| kind_field() == cpu_register)
-             PPC_ONLY(|| kind_field() == cpu_register) ) &&
+      assert((kindfield == fpu_register || kindfield == stack_value
+             ARM_ONLY(|| kindfield == cpu_register)
+             PPC_ONLY(|| kindfield == cpu_register) ) &&
              size_field() == double_size, "must match");
       break;
     case T_BOOLEAN:
@@ -229,7 +230,7 @@
     case T_OBJECT:
     case T_METADATA:
     case T_ARRAY:
-      assert((kind_field() == cpu_register || kind_field() == stack_value) &&
+      assert((kindfield == cpu_register || kindfield == stack_value) &&
              size_field() == single_size, "must match");
       break;
 
@@ -429,6 +430,11 @@
   _stub = new ArrayCopyStub(this);
 }
 
+LIR_OpUpdateCRC32::LIR_OpUpdateCRC32(LIR_Opr crc, LIR_Opr val, LIR_Opr res)
+  : LIR_Op(lir_updatecrc32, res, NULL)
+  , _crc(crc)
+  , _val(val) {
+}
 
 //-------------------verify--------------------------
 
@@ -875,6 +881,20 @@
     }
 
 
+// LIR_OpUpdateCRC32
+    case lir_updatecrc32: {
+      assert(op->as_OpUpdateCRC32() != NULL, "must be");
+      LIR_OpUpdateCRC32* opUp = (LIR_OpUpdateCRC32*)op;
+
+      assert(opUp->_crc->is_valid(), "used");          do_input(opUp->_crc);     do_temp(opUp->_crc);
+      assert(opUp->_val->is_valid(), "used");          do_input(opUp->_val);     do_temp(opUp->_val);
+      assert(opUp->_result->is_valid(), "used");       do_output(opUp->_result);
+      assert(opUp->_info == NULL, "no info for LIR_OpUpdateCRC32");
+
+      break;
+    }
+
+
 // LIR_OpLock
     case lir_lock:
     case lir_unlock: {
@@ -1055,6 +1075,10 @@
   masm->emit_code_stub(stub());
 }
 
+void LIR_OpUpdateCRC32::emit_code(LIR_Assembler* masm) {
+  masm->emit_updatecrc32(this);
+}
+
 void LIR_Op0::emit_code(LIR_Assembler* masm) {
   masm->emit_op0(this);
 }
@@ -1762,6 +1786,8 @@
      case lir_dynamic_call:          s = "dynamic";       break;
      // LIR_OpArrayCopy
      case lir_arraycopy:             s = "arraycopy";     break;
+     // LIR_OpUpdateCRC32
+     case lir_updatecrc32:           s = "updatecrc32";   break;
      // LIR_OpLock
      case lir_lock:                  s = "lock";          break;
      case lir_unlock:                s = "unlock";        break;
@@ -1814,6 +1840,13 @@
   tmp()->print(out);     out->print(" ");
 }
 
+// LIR_OpUpdateCRC32
+void LIR_OpUpdateCRC32::print_instr(outputStream* out) const {
+  crc()->print(out);     out->print(" ");
+  val()->print(out);     out->print(" ");
+  result_opr()->print(out); out->print(" ");
+}
+
 // LIR_OpCompareAndSwap
 void LIR_OpCompareAndSwap::print_instr(outputStream* out) const {
   addr()->print(out);      out->print(" ");
--- a/src/share/vm/c1/c1_LIR.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/c1/c1_LIR.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -877,6 +877,7 @@
 class      LIR_OpJavaCall;
 class      LIR_OpRTCall;
 class    LIR_OpArrayCopy;
+class    LIR_OpUpdateCRC32;
 class    LIR_OpLock;
 class    LIR_OpTypeCheck;
 class    LIR_OpCompareAndSwap;
@@ -982,6 +983,9 @@
   , begin_opArrayCopy
       , lir_arraycopy
   , end_opArrayCopy
+  , begin_opUpdateCRC32
+      , lir_updatecrc32
+  , end_opUpdateCRC32
   , begin_opLock
     , lir_lock
     , lir_unlock
@@ -1137,6 +1141,7 @@
   virtual LIR_Op2* as_Op2() { return NULL; }
   virtual LIR_Op3* as_Op3() { return NULL; }
   virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; }
+  virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return NULL; }
   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
   virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
   virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
@@ -1293,6 +1298,25 @@
   void print_instr(outputStream* out) const PRODUCT_RETURN;
 };
 
+// LIR_OpUpdateCRC32
+class LIR_OpUpdateCRC32: public LIR_Op {
+  friend class LIR_OpVisitState;
+
+private:
+  LIR_Opr   _crc;
+  LIR_Opr   _val;
+
+public:
+
+  LIR_OpUpdateCRC32(LIR_Opr crc, LIR_Opr val, LIR_Opr res);
+
+  LIR_Opr crc() const                            { return _crc; }
+  LIR_Opr val() const                            { return _val; }
+
+  virtual void emit_code(LIR_Assembler* masm);
+  virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32()  { return this; }
+  void print_instr(outputStream* out) const PRODUCT_RETURN;
+};
 
 // --------------------------------------------------
 // LIR_Op0
@@ -2212,6 +2236,8 @@
 
   void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); }
 
+  void update_crc32(LIR_Opr crc, LIR_Opr val, LIR_Opr res)  { append(new LIR_OpUpdateCRC32(crc, val, res)); }
+
   void fpop_raw()                                { append(new LIR_Op0(lir_fpop_raw)); }
 
   void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci);
--- a/src/share/vm/c1/c1_LIRAssembler.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/c1/c1_LIRAssembler.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -195,6 +195,7 @@
   void emit_opBranch(LIR_OpBranch* op);
   void emit_opLabel(LIR_OpLabel* op);
   void emit_arraycopy(LIR_OpArrayCopy* op);
+  void emit_updatecrc32(LIR_OpUpdateCRC32* op);
   void emit_opConvert(LIR_OpConvert* op);
   void emit_alloc_obj(LIR_OpAllocObj* op);
   void emit_alloc_array(LIR_OpAllocArray* op);
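
Note: taken together, the c1_LIR.cpp, c1_LIR.hpp and c1_LIRAssembler.hpp hunks above follow the usual recipe for adding a LIR operation: a new opcode in the enum, a visit-state case, an as_OpUpdateCRC32() downcast hook, and an emit_code() override that double-dispatches to the assembler's emit_updatecrc32(). A self-contained sketch of just that double-dispatch shape, with toy Op/Assembler classes rather than the HotSpot ones:

#include <cstdio>

class OpUpdateCRC32;    // toy stand-in for LIR_OpUpdateCRC32

class Assembler {
 public:
  void emit_updatecrc32(OpUpdateCRC32* op) { std::printf("emitting updatecrc32\n"); }
};

class Op {
 public:
  virtual ~Op() {}
  // Each concrete op overrides its own downcast hook; everything else returns NULL.
  virtual OpUpdateCRC32* as_OpUpdateCRC32() { return 0; }
  // First dispatch: each op knows which assembler routine handles it.
  virtual void emit_code(Assembler* masm) = 0;
};

class OpUpdateCRC32 : public Op {
 public:
  virtual OpUpdateCRC32* as_OpUpdateCRC32() { return this; }
  virtual void emit_code(Assembler* masm) { masm->emit_updatecrc32(this); }
};

int main() {
  OpUpdateCRC32 op;
  Assembler masm;
  Op* generic = &op;
  generic->emit_code(&masm);                    // dispatches to emit_updatecrc32
  if (generic->as_OpUpdateCRC32() != 0) {
    std::printf("visitor can recover the concrete op type\n");
  }
  return 0;
}
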
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -2994,6 +2994,12 @@
     do_Reference_get(x);
     break;
 
+  case vmIntrinsics::_updateCRC32:
+  case vmIntrinsics::_updateBytesCRC32:
+  case vmIntrinsics::_updateByteBufferCRC32:
+    do_update_CRC32(x);
+    break;
+
   default: ShouldNotReachHere(); break;
   }
 }
--- a/src/share/vm/c1/c1_LIRGenerator.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/c1/c1_LIRGenerator.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -247,6 +247,7 @@
   void do_NIOCheckIndex(Intrinsic* x);
   void do_FPIntrinsics(Intrinsic* x);
   void do_Reference_get(Intrinsic* x);
+  void do_update_CRC32(Intrinsic* x);
 
   void do_UnsafePrefetch(UnsafePrefetch* x, bool is_store);
 
--- a/src/share/vm/c1/c1_Runtime1.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -299,6 +299,7 @@
 #ifdef TRACE_HAVE_INTRINSICS
   FUNCTION_CASE(entry, TRACE_TIME_METHOD);
 #endif
+  FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
 
 #undef FUNCTION_CASE
 
--- a/src/share/vm/ci/ciObjectFactory.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/ci/ciObjectFactory.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -265,8 +265,6 @@
 ciMetadata* ciObjectFactory::get_metadata(Metadata* key) {
   ASSERT_IN_VM;
 
-  assert(key == NULL || key->is_metadata(), "must be");
-
 #ifdef ASSERT
   if (CIObjectFactoryVerify) {
     Metadata* last = NULL;
--- a/src/share/vm/ci/ciUtilities.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/ci/ciUtilities.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -96,7 +96,7 @@
     CLEAR_PENDING_EXCEPTION;                     \
     return (result);                             \
   }                                              \
-  (0
+  (void)(0
 
 #define KILL_COMPILE_ON_ANY                      \
   THREAD);                                       \
@@ -104,7 +104,7 @@
     fatal("unhandled ci exception");             \
     CLEAR_PENDING_EXCEPTION;                     \
   }                                              \
-(0
+(void)(0
 
 
 inline const char* bool_to_str(bool b) {
--- a/src/share/vm/classfile/dictionary.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/classfile/dictionary.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -555,7 +555,7 @@
                 loader_data->class_loader() == NULL ||
                 loader_data->class_loader()->is_instance(),
                 "checking type of class_loader");
-      e->verify();
+      e->verify(/*check_dictionary*/false);
       probe->verify_protection_domain_set();
       element_count++;
     }
--- a/src/share/vm/classfile/genericSignatures.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/classfile/genericSignatures.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -124,7 +124,7 @@
       fatal(STREAM->parse_error());      \
     }                                   \
     return NULL;                        \
-  } 0
+  } (void)0
 
 #define READ() STREAM->read(); CHECK_FOR_PARSE_ERROR()
 #define PEEK() STREAM->peek(); CHECK_FOR_PARSE_ERROR()
@@ -133,7 +133,7 @@
 #define EXPECTED(c, ch) STREAM->assert_char(c, ch); CHECK_FOR_PARSE_ERROR()
 #define EXPECT_END() STREAM->expect_end(); CHECK_FOR_PARSE_ERROR()
 
-#define CHECK_STREAM STREAM); CHECK_FOR_PARSE_ERROR(); (0
+#define CHECK_STREAM STREAM); CHECK_FOR_PARSE_ERROR(); ((void)0
 
 #ifndef PRODUCT
 void Identifier::print_on(outputStream* str) const {
--- a/src/share/vm/classfile/symbolTable.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/classfile/symbolTable.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -598,6 +598,8 @@
 
 bool StringTable::_needs_rehashing = false;
 
+volatile int StringTable::_parallel_claimed_idx = 0;
+
 // Pick hashing algorithm
 unsigned int StringTable::hash_string(const jchar* s, int len) {
   return use_alternate_hashcode() ? AltHashing::murmur3_32(seed(), s, len) :
@@ -761,8 +763,18 @@
   }
 }
 
-void StringTable::oops_do(OopClosure* f) {
-  for (int i = 0; i < the_table()->table_size(); ++i) {
+void StringTable::buckets_do(OopClosure* f, int start_idx, int end_idx) {
+  const int limit = the_table()->table_size();
+
+  assert(0 <= start_idx && start_idx <= limit,
+         err_msg("start_idx (" INT32_FORMAT ") oob?", start_idx));
+  assert(0 <= end_idx && end_idx <= limit,
+         err_msg("end_idx (" INT32_FORMAT ") oob?", end_idx));
+  assert(start_idx <= end_idx,
+         err_msg("Ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT,
+                 start_idx, end_idx));
+
+  for (int i = start_idx; i < end_idx; i += 1) {
     HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);
     while (entry != NULL) {
       assert(!entry->is_shared(), "CDS not used for the StringTable");
@@ -774,6 +786,27 @@
   }
 }
 
+void StringTable::oops_do(OopClosure* f) {
+  buckets_do(f, 0, the_table()->table_size());
+}
+
+void StringTable::possibly_parallel_oops_do(OopClosure* f) {
+  const int ClaimChunkSize = 32;
+  const int limit = the_table()->table_size();
+
+  for (;;) {
+    // Grab next set of buckets to scan
+    int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize;
+    if (start_idx >= limit) {
+      // End of table
+      break;
+    }
+
+    int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
+    buckets_do(f, start_idx, end_idx);
+  }
+}
+
 void StringTable::verify() {
   for (int i = 0; i < the_table()->table_size(); ++i) {
     HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i);
--- a/src/share/vm/classfile/symbolTable.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/classfile/symbolTable.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -246,12 +246,19 @@
   // Set if one bucket is out of balance due to hash algorithm deficiency
   static bool _needs_rehashing;
 
+  // Claimed high water mark for parallel chunked scanning
+  static volatile int _parallel_claimed_idx;
+
   static oop intern(Handle string_or_null, jchar* chars, int length, TRAPS);
   oop basic_add(int index, Handle string_or_null, jchar* name, int len,
                 unsigned int hashValue, TRAPS);
 
   oop lookup(int index, jchar* chars, int length, unsigned int hashValue);
 
+  // Apply the given oop closure to the entries in the buckets
+  // in the range [start_idx, end_idx).
+  static void buckets_do(OopClosure* f, int start_idx, int end_idx);
+
   StringTable() : Hashtable<oop, mtSymbol>((int)StringTableSize,
                               sizeof (HashtableEntry<oop, mtSymbol>)) {}
 
@@ -277,9 +284,12 @@
     unlink_or_oops_do(cl, NULL);
   }
 
-  // Invoke "f->do_oop" on the locations of all oops in the table.
+  // Serially invoke "f->do_oop" on the locations of all oops in the table.
   static void oops_do(OopClosure* f);
 
+  // Possibly parallel version of the above
+  static void possibly_parallel_oops_do(OopClosure* f);
+
   // Hashing algorithm, used as the hash value used by the
   //     StringTable for bucket selection and comparison (stored in the
   //     HashtableEntry structures).  This is used in the String.intern() method.
@@ -315,5 +325,8 @@
   // Rehash the symbol table if it gets out of balance
   static void rehash_table();
   static bool needs_rehashing() { return _needs_rehashing; }
+
+  // Parallel chunked scanning
+  static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; }
 };
 #endif // SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP
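
Note: StringTable::possibly_parallel_oops_do() above lets multiple GC workers share one scan of the table. Each worker atomically bumps _parallel_claimed_idx by a fixed chunk size, processes the bucket range it claimed via buckets_do(), and stops once the claim index passes the table size, so no bucket is visited twice and no lock is needed (the index is reset with clear_parallel_claimed_index() before a scan). A standalone sketch of that claiming loop, using C++11 std::atomic in place of Atomic::add (build with -std=c++11 -pthread):

#include <algorithm>
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

static const int kTableSize      = 1000;
static const int kClaimChunkSize = 32;

static std::atomic<int> parallel_claimed_idx(0);   // reset to 0 before each scan
static std::atomic<int> buckets_scanned(0);

static void buckets_do(int start_idx, int end_idx) {
  for (int i = start_idx; i < end_idx; i++) buckets_scanned++;   // "visit" bucket i
}

static void possibly_parallel_scan() {
  for (;;) {
    // Grab the next chunk of buckets.  fetch_add returns the old value, hence
    // no "- ClaimChunkSize" correction as with HotSpot's Atomic::add.
    int start_idx = parallel_claimed_idx.fetch_add(kClaimChunkSize);
    if (start_idx >= kTableSize) break;            // end of table
    int end_idx = std::min(kTableSize, start_idx + kClaimChunkSize);
    buckets_do(start_idx, end_idx);
  }
}

int main() {
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; i++) workers.push_back(std::thread(possibly_parallel_scan));
  for (size_t i = 0; i < workers.size(); i++) workers[i].join();
  std::printf("buckets scanned: %d (expected %d)\n", buckets_scanned.load(), kTableSize);
  return 0;
}
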
--- a/src/share/vm/classfile/verifier.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/classfile/verifier.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -86,9 +86,9 @@
 // These macros are used similarly to CHECK macros but also check
 // the status of the verifier and return if that has an error.
 #define CHECK_VERIFY(verifier) \
-  CHECK); if ((verifier)->has_error()) return; (0
+  CHECK); if ((verifier)->has_error()) return; ((void)0
 #define CHECK_VERIFY_(verifier, result) \
-  CHECK_(result)); if ((verifier)->has_error()) return (result); (0
+  CHECK_(result)); if ((verifier)->has_error()) return (result); ((void)0
 
 class TypeOrigin VALUE_OBJ_CLASS_SPEC {
  private:
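
Note: CHECK_VERIFY here, like the ciUtilities and genericSignatures macros changed the same way above, is a "sandwich" macro: it is written as the last argument of a call, closes the call itself, appends an early-return check, and leaves a dangling ((void)0 that the caller's own ')' and ';' complete. Changing the tail from (0 to ((void)0 keeps that final throw-away statement from triggering unused-value warnings on newer compilers. A toy, self-contained version of the idiom (CHECK_TOY, Verifier and verify_something are illustrative names; the real macros also thread a THREAD/CHECK argument through):

#include <cstdio>

struct Verifier {
  bool error;
  bool has_error() const { return error; }
};

// Sandwich macro: used as the last "argument" of a call.  It supplies the
// closing parenthesis itself, adds an early return, and the caller's own ')'
// and ';' turn the trailing ((void)0 into a harmless, warning-free statement.
#define CHECK_TOY(verifier) \
  0); if ((verifier)->has_error()) return; ((void)0

void verify_something(int bci, int dummy) { std::printf("verified bci %d\n", bci); }

void run(Verifier* v) {
  verify_something(42, CHECK_TOY(v));
  // expands to:  verify_something(42, 0); if ((v)->has_error()) return; ((void)0);
  std::printf("no verifier error, continuing\n");
}

int main() {
  Verifier ok = { false };
  run(&ok);
  return 0;
}
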
--- a/src/share/vm/classfile/vmSymbols.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/classfile/vmSymbols.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -771,6 +771,17 @@
    do_name(     decrypt_name,                                      "decrypt")                                           \
    do_signature(byteArray_int_int_byteArray_int_signature,         "([BII[BI)V")                                        \
                                                                                                                         \
+  /* support for java.util.zip */                                                                                       \
+  do_class(java_util_zip_CRC32,           "java/util/zip/CRC32")                                                        \
+  do_intrinsic(_updateCRC32,               java_util_zip_CRC32,   update_name, int2_int_signature,               F_SN)  \
+   do_name(     update_name,                                      "update")                                             \
+  do_intrinsic(_updateBytesCRC32,          java_util_zip_CRC32,   updateBytes_name, updateBytes_signature,       F_SN)  \
+   do_name(     updateBytes_name,                                "updateBytes")                                         \
+   do_signature(updateBytes_signature,                           "(I[BII)I")                                            \
+  do_intrinsic(_updateByteBufferCRC32,     java_util_zip_CRC32,   updateByteBuffer_name, updateByteBuffer_signature, F_SN) \
+   do_name(     updateByteBuffer_name,                           "updateByteBuffer")                                    \
+   do_signature(updateByteBuffer_signature,                      "(IJII)I")                                             \
+                                                                                                                        \
   /* support for sun.misc.Unsafe */                                                                                     \
   do_class(sun_misc_Unsafe,               "sun/misc/Unsafe")                                                            \
                                                                                                                         \
--- a/src/share/vm/code/debugInfo.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/code/debugInfo.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -274,7 +274,7 @@
   Method* read_method() {
     Method* o = (Method*)(code()->metadata_at(read_int()));
     assert(o == NULL ||
-           o->is_metadata(), "meta data only");
+           o->is_metaspace_object(), "meta data only");
     return o;
   }
   ScopeValue* read_object_value();
--- a/src/share/vm/code/dependencies.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/code/dependencies.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -655,8 +655,8 @@
   } else {
     o = _deps->oop_recorder()->metadata_at(i);
   }
-  assert(o == NULL || o->is_metadata(),
-         err_msg("Should be perm " PTR_FORMAT, o));
+  assert(o == NULL || o->is_metaspace_object(),
+         err_msg("Should be metadata " PTR_FORMAT, o));
   return o;
 }
 
@@ -989,7 +989,7 @@
   assert(changes.involves_context(context_type), "irrelevant dependency");
   Klass* new_type = changes.new_type();
 
-  count_find_witness_calls();
+  (void)count_find_witness_calls();
   NOT_PRODUCT(deps_find_witness_singles++);
 
   // Current thread must be in VM (not native mode, as in CI):
--- a/src/share/vm/code/nmethod.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/code/nmethod.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1081,11 +1081,6 @@
       metadata_Relocation* reloc = iter.metadata_reloc();
       reloc->fix_metadata_relocation();
     }
-
-    // There must not be any interfering patches or breakpoints.
-    assert(!(iter.type() == relocInfo::breakpoint_type
-             && iter.breakpoint_reloc()->active()),
-           "no active breakpoint");
   }
 }
 
@@ -2615,7 +2610,8 @@
                       relocation_begin()-1+ip[1]);
       for (; ip < index_end; ip++)
         tty->print_cr("  (%d ?)", ip[0]);
-      tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip++);
+      tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip);
+      ip++;
       tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip);
     }
   }
--- a/src/share/vm/code/relocInfo.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/code/relocInfo.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -338,31 +338,6 @@
   _limit = limit;
 }
 
-
-void PatchingRelocIterator:: prepass() {
-  // turn breakpoints off during patching
-  _init_state = (*this);        // save cursor
-  while (next()) {
-    if (type() == relocInfo::breakpoint_type) {
-      breakpoint_reloc()->set_active(false);
-    }
-  }
-  (RelocIterator&)(*this) = _init_state;        // reset cursor for client
-}
-
-
-void PatchingRelocIterator:: postpass() {
-  // turn breakpoints back on after patching
-  (RelocIterator&)(*this) = _init_state;        // reset cursor again
-  while (next()) {
-    if (type() == relocInfo::breakpoint_type) {
-      breakpoint_Relocation* bpt = breakpoint_reloc();
-      bpt->set_active(bpt->enabled());
-    }
-  }
-}
-
-
 // All the strange bit-encodings are in here.
 // The idea is to encode relocation data which are small integers
 // very efficiently (a single extra halfword).  Larger chunks of
@@ -704,51 +679,6 @@
   _target  = address_from_scaled_offset(offset, base);
 }
 
-
-void breakpoint_Relocation::pack_data_to(CodeSection* dest) {
-  short* p = (short*) dest->locs_end();
-  address point = dest->locs_point();
-
-  *p++ = _bits;
-
-  assert(_target != NULL, "sanity");
-
-  if (internal())  normalize_address(_target, dest);
-
-  jint target_bits =
-    (jint)( internal() ? scaled_offset           (_target, point)
-                       : runtime_address_to_index(_target) );
-  if (settable()) {
-    // save space for set_target later
-    p = add_jint(p, target_bits);
-  } else {
-    p = add_var_int(p, target_bits);
-  }
-
-  for (int i = 0; i < instrlen(); i++) {
-    // put placeholder words until bytes can be saved
-    p = add_short(p, (short)0x7777);
-  }
-
-  dest->set_locs_end((relocInfo*) p);
-}
-
-
-void breakpoint_Relocation::unpack_data() {
-  _bits = live_bits();
-
-  int targetlen = datalen() - 1 - instrlen();
-  jint target_bits = 0;
-  if (targetlen == 0)       target_bits = 0;
-  else if (targetlen == 1)  target_bits = *(data()+1);
-  else if (targetlen == 2)  target_bits = relocInfo::jint_from_data(data()+1);
-  else                      { ShouldNotReachHere(); }
-
-  _target = internal() ? address_from_scaled_offset(target_bits, addr())
-                       : index_to_runtime_address  (target_bits);
-}
-
-
 //// miscellaneous methods
 oop* oop_Relocation::oop_addr() {
   int n = _oop_index;
@@ -933,81 +863,6 @@
   return target;
 }
 
-
-breakpoint_Relocation::breakpoint_Relocation(int kind, address target, bool internal) {
-  bool active    = false;
-  bool enabled   = (kind == initialization);
-  bool removable = (kind != safepoint);
-  bool settable  = (target == NULL);
-
-  int bits = kind;
-  if (enabled)    bits |= enabled_state;
-  if (internal)   bits |= internal_attr;
-  if (removable)  bits |= removable_attr;
-  if (settable)   bits |= settable_attr;
-
-  _bits = bits | high_bit;
-  _target = target;
-
-  assert(this->kind()      == kind,      "kind encoded");
-  assert(this->enabled()   == enabled,   "enabled encoded");
-  assert(this->active()    == active,    "active encoded");
-  assert(this->internal()  == internal,  "internal encoded");
-  assert(this->removable() == removable, "removable encoded");
-  assert(this->settable()  == settable,  "settable encoded");
-}
-
-
-address breakpoint_Relocation::target() const {
-  return _target;
-}
-
-
-void breakpoint_Relocation::set_target(address x) {
-  assert(settable(), "must be settable");
-  jint target_bits =
-    (jint)(internal() ? scaled_offset           (x, addr())
-                      : runtime_address_to_index(x));
-  short* p = &live_bits() + 1;
-  p = add_jint(p, target_bits);
-  assert(p == instrs(), "new target must fit");
-  _target = x;
-}
-
-
-void breakpoint_Relocation::set_enabled(bool b) {
-  if (enabled() == b) return;
-
-  if (b) {
-    set_bits(bits() | enabled_state);
-  } else {
-    set_active(false);          // remove the actual breakpoint insn, if any
-    set_bits(bits() & ~enabled_state);
-  }
-}
-
-
-void breakpoint_Relocation::set_active(bool b) {
-  assert(!b || enabled(), "cannot activate a disabled breakpoint");
-
-  if (active() == b) return;
-
-  // %%% should probably seize a lock here (might not be the right lock)
-  //MutexLockerEx ml_patch(Patching_lock, true);
-  //if (active() == b)  return;         // recheck state after locking
-
-  if (b) {
-    set_bits(bits() | active_state);
-    if (instrlen() == 0)
-      fatal("breakpoints in original code must be undoable");
-    pd_swap_in_breakpoint (addr(), instrs(), instrlen());
-  } else {
-    set_bits(bits() & ~active_state);
-    pd_swap_out_breakpoint(addr(), instrs(), instrlen());
-  }
-}
-
-
 //---------------------------------------------------------------------------------
 // Non-product code
 
--- a/src/share/vm/code/relocInfo.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/code/relocInfo.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -49,9 +49,6 @@
 //    RelocIterator
 //      A StackObj which iterates over the relocations associated with
 //      a range of code addresses.  Can be used to operate a copy of code.
-//    PatchingRelocIterator
-//      Specialized subtype of RelocIterator which removes breakpoints
-//      temporarily during iteration, then restores them.
 //    BoundRelocation
 //      An _internal_ type shared by packers and unpackers of relocations.
 //      It pastes together a RelocationHolder with some pointers into
@@ -204,15 +201,6 @@
 //   immediate field must not straddle a unit of memory coherence.
 //   //%note reloc_3
 //
-// relocInfo::breakpoint_type -- a conditional breakpoint in the code
-//   Value:  none
-//   Instruction types: any whatsoever
-//   Data:  [b [T]t  i...]
-//   The b is a bit-packed word representing the breakpoint's attributes.
-//   The t is a target address which the breakpoint calls (when it is enabled).
-//   The i... is a place to store one or two instruction words overwritten
-//   by a trap, so that the breakpoint may be subsequently removed.
-//
 // relocInfo::static_stub_type -- an extra stub for each static_call_type
 //   Value:  none
 //   Instruction types: a virtual call:  { set_oop; jump; }
@@ -271,8 +259,8 @@
     section_word_type       =  9, // internal, but a cross-section reference
     poll_type               = 10, // polling instruction for safepoints
     poll_return_type        = 11, // polling instruction for safepoints at return
-    breakpoint_type         = 12, // an initialization barrier or safepoint
-    metadata_type           = 13, // metadata that used to be oops
+    metadata_type           = 12, // metadata that used to be oops
+    yet_unused_type_1       = 13, // Still unused
     yet_unused_type_2       = 14, // Still unused
     data_prefix_tag         = 15, // tag for a prefix (carries data arguments)
     type_mask               = 15  // A mask which selects only the above values
@@ -312,7 +300,6 @@
     visitor(internal_word) \
     visitor(poll) \
     visitor(poll_return) \
-    visitor(breakpoint) \
     visitor(section_word) \
 
 
@@ -454,7 +441,7 @@
  public:
   enum {
     // Conservatively large estimate of maximum length (in shorts)
-    // of any relocation record (probably breakpoints are largest).
+    // of any relocation record.
     // Extended format is length prefix, data words, and tag/offset suffix.
     length_limit       = 1 + 1 + (3*BytesPerWord/BytesPerShort) + 1,
     have_format        = format_width > 0
@@ -571,8 +558,6 @@
 
   void initialize(nmethod* nm, address begin, address limit);
 
-  friend class PatchingRelocIterator;
-  // make an uninitialized one, for PatchingRelocIterator:
   RelocIterator() { initialize_misc(); }
 
  public:
@@ -779,9 +764,6 @@
   void       pd_verify_data_value    (address x, intptr_t off) { pd_set_data_value(x, off, true); }
   address    pd_call_destination     (address orig_addr = NULL);
   void       pd_set_call_destination (address x);
-  void       pd_swap_in_breakpoint   (address x, short* instrs, int instrlen);
-  void       pd_swap_out_breakpoint  (address x, short* instrs, int instrlen);
-  static int pd_breakpoint_size      ();
 
   // this extracts the address of an address in the code stream instead of the reloc data
   address* pd_address_in_code       ();
@@ -1302,87 +1284,6 @@
   void     fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest);
 };
 
-
-class breakpoint_Relocation : public Relocation {
-  relocInfo::relocType type() { return relocInfo::breakpoint_type; }
-
-  enum {
-    // attributes which affect the interpretation of the data:
-    removable_attr = 0x0010,   // buffer [i...] allows for undoing the trap
-    internal_attr  = 0x0020,   // the target is an internal addr (local stub)
-    settable_attr  = 0x0040,   // the target is settable
-
-    // states which can change over time:
-    enabled_state  = 0x0100,   // breakpoint must be active in running code
-    active_state   = 0x0200,   // breakpoint instruction actually in code
-
-    kind_mask      = 0x000F,   // mask for extracting kind
-    high_bit       = 0x4000    // extra bit which is always set
-  };
-
- public:
-  enum {
-    // kinds:
-    initialization = 1,
-    safepoint      = 2
-  };
-
-  // If target is NULL, 32 bits are reserved for a later set_target().
-  static RelocationHolder spec(int kind, address target = NULL, bool internal_target = false) {
-    RelocationHolder rh = newHolder();
-    new(rh) breakpoint_Relocation(kind, target, internal_target);
-    return rh;
-  }
-
- private:
-  // We require every bits value to NOT to fit into relocInfo::datalen_width,
-  // because we are going to actually store state in the reloc, and so
-  // cannot allow it to be compressed (and hence copied by the iterator).
-
-  short   _bits;                  // bit-encoded kind, attrs, & state
-  address _target;
-
-  breakpoint_Relocation(int kind, address target, bool internal_target);
-
-  friend class RelocIterator;
-  breakpoint_Relocation() { }
-
-  short    bits()       const { return _bits; }
-  short&   live_bits()  const { return data()[0]; }
-  short*   instrs()     const { return data() + datalen() - instrlen(); }
-  int      instrlen()   const { return removable() ? pd_breakpoint_size() : 0; }
-
-  void set_bits(short x) {
-    assert(live_bits() == _bits, "must be the only mutator of reloc info");
-    live_bits() = _bits = x;
-  }
-
- public:
-  address  target()     const;
-  void set_target(address x);
-
-  int  kind()           const { return  bits() & kind_mask; }
-  bool enabled()        const { return (bits() &  enabled_state) != 0; }
-  bool active()         const { return (bits() &   active_state) != 0; }
-  bool internal()       const { return (bits() &  internal_attr) != 0; }
-  bool removable()      const { return (bits() & removable_attr) != 0; }
-  bool settable()       const { return (bits() &  settable_attr) != 0; }
-
-  void set_enabled(bool b);     // to activate, you must also say set_active
-  void set_active(bool b);      // actually inserts bpt (must be enabled 1st)
-
-  // data is packed as 16 bits, followed by the target (1 or 2 words), followed
-  // if necessary by empty storage for saving away original instruction bytes.
-  void pack_data_to(CodeSection* dest);
-  void unpack_data();
-
-  // during certain operations, breakpoints must be out of the way:
-  void fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
-    assert(!active(), "cannot perform relocation on enabled breakpoints");
-  }
-};
-
-
 // We know all the xxx_Relocation classes, so now we can define these:
 #define EACH_CASE(name)                                         \
 inline name##_Relocation* RelocIterator::name##_reloc() {       \
@@ -1401,25 +1302,4 @@
   initialize(nm, begin, limit);
 }
 
-// if you are going to patch code, you should use this subclass of
-// RelocIterator
-class PatchingRelocIterator : public RelocIterator {
- private:
-  RelocIterator _init_state;
-
-  void prepass();               // deactivates all breakpoints
-  void postpass();              // reactivates all enabled breakpoints
-
-  // do not copy these puppies; it would have unpredictable side effects
-  // these are private and have no bodies defined because they should not be called
-  PatchingRelocIterator(const RelocIterator&);
-  void        operator=(const RelocIterator&);
-
- public:
-  PatchingRelocIterator(nmethod* nm, address begin = NULL, address limit = NULL)
-    : RelocIterator(nm, begin, limit)                { prepass();  }
-
-  ~PatchingRelocIterator()                           { postpass(); }
-};
-
 #endif // SHARE_VM_CODE_RELOCINFO_HPP
--- a/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -152,12 +152,9 @@
     if (card_num < _committed_max_card_num) {
       count = (uint) _card_counts[card_num];
       if (count < G1ConcRSHotCardLimit) {
-        _card_counts[card_num] += 1;
+        _card_counts[card_num] =
+          (jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit));
       }
-      assert(_card_counts[card_num] <= G1ConcRSHotCardLimit,
-             err_msg("Refinement count overflow? "
-                     "new count: "UINT32_FORMAT,
-                     (uint) _card_counts[card_num]));
     }
   }
   return count;
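
Note: the replacement above turns an unchecked jubyte increment into a saturating one: the count is widened, bumped, clamped to G1ConcRSHotCardLimit with MIN2, and narrowed back, so the stored count can no longer be pushed past the limit (presumably what the removed assert could trip on when refinement threads raced between the limit check and the store). In standalone form (illustrative names, not the G1 ones):

#include <algorithm>
#include <cstdint>
#include <cstdio>

static const unsigned kHotCardLimit = 4;     // plays the role of G1ConcRSHotCardLimit

// Saturating bump of a one-byte card count: widen, add, clamp, narrow.
static uint8_t add_card_count(uint8_t current) {
  return (uint8_t)std::min<unsigned>((unsigned)current + 1u, kHotCardLimit);
}

int main() {
  uint8_t count = 0;
  for (int i = 0; i < 300; i++) count = add_card_count(count);   // would wrap a plain byte counter
  std::printf("count = %u\n", (unsigned)count);                  // stays pinned at kHotCardLimit
  return 0;
}
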
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -47,7 +47,7 @@
     JavaThread* jt = (JavaThread*)thr;
     jt->satb_mark_queue().enqueue(pre_val);
   } else {
-    MutexLocker x(Shared_SATB_Q_lock);
+    MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
     JavaThread::satb_mark_queue_set().shared_satb_queue()->enqueue(pre_val);
   }
 }
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -798,7 +798,7 @@
     if (!g1->is_obj_dead_cond(obj, this, vo)) {
       if (obj->is_oop()) {
         Klass* klass = obj->klass();
-        if (!klass->is_metadata()) {
+        if (!klass->is_metaspace_object()) {
           gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                  "not metadata", klass, obj);
           *failures = true;
--- a/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -70,6 +70,17 @@
   _virtual_space = vs;
 }
 
+void ASPSOldGen::initialize_work(const char* perf_data_name, int level) {
+
+  PSOldGen::initialize_work(perf_data_name, level);
+
+  // The old gen can grow to gen_size_limit().  _reserved reflects only
+  // the current maximum that can be committed.
+  assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");
+
+  initialize_performance_counters(perf_data_name, level);
+}
+
 void ASPSOldGen::reset_after_change() {
   _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                         (HeapWord*)virtual_space()->high_boundary());
--- a/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -50,6 +50,8 @@
   size_t max_gen_size()                 { return _reserved.byte_size(); }
   void set_gen_size_limit(size_t v)     { _gen_size_limit = v; }
 
+  virtual void initialize_work(const char* perf_data_name, int level);
+
   // After a shrink or expand reset the generation
   void reset_after_change();
 
--- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -565,11 +565,9 @@
     if(new_start_aligned < new_end_for_commit) {
       MemRegion new_committed =
         MemRegion(new_start_aligned, new_end_for_commit);
-      if (!os::commit_memory((char*)new_committed.start(),
-                             new_committed.byte_size())) {
-        vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR,
-                              "card table expansion");
-      }
+      os::commit_memory_or_exit((char*)new_committed.start(),
+                                new_committed.byte_size(), !ExecMem,
+                                "card table expansion");
     }
     result = true;
   } else if (new_start_aligned > cur_committed.start()) {
--- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1250,14 +1250,13 @@
                   avg_promoted()->deviation());
     }
 
-    gclog_or_tty->print( "  avg_promoted_padded_avg: %f"
+    gclog_or_tty->print_cr( "  avg_promoted_padded_avg: %f"
                 "  avg_pretenured_padded_avg: %f"
                 "  tenuring_thresh: %d"
                 "  target_size: " SIZE_FORMAT,
                 avg_promoted()->padded_average(),
                 _avg_pretenured->padded_average(),
                 tenuring_threshold, target_size);
-    tty->cr();
   }
 
   set_survivor_size(target_size);
@@ -1279,7 +1278,7 @@
   avg_promoted()->sample(promoted + _avg_pretenured->padded_average());
 
   if (PrintAdaptiveSizePolicy) {
-    gclog_or_tty->print(
+    gclog_or_tty->print_cr(
                   "AdaptiveSizePolicy::update_averages:"
                   "  survived: "  SIZE_FORMAT
                   "  promoted: "  SIZE_FORMAT
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -110,7 +110,7 @@
   virtual void initialize(ReservedSpace rs, size_t alignment,
                   const char* perf_data_name, int level);
   void initialize_virtual_space(ReservedSpace rs, size_t alignment);
-  void initialize_work(const char* perf_data_name, int level);
+  virtual void initialize_work(const char* perf_data_name, int level);
   virtual void initialize_performance_counters(const char* perf_data_name, int level);
 
   MemRegion reserved() const                { return _reserved; }
--- a/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -101,7 +101,8 @@
   }
 
   char* const base_addr = committed_high_addr();
-  bool result = special() || os::commit_memory(base_addr, bytes, alignment());
+  bool result = special() ||
+         os::commit_memory(base_addr, bytes, alignment(), !ExecMem);
   if (result) {
     _committed_high_addr += bytes;
   }
@@ -154,7 +155,7 @@
   if (tmp_bytes > 0) {
     char* const commit_base = committed_high_addr();
     if (other_space->special() ||
-        os::commit_memory(commit_base, tmp_bytes, alignment())) {
+        os::commit_memory(commit_base, tmp_bytes, alignment(), !ExecMem)) {
       // Reduce the reserved region in the other space.
       other_space->set_reserved(other_space->reserved_low_addr() + tmp_bytes,
                                 other_space->reserved_high_addr(),
@@ -269,7 +270,8 @@
   }
 
   char* const base_addr = committed_low_addr() - bytes;
-  bool result = special() || os::commit_memory(base_addr, bytes, alignment());
+  bool result = special() ||
+         os::commit_memory(base_addr, bytes, alignment(), !ExecMem);
   if (result) {
     _committed_low_addr -= bytes;
   }
@@ -322,7 +324,7 @@
   if (tmp_bytes > 0) {
     char* const commit_base = committed_low_addr() - tmp_bytes;
     if (other_space->special() ||
-        os::commit_memory(commit_base, tmp_bytes, alignment())) {
+        os::commit_memory(commit_base, tmp_bytes, alignment(), !ExecMem)) {
       // Reduce the reserved region in the other space.
       other_space->set_reserved(other_space->reserved_low_addr(),
                                 other_space->reserved_high_addr() - tmp_bytes,
--- a/src/share/vm/interpreter/abstractInterpreter.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/interpreter/abstractInterpreter.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -102,6 +102,9 @@
     java_lang_math_pow,                                         // implementation of java.lang.Math.pow   (x,y)
     java_lang_math_exp,                                         // implementation of java.lang.Math.exp   (x)
     java_lang_ref_reference_get,                                // implementation of java.lang.ref.Reference.get()
+    java_util_zip_CRC32_update,                                 // implementation of java.util.zip.CRC32.update()
+    java_util_zip_CRC32_updateBytes,                            // implementation of java.util.zip.CRC32.updateBytes()
+    java_util_zip_CRC32_updateByteBuffer,                       // implementation of java.util.zip.CRC32.updateByteBuffer()
     number_of_method_entries,
     invalid = -1
   };
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -481,9 +481,9 @@
     // So we have a second version of the assertion which handles the case where EnableInvokeDynamic was
     // switched off because of the wrong classes.
     if (EnableInvokeDynamic || FLAG_IS_CMDLINE(EnableInvokeDynamic)) {
-      assert(abs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
+      assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
     } else {
-      const int extra_stack_entries = Method::extra_stack_entries_for_indy;
+      const int extra_stack_entries = Method::extra_stack_entries_for_jsr292;
       assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + extra_stack_entries
                                                                                                + 1), "bad stack limit");
     }
@@ -1581,7 +1581,7 @@
 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra)                                \
       {                                                                               \
           ARRAY_INTRO(-2);                                                            \
-          extra;                                                                      \
+          (void)extra;                                                                \
           SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \
                            -2);                                                       \
           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                                      \
@@ -1592,8 +1592,8 @@
       {                                                                                    \
           ARRAY_INTRO(-2);                                                                 \
           SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \
-          extra;                                                                           \
-          UPDATE_PC_AND_CONTINUE(1);                                            \
+          (void)extra;                                                                     \
+          UPDATE_PC_AND_CONTINUE(1);                                                       \
       }
 
       CASE(_iaload):
@@ -1617,7 +1617,7 @@
 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra)                            \
       {                                                                              \
           ARRAY_INTRO(-3);                                                           \
-          extra;                                                                     \
+          (void)extra;                                                               \
           *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);                                     \
       }
@@ -1626,7 +1626,7 @@
 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra)                                    \
       {                                                                              \
           ARRAY_INTRO(-4);                                                           \
-          extra;                                                                     \
+          (void)extra;                                                               \
           *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4);                                     \
       }
@@ -2233,7 +2233,7 @@
         }
 
         Method* method = cache->f1_as_method();
-        VERIFY_OOP(method);
+        if (VerifyOops) method->verify();
 
         if (cache->has_appendix()) {
           ConstantPool* constants = METHOD->constants();
@@ -2265,8 +2265,7 @@
         }
 
         Method* method = cache->f1_as_method();
-
-        VERIFY_OOP(method);
+        if (VerifyOops) method->verify();
 
         if (cache->has_appendix()) {
           ConstantPool* constants = METHOD->constants();
--- a/src/share/vm/interpreter/interpreter.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/interpreter/interpreter.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -195,6 +195,17 @@
     return kind;
   }
 
+#ifndef CC_INTERP
+  if (UseCRC32Intrinsics && m->is_native()) {
+    // Use optimized stub code for CRC32 native methods.
+    switch (m->intrinsic_id()) {
+      case vmIntrinsics::_updateCRC32            : return java_util_zip_CRC32_update;
+      case vmIntrinsics::_updateBytesCRC32       : return java_util_zip_CRC32_updateBytes;
+      case vmIntrinsics::_updateByteBufferCRC32  : return java_util_zip_CRC32_updateByteBuffer;
+    }
+  }
+#endif
+
   // Native method?
   // Note: This test must come _before_ the test for intrinsic
   //       methods. See also comments below.
@@ -297,6 +308,9 @@
     case java_lang_math_sqrt    : tty->print("java_lang_math_sqrt"    ); break;
     case java_lang_math_log     : tty->print("java_lang_math_log"     ); break;
     case java_lang_math_log10   : tty->print("java_lang_math_log10"   ); break;
+    case java_util_zip_CRC32_update           : tty->print("java_util_zip_CRC32_update"); break;
+    case java_util_zip_CRC32_updateBytes      : tty->print("java_util_zip_CRC32_updateBytes"); break;
+    case java_util_zip_CRC32_updateByteBuffer : tty->print("java_util_zip_CRC32_updateByteBuffer"); break;
     default:
       if (kind >= method_handle_invoke_FIRST &&
           kind <= method_handle_invoke_LAST) {
--- a/src/share/vm/interpreter/templateInterpreter.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/interpreter/templateInterpreter.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -373,6 +373,12 @@
   method_entry(java_lang_math_pow  )
   method_entry(java_lang_ref_reference_get)
 
+  if (UseCRC32Intrinsics) {
+    method_entry(java_util_zip_CRC32_update)
+    method_entry(java_util_zip_CRC32_updateBytes)
+    method_entry(java_util_zip_CRC32_updateByteBuffer)
+  }
+
   initialize_method_handle_entries();
 
   // all native method kinds (must be one contiguous block)
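
Note: the CRC32 changes above (vmSymbols, the C1 plumbing, and the interpreter entries) only wire java.util.zip.CRC32.update/updateBytes/updateByteBuffer up to accelerated stubs when UseCRC32Intrinsics is set; the value those stubs must produce is the ordinary reflected CRC-32 with polynomial 0xEDB88320. For reference, a minimal table-driven C++ version of that computation (not the HotSpot stub, which typically uses hardware CRC instructions):

#include <cstdint>
#include <cstdio>
#include <cstring>

static uint32_t crc_table[256];

static void init_crc_table() {
  for (uint32_t n = 0; n < 256; n++) {
    uint32_t c = n;
    for (int k = 0; k < 8; k++)
      c = (c & 1) ? 0xEDB88320u ^ (c >> 1) : (c >> 1);
    crc_table[n] = c;
  }
}

// Roughly one updateBytes call: crc is the current (non-inverted) CRC value.
static uint32_t update_bytes_crc32(uint32_t crc, const uint8_t* buf, size_t len) {
  uint32_t c = crc ^ 0xFFFFFFFFu;
  for (size_t i = 0; i < len; i++)
    c = crc_table[(c ^ buf[i]) & 0xFFu] ^ (c >> 8);
  return c ^ 0xFFFFFFFFu;
}

int main() {
  init_crc_table();
  const char* msg = "123456789";
  uint32_t crc = update_bytes_crc32(0, (const uint8_t*)msg, std::strlen(msg));
  std::printf("CRC-32(\"123456789\") = 0x%08X\n", crc);   // standard check value 0xCBF43926
  return 0;
}
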
--- a/src/share/vm/memory/allocation.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/memory/allocation.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -71,13 +71,6 @@
   return MetaspaceShared::is_in_shared_space(this);
 }
 
-bool MetaspaceObj::is_metadata() const {
-  // GC Verify checks use this in guarantees.
-  // TODO: either replace them with is_metaspace_object() or remove them.
-  // is_metaspace_object() is slower than this test.  This test doesn't
-  // seem very useful for metaspace objects anymore though.
-  return !Universe::heap()->is_in_reserved(this);
-}
 
 bool MetaspaceObj::is_metaspace_object() const {
   return Metaspace::contains((void*)this);
--- a/src/share/vm/memory/allocation.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/memory/allocation.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -264,7 +264,6 @@
 
 class MetaspaceObj {
  public:
-  bool is_metadata() const;
   bool is_metaspace_object() const;  // more specific test but slower
   bool is_shared() const;
   void print_address_on(outputStream* st) const;  // nonvirtual address printing
@@ -643,8 +642,15 @@
 #define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
   (type*) resource_allocate_bytes(thread, (size) * sizeof(type))
 
+#define NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(thread, type, size)\
+  (type*) resource_allocate_bytes(thread, (size) * sizeof(type), AllocFailStrategy::RETURN_NULL)
+
 #define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\
-  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type) )
+  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type))
+
+#define REALLOC_RESOURCE_ARRAY_RETURN_NULL(type, old, old_size, new_size)\
+  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type),\
+                                    (new_size) * sizeof(type), AllocFailStrategy::RETURN_NULL)
 
 #define FREE_RESOURCE_ARRAY(type, old, size)\
   resource_free_bytes((char*)(old), (size) * sizeof(type))
@@ -655,28 +661,40 @@
 #define NEW_RESOURCE_OBJ(type)\
   NEW_RESOURCE_ARRAY(type, 1)
 
+#define NEW_RESOURCE_OBJ_RETURN_NULL(type)\
+  NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1)
+
+#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\
+  (type*) AllocateHeap(size * sizeof(type), memflags, pc, allocfail)
+
+#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
+  (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))
+
 #define NEW_C_HEAP_ARRAY(type, size, memflags)\
   (type*) (AllocateHeap((size) * sizeof(type), memflags))
 
+#define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\
+  NEW_C_HEAP_ARRAY3(type, size, memflags, pc, AllocFailStrategy::RETURN_NULL)
+
+#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
+  NEW_C_HEAP_ARRAY3(type, size, memflags, (address)0, AllocFailStrategy::RETURN_NULL)
+
 #define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
   (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags))
 
+#define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\
+   (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))
+
 #define FREE_C_HEAP_ARRAY(type, old, memflags) \
   FreeHeap((char*)(old), memflags)
 
-#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
-  (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))
-
-#define REALLOC_C_HEAP_ARRAY2(type, old, size, memflags, pc)\
-  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, pc))
-
-#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)         \
-  (type*) AllocateHeap(size * sizeof(type), memflags, pc, allocfail)
-
 // allocate type in heap without calling ctor
 #define NEW_C_HEAP_OBJ(type, memflags)\
   NEW_C_HEAP_ARRAY(type, 1, memflags)
 
+#define NEW_C_HEAP_OBJ_RETURN_NULL(type, memflags)\
+  NEW_C_HEAP_ARRAY_RETURN_NULL(type, 1, memflags)
+
 // deallocate obj of type in heap without calling dtor
 #define FREE_C_HEAP_OBJ(objname, memflags)\
   FreeHeap((char*)objname, memflags);
@@ -721,13 +739,21 @@
 // is set so that we always use malloc except for Solaris where we set the
 // limit to get mapped memory.
 template <class E, MEMFLAGS F>
-class ArrayAllocator : StackObj {
+class ArrayAllocator VALUE_OBJ_CLASS_SPEC {
   char* _addr;
   bool _use_malloc;
   size_t _size;
+  bool _free_in_destructor;
  public:
-  ArrayAllocator() : _addr(NULL), _use_malloc(false), _size(0) { }
-  ~ArrayAllocator() { free(); }
+  ArrayAllocator(bool free_in_destructor = true) :
+    _addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { }
+
+  ~ArrayAllocator() {
+    if (_free_in_destructor) {
+      free();
+    }
+  }
+
   E* allocate(size_t length);
   void free();
 };
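
Note: the ArrayAllocator change above makes freeing in the destructor optional, so a caller can let the object hand its memory off instead of always reclaiming it at scope exit. The same idiom in a standalone, malloc-based toy (not the HotSpot class, which chooses between malloc and mapped memory):

#include <cstdio>
#include <cstdlib>

class ScratchBuffer {
  char*  _addr;
  size_t _size;
  bool   _free_in_destructor;
 public:
  explicit ScratchBuffer(bool free_in_destructor = true)
    : _addr(NULL), _size(0), _free_in_destructor(free_in_destructor) {}

  ~ScratchBuffer() {
    if (_free_in_destructor) {
      free();
    }
  }

  char* allocate(size_t size) {
    _addr = (char*)std::malloc(size);
    _size = size;
    return _addr;
  }

  void free() {
    std::free(_addr);    // free(NULL) is a no-op, so this is safe unconditionally
    _addr = NULL;
    _size = 0;
  }
};

int main() {
  // Scoped use: freed automatically when the allocator goes out of scope.
  { ScratchBuffer a; a.allocate(64); }
  // Hand-off use: the caller keeps the memory and frees it explicitly later.
  ScratchBuffer b(false);
  char* p = b.allocate(64);
  std::printf("kept %p past the allocator's scope\n", (void*)p);
  std::free(p);
  return 0;
}
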
--- a/src/share/vm/memory/allocation.inline.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/memory/allocation.inline.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -146,10 +146,7 @@
     vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)");
   }
 
-  bool success = os::commit_memory(_addr, _size, false /* executable */);
-  if (!success) {
-    vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (commit)");
-  }
+  os::commit_memory_or_exit(_addr, _size, !ExecMem, "Allocator (commit)");
 
   return (E*)_addr;
 }
--- a/src/share/vm/memory/cardTableModRefBS.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/memory/cardTableModRefBS.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -110,11 +110,8 @@
   jbyte* guard_card = &_byte_map[_guard_index];
   uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
   _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
-  if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
-    // Do better than this for Merlin
-    vm_exit_out_of_memory(_page_size, OOM_MMAP_ERROR, "card table last card");
-  }
-
+  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
+                            !ExecMem, "card table last card");
   *guard_card = last_card;
 
    _lowest_non_clean =
@@ -312,12 +309,9 @@
         MemRegion(cur_committed.end(), new_end_for_commit);
 
       assert(!new_committed.is_empty(), "Region should not be empty here");
-      if (!os::commit_memory((char*)new_committed.start(),
-                             new_committed.byte_size(), _page_size)) {
-        // Do better than this for Merlin
-        vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR,
-                "card table expansion");
-      }
+      os::commit_memory_or_exit((char*)new_committed.start(),
+                                new_committed.byte_size(), _page_size,
+                                !ExecMem, "card table expansion");
     // Use new_end_aligned (as opposed to new_end_for_commit) because
     // the cur_committed region may include the guard region.
     } else if (new_end_aligned < cur_committed.end()) {
@@ -418,7 +412,7 @@
   }
   // Touch the last card of the covered region to show that it
   // is committed (or SEGV).
-  debug_only(*byte_for(_covered[ind].last());)
+  debug_only((void) (*byte_for(_covered[ind].last()));)
   debug_only(verify_guard();)
 }
 
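
The debug_only change in the hunk above only wraps the card dereference in a cast to void, keeping the touch-the-byte side effect while telling the compiler that the loaded value is deliberately discarded. A tiny standalone illustration of the same idiom:

    // A read performed only for its side effect; the (void) cast documents
    // that the resulting value is discarded on purpose, which keeps compilers
    // that warn about unused expression results quiet.
    static volatile unsigned char last_card = 0;

    int main() {
      (void)(last_card);   // "touch" the byte, ignore the value
      return 0;
    }
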
--- a/src/share/vm/memory/filemap.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/memory/filemap.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -549,3 +549,13 @@
 
   return false;
 }
+
+void FileMapInfo::print_shared_spaces() {
+  gclog_or_tty->print_cr("Shared Spaces:");
+  for (int i = 0; i < MetaspaceShared::n_regions; i++) {
+    struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
+    gclog_or_tty->print("  %s " INTPTR_FORMAT "-" INTPTR_FORMAT,
+                        shared_region_name[i],
+                        si->_base, si->_base + si->_used);
+  }
+}
--- a/src/share/vm/memory/filemap.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/memory/filemap.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -149,6 +149,7 @@
 
   // Return true if given address is in the mapped shared space.
   bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
+  void print_shared_spaces() NOT_CDS_RETURN;
 };
 
 #endif // SHARE_VM_MEMORY_FILEMAP_HPP
--- a/src/share/vm/memory/heapInspection.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/memory/heapInspection.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -157,7 +157,6 @@
 }
 
 uint KlassInfoTable::hash(const Klass* p) {
-  assert(p->is_metadata(), "all klasses are metadata");
   return (uint)(((uintptr_t)p - (uintptr_t)_ref) >> 2);
 }
 
--- a/src/share/vm/memory/metaspace.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/memory/metaspace.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -70,7 +70,7 @@
   SpecializedChunk = 128,
   ClassSmallChunk = 256,
   SmallChunk = 512,
-  ClassMediumChunk = 1 * K,
+  ClassMediumChunk = 4 * K,
   MediumChunk = 8 * K,
   HumongousChunkGranularity = 8
 };
@@ -580,7 +580,6 @@
   // Number of small chunks to allocate to a manager
   // If class space manager, small chunks are unlimited
   static uint const _small_chunk_limit;
-  bool has_small_chunk_limit() { return !vs_list()->is_class(); }
 
   // Sum of all space in allocated chunks
   size_t _allocated_blocks_words;
@@ -1298,13 +1297,18 @@
 
 bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
 
-  size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
   // If the user wants a limit, impose one.
-  size_t max_metaspace_size_bytes = MaxMetaspaceSize;
-  size_t metaspace_size_bytes = MetaspaceSize;
-  if (!FLAG_IS_DEFAULT(MaxMetaspaceSize) &&
-      MetaspaceAux::reserved_in_bytes() >= MaxMetaspaceSize) {
-    return false;
+  // The reason for someone using this flag is to limit reserved space.  So
+  // for non-class virtual space, compare against virtual spaces that are reserved.
+  // For class virtual space, we only compare against the committed space, not
+  // reserved space, because this is a larger space prereserved for compressed
+  // class pointers.
+  if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
+    size_t real_allocated = Metaspace::space_list()->virtual_space_total() +
+              MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
+    if (real_allocated >= MaxMetaspaceSize) {
+      return false;
+    }
   }
 
   // Class virtual space should always be expanded.  Call GC for the other
@@ -1318,11 +1322,12 @@
   }
 
 
-
   // If the capacity is below the minimum capacity, allow the
   // expansion.  Also set the high-water-mark (capacity_until_GC)
   // to that minimum capacity so that a GC will not be induced
   // until that minimum capacity is exceeded.
+  size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
+  size_t metaspace_size_bytes = MetaspaceSize;
   if (committed_capacity_bytes < metaspace_size_bytes ||
       capacity_until_GC() == 0) {
     set_capacity_until_GC(metaspace_size_bytes);
@@ -1556,19 +1561,7 @@
 
 // ChunkManager methods
 
-// Verification of _free_chunks_total and _free_chunks_count does not
-// work with the CMS collector because its use of additional locks
-// complicate the mutex deadlock detection but it can still be useful
-// for detecting errors in the chunk accounting with other collectors.
-
 size_t ChunkManager::free_chunks_total() {
-#ifdef ASSERT
-  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
-    MutexLockerEx cl(SpaceManager::expand_lock(),
-                     Mutex::_no_safepoint_check_flag);
-    slow_locked_verify_free_chunks_total();
-  }
-#endif
   return _free_chunks_total;
 }
 
@@ -1866,13 +1859,11 @@
   Metachunk* chunk = chunks_in_use(index);
  // Count the free space in all the chunks but not the
   // current chunk from which allocations are still being done.
-  if (chunk != NULL) {
-    Metachunk* prev = chunk;
-    while (chunk != NULL && chunk != current_chunk()) {
+  while (chunk != NULL) {
+    if (chunk != current_chunk()) {
       result += chunk->free_word_size();
-      prev = chunk;
-      chunk = chunk->next();
     }
+    chunk = chunk->next();
   }
   return result;
 }
@@ -1961,8 +1952,7 @@
   // chunks will be allocated.
   size_t chunk_word_size;
   if (chunks_in_use(MediumIndex) == NULL &&
-      (!has_small_chunk_limit() ||
-       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit)) {
+      sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
     chunk_word_size = (size_t) small_chunk_size();
     if (word_size + Metachunk::overhead() > small_chunk_size()) {
       chunk_word_size = medium_chunk_size();
@@ -2608,14 +2598,14 @@
                         "->" SIZE_FORMAT
                         "("  SIZE_FORMAT ")",
                         prev_metadata_used,
-                        allocated_capacity_bytes(),
+                        allocated_used_bytes(),
                         reserved_in_bytes());
   } else {
     gclog_or_tty->print(" "  SIZE_FORMAT "K"
                         "->" SIZE_FORMAT "K"
                         "("  SIZE_FORMAT "K)",
                         prev_metadata_used / K,
-                        allocated_capacity_bytes() / K,
+                        allocated_used_bytes() / K,
                         reserved_in_bytes()/ K);
   }
 
@@ -2671,10 +2661,10 @@
 // Print total fragmentation for class and data metaspaces separately
 void MetaspaceAux::print_waste(outputStream* out) {
 
-  size_t specialized_waste = 0, small_waste = 0, medium_waste = 0, large_waste = 0;
-  size_t specialized_count = 0, small_count = 0, medium_count = 0, large_count = 0;
-  size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0, cls_large_waste = 0;
-  size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_large_count = 0;
+  size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
+  size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
+  size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
+  size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
 
   ClassLoaderDataGraphMetaspaceIterator iter;
   while (iter.repeat()) {
@@ -2686,8 +2676,7 @@
       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
-      large_waste += msp->vsm()->sum_waste_in_chunks_in_use(HumongousIndex);
-      large_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
+      humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
 
       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
@@ -2695,20 +2684,23 @@
       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
-      cls_large_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(HumongousIndex);
-      cls_large_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
+      cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
     }
   }
   out->print_cr("Total fragmentation waste (words) doesn't count free space");
   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
-                        SIZE_FORMAT " medium(s) " SIZE_FORMAT,
+                        SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
+                        "large count " SIZE_FORMAT,
              specialized_count, specialized_waste, small_count,
-             small_waste, medium_count, medium_waste);
+             small_waste, medium_count, medium_waste, humongous_count);
   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
-                           SIZE_FORMAT " small(s) " SIZE_FORMAT,
+                           SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
+                           SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
+                           "large count " SIZE_FORMAT,
              cls_specialized_count, cls_specialized_waste,
-             cls_small_count, cls_small_waste);
+             cls_small_count, cls_small_waste,
+             cls_medium_count, cls_medium_waste, cls_humongous_count);
 }
 
 // Dump global metaspace things from the end of ClassLoaderDataGraph
@@ -3049,18 +3041,24 @@
       if (Verbose && TraceMetadataChunkAllocation) {
         gclog_or_tty->print_cr("Metaspace allocation failed for size "
           SIZE_FORMAT, word_size);
-        if (loader_data->metaspace_or_null() != NULL) loader_data->metaspace_or_null()->dump(gclog_or_tty);
+        if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty);
         MetaspaceAux::dump(gclog_or_tty);
       }
       // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
-      report_java_out_of_memory("Metadata space");
+      const char* space_string = (mdtype == ClassType) ? "Class Metadata space" :
+                                                         "Metadata space";
+      report_java_out_of_memory(space_string);
 
       if (JvmtiExport::should_post_resource_exhausted()) {
         JvmtiExport::post_resource_exhausted(
             JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
-            "Metadata space");
+            space_string);
       }
-      THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
+      if (mdtype == ClassType) {
+        THROW_OOP_0(Universe::out_of_memory_error_class_metaspace());
+      } else {
+        THROW_OOP_0(Universe::out_of_memory_error_metaspace());
+      }
     }
   }
   return Metablock::initialize(result, word_size);
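
The new MaxMetaspaceSize test in should_expand adds the reserved size of the non-class virtual space list to the committed capacity of the class space and compares the sum against the limit, for the reason given in the added comment. A standalone sketch of just that arithmetic; the two helpers below are placeholders with arbitrary values, standing in for Metaspace::space_list()->virtual_space_total() and MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType):

    #include <cstddef>
    #include <cstdio>

    // Placeholder quantities; the real values come from the metaspace code.
    static size_t non_class_reserved()   { return 96u * 1024 * 1024; }
    static size_t class_committed()      { return  8u * 1024 * 1024; }

    // Deny further expansion once reserved (non-class) plus committed (class)
    // space reaches the user-imposed limit.
    static bool may_expand(size_t max_metaspace_size, bool limit_is_set) {
      if (limit_is_set) {
        size_t real_allocated = non_class_reserved() + class_committed();
        if (real_allocated >= max_metaspace_size) {
          return false;
        }
      }
      return true;
    }

    int main() {
      std::printf("expand allowed: %d\n",
                  may_expand(128u * 1024 * 1024, true));
      return 0;
    }
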
--- a/src/share/vm/memory/metaspaceShared.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/memory/metaspaceShared.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -826,35 +826,15 @@
   bool reading() const { return true; }
 };
 
-
-// Save bounds of shared spaces mapped in.
-static char* _ro_base = NULL;
-static char* _rw_base = NULL;
-static char* _md_base = NULL;
-static char* _mc_base = NULL;
-
 // Return true if given address is in the mapped shared space.
 bool MetaspaceShared::is_in_shared_space(const void* p) {
-  if (_ro_base == NULL || _rw_base == NULL) {
-    return false;
-  } else {
-    return ((p >= _ro_base && p < (_ro_base + SharedReadOnlySize)) ||
-            (p >= _rw_base && p < (_rw_base + SharedReadWriteSize)));
-  }
+  return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_space(p);
 }
 
 void MetaspaceShared::print_shared_spaces() {
-  gclog_or_tty->print_cr("Shared Spaces:");
-  gclog_or_tty->print("  read-only " INTPTR_FORMAT "-" INTPTR_FORMAT,
-    _ro_base, _ro_base + SharedReadOnlySize);
-  gclog_or_tty->print("  read-write " INTPTR_FORMAT "-" INTPTR_FORMAT,
-    _rw_base, _rw_base + SharedReadWriteSize);
-  gclog_or_tty->cr();
-  gclog_or_tty->print("  misc-data " INTPTR_FORMAT "-" INTPTR_FORMAT,
-    _md_base, _md_base + SharedMiscDataSize);
-  gclog_or_tty->print("  misc-code " INTPTR_FORMAT "-" INTPTR_FORMAT,
-    _mc_base, _mc_base + SharedMiscCodeSize);
-  gclog_or_tty->cr();
+  if (UseSharedSpaces) {
+    FileMapInfo::current_info()->print_shared_spaces();
+  }
 }
 
 
@@ -874,6 +854,11 @@
 
   assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
 
+  char* _ro_base = NULL;
+  char* _rw_base = NULL;
+  char* _md_base = NULL;
+  char* _mc_base = NULL;
+
   // Map each shared region
   if ((_ro_base = mapinfo->map_region(ro)) != NULL &&
       (_rw_base = mapinfo->map_region(rw)) != NULL &&
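
With the static base pointers gone, MetaspaceShared::is_in_shared_space now defers to FileMapInfo, which presumably answers the query by range-checking each mapped region recorded in its header. A simplified sketch of that per-region test; the region_info struct below is hypothetical and only mimics the _base/_used bookkeeping visible in print_shared_spaces above:

    #include <cstddef>

    // Hypothetical, simplified region record.
    struct region_info {
      const char* base;
      size_t      used;
    };

    // Is p inside any mapped shared region? A plain [base, base + used) test.
    static bool is_in_regions(const void* p, const region_info* regions, int n) {
      const char* cp = static_cast<const char*>(p);
      for (int i = 0; i < n; i++) {
        if (cp >= regions[i].base && cp < regions[i].base + regions[i].used) {
          return true;
        }
      }
      return false;
    }

    int main() {
      static char mapped[4096];
      region_info r = { mapped, sizeof(mapped) };
      return is_in_regions(mapped + 100, &r, 1) ? 0 : 1;
    }
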
--- a/src/share/vm/memory/referenceProcessorStats.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/memory/referenceProcessorStats.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/sharedHeap.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/memory/sharedHeap.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,6 @@
   SH_PS_SystemDictionary_oops_do,
   SH_PS_ClassLoaderDataGraph_oops_do,
   SH_PS_jvmti_oops_do,
-  SH_PS_StringTable_oops_do,
   SH_PS_CodeCache_oops_do,
   // Leave this one last.
   SH_PS_NumElements
@@ -127,6 +126,8 @@
 {
   if (_active) {
     outer->change_strong_roots_parity();
+    // Zero the claimed high water mark in the StringTable
+    StringTable::clear_parallel_claimed_index();
   }
 }
 
@@ -154,14 +155,16 @@
   // Global (strong) JNI handles
   if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
     JNIHandles::oops_do(roots);
+
   // All threads execute this; the individual threads are task groups.
   CLDToOopClosure roots_from_clds(roots);
   CLDToOopClosure* roots_from_clds_p = (is_scavenging ? NULL : &roots_from_clds);
-  if (ParallelGCThreads > 0) {
-    Threads::possibly_parallel_oops_do(roots, roots_from_clds_p ,code_roots);
+  if (CollectedHeap::use_parallel_gc_threads()) {
+    Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, code_roots);
   } else {
     Threads::oops_do(roots, roots_from_clds_p, code_roots);
   }
+
   if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
     ObjectSynchronizer::oops_do(roots);
   if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
@@ -189,8 +192,12 @@
     }
   }
 
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) {
-    if (so & SO_Strings) {
+  // All threads execute the following. Specific chunks of buckets
+  // from the StringTable are the individual tasks.
+  if (so & SO_Strings) {
+    if (CollectedHeap::use_parallel_gc_threads()) {
+      StringTable::possibly_parallel_oops_do(roots);
+    } else {
       StringTable::oops_do(roots);
     }
   }
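
The StringTable is no longer a single claimed root task; every worker now calls possibly_parallel_oops_do, and the claimed index cleared above hands out disjoint chunks of buckets. The sketch below shows the claim-by-atomic-counter idea this implies, using std::atomic as a stand-in for HotSpot's atomics; the table size and chunk size are arbitrary, and the real partitioning logic may differ:

    #include <atomic>
    #include <cstdio>

    static const int table_size  = 1009;  // arbitrary bucket count
    static const int claim_chunk = 32;    // buckets handed out per claim

    static std::atomic<int> parallel_claimed_index(0);

    // Reset the high-water mark before a new round of root processing,
    // mirroring clear_parallel_claimed_index() above.
    static void clear_parallel_claimed_index() {
      parallel_claimed_index.store(0);
    }

    // Every worker runs this; fetch_add gives each caller a disjoint chunk of
    // buckets, so no bucket is scanned twice and no locking is needed.
    static void possibly_parallel_buckets_do(void (*do_bucket)(int)) {
      for (;;) {
        int start = parallel_claimed_index.fetch_add(claim_chunk);
        if (start >= table_size) {
          return;                        // all buckets claimed
        }
        int end = start + claim_chunk;
        if (end > table_size) end = table_size;
        for (int i = start; i < end; i++) {
          do_bucket(i);
        }
      }
    }

    static void scan_bucket(int index) {
      (void)index;                       // a real closure would walk the bucket
    }

    int main() {
      clear_parallel_claimed_index();
      possibly_parallel_buckets_do(scan_bucket);
      std::printf("claimed up to %d\n", parallel_claimed_index.load());
      return 0;
    }
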
--- a/src/share/vm/memory/universe.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/memory/universe.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -111,7 +111,8 @@
 LatestMethodOopCache* Universe::_pd_implies_cache         = NULL;
 ActiveMethodOopsCache* Universe::_reflect_invoke_cache    = NULL;
 oop Universe::_out_of_memory_error_java_heap          = NULL;
-oop Universe::_out_of_memory_error_perm_gen           = NULL;
+oop Universe::_out_of_memory_error_metaspace          = NULL;
+oop Universe::_out_of_memory_error_class_metaspace    = NULL;
 oop Universe::_out_of_memory_error_array_size         = NULL;
 oop Universe::_out_of_memory_error_gc_overhead_limit  = NULL;
 objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
@@ -180,7 +181,8 @@
   f->do_oop((oop*)&_the_null_string);
   f->do_oop((oop*)&_the_min_jint_string);
   f->do_oop((oop*)&_out_of_memory_error_java_heap);
-  f->do_oop((oop*)&_out_of_memory_error_perm_gen);
+  f->do_oop((oop*)&_out_of_memory_error_metaspace);
+  f->do_oop((oop*)&_out_of_memory_error_class_metaspace);
   f->do_oop((oop*)&_out_of_memory_error_array_size);
   f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
     f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
@@ -531,7 +533,9 @@
   if (vt) vt->initialize_vtable(false, CHECK);
   if (ko->oop_is_instance()) {
     InstanceKlass* ik = (InstanceKlass*)ko;
-    for (KlassHandle s_h(THREAD, ik->subklass()); s_h() != NULL; s_h = (THREAD, s_h()->next_sibling())) {
+    for (KlassHandle s_h(THREAD, ik->subklass());
+         s_h() != NULL;
+         s_h = KlassHandle(THREAD, s_h()->next_sibling())) {
       reinitialize_vtable_of(s_h, CHECK);
     }
   }
@@ -561,7 +565,8 @@
   // a potential loop which could happen if an out of memory occurs when attempting
   // to allocate the backtrace.
   return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
-          (throwable() != Universe::_out_of_memory_error_perm_gen)  &&
+          (throwable() != Universe::_out_of_memory_error_metaspace)  &&
+          (throwable() != Universe::_out_of_memory_error_class_metaspace)  &&
           (throwable() != Universe::_out_of_memory_error_array_size) &&
           (throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
 }
@@ -1012,7 +1017,8 @@
     k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_OutOfMemoryError(), true, CHECK_false);
     k_h = instanceKlassHandle(THREAD, k);
     Universe::_out_of_memory_error_java_heap = k_h->allocate_instance(CHECK_false);
-    Universe::_out_of_memory_error_perm_gen = k_h->allocate_instance(CHECK_false);
+    Universe::_out_of_memory_error_metaspace = k_h->allocate_instance(CHECK_false);
+    Universe::_out_of_memory_error_class_metaspace = k_h->allocate_instance(CHECK_false);
     Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false);
     Universe::_out_of_memory_error_gc_overhead_limit =
       k_h->allocate_instance(CHECK_false);
@@ -1045,7 +1051,9 @@
     java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg());
 
     msg = java_lang_String::create_from_str("Metadata space", CHECK_false);
-    java_lang_Throwable::set_message(Universe::_out_of_memory_error_perm_gen, msg());
+    java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
+    msg = java_lang_String::create_from_str("Class Metadata space", CHECK_false);
+    java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());
 
     msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
     java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg());
@@ -1145,6 +1153,7 @@
 
   // Initialize performance counters for metaspaces
   MetaspaceCounters::initialize_performance_counters();
+  MemoryService::add_metaspace_memory_pools();
 
   GC_locker::unlock();  // allow gc after bootstrapping
 
--- a/src/share/vm/memory/universe.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/memory/universe.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -178,10 +178,12 @@
   static LatestMethodOopCache* _loader_addClass_cache;    // method for registering loaded classes in class loader vector
   static LatestMethodOopCache* _pd_implies_cache;         // method for checking protection domain attributes
   static ActiveMethodOopsCache* _reflect_invoke_cache;    // method for security checks
-  static oop          _out_of_memory_error_java_heap; // preallocated error object (no backtrace)
-  static oop          _out_of_memory_error_perm_gen;  // preallocated error object (no backtrace)
-  static oop          _out_of_memory_error_array_size;// preallocated error object (no backtrace)
-  static oop          _out_of_memory_error_gc_overhead_limit; // preallocated error object (no backtrace)
+  // preallocated error objects (no backtrace)
+  static oop          _out_of_memory_error_java_heap;
+  static oop          _out_of_memory_error_metaspace;
+  static oop          _out_of_memory_error_class_metaspace;
+  static oop          _out_of_memory_error_array_size;
+  static oop          _out_of_memory_error_gc_overhead_limit;
 
   static Array<int>*       _the_empty_int_array;    // Canonicalized int array
   static Array<u2>*        _the_empty_short_array;  // Canonicalized short array
@@ -352,7 +354,8 @@
   // may or may not have a backtrace. If error has a backtrace then the stack trace is already
   // filled in.
   static oop out_of_memory_error_java_heap()          { return gen_out_of_memory_error(_out_of_memory_error_java_heap);  }
-  static oop out_of_memory_error_perm_gen()           { return gen_out_of_memory_error(_out_of_memory_error_perm_gen);   }
+  static oop out_of_memory_error_metaspace()          { return gen_out_of_memory_error(_out_of_memory_error_metaspace);   }
+  static oop out_of_memory_error_class_metaspace()    { return gen_out_of_memory_error(_out_of_memory_error_class_metaspace);   }
   static oop out_of_memory_error_array_size()         { return gen_out_of_memory_error(_out_of_memory_error_array_size); }
   static oop out_of_memory_error_gc_overhead_limit()  { return gen_out_of_memory_error(_out_of_memory_error_gc_overhead_limit);  }
 
--- a/src/share/vm/oops/arrayKlass.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/oops/arrayKlass.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -221,8 +221,8 @@
 
 // Verification
 
-void ArrayKlass::verify_on(outputStream* st) {
-  Klass::verify_on(st);
+void ArrayKlass::verify_on(outputStream* st, bool check_dictionary) {
+  Klass::verify_on(st, check_dictionary);
 
   if (component_mirror() != NULL) {
     guarantee(component_mirror()->klass() != NULL, "should have a class");
--- a/src/share/vm/oops/arrayKlass.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/oops/arrayKlass.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -152,7 +152,7 @@
   void oop_print_on(oop obj, outputStream* st);
 
   // Verification
-  void verify_on(outputStream* st);
+  void verify_on(outputStream* st, bool check_dictionary);
 
   void oop_verify_on(oop obj, outputStream* st);
 };
--- a/src/share/vm/oops/compiledICHolder.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/oops/compiledICHolder.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -48,8 +48,6 @@
 // Verification
 
 void CompiledICHolder::verify_on(outputStream* st) {
-  guarantee(holder_method()->is_metadata(),   "should be in metaspace");
   guarantee(holder_method()->is_method(), "should be method");
-  guarantee(holder_klass()->is_metadata(),    "should be in metaspace");
   guarantee(holder_klass()->is_klass(),   "should be klass");
 }
--- a/src/share/vm/oops/constMethod.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/oops/constMethod.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -440,7 +440,6 @@
 
 void ConstMethod::verify_on(outputStream* st) {
   guarantee(is_constMethod(), "object must be constMethod");
-  guarantee(is_metadata(), err_msg("Should be metadata " PTR_FORMAT, this));
 
   // Verification can occur during oop construction before the method or
   // other fields have been initialized.
--- a/src/share/vm/oops/constantPool.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/oops/constantPool.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -2095,12 +2095,10 @@
     CPSlot entry = slot_at(i);
     if (tag.is_klass()) {
       if (entry.is_resolved()) {
-        guarantee(entry.get_klass()->is_metadata(), "should be metadata");
         guarantee(entry.get_klass()->is_klass(),    "should be klass");
       }
     } else if (tag.is_unresolved_klass()) {
       if (entry.is_resolved()) {
-        guarantee(entry.get_klass()->is_metadata(), "should be metadata");
         guarantee(entry.get_klass()->is_klass(),    "should be klass");
       }
     } else if (tag.is_symbol()) {
@@ -2112,13 +2110,11 @@
   if (cache() != NULL) {
     // Note: cache() can be NULL before a class is completely setup or
     // in temporary constant pools used during constant pool merging
-    guarantee(cache()->is_metadata(),          "should be metadata");
     guarantee(cache()->is_constantPoolCache(), "should be constant pool cache");
   }
   if (pool_holder() != NULL) {
     // Note: pool_holder() can be NULL in temporary constant pools
     // used during constant pool merging
-    guarantee(pool_holder()->is_metadata(), "should be metadata");
     guarantee(pool_holder()->is_klass(),    "should be klass");
   }
 }
--- a/src/share/vm/oops/instanceKlass.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/oops/instanceKlass.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -3088,27 +3088,26 @@
   virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
 };
 
-void InstanceKlass::verify_on(outputStream* st) {
-  Klass::verify_on(st);
-  Thread *thread = Thread::current();
-
+void InstanceKlass::verify_on(outputStream* st, bool check_dictionary) {
 #ifndef PRODUCT
-  // Avoid redundant verifies
+  // Avoid redundant verifies; this really should be in product.
   if (_verify_count == Universe::verify_count()) return;
   _verify_count = Universe::verify_count();
 #endif
-  // Verify that klass is present in SystemDictionary
-  if (is_loaded() && !is_anonymous()) {
+
+  // Verify Klass
+  Klass::verify_on(st, check_dictionary);
+
+  // Verify that klass is present in SystemDictionary if not already
+  // verifying the SystemDictionary.
+  if (is_loaded() && !is_anonymous() && check_dictionary) {
     Symbol* h_name = name();
     SystemDictionary::verify_obj_klass_present(h_name, class_loader_data());
   }
 
-  // Verify static fields
-  VerifyFieldClosure blk;
-
   // Verify vtables
   if (is_linked()) {
-    ResourceMark rm(thread);
+    ResourceMark rm;
     // $$$ This used to be done only for m/s collections.  Doing it
     // always seemed a valid generalization.  (DLD -- 6/00)
     vtable()->verify(st);
@@ -3116,7 +3115,6 @@
 
   // Verify first subklass
   if (subklass_oop() != NULL) {
-    guarantee(subklass_oop()->is_metadata(), "should be in metaspace");
     guarantee(subklass_oop()->is_klass(), "should be klass");
   }
 
@@ -3128,7 +3126,6 @@
       fatal(err_msg("subclass points to itself " PTR_FORMAT, sib));
     }
 
-    guarantee(sib->is_metadata(), "should be in metaspace");
     guarantee(sib->is_klass(), "should be klass");
     guarantee(sib->super() == super, "siblings should have same superklass");
   }
@@ -3164,7 +3161,6 @@
   if (methods() != NULL) {
     Array<Method*>* methods = this->methods();
     for (int j = 0; j < methods->length(); j++) {
-      guarantee(methods->at(j)->is_metadata(), "should be in metaspace");
       guarantee(methods->at(j)->is_method(), "non-method in methods array");
     }
     for (int j = 0; j < methods->length() - 1; j++) {
@@ -3202,16 +3198,13 @@
 
   // Verify other fields
   if (array_klasses() != NULL) {
-    guarantee(array_klasses()->is_metadata(), "should be in metaspace");
     guarantee(array_klasses()->is_klass(), "should be klass");
   }
   if (constants() != NULL) {
-    guarantee(constants()->is_metadata(), "should be in metaspace");
     guarantee(constants()->is_constantPool(), "should be constant pool");
   }
   const Klass* host = host_klass();
   if (host != NULL) {
-    guarantee(host->is_metadata(), "should be in metaspace");
     guarantee(host->is_klass(), "should be klass");
   }
 }
--- a/src/share/vm/oops/instanceKlass.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/oops/instanceKlass.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1050,7 +1050,7 @@
   const char* internal_name() const;
 
   // Verification
-  void verify_on(outputStream* st);
+  void verify_on(outputStream* st, bool check_dictionary);
 
   void oop_verify_on(oop obj, outputStream* st);
 };
--- a/src/share/vm/oops/klass.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/oops/klass.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -377,7 +377,6 @@
 }
 
 bool Klass::is_loader_alive(BoolObjectClosure* is_alive) {
-  assert(is_metadata(), "p is not meta-data");
   assert(ClassLoaderDataGraph::contains((address)this), "is in the metaspace");
 
 #ifdef ASSERT
@@ -648,27 +647,24 @@
 
 // Verification
 
-void Klass::verify_on(outputStream* st) {
-  guarantee(!Universe::heap()->is_in_reserved(this), "Shouldn't be");
-  guarantee(this->is_metadata(), "should be in metaspace");
+void Klass::verify_on(outputStream* st, bool check_dictionary) {
 
+  // This can be expensive, but it is worth checking that this klass is
+  // actually in the CLD graph (the check is skipped in product builds).
   assert(ClassLoaderDataGraph::contains((address)this), "Should be");
 
   guarantee(this->is_klass(),"should be klass");
 
   if (super() != NULL) {
-    guarantee(super()->is_metadata(), "should be in metaspace");
     guarantee(super()->is_klass(), "should be klass");
   }
   if (secondary_super_cache() != NULL) {
     Klass* ko = secondary_super_cache();
-    guarantee(ko->is_metadata(), "should be in metaspace");
     guarantee(ko->is_klass(), "should be klass");
   }
   for ( uint i = 0; i < primary_super_limit(); i++ ) {
     Klass* ko = _primary_supers[i];
     if (ko != NULL) {
-      guarantee(ko->is_metadata(), "should be in metaspace");
       guarantee(ko->is_klass(), "should be klass");
     }
   }
@@ -680,7 +676,6 @@
 
 void Klass::oop_verify_on(oop obj, outputStream* st) {
   guarantee(obj->is_oop(),  "should be oop");
-  guarantee(obj->klass()->is_metadata(), "should not be in Java heap");
   guarantee(obj->klass()->is_klass(), "klass field is not a klass");
 }
 
--- a/src/share/vm/oops/klass.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/oops/klass.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -703,8 +703,8 @@
   virtual const char* internal_name() const = 0;
 
   // Verification
-  virtual void verify_on(outputStream* st);
-  void verify() { verify_on(tty); }
+  virtual void verify_on(outputStream* st, bool check_dictionary);
+  void verify(bool check_dictionary = true) { verify_on(tty, check_dictionary); }
 
 #ifndef PRODUCT
   void verify_vtable_index(int index);
--- a/src/share/vm/oops/method.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/oops/method.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -1969,14 +1969,9 @@
 
 void Method::verify_on(outputStream* st) {
   guarantee(is_method(), "object must be method");
-  guarantee(is_metadata(),  "should be metadata");
   guarantee(constants()->is_constantPool(), "should be constant pool");
-  guarantee(constants()->is_metadata(), "should be metadata");
   guarantee(constMethod()->is_constMethod(), "should be ConstMethod*");
-  guarantee(constMethod()->is_metadata(), "should be metadata");
   MethodData* md = method_data();
   guarantee(md == NULL ||
-      md->is_metadata(), "should be metadata");
-  guarantee(md == NULL ||
       md->is_methodData(), "should be method data");
 }
--- a/src/share/vm/oops/objArrayKlass.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/oops/objArrayKlass.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -676,11 +676,9 @@
 
 // Verification
 
-void ObjArrayKlass::verify_on(outputStream* st) {
-  ArrayKlass::verify_on(st);
-  guarantee(element_klass()->is_metadata(), "should be in metaspace");
+void ObjArrayKlass::verify_on(outputStream* st, bool check_dictionary) {
+  ArrayKlass::verify_on(st, check_dictionary);
   guarantee(element_klass()->is_klass(), "should be klass");
-  guarantee(bottom_klass()->is_metadata(), "should be in metaspace");
   guarantee(bottom_klass()->is_klass(), "should be klass");
   Klass* bk = bottom_klass();
   guarantee(bk->oop_is_instance() || bk->oop_is_typeArray(),  "invalid bottom klass");
--- a/src/share/vm/oops/objArrayKlass.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/oops/objArrayKlass.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -151,7 +151,7 @@
   const char* internal_name() const;
 
   // Verification
-  void verify_on(outputStream* st);
+  void verify_on(outputStream* st, bool check_dictionary);
 
   void oop_verify_on(oop obj, outputStream* st);
 };
--- a/src/share/vm/oops/symbol.cpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/oops/symbol.cpp	Mon Jul 15 11:07:03 2013 +0100
@@ -32,7 +32,9 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 
-Symbol::Symbol(const u1* name, int length, int refcount) : _refcount(refcount), _length(length) {
+Symbol::Symbol(const u1* name, int length, int refcount) {
+  _refcount = refcount;
+  _length = length;
   _identity_hash = os::random();
   for (int i = 0; i < _length; i++) {
     byte_at_put(i, name[i]);
--- a/src/share/vm/oops/symbol.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/oops/symbol.hpp	Mon Jul 15 11:07:03 2013 +0100
@@ -27,6 +27,7 @@
 
 #include "utilities/utf8.hpp"
 #include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
 
 // A Symbol is a canonicalized string.
 // All Symbols reside in global SymbolTable and are reference counted.
@@ -101,14 +102,22 @@
 // type without virtual functions.
 class ClassLoaderData;
 
-class Symbol : public MetaspaceObj {
+// We separate the fields in SymbolBase from Symbol::_body so that
+// Symbol::size(int) can correctly calculate the space needed.
+class SymbolBase : public MetaspaceObj {
+ public:
+  ATOMIC_SHORT_PAIR(
+    volatile short _refcount,  // needs atomic operation
+    unsigned short _length     // number of UTF8 characters in the symbol (does not need atomic op)
+  );
+  int            _identity_hash;
+};
+
+class Symbol : private SymbolBase {
   friend class VMStructs;
   friend class SymbolTable;
   friend class MoveSymbols;
  private:
-  volatile int   _refcount;
-  int            _identity_hash;
-  unsigned short _length; // number of UTF8 characters in the symbol
   jbyte _body[1];
 
   enum {
@@ -117,7 +126,7 @@
   };
 
   static int size(int length) {
-    size_t sz = heap_word_size(sizeof(Symbol) + (length > 0 ? length - 1 : 0));
+    size_t sz = heap_word_size(sizeof(SymbolBase) + (length > 0 ? length : 0));
     return align_object_size(sz);
   }
 
--- a/src/share/vm/opto/c2_globals.hpp	Thu Jul 11 12:59:03 2013 -0400
+++ b/src/share/vm/opto/c2_globals.hpp	Mon Jul 15 11:07:03 2013 +0100