changeset 7143:424bf1a57654

Merge
author lana
date Sat, 13 Nov 2010 18:40:37 -0800
parents fc99b9a4bee3 dc71e7fdd835
children 6df7c2b3dbea
files hotspot/src/os/linux/vm/objectMonitor_linux.cpp hotspot/src/os/linux/vm/objectMonitor_linux.hpp hotspot/src/os/linux/vm/objectMonitor_linux.inline.hpp hotspot/src/os/solaris/vm/objectMonitor_solaris.cpp hotspot/src/os/solaris/vm/objectMonitor_solaris.hpp hotspot/src/os/solaris/vm/objectMonitor_solaris.inline.hpp hotspot/src/os/windows/vm/objectMonitor_windows.cpp hotspot/src/os/windows/vm/objectMonitor_windows.hpp hotspot/src/os/windows/vm/objectMonitor_windows.inline.hpp jdk/src/share/classes/java/dyn/JavaMethodHandle.java jdk/src/share/classes/java/nio/channels/AsynchronousDatagramChannel.java jdk/src/share/classes/sun/java2d/pisces/LineSink.java jdk/src/share/classes/sun/nio/ch/SimpleAsynchronousDatagramChannelImpl.java jdk/test/java/nio/channels/AsynchronousDatagramChannel/Basic.java langtools/src/share/classes/com/sun/source/tree/AnnotatedTypeTree.java langtools/src/share/classes/com/sun/source/tree/DisjointTypeTree.java langtools/src/share/classes/com/sun/source/util/AbstractTypeProcessor.java langtools/test/tools/javac/T6985181.java langtools/test/tools/javac/diags/examples/TypeAnnotationsNotSupported.java langtools/test/tools/javac/treeannotests/AnnoTreeTests.java langtools/test/tools/javac/typeAnnotations/6967002/T6967002.java langtools/test/tools/javac/typeAnnotations/6967002/T6967002.out langtools/test/tools/javac/typeAnnotations/InnerClass.java langtools/test/tools/javac/typeAnnotations/MultipleTargets.java langtools/test/tools/javac/typeAnnotations/TypeParameterTarget.java langtools/test/tools/javac/typeAnnotations/TypeUseTarget.java langtools/test/tools/javac/typeAnnotations/attribution/Scopes.java langtools/test/tools/javac/typeAnnotations/classfile/DeadCode.java langtools/test/tools/javac/typeAnnotations/failures/AnnotationVersion.java langtools/test/tools/javac/typeAnnotations/failures/AnnotationVersion.out langtools/test/tools/javac/typeAnnotations/failures/IncompleteArray.java langtools/test/tools/javac/typeAnnotations/failures/IncompleteArray.out langtools/test/tools/javac/typeAnnotations/failures/IncompleteVararg.java langtools/test/tools/javac/typeAnnotations/failures/IncompleteVararg.out langtools/test/tools/javac/typeAnnotations/failures/IndexArray.java langtools/test/tools/javac/typeAnnotations/failures/IndexArray.out langtools/test/tools/javac/typeAnnotations/failures/LintCast.java langtools/test/tools/javac/typeAnnotations/failures/LintCast.out langtools/test/tools/javac/typeAnnotations/failures/OldArray.java langtools/test/tools/javac/typeAnnotations/failures/Scopes.java langtools/test/tools/javac/typeAnnotations/failures/Scopes.out langtools/test/tools/javac/typeAnnotations/failures/StaticFields.java langtools/test/tools/javac/typeAnnotations/failures/StaticFields.out langtools/test/tools/javac/typeAnnotations/failures/StaticMethods.java langtools/test/tools/javac/typeAnnotations/failures/StaticMethods.out langtools/test/tools/javac/typeAnnotations/failures/VoidGenericMethod.java langtools/test/tools/javac/typeAnnotations/failures/common/arrayclass/DuplicateAnnotationValue.java langtools/test/tools/javac/typeAnnotations/failures/common/arrayclass/DuplicateAnnotationValue.out langtools/test/tools/javac/typeAnnotations/failures/common/arrayclass/DuplicateTypeAnnotation.java langtools/test/tools/javac/typeAnnotations/failures/common/arrayclass/DuplicateTypeAnnotation.out langtools/test/tools/javac/typeAnnotations/failures/common/arrayclass/InvalidLocation.java langtools/test/tools/javac/typeAnnotations/failures/common/arrayclass/InvalidLocation.out 
langtools/test/tools/javac/typeAnnotations/failures/common/arrayclass/MissingAnnotationValue.java langtools/test/tools/javac/typeAnnotations/failures/common/arrayclass/MissingAnnotationValue.out langtools/test/tools/javac/typeAnnotations/failures/common/arrays/DuplicateAnnotationValue.java langtools/test/tools/javac/typeAnnotations/failures/common/arrays/DuplicateAnnotationValue.out langtools/test/tools/javac/typeAnnotations/failures/common/arrays/DuplicateTypeAnnotation.java langtools/test/tools/javac/typeAnnotations/failures/common/arrays/DuplicateTypeAnnotation.out langtools/test/tools/javac/typeAnnotations/failures/common/arrays/InvalidLocation.java langtools/test/tools/javac/typeAnnotations/failures/common/arrays/InvalidLocation.out langtools/test/tools/javac/typeAnnotations/failures/common/arrays/MissingAnnotationValue.java langtools/test/tools/javac/typeAnnotations/failures/common/arrays/MissingAnnotationValue.out langtools/test/tools/javac/typeAnnotations/failures/common/innertypeparams/DuplicateAnnotationValue.java langtools/test/tools/javac/typeAnnotations/failures/common/innertypeparams/DuplicateAnnotationValue.out langtools/test/tools/javac/typeAnnotations/failures/common/innertypeparams/DuplicateTypeAnnotation.java langtools/test/tools/javac/typeAnnotations/failures/common/innertypeparams/DuplicateTypeAnnotation.out langtools/test/tools/javac/typeAnnotations/failures/common/innertypeparams/InvalidLocation.java langtools/test/tools/javac/typeAnnotations/failures/common/innertypeparams/InvalidLocation.out langtools/test/tools/javac/typeAnnotations/failures/common/innertypeparams/MissingAnnotationValue.java langtools/test/tools/javac/typeAnnotations/failures/common/innertypeparams/MissingAnnotationValue.out langtools/test/tools/javac/typeAnnotations/failures/common/newarray/DuplicateAnnotationValue.java langtools/test/tools/javac/typeAnnotations/failures/common/newarray/DuplicateAnnotationValue.out langtools/test/tools/javac/typeAnnotations/failures/common/newarray/DuplicateTypeAnnotation.java langtools/test/tools/javac/typeAnnotations/failures/common/newarray/DuplicateTypeAnnotation.out langtools/test/tools/javac/typeAnnotations/failures/common/newarray/InvalidLocation.java langtools/test/tools/javac/typeAnnotations/failures/common/newarray/InvalidLocation.out langtools/test/tools/javac/typeAnnotations/failures/common/newarray/MissingAnnotationValue.java langtools/test/tools/javac/typeAnnotations/failures/common/newarray/MissingAnnotationValue.out langtools/test/tools/javac/typeAnnotations/failures/common/parambounds/DuplicateAnnotationValue.java langtools/test/tools/javac/typeAnnotations/failures/common/parambounds/DuplicateAnnotationValue.out langtools/test/tools/javac/typeAnnotations/failures/common/parambounds/DuplicateTypeAnnotation.java langtools/test/tools/javac/typeAnnotations/failures/common/parambounds/DuplicateTypeAnnotation.out langtools/test/tools/javac/typeAnnotations/failures/common/parambounds/InvalidLocation.java langtools/test/tools/javac/typeAnnotations/failures/common/parambounds/InvalidLocation.out langtools/test/tools/javac/typeAnnotations/failures/common/parambounds/MissingAnnotationValue.java langtools/test/tools/javac/typeAnnotations/failures/common/parambounds/MissingAnnotationValue.out langtools/test/tools/javac/typeAnnotations/failures/common/receiver/DuplicateAnnotationValue.java langtools/test/tools/javac/typeAnnotations/failures/common/receiver/DuplicateAnnotationValue.out 
langtools/test/tools/javac/typeAnnotations/failures/common/receiver/DuplicateTypeAnnotation.java langtools/test/tools/javac/typeAnnotations/failures/common/receiver/DuplicateTypeAnnotation.out langtools/test/tools/javac/typeAnnotations/failures/common/receiver/InvalidLocation.java langtools/test/tools/javac/typeAnnotations/failures/common/receiver/InvalidLocation.out langtools/test/tools/javac/typeAnnotations/failures/common/receiver/MissingAnnotationValue.java langtools/test/tools/javac/typeAnnotations/failures/common/receiver/MissingAnnotationValue.out langtools/test/tools/javac/typeAnnotations/failures/common/rest/DuplicateAnnotationValue.java langtools/test/tools/javac/typeAnnotations/failures/common/rest/DuplicateAnnotationValue.out langtools/test/tools/javac/typeAnnotations/failures/common/rest/DuplicateTypeAnnotation.java langtools/test/tools/javac/typeAnnotations/failures/common/rest/DuplicateTypeAnnotation.out langtools/test/tools/javac/typeAnnotations/failures/common/rest/InvalidLocation.java langtools/test/tools/javac/typeAnnotations/failures/common/rest/InvalidLocation.out langtools/test/tools/javac/typeAnnotations/failures/common/rest/MissingAnnotationValue.java langtools/test/tools/javac/typeAnnotations/failures/common/rest/MissingAnnotationValue.out langtools/test/tools/javac/typeAnnotations/failures/common/typeArgs/DuplicateAnnotationValue.java langtools/test/tools/javac/typeAnnotations/failures/common/typeArgs/DuplicateAnnotationValue.out langtools/test/tools/javac/typeAnnotations/failures/common/typeArgs/DuplicateTypeAnnotation.java langtools/test/tools/javac/typeAnnotations/failures/common/typeArgs/DuplicateTypeAnnotation.out langtools/test/tools/javac/typeAnnotations/failures/common/typeArgs/InvalidLocation.java langtools/test/tools/javac/typeAnnotations/failures/common/typeArgs/InvalidLocation.out langtools/test/tools/javac/typeAnnotations/failures/common/typeArgs/MissingAnnotationValue.java langtools/test/tools/javac/typeAnnotations/failures/common/typeArgs/MissingAnnotationValue.out langtools/test/tools/javac/typeAnnotations/failures/common/typeparams/DuplicateAnnotationValue.java langtools/test/tools/javac/typeAnnotations/failures/common/typeparams/DuplicateAnnotationValue.out langtools/test/tools/javac/typeAnnotations/failures/common/typeparams/DuplicateTypeAnnotation.java langtools/test/tools/javac/typeAnnotations/failures/common/typeparams/DuplicateTypeAnnotation.out langtools/test/tools/javac/typeAnnotations/failures/common/typeparams/InvalidLocation.java langtools/test/tools/javac/typeAnnotations/failures/common/typeparams/InvalidLocation.out langtools/test/tools/javac/typeAnnotations/failures/common/typeparams/MissingAnnotationValue.java langtools/test/tools/javac/typeAnnotations/failures/common/typeparams/MissingAnnotationValue.out langtools/test/tools/javac/typeAnnotations/failures/common/wildcards/DuplicateAnnotationValue.java langtools/test/tools/javac/typeAnnotations/failures/common/wildcards/DuplicateAnnotationValue.out langtools/test/tools/javac/typeAnnotations/failures/common/wildcards/DuplicateTypeAnnotation.java langtools/test/tools/javac/typeAnnotations/failures/common/wildcards/DuplicateTypeAnnotation.out langtools/test/tools/javac/typeAnnotations/failures/common/wildcards/InvalidLocation.java langtools/test/tools/javac/typeAnnotations/failures/common/wildcards/InvalidLocation.out langtools/test/tools/javac/typeAnnotations/failures/common/wildcards/MissingAnnotationValue.java 
langtools/test/tools/javac/typeAnnotations/failures/common/wildcards/MissingAnnotationValue.out langtools/test/tools/javac/typeAnnotations/failures/target/Constructor.java langtools/test/tools/javac/typeAnnotations/failures/target/Constructor.out langtools/test/tools/javac/typeAnnotations/failures/target/IncompleteArray.java langtools/test/tools/javac/typeAnnotations/failures/target/IncompleteArray.out langtools/test/tools/javac/typeAnnotations/failures/target/NotTypeParameter.java langtools/test/tools/javac/typeAnnotations/failures/target/NotTypeParameter.out langtools/test/tools/javac/typeAnnotations/failures/target/NotTypeUse.java langtools/test/tools/javac/typeAnnotations/failures/target/NotTypeUse.out langtools/test/tools/javac/typeAnnotations/failures/target/VoidMethod.java langtools/test/tools/javac/typeAnnotations/failures/target/VoidMethod.out langtools/test/tools/javac/typeAnnotations/newlocations/ClassExtends.java langtools/test/tools/javac/typeAnnotations/newlocations/ClassLiterals.java langtools/test/tools/javac/typeAnnotations/newlocations/ClassParameters.java langtools/test/tools/javac/typeAnnotations/newlocations/ConstructorTypeArgs.java langtools/test/tools/javac/typeAnnotations/newlocations/Expressions.java langtools/test/tools/javac/typeAnnotations/newlocations/Fields.java langtools/test/tools/javac/typeAnnotations/newlocations/LocalVariables.java langtools/test/tools/javac/typeAnnotations/newlocations/MethodReturnType.java langtools/test/tools/javac/typeAnnotations/newlocations/MethodTypeArgs.java langtools/test/tools/javac/typeAnnotations/newlocations/MethodTypeParameters.java langtools/test/tools/javac/typeAnnotations/newlocations/Parameters.java langtools/test/tools/javac/typeAnnotations/newlocations/Receivers.java langtools/test/tools/javac/typeAnnotations/newlocations/Throws.java langtools/test/tools/javac/typeAnnotations/newlocations/TypeCasts.java langtools/test/tools/javac/typeAnnotations/newlocations/TypeParameters.java langtools/test/tools/javac/typeAnnotations/newlocations/Wildcards.java langtools/test/tools/javap/typeAnnotations/ArrayClassLiterals.java langtools/test/tools/javap/typeAnnotations/ArrayClassLiterals2.java langtools/test/tools/javap/typeAnnotations/ClassLiterals.java langtools/test/tools/javap/typeAnnotations/JSR175Annotations.java langtools/test/tools/javap/typeAnnotations/NewArray.java langtools/test/tools/javap/typeAnnotations/Presence.java langtools/test/tools/javap/typeAnnotations/PresenceInner.java langtools/test/tools/javap/typeAnnotations/T6855990.java langtools/test/tools/javap/typeAnnotations/Visibility.java
diffstat 608 files changed, 31626 insertions(+), 19669 deletions(-)
--- a/.hgtags	Thu Nov 04 15:32:01 2010 -0700
+++ b/.hgtags	Sat Nov 13 18:40:37 2010 -0800
@@ -91,3 +91,4 @@
 750c1ccb2f2d1ddfa95ab6c7f897fdab2f87f7e9 jdk7-b114
 9cb24917216bc68997154f6e9566c3de62acb2f4 jdk7-b115
 a4e6aa1f45ad23a6f083ed98d970b5006ea4d292 jdk7-b116
+228e73f288c543a8c34e2a54227103ae5649e6af jdk7-b117
--- a/.hgtags-top-repo	Thu Nov 04 15:32:01 2010 -0700
+++ b/.hgtags-top-repo	Sat Nov 13 18:40:37 2010 -0800
@@ -91,3 +91,4 @@
 27985a5c6e5268014d25d55886e0ecb96af4763d jdk7-b114
 e8ebdf41b9c01a26642848f4134f5504e8fb3233 jdk7-b115
 94e9a1bfba8b8d1fe0bfd43b88629b1f27b02a76 jdk7-b116
+7220e60b097fa027e922f1aeecdd330f3e37409f jdk7-b117
--- a/corba/.hgtags	Thu Nov 04 15:32:01 2010 -0700
+++ b/corba/.hgtags	Sat Nov 13 18:40:37 2010 -0800
@@ -91,3 +91,4 @@
 88fddb73c5c4a4b50c319cbae9380caf5172ab45 jdk7-b114
 da7561d479e0ddaa4650d8023ac0fc7294e014e3 jdk7-b115
 98c028de4301106f2285ac0e128a1bb9b4c24f5c jdk7-b116
+fa502e4834dac2176499cc1f44794d5dc32a11b9 jdk7-b117
--- a/corba/make/com/sun/corba/minclude/com_sun_corba_se_impl_io.jmk	Thu Nov 04 15:32:01 2010 -0700
+++ b/corba/make/com/sun/corba/minclude/com_sun_corba_se_impl_io.jmk	Sat Nov 13 18:40:37 2010 -0800
@@ -34,7 +34,7 @@
 	com/sun/corba/se/impl/io/ObjectStreamField.java \
 	com/sun/corba/se/impl/io/OptionalDataException.java \
 	com/sun/corba/se/impl/io/ValueHandlerImpl.java \
-	com/sun/corba/se/impl/io/IIOPInputStream.java \
+        com/sun/corba/se/impl/io/IIOPInputStream.java \
 	com/sun/corba/se/impl/io/IIOPOutputStream.java \
 	com/sun/corba/se/impl/io/TypeMismatchException.java \
 	com/sun/corba/se/impl/io/InputStreamHook.java \
--- a/corba/src/share/classes/com/sun/corba/se/impl/io/IIOPInputStream.java	Thu Nov 04 15:32:01 2010 -0700
+++ b/corba/src/share/classes/com/sun/corba/se/impl/io/IIOPInputStream.java	Sat Nov 13 18:40:37 2010 -0800
@@ -2553,8 +2553,8 @@
             bridge.putObject( o, key, v ) ;
         } catch (Exception e) {
             throw utilWrapper.errorSetObjectField( e, fieldName,
-                ObjectUtility.compactObjectToString( o ),
-                ObjectUtility.compactObjectToString( v )) ;
+                o.toString(),
+                v.toString() ) ;
         }
     }
 
@@ -2566,7 +2566,7 @@
             bridge.putBoolean( o, key, v ) ;
         } catch (Exception e) {
             throw utilWrapper.errorSetBooleanField( e, fieldName,
-                ObjectUtility.compactObjectToString( o ),
+                o.toString(),
                 new Boolean(v) ) ;
         }
     }
@@ -2579,7 +2579,7 @@
             bridge.putByte( o, key, v ) ;
         } catch (Exception e) {
             throw utilWrapper.errorSetByteField( e, fieldName,
-                ObjectUtility.compactObjectToString( o ),
+                o.toString(),
                 new Byte(v) ) ;
         }
     }
@@ -2592,7 +2592,7 @@
             bridge.putChar( o, key, v ) ;
         } catch (Exception e) {
             throw utilWrapper.errorSetCharField( e, fieldName,
-                ObjectUtility.compactObjectToString( o ),
+                o.toString(),
                 new Character(v) ) ;
         }
     }
@@ -2605,7 +2605,7 @@
             bridge.putShort( o, key, v ) ;
         } catch (Exception e) {
             throw utilWrapper.errorSetShortField( e, fieldName,
-                ObjectUtility.compactObjectToString( o ),
+                o.toString(),
                 new Short(v) ) ;
         }
     }
@@ -2618,7 +2618,7 @@
             bridge.putInt( o, key, v ) ;
         } catch (Exception e) {
             throw utilWrapper.errorSetIntField( e, fieldName,
-                ObjectUtility.compactObjectToString( o ),
+                o.toString(),
                 new Integer(v) ) ;
         }
     }
@@ -2631,7 +2631,7 @@
             bridge.putLong( o, key, v ) ;
         } catch (Exception e) {
             throw utilWrapper.errorSetLongField( e, fieldName,
-                ObjectUtility.compactObjectToString( o ),
+                o.toString(),
                 new Long(v) ) ;
         }
     }
@@ -2644,7 +2644,7 @@
             bridge.putFloat( o, key, v ) ;
         } catch (Exception e) {
             throw utilWrapper.errorSetFloatField( e, fieldName,
-                ObjectUtility.compactObjectToString( o ),
+                o.toString(),
                 new Float(v) ) ;
         }
     }
@@ -2657,7 +2657,7 @@
             bridge.putDouble( o, key, v ) ;
         } catch (Exception e) {
             throw utilWrapper.errorSetDoubleField( e, fieldName,
-                ObjectUtility.compactObjectToString( o ),
+                o.toString(),
                 new Double(v) ) ;
         }
     }
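
Every hunk above makes the same substitution in the put<Type>Field error
paths: the reflective ObjectUtility.compactObjectToString(...) dump is
replaced by the argument's own toString(). A minimal sketch of the resulting
pattern, with hypothetical names standing in for the bridge and the
utilWrapper exception factory:

    // Hypothetical sketch of the simplified error path; names are
    // illustrative, not the real IIOPInputStream members.
    public class FieldSetterSketch {
        static void putIntField(Object o, long key, int v, String fieldName) {
            try {
                // stand-in for bridge.putInt(o, key, v)
                throw new UnsupportedOperationException("demo failure");
            } catch (Exception e) {
                // Report the participants via their own toString(): cheap,
                // and it cannot recurse into the object graph the way the
                // removed reflective dump could.
                throw new RuntimeException("error setting " + fieldName
                    + " on " + o.toString() + " to " + Integer.valueOf(v), e);
            }
        }

        public static void main(String[] args) {
            try {
                putIntField(new Object(), 0L, 42, "count");
            } catch (RuntimeException expected) {
                System.out.println(expected.getMessage());
            }
        }
    }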
--- a/corba/src/share/classes/com/sun/corba/se/impl/io/ValueHandlerImpl.java	Thu Nov 04 15:32:01 2010 -0700
+++ b/corba/src/share/classes/com/sun/corba/se/impl/io/ValueHandlerImpl.java	Sat Nov 13 18:40:37 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,32 +32,22 @@
 package com.sun.corba.se.impl.io;
 
 import javax.rmi.CORBA.Util;
-import javax.rmi.PortableRemoteObject;
 
 import java.util.Hashtable;
-import java.util.Stack;
 import java.io.IOException;
-import java.util.EmptyStackException;
 
-import com.sun.corba.se.impl.util.Utility;
-import com.sun.corba.se.impl.io.IIOPInputStream;
-import com.sun.corba.se.impl.io.IIOPOutputStream;
 import com.sun.corba.se.impl.util.RepositoryId;
 import com.sun.corba.se.impl.util.Utility;
 
 import org.omg.CORBA.TCKind;
 
-import org.omg.CORBA.MARSHAL;
-import org.omg.CORBA.BAD_PARAM;
-import org.omg.CORBA.CompletionStatus;
 import org.omg.CORBA.portable.IndirectionException;
 import com.sun.org.omg.SendingContext.CodeBase;
 import com.sun.org.omg.SendingContext.CodeBaseHelper;
 
 import java.security.AccessController;
 import java.security.PrivilegedAction;
-
-import com.sun.corba.se.impl.io.IIOPInputStream.ActiveRecursionManager;
+import java.security.PrivilegedExceptionAction;
 
 import com.sun.corba.se.spi.logging.CORBALogDomains;
 import com.sun.corba.se.impl.logging.OMGSystemException;
@@ -809,65 +799,163 @@
         return "com.sun.corba.se.impl.io.IIOPOutputStream";
     }
 
-    private com.sun.corba.se.impl.io.IIOPOutputStream createOutputStream() {
-        return (com.sun.corba.se.impl.io.IIOPOutputStream)AccessController.doPrivileged(
-            new StreamFactory(getOutputStreamClassName()));
+    private IIOPOutputStream createOutputStream() {
+        final String name = getOutputStreamClassName();
+        try {
+             IIOPOutputStream stream = createOutputStreamBuiltIn(name);
+             if (stream != null) {
+                 return stream;
+             }
+             return createCustom(IIOPOutputStream.class, name);
+        } catch (Throwable t) {
+            // Wrap any failure in an InternalError naming the class.
+            InternalError ie = new InternalError(
+                "Error loading " + name
+            );
+            ie.initCause(t);
+            throw ie;
+        }
+    }
+
+    /**
+     * Construct a built-in implementation with privileges.
+     * Returning null indicates a non-built-in class is specified.
+     */
+    private IIOPOutputStream createOutputStreamBuiltIn(
+        final String name
+    ) throws Throwable {
+        try {
+            return AccessController.doPrivileged(
+                new PrivilegedExceptionAction<IIOPOutputStream>() {
+                    public IIOPOutputStream run() throws IOException {
+                        return createOutputStreamBuiltInNoPriv(name);
+                    }
+                }
+            );
+        } catch (java.security.PrivilegedActionException exc) {
+            throw exc.getCause();
+        }
+    }
+
+    /**
+     * Returning null indicates a non-built-in class is specified.
+     */
+    private IIOPOutputStream createOutputStreamBuiltInNoPriv(
+        final String name
+    ) throws IOException {
+        return
+            name.equals(
+                IIOPOutputStream
+                    .class.getName()
+            ) ?
+            new IIOPOutputStream() :
+
+            name.equals(
+                com.sun.corba.se.impl.orbutil.IIOPOutputStream_1_3
+                    .class.getName()
+            ) ?
+            new com.sun.corba.se.impl.orbutil.IIOPOutputStream_1_3() :
+
+            name.equals(
+                com.sun.corba.se.impl.orbutil.IIOPOutputStream_1_3_1
+                    .class.getName()
+            ) ?
+            new com.sun.corba.se.impl.orbutil.IIOPOutputStream_1_3_1() :
+
+            null;
     }
 
     protected String getInputStreamClassName() {
         return "com.sun.corba.se.impl.io.IIOPInputStream";
     }
 
-    private com.sun.corba.se.impl.io.IIOPInputStream createInputStream() {
-        return (com.sun.corba.se.impl.io.IIOPInputStream)AccessController.doPrivileged(
-            new StreamFactory(getInputStreamClassName()));
+    private IIOPInputStream createInputStream() {
+        final String name = getInputStreamClassName();
+        try {
+             IIOPInputStream stream = createInputStreamBuiltIn(name);
+             if (stream != null) {
+                 return stream;
+             }
+             return createCustom(IIOPInputStream.class, name);
+        } catch (Throwable t) {
+            // Wrap any failure in an InternalError naming the class.
+            InternalError ie = new InternalError(
+                "Error loading " + name
+            );
+            ie.initCause(t);
+            throw ie;
+        }
     }
 
     /**
-     * Instantiates a class of the given name using the system ClassLoader
-     * as part of a PrivilegedAction.
-     *
-     * It's private final so hopefully people can't grab it outside of
-     * this class.
-     *
-     * If you're worried that someone could subclass ValueHandlerImpl,
-     * install his own streams, and snoop what's on the wire:
-     * Someone can do that only if he's allowed to use the feature
-     * of installing his own javax.rmi.CORBA.Util delegate (via a
-     * JVM property or orb.properties file, read the first time the
-     * Util class is used).  If he can do that, he can snoop
-     * anything on the wire, anyway, without abusing the
-     * StreamFactory class.
+     * Construct a built-in implementation with privileges.
+     * Returning null indicates a non-built-in class is specified.
      */
-    private static final class StreamFactory implements PrivilegedAction {
-        private String className;
+     private IIOPInputStream createInputStreamBuiltIn(
+         final String name
+     ) throws Throwable {
+         try {
+             return AccessController.doPrivileged(
+                 new PrivilegedExceptionAction<IIOPInputStream>() {
+                     public IIOPInputStream run() throws IOException {
+                         return createInputStreamBuiltInNoPriv(name);
+                     }
+                 }
+             );
+         } catch (java.security.PrivilegedActionException exc) {
+             throw exc.getCause();
+         }
+     }
 
-        public StreamFactory (String _className) {
-            className = _className;
-        }
+     /**
+      * Returning null indicates a non-built-in class is specified.
+      */
+     private IIOPInputStream createInputStreamBuiltInNoPriv(
+         final String name
+     ) throws IOException {
+         return
+             name.equals(
+                 IIOPInputStream
+                     .class.getName()
+             ) ?
+             new IIOPInputStream() :
 
-        public Object run() {
-            try {
-                // Note: We must use the system ClassLoader here
-                // since we want to load classes outside of the
-                // core JDK when running J2EE Pure ORB and
-                // talking to Kestrel.
+             name.equals(
+                 com.sun.corba.se.impl.orbutil.IIOPInputStream_1_3
+                     .class.getName()
+             ) ?
+             new com.sun.corba.se.impl.orbutil.IIOPInputStream_1_3() :
+
+             name.equals(
+                 com.sun.corba.se.impl.orbutil.IIOPInputStream_1_3_1
+                     .class.getName()
+             ) ?
+             new com.sun.corba.se.impl.orbutil.IIOPInputStream_1_3_1() :
+
+             null;
+     }
+
+     /**
+      * Create a custom implementation without privileges.
+      */
+     private <T> T createCustom(
+         final Class<T> type, final String className
+     ) throws Throwable {
+           // Note: We use the thread context or system ClassLoader here
+           // since we want to load classes outside of the
+           // core JDK when running J2EE Pure ORB and
+           // talking to Kestrel.
                 ClassLoader cl = Thread.currentThread().getContextClassLoader();
                 if (cl == null)
                     cl = ClassLoader.getSystemClassLoader();
 
-                Class streamClass = cl.loadClass(className);
+                Class<?> clazz = cl.loadClass(className);
+                Class<? extends T> streamClass = clazz.asSubclass(type);
 
                 // Since the ClassLoader should cache the class, this isn't
                 // as expensive as it looks.
                 return streamClass.newInstance();
 
-            } catch(Throwable t) {
-                InternalError ie = new InternalError( "Error loading " + className ) ;
-                ie.initCause( t ) ;
-                throw ie ;
-            }
-        }
     }
 
     /**
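
Taken together, the rewrite above replaces the old reflective StreamFactory
with a two-step factory: the known built-in stream classes are constructed
directly inside a PrivilegedExceptionAction (unwrapping the
PrivilegedActionException to rethrow the real cause), and any other class
name falls through to an unprivileged loader that checks the expected
supertype via asSubclass. A self-contained sketch of that shape, using
stand-in java.io types rather than the CORBA stream classes:

    import java.io.ByteArrayOutputStream;
    import java.io.OutputStream;
    import java.security.AccessController;
    import java.security.PrivilegedActionException;
    import java.security.PrivilegedExceptionAction;

    public class StreamFactorySketch {
        // Built-in path: construct known classes directly, with privileges,
        // returning null for anything that is not a recognized built-in.
        static Object createBuiltIn(final String name) throws Throwable {
            try {
                return AccessController.doPrivileged(
                    new PrivilegedExceptionAction<Object>() {
                        public Object run() {
                            return name.equals(ByteArrayOutputStream.class.getName())
                                ? new ByteArrayOutputStream() : null;
                        }
                    });
            } catch (PrivilegedActionException exc) {
                throw exc.getCause();  // surface the real failure
            }
        }

        // Custom path: load by name with the context (or system) loader and
        // verify the expected supertype via asSubclass before instantiating.
        static <T> T createCustom(Class<T> type, String className)
                throws Exception {
            ClassLoader cl = Thread.currentThread().getContextClassLoader();
            if (cl == null)
                cl = ClassLoader.getSystemClassLoader();
            Class<? extends T> clazz = cl.loadClass(className).asSubclass(type);
            return clazz.newInstance();
        }

        public static void main(String[] args) throws Throwable {
            System.out.println(createBuiltIn("java.io.ByteArrayOutputStream"));
            OutputStream custom = createCustom(OutputStream.class,
                "java.io.ByteArrayOutputStream");
            System.out.println(custom);
        }
    }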
--- a/corba/src/share/classes/com/sun/corba/se/impl/orb/PrefixParserAction.java	Thu Nov 04 15:32:01 2010 -0700
+++ b/corba/src/share/classes/com/sun/corba/se/impl/orb/PrefixParserAction.java	Sat Nov 13 18:40:37 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -110,7 +110,7 @@
                     throw wrapper.couldNotSetArray( thr,
                         getPropertyName(), new Integer(ctr),
                         componentType, new Integer(size),
-                        ObjectUtility.compactObjectToString( obj )) ;
+                        obj.toString() ) ;
                 }
                 ctr++ ;
             }
--- a/corba/src/share/classes/com/sun/corba/se/impl/orbutil/ObjectUtility.java	Thu Nov 04 15:32:01 2010 -0700
+++ b/corba/src/share/classes/com/sun/corba/se/impl/orbutil/ObjectUtility.java	Sat Nov 13 18:40:37 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,103 +50,8 @@
 import java.math.BigDecimal ;
 
 public final class ObjectUtility {
-    private boolean useToString ;
-    private boolean isIndenting ;
-    private int initialLevel ;
-    private int increment ;
-    private ClassMap classToPrinter = new ClassMap() ;
+    private ObjectUtility() {}
 
-    private static ObjectUtility standard = new ObjectUtility( false, true,
-        0, 4 ) ;
-    private static ObjectUtility compact = new ObjectUtility( true, false,
-        0, 4 ) ;
-
-    private ObjectUtility( boolean useToString, boolean isIndenting,
-        int initialLevel, int increment )
-    {
-        this.useToString = useToString ;
-        this.isIndenting = isIndenting ;
-        this.initialLevel = initialLevel ;
-        this.increment = increment ;
-        classToPrinter.put( Properties.class, propertiesPrinter ) ;
-        classToPrinter.put( Collection.class, collectionPrinter ) ;
-        classToPrinter.put( Map.class, mapPrinter ) ;
-    }
-
-    /** Construct an Utility instance with the desired objectToString
-    * behavior.
-    */
-    public static ObjectUtility make( boolean useToString, boolean isIndenting,
-        int initialLevel, int increment )
-    {
-        return new ObjectUtility( useToString, isIndenting, initialLevel,
-            increment ) ;
-    }
-
-    /** Construct an Utility instance with the desired objectToString
-    * behavior.
-    */
-    public static ObjectUtility make( boolean useToString, boolean isIndenting )
-    {
-        return new ObjectUtility( useToString, isIndenting, 0, 4 ) ;
-    }
-
-    /** Get the standard Utility object that supports objectToString with
-    * indented display and no use of toString() methods.
-    */
-    public static ObjectUtility make()
-    {
-        return standard ;
-    }
-
-    /** A convenience method that gives the default behavior: use indenting
-    * to display the object's structure and do not use built-in toString
-    * methods.
-    */
-    public static String defaultObjectToString( java.lang.Object object )
-    {
-        return standard.objectToString( object ) ;
-    }
-
-    public static String compactObjectToString( java.lang.Object object )
-    {
-        return compact.objectToString( object ) ;
-    }
-
-    /** objectToString handles display of arbitrary objects.  It correctly
-    * handles objects whose elements form an arbitrary graph.  It uses
-    * reflection to display the contents of any kind of object.
-    * An object's toString() method may optionally be used, but the default
-    * is to ignore all toString() methods except for those defined for
-    * primitive types, primitive type wrappers, and strings.
-    */
-    public String objectToString(java.lang.Object obj)
-    {
-        IdentityHashMap printed = new IdentityHashMap() ;
-        ObjectWriter result = ObjectWriter.make( isIndenting, initialLevel,
-            increment ) ;
-        objectToStringHelper( printed, result, obj ) ;
-        return result.toString() ;
-    }
-
-    // Perform a deep structural equality comparison of the two objects.
-    // This handles all arrays, maps, and sets specially, otherwise
-    // it just calls the object's equals() method.
-    public static boolean equals( java.lang.Object obj1, java.lang.Object obj2 )
-    {
-        // Set of pairs of objects that have been (or are being) considered for
-        // equality.  Such pairs are presumed to be equals.  If they are not,
-        // this will be detected eventually and the equals method will return
-        // false.
-        Set considered = new HashSet() ;
-
-        // Map that gives the corresponding component of obj2 for a component
-        // of obj1.  This is used to check for the same aliasing and use of
-        // equal objects in both objects.
-        Map counterpart = new IdentityHashMap() ;
-
-        return equalsHelper( counterpart, considered, obj1, obj2 ) ;
-    }
 
     /** If arr1 and arr2 are both arrays of the same component type,
      * return an array of that component type that consists of the
@@ -179,544 +84,4 @@
         return result ;
     }
 
-//===========================================================================
-//  Implementation
-//===========================================================================
-
-    private void objectToStringHelper( IdentityHashMap printed,
-        ObjectWriter result, java.lang.Object obj)
-    {
-        if (obj==null) {
-            result.append( "null" ) ;
-            result.endElement() ;
-        } else {
-            Class cls = obj.getClass() ;
-            result.startObject( obj ) ;
-
-            if (printed.keySet().contains( obj )) {
-                result.endObject( "*VISITED*" ) ;
-            } else {
-                printed.put( obj, null ) ;
-
-                if (mustUseToString(cls)) {
-                    result.endObject( obj.toString() ) ;
-                } else {
-                    // First, handle any classes that have special printer
-                    // methods defined.  This is useful when the class
-                    // overrides toString with something that
-                    // is not sufficiently detailed.
-                    ObjectPrinter printer = (ObjectPrinter)(classToPrinter.get(
-                        cls )) ;
-                    if (printer != null) {
-                        printer.print( printed, result, obj ) ;
-                        result.endObject() ;
-                    } else {
-                        Class compClass = cls.getComponentType() ;
-
-                        if (compClass == null)
-                            // handleObject always calls endObject
-                            handleObject( printed, result, obj ) ;
-                        else {
-                            handleArray( printed, result, obj ) ;
-                            result.endObject() ;
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    private static interface ObjectPrinter {
-        void print( IdentityHashMap printed, ObjectWriter buff,
-            java.lang.Object obj ) ;
-    }
-
-    private ObjectPrinter propertiesPrinter = new ObjectPrinter() {
-        public void print( IdentityHashMap printed, ObjectWriter buff,
-            java.lang.Object obj )
-        {
-            if (!(obj instanceof Properties))
-                throw new Error() ;
-
-            Properties props = (Properties)obj ;
-            Enumeration keys = props.propertyNames() ;
-            while (keys.hasMoreElements()) {
-                String key = (String)(keys.nextElement()) ;
-                String value = props.getProperty( key ) ;
-                buff.startElement() ;
-                buff.append( key ) ;
-                buff.append( "=" ) ;
-                buff.append( value ) ;
-                buff.endElement() ;
-            }
-        }
-    } ;
-
-    private ObjectPrinter collectionPrinter = new ObjectPrinter() {
-        public void print( IdentityHashMap printed, ObjectWriter buff,
-            java.lang.Object obj )
-        {
-            if (!(obj instanceof Collection))
-                throw new Error() ;
-
-            Collection coll = (Collection)obj ;
-            Iterator iter = coll.iterator() ;
-            while (iter.hasNext()) {
-                java.lang.Object element = iter.next() ;
-                buff.startElement() ;
-                objectToStringHelper( printed, buff, element ) ;
-                buff.endElement() ;
-            }
-        }
-    } ;
-
-    private ObjectPrinter mapPrinter = new ObjectPrinter() {
-        public void print( IdentityHashMap printed, ObjectWriter buff,
-            java.lang.Object obj )
-        {
-            if (!(obj instanceof Map))
-                throw new Error() ;
-
-            Map map = (Map)obj ;
-            Iterator iter = map.entrySet().iterator() ;
-            while (iter.hasNext()) {
-                Entry entry = (Entry)(iter.next()) ;
-                buff.startElement() ;
-                objectToStringHelper( printed, buff, entry.getKey() ) ;
-                buff.append( "=>" ) ;
-                objectToStringHelper( printed, buff, entry.getValue() ) ;
-                buff.endElement() ;
-            }
-        }
-    } ;
-
-    private static class ClassMap {
-        ArrayList data ;
-
-        public ClassMap()
-        {
-            data = new ArrayList() ;
-        }
-
-        /** Return the first element of the ClassMap that is assignable to cls.
-        * The order is determined by the order in which the put method was
-        * called.  Returns null if there is no match.
-        */
-        public java.lang.Object get( Class cls )
-        {
-            Iterator iter = data.iterator() ;
-            while (iter.hasNext()) {
-                java.lang.Object[] arr = (java.lang.Object[])(iter.next()) ;
-                Class key = (Class)(arr[0]) ;
-                if (key.isAssignableFrom( cls ))
-                    return arr[1] ;
-            }
-
-            return null ;
-        }
-
-        /** Add obj to the map with key cls.  Note that order matters,
-         * as the first match is returned.
-         */
-        public void put( Class cls, java.lang.Object obj )
-        {
-            java.lang.Object[] pair = { cls, obj } ;
-            data.add( pair ) ;
-        }
-    }
-
-    private boolean mustUseToString( Class cls )
-    {
-        // These probably never occur
-        if (cls.isPrimitive())
-            return true ;
-
-        // We must use toString for all primitive wrappers, since
-        // otherwise the code recurses endlessly (access value field
-        // inside Integer, returns another Integer through reflection).
-        if ((cls == Integer.class) ||
-            (cls == BigInteger.class) ||
-            (cls == BigDecimal.class) ||
-            (cls == String.class) ||
-            (cls == StringBuffer.class) ||
-            (cls == Long.class) ||
-            (cls == Short.class) ||
-            (cls == Byte.class) ||
-            (cls == Character.class) ||
-            (cls == Float.class) ||
-            (cls == Double.class) ||
-            (cls == Boolean.class))
-            return true ;
-
-        if (useToString) {
-            try {
-                cls.getDeclaredMethod( "toString", (Class[])null ) ;
-                return true ;
-            } catch (Exception exc) {
-                return false ;
-            }
-        }
-
-        return false ;
-    }
-
-    private void handleObject( IdentityHashMap printed, ObjectWriter result,
-        java.lang.Object obj )
-    {
-        Class cls = obj.getClass() ;
-
-        try {
-            Field[] fields;
-            SecurityManager security = System.getSecurityManager();
-            if (security != null && !Modifier.isPublic(cls.getModifiers())) {
-                fields = new Field[0];
-            } else {
-                fields = cls.getDeclaredFields();
-            }
-
-            for (int ctr=0; ctr<fields.length; ctr++ ) {
-                final Field fld = fields[ctr] ;
-                int modifiers = fld.getModifiers() ;
-
-                // Do not display field if it is static, since these fields
-                // are always the same for every instances.  This could
-                // be made configurable, but I don't think it is
-                // useful to do so.
-                if (!Modifier.isStatic( modifiers )) {
-                    if (security != null) {
-                        if (!Modifier.isPublic(modifiers))
-                            continue;
-                    }
-                    result.startElement() ;
-                    result.append( fld.getName() ) ;
-                    result.append( ":" ) ;
-
-                    try {
-                        // Make sure that we can read the field if it is
-                        // not public
-                        AccessController.doPrivileged( new PrivilegedAction() {
-                            public Object run() {
-                                fld.setAccessible( true ) ;
-                                return null ;
-                            }
-                        } ) ;
-
-                        java.lang.Object value = fld.get( obj ) ;
-                        objectToStringHelper( printed, result, value ) ;
-                    } catch (Exception exc2) {
-                        result.append( "???" ) ;
-                    }
-
-                    result.endElement() ;
-                }
-            }
-
-            result.endObject() ;
-        } catch (Exception exc2) {
-            result.endObject( obj.toString() ) ;
-        }
-    }
-
-    private void handleArray( IdentityHashMap printed, ObjectWriter result,
-        java.lang.Object obj )
-    {
-        Class compClass = obj.getClass().getComponentType() ;
-        if (compClass == boolean.class) {
-            boolean[] arr = (boolean[])obj ;
-            for (int ctr=0; ctr<arr.length; ctr++) {
-                result.startElement() ;
-                result.append( arr[ctr] ) ;
-                result.endElement() ;
-            }
-        } else if (compClass == byte.class) {
-            byte[] arr = (byte[])obj ;
-            for (int ctr=0; ctr<arr.length; ctr++) {
-                result.startElement() ;
-                result.append( arr[ctr] ) ;
-                result.endElement() ;
-            }
-        } else if (compClass == short.class) {
-            short[] arr = (short[])obj ;
-            for (int ctr=0; ctr<arr.length; ctr++) {
-                result.startElement() ;
-                result.append( arr[ctr] ) ;
-                result.endElement() ;
-            }
-        } else if (compClass == int.class) {
-            int[] arr = (int[])obj ;
-            for (int ctr=0; ctr<arr.length; ctr++) {
-                result.startElement() ;
-                result.append( arr[ctr] ) ;
-                result.endElement() ;
-            }
-        } else if (compClass == long.class) {
-            long[] arr = (long[])obj ;
-            for (int ctr=0; ctr<arr.length; ctr++) {
-                result.startElement() ;
-                result.append( arr[ctr] ) ;
-                result.endElement() ;
-            }
-        } else if (compClass == char.class) {
-            char[] arr = (char[])obj ;
-            for (int ctr=0; ctr<arr.length; ctr++) {
-                result.startElement() ;
-                result.append( arr[ctr] ) ;
-                result.endElement() ;
-            }
-        } else if (compClass == float.class) {
-            float[] arr = (float[])obj ;
-            for (int ctr=0; ctr<arr.length; ctr++) {
-                result.startElement() ;
-                result.append( arr[ctr] ) ;
-                result.endElement() ;
-            }
-        } else if (compClass == double.class) {
-            double[] arr = (double[])obj ;
-            for (int ctr=0; ctr<arr.length; ctr++) {
-                result.startElement() ;
-                result.append( arr[ctr] ) ;
-                result.endElement() ;
-            }
-        } else { // array of object
-            java.lang.Object[] arr = (java.lang.Object[])obj ;
-            for (int ctr=0; ctr<arr.length; ctr++) {
-                result.startElement() ;
-                objectToStringHelper( printed, result, arr[ctr] ) ;
-                result.endElement() ;
-            }
-        }
-    }
-
-    private static class Pair
-    {
-        private java.lang.Object obj1 ;
-        private java.lang.Object obj2 ;
-
-        Pair( java.lang.Object obj1, java.lang.Object obj2 )
-        {
-            this.obj1 = obj1 ;
-            this.obj2 = obj2 ;
-        }
-
-        public boolean equals( java.lang.Object obj )
-        {
-            if (!(obj instanceof Pair))
-                return false ;
-
-            Pair other = (Pair)obj ;
-            return other.obj1 == obj1 && other.obj2 == obj2 ;
-        }
-
-        public int hashCode()
-        {
-            return System.identityHashCode( obj1 ) ^
-                System.identityHashCode( obj2 ) ;
-        }
-    }
-
-    private static boolean equalsHelper( Map counterpart, Set considered,
-        java.lang.Object obj1, java.lang.Object obj2 )
-    {
-        if ((obj1 == null) || (obj2 == null))
-            return obj1 == obj2 ;
-
-        java.lang.Object other2 = counterpart.get( obj1 ) ;
-        if (other2 == null) {
-            other2 = obj2 ;
-            counterpart.put( obj1, other2 ) ;
-        }
-
-        if (obj1 == other2)
-            return true ;
-
-        if (obj2 != other2)
-            return false ;
-
-        Pair pair = new Pair( obj1, obj2 ) ;
-        if (considered.contains( pair ))
-            return true ;
-        else
-            considered.add( pair ) ;
-
-        if (obj1 instanceof java.lang.Object[] &&
-            obj2 instanceof java.lang.Object[])
-            return equalArrays( counterpart, considered,
-                (java.lang.Object[])obj1, (java.lang.Object[])obj2 ) ;
-        else if (obj1 instanceof Map && obj2 instanceof Map)
-            return equalMaps( counterpart, considered,
-                (Map)obj1, (Map)obj2 ) ;
-        else if (obj1 instanceof Set && obj2 instanceof Set)
-            return equalSets( counterpart, considered,
-                (Set)obj1, (Set)obj2 ) ;
-        else if (obj1 instanceof List && obj2 instanceof List)
-            return equalLists( counterpart, considered,
-                (List)obj1, (List)obj2 ) ;
-        else if (obj1 instanceof boolean[] && obj2 instanceof boolean[])
-            return Arrays.equals( (boolean[])obj1, (boolean[])obj2 ) ;
-        else if (obj1 instanceof byte[] && obj2 instanceof byte[])
-            return Arrays.equals( (byte[])obj1, (byte[])obj2 ) ;
-        else if (obj1 instanceof char[] && obj2 instanceof char[])
-            return Arrays.equals( (char[])obj1, (char[])obj2 ) ;
-        else if (obj1 instanceof double[] && obj2 instanceof double[])
-            return Arrays.equals( (double[])obj1, (double[])obj2 ) ;
-        else if (obj1 instanceof float[] && obj2 instanceof float[])
-            return Arrays.equals( (float[])obj1, (float[])obj2 ) ;
-        else if (obj1 instanceof int[] && obj2 instanceof int[])
-            return Arrays.equals( (int[])obj1, (int[])obj2 ) ;
-        else if (obj1 instanceof long[] && obj2 instanceof long[])
-            return Arrays.equals( (long[])obj1, (long[])obj2 ) ;
-        else {
-            Class cls = obj1.getClass() ;
-            if (cls != obj2.getClass())
-                return obj1.equals( obj2 ) ;
-            else
-                return equalsObject( counterpart, considered, cls, obj1, obj2 ) ;
-        }
-    }
-
-    private static boolean equalsObject( Map counterpart, Set considered,
-        Class cls, java.lang.Object obj1, java.lang.Object obj2 )
-    {
-        Class objectClass = java.lang.Object.class ;
-        if (cls == objectClass)
-            return true ;
-
-        Class[] equalsTypes = { objectClass } ;
-        try {
-            Method equalsMethod = cls.getDeclaredMethod( "equals",
-                equalsTypes ) ;
-            return obj1.equals( obj2 ) ;
-        } catch (Exception exc) {
-            if (equalsObjectFields( counterpart, considered,
-                    cls, obj1, obj2 ))
-                return equalsObject( counterpart, considered,
-                    cls.getSuperclass(), obj1, obj2 ) ;
-            else
-                return false ;
-        }
-    }
-
-    private static boolean equalsObjectFields( Map counterpart, Set considered,
-        Class cls, java.lang.Object obj1, java.lang.Object obj2 )
-    {
-        Field[] fields = cls.getDeclaredFields() ;
-        for (int ctr=0; ctr<fields.length; ctr++) {
-            try {
-                final Field field = fields[ctr] ;
-                // Ignore static fields
-                if (!Modifier.isStatic( field.getModifiers())) {
-                    AccessController.doPrivileged(new PrivilegedAction() {
-                        public Object run() {
-                            field.setAccessible( true ) ;
-                            return null ;
-                        }
-                    } ) ;
-
-                    java.lang.Object value1 = field.get( obj1 ) ;
-                    java.lang.Object value2 = field.get( obj2 ) ;
-                    if (!equalsHelper( counterpart, considered, value1,
-                        value2 ))
-                        return false ;
-                }
-            } catch (IllegalAccessException exc) {
-                return false ;
-            }
-        }
-
-        return true ;
-    }
-
-    private static boolean equalArrays( Map counterpart, Set considered,
-        java.lang.Object[] arr1, java.lang.Object[] arr2 )
-    {
-        int len = arr1.length ;
-        if (len != arr2.length)
-            return false ;
-
-        for (int ctr = 0; ctr<len; ctr++ )
-            if (!equalsHelper( counterpart, considered, arr1[ctr], arr2[ctr] ))
-                return false ;
-
-        return true ;
-    }
-
-    private static boolean equalMaps( Map counterpart, Set considered,
-        Map map1, Map map2 )
-    {
-        if (map2.size() != map1.size())
-            return false;
-
-        try {
-            Iterator i = map1.entrySet().iterator();
-            while (i.hasNext()) {
-                Entry e = (Entry) i.next();
-                java.lang.Object key = e.getKey();
-                java.lang.Object value = e.getValue();
-                if (value == null) {
-                    if (!(map2.get(key)==null && map2.containsKey(key)))
-                        return false;
-                } else {
-                    if (!equalsHelper( counterpart, considered,
-                        value, map2.get(key)))
-                        return false;
-                }
-            }
-        } catch(ClassCastException unused)   {
-            return false;
-        } catch(NullPointerException unused) {
-            return false;
-        }
-
-        return true;
-    }
-
-    // Obviously this is an inefficient quadratic algorithm.
-    // This is taken pretty directly from AbstractSet and AbstractCollection
-    // in the JDK.
-    // For HashSet, an O(n) (with a good hash function) algorithm
-    // is possible, and likewise TreeSet, since it is
-    // ordered, is O(n).  But this is not worth the effort here.
-    // Note that the inner loop uses equals, not equalsHelper.
-    // This is needed because of the searching behavior of this test.
-    // However, note that this will NOT correctly handle sets that
-    // contain themselves as members, or that have members that reference
-    // themselves.  These cases will cause infinite regress!
-    private static boolean equalSets( Map counterpart, Set considered,
-        Set set1, Set set2 )
-    {
-        if (set1.size() != set2.size())
-            return false ;
-
-        Iterator e1 = set1.iterator() ;
-        while (e1.hasNext()) {
-            java.lang.Object obj1 = e1.next() ;
-
-            boolean found = false ;
-            Iterator e2 = set2.iterator() ;
-            while (e2.hasNext() && !found) {
-                java.lang.Object obj2 = e2.next() ;
-                found = equals( obj1, obj2 ) ;
-            }
-
-            if (!found)
-                return false ;
-        }
-
-        return true ;
-    }
-
-    private static boolean equalLists( Map counterpart, Set considered,
-        List list1, List list2 )
-    {
-        ListIterator e1 = list1.listIterator();
-        ListIterator e2 = list2.listIterator();
-        while(e1.hasNext() && e2.hasNext()) {
-            java.lang.Object o1 = e1.next();
-            java.lang.Object o2 = e2.next();
-            if (!(o1==null ? o2==null : equalsHelper(
-                counterpart, considered, o1, o2)))
-                return false;
-        }
-        return !(e1.hasNext() || e2.hasNext());
-    }
 }
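
The deleted machinery above walked arbitrary object graphs by reflection,
keeping an IdentityHashMap of already-printed objects so that cyclic
structures terminate with a *VISITED* marker, and forcing toString() for
primitive wrappers so the walk cannot recurse endlessly into e.g. Integer.
A small illustrative sketch of that cycle-guard idea (an assumption-laden
miniature, not a restoration of the removed class):

    import java.lang.reflect.Field;
    import java.lang.reflect.Modifier;
    import java.util.IdentityHashMap;

    public class CycleGuardSketch {
        static String dump(Object obj, IdentityHashMap<Object, Object> printed) {
            if (obj == null) return "null";
            // Wrappers and strings print via toString(), as the removed
            // mustUseToString() required, to stop the reflective recursion.
            if (obj instanceof Number || obj instanceof String
                    || obj instanceof Boolean || obj instanceof Character)
                return obj.toString();
            if (printed.containsKey(obj)) return "*VISITED*";
            printed.put(obj, null);

            StringBuilder sb =
                new StringBuilder(obj.getClass().getSimpleName()).append('(');
            for (Field f : obj.getClass().getDeclaredFields()) {
                if (Modifier.isStatic(f.getModifiers())) continue; // skip statics
                f.setAccessible(true);
                try {
                    sb.append(f.getName()).append(':')
                      .append(dump(f.get(obj), printed)).append(' ');
                } catch (IllegalAccessException e) {
                    sb.append(f.getName()).append(":??? ");
                }
            }
            return sb.append(')').toString();
        }

        static class Node { int id = 1; Node next = this; } // self-referential

        public static void main(String[] args) {
            System.out.println(
                dump(new Node(), new IdentityHashMap<Object, Object>()));
            // prints: Node(id:1 next:*VISITED* )
        }
    }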
--- a/corba/src/share/classes/com/sun/corba/se/impl/transport/SocketOrChannelAcceptorImpl.java	Thu Nov 04 15:32:01 2010 -0700
+++ b/corba/src/share/classes/com/sun/corba/se/impl/transport/SocketOrChannelAcceptorImpl.java	Sat Nov 13 18:40:37 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,14 +33,7 @@
 import java.nio.channels.SelectionKey;
 import java.nio.channels.ServerSocketChannel;
 import java.nio.channels.SocketChannel;
-import java.security.AccessController;
-import java.security.PrivilegedAction;
-import java.util.Collection;
 import java.util.Iterator;
-import java.util.LinkedList;
-
-import org.omg.CORBA.CompletionStatus;
-import org.omg.CORBA.INTERNAL;
 
 import com.sun.corba.se.pept.broker.Broker;
 import com.sun.corba.se.pept.encoding.InputObject;
@@ -61,18 +54,12 @@
 import com.sun.corba.se.spi.ior.iiop.IIOPProfileTemplate ;
 import com.sun.corba.se.spi.ior.iiop.GIOPVersion ;
 import com.sun.corba.se.spi.ior.iiop.AlternateIIOPAddressComponent;
-import com.sun.corba.se.spi.legacy.connection.LegacyServerSocketEndPointInfo;
 import com.sun.corba.se.spi.logging.CORBALogDomains;
-import com.sun.corba.se.spi.monitoring.LongMonitoredAttributeBase;
-import com.sun.corba.se.spi.monitoring.MonitoringConstants;
-import com.sun.corba.se.spi.monitoring.MonitoringFactories;
-import com.sun.corba.se.spi.monitoring.MonitoredObject;
 import com.sun.corba.se.spi.orb.ORB;
 import com.sun.corba.se.spi.orbutil.threadpool.Work;
 import com.sun.corba.se.spi.protocol.CorbaMessageMediator;
 import com.sun.corba.se.spi.transport.CorbaAcceptor;
 import com.sun.corba.se.spi.transport.CorbaConnection;
-import com.sun.corba.se.spi.transport.CorbaContactInfo;
 import com.sun.corba.se.spi.transport.SocketInfo;
 import com.sun.corba.se.spi.transport.SocketOrChannelAcceptor;
 
@@ -82,7 +69,6 @@
 import com.sun.corba.se.impl.oa.poa.Policies; // REVISIT impl/poa specific
 import com.sun.corba.se.impl.orbutil.ORBConstants;
 import com.sun.corba.se.impl.orbutil.ORBUtility;
-import com.sun.corba.se.impl.ior.iiop.JavaSerializationComponent;
 
 // BEGIN Legacy support.
 import com.sun.corba.se.spi.legacy.connection.LegacyServerSocketEndPointInfo;
@@ -442,12 +428,7 @@
                 dprint(".doWork->: " + this);
             }
             if (selectionKey.isAcceptable()) {
-                AccessController.doPrivileged(new PrivilegedAction() {
-                    public java.lang.Object run() {
                         accept();
-                        return null;
-                    }
-                });
             } else {
                 if (orb.transportDebugFlag) {
                     dprint(".doWork: ! selectionKey.isAcceptable: " + this);
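
With the PrivilegedAction wrapper removed, doWork() now calls accept()
directly when the selection key reports an acceptable connection. A minimal
hypothetical sketch of that dispatch using plain NIO types (the real method
delegates to the acceptor's own accept() and dprint logging):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.nio.channels.SelectionKey;
    import java.nio.channels.Selector;
    import java.nio.channels.ServerSocketChannel;

    public class AcceptorSketch {
        static void doWork(SelectionKey selectionKey) throws IOException {
            if (selectionKey.isAcceptable()) {
                ServerSocketChannel server =
                    (ServerSocketChannel) selectionKey.channel();
                // Direct call; no doPrivileged block around it any more.
                System.out.println("accepted: " + server.accept());
            } else {
                System.out.println("! selectionKey.isAcceptable: " + selectionKey);
            }
        }

        public static void main(String[] args) throws IOException {
            Selector selector = Selector.open();
            ServerSocketChannel server = ServerSocketChannel.open();
            server.bind(new InetSocketAddress(0));
            server.configureBlocking(false);
            SelectionKey key = server.register(selector, SelectionKey.OP_ACCEPT);
            doWork(key);  // no select() has fired, so this takes the else branch
            server.close();
            selector.close();
        }
    }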
--- a/corba/src/share/classes/com/sun/corba/se/spi/orb/OperationFactory.java	Thu Nov 04 15:32:01 2010 -0700
+++ b/corba/src/share/classes/com/sun/corba/se/spi/orb/OperationFactory.java	Sat Nov 13 18:40:37 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 package com.sun.corba.se.spi.orb ;
 
 import java.util.StringTokenizer ;
+import java.util.Arrays ;
 
 import java.lang.reflect.Array ;
 
@@ -446,7 +447,7 @@
         public String toString() {
             return "sequenceAction(separator=\"" + sep +
                 "\",actions=" +
-                ObjectUtility.compactObjectToString(actions) + ")" ;
+                Arrays.toString(actions) + ")" ;
         }
     }
 
@@ -533,7 +534,7 @@
 
         public String toString() {
             return "mapSequenceAction(" +
-                ObjectUtility.compactObjectToString(op) + ")" ;
+                Arrays.toString(op) + ")" ;
         }
     }
 
--- a/corba/src/share/classes/com/sun/corba/se/spi/orb/ParserImplBase.java	Thu Nov 04 15:32:01 2010 -0700
+++ b/corba/src/share/classes/com/sun/corba/se/spi/orb/ParserImplBase.java	Sat Nov 13 18:40:37 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,7 @@
                 // Since exc wraps the actual exception, use exc.getCause()
                 // instead of exc.
                 throw wrapper.errorSettingField( exc.getCause(), name,
-                    ObjectUtility.compactObjectToString(value) ) ;
+                    value.toString() ) ;
             }
         }
 
--- a/hotspot/.hgtags	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/.hgtags	Sat Nov 13 18:40:37 2010 -0800
@@ -127,3 +127,6 @@
 5511edd5d719f3fc9fdd04879482026a3d2c8652 hs20-b01
 bdbc48857210a509b3c50a3291ecb9dd6a72e016 jdk7-b115
 96b3f2a7add0b445b8aa421f6823cff5a2e2fe03 jdk7-b116
+52f19c724d9634af79044a2e0defbe4a5f1adbda hs20-b02
+806d0c037e6bbb88dac0699673f4ba55ee8c02da jdk7-b117
+698b7b727e12de44139d8cca6ab9a494ead13253 jdk7-b118
--- a/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -664,7 +664,7 @@
   // Use temps to avoid kills
   LIR_Opr t1 = FrameMap::G1_opr;
   LIR_Opr t2 = FrameMap::G3_opr;
-  LIR_Opr addr = (type == objectType) ? new_register(T_OBJECT) : new_pointer_register();
+  LIR_Opr addr = new_pointer_register();
 
   // get address of field
   obj.load_item();
--- a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -62,3 +62,5 @@
 
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
+
+define_pd_global(bool, UseMembar,            false);
--- a/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -499,7 +499,7 @@
   Register new_val_reg = new_val()->as_register();
   __ cmpptr(new_val_reg, (int32_t) NULL_WORD);
   __ jcc(Assembler::equal, _continuation);
-  ce->store_parameter(addr()->as_register(), 0);
+  ce->store_parameter(addr()->as_pointer_register(), 0);
   __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
   __ jmp(_continuation);
 }
--- a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -765,7 +765,7 @@
     ShouldNotReachHere();
   }
 
-  LIR_Opr addr = (type == objectType) ? new_register(T_OBJECT) : new_pointer_register();
+  LIR_Opr addr = new_pointer_register();
   LIR_Address* a;
   if(offset.result()->is_constant()) {
     a = new LIR_Address(obj.result(),
--- a/hotspot/src/cpu/x86/vm/globals_x86.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/globals_x86.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -63,3 +63,5 @@
 
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
+
+define_pd_global(bool, UseMembar,            false);
--- a/hotspot/src/cpu/zero/vm/globals_zero.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/cpu/zero/vm/globals_zero.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -45,3 +45,5 @@
 
 define_pd_global(bool,  RewriteBytecodes,     true);
 define_pd_global(bool,  RewriteFrequentPairs, true);
+
+define_pd_global(bool,  UseMembar,            false);
--- a/hotspot/src/os/linux/vm/attachListener_linux.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/os/linux/vm/attachListener_linux.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -176,10 +176,10 @@
 
   int n = snprintf(path, UNIX_PATH_MAX, "%s/.java_pid%d",
                    os::get_temp_directory(), os::current_process_id());
-  if (n <= (int)UNIX_PATH_MAX) {
+  if (n < (int)UNIX_PATH_MAX) {
     n = snprintf(initial_path, UNIX_PATH_MAX, "%s.tmp", path);
   }
-  if (n > (int)UNIX_PATH_MAX) {
+  if (n >= (int)UNIX_PATH_MAX) {
     return -1;
   }
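
The hunk above fixes an off-by-one in the truncation test: snprintf
returns the length the formatted string would have required, excluding
the terminating NUL, so a return value equal to UNIX_PATH_MAX already
means the path was cut off.  A minimal standalone sketch of the
corrected check (plain C++, not the VM code; the format string and
temp directory are stand-ins):

    #include <cstddef>
    #include <cstdio>

    // true if the path fit: n >= cap (not n > cap) signals truncation.
    bool format_pid_path(char* buf, size_t cap, int pid) {
      int n = snprintf(buf, cap, "%s/.java_pid%d", "/tmp", pid);
      return n >= 0 && (size_t)n < cap;
    }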
 
--- a/hotspot/src/os/linux/vm/objectMonitor_linux.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,24 +0,0 @@
-
-/*
- * Copyright (c) 1999, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
--- a/hotspot/src/os/linux/vm/objectMonitor_linux.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 1999, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
- private:
--- a/hotspot/src/os/linux/vm/objectMonitor_linux.inline.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 1999, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -827,8 +827,10 @@
 
       switch (thr_type) {
       case os::java_thread:
-        // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
-        if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
+        // Java threads use ThreadStackSize, whose default value can be
+        // changed with the flag -Xss
+        assert (JavaThread::stack_size_at_create() > 0, "this should be set");
+        stack_size = JavaThread::stack_size_at_create();
         break;
       case os::compiler_thread:
         if (CompilerThreadStackSize > 0) {
@@ -3922,12 +3924,21 @@
   Linux::signal_sets_init();
   Linux::install_signal_handlers();
 
+  // Check the minimum allowable stack size for thread creation and for
+  // initialization of the java system classes, including StackOverflowError;
+  // this depends on the page size.  Add a page for compiler2 recursion in
+  // the main thread.  Add in 2*BytesPerWord times page size to account for
+  // VM stack during class initialization depending on 32 or 64 bit VM.
+  os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
+            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+                    2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::page_size());
+
   size_t threadStackSizeInBytes = ThreadStackSize * K;
   if (threadStackSizeInBytes != 0 &&
-      threadStackSizeInBytes < Linux::min_stack_allowed) {
+      threadStackSizeInBytes < os::Linux::min_stack_allowed) {
         tty->print_cr("\nThe stack size specified is too small, "
                       "Specify at least %dk",
-                      Linux::min_stack_allowed / K);
+                      os::Linux::min_stack_allowed / K);
         return JNI_ERR;
   }
 
@@ -4839,7 +4850,7 @@
 
   // Next, demultiplex/decode time arguments
   timespec absTime;
-  if (time < 0) { // don't wait at all
+  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
     return;
   }
   if (time > 0) {
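
Here and in the Solaris and Windows ports below, the guard now treats an
absolute deadline of 0 as already expired instead of as a request to
wait.  A tiny sketch of the decoded condition (hypothetical helper, not
VM code):

    // park() returns immediately for a negative time and, when the time
    // is an absolute epoch deadline, also for time == 0, since that
    // deadline always lies in the past.
    bool park_returns_immediately(long time, bool isAbsolute) {
      return time < 0 || (isAbsolute && time == 0);
    }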
--- a/hotspot/src/os/solaris/vm/objectMonitor_solaris.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
--- a/hotspot/src/os/solaris/vm/objectMonitor_solaris.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
- private:
--- a/hotspot/src/os/solaris/vm/objectMonitor_solaris.inline.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -4878,18 +4878,17 @@
   // Check minimum allowable stack size for thread creation and to initialize
   // the java system classes, including StackOverflowError - depends on page
   // size.  Add a page for compiler2 recursion in main thread.
-  // Add in BytesPerWord times page size to account for VM stack during
+  // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
-  guarantee((Solaris::min_stack_allowed >=
-    (StackYellowPages+StackRedPages+StackShadowPages+BytesPerWord
-     COMPILER2_PRESENT(+1)) * page_size),
-    "need to increase Solaris::min_stack_allowed on this platform");
+  os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
+            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+                    2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
 
   size_t threadStackSizeInBytes = ThreadStackSize * K;
   if (threadStackSizeInBytes != 0 &&
-    threadStackSizeInBytes < Solaris::min_stack_allowed) {
+    threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
     tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
-                  Solaris::min_stack_allowed/K);
+                  os::Solaris::min_stack_allowed/K);
     return JNI_ERR;
   }
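
The Linux hunk above, this Solaris hunk, and the Windows hunk below now
derive the minimum stack from the same formula at runtime instead of a
compile-time guarantee.  A rough standalone sketch of the arithmetic,
with hypothetical flag values (the real inputs are StackYellowPages,
StackRedPages, StackShadowPages, BytesPerWord, and the VM page size):

    #include <cstddef>

    // (yellow + red + shadow guard pages, plus 2*BytesPerWord pages for
    // class initialization, plus one page for compiler2 recursion in the
    // main thread) * page size
    size_t min_stack_bytes(size_t yellow, size_t red, size_t shadow,
                           size_t bytes_per_word, size_t page,
                           bool compiler2) {
      size_t pages = yellow + red + shadow + 2 * bytes_per_word
                     + (compiler2 ? 1 : 0);
      return pages * page;
    }

    // e.g. min_stack_bytes(2, 1, 3, 8, 4096, true) with these made-up
    // values gives (2 + 1 + 3 + 16 + 1) * 4096 = 94208 bytes.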
 
@@ -5837,7 +5836,7 @@
 
   // First, demultiplex/decode time arguments
   timespec absTime;
-  if (time < 0) { // don't wait at all
+  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
     return;
   }
   if (time > 0) {
--- a/hotspot/src/os/windows/vm/objectMonitor_windows.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "incls/_precompiled.incl"
--- a/hotspot/src/os/windows/vm/objectMonitor_windows.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
- private:
--- a/hotspot/src/os/windows/vm/objectMonitor_windows.inline.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -3311,7 +3311,6 @@
   }
 }
 
-
 // this is called _after_ the global arguments have been parsed
 jint os::init_2(void) {
   // Allocate a single page and mark it as readable for safepoint polling
@@ -3390,6 +3389,21 @@
     actual_reserve_size = default_reserve_size;
   }
 
+  // Check the minimum allowable stack size for thread creation and for
+  // initialization of the java system classes, including StackOverflowError;
+  // this depends on the page size.  Add a page for compiler2 recursion in
+  // the main thread.  Add in 2*BytesPerWord times page size to account for
+  // VM stack during class initialization depending on 32 or 64 bit VM.
+  size_t min_stack_allowed =
+            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+            2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
+  if (actual_reserve_size < min_stack_allowed) {
+    tty->print_cr("\nThe stack size specified is too small, "
+                  "Specify at least %dk",
+                  min_stack_allowed / K);
+    return JNI_ERR;
+  }
+
   JavaThread::set_stack_size_at_create(stack_commit_size);
 
   // Calculate theoretical max. size of Threads to guard against artificial
@@ -3992,7 +4006,7 @@
   if (time < 0) { // don't wait
     return;
   }
-  else if (time == 0) {
+  else if (time == 0 && !isAbsolute) {
     time = INFINITE;
   }
   else if  (isAbsolute) {
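
On Windows the same fix is expressed by no longer mapping an absolute
time of 0 to INFINITE.  A condensed sketch of the resulting decode,
under the assumption (not shown in this hunk) that absolute times are
epoch deadlines converted to a relative delay:

    // Hypothetical decode; the earlier branch already returned for
    // time < 0.  A relative 0 still means wait forever, while an
    // absolute deadline is clamped to a non-negative relative wait.
    unsigned long decode_wait_millis(long time, bool isAbsolute, long now) {
      if (time == 0 && !isAbsolute) return 0xFFFFFFFFUL;  // INFINITE
      if (isAbsolute) {
        long delta = time - now;
        return delta <= 0 ? 0UL : (unsigned long)delta;
      }
      return (unsigned long)time;  // already-relative wait in ms
    }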
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -1350,7 +1350,6 @@
     addr = ptr;
   }
   assert(addr->is_register(), "must be a register at this point");
-  assert(addr->type() == T_OBJECT, "addr should point to an object");
 
   LIR_Opr xor_res = new_pointer_register();
   LIR_Opr xor_shift_res = new_pointer_register();
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -4309,20 +4309,21 @@
 }
 
 
-// Unqualified names may not contain the characters '.', ';', or '/'.
-// Method names also may not contain the characters '<' or '>', unless <init> or <clinit>.
-// Note that method names may not be <init> or <clinit> in this method.
-// Because these names have been checked as special cases before calling this method
-// in verify_legal_method_name.
-bool ClassFileParser::verify_unqualified_name(char* name, unsigned int length, int type) {
+// Unqualified names may not contain the characters '.', ';', '[', or '/'.
+// Method names also may not contain the characters '<' or '>', unless <init>
+// or <clinit>.  Note that method names may not be <init> or <clinit> in this
+// method, because those names have already been checked as special cases in
+// verify_legal_method_name before this method is called.
+bool ClassFileParser::verify_unqualified_name(
+    char* name, unsigned int length, int type) {
   jchar ch;
 
   for (char* p = name; p != name + length; ) {
     ch = *p;
     if (ch < 128) {
       p++;
-      if (ch == '.' || ch == ';') {
-        return false;   // do not permit '.' or ';'
+      if (ch == '.' || ch == ';' || ch == '[' ) {
+        return false;   // do not permit '.', ';', or '['
       }
       if (type != LegalClass && ch == '/') {
         return false;   // do not permit '/' unless it's class name
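
The rewritten routine adds '[' to the characters rejected in unqualified
names.  A standalone sketch of the per-character test on the ASCII fast
path (not the VM routine; is_class_name stands in for the LegalClass
type check):

    // '.', ';', and '[' are never legal; '/' is legal only when the
    // name being verified is a class name.
    bool legal_unqualified_char(char ch, bool is_class_name) {
      if (ch == '.' || ch == ';' || ch == '[') return false;
      if (ch == '/' && !is_class_name) return false;
      return true;
    }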
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/stackMapTableFormat.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -0,0 +1,916 @@
+/*
+ * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// These classes represent the stack-map substructures described in the JVMS
+// (hence the non-conforming naming scheme).
+
+// These classes work with the types in their compressed form in-place (as they
+// would appear in the classfile).  No virtual methods or fields allowed.
+
+class verification_type_info {
+ private:
+  // u1 tag
+  // u2 cpool_index || u2 bci (for ITEM_Object & ITEM_Uninitialized only)
+
+  address tag_addr() const { return (address)this; }
+  address cpool_index_addr() const { return tag_addr() + sizeof(u1); }
+  address bci_addr() const { return cpool_index_addr(); }
+
+ protected:
+  // No constructors - should be 'private', but GCC issues a warning if it is
+  verification_type_info() {}
+  verification_type_info(const verification_type_info&) {}
+
+ public:
+
+  static verification_type_info* at(address addr) {
+    return (verification_type_info*)addr;
+  }
+
+  static verification_type_info* create_at(address addr, u1 tag) {
+    verification_type_info* vti = (verification_type_info*)addr;
+    vti->set_tag(tag);
+    return vti;
+  }
+
+  static verification_type_info* create_object_at(address addr, u2 cp_idx) {
+    verification_type_info* vti = (verification_type_info*)addr;
+    vti->set_tag(ITEM_Object);
+    vti->set_cpool_index(cp_idx);
+    return vti;
+  }
+
+  static verification_type_info* create_uninit_at(address addr, u2 bci) {
+    verification_type_info* vti = (verification_type_info*)addr;
+    vti->set_tag(ITEM_Uninitialized);
+    vti->set_bci(bci);
+    return vti;
+  }
+
+  static size_t calculate_size(u1 tag) {
+    if (tag == ITEM_Object || tag == ITEM_Uninitialized) {
+      return sizeof(u1) + sizeof(u2);
+    } else {
+      return sizeof(u1);
+    }
+  }
+
+  static size_t max_size() { return sizeof(u1) + sizeof(u2); }
+
+  u1 tag() const { return *(u1*)tag_addr(); }
+  void set_tag(u1 tag) { *((u1*)tag_addr()) = tag; }
+
+  bool is_object() const { return tag() == ITEM_Object; }
+  bool is_uninitialized() const { return tag() == ITEM_Uninitialized; }
+
+  u2 cpool_index() const {
+    assert(is_object(), "This type has no cp_index");
+    return Bytes::get_Java_u2(cpool_index_addr());
+  }
+  void set_cpool_index(u2 idx) {
+    assert(is_object(), "This type has no cp_index");
+    Bytes::put_Java_u2(cpool_index_addr(), idx);
+  }
+
+  u2 bci() const {
+    assert(is_uninitialized(), "This type has no bci");
+    return Bytes::get_Java_u2(bci_addr());
+  }
+
+  void set_bci(u2 bci) {
+    assert(is_uninitialized(), "This type has no bci");
+    Bytes::put_Java_u2(bci_addr(), bci);
+  }
+
+  void copy_from(verification_type_info* from) {
+    set_tag(from->tag());
+    if (from->is_object()) {
+      set_cpool_index(from->cpool_index());
+    } else if (from->is_uninitialized()) {
+      set_bci(from->bci());
+    }
+  }
+
+  size_t size() const {
+    return calculate_size(tag());
+  }
+
+  verification_type_info* next() {
+    return (verification_type_info*)((address)this + size());
+  }
+
+  // This method is used when reading unverified data in order to ensure
+  // that we don't read past a particular memory limit.  It returns false
+  // if any part of the data structure is outside the specified memory bounds.
+  bool verify(address start, address end) {
+    return ((address)this >= start &&
+            (address)this < end &&
+            (bci_addr() + sizeof(u2) <= end ||
+                (!is_object() && !is_uninitialized())));
+  }
+
+#ifdef ASSERT
+  void print_on(outputStream* st) {
+    switch (tag()) {
+      case ITEM_Top: st->print("Top"); break;
+      case ITEM_Integer: st->print("Integer"); break;
+      case ITEM_Float: st->print("Float"); break;
+      case ITEM_Double: st->print("Double"); break;
+      case ITEM_Long: st->print("Long"); break;
+      case ITEM_Null: st->print("Null"); break;
+      case ITEM_UninitializedThis:
+        st->print("UninitializedThis"); break;
+      case ITEM_Uninitialized:
+        st->print("Uninitialized[#%d]", bci()); break;
+      case ITEM_Object:
+        st->print("Object[#%d]", cpool_index()); break;
+      default:
+        assert(false, "Bad verification_type_info");
+    }
+  }
+#endif
+};
+
+#define FOR_EACH_STACKMAP_FRAME_TYPE(macro, arg1, arg2) \
+  macro(same_frame, arg1, arg2) \
+  macro(same_frame_extended, arg1, arg2) \
+  macro(same_frame_1_stack_item_frame, arg1, arg2) \
+  macro(same_frame_1_stack_item_extended, arg1, arg2) \
+  macro(chop_frame, arg1, arg2) \
+  macro(append_frame, arg1, arg2) \
+  macro(full_frame, arg1, arg2)
+
+#define SM_FORWARD_DECL(type, arg1, arg2) class type;
+FOR_EACH_STACKMAP_FRAME_TYPE(SM_FORWARD_DECL, x, x)
+#undef SM_FORWARD_DECL
+
+class stack_map_frame {
+ protected:
+  address frame_type_addr() const { return (address)this; }
+
+  // No constructors - should be 'private', but GCC issues a warning if it is
+  stack_map_frame() {}
+  stack_map_frame(const stack_map_frame&) {}
+
+ public:
+
+  static stack_map_frame* at(address addr) {
+    return (stack_map_frame*)addr;
+  }
+
+  stack_map_frame* next() const {
+    return at((address)this + size());
+  }
+
+  u1 frame_type() const { return *(u1*)frame_type_addr(); }
+  void set_frame_type(u1 type) { *((u1*)frame_type_addr()) = type; }
+
+  // pseudo-virtual methods
+  inline size_t size() const;
+  inline int offset_delta() const;
+  inline void set_offset_delta(int offset_delta);
+  inline int number_of_types() const; // number of types contained in the frame
+  inline verification_type_info* types() const; // pointer to first type
+  inline bool is_valid_offset(int offset_delta) const;
+
+  // This method must be used when reading unverified data in order to ensure
+  // that we don't read past a particular memory limit.  It returns false
+  // if any part of the data structure is outside the specified memory bounds.
+  inline bool verify(address start, address end) const;
+#ifdef ASSERT
+  inline void print_on(outputStream* st) const;
+#endif
+
+  // Create as_xxx and is_xxx methods for the subtypes
+#define FRAME_TYPE_DECL(stackmap_frame_type, arg1, arg2) \
+  inline stackmap_frame_type* as_##stackmap_frame_type() const; \
+  bool is_##stackmap_frame_type() { \
+    return as_##stackmap_frame_type() != NULL; \
+  }
+
+  FOR_EACH_STACKMAP_FRAME_TYPE(FRAME_TYPE_DECL, x, x)
+#undef FRAME_TYPE_DECL
+};
+
+class same_frame : public stack_map_frame {
+ private:
+  static int frame_type_to_offset_delta(u1 frame_type) {
+      return frame_type + 1; }
+  static u1 offset_delta_to_frame_type(int offset_delta) {
+      return (u1)(offset_delta - 1); }
+
+ public:
+
+  static bool is_frame_type(u1 tag) {
+    return tag < 64;
+  }
+
+  static same_frame* at(address addr) {
+    assert(is_frame_type(*addr), "Wrong frame id");
+    return (same_frame*)addr;
+  }
+
+  static same_frame* create_at(address addr, int offset_delta) {
+    same_frame* sm = (same_frame*)addr;
+    sm->set_offset_delta(offset_delta);
+    return sm;
+  }
+
+  static size_t calculate_size() { return sizeof(u1); }
+
+  size_t size() const { return calculate_size(); }
+  int offset_delta() const { return frame_type_to_offset_delta(frame_type()); }
+
+  void set_offset_delta(int offset_delta) {
+    assert(offset_delta <= 64, "Offset too large for same_frame");
+    set_frame_type(offset_delta_to_frame_type(offset_delta));
+  }
+
+  int number_of_types() const { return 0; }
+  verification_type_info* types() const { return NULL; }
+
+  bool is_valid_offset(int offset_delta) const {
+    return is_frame_type(offset_delta_to_frame_type(offset_delta));
+  }
+
+  bool verify_subtype(address start, address end) const {
+    return true;
+  }
+
+#ifdef ASSERT
+  void print_on(outputStream* st) const {
+    st->print("same_frame(%d)", offset_delta());
+  }
+#endif
+};
+
+class same_frame_extended : public stack_map_frame {
+ private:
+  enum { _frame_id = 251 };
+  address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
+
+ public:
+  static bool is_frame_type(u1 tag) {
+    return tag == _frame_id;
+  }
+
+  static same_frame_extended* at(address addr) {
+    assert(is_frame_type(*addr), "Wrong frame type");
+    return (same_frame_extended*)addr;
+  }
+
+  static same_frame_extended* create_at(address addr, u2 offset_delta) {
+    same_frame_extended* sm = (same_frame_extended*)addr;
+    sm->set_frame_type(_frame_id);
+    sm->set_offset_delta(offset_delta);
+    return sm;
+  }
+
+  static size_t calculate_size() { return sizeof(u1) + sizeof(u2); }
+
+  size_t size() const { return calculate_size(); }
+  int offset_delta() const {
+    return Bytes::get_Java_u2(offset_delta_addr()) + 1;
+  }
+
+  void set_offset_delta(int offset_delta) {
+    Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
+  }
+
+  int number_of_types() const { return 0; }
+  verification_type_info* types() const { return NULL; }
+  bool is_valid_offset(int offset) const { return true; }
+
+  bool verify_subtype(address start, address end) const {
+    return frame_type_addr() + size() <= end;
+  }
+
+#ifdef ASSERT
+  void print_on(outputStream* st) const {
+    st->print("same_frame_extended(%d)", offset_delta());
+  }
+#endif
+};
+
+class same_frame_1_stack_item_frame : public stack_map_frame {
+ private:
+  address type_addr() const { return frame_type_addr() + sizeof(u1); }
+
+  static int frame_type_to_offset_delta(u1 frame_type) {
+      return frame_type - 63; }
+  static u1 offset_delta_to_frame_type(int offset_delta) {
+      return (u1)(offset_delta + 63); }
+
+ public:
+  static bool is_frame_type(u1 tag) {
+    return tag >= 64 && tag < 128;
+  }
+
+  static same_frame_1_stack_item_frame* at(address addr) {
+    assert(is_frame_type(*addr), "Wrong frame id");
+    return (same_frame_1_stack_item_frame*)addr;
+  }
+
+  static same_frame_1_stack_item_frame* create_at(
+      address addr, int offset_delta, verification_type_info* vti) {
+    same_frame_1_stack_item_frame* sm = (same_frame_1_stack_item_frame*)addr;
+    sm->set_offset_delta(offset_delta);
+    if (vti != NULL) {
+      sm->set_type(vti);
+    }
+    return sm;
+  }
+
+  static size_t calculate_size(verification_type_info* vti) {
+    return sizeof(u1) + vti->size();
+  }
+
+  static size_t max_size() {
+    return sizeof(u1) + verification_type_info::max_size();
+  }
+
+  size_t size() const { return calculate_size(types()); }
+  int offset_delta() const { return frame_type_to_offset_delta(frame_type()); }
+
+  void set_offset_delta(int offset_delta) {
+    assert(offset_delta > 0 && offset_delta <= 64,
+           "Offset too large for this frame type");
+    set_frame_type(offset_delta_to_frame_type(offset_delta));
+  }
+
+  void set_type(verification_type_info* vti) {
+    verification_type_info* cur = types();
+    cur->copy_from(vti);
+  }
+
+  int number_of_types() const { return 1; }
+  verification_type_info* types() const {
+    return verification_type_info::at(type_addr());
+  }
+
+  bool is_valid_offset(int offset_delta) const {
+    return is_frame_type(offset_delta_to_frame_type(offset_delta));
+  }
+
+  bool verify_subtype(address start, address end) const {
+    return types()->verify(start, end);
+  }
+
+#ifdef ASSERT
+  void print_on(outputStream* st) const {
+    st->print("same_frame_1_stack_item_frame(%d,", offset_delta());
+    types()->print_on(st);
+    st->print(")");
+  }
+#endif
+};
+
+class same_frame_1_stack_item_extended : public stack_map_frame {
+ private:
+  address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
+  address type_addr() const { return offset_delta_addr() + sizeof(u2); }
+
+  enum { _frame_id = 247 };
+
+ public:
+  static bool is_frame_type(u1 tag) {
+    return tag == _frame_id;
+  }
+
+  static same_frame_1_stack_item_extended* at(address addr) {
+    assert(is_frame_type(*addr), "Wrong frame id");
+    return (same_frame_1_stack_item_extended*)addr;
+  }
+
+  static same_frame_1_stack_item_extended* create_at(
+      address addr, int offset_delta, verification_type_info* vti) {
+    same_frame_1_stack_item_extended* sm =
+       (same_frame_1_stack_item_extended*)addr;
+    sm->set_frame_type(_frame_id);
+    sm->set_offset_delta(offset_delta);
+    if (vti != NULL) {
+      sm->set_type(vti);
+    }
+    return sm;
+  }
+
+  static size_t calculate_size(verification_type_info* vti) {
+    return sizeof(u1) + sizeof(u2) + vti->size();
+  }
+
+  size_t size() const { return calculate_size(types()); }
+  int offset_delta() const {
+    return Bytes::get_Java_u2(offset_delta_addr()) + 1;
+  }
+
+  void set_offset_delta(int offset_delta) {
+    Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
+  }
+
+  void set_type(verification_type_info* vti) {
+    verification_type_info* cur = types();
+    cur->copy_from(vti);
+  }
+
+  int number_of_types() const { return 1; }
+  verification_type_info* types() const {
+    return verification_type_info::at(type_addr());
+  }
+  bool is_valid_offset(int offset) { return true; }
+
+  bool verify_subtype(address start, address end) const {
+    return type_addr() < end && types()->verify(start, end);
+  }
+
+#ifdef ASSERT
+  void print_on(outputStream* st) const {
+    st->print("same_frame_1_stack_item_extended(%d,", offset_delta());
+    types()->print_on(st);
+    st->print(")");
+  }
+#endif
+};
+
+class chop_frame : public stack_map_frame {
+ private:
+  address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
+
+  static int frame_type_to_chops(u1 frame_type) {
+    int chop = 251 - frame_type;
+    return chop;
+  }
+
+  static u1 chops_to_frame_type(int chop) {
+    return (u1)(251 - chop);
+  }
+
+ public:
+  static bool is_frame_type(u1 tag) {
+    return frame_type_to_chops(tag) > 0 && frame_type_to_chops(tag) < 4;
+  }
+
+  static chop_frame* at(address addr) {
+    assert(is_frame_type(*addr), "Wrong frame id");
+    return (chop_frame*)addr;
+  }
+
+  static chop_frame* create_at(address addr, int offset_delta, int chops) {
+    chop_frame* sm = (chop_frame*)addr;
+    sm->set_chops(chops);
+    sm->set_offset_delta(offset_delta);
+    return sm;
+  }
+
+  static size_t calculate_size() {
+    return sizeof(u1) + sizeof(u2);
+  }
+
+  size_t size() const { return calculate_size(); }
+  int offset_delta() const {
+    return Bytes::get_Java_u2(offset_delta_addr()) + 1;
+  }
+  void set_offset_delta(int offset_delta) {
+    Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
+  }
+
+  int chops() const {
+    int chops = frame_type_to_chops(frame_type());
+    assert(chops > 0 && chops < 4, "Invalid number of chops in frame");
+    return chops;
+  }
+  void set_chops(int chops) {
+    assert(chops > 0 && chops <= 3, "Bad number of chops");
+    set_frame_type(chops_to_frame_type(chops));
+  }
+
+  int number_of_types() const { return 0; }
+  verification_type_info* types() const { return NULL; }
+  bool is_valid_offset(int offset) { return true; }
+
+  bool verify_subtype(address start, address end) const {
+    return frame_type_addr() + size() <= end;
+  }
+
+#ifdef ASSERT
+  void print_on(outputStream* st) const {
+    st->print("chop_frame(%d,%d)", offset_delta(), chops());
+  }
+#endif
+};
+
+class append_frame : public stack_map_frame {
+ private:
+  address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
+  address types_addr() const { return offset_delta_addr() + sizeof(u2); }
+
+  static int frame_type_to_appends(u1 frame_type) {
+    int append = frame_type - 251;
+    return append;
+  }
+
+  static u1 appends_to_frame_type(int appends) {
+    assert(appends > 0 && appends < 4, "Invalid append amount");
+    return (u1)(251 + appends);
+  }
+
+ public:
+  static bool is_frame_type(u1 tag) {
+    return frame_type_to_appends(tag) > 0 && frame_type_to_appends(tag) < 4;
+  }
+
+  static append_frame* at(address addr) {
+    assert(is_frame_type(*addr), "Wrong frame id");
+    return (append_frame*)addr;
+  }
+
+  static append_frame* create_at(
+      address addr, int offset_delta, int appends,
+      verification_type_info* types) {
+    append_frame* sm = (append_frame*)addr;
+    sm->set_appends(appends);
+    sm->set_offset_delta(offset_delta);
+    if (types != NULL) {
+      verification_type_info* cur = sm->types();
+      for (int i = 0; i < appends; ++i) {
+        cur->copy_from(types);
+        cur = cur->next();
+        types = types->next();
+      }
+    }
+    return sm;
+  }
+
+  static size_t calculate_size(int appends, verification_type_info* types) {
+    size_t sz = sizeof(u1) + sizeof(u2);
+    for (int i = 0; i < appends; ++i) {
+      sz += types->size();
+      types = types->next();
+    }
+    return sz;
+  }
+
+  static size_t max_size() {
+    return sizeof(u1) + sizeof(u2) + 3 * verification_type_info::max_size();
+  }
+
+  size_t size() const { return calculate_size(number_of_types(), types()); }
+  int offset_delta() const {
+    return Bytes::get_Java_u2(offset_delta_addr()) + 1;
+  }
+
+  void set_offset_delta(int offset_delta) {
+    Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
+  }
+
+  void set_appends(int appends) {
+    assert(appends > 0 && appends < 4, "Bad number of appends");
+    set_frame_type(appends_to_frame_type(appends));
+  }
+
+  int number_of_types() const {
+    int appends = frame_type_to_appends(frame_type());
+    assert(appends > 0 && appends < 4, "Invalid number of appends in frame");
+    return appends;
+  }
+  verification_type_info* types() const {
+    return verification_type_info::at(types_addr());
+  }
+  bool is_valid_offset(int offset) const { return true; }
+
+  bool verify_subtype(address start, address end) const {
+    verification_type_info* vti = types();
+    if ((address)vti < end && vti->verify(start, end)) {
+      int nof = number_of_types();
+      vti = vti->next();
+      if (nof < 2 || vti->verify(start, end)) {
+        vti = vti->next();
+        if (nof < 3 || vti->verify(start, end)) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
+#ifdef ASSERT
+  void print_on(outputStream* st) const {
+    st->print("append_frame(%d,", offset_delta());
+    verification_type_info* vti = types();
+    for (int i = 0; i < number_of_types(); ++i) {
+      vti->print_on(st);
+      if (i != number_of_types() - 1) {
+        st->print(",");
+      }
+      vti = vti->next();
+    }
+    st->print(")");
+  }
+#endif
+};
+
+class full_frame : public stack_map_frame {
+ private:
+  address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
+  address num_locals_addr() const { return offset_delta_addr() + sizeof(u2); }
+  address locals_addr() const { return num_locals_addr() + sizeof(u2); }
+  address stack_slots_addr(address end_of_locals) const {
+      return end_of_locals; }
+  address stack_addr(address end_of_locals) const {
+      return stack_slots_addr(end_of_locals) + sizeof(u2); }
+
+  enum { _frame_id = 255 };
+
+ public:
+  static bool is_frame_type(u1 tag) {
+    return tag == _frame_id;
+  }
+
+  static full_frame* at(address addr) {
+    assert(is_frame_type(*addr), "Wrong frame id");
+    return (full_frame*)addr;
+  }
+
+  static full_frame* create_at(
+      address addr, int offset_delta, int num_locals,
+      verification_type_info* locals,
+      int stack_slots, verification_type_info* stack) {
+    full_frame* sm = (full_frame*)addr;
+    sm->set_frame_type(_frame_id);
+    sm->set_offset_delta(offset_delta);
+    sm->set_num_locals(num_locals);
+    if (locals != NULL) {
+      verification_type_info* cur = sm->locals();
+      for (int i = 0; i < num_locals; ++i) {
+        cur->copy_from(locals);
+        cur = cur->next();
+        locals = locals->next();
+      }
+      address end_of_locals = (address)cur;
+      sm->set_stack_slots(end_of_locals, stack_slots);
+      cur = sm->stack(end_of_locals);
+      for (int i = 0; i < stack_slots; ++i) {
+        cur->copy_from(stack);
+        cur = cur->next();
+        stack = stack->next();
+      }
+    }
+    return sm;
+  }
+
+  static size_t calculate_size(
+      int num_locals, verification_type_info* locals,
+      int stack_slots, verification_type_info* stack) {
+    size_t sz = sizeof(u1) + sizeof(u2) + sizeof(u2) + sizeof(u2);
+    verification_type_info* vti = locals;
+    for (int i = 0; i < num_locals; ++i) {
+      sz += vti->size();
+      vti = vti->next();
+    }
+    vti = stack;
+    for (int i = 0; i < stack_slots; ++i) {
+      sz += vti->size();
+      vti = vti->next();
+    }
+    return sz;
+  }
+
+  static size_t max_size(int locals, int stack) {
+    return sizeof(u1) + 3 * sizeof(u2) +
+        (locals + stack) * verification_type_info::max_size();
+  }
+
+  size_t size() const {
+    address eol = end_of_locals();
+    return calculate_size(num_locals(), locals(), stack_slots(eol), stack(eol));
+  }
+
+  int offset_delta() const {
+    return Bytes::get_Java_u2(offset_delta_addr()) + 1;
+  }
+  int num_locals() const { return Bytes::get_Java_u2(num_locals_addr()); }
+  verification_type_info* locals() const {
+    return verification_type_info::at(locals_addr());
+  }
+  address end_of_locals() const {
+    verification_type_info* vti = locals();
+    for (int i = 0; i < num_locals(); ++i) {
+      vti = vti->next();
+    }
+    return (address)vti;
+  }
+  int stack_slots(address end_of_locals) const {
+    return Bytes::get_Java_u2(stack_slots_addr(end_of_locals));
+  }
+  verification_type_info* stack(address end_of_locals) const {
+    return verification_type_info::at(stack_addr(end_of_locals));
+  }
+
+  void set_offset_delta(int offset_delta) {
+    Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
+  }
+  void set_num_locals(int num_locals) {
+    Bytes::put_Java_u2(num_locals_addr(), num_locals);
+  }
+  void set_stack_slots(address end_of_locals, int stack_slots) {
+    Bytes::put_Java_u2(stack_slots_addr(end_of_locals), stack_slots);
+  }
+
+  // These return only the locals.  Extra processing is required for stack
+  // types of full frames.
+  int number_of_types() const { return num_locals(); }
+  verification_type_info* types() const { return locals(); }
+  bool is_valid_offset(int offset) { return true; }
+
+  bool verify_subtype(address start, address end) const {
+    verification_type_info* vti = types();
+    if ((address)vti >= end) {
+      return false;
+    }
+    int count = number_of_types();
+    for (int i = 0; i < count; ++i) {
+      if (!vti->verify(start, end)) {
+        return false;
+      }
+      vti = vti->next();
+    }
+    address eol = (address)vti;
+    if (eol + sizeof(u2) > end) {
+      return false;
+    }
+    count = stack_slots(eol);
+    vti = stack(eol);
+    for (int i = 0; i < count; ++i) {
+      if (!vti->verify(start, end)) {
+        return false;
+      }
+      vti = vti->next();
+    }
+    return true;
+  }
+
+#ifdef ASSERT
+  void print_on(outputStream* st) const {
+    st->print("full_frame(%d,{", offset_delta());
+    verification_type_info* vti = locals();
+    for (int i = 0; i < num_locals(); ++i) {
+      vti->print_on(st);
+      if (i != num_locals() - 1) {
+        st->print(",");
+      }
+      vti = vti->next();
+    }
+    st->print("},{");
+    address end_of_locals = (address)vti;
+    vti = stack(end_of_locals);
+    int ss = stack_slots(end_of_locals);
+    for (int i = 0; i < ss; ++i) {
+      vti->print_on(st);
+      if (i != ss - 1) {
+        st->print(",");
+      }
+      vti = vti->next();
+    }
+    st->print("})");
+  }
+#endif
+};
+
+#define VIRTUAL_DISPATCH(stack_frame_type, func_name, args) \
+  stack_frame_type* item_##stack_frame_type = as_##stack_frame_type(); \
+  if (item_##stack_frame_type != NULL) { \
+    return item_##stack_frame_type->func_name args;  \
+  }
+
+#define VOID_VIRTUAL_DISPATCH(stack_frame_type, func_name, args) \
+  stack_frame_type* item_##stack_frame_type = as_##stack_frame_type(); \
+  if (item_##stack_frame_type != NULL) { \
+    item_##stack_frame_type->func_name args;  \
+    return; \
+  }
+
+size_t stack_map_frame::size() const {
+  FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, size, ());
+  return 0;
+}
+
+int stack_map_frame::offset_delta() const {
+  FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, offset_delta, ());
+  return 0;
+}
+
+void stack_map_frame::set_offset_delta(int offset_delta) {
+  FOR_EACH_STACKMAP_FRAME_TYPE(
+      VOID_VIRTUAL_DISPATCH, set_offset_delta, (offset_delta));
+}
+
+int stack_map_frame::number_of_types() const {
+  FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, number_of_types, ());
+  return 0;
+}
+
+verification_type_info* stack_map_frame::types() const {
+  FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, types, ());
+  return NULL;
+}
+
+bool stack_map_frame::is_valid_offset(int offset) const {
+  FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, is_valid_offset, (offset));
+  return true;
+}
+
+bool stack_map_frame::verify(address start, address end) const {
+  if (frame_type_addr() >= start && frame_type_addr() < end) {
+    FOR_EACH_STACKMAP_FRAME_TYPE(
+       VIRTUAL_DISPATCH, verify_subtype, (start, end));
+  }
+  return false;
+}
+
+#ifdef ASSERT
+void stack_map_frame::print_on(outputStream* st) const {
+  FOR_EACH_STACKMAP_FRAME_TYPE(VOID_VIRTUAL_DISPATCH, print_on, (st));
+}
+#endif
+
+#undef VIRTUAL_DISPATCH
+#undef VOID_VIRTUAL_DISPATCH
+
+#define AS_SUBTYPE_DEF(stack_frame_type, arg1, arg2) \
+stack_frame_type* stack_map_frame::as_##stack_frame_type() const { \
+  if (stack_frame_type::is_frame_type(frame_type())) { \
+    return (stack_frame_type*)this; \
+  } else { \
+    return NULL; \
+  } \
+}
+
+FOR_EACH_STACKMAP_FRAME_TYPE(AS_SUBTYPE_DEF, x, x)
+#undef AS_SUBTYPE_DEF
+
+class stack_map_table_attribute {
+ private:
+  address name_index_addr() const {
+      return (address)this; }
+  address attribute_length_addr() const {
+      return name_index_addr() + sizeof(u2); }
+  address number_of_entries_addr() const {
+      return attribute_length_addr() + sizeof(u4); }
+  address entries_addr() const {
+      return number_of_entries_addr() + sizeof(u2); }
+
+ protected:
+  // No constructors - should be 'private', but GCC issues a warning if it is
+  stack_map_table_attribute() {}
+  stack_map_table_attribute(const stack_map_table_attribute&) {}
+
+ public:
+
+  static stack_map_table_attribute* at(address addr) {
+    return (stack_map_table_attribute*)addr;
+  }
+
+  u2 name_index() const {
+       return Bytes::get_Java_u2(name_index_addr()); }
+  u4 attribute_length() const {
+      return Bytes::get_Java_u4(attribute_length_addr()); }
+  u2 number_of_entries() const {
+      return Bytes::get_Java_u2(number_of_entries_addr()); }
+  stack_map_frame* entries() const {
+    return stack_map_frame::at(entries_addr());
+  }
+
+  static size_t header_size() {
+      return sizeof(u2) + sizeof(u4);
+  }
+
+  void set_name_index(u2 idx) {
+    Bytes::put_Java_u2(name_index_addr(), idx);
+  }
+  void set_attribute_length(u4 len) {
+    Bytes::put_Java_u4(attribute_length_addr(), len);
+  }
+  void set_number_of_entries(u2 num) {
+    Bytes::put_Java_u2(number_of_entries_addr(), num);
+  }
+};
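
Because the new header operates on the raw classfile bytes in place, a
caller advances through the table one frame at a time.  A usage sketch
built only from the accessors declared above (hypothetical caller code,
not part of this patch):

    // Walk every frame of a StackMapTable attribute occupying
    // [start, end) in a classfile buffer, bounds-checking as we go.
    bool walk_stack_map(address start, address end) {
      stack_map_table_attribute* attr = stack_map_table_attribute::at(start);
      stack_map_frame* frame = attr->entries();
      for (u2 i = 0; i < attr->number_of_entries(); ++i) {
        if (!frame->verify(start, end)) {
          return false;            // frame extends past the buffer
        }
        frame = frame->next();     // advance by this frame's own size
      }
      return true;
    }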
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -354,12 +354,8 @@
 double CMSStats::time_until_cms_gen_full() const {
   size_t cms_free = _cms_gen->cmsSpace()->free();
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  size_t expected_promotion = gch->get_gen(0)->capacity();
-  if (HandlePromotionFailure) {
-    expected_promotion = MIN2(
-        (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
-        expected_promotion);
-  }
+  size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
+                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
   if (cms_free > expected_promotion) {
     // Start a cms collection if there isn't enough space to promote
     // for the next minor collection.  Use the padded average as
@@ -865,57 +861,18 @@
   return free() + _virtual_space.uncommitted_size();
 }
 
-bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
-    size_t max_promotion_in_bytes,
-    bool younger_handles_promotion_failure) const {
-
-  // This is the most conservative test.  Full promotion is
-  // guaranteed if this is used. The multiplicative factor is to
-  // account for the worst case "dilatation".
-  double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
-  if (adjusted_max_promo_bytes > (double)max_uintx) { // larger than size_t
-    adjusted_max_promo_bytes = (double)max_uintx;
-  }
-  bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes);
-
-  if (younger_handles_promotion_failure && !result) {
-    // Full promotion is not guaranteed because fragmentation
-    // of the cms generation can prevent the full promotion.
-    result = (max_available() >= (size_t)adjusted_max_promo_bytes);
-
-    if (!result) {
-      // With promotion failure handling the test for the ability
-      // to support the promotion does not have to be guaranteed.
-      // Use an average of the amount promoted.
-      result = max_available() >= (size_t)
-        gc_stats()->avg_promoted()->padded_average();
-      if (PrintGC && Verbose && result) {
-        gclog_or_tty->print_cr(
-          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
-          " max_available: " SIZE_FORMAT
-          " avg_promoted: " SIZE_FORMAT,
-          max_available(), (size_t)
-          gc_stats()->avg_promoted()->padded_average());
-      }
-    } else {
-      if (PrintGC && Verbose) {
-        gclog_or_tty->print_cr(
-          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
-          " max_available: " SIZE_FORMAT
-          " adj_max_promo_bytes: " SIZE_FORMAT,
-          max_available(), (size_t)adjusted_max_promo_bytes);
-      }
-    }
-  } else {
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr(
-        "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
-        " contiguous_available: " SIZE_FORMAT
-        " adj_max_promo_bytes: " SIZE_FORMAT,
-        max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
-    }
-  }
-  return result;
+bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
+  size_t available = max_available();
+  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
+  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
+  if (PrintGC && Verbose) {
+    gclog_or_tty->print_cr(
+      "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
+      "max_promo("SIZE_FORMAT")",
+      res? "":" not", available, res? ">=":"<",
+      av_promo, max_promotion_in_bytes);
+  }
+  return res;
 }
 
 // At a promotion failure dump information on block layout in heap
@@ -6091,23 +6048,14 @@
   assert(_collectorState == Resizing, "Change of collector state to"
     " Resizing must be done under the freelistLocks (plural)");
 
-  // Now that sweeping has been completed, if the GCH's
-  // incremental_collection_will_fail flag is set, clear it,
+  // Now that sweeping has been completed, we clear
+  // the incremental_collection_failed flag,
   // thus inviting a younger gen collection to promote into
   // this generation. If such a promotion may still fail,
   // the flag will be set again when a young collection is
   // attempted.
-  // I think the incremental_collection_will_fail flag's use
-  // is specific to a 2 generation collection policy, so i'll
-  // assert that that's the configuration we are operating within.
-  // The use of the flag can and should be generalized appropriately
-  // in the future to deal with a general n-generation system.
-
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_two_generation_policy(),
-         "Resetting of incremental_collection_will_fail flag"
-         " may be incorrect otherwise");
-  gch->clear_incremental_collection_will_fail();
+  gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
   gch->update_full_collections_completed(_collection_count_start);
 }
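
The handshake behind this flag is simple enough to sketch: a young collection sets it when promotion fails (or is skipped as unsafe), and the concurrent sweeper clears it once old-gen space has been reclaimed, inviting another attempt. A hypothetical miniature of that protocol, not the GenCollectedHeap interface:

#include <cstdio>

// Illustrative stand-in for the heap-wide flag; names are not the real API.
struct Heap {
  bool incremental_collection_failed = false;
};

int main() {
  Heap gch;
  gch.incremental_collection_failed = true;   // young GC: promotion failed
  // ... concurrent sweep completes, freeing old-gen space ...
  gch.incremental_collection_failed = false;  // worth retrying a young GC now
  std::printf("failed=%d\n", gch.incremental_collection_failed);
  return 0;
}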
 
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -1185,8 +1185,7 @@
   virtual void par_promote_alloc_done(int thread_num);
   virtual void par_oop_since_save_marks_iterate_done(int thread_num);
 
-  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
-    bool younger_handles_promotion_failure) const;
+  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
 
   // Inform this (non-young) generation that a promotion failure was
   // encountered during a collection of a younger generation that
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -272,12 +272,16 @@
   }
 }
 
-// Wait until the next synchronous GC or a timeout, whichever is earlier.
-void ConcurrentMarkSweepThread::wait_on_cms_lock(long t) {
+// Wait until the next synchronous GC, a concurrent full gc request,
+// or a timeout, whichever is earlier.
+void ConcurrentMarkSweepThread::wait_on_cms_lock(long t_millis) {
   MutexLockerEx x(CGC_lock,
                   Mutex::_no_safepoint_check_flag);
+  if (_should_terminate || _collector->_full_gc_requested) {
+    return;
+  }
   set_CMS_flag(CMS_cms_wants_token);   // to provoke notifies
-  CGC_lock->wait(Mutex::_no_safepoint_check_flag, t);
+  CGC_lock->wait(Mutex::_no_safepoint_check_flag, t_millis);
   clear_CMS_flag(CMS_cms_wants_token);
   assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
          "Should not be set");
@@ -289,7 +293,8 @@
       icms_wait();
       return;
     } else {
-      // Wait until the next synchronous GC or a timeout, whichever is earlier
+      // Wait until the next synchronous GC, a concurrent full gc
+      // request or a timeout, whichever is earlier.
       wait_on_cms_lock(CMSWaitDuration);
     }
     // Check if we should start a CMS collection cycle
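
The timed wait with early bail-out introduced above maps naturally onto standard C++ primitives. A hedged sketch, assuming a condition variable stands in for CGC_lock, with t_millis == 0 meaning an unbounded wait (matching the companion header comment):

#include <chrono>
#include <condition_variable>
#include <mutex>

// Sketch only: re-check the wakeup conditions under the lock before
// sleeping, and let a full-gc request or termination cut the wait short.
class CmsWaiter {
  std::mutex              _lock;
  std::condition_variable _cv;
  bool _should_terminate  = false;
  bool _full_gc_requested = false;

public:
  void wait_on_cms_lock(long t_millis) {
    std::unique_lock<std::mutex> x(_lock);
    if (_should_terminate || _full_gc_requested) {
      return;  // a request is already pending; do not sleep
    }
    auto woken = [this] { return _should_terminate || _full_gc_requested; };
    if (t_millis == 0) {
      _cv.wait(x, woken);                                           // unbounded
    } else {
      _cv.wait_for(x, std::chrono::milliseconds(t_millis), woken);  // timed
    }
  }

  void request_full_gc() {
    { std::lock_guard<std::mutex> g(_lock); _full_gc_requested = true; }
    _cv.notify_all();  // terminate any waiter early
  }
};

int main() {
  CmsWaiter w;
  w.request_full_gc();
  w.wait_on_cms_lock(100);  // returns immediately: request already pending
  return 0;
}
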
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -120,8 +120,10 @@
   }
 
   // Wait on CMS lock until the next synchronous GC
-  // or given timeout, whichever is earlier.
-  void    wait_on_cms_lock(long t); // milliseconds
+  // or given timeout, whichever is earlier. A timeout value
+  // of 0 indicates that there is no upper bound on the wait time.
+  // A concurrent full gc request terminates the wait.
+  void wait_on_cms_lock(long t_millis);
 
   // The CMS thread will yield during the work portion of its cycle
  // only when requested to.  Both synchronous and asynchronous requests
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -2418,6 +2418,8 @@
   for (int i = 0; i < (int)_max_task_num; ++i) {
     OopTaskQueue* queue = _task_queues->queue(i);
     queue->set_empty();
+    // Clear any partial regions from the CMTasks
+    _tasks[i]->clear_aborted_region();
   }
 }
 
@@ -2706,7 +2708,6 @@
   clear_marking_state();
   for (int i = 0; i < (int)_max_task_num; ++i) {
     _tasks[i]->clear_region_fields();
-    _tasks[i]->clear_aborted_region();
   }
   _has_aborted = true;
 
@@ -2985,7 +2986,7 @@
 
   _nextMarkBitMap                = nextMarkBitMap;
   clear_region_fields();
-  clear_aborted_region();
+  assert(_aborted_region.is_empty(), "should have been cleared");
 
   _calls                         = 0;
   _elapsed_time_ms               = 0.0;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -175,7 +175,7 @@
   }
   assert(start_card > _array->index_for(_bottom), "Cannot be first card");
   assert(_array->offset_array(start_card-1) <= N_words,
-    "Offset card has an unexpected value");
+         "Offset card has an unexpected value");
   size_t start_card_for_region = start_card;
   u_char offset = max_jubyte;
   for (int i = 0; i < BlockOffsetArray::N_powers; i++) {
@@ -577,6 +577,16 @@
 #endif
 }
 
+void
+G1BlockOffsetArray::set_for_starts_humongous(HeapWord* new_end) {
+  assert(_end == new_end, "_end should have already been updated");
+
+  // The first BOT entry should have offset 0.
+  _array->set_offset_array(_array->index_for(_bottom), 0);
+  // The rest should point to the first one.
+  set_remainder_to_point_to_start(_bottom + N_words, new_end);
+}
+
 //////////////////////////////////////////////////////////////////////
 // G1BlockOffsetArrayContigSpace
 //////////////////////////////////////////////////////////////////////
@@ -626,3 +636,12 @@
          "Precondition of call");
   _array->set_offset_array(bottom_index, 0);
 }
+
+void
+G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_end) {
+  G1BlockOffsetArray::set_for_starts_humongous(new_end);
+
+  // Make sure _next_offset_threshold and _next_offset_index point to new_end.
+  _next_offset_threshold = new_end;
+  _next_offset_index     = _array->index_for(new_end);
+}
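
To illustrate the BOT invariant being established here: the card covering the bottom of the series records offset 0, and every later card points back toward it. A deliberately simplified sketch; HotSpot's table compresses long runs with logarithmic back-skips rather than the linear counts used below.

#include <cstdio>
#include <vector>

int main() {
  const int cards = 8;
  std::vector<int> offset_array(cards);
  offset_array[0] = 0;                 // the first BOT entry has offset 0
  for (int i = 1; i < cards; ++i) {
    offset_array[i] = i;               // "the rest should point to the first one"
  }
  // block_start for any card can now walk straight back to card 0:
  int card = 5;
  std::printf("block starts %d cards back\n", offset_array[card]);
  return 0;
}
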
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -436,6 +436,8 @@
   }
 
   void check_all_cards(size_t left_card, size_t right_card) const;
+
+  virtual void set_for_starts_humongous(HeapWord* new_end);
 };
 
 // A subtype of BlockOffsetArray that takes advantage of the fact
@@ -484,4 +486,6 @@
 
   HeapWord* block_start_unsafe(const void* addr);
   HeapWord* block_start_unsafe_const(const void* addr) const;
+
+  virtual void set_for_starts_humongous(HeapWord* new_end);
 };
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -791,7 +791,7 @@
   int                _worker_i;
 public:
   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
-    _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i),
+    _cl(g1->g1_rem_set(), worker_i),
     _worker_i(worker_i),
     _g1h(g1)
   { }
@@ -890,7 +890,7 @@
     abandon_cur_alloc_region();
     abandon_gc_alloc_regions();
     assert(_cur_alloc_region == NULL, "Invariant.");
-    g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
+    g1_rem_set()->cleanupHRRS();
     tear_down_region_lists();
     set_used_regions_to_need_zero_fill();
 
@@ -1506,15 +1506,11 @@
   }
 
   // Also create a G1 rem set.
-  if (G1UseHRIntoRS) {
-    if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
-      _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs());
-    } else {
-      vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
-      return JNI_ENOMEM;
-    }
+  if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
+    _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
   } else {
-    _g1_rem_set = new StupidG1RemSet(this);
+    vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
+    return JNI_ENOMEM;
   }
 
   // Carve out the G1 part of the heap.
@@ -2706,8 +2702,7 @@
 }
 
 size_t G1CollectedHeap::cards_scanned() {
-  HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set();
-  return g1_rset->cardsScanned();
+  return g1_rem_set()->cardsScanned();
 }
 
 void
@@ -3850,6 +3845,54 @@
                undo_waste() * HeapWordSize / K);
 }
 
+#ifdef ASSERT
+bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
+  assert(ref != NULL, "invariant");
+  assert(UseCompressedOops, "sanity");
+  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
+  oop p = oopDesc::load_decode_heap_oop(ref);
+  assert(_g1h->is_in_g1_reserved(p),
+         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  return true;
+}
+
+bool G1ParScanThreadState::verify_ref(oop* ref) const {
+  assert(ref != NULL, "invariant");
+  if (has_partial_array_mask(ref)) {
+    // Must be in the collection set--it's already been copied.
+    oop p = clear_partial_array_mask(ref);
+    assert(_g1h->obj_in_cs(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  } else {
+    oop p = oopDesc::load_decode_heap_oop(ref);
+    assert(_g1h->is_in_g1_reserved(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  }
+  return true;
+}
+
+bool G1ParScanThreadState::verify_task(StarTask ref) const {
+  if (ref.is_narrow()) {
+    return verify_ref((narrowOop*) ref);
+  } else {
+    return verify_ref((oop*) ref);
+  }
+}
+#endif // ASSERT
+
+void G1ParScanThreadState::trim_queue() {
+  StarTask ref;
+  do {
+    // Drain the overflow stack first, so other threads can steal.
+    while (refs()->pop_overflow(ref)) {
+      deal_with_reference(ref);
+    }
+    while (refs()->pop_local(ref)) {
+      deal_with_reference(ref);
+    }
+  } while (!refs()->is_empty());
+}
+
 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
   _par_scan_state(par_scan_state) { }
@@ -4052,38 +4095,43 @@
     : _g1h(g1h), _par_scan_state(par_scan_state),
       _queues(queues), _terminator(terminator) {}
 
-  void do_void() {
-    G1ParScanThreadState* pss = par_scan_state();
-    while (true) {
+  void do_void();
+
+private:
+  inline bool offer_termination();
+};
+
+bool G1ParEvacuateFollowersClosure::offer_termination() {
+  G1ParScanThreadState* const pss = par_scan_state();
+  pss->start_term_time();
+  const bool res = terminator()->offer_termination();
+  pss->end_term_time();
+  return res;
+}
+
+void G1ParEvacuateFollowersClosure::do_void() {
+  StarTask stolen_task;
+  G1ParScanThreadState* const pss = par_scan_state();
+  pss->trim_queue();
+
+  do {
+    while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
+      assert(pss->verify_task(stolen_task), "sanity");
+      if (stolen_task.is_narrow()) {
+        pss->deal_with_reference((narrowOop*) stolen_task);
+      } else {
+        pss->deal_with_reference((oop*) stolen_task);
+      }
+
+      // We've just processed a reference and we might have made
+      // available new entries on the queues. So we have to make sure
+      // we drain the queues as necessary.
       pss->trim_queue();
-
-      StarTask stolen_task;
-      if (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
-        // slightly paranoid tests; I'm trying to catch potential
-        // problems before we go into push_on_queue to know where the
-        // problem is coming from
-        assert((oop*)stolen_task != NULL, "Error");
-        if (stolen_task.is_narrow()) {
-          assert(UseCompressedOops, "Error");
-          narrowOop* p = (narrowOop*) stolen_task;
-          assert(has_partial_array_mask(p) ||
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "Error");
-          pss->push_on_queue(p);
-        } else {
-          oop* p = (oop*) stolen_task;
-          assert(has_partial_array_mask(p) || _g1h->is_in_g1_reserved(*p), "Error");
-          pss->push_on_queue(p);
-        }
-        continue;
-      }
-      pss->start_term_time();
-      if (terminator()->offer_termination()) break;
-      pss->end_term_time();
     }
-    pss->end_term_time();
-    pss->retire_alloc_buffers();
-  }
-};
+  } while (!offer_termination());
+
+  pss->retire_alloc_buffers();
+}
 
 class G1ParTask : public AbstractGangTask {
 protected:
@@ -4182,8 +4230,7 @@
       pss.print_termination_stats(i);
     }
 
-    assert(pss.refs_to_scan() == 0, "Task queue should be empty");
-    assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
+    assert(pss.refs()->is_empty(), "should be empty");
     double end_time_ms = os::elapsedTime() * 1000.0;
     _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
   }
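
The drain order in the new trim_queue() preserves the rationale spelled out in the comment it replaces: overflow entries are invisible to other workers, so they are processed first while the stealable local queue stays populated. A single-threaded sketch of that shape, with Task and the two containers standing in for StarTask and the worker's OopTaskQueue:

#include <cstdio>
#include <deque>
#include <vector>

using Task = int;

struct ScanState {
  std::deque<Task>  local_queue;     // other workers may steal from here
  std::vector<Task> overflow_stack;  // private to this worker

  void deal_with_reference(Task t) {
    std::printf("processing %d\n", t);
    // Processing a task may push follow-up work; omitted in this sketch.
  }

  void trim_queue() {
    do {
      // Drain the overflow stack first, so other threads can steal.
      while (!overflow_stack.empty()) {
        Task t = overflow_stack.back();
        overflow_stack.pop_back();
        deal_with_reference(t);
      }
      while (!local_queue.empty()) {
        Task t = local_queue.front();
        local_queue.pop_front();
        deal_with_reference(t);
      }
    } while (!(overflow_stack.empty() && local_queue.empty()));
  }
};

int main() {
  ScanState pss;
  pss.local_queue    = {1, 2};
  pss.overflow_stack = {3, 4};
  pss.trim_queue();  // processes 4, 3, then 1, 2
  return 0;
}
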
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -1651,49 +1651,17 @@
   size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
   size_t undo_waste() const                      { return _undo_waste; }
 
+#ifdef ASSERT
+  bool verify_ref(narrowOop* ref) const;
+  bool verify_ref(oop* ref) const;
+  bool verify_task(StarTask ref) const;
+#endif // ASSERT
+
   template <class T> void push_on_queue(T* ref) {
-    assert(ref != NULL, "invariant");
-    assert(has_partial_array_mask(ref) ||
-           _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(ref)), "invariant");
-#ifdef ASSERT
-    if (has_partial_array_mask(ref)) {
-      oop p = clear_partial_array_mask(ref);
-      // Verify that we point into the CS
-      assert(_g1h->obj_in_cs(p), "Should be in CS");
-    }
-#endif
+    assert(verify_ref(ref), "sanity");
     refs()->push(ref);
   }
 
-  void pop_from_queue(StarTask& ref) {
-    if (refs()->pop_local(ref)) {
-      assert((oop*)ref != NULL, "pop_local() returned true");
-      assert(UseCompressedOops || !ref.is_narrow(), "Error");
-      assert(has_partial_array_mask((oop*)ref) ||
-             _g1h->is_in_g1_reserved(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref)
-                                                     : oopDesc::load_decode_heap_oop((oop*)ref)),
-              "invariant");
-    } else {
-      StarTask null_task;
-      ref = null_task;
-    }
-  }
-
-  void pop_from_overflow_queue(StarTask& ref) {
-    StarTask new_ref;
-    refs()->pop_overflow(new_ref);
-    assert((oop*)new_ref != NULL, "pop() from a local non-empty stack");
-    assert(UseCompressedOops || !new_ref.is_narrow(), "Error");
-    assert(has_partial_array_mask((oop*)new_ref) ||
-           _g1h->is_in_g1_reserved(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref)
-                                                       : oopDesc::load_decode_heap_oop((oop*)new_ref)),
-           "invariant");
-    ref = new_ref;
-  }
-
-  int refs_to_scan()            { return (int)refs()->size(); }
-  int overflowed_refs_to_scan() { return (int)refs()->overflow_stack()->size(); }
-
   template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
     if (G1DeferredRSUpdate) {
       deferred_rs_update(from, p, tid);
@@ -1804,7 +1772,6 @@
     }
   }
 
-private:
   template <class T> void deal_with_reference(T* ref_to_scan) {
     if (has_partial_array_mask(ref_to_scan)) {
       _partial_scan_cl->do_oop_nv(ref_to_scan);
@@ -1818,59 +1785,15 @@
     }
   }
 
-public:
-  void trim_queue() {
-    // I've replicated the loop twice, first to drain the overflow
-    // queue, second to drain the task queue. This is better than
-    // having a single loop, which checks both conditions and, inside
-    // it, either pops the overflow queue or the task queue, as each
-    // loop is tighter. Also, the decision to drain the overflow queue
-    // first is not arbitrary, as the overflow queue is not visible
-    // to the other workers, whereas the task queue is. So, we want to
-    // drain the "invisible" entries first, while allowing the other
-    // workers to potentially steal the "visible" entries.
-
-    while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
-      while (overflowed_refs_to_scan() > 0) {
-        StarTask ref_to_scan;
-        assert((oop*)ref_to_scan == NULL, "Constructed above");
-        pop_from_overflow_queue(ref_to_scan);
-        // We shouldn't have pushed it on the queue if it was not
-        // pointing into the CSet.
-        assert((oop*)ref_to_scan != NULL, "Follows from inner loop invariant");
-        if (ref_to_scan.is_narrow()) {
-          assert(UseCompressedOops, "Error");
-          narrowOop* p = (narrowOop*)ref_to_scan;
-          assert(!has_partial_array_mask(p) &&
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-          deal_with_reference(p);
-        } else {
-          oop* p = (oop*)ref_to_scan;
-          assert((has_partial_array_mask(p) && _g1h->is_in_g1_reserved(clear_partial_array_mask(p))) ||
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-          deal_with_reference(p);
-        }
-      }
-
-      while (refs_to_scan() > 0) {
-        StarTask ref_to_scan;
-        assert((oop*)ref_to_scan == NULL, "Constructed above");
-        pop_from_queue(ref_to_scan);
-        if ((oop*)ref_to_scan != NULL) {
-          if (ref_to_scan.is_narrow()) {
-            assert(UseCompressedOops, "Error");
-            narrowOop* p = (narrowOop*)ref_to_scan;
-            assert(!has_partial_array_mask(p) &&
-                    _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-            deal_with_reference(p);
-          } else {
-            oop* p = (oop*)ref_to_scan;
-            assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
-                   _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-            deal_with_reference(p);
-          }
-        }
-      }
+  void deal_with_reference(StarTask ref) {
+    assert(verify_task(ref), "sanity");
+    if (ref.is_narrow()) {
+      deal_with_reference((narrowOop*)ref);
+    } else {
+      deal_with_reference((oop*)ref);
     }
   }
+
+public:
+  void trim_queue();
 };
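
deal_with_reference(StarTask) above dispatches on whether the task holds a compressed or a full-width slot. A hedged sketch of the underlying idea, discriminating the two pointer types with a low tag bit; HotSpot's actual StarTask encoding differs in detail.

#include <cassert>
#include <cstdint>
#include <cstdio>

using narrow_slot = uint32_t;   // stand-in for narrowOop
using wide_slot   = uintptr_t;  // stand-in for oop

class StarTask {
  uintptr_t _holder = 0;
public:
  StarTask() = default;
  explicit StarTask(narrow_slot* p)
    : _holder(reinterpret_cast<uintptr_t>(p) | 1) {}  // tag narrow slots
  explicit StarTask(wide_slot* p)
    : _holder(reinterpret_cast<uintptr_t>(p)) {
    assert((_holder & 1) == 0 && "pointers are at least 2-byte aligned");
  }
  bool is_narrow() const { return (_holder & 1) != 0; }
  narrow_slot* as_narrow() const {
    return reinterpret_cast<narrow_slot*>(_holder & ~uintptr_t(1));
  }
  wide_slot* as_wide() const { return reinterpret_cast<wide_slot*>(_holder); }
};

static void deal_with_reference(StarTask ref) {
  if (ref.is_narrow()) {
    std::printf("narrow slot holds %u\n", *ref.as_narrow());
  } else {
    std::printf("wide slot holds %zu\n", (size_t)*ref.as_wide());
  }
}

int main() {
  narrow_slot n = 7;
  wide_slot   w = 9;
  deal_with_reference(StarTask(&n));
  deal_with_reference(StarTask(&w));
  return 0;
}
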
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -25,8 +25,6 @@
 class HeapRegion;
 class G1CollectedHeap;
 class G1RemSet;
-class HRInto_G1RemSet;
-class G1RemSet;
 class ConcurrentMark;
 class DirtyCardToOopClosure;
 class CMBitMap;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -97,13 +97,6 @@
   }
 };
 
-void
-StupidG1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
-                                            int worker_i) {
-  IntoCSRegionClosure rc(_g1, oc);
-  _g1->heap_region_iterate(&rc);
-}
-
 class VerifyRSCleanCardOopClosure: public OopClosure {
   G1CollectedHeap* _g1;
 public:
@@ -119,8 +112,9 @@
   }
 };
 
-HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
-  : G1RemSet(g1), _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
+G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
+  : _g1(g1), _conc_refine_cards(0),
+    _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
     _cg1r(g1->concurrent_g1_refine()),
     _traversal_in_progress(false),
     _cset_rs_update_cl(NULL),
@@ -134,7 +128,7 @@
   }
 }
 
-HRInto_G1RemSet::~HRInto_G1RemSet() {
+G1RemSet::~G1RemSet() {
   delete _seq_task;
   for (uint i = 0; i < n_workers(); i++) {
     assert(_cset_rs_update_cl[i] == NULL, "it should be");
@@ -277,7 +271,7 @@
 //          p threads
 // Then thread t will start at region t * floor (n/p)
 
-HeapRegion* HRInto_G1RemSet::calculateStartRegion(int worker_i) {
+HeapRegion* G1RemSet::calculateStartRegion(int worker_i) {
   HeapRegion* result = _g1p->collection_set();
   if (ParallelGCThreads > 0) {
     size_t cs_size = _g1p->collection_set_size();
@@ -290,7 +284,7 @@
   return result;
 }
 
-void HRInto_G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
+void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
   double rs_time_start = os::elapsedTime();
   HeapRegion *startRegion = calculateStartRegion(worker_i);
 
@@ -340,7 +334,7 @@
   }
 };
 
-void HRInto_G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
+void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
   double start = os::elapsedTime();
   // Apply the given closure to all remaining log entries.
   RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
@@ -439,12 +433,11 @@
   }
 };
 
-void HRInto_G1RemSet::cleanupHRRS() {
+void G1RemSet::cleanupHRRS() {
   HeapRegionRemSet::cleanup();
 }
 
-void
-HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
+void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
                                              int worker_i) {
 #if CARD_REPEAT_HISTO
   ct_freq_update_histo_and_reset();
@@ -508,8 +501,7 @@
   _cset_rs_update_cl[worker_i] = NULL;
 }
 
-void HRInto_G1RemSet::
-prepare_for_oops_into_collection_set_do() {
+void G1RemSet::prepare_for_oops_into_collection_set_do() {
 #if G1_REM_SET_LOGGING
   PrintRSClosure cl;
   _g1->collection_set_iterate(&cl);
@@ -581,7 +573,7 @@
     //   RSet updating,
     // * the post-write barrier shouldn't be logging updates to young
     //   regions (but there is a situation where this can happen - see
-    //   the comment in HRInto_G1RemSet::concurrentRefineOneCard below -
+    //   the comment in G1RemSet::concurrentRefineOneCard below -
     //   that should not be applicable here), and
     // * during actual RSet updating, the filtering of cards in young
     //   regions in HeapRegion::oops_on_card_seq_iterate_careful is
@@ -601,7 +593,7 @@
   }
 };
 
-void HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do() {
+void G1RemSet::cleanup_after_oops_into_collection_set_do() {
   guarantee( _cards_scanned != NULL, "invariant" );
   _total_cards_scanned = 0;
   for (uint i = 0; i < n_workers(); ++i)
@@ -692,12 +684,12 @@
   }
 };
 
-void HRInto_G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
+void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
   ScrubRSClosure scrub_cl(region_bm, card_bm);
   _g1->heap_region_iterate(&scrub_cl);
 }
 
-void HRInto_G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
+void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
                                 int worker_num, int claim_val) {
   ScrubRSClosure scrub_cl(region_bm, card_bm);
   _g1->heap_region_par_iterate_chunked(&scrub_cl, worker_num, claim_val);
@@ -741,7 +733,7 @@
   virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
 };
 
-bool HRInto_G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
+bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                                    bool check_for_refs_into_cset) {
   // Construct the region representing the card.
   HeapWord* start = _ct_bs->addr_for(card_ptr);
@@ -820,7 +812,7 @@
   return trigger_cl.value();
 }
 
-bool HRInto_G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
+bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                               bool check_for_refs_into_cset) {
   // If the card is no longer dirty, nothing to do.
   if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
@@ -995,7 +987,7 @@
   }
 };
 
-void HRInto_G1RemSet::print_summary_info() {
+void G1RemSet::print_summary_info() {
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
 
 #if CARD_REPEAT_HISTO
@@ -1029,30 +1021,26 @@
   g1->concurrent_g1_refine()->threads_do(&p);
   gclog_or_tty->print_cr("");
 
-  if (G1UseHRIntoRS) {
-    HRRSStatsIter blk;
-    g1->heap_region_iterate(&blk);
-    gclog_or_tty->print_cr("  Total heap region rem set sizes = " SIZE_FORMAT "K."
-                           "  Max = " SIZE_FORMAT "K.",
-                           blk.total_mem_sz()/K, blk.max_mem_sz()/K);
-    gclog_or_tty->print_cr("  Static structures = " SIZE_FORMAT "K,"
-                           " free_lists = " SIZE_FORMAT "K.",
-                           HeapRegionRemSet::static_mem_size()/K,
-                           HeapRegionRemSet::fl_mem_size()/K);
-    gclog_or_tty->print_cr("    %d occupied cards represented.",
-                           blk.occupied());
-    gclog_or_tty->print_cr("    Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
-                           ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
-                           blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
-                           (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
-                           (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
-    gclog_or_tty->print_cr("    Did %d coarsenings.",
-                  HeapRegionRemSet::n_coarsenings());
-
-  }
+  HRRSStatsIter blk;
+  g1->heap_region_iterate(&blk);
+  gclog_or_tty->print_cr("  Total heap region rem set sizes = " SIZE_FORMAT "K."
+                         "  Max = " SIZE_FORMAT "K.",
+                         blk.total_mem_sz()/K, blk.max_mem_sz()/K);
+  gclog_or_tty->print_cr("  Static structures = " SIZE_FORMAT "K,"
+                         " free_lists = " SIZE_FORMAT "K.",
+                         HeapRegionRemSet::static_mem_size()/K,
+                         HeapRegionRemSet::fl_mem_size()/K);
+  gclog_or_tty->print_cr("    %d occupied cards represented.",
+                         blk.occupied());
+  gclog_or_tty->print_cr("    Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
+                         ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
+                         blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
+                         (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
+                         (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
+  gclog_or_tty->print_cr("    Did %d coarsenings.", HeapRegionRemSet::n_coarsenings());
 }
 
-void HRInto_G1RemSet::prepare_for_verify() {
+void G1RemSet::prepare_for_verify() {
   if (G1HRRSFlushLogBuffersOnVerify &&
       (VerifyBeforeGC || VerifyAfterGC)
       &&  !_g1->full_collection()) {
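
The partitioning formula quoted above calculateStartRegion() ("thread t will start at region t * floor(n/p)") is easy to verify numerically. A small worked example with assumed values:

#include <cstdio>

int main() {
  const int n = 10;  // regions in the collection set
  const int p = 4;   // parallel GC workers
  for (int t = 0; t < p; ++t) {
    std::printf("worker %d starts at region %d\n", t, t * (n / p));
  }
  // Prints start regions 0, 2, 4, 6: each worker walks forward from its own
  // starting point, so the workers begin spread across the collection set.
  return 0;
}
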
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -27,107 +27,18 @@
 
 class G1CollectedHeap;
 class CardTableModRefBarrierSet;
-class HRInto_G1RemSet;
 class ConcurrentG1Refine;
 
+// A G1RemSet in which each heap region has a rem set that records the
+// external heap references into it.  Uses a mod ref bs to track updates,
+// so that they can be used to update the individual region remsets.
+
 class G1RemSet: public CHeapObj {
 protected:
   G1CollectedHeap* _g1;
   unsigned _conc_refine_cards;
   size_t n_workers();
 
-public:
-  G1RemSet(G1CollectedHeap* g1) :
-    _g1(g1), _conc_refine_cards(0)
-  {}
-
-  // Invoke "blk->do_oop" on all pointers into the CS in object in regions
-  // outside the CS (having invoked "blk->set_region" to set the "from"
-  // region correctly beforehand.) The "worker_i" param is for the
-  // parallel case where the number of the worker thread calling this
-  // function can be helpful in partitioning the work to be done. It
-  // should be the same as the "i" passed to the calling thread's
-  // work(i) function. In the sequential case this param will be ingored.
-  virtual void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
-                                           int worker_i) = 0;
-
-  // Prepare for and cleanup after an oops_into_collection_set_do
-  // call.  Must call each of these once before and after (in sequential
-  // code) any threads call oops into collection set do.  (This offers an
-  // opportunity to sequential setup and teardown of structures needed by a
-  // parallel iteration over the CS's RS.)
-  virtual void prepare_for_oops_into_collection_set_do() = 0;
-  virtual void cleanup_after_oops_into_collection_set_do() = 0;
-
-  // If "this" is of the given subtype, return "this", else "NULL".
-  virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; }
-
-  // Record, if necessary, the fact that *p (where "p" is in region "from",
-  // and is, a fortiori, required to be non-NULL) has changed to its new value.
-  virtual void write_ref(HeapRegion* from, oop* p) = 0;
-  virtual void write_ref(HeapRegion* from, narrowOop* p) = 0;
-  virtual void par_write_ref(HeapRegion* from, oop* p, int tid) = 0;
-  virtual void par_write_ref(HeapRegion* from, narrowOop* p, int tid) = 0;
-
-  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
-  // or card, respectively, such that a region or card with a corresponding
-  // 0 bit contains no part of any live object.  Eliminates any remembered
-  // set entries that correspond to dead heap ranges.
-  virtual void scrub(BitMap* region_bm, BitMap* card_bm) = 0;
-  // Like the above, but assumes is called in parallel: "worker_num" is the
-  // parallel thread id of the current thread, and "claim_val" is the
-  // value that should be used to claim heap regions.
-  virtual void scrub_par(BitMap* region_bm, BitMap* card_bm,
-                         int worker_num, int claim_val) = 0;
-
-  // Refine the card corresponding to "card_ptr".  If "sts" is non-NULL,
-  // join and leave around parts that must be atomic wrt GC.  (NULL means
-  // being done at a safepoint.)
-  // With some implementations of this routine, when check_for_refs_into_cset
-  // is true, a true result may be returned if the given card contains oops
-  // that have references into the current collection set.
-  virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
-                                       bool check_for_refs_into_cset) {
-    return false;
-  }
-
-  // Print any relevant summary info.
-  virtual void print_summary_info() {}
-
-  // Prepare remebered set for verification.
-  virtual void prepare_for_verify() {};
-};
-
-
-// The simplest possible G1RemSet: iterates over all objects in non-CS
-// regions, searching for pointers into the CS.
-class StupidG1RemSet: public G1RemSet {
-public:
-  StupidG1RemSet(G1CollectedHeap* g1) : G1RemSet(g1) {}
-
-  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
-                                   int worker_i);
-
-  void prepare_for_oops_into_collection_set_do() {}
-  void cleanup_after_oops_into_collection_set_do() {}
-
-  // Nothing is necessary in the version below.
-  void write_ref(HeapRegion* from, oop* p) {}
-  void write_ref(HeapRegion* from, narrowOop* p) {}
-  void par_write_ref(HeapRegion* from, oop* p, int tid) {}
-  void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {}
-
-  void scrub(BitMap* region_bm, BitMap* card_bm) {}
-  void scrub_par(BitMap* region_bm, BitMap* card_bm,
-                 int worker_num, int claim_val) {}
-
-};
-
-// A G1RemSet in which each heap region has a rem set that records the
-// external heap references into it.  Uses a mod ref bs to track updates,
-// so that they can be used to update the individual region remsets.
-
-class HRInto_G1RemSet: public G1RemSet {
 protected:
   enum SomePrivateConstants {
     UpdateRStoMergeSync  = 0,
@@ -175,28 +86,32 @@
   // scanned.
   void cleanupHRRS();
 
-  HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
-  ~HRInto_G1RemSet();
+  G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
+  ~G1RemSet();
 
+  // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
+  // outside the CS (having invoked "blk->set_region" to set the "from"
+  // region correctly beforehand.) The "worker_i" param is for the
+  // parallel case where the number of the worker thread calling this
+  // function can be helpful in partitioning the work to be done. It
+  // should be the same as the "i" passed to the calling thread's
+  // work(i) function. In the sequential case this param will be ignored.
   void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                    int worker_i);
 
+  // Prepare for and cleanup after an oops_into_collection_set_do
+  // call.  Must call each of these once before and after (in sequential
+  // code) any threads call oops_into_collection_set_do.  (This offers an
+  // opportunity for sequential setup and teardown of structures needed by a
+  // parallel iteration over the CS's RS.)
   void prepare_for_oops_into_collection_set_do();
   void cleanup_after_oops_into_collection_set_do();
+
   void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
-  template <class T> void scanNewRefsRS_work(OopsInHeapRegionClosure* oc, int worker_i);
-  void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i) {
-    if (UseCompressedOops) {
-      scanNewRefsRS_work<narrowOop>(oc, worker_i);
-    } else {
-      scanNewRefsRS_work<oop>(oc, worker_i);
-    }
-  }
   void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
+
   HeapRegion* calculateStartRegion(int i);
 
-  HRInto_G1RemSet* as_HRInto_G1RemSet() { return this; }
-
   CardTableModRefBS* ct_bs() { return _ct_bs; }
   size_t cardsScanned() { return _total_cards_scanned; }
 
@@ -219,17 +134,31 @@
 
   bool self_forwarded(oop obj);
 
+  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
+  // or card, respectively, such that a region or card with a corresponding
+  // 0 bit contains no part of any live object.  Eliminates any remembered
+  // set entries that correspond to dead heap ranges.
   void scrub(BitMap* region_bm, BitMap* card_bm);
+
+  // Like the above, but assumes it is called in parallel: "worker_num" is the
+  // parallel thread id of the current thread, and "claim_val" is the
+  // value that should be used to claim heap regions.
   void scrub_par(BitMap* region_bm, BitMap* card_bm,
                  int worker_num, int claim_val);
 
-  // If check_for_refs_into_cset is true then a true result is returned
-  // if the card contains oops that have references into the current
-  // collection set.
+  // Refine the card corresponding to "card_ptr".  If "sts" is non-NULL,
+  // join and leave around parts that must be atomic wrt GC.  (NULL means
+  // being done at a safepoint.)
+  // If check_for_refs_into_cset is true, a true result is returned
+  // if the given card contains oops that have references into the
+  // current collection set.
   virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                        bool check_for_refs_into_cset);
 
+  // Print any relevant summary info.
   virtual void print_summary_info();
+
+  // Prepare remembered set for verification.
   virtual void prepare_for_verify();
 };
 
@@ -250,13 +179,13 @@
 
 class UpdateRSOopClosure: public OopClosure {
   HeapRegion* _from;
-  HRInto_G1RemSet* _rs;
+  G1RemSet* _rs;
   int _worker_i;
 
   template <class T> void do_oop_work(T* p);
 
 public:
-  UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) :
+  UpdateRSOopClosure(G1RemSet* rs, int worker_i = 0) :
     _from(NULL), _rs(rs), _worker_i(worker_i) {
     guarantee(_rs != NULL, "Requires a G1RemSet");
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -30,16 +30,18 @@
   }
 }
 
-template <class T> inline void HRInto_G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
+template <class T>
+inline void G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
   par_write_ref_nv(from, p, 0);
 }
 
-inline bool HRInto_G1RemSet::self_forwarded(oop obj) {
+inline bool G1RemSet::self_forwarded(oop obj) {
   bool result =  (obj->is_forwarded() && (obj->forwardee()== obj));
   return result;
 }
 
-template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
+template <class T>
+inline void G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
   oop obj = oopDesc::load_decode_heap_oop(p);
 #ifdef ASSERT
   // can't do because of races
@@ -77,7 +79,7 @@
       // Deferred updates to the CSet are either discarded (in the normal case),
       // or processed (if an evacuation failure occurs) at the end
       // of the collection.
-      // See HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do().
+      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
     } else {
 #if G1_REM_SET_LOGGING
       gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
@@ -91,12 +93,14 @@
   }
 }
 
-template <class T> inline void UpdateRSOopClosure::do_oop_work(T* p) {
+template <class T>
+inline void UpdateRSOopClosure::do_oop_work(T* p) {
   assert(_from != NULL, "from region must be non-NULL");
   _rs->par_write_ref(_from, p, _worker_i);
 }
 
-template <class T> inline void UpdateRSetImmediate::do_oop_work(T* p) {
+template <class T>
+inline void UpdateRSetImmediate::do_oop_work(T* p) {
   assert(_from->is_in_reserved(p), "paranoia");
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -40,9 +40,6 @@
   develop(intx, G1PolicyVerbose, 0,                                         \
           "The verbosity level on G1 policy decisions")                     \
                                                                             \
-  develop(bool, G1UseHRIntoRS, true,                                        \
-          "Determines whether the 'advanced' HR Into rem set is used.")     \
-                                                                            \
   develop(intx, G1MarkingVerboseLevel, 0,                                   \
           "Level (0-4) of verboseness of the marking code")                 \
                                                                             \
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -377,10 +377,26 @@
 }
 // </PREDICTION>
 
-void HeapRegion::set_startsHumongous() {
+void HeapRegion::set_startsHumongous(HeapWord* new_end) {
+  assert(end() == _orig_end,
+         "Should be normal before the humongous object allocation");
+  assert(top() == bottom(), "should be empty");
+
   _humongous_type = StartsHumongous;
   _humongous_start_region = this;
-  assert(end() == _orig_end, "Should be normal before alloc.");
+
+  set_end(new_end);
+  _offsets.set_for_starts_humongous(new_end);
+}
+
+void HeapRegion::set_continuesHumongous(HeapRegion* start) {
+  assert(end() == _orig_end,
+         "Should be normal before the humongous object allocation");
+  assert(top() == bottom(), "should be empty");
+  assert(start->startsHumongous(), "pre-condition");
+
+  _humongous_type = ContinuesHumongous;
+  _humongous_start_region = start;
 }
 
 bool HeapRegion::claimHeapRegion(jint claimValue) {
@@ -500,23 +516,6 @@
   return blk.result();
 }
 
-void HeapRegion::set_continuesHumongous(HeapRegion* start) {
-  // The order is important here.
-  start->add_continuingHumongousRegion(this);
-  _humongous_type = ContinuesHumongous;
-  _humongous_start_region = start;
-}
-
-void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) {
-  // Must join the blocks of the current H region seq with the block of the
-  // added region.
-  offsets()->join_blocks(bottom(), cont->bottom());
-  arrayOop obj = (arrayOop)(bottom());
-  obj->set_length((int) (obj->length() + cont->capacity()/jintSize));
-  set_end(cont->end());
-  set_top(cont->end());
-}
-
 void HeapRegion::save_marks() {
   set_saved_mark();
 }
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -395,14 +395,12 @@
 
   // Causes the current region to represent a humongous object spanning "n"
   // regions.
-  virtual void set_startsHumongous();
+  void set_startsHumongous(HeapWord* new_end);
 
   // The regions that continue a humongous sequence should be added using
   // this method, in increasing address order.
   void set_continuesHumongous(HeapRegion* start);
 
-  void add_continuingHumongousRegion(HeapRegion* cont);
-
   // If the region has a remembered set, return a pointer to it.
   HeapRegionRemSet* rem_set() const {
     return _rem_set;
@@ -733,13 +731,6 @@
                                    FilterOutOfRegionClosure* cl,
                                    bool filter_young);
 
-  // The region "mr" is entirely in "this", and starts and ends at block
-  // boundaries. The caller declares that all the contained blocks are
-  // coalesced into one.
-  void declare_filled_region_to_BOT(MemRegion mr) {
-    _offsets.single_block(mr.start(), mr.end());
-  }
-
   // A version of block start that is guaranteed to find *some* block
   // boundary at or before "p", but does not object iteration, and may
   // therefore be used safely when the heap is unparseable.
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -1159,9 +1159,7 @@
   _hrrs(NULL),
   _g1h(G1CollectedHeap::heap()),
   _bosa(NULL),
-  _sparse_iter(size_t(G1CollectedHeap::heap()->reserved_region().start())
-               >> CardTableModRefBS::card_shift)
-{}
+  _sparse_iter() { }
 
 void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
   _hrrs = hrrs;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -91,34 +91,118 @@
   }
   if (sumSizes >= word_size) {
     _alloc_search_start = cur;
-    // Mark the allocated regions as allocated.
+
+    // We need to initialize the region(s) we just discovered. This is
+    // a bit tricky given that it can happen concurrently with
+    // refinement threads refining cards on these regions and
+    // potentially wanting to refine the BOT as they are scanning
+    // those cards (this can happen shortly after a cleanup; see CR
+    // 6991377). So we have to set up the region(s) carefully and in
+    // a specific order.
+
+    // Currently, allocs_are_zero_filled() returns false. The zero
+    // filling infrastructure will be going away soon (see CR 6977804).
+    // So no need to do anything else here.
     bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
+    assert(!zf, "not supported");
+
+    // This will be the "starts humongous" region.
     HeapRegion* first_hr = _regions.at(first);
-    for (int i = first; i < cur; i++) {
-      HeapRegion* hr = _regions.at(i);
-      if (zf)
-        hr->ensure_zero_filled();
+    {
+      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
+      first_hr->set_zero_fill_allocated();
+    }
+    // The header of the new object will be placed at the bottom of
+    // the first region.
+    HeapWord* new_obj = first_hr->bottom();
+    // This will be the new end of the first region in the series that
+    // should also match the end of the last region in the series.
+    // (Note: sumSizes = "region size" x "number of regions we found").
+    HeapWord* new_end = new_obj + sumSizes;
+    // This will be the new top of the first region that will reflect
+    // this allocation.
+    HeapWord* new_top = new_obj + word_size;
+
+    // First, we need to zero the header of the space that we will be
+    // allocating. When we update top further down, some refinement
+    // threads might try to scan the region. By zeroing the header we
+    // ensure that any thread that will try to scan the region will
+    // come across the zero klass word and bail out.
+    //
+    // NOTE: It would not have been correct to have used
+    // CollectedHeap::fill_with_object() and make the space look like
+    // an int array. The thread that is doing the allocation will
+    // later update the object header to a potentially different array
+    // type and, for a very short period of time, the klass and length
+    // fields will be inconsistent. This could cause a refinement
+    // thread to calculate the object size incorrectly.
+    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
+
+    // We will set up the first region as "starts humongous". This
+    // will also update the BOT covering all the regions to reflect
+    // that there is a single object that starts at the bottom of the
+    // first region.
+    first_hr->set_startsHumongous(new_end);
+
+    // Then, if there are any, we will set up the "continues
+    // humongous" regions.
+    HeapRegion* hr = NULL;
+    for (int i = first + 1; i < cur; ++i) {
+      hr = _regions.at(i);
       {
         MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
         hr->set_zero_fill_allocated();
       }
-      size_t sz = hr->capacity() / HeapWordSize;
-      HeapWord* tmp = hr->allocate(sz);
-      assert(tmp != NULL, "Humongous allocation failure");
-      MemRegion mr = MemRegion(tmp, sz);
-      CollectedHeap::fill_with_object(mr);
-      hr->declare_filled_region_to_BOT(mr);
-      if (i == first) {
-        first_hr->set_startsHumongous();
+      hr->set_continuesHumongous(first_hr);
+    }
+    // If we have "continues humongous" regions (hr != NULL), then the
+    // end of the last one should match new_end.
+    assert(hr == NULL || hr->end() == new_end, "sanity");
+
+    // Up to this point no concurrent thread would have been able to
+    // do any scanning on any region in this series. All the top
+    // fields still point to bottom, so the intersection between
+    // [bottom,top] and [card_start,card_end] will be empty. Before we
+    // update the top fields, we'll do a storestore to make sure that
+    // no thread sees the update to top before the zeroing of the
+    // object header and the BOT initialization.
+    OrderAccess::storestore();
+
+    // Now that the BOT and the object header have been initialized,
+    // we can update top of the "starts humongous" region.
+    assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
+           "new_top should be in this region");
+    first_hr->set_top(new_top);
+
+    // Now, we will update the top fields of the "continues humongous"
+    // regions. The reason we need to do this is that, otherwise,
+    // these regions would look empty and this will confuse parts of
+    // G1. For example, the code that looks for a consecutive number
+    // of empty regions will consider them empty and try to
+    // re-allocate them. We can extend is_empty() to also include
+    // !continuesHumongous(), but it is easier to just update the top
+    // fields here.
+    hr = NULL;
+    for (int i = first + 1; i < cur; ++i) {
+      hr = _regions.at(i);
+      if ((i + 1) == cur) {
+        // last continues humongous region
+        assert(hr->bottom() < new_top && new_top <= hr->end(),
+               "new_top should fall on this region");
+        hr->set_top(new_top);
       } else {
-        assert(i > first, "sanity");
-        hr->set_continuesHumongous(first_hr);
+        // not last one
+        assert(new_top > hr->end(), "new_top should be above this region");
+        hr->set_top(hr->end());
       }
     }
-    HeapWord* first_hr_bot = first_hr->bottom();
-    HeapWord* obj_end = first_hr_bot + word_size;
-    first_hr->set_top(obj_end);
-    return first_hr_bot;
+    // If we have continues humongous regions (hr != NULL), then the
+    // end of the last one should match new_end and its top should
+    // match new_top.
+    assert(hr == NULL ||
+           (hr->end() == new_end && hr->top() == new_top), "sanity");
+
+    return new_obj;
   } else {
     // If we started from the beginning, we want to know why we can't alloc.
     return NULL;
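
The ordering argument in the comments above is the classic initialize-then-publish idiom: zero the header and set up the BOT, fence, then expose the new top. A hedged sketch using C++11 atomics, with std::atomic_thread_fence standing in for OrderAccess::storestore(); a real concurrent reader would pair this with matching load-side ordering. Names are illustrative.

#include <atomic>
#include <cstdio>

struct Region {
  std::atomic<size_t> top{0};    // 0 == bottom: region looks empty to scanners
  size_t klass_word = 0xdead;    // pretend-stale header contents
};

int main() {
  Region r;
  r.klass_word = 0;                                     // zero the header first
  std::atomic_thread_fence(std::memory_order_release);  // ~ OrderAccess::storestore()
  r.top.store(64, std::memory_order_relaxed);           // only now publish top
  // A scanner that observes top == 64 will also see the zeroed klass word
  // (given acquire ordering on its side) and bail out instead of misparsing.
  std::printf("top=%zu klass=%zu\n", r.top.load(), r.klass_word);
  return 0;
}
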
--- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -308,7 +308,7 @@
   assert(e2->num_valid_cards() > 0, "Postcondition.");
 }
 
-CardIdx_t /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
+CardIdx_t RSHashTableIter::find_first_card_in_list() {
   CardIdx_t res;
   while (_bl_ind != RSHashTable::NullEntry) {
     res = _rsht->entry(_bl_ind)->card(0);
@@ -322,14 +322,11 @@
   return SparsePRTEntry::NullEntry;
 }
 
-size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(CardIdx_t ci) {
-  return
-    _heap_bot_card_ind
-    + (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion)
-    + ci;
+size_t RSHashTableIter::compute_card_ind(CardIdx_t ci) {
+  return (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion) + ci;
 }
 
-bool /* RSHashTable:: */ RSHashTableIter::has_next(size_t& card_index) {
+bool RSHashTableIter::has_next(size_t& card_index) {
   _card_ind++;
   CardIdx_t ci;
   if (_card_ind < SparsePRTEntry::cards_num() &&
--- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -169,7 +169,6 @@
   int _bl_ind;          // [-1, 0.._rsht->_capacity)
   short _card_ind;      // [0..SparsePRTEntry::cards_num())
   RSHashTable* _rsht;
-  size_t _heap_bot_card_ind;
 
   // If the bucket list pointed to by _bl_ind contains a card, sets
   // _bl_ind to the index of that entry, and returns the card.
@@ -183,13 +182,11 @@
   size_t compute_card_ind(CardIdx_t ci);
 
 public:
-  RSHashTableIter(size_t heap_bot_card_ind) :
+  RSHashTableIter() :
     _tbl_ind(RSHashTable::NullEntry),
     _bl_ind(RSHashTable::NullEntry),
     _card_ind((SparsePRTEntry::cards_num() - 1)),
-    _rsht(NULL),
-    _heap_bot_card_ind(heap_bot_card_ind)
-  {}
+    _rsht(NULL) {}
 
   void init(RSHashTable* rsht) {
     _rsht = rsht;
@@ -280,20 +277,11 @@
   bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
     return _next->contains_card(region_id, card_index);
   }
-
-#if 0
-  void verify_is_cleared();
-  void print();
-#endif
 };
 
 
-class SparsePRTIter: public /* RSHashTable:: */RSHashTableIter {
+class SparsePRTIter: public RSHashTableIter {
 public:
-  SparsePRTIter(size_t heap_bot_card_ind) :
-    /* RSHashTable:: */RSHashTableIter(heap_bot_card_ind)
-  {}
-
   void init(const SparsePRT* sprt) {
     RSHashTableIter::init(sprt->cur());
   }
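
With _heap_bot_card_ind gone, the card index computed by compute_card_ind() above is purely region-relative. A worked arithmetic sketch with an assumed CardsPerRegion, not the real constant:

#include <cstdio>

int main() {
  const size_t CardsPerRegion = 2048;  // illustrative value only
  size_t r_ind = 3;                    // region referenced by the sparse entry
  size_t ci    = 17;                   // card within that region
  size_t card_index = r_ind * CardsPerRegion + ci;
  std::printf("card_index = %zu\n", card_index);  // 3*2048 + 17 = 6161
  return 0;
}
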
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1	Sat Nov 13 18:40:37 2010 -0800
@@ -310,10 +310,16 @@
 
 heapRegionSeq.inline.hpp                heapRegionSeq.hpp
 
+instanceKlass.cpp                       g1RemSet.inline.hpp
+
+instanceRefKlass.cpp                    g1RemSet.inline.hpp
+
 klass.hpp                               g1OopClosures.hpp
 
 memoryService.cpp                       g1MemoryPool.hpp
 
+objArrayKlass.cpp                       g1RemSet.inline.hpp
+
 ptrQueue.cpp                            allocation.hpp
 ptrQueue.cpp                            allocation.inline.hpp
 ptrQueue.cpp                            mutex.hpp
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -846,7 +846,7 @@
   // from this generation, pass on collection; let the next generation
   // do it.
   if (!collection_attempt_is_safe()) {
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
     return;
   }
   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
@@ -935,8 +935,6 @@
 
     assert(to()->is_empty(), "to space should be empty now");
   } else {
-    assert(HandlePromotionFailure,
-      "Should only be here if promotion failure handling is on");
     assert(_promo_failure_scan_stack.is_empty(), "post condition");
     _promo_failure_scan_stack.clear(true); // Clear cached segments.
 
@@ -947,7 +945,7 @@
     // All the spaces are in play for mark-sweep.
     swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
     from()->set_next_compaction_space(to());
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed();
     // Inform the next generation that a promotion failure occurred.
     _next_gen->promotion_failure_occurred();
 
@@ -1092,11 +1090,6 @@
                                        old, m, sz);
 
     if (new_obj == NULL) {
-      if (!HandlePromotionFailure) {
-        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
-        // is incorrectly set. In any case, its seriously wrong to be here!
-        vm_exit_out_of_memory(sz*wordSize, "promotion");
-      }
       // promotion failed, forward to self
       _promotion_failed = true;
       new_obj = old;
@@ -1206,12 +1199,6 @@
                                        old, m, sz);
 
     if (new_obj == NULL) {
-      if (!HandlePromotionFailure) {
-        // A failed promotion likely means the MaxLiveObjectEvacuationRatio
-        // flag is incorrectly set. In any case, its seriously wrong to be
-        // here!
-        vm_exit_out_of_memory(sz*wordSize, "promotion");
-      }
       // promotion failed, forward to self
       forward_ptr = old->forward_to_atomic(old);
       new_obj = old;
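
The surviving fallback path, now unconditional, is the forward-to-self idiom: a failed copy marks the object as its own forwardee so later visitors treat it as already handled, and the failure is repaired after the pause. A hypothetical miniature:

#include <cstdio>

struct Obj {
  Obj* forwardee = nullptr;
};

int main() {
  bool promotion_failed = false;
  Obj old_obj;
  Obj* new_obj = nullptr;            // allocation in the next gen failed
  if (new_obj == nullptr) {
    promotion_failed = true;
    old_obj.forwardee = &old_obj;    // promotion failed, forward to self
    new_obj = &old_obj;
  }
  std::printf("self-forwarded: %d, failed: %d\n",
              old_obj.forwardee == &old_obj, promotion_failed);
  return 0;
}
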
--- a/hotspot/src/share/vm/includeDB_compiler1	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/includeDB_compiler1	Sat Nov 13 18:40:37 2010 -0800
@@ -301,6 +301,7 @@
 c1_MacroAssembler.hpp                   assembler_<arch>.inline.hpp
 
 c1_MacroAssembler_<arch>.cpp            arrayOop.hpp
+c1_MacroAssembler_<arch>.cpp            basicLock.hpp
 c1_MacroAssembler_<arch>.cpp            biasedLocking.hpp
 c1_MacroAssembler_<arch>.cpp            c1_MacroAssembler.hpp
 c1_MacroAssembler_<arch>.cpp            c1_Runtime1.hpp
@@ -309,7 +310,6 @@
 c1_MacroAssembler_<arch>.cpp            markOop.hpp
 c1_MacroAssembler_<arch>.cpp            os.hpp
 c1_MacroAssembler_<arch>.cpp            stubRoutines.hpp
-c1_MacroAssembler_<arch>.cpp            synchronizer.hpp
 c1_MacroAssembler_<arch>.cpp            systemDictionary.hpp
 
 c1_MacroAssembler_<arch>.hpp            generate_platform_dependent_include
--- a/hotspot/src/share/vm/includeDB_core	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/includeDB_core	Sat Nov 13 18:40:37 2010 -0800
@@ -300,10 +300,17 @@
 barrierSet.inline.hpp                   barrierSet.hpp
 barrierSet.inline.hpp                   cardTableModRefBS.hpp
 
+basicLock.cpp                           basicLock.hpp
+basicLock.cpp                           synchronizer.hpp
+
+basicLock.hpp                           handles.hpp
+basicLock.hpp                           markOop.hpp
+basicLock.hpp                           top.hpp
+
+biasedLocking.cpp                       basicLock.hpp
 biasedLocking.cpp                       biasedLocking.hpp
 biasedLocking.cpp                       klass.inline.hpp
 biasedLocking.cpp                       markOop.hpp
-biasedLocking.cpp                       synchronizer.hpp
 biasedLocking.cpp                       task.hpp
 biasedLocking.cpp                       vframe.hpp
 biasedLocking.cpp                       vmThread.hpp
@@ -404,13 +411,13 @@
 bytecodeInterpreterWithChecks.cpp       bytecodeInterpreter.cpp
 
 bytecodeInterpreter.hpp                 allocation.hpp
+bytecodeInterpreter.hpp                 basicLock.hpp
 bytecodeInterpreter.hpp                 bytes_<arch>.hpp
 bytecodeInterpreter.hpp                 frame.hpp
 bytecodeInterpreter.hpp                 globalDefinitions.hpp
 bytecodeInterpreter.hpp                 globals.hpp
 bytecodeInterpreter.hpp                 methodDataOop.hpp
 bytecodeInterpreter.hpp                 methodOop.hpp
-bytecodeInterpreter.hpp                 synchronizer.hpp
 
 bytecodeInterpreter.inline.hpp          bytecodeInterpreter.hpp
 bytecodeInterpreter.inline.hpp          stubRoutines.hpp
@@ -1667,10 +1674,10 @@
 frame.cpp                               universe.inline.hpp
 
 frame.hpp                               assembler.hpp
+frame.hpp                               basicLock.hpp
 frame.hpp                               methodOop.hpp
 frame.hpp                               monitorChunk.hpp
 frame.hpp                               registerMap.hpp
-frame.hpp                               synchronizer.hpp
 frame.hpp                               top.hpp
 
 frame.inline.hpp                        bytecodeInterpreter.hpp
@@ -2120,6 +2127,7 @@
 interfaceSupport_<os_family>.hpp        generate_platform_dependent_include
 
 interp_masm_<arch_model>.cpp            arrayOop.hpp
+interp_masm_<arch_model>.cpp            basicLock.hpp
 interp_masm_<arch_model>.cpp            biasedLocking.hpp
 interp_masm_<arch_model>.cpp            interp_masm_<arch_model>.hpp
 interp_masm_<arch_model>.cpp            interpreterRuntime.hpp
@@ -2131,7 +2139,6 @@
 interp_masm_<arch_model>.cpp            methodDataOop.hpp
 interp_masm_<arch_model>.cpp            methodOop.hpp
 interp_masm_<arch_model>.cpp            sharedRuntime.hpp
-interp_masm_<arch_model>.cpp            synchronizer.hpp
 interp_masm_<arch_model>.cpp            thread_<os_family>.inline.hpp
 
 interp_masm_<arch_model>.hpp            assembler_<arch>.inline.hpp
@@ -3094,25 +3101,26 @@
 
 objArrayOop.hpp                         arrayOop.hpp
 
+objectMonitor.cpp                       dtrace.hpp
+objectMonitor.cpp                       handles.inline.hpp
+objectMonitor.cpp                       interfaceSupport.hpp
+objectMonitor.cpp                       markOop.hpp
+objectMonitor.cpp                       mutexLocker.hpp
+objectMonitor.cpp                       objectMonitor.hpp
+objectMonitor.cpp                       objectMonitor.inline.hpp
+objectMonitor.cpp                       oop.inline.hpp
+objectMonitor.cpp                       osThread.hpp
+objectMonitor.cpp                       os_<os_family>.inline.hpp
+objectMonitor.cpp                       preserveException.hpp
+objectMonitor.cpp                       resourceArea.hpp
+objectMonitor.cpp                       stubRoutines.hpp
+objectMonitor.cpp                       thread.hpp
+objectMonitor.cpp                       thread_<os_family>.inline.hpp
+objectMonitor.cpp                       threadService.hpp
+objectMonitor.cpp                       vmSymbols.hpp
+
 objectMonitor.hpp                       os.hpp
-
-objectMonitor_<os_family>.cpp           dtrace.hpp
-objectMonitor_<os_family>.cpp           interfaceSupport.hpp
-objectMonitor_<os_family>.cpp           objectMonitor.hpp
-objectMonitor_<os_family>.cpp           objectMonitor.inline.hpp
-objectMonitor_<os_family>.cpp           oop.inline.hpp
-objectMonitor_<os_family>.cpp           osThread.hpp
-objectMonitor_<os_family>.cpp           os_<os_family>.inline.hpp
-objectMonitor_<os_family>.cpp           threadService.hpp
-objectMonitor_<os_family>.cpp           thread_<os_family>.inline.hpp
-objectMonitor_<os_family>.cpp           vmSymbols.hpp
-
-objectMonitor_<os_family>.hpp           generate_platform_dependent_include
-objectMonitor_<os_family>.hpp           os_<os_family>.inline.hpp
-objectMonitor_<os_family>.hpp           thread_<os_family>.inline.hpp
-objectMonitor_<os_family>.hpp           top.hpp
-
-objectMonitor_<os_family>.inline.hpp    generate_platform_dependent_include
+objectMonitor.hpp                       perfData.hpp
 
 oop.cpp                                 copy.hpp
 oop.cpp                                 handles.inline.hpp
@@ -3231,6 +3239,7 @@
 orderAccess.hpp                         os.hpp
 
 orderAccess_<os_arch>.inline.hpp        orderAccess.hpp
+orderAccess_<os_arch>.inline.hpp        vm_version_<arch>.hpp
 
 os.cpp                                  allocation.inline.hpp
 os.cpp                                  arguments.hpp
@@ -3328,7 +3337,6 @@
 os_<os_family>.cpp                      nativeInst_<arch>.hpp
 os_<os_family>.cpp                      no_precompiled_headers
 os_<os_family>.cpp                      objectMonitor.hpp
-os_<os_family>.cpp                      objectMonitor.inline.hpp
 os_<os_family>.cpp                      oop.inline.hpp
 os_<os_family>.cpp                      osThread.hpp
 os_<os_family>.cpp                      os_share_<os_family>.hpp
@@ -3388,6 +3396,12 @@
 ostream.hpp                             allocation.hpp
 ostream.hpp                             timer.hpp
 
+// include thread.hpp to prevent cyclic includes
+park.cpp                                thread.hpp
+
+park.hpp                                debug.hpp
+park.hpp                                globalDefinitions.hpp
+
 pcDesc.cpp                              debugInfoRec.hpp
 pcDesc.cpp                              nmethod.hpp
 pcDesc.cpp                              pcDesc.hpp
@@ -3600,7 +3614,9 @@
 relocator.cpp                           bytecodes.hpp
 relocator.cpp                           handles.inline.hpp
 relocator.cpp                           oop.inline.hpp
+relocator.cpp                           oopFactory.hpp
 relocator.cpp                           relocator.hpp
+relocator.cpp                           stackMapTableFormat.hpp
 relocator.cpp                           universe.inline.hpp
 
 relocator.hpp                           bytecodes.hpp
@@ -3907,6 +3923,8 @@
 stackMapTable.hpp                       methodOop.hpp
 stackMapTable.hpp                       stackMapFrame.hpp
 
+stackMapTableFormat.hpp                 verificationType.hpp
+
 stackValue.cpp                          debugInfo.hpp
 stackValue.cpp                          frame.inline.hpp
 stackValue.cpp                          handles.inline.hpp
@@ -4062,10 +4080,10 @@
 synchronizer.cpp                        resourceArea.hpp
 synchronizer.cpp                        stubRoutines.hpp
 synchronizer.cpp                        synchronizer.hpp
-synchronizer.cpp                        threadService.hpp
 synchronizer.cpp                        thread_<os_family>.inline.hpp
 synchronizer.cpp                        vmSymbols.hpp
 
+synchronizer.hpp                        basicLock.hpp
 synchronizer.hpp                        handles.hpp
 synchronizer.hpp                        markOop.hpp
 synchronizer.hpp                        perfData.hpp
@@ -4237,7 +4255,6 @@
 thread.cpp                              mutexLocker.hpp
 thread.cpp                              objArrayOop.hpp
 thread.cpp                              objectMonitor.hpp
-thread.cpp                              objectMonitor.inline.hpp
 thread.cpp                              oop.inline.hpp
 thread.cpp                              oopFactory.hpp
 thread.cpp                              osThread.hpp
@@ -4275,6 +4292,7 @@
 thread.hpp                              oop.hpp
 thread.hpp                              os.hpp
 thread.hpp                              osThread.hpp
+thread.hpp                              park.hpp
 thread.hpp                              safepoint.hpp
 thread.hpp                              stubRoutines.hpp
 thread.hpp                              threadLocalAllocBuffer.hpp
@@ -4586,6 +4604,7 @@
 vframeArray.hpp                         growableArray.hpp
 vframeArray.hpp                         monitorChunk.hpp
 
+vframe_hp.cpp                           basicLock.hpp
 vframe_hp.cpp                           codeCache.hpp
 vframe_hp.cpp                           debugInfoRec.hpp
 vframe_hp.cpp                           handles.inline.hpp
@@ -4599,7 +4618,6 @@
 vframe_hp.cpp                           scopeDesc.hpp
 vframe_hp.cpp                           signature.hpp
 vframe_hp.cpp                           stubRoutines.hpp
-vframe_hp.cpp                           synchronizer.hpp
 vframe_hp.cpp                           vframeArray.hpp
 vframe_hp.cpp                           vframe_hp.hpp
 
@@ -4751,6 +4769,7 @@
 workgroup.cpp                           workgroup.hpp
 
 workgroup.hpp                           taskqueue.hpp
+
 workgroup.hpp                           thread_<os_family>.inline.hpp
 
 xmlstream.cpp                           allocation.hpp
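The includeDB churn above reflects one refactoring: the BasicLock declaration moves out of synchronizer.hpp into a new basicLock.hpp, so files that need only the lightweight lock no longer pull in the whole synchronizer. A minimal sketch of what the new header presumably declares (the print_on/move_to definitions appear in the basicLock.cpp hunk later in this changeset; the field layout shown here is an assumption):

    // Sketch, not the verbatim header: the lock word embedded in a frame,
    // holding the displaced mark word while its object is stack-locked.
    class BasicLock VALUE_OBJ_CLASS_SPEC {
     private:
      volatile markOop _displaced_header;  // saved mark word of the locked oop
     public:
      markOop displaced_header() const        { return _displaced_header; }
      void set_displaced_header(markOop hdr)  { _displaced_header = hdr; }
      void print_on(outputStream* st) const;  // defined in basicLock.cpp
      void move_to(oop obj, BasicLock* dest); // relocate the lock, e.g. during deopt
    };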
--- a/hotspot/src/share/vm/includeDB_features	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/includeDB_features	Sat Nov 13 18:40:37 2010 -0800
@@ -184,6 +184,13 @@
 jvmtiImpl.hpp                           systemDictionary.hpp
 jvmtiImpl.hpp                           vm_operations.hpp
 
+jvmtiRawMonitor.cpp                     interfaceSupport.hpp
+jvmtiRawMonitor.cpp                     jvmtiRawMonitor.hpp
+jvmtiRawMonitor.cpp                     thread.hpp
+
+jvmtiRawMonitor.hpp                     growableArray.hpp
+jvmtiRawMonitor.hpp                     objectMonitor.hpp
+
 jvmtiTagMap.cpp                         biasedLocking.hpp
 jvmtiTagMap.cpp                         javaCalls.hpp
 jvmtiTagMap.cpp                         jniHandles.hpp
--- a/hotspot/src/share/vm/includeDB_jvmti	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/includeDB_jvmti	Sat Nov 13 18:40:37 2010 -0800
@@ -35,6 +35,7 @@
 // jvmtiCodeBlobEvents is jck optional, please put deps in includeDB_features
 
 jvmtiEnter.cpp                          jvmtiEnter.hpp
+jvmtiEnter.cpp                          jvmtiRawMonitor.hpp
 jvmtiEnter.cpp                          jvmtiUtil.hpp
 
 jvmtiEnter.hpp                          interfaceSupport.hpp
@@ -44,6 +45,7 @@
 jvmtiEnter.hpp                          systemDictionary.hpp
 
 jvmtiEnterTrace.cpp                     jvmtiEnter.hpp
+jvmtiEnterTrace.cpp                     jvmtiRawMonitor.hpp
 jvmtiEnterTrace.cpp                     jvmtiUtil.hpp
 
 jvmtiEnv.cpp                            arguments.hpp
@@ -66,11 +68,11 @@
 jvmtiEnv.cpp                            jvmtiGetLoadedClasses.hpp
 jvmtiEnv.cpp                            jvmtiImpl.hpp
 jvmtiEnv.cpp                            jvmtiManageCapabilities.hpp
+jvmtiEnv.cpp                            jvmtiRawMonitor.hpp
 jvmtiEnv.cpp                            jvmtiRedefineClasses.hpp
 jvmtiEnv.cpp                            jvmtiTagMap.hpp
 jvmtiEnv.cpp                            jvmtiThreadState.inline.hpp
 jvmtiEnv.cpp                            jvmtiUtil.hpp
-jvmtiEnv.cpp                            objectMonitor.inline.hpp
 jvmtiEnv.cpp                            osThread.hpp
 jvmtiEnv.cpp                            preserveException.hpp
 jvmtiEnv.cpp                            reflectionUtils.hpp
@@ -178,11 +180,13 @@
 jvmtiExport.cpp                         jvmtiExport.hpp
 jvmtiExport.cpp                         jvmtiImpl.hpp
 jvmtiExport.cpp                         jvmtiManageCapabilities.hpp
+jvmtiExport.cpp                         jvmtiRawMonitor.hpp
 jvmtiExport.cpp                         jvmtiTagMap.hpp
 jvmtiExport.cpp                         jvmtiThreadState.inline.hpp
 jvmtiExport.cpp                         nmethod.hpp
 jvmtiExport.cpp                         objArrayKlass.hpp
 jvmtiExport.cpp                         objArrayOop.hpp
+jvmtiExport.cpp                         objectMonitor.hpp
 jvmtiExport.cpp                         objectMonitor.inline.hpp
 jvmtiExport.cpp                         pcDesc.hpp
 jvmtiExport.cpp                         resourceArea.hpp
@@ -210,6 +214,8 @@
 jvmtiManageCapabilities.hpp             allocation.hpp
 jvmtiManageCapabilities.hpp             jvmti.h
 
+// jvmtiRawMonitor is jck optional, please put deps in includeDB_features
+
 jvmtiRedefineClasses.cpp                bitMap.inline.hpp
 jvmtiRedefineClasses.cpp                codeCache.hpp
 jvmtiRedefineClasses.cpp                deoptimization.hpp
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -659,9 +659,6 @@
     }
     return result;   // could be null if we are out of space
   } else if (!gch->incremental_collection_will_fail()) {
-    // The gc_prologues have not executed yet.  The value
-    // for incremental_collection_will_fail() is the remanent
-    // of the last collection.
     // Do an incremental collection.
     gch->do_collection(false            /* full */,
                        false            /* clear_all_soft_refs */,
@@ -739,9 +736,8 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
   return    (word_size > heap_word_size(gen0_capacity))
-         || (GC_locker::is_active_and_needs_gc())
-         || (   gch->last_incremental_collection_failed()
-             && gch->incremental_collection_will_fail());
+         || GC_locker::is_active_and_needs_gc()
+         || gch->incremental_collection_failed();
 }
 
 
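The two old flags are folded into one recorded fact: _incremental_collection_failed is set only when a collection actually failed (or was skipped as unsafe), while the old predictive meaning is recomputed on demand. A paraphrase of the derived predicate, using only names from the genCollectedHeap.hpp hunk further down (asserts elided):

    // The prediction is now derived, not stored; assumes a two-generation heap.
    bool GenCollectedHeap::incremental_collection_will_fail() {
      return incremental_collection_failed()             // it already did, or
          || !get_gen(0)->collection_attempt_is_safe();  // it looks unsafe now
    }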
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -510,7 +510,7 @@
   // from this generation, pass on collection; let the next generation
   // do it.
   if (!collection_attempt_is_safe()) {
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
     return;
   }
   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
@@ -596,9 +596,8 @@
     if (PrintGC && !PrintGCDetails) {
       gch->print_heap_change(gch_prev_used);
     }
+    assert(!gch->incremental_collection_failed(), "Should be clear");
   } else {
-    assert(HandlePromotionFailure,
-      "Should not be here unless promotion failure handling is on");
     assert(_promo_failure_scan_stack.is_empty(), "post condition");
     _promo_failure_scan_stack.clear(true); // Clear cached segments.
 
@@ -613,7 +612,7 @@
     // and from-space.
     swap_spaces();   // For uniformity wrt ParNewGeneration.
     from()->set_next_compaction_space(to());
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed();
 
     // Inform the next generation that a promotion failure occurred.
     _next_gen->promotion_failure_occurred();
@@ -700,12 +699,6 @@
   if (obj == NULL) {
     obj = _next_gen->promote(old, s);
     if (obj == NULL) {
-      if (!HandlePromotionFailure) {
-        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
-        // is incorrectly set. In any case, its seriously wrong to be here!
-        vm_exit_out_of_memory(s*wordSize, "promotion");
-      }
-
       handle_promotion_failure(old);
       return old;
     }
@@ -812,47 +805,43 @@
     assert(_next_gen != NULL,
            "This must be the youngest gen, and not the only gen");
   }
-
-  // Decide if there's enough room for a full promotion
-  // When using extremely large edens, we effectively lose a
-  // large amount of old space.  Use the "MaxLiveObjectEvacuationRatio"
-  // flag to reduce the minimum evacuation space requirements. If
-  // there is not enough space to evacuate eden during a scavenge,
-  // the VM will immediately exit with an out of memory error.
-  // This flag has not been tested
-  // with collectors other than simple mark & sweep.
-  //
-  // Note that with the addition of promotion failure handling, the
-  // VM will not immediately exit but will undo the young generation
-  // collection.  The parameter is left here for compatibility.
-  const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;
-
-  // worst_case_evacuation is based on "used()".  For the case where this
-  // method is called after a collection, this is still appropriate because
-  // the case that needs to be detected is one in which a full collection
-  // has been done and has overflowed into the young generation.  In that
-  // case a minor collection will fail (the overflow of the full collection
-  // means there is no space in the old generation for any promotion).
-  size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);
-
-  return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
-                                              HandlePromotionFailure);
+  return _next_gen->promotion_attempt_is_safe(used());
 }
 
 void DefNewGeneration::gc_epilogue(bool full) {
+  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
+
+  assert(!GC_locker::is_active(), "We should not be executing here");
   // Check if the heap is approaching full after a collection has
   // been done.  Generally the young generation is empty at
   // a minimum at the end of a collection.  If it is not, then
   // the heap is approaching full.
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  clear_should_allocate_from_space();
-  if (collection_attempt_is_safe()) {
-    gch->clear_incremental_collection_will_fail();
+  if (full) {
+    DEBUG_ONLY(seen_incremental_collection_failed = false;)
+    if (!collection_attempt_is_safe()) {
+      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
+      set_should_allocate_from_space(); // we seem to be running out of space
+    } else {
+      gch->clear_incremental_collection_failed(); // We just did a full collection
+      clear_should_allocate_from_space(); // if set
+    }
   } else {
-    gch->set_incremental_collection_will_fail();
-    if (full) { // we seem to be running out of space
-      set_should_allocate_from_space();
+#ifdef ASSERT
+    // It is possible that incremental_collection_failed() == true
+    // here, because an attempted scavenge did not succeed. The policy
+    // is normally expected to cause a full collection which should
+    // clear that condition, so we should not be here twice in a row
+    // with incremental_collection_failed() == true without having done
+    // a full collection in between.
+    if (!seen_incremental_collection_failed &&
+        gch->incremental_collection_failed()) {
+      seen_incremental_collection_failed = true;
+    } else if (seen_incremental_collection_failed) {
+      assert(!gch->incremental_collection_failed(), "Twice in a row");
+      seen_incremental_collection_failed = false;
     }
+#endif // ASSERT
   }
 
   if (ZapUnusedHeapArea) {
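The debug-only bookkeeping above encodes an invariant worth spelling out: once a scavenge fails, the policy is expected to force a full collection, which clears the flag, before another failed scavenge can be observed. As an illustration (allowed and forbidden event sequences, not code):

    allowed:    scavenge fails -> full gc (flag cleared) -> scavenge fails -> ...
    forbidden:  scavenge fails -> scavenge fails   (asserts "Twice in a row")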
--- a/hotspot/src/share/vm/memory/defNewGeneration.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/memory/defNewGeneration.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -82,12 +82,6 @@
   Stack<oop>     _objs_with_preserved_marks;
   Stack<markOop> _preserved_marks_of_objs;
 
-  // Returns true if the collection can be safely attempted.
-  // If this method returns false, a collection is not
-  // guaranteed to fail but the system may not be able
-  // to recover from the failure.
-  bool collection_attempt_is_safe();
-
   // Promotion failure handling
   OopClosure *_promo_failure_scan_stack_closure;
   void set_promo_failure_scan_stack_closure(OopClosure *scan_stack_closure) {
@@ -304,6 +298,14 @@
 
   // GC support
   virtual void compute_new_size();
+
+  // Returns true if the collection is likely to complete safely.
+  // Even if it returns true, success is not guaranteed, but the
+  // system should be able to unwind and recover from a failure,
+  // albeit at some additional cost. Overrides the superclass
+  // implementation.
+  virtual bool collection_attempt_is_safe();
+
   virtual void collect(bool   full,
                        bool   clear_all_soft_refs,
                        size_t size,
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -142,8 +142,7 @@
   }
   _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());
 
-  clear_incremental_collection_will_fail();
-  clear_last_incremental_collection_failed();
+  clear_incremental_collection_failed();
 
 #ifndef SERIALGC
   // If we are running CMS, create the collector responsible
@@ -1347,17 +1346,6 @@
 };
 
 void GenCollectedHeap::gc_epilogue(bool full) {
-  // Remember if a partial collection of the heap failed, and
-  // we did a complete collection.
-  if (full && incremental_collection_will_fail()) {
-    set_last_incremental_collection_failed();
-  } else {
-    clear_last_incremental_collection_failed();
-  }
-  // Clear the flag, if set; the generation gc_epilogues will set the
-  // flag again if the condition persists despite the collection.
-  clear_incremental_collection_will_fail();
-
 #ifdef COMPILER2
   assert(DerivedPointerTable::is_empty(), "derived pointer present");
   size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -62,11 +62,10 @@
   // The generational collector policy.
   GenCollectorPolicy* _gen_policy;
 
-  // If a generation would bail out of an incremental collection,
-  // it sets this flag.  If the flag is set, satisfy_failed_allocation
-  // will attempt allocating in all generations before doing a full GC.
-  bool _incremental_collection_will_fail;
-  bool _last_incremental_collection_failed;
+  // Indicates that the most recent incremental collection failed.
+  // The flag is cleared when an action is taken that might clear the
+  // condition that caused that incremental collection to fail.
+  bool _incremental_collection_failed;
 
   // In support of ExplicitGCInvokesConcurrent functionality
   unsigned int _full_collections_completed;
@@ -469,26 +468,26 @@
   // call to "save_marks".
   bool no_allocs_since_save_marks(int level);
 
+  // Returns true if an incremental collection is likely to fail.
+  bool incremental_collection_will_fail() {
+    // Assumes a 2-generation system; the first disjunct remembers if an
+    // incremental collection failed, even when we thought (second disjunct)
+    // that it would not.
+    assert(heap()->collector_policy()->is_two_generation_policy(),
+           "the following definition may not be suitable for an n(>2)-generation system");
+    return incremental_collection_failed() || !get_gen(0)->collection_attempt_is_safe();
+  }
+
   // If a generation bails out of an incremental collection,
   // it sets this flag.
-  bool incremental_collection_will_fail() {
-    return _incremental_collection_will_fail;
+  bool incremental_collection_failed() const {
+    return _incremental_collection_failed;
   }
-  void set_incremental_collection_will_fail() {
-    _incremental_collection_will_fail = true;
+  void set_incremental_collection_failed() {
+    _incremental_collection_failed = true;
   }
-  void clear_incremental_collection_will_fail() {
-    _incremental_collection_will_fail = false;
-  }
-
-  bool last_incremental_collection_failed() const {
-    return _last_incremental_collection_failed;
-  }
-  void set_last_incremental_collection_failed() {
-    _last_incremental_collection_failed = true;
-  }
-  void clear_last_incremental_collection_failed() {
-    _last_incremental_collection_failed = false;
+  void clear_incremental_collection_failed() {
+    _incremental_collection_failed = false;
   }
 
   // Promotion of obj into gen failed.  Try to promote obj to higher non-perm
--- a/hotspot/src/share/vm/memory/generation.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/memory/generation.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -165,15 +165,16 @@
   return max;
 }
 
-bool Generation::promotion_attempt_is_safe(size_t promotion_in_bytes,
-                                           bool not_used) const {
+bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
+  size_t available = max_contiguous_available();
+  bool   res = (available >= max_promotion_in_bytes);
   if (PrintGC && Verbose) {
-    gclog_or_tty->print_cr("Generation::promotion_attempt_is_safe"
-                " contiguous_available: " SIZE_FORMAT
-                " promotion_in_bytes: " SIZE_FORMAT,
-                max_contiguous_available(), promotion_in_bytes);
+    gclog_or_tty->print_cr(
+      "Generation: promo attempt is%s safe: available("SIZE_FORMAT") %s max_promo("SIZE_FORMAT")",
+      res? "":" not", available, res? ">=":"<",
+      max_promotion_in_bytes);
   }
-  return max_contiguous_available() >= promotion_in_bytes;
+  return res;
 }
 
 // Ignores "ref" and calls allocate().
--- a/hotspot/src/share/vm/memory/generation.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/memory/generation.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -173,15 +173,11 @@
   // The largest number of contiguous free bytes in this or any higher generation.
   virtual size_t max_contiguous_available() const;
 
-  // Returns true if promotions of the specified amount can
-  // be attempted safely (without a vm failure).
+  // Returns true if promotions of the specified amount are
+  // likely to succeed without a promotion failure.
   // Promotion of the full amount is not guaranteed but
-  // can be attempted.
-  //   younger_handles_promotion_failure
-  // is true if the younger generation handles a promotion
-  // failure.
-  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
-    bool younger_handles_promotion_failure) const;
+  // might be attempted in the worst case.
+  virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const;
 
   // For a non-young generation, this interface can be used to inform a
   // generation that a promotion attempt into that generation failed.
@@ -358,6 +354,16 @@
     return (full || should_allocate(word_size, is_tlab));
   }
 
+  // Returns true if the collection is likely to complete safely.
+  // Even if it returns true, success is not guaranteed, but the
+  // system should be able to unwind and recover from a failure,
+  // albeit at some additional cost.
+  virtual bool collection_attempt_is_safe() {
+    guarantee(false, "Are you sure you want to call this method?");
+    return true;
+  }
+
   // Perform a garbage collection.
   // If full is true attempt a full garbage collection of this generation.
   // Otherwise, attempting to (at least) free enough space to support an
--- a/hotspot/src/share/vm/memory/tenuredGeneration.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -419,29 +419,16 @@
 void TenuredGeneration::verify_alloc_buffers_clean() {}
 #endif // SERIALGC
 
-bool TenuredGeneration::promotion_attempt_is_safe(
-    size_t max_promotion_in_bytes,
-    bool younger_handles_promotion_failure) const {
-
-  bool result = max_contiguous_available() >= max_promotion_in_bytes;
-
-  if (younger_handles_promotion_failure && !result) {
-    result = max_contiguous_available() >=
-      (size_t) gc_stats()->avg_promoted()->padded_average();
-    if (PrintGC && Verbose && result) {
-      gclog_or_tty->print_cr("TenuredGeneration::promotion_attempt_is_safe"
-                  " contiguous_available: " SIZE_FORMAT
-                  " avg_promoted: " SIZE_FORMAT,
-                  max_contiguous_available(),
-                  gc_stats()->avg_promoted()->padded_average());
-    }
-  } else {
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("TenuredGeneration::promotion_attempt_is_safe"
-                  " contiguous_available: " SIZE_FORMAT
-                  " promotion_in_bytes: " SIZE_FORMAT,
-                  max_contiguous_available(), max_promotion_in_bytes);
-    }
+bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
+  size_t available = max_contiguous_available();
+  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
+  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
+  if (PrintGC && Verbose) {
+    gclog_or_tty->print_cr(
+      "Tenured: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
+      "max_promo("SIZE_FORMAT")",
+      res? "":" not", available, res? ">=":"<",
+      av_promo, max_promotion_in_bytes);
   }
-  return result;
+  return res;
 }
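A worked example with hypothetical numbers: suppose tenured has 8 MB contiguously available, the padded average promotion is 5 MB, and eden currently holds 20 MB. Then:

    // available >= av_promo : 8 MB >= 5 MB  -> true
    // available >= max_promo: 8 MB >= 20 MB -> false
    size_t available = 8*M, av_promo = 5*M, max_promo = 20*M;
    bool res = (available >= av_promo) || (available >= max_promo);  // true

The bet is that far less than all of eden typically survives a scavenge; if the bet loses, the promotion-failure path (forward-to-self plus set_incremental_collection_failed) unwinds the scavenge instead of exiting the VM, which is what lets HandlePromotionFailure and MaxLiveObjectEvacuationRatio be obsoleted in the arguments.cpp hunk below.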
--- a/hotspot/src/share/vm/memory/tenuredGeneration.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -101,8 +101,7 @@
 
   virtual void update_gc_stats(int level, bool full);
 
-  virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes,
-    bool younger_handles_promotion_failure) const;
+  virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes) const;
 
   void verify_alloc_buffers_clean();
 };
--- a/hotspot/src/share/vm/oops/methodOop.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/oops/methodOop.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -247,6 +247,10 @@
     return constMethod()->stackmap_data();
   }
 
+  void set_stackmap_data(typeArrayOop sd) {
+    constMethod()->set_stackmap_data(sd);
+  }
+
   // exception handler table
   typeArrayOop exception_table() const
                                    { return constMethod()->exception_table(); }
--- a/hotspot/src/share/vm/prims/jvmtiImpl.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiImpl.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -25,26 +25,6 @@
 # include "incls/_precompiled.incl"
 # include "incls/_jvmtiImpl.cpp.incl"
 
-GrowableArray<JvmtiRawMonitor*> *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP) GrowableArray<JvmtiRawMonitor*>(1,true);
-
-void JvmtiPendingMonitors::transition_raw_monitors() {
-  assert((Threads::number_of_threads()==1),
-         "Java thread has not created yet or more than one java thread \
-is running. Raw monitor transition will not work");
-  JavaThread *current_java_thread = JavaThread::current();
-  assert(current_java_thread->thread_state() == _thread_in_vm, "Must be in vm");
-  {
-    ThreadBlockInVM __tbivm(current_java_thread);
-    for(int i=0; i< count(); i++) {
-      JvmtiRawMonitor *rmonitor = monitors()->at(i);
-      int r = rmonitor->raw_enter(current_java_thread);
-      assert(r == ObjectMonitor::OM_OK, "raw_enter should have worked");
-    }
-  }
-  // pending monitors are converted to real monitor so delete them all.
-  dispose();
-}
-
 //
 // class JvmtiAgentThread
 //
@@ -216,57 +196,6 @@
   }
 }
 
-
-//
-// class JvmtiRawMonitor
-//
-
-JvmtiRawMonitor::JvmtiRawMonitor(const char *name) {
-#ifdef ASSERT
-  _name = strcpy(NEW_C_HEAP_ARRAY(char, strlen(name) + 1), name);
-#else
-  _name = NULL;
-#endif
-  _magic = JVMTI_RM_MAGIC;
-}
-
-JvmtiRawMonitor::~JvmtiRawMonitor() {
-#ifdef ASSERT
-  FreeHeap(_name);
-#endif
-  _magic = 0;
-}
-
-
-bool
-JvmtiRawMonitor::is_valid() {
-  int value = 0;
-
-  // This object might not be a JvmtiRawMonitor so we can't assume
-  // the _magic field is properly aligned. Get the value in a safe
-  // way and then check against JVMTI_RM_MAGIC.
-
-  switch (sizeof(_magic)) {
-  case 2:
-    value = Bytes::get_native_u2((address)&_magic);
-    break;
-
-  case 4:
-    value = Bytes::get_native_u4((address)&_magic);
-    break;
-
-  case 8:
-    value = Bytes::get_native_u8((address)&_magic);
-    break;
-
-  default:
-    guarantee(false, "_magic field is an unexpected size");
-  }
-
-  return value == JVMTI_RM_MAGIC;
-}
-
-
 //
 // class JvmtiBreakpoint
 //
--- a/hotspot/src/share/vm/prims/jvmtiImpl.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiImpl.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -26,7 +26,6 @@
 // Forward Declarations
 //
 
-class JvmtiRawMonitor;
 class JvmtiBreakpoint;
 class JvmtiBreakpoints;
 
@@ -327,76 +326,6 @@
     return false;
 }
 
-
-///////////////////////////////////////////////////////////////
-//
-// class JvmtiRawMonitor
-//
-// Used by JVMTI methods: All RawMonitor methods (CreateRawMonitor, EnterRawMonitor, etc.)
-//
-// Wrapper for ObjectMonitor class that saves the Monitor's name
-//
-
-class JvmtiRawMonitor : public ObjectMonitor  {
-private:
-  int           _magic;
-  char *        _name;
-  // JVMTI_RM_MAGIC is set in contructor and unset in destructor.
-  enum { JVMTI_RM_MAGIC = (int)(('T' << 24) | ('I' << 16) | ('R' << 8) | 'M') };
-
-public:
-  JvmtiRawMonitor(const char *name);
-  ~JvmtiRawMonitor();
-  int            magic()   { return _magic;  }
-  const char *get_name()   { return _name; }
-  bool        is_valid();
-};
-
-// Onload pending raw monitors
-// Class is used to cache onload or onstart monitor enter
-// which will transition into real monitor when
-// VM is fully initialized.
-class JvmtiPendingMonitors : public AllStatic {
-
-private:
-  static GrowableArray<JvmtiRawMonitor*> *_monitors; // Cache raw monitor enter
-
-  inline static GrowableArray<JvmtiRawMonitor*>* monitors() { return _monitors; }
-
-  static void dispose() {
-    delete monitors();
-  }
-
-public:
-  static void enter(JvmtiRawMonitor *monitor) {
-    monitors()->append(monitor);
-  }
-
-  static int count() {
-    return monitors()->length();
-  }
-
-  static void destroy(JvmtiRawMonitor *monitor) {
-    while (monitors()->contains(monitor)) {
-      monitors()->remove(monitor);
-    }
-  }
-
-  // Return false if monitor is not found in the list.
-  static bool exit(JvmtiRawMonitor *monitor) {
-    if (monitors()->contains(monitor)) {
-      monitors()->remove(monitor);
-      return true;
-    } else {
-      return false;
-    }
-  }
-
-  static void transition_raw_monitors();
-};
-
-
-
 ///////////////////////////////////////////////////////////////
 // The get/set local operations must only be done by the VM thread
 // because the interpreter version needs to access oop maps, which can
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/prims/jvmtiRawMonitor.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -0,0 +1,420 @@
+/*
+ * Copyright (c) 2003, 2007, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_jvmtiRawMonitor.cpp.incl"
+
+GrowableArray<JvmtiRawMonitor*> *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP) GrowableArray<JvmtiRawMonitor*>(1,true);
+
+void JvmtiPendingMonitors::transition_raw_monitors() {
+  assert((Threads::number_of_threads()==1),
+         "Java thread has not been created yet, or more than one Java thread \
+is running. Raw monitor transition will not work");
+  JavaThread *current_java_thread = JavaThread::current();
+  assert(current_java_thread->thread_state() == _thread_in_vm, "Must be in vm");
+  {
+    ThreadBlockInVM __tbivm(current_java_thread);
+    for(int i=0; i< count(); i++) {
+      JvmtiRawMonitor *rmonitor = monitors()->at(i);
+      int r = rmonitor->raw_enter(current_java_thread);
+      assert(r == ObjectMonitor::OM_OK, "raw_enter should have worked");
+    }
+  }
+  // pending monitors are converted to real monitors, so delete them all.
+  dispose();
+}
+
+//
+// class JvmtiRawMonitor
+//
+
+JvmtiRawMonitor::JvmtiRawMonitor(const char *name) {
+#ifdef ASSERT
+  _name = strcpy(NEW_C_HEAP_ARRAY(char, strlen(name) + 1), name);
+#else
+  _name = NULL;
+#endif
+  _magic = JVMTI_RM_MAGIC;
+}
+
+JvmtiRawMonitor::~JvmtiRawMonitor() {
+#ifdef ASSERT
+  FreeHeap(_name);
+#endif
+  _magic = 0;
+}
+
+
+bool
+JvmtiRawMonitor::is_valid() {
+  int value = 0;
+
+  // This object might not be a JvmtiRawMonitor so we can't assume
+  // the _magic field is properly aligned. Get the value in a safe
+  // way and then check against JVMTI_RM_MAGIC.
+
+  switch (sizeof(_magic)) {
+  case 2:
+    value = Bytes::get_native_u2((address)&_magic);
+    break;
+
+  case 4:
+    value = Bytes::get_native_u4((address)&_magic);
+    break;
+
+  case 8:
+    value = Bytes::get_native_u8((address)&_magic);
+    break;
+
+  default:
+    guarantee(false, "_magic field is an unexpected size");
+  }
+
+  return value == JVMTI_RM_MAGIC;
+}
+
+// -------------------------------------------------------------------------
+// The raw monitor subsystem is entirely distinct from normal
+// java-synchronization or jni-synchronization.  raw monitors are not
+// associated with objects.  They can be implemented in any manner
+// that makes sense.  The original implementors decided to piggy-back
+// the raw-monitor implementation on the existing Java objectMonitor mechanism.
+// This flaw needs to be fixed.  We should reimplement raw monitors as sui generis.
+// Specifically, we should not implement raw monitors via java monitors.
+// Time permitting, we should disentangle and deconvolve the two implementations
+// and move the resulting raw monitor implementation over to the JVMTI directories.
+// Ideally, the raw monitor implementation would be built on top of
+// park-unpark and nothing else.
+//
+// raw monitors are used mainly by JVMTI
+// The raw monitor implementation borrows the ObjectMonitor structure,
+// but the operators are degenerate and extremely simple.
+//
+// Mixed use of a single objectMonitor instance -- as both a raw monitor
+// and a normal java monitor -- is not permissible.
+//
+// Note that we use the single RawMonitor_lock to protect queue operations for
+// _all_ raw monitors.  This is a scalability impediment, but since raw monitor usage
+// is deprecated and rare, this is not a concern.  The RawMonitor_lock cannot
+// be held indefinitely.  The critical sections must be short and bounded.
+//
+// -------------------------------------------------------------------------
+
+int JvmtiRawMonitor::SimpleEnter (Thread * Self) {
+  for (;;) {
+    if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
+       return OS_OK ;
+    }
+
+    ObjectWaiter Node (Self) ;
+    Self->_ParkEvent->reset() ;     // strictly optional
+    Node.TState = ObjectWaiter::TS_ENTER ;
+
+    RawMonitor_lock->lock_without_safepoint_check() ;
+    Node._next  = _EntryList ;
+    _EntryList  = &Node ;
+    OrderAccess::fence() ;
+    if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
+        _EntryList = Node._next ;
+        RawMonitor_lock->unlock() ;
+        return OS_OK ;
+    }
+    RawMonitor_lock->unlock() ;
+    while (Node.TState == ObjectWaiter::TS_ENTER) {
+       Self->_ParkEvent->park() ;
+    }
+  }
+}
+
+int JvmtiRawMonitor::SimpleExit (Thread * Self) {
+  guarantee (_owner == Self, "invariant") ;
+  OrderAccess::release_store_ptr (&_owner, NULL) ;
+  OrderAccess::fence() ;
+  if (_EntryList == NULL) return OS_OK ;
+  ObjectWaiter * w ;
+
+  RawMonitor_lock->lock_without_safepoint_check() ;
+  w = _EntryList ;
+  if (w != NULL) {
+      _EntryList = w->_next ;
+  }
+  RawMonitor_lock->unlock() ;
+  if (w != NULL) {
+      guarantee (w ->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+      ParkEvent * ev = w->_event ;
+      w->TState = ObjectWaiter::TS_RUN ;
+      OrderAccess::fence() ;
+      ev->unpark() ;
+  }
+  return OS_OK ;
+}
+
+int JvmtiRawMonitor::SimpleWait (Thread * Self, jlong millis) {
+  guarantee (_owner == Self  , "invariant") ;
+  guarantee (_recursions == 0, "invariant") ;
+
+  ObjectWaiter Node (Self) ;
+  Node._notified = 0 ;
+  Node.TState    = ObjectWaiter::TS_WAIT ;
+
+  RawMonitor_lock->lock_without_safepoint_check() ;
+  Node._next     = _WaitSet ;
+  _WaitSet       = &Node ;
+  RawMonitor_lock->unlock() ;
+
+  SimpleExit (Self) ;
+  guarantee (_owner != Self, "invariant") ;
+
+  int ret = OS_OK ;
+  if (millis <= 0) {
+    Self->_ParkEvent->park();
+  } else {
+    ret = Self->_ParkEvent->park(millis);
+  }
+
+  // If thread still resides on the waitset then unlink it.
+  // Double-checked locking -- the usage is safe in this context
+  // as we TState is volatile and the lock-unlock operators are
+  // serializing (barrier-equivalent).
+
+  if (Node.TState == ObjectWaiter::TS_WAIT) {
+    RawMonitor_lock->lock_without_safepoint_check() ;
+    if (Node.TState == ObjectWaiter::TS_WAIT) {
+      // Simple O(n) unlink, but performance isn't critical here.
+      ObjectWaiter * p ;
+      ObjectWaiter * q = NULL ;
+      for (p = _WaitSet ; p != &Node; p = p->_next) {
+         q = p ;
+      }
+      guarantee (p == &Node, "invariant") ;
+      if (q == NULL) {
+        guarantee (p == _WaitSet, "invariant") ;
+        _WaitSet = p->_next ;
+      } else {
+        guarantee (p == q->_next, "invariant") ;
+        q->_next = p->_next ;
+      }
+      Node.TState = ObjectWaiter::TS_RUN ;
+    }
+    RawMonitor_lock->unlock() ;
+  }
+
+  guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ;
+  SimpleEnter (Self) ;
+
+  guarantee (_owner == Self, "invariant") ;
+  guarantee (_recursions == 0, "invariant") ;
+  return ret ;
+}
+
+int JvmtiRawMonitor::SimpleNotify (Thread * Self, bool All) {
+  guarantee (_owner == Self, "invariant") ;
+  if (_WaitSet == NULL) return OS_OK ;
+
+  // We have two options:
+  // A. Transfer the threads from the WaitSet to the EntryList
+  // B. Remove the thread from the WaitSet and unpark() it.
+  //
+  // We use (B), which is crude and results in lots of futile
+  // context switching.  In particular (B) induces lots of contention.
+
+  ParkEvent * ev = NULL ;       // consider using a small auto array ...
+  RawMonitor_lock->lock_without_safepoint_check() ;
+  for (;;) {
+      ObjectWaiter * w = _WaitSet ;
+      if (w == NULL) break ;
+      _WaitSet = w->_next ;
+      if (ev != NULL) { ev->unpark(); ev = NULL; }
+      ev = w->_event ;
+      OrderAccess::loadstore() ;
+      w->TState = ObjectWaiter::TS_RUN ;
+      OrderAccess::storeload();
+      if (!All) break ;
+  }
+  RawMonitor_lock->unlock() ;
+  if (ev != NULL) ev->unpark();
+  return OS_OK ;
+}
+
+// Any JavaThread will enter here with state _thread_blocked
+int JvmtiRawMonitor::raw_enter(TRAPS) {
+  TEVENT (raw_enter) ;
+  void * Contended ;
+
+  // don't enter the raw monitor if the thread is being externally suspended;
+  // it would surprise the suspender if a "suspended" thread could still enter a monitor
+  JavaThread * jt = (JavaThread *)THREAD;
+  if (THREAD->is_Java_thread()) {
+    jt->SR_lock()->lock_without_safepoint_check();
+    while (jt->is_external_suspend()) {
+      jt->SR_lock()->unlock();
+      jt->java_suspend_self();
+      jt->SR_lock()->lock_without_safepoint_check();
+    }
+    // guarded by SR_lock to avoid racing with new external suspend requests.
+    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
+    jt->SR_lock()->unlock();
+  } else {
+    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
+  }
+
+  if (Contended == THREAD) {
+     _recursions ++ ;
+     return OM_OK ;
+  }
+
+  if (Contended == NULL) {
+     guarantee (_owner == THREAD, "invariant") ;
+     guarantee (_recursions == 0, "invariant") ;
+     return OM_OK ;
+  }
+
+  THREAD->set_current_pending_monitor(this);
+
+  if (!THREAD->is_Java_thread()) {
+     // No non-Java thread other than the VM thread would acquire
+     // a raw monitor.
+     assert(THREAD->is_VM_thread(), "must be VM thread");
+     SimpleEnter (THREAD) ;
+   } else {
+     guarantee (jt->thread_state() == _thread_blocked, "invariant") ;
+     for (;;) {
+       jt->set_suspend_equivalent();
+       // cleared by handle_special_suspend_equivalent_condition() or
+       // java_suspend_self()
+       SimpleEnter (THREAD) ;
+
+       // were we externally suspended while we were waiting?
+       if (!jt->handle_special_suspend_equivalent_condition()) break ;
+
+       // This thread was externally suspended
+       //
+       // This logic isn't needed for JVMTI raw monitors,
+       // but doesn't hurt just in case the suspend rules change. This
+       // logic is needed for the JvmtiRawMonitor.wait() reentry phase.
+       // We have reentered the contended monitor, but while we were
+       // waiting another thread suspended us. We don't want to reenter
+       // the monitor while suspended because that would surprise the
+       // thread that suspended us.
+       //
+       // Drop the lock -
+       SimpleExit (THREAD) ;
+
+       jt->java_suspend_self();
+     }
+
+     assert(_owner == THREAD, "Fatal error with monitor owner!");
+     assert(_recursions == 0, "Fatal error with monitor recursions!");
+  }
+
+  THREAD->set_current_pending_monitor(NULL);
+  guarantee (_recursions == 0, "invariant") ;
+  return OM_OK;
+}
+
+// Used mainly for JVMTI raw monitor implementation
+// Also used for JvmtiRawMonitor::wait().
+int JvmtiRawMonitor::raw_exit(TRAPS) {
+  TEVENT (raw_exit) ;
+  if (THREAD != _owner) {
+    return OM_ILLEGAL_MONITOR_STATE;
+  }
+  if (_recursions > 0) {
+    --_recursions ;
+    return OM_OK ;
+  }
+
+  SimpleExit (THREAD) ;
+
+  return OM_OK;
+}
+
+// Used for JVMTI raw monitor implementation.
+// All JavaThreads will enter here with state _thread_blocked
+
+int JvmtiRawMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) {
+  TEVENT (raw_wait) ;
+  if (THREAD != _owner) {
+    return OM_ILLEGAL_MONITOR_STATE;
+  }
+
+  // To avoid spurious wakeups we reset the ParkEvent -- this is strictly optional.
+  // The caller must be able to tolerate spurious returns from raw_wait().
+  THREAD->_ParkEvent->reset() ;
+  OrderAccess::fence() ;
+
+  // check interrupt event
+  if (interruptible && Thread::is_interrupted(THREAD, true)) {
+    return OM_INTERRUPTED;
+  }
+
+  intptr_t save = _recursions ;
+  _recursions = 0 ;
+  _waiters ++ ;
+  if (THREAD->is_Java_thread()) {
+    guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ;
+    ((JavaThread *)THREAD)->set_suspend_equivalent();
+  }
+  int rv = SimpleWait (THREAD, millis) ;
+  _recursions = save ;
+  _waiters -- ;
+
+  guarantee (THREAD == _owner, "invariant") ;
+  if (THREAD->is_Java_thread()) {
+     JavaThread * jSelf = (JavaThread *) THREAD ;
+     for (;;) {
+        if (!jSelf->handle_special_suspend_equivalent_condition()) break ;
+        SimpleExit (THREAD) ;
+        jSelf->java_suspend_self();
+        SimpleEnter (THREAD) ;
+        jSelf->set_suspend_equivalent() ;
+     }
+  }
+  guarantee (THREAD == _owner, "invariant") ;
+
+  if (interruptible && Thread::is_interrupted(THREAD, true)) {
+    return OM_INTERRUPTED;
+  }
+  return OM_OK ;
+}
+
+int JvmtiRawMonitor::raw_notify(TRAPS) {
+  TEVENT (raw_notify) ;
+  if (THREAD != _owner) {
+    return OM_ILLEGAL_MONITOR_STATE;
+  }
+  SimpleNotify (THREAD, false) ;
+  return OM_OK;
+}
+
+int JvmtiRawMonitor::raw_notifyAll(TRAPS) {
+  TEVENT (raw_notifyAll) ;
+  if (THREAD != _owner) {
+    return OM_ILLEGAL_MONITOR_STATE;
+  }
+  SimpleNotify (THREAD, true) ;
+  return OM_OK;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/prims/jvmtiRawMonitor.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+//
+// class JvmtiRawMonitor
+//
+// Used by JVMTI methods: All RawMonitor methods (CreateRawMonitor, EnterRawMonitor, etc.)
+//
+// Wrapper for ObjectMonitor class that saves the Monitor's name
+//
+
+class JvmtiRawMonitor : public ObjectMonitor  {
+private:
+  int           _magic;
+  char *        _name;
+  // JVMTI_RM_MAGIC is set in the constructor and unset in the destructor.
+  enum { JVMTI_RM_MAGIC = (int)(('T' << 24) | ('I' << 16) | ('R' << 8) | 'M') };
+
+  int       SimpleEnter (Thread * Self) ;
+  int       SimpleExit  (Thread * Self) ;
+  int       SimpleWait  (Thread * Self, jlong millis) ;
+  int       SimpleNotify (Thread * Self, bool All) ;
+
+public:
+  JvmtiRawMonitor(const char *name);
+  ~JvmtiRawMonitor();
+  int       raw_enter(TRAPS);
+  int       raw_exit(TRAPS);
+  int       raw_wait(jlong millis, bool interruptible, TRAPS);
+  int       raw_notify(TRAPS);
+  int       raw_notifyAll(TRAPS);
+  int            magic()   { return _magic;  }
+  const char *get_name()   { return _name; }
+  bool        is_valid();
+};
+
+// Onload pending raw monitors
+// Class is used to cache onload or onstart monitor enter
+// which will transition into real monitor when
+// VM is fully initialized.
+class JvmtiPendingMonitors : public AllStatic {
+
+private:
+  static GrowableArray<JvmtiRawMonitor*> *_monitors; // Cache raw monitor enter
+
+  inline static GrowableArray<JvmtiRawMonitor*>* monitors() { return _monitors; }
+
+  static void dispose() {
+    delete monitors();
+  }
+
+public:
+  static void enter(JvmtiRawMonitor *monitor) {
+    monitors()->append(monitor);
+  }
+
+  static int count() {
+    return monitors()->length();
+  }
+
+  static void destroy(JvmtiRawMonitor *monitor) {
+    while (monitors()->contains(monitor)) {
+      monitors()->remove(monitor);
+    }
+  }
+
+  // Return false if monitor is not found in the list.
+  static bool exit(JvmtiRawMonitor *monitor) {
+    if (monitors()->contains(monitor)) {
+      monitors()->remove(monitor);
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  static void transition_raw_monitors();
+};
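For orientation, a sketch of how the JVMTI entry points are expected to drive this class; the real call sites live in jvmtiEnv.cpp, and the thread variable and error handling here are illustrative assumptions:

    // Illustrative only. A JavaThread must be transitioned to
    // _thread_blocked before raw_enter/raw_wait, per jvmtiRawMonitor.cpp.
    JvmtiRawMonitor* m = new JvmtiRawMonitor("agent-lock");
    m->raw_enter(current_thread);                     // blocks until owned
    int r = m->raw_wait(100, true /*interruptible*/, current_thread);
    if (r == ObjectMonitor::OM_INTERRUPTED) {
      // caller maps this to a JVMTI interrupt-style error result
    }
    m->raw_notifyAll(current_thread);                 // requires ownership
    m->raw_exit(current_thread);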
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -119,11 +119,8 @@
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.version", "1.0", false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.name",
                                                                  "Java Virtual Machine Specification",  false));
-  PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.vendor",
-        JDK_Version::is_gte_jdk17x_version() ? "Oracle Corporation" : "Sun Microsystems Inc.", false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.version", VM_Version::vm_release(),  false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.name", VM_Version::vm_name(),  false));
-  PropertyList_add(&_system_properties, new SystemProperty("java.vm.vendor", VM_Version::vm_vendor(),  false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.info", VM_Version::vm_info_string(),  true));
 
   // following are JVMTI agent writeable properties.
@@ -151,6 +148,14 @@
   os::init_system_properties_values();
 }
 
+
+// Update/Initialize System properties after JDK version number is known
+void Arguments::init_version_specific_system_properties() {
+  PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.vendor",
+        JDK_Version::is_gte_jdk17x_version() ? "Oracle Corporation" : "Sun Microsystems Inc.", false));
+  PropertyList_add(&_system_properties, new SystemProperty("java.vm.vendor", VM_Version::vm_vendor(),  false));
+}
+
 /**
  * Provide a slightly more user-friendly way of eliminating -XX flags.
  * When a flag is eliminated, it can be added to this list in order to
@@ -185,6 +190,10 @@
                            JDK_Version::jdk_update(6,18), JDK_Version::jdk(7) },
   { "UseDepthFirstScavengeOrder",
                            JDK_Version::jdk_update(6,22), JDK_Version::jdk(7) },
+  { "HandlePromotionFailure",
+                           JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) },
+  { "MaxLiveObjectEvacuationRatio",
+                           JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) },
   { NULL, JDK_Version(0), JDK_Version(0) }
 };
 
@@ -948,26 +957,65 @@
   }
 }
 
+void Arguments::check_compressed_oops_compat() {
+#ifdef _LP64
+  assert(UseCompressedOops, "Precondition");
+#  if defined(COMPILER1) && !defined(TIERED)
+  // Until c1 supports compressed oops turn them off.
+  FLAG_SET_DEFAULT(UseCompressedOops, false);
+#  else
+  // Is it on by default or set on ergonomically
+  bool is_on_by_default = FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops);
+
+  // Tiered currently doesn't work with compressed oops
+  if (TieredCompilation) {
+    if (is_on_by_default) {
+      FLAG_SET_DEFAULT(UseCompressedOops, false);
+      return;
+    } else {
+      vm_exit_during_initialization(
+        "Tiered compilation is not supported with compressed oops yet", NULL);
+    }
+  }
+
+  // XXX JSR 292 currently does not support compressed oops
+  if (EnableMethodHandles) {
+    if (is_on_by_default) {
+      FLAG_SET_DEFAULT(UseCompressedOops, false);
+      return;
+    } else {
+      vm_exit_during_initialization(
+        "JSR292 is not supported with compressed oops yet", NULL);
+    }
+  }
+
+  // If dumping an archive or forcing its use, disable compressed oops if possible
+  if (DumpSharedSpaces || RequireSharedSpaces) {
+    if (is_on_by_default) {
+      FLAG_SET_DEFAULT(UseCompressedOops, false);
+      return;
+    } else {
+      vm_exit_during_initialization(
+        "Class Data Sharing is not supported with compressed oops yet", NULL);
+    }
+  } else if (UseSharedSpaces) {
+    // UseSharedSpaces is on by default. With compressed oops, we turn it off.
+    FLAG_SET_DEFAULT(UseSharedSpaces, false);
+  }
+
+#  endif // defined(COMPILER1) && !defined(TIERED)
+#endif // _LP64
+}
+
 void Arguments::set_tiered_flags() {
   if (FLAG_IS_DEFAULT(CompilationPolicyChoice)) {
     FLAG_SET_DEFAULT(CompilationPolicyChoice, 2);
   }
-
   if (CompilationPolicyChoice < 2) {
     vm_exit_during_initialization(
       "Incompatible compilation policy selected", NULL);
   }
-
-#ifdef _LP64
-  if (FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops)) {
-    UseCompressedOops = false;
-  }
-  if (UseCompressedOops) {
-    vm_exit_during_initialization(
-      "Tiered compilation is not supported with compressed oops yet", NULL);
-  }
-#endif
- // Increase the code cache size - tiered compiles a lot more.
+  // Increase the code cache size - tiered compiles a lot more.
   if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
     FLAG_SET_DEFAULT(ReservedCodeCacheSize, ReservedCodeCacheSize * 2);
   }
@@ -1676,7 +1724,8 @@
   bool status = true;
   status = status && verify_min_value(StackYellowPages, 1, "StackYellowPages");
   status = status && verify_min_value(StackRedPages, 1, "StackRedPages");
-  status = status && verify_min_value(StackShadowPages, 1, "StackShadowPages");
+  // too large a StackShadowPages value would put the stack bang beyond the reach of a single instruction
+  status = status && verify_interval(StackShadowPages, 1, 50, "StackShadowPages");
   return status;
 }
 
@@ -1722,8 +1771,6 @@
     status = false;
   }
 
-  status = status && verify_percentage(MaxLiveObjectEvacuationRatio,
-                              "MaxLiveObjectEvacuationRatio");
   status = status && verify_percentage(AdaptiveSizePolicyWeight,
                               "AdaptiveSizePolicyWeight");
   status = status && verify_percentage(AdaptivePermSizeWeight, "AdaptivePermSizeWeight");
@@ -2827,6 +2874,7 @@
   return JNI_OK;
 }
 
+
 // Parse entry point called from JNI_CreateJavaVM
 
 jint Arguments::parse(const JavaVMInitArgs* args) {
@@ -2969,10 +3017,6 @@
     PrintGC = true;
   }
 
-#if defined(_LP64) && defined(COMPILER1) && !defined(TIERED)
-  UseCompressedOops = false;
-#endif
-
   // Set object alignment values.
   set_object_alignment();
 
@@ -2987,13 +3031,10 @@
   set_ergonomics_flags();
 
 #ifdef _LP64
-  // XXX JSR 292 currently does not support compressed oops.
-  if (EnableMethodHandles && UseCompressedOops) {
-    if (FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops)) {
-      UseCompressedOops = false;
-    }
+  if (UseCompressedOops) {
+    check_compressed_oops_compat();
   }
-#endif // _LP64
+#endif
 
   // Check the GC selections again.
   if (!check_gc_consistency()) {
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -291,6 +291,8 @@
 
   // Tiered
   static void set_tiered_flags();
+  // Check compressed oops compatibility with other flags
+  static void check_compressed_oops_compat();
   // CMS/ParNew garbage collectors
   static void set_parnew_gc_flags();
   static void set_cms_and_parnew_gc_flags();
@@ -484,6 +486,9 @@
   // System properties
   static void init_system_properties();
 
+  // Update/Initialize System properties after JDK version number is known
+  static void init_version_specific_system_properties();
+
   // Property List manipulation
   static void PropertyList_add(SystemProperty** plist, SystemProperty *element);
   static void PropertyList_add(SystemProperty** plist, const char* k, char* v);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/runtime/basicLock.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_basicLock.cpp.incl"
+
+void BasicLock::print_on(outputStream* st) const {
+  st->print("monitor");
+}
+
+void BasicLock::move_to(oop obj, BasicLock* dest) {
+  // Check to see if we need to inflate the lock. This is only needed
+  // if an object is locked using "this" lightweight monitor. In that
+  // case, the displaced_header() is unlocked, because the
+  // displaced_header() contains the header for the originally unlocked
+  // object. However the object could have already been inflated. But it
+  // does not matter, the inflation will just a no-op. For other cases,
+  // the displaced header will be either 0x0 or 0x3, which are location
+  // independent, therefore the BasicLock is free to move.
+  //
+  // During OSR we may need to relocate a BasicLock (which contains a
+  // displaced word) from a location in an interpreter frame to a
+  // new location in a compiled frame.  "this" refers to the source
+  // basiclock in the interpreter frame.  "dest" refers to the destination
+  // basiclock in the new compiled frame.  We *always* inflate in move_to().
+  // The always-Inflate policy works properly, but in 1.5.0 it can sometimes
+  // cause performance problems in code that makes heavy use of a small # of
+  // uncontended locks.   (We'd inflate during OSR, and then sync performance
+  // would subsequently plummet because the thread would be forced thru the slow-path).
+  // This problem has been made largely moot on IA32 by inlining the inflated fast-path
+  // operations in Fast_Lock and Fast_Unlock in i486.ad.
+  //
+  // Note that there is a way to safely swing the object's markword from
+  // one stack location to another.  This avoids inflation.  Obviously,
+  // we need to ensure that both locations refer to the current thread's stack.
+  // There are some subtle concurrency issues, however, and since the benefit
+  // is small (given the support for inflated fast-path locking in fast_lock, etc.)
+  // we'll leave that optimization for another time.
+
+  if (displaced_header()->is_neutral()) {
+    ObjectSynchronizer::inflate_helper(obj);
+    // WARNING: We cannot add a check here, because the inflation
+    // does not update the displaced header. Once a BasicLock is inflated,
+    // no one should ever look at its contents.
+  } else {
+    // Typically the displaced header will be 0 (recursive stack lock) or
+    // unused_mark.  Naively we'd like to assert that the displaced mark
+    // value is either 0, neutral, or 3.  But with the advent of the
+    // store-before-CAS avoidance in fast_lock/compiler_lock_object
+    // we can find any flavor mark in the displaced mark.
+  }
+  // [RGV] The next line appears to do nothing!
+  intptr_t dh = (intptr_t) displaced_header();
+  dest->set_displaced_header(displaced_header());
+}
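+
+// Illustrative call shape (an editorial sketch; the real callers live in the
+// deoptimization/OSR code, and the names here are hypothetical):
+//
+//   BasicObjectLock* src = ... ;  // monitor slot in the interpreter frame
+//   BasicLock* dest      = ... ;  // lock slot in the new compiled frame
+//   src->lock()->move_to(src->obj(), dest) ;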
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/runtime/basicLock.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class BasicLock VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+ private:
+  volatile markOop _displaced_header;
+ public:
+  markOop      displaced_header() const               { return _displaced_header; }
+  void         set_displaced_header(markOop header)   { _displaced_header = header; }
+
+  void print_on(outputStream* st) const;
+
+  // Move a basic lock (used during deoptimization).
+  void move_to(oop obj, BasicLock* dest);
+
+  static int displaced_header_offset_in_bytes()       { return offset_of(BasicLock, _displaced_header); }
+};
+
+// A BasicObjectLock associates a specific Java object with a BasicLock.
+// It is currently embedded in an interpreter frame.
+
+// Because some machines have alignment restrictions on the control stack,
+// the actual space allocated by the interpreter may include padding words
+// after the end of the BasicObjectLock.  Also, in order to guarantee
+// alignment of the embedded BasicLock objects on such machines, we
+// put the embedded BasicLock at the beginning of the struct.
+
+class BasicObjectLock VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+ private:
+  BasicLock _lock;                                    // the lock, must be double word aligned
+  oop       _obj;                                     // the object that holds the lock
+
+ public:
+  // Manipulation
+  oop      obj() const                                { return _obj;  }
+  void set_obj(oop obj)                               { _obj = obj; }
+  BasicLock* lock()                                   { return &_lock; }
+
+  // Note: Use frame::interpreter_frame_monitor_size() for the size of BasicObjectLocks
+  //       in interpreter activation frames since it includes machine-specific padding.
+  static int size()                                   { return sizeof(BasicObjectLock)/wordSize; }
+
+  // GC support
+  void oops_do(OopClosure* f) { f->do_oop(&_obj); }
+
+  static int obj_offset_in_bytes()                    { return offset_of(BasicObjectLock, _obj);  }
+  static int lock_offset_in_bytes()                   { return offset_of(BasicObjectLock, _lock); }
+};
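+// The byte offsets above let generated code address the embedded fields
+// directly.  An illustrative (hypothetical) use from an x86 assembler stub:
+//
+//   __ movptr(Address(monitor_reg, BasicObjectLock::obj_offset_in_bytes()),
+//             obj_reg) ;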
+
--- a/hotspot/src/share/vm/runtime/globals.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -327,10 +327,10 @@
   /* UseMembar is theoretically a temp flag used for memory barrier         \
    * removal testing.  It was supposed to be removed before FCS but has     \
    * been re-added (see 6401008) */                                         \
-  product(bool, UseMembar, false,                                           \
+  product_pd(bool, UseMembar,                                               \
           "(Unstable) Issues membars on thread state transitions")          \
                                                                             \
-  /* Temporary: See 6948537 */                                             \
+  /* Temporary: See 6948537 */                                              \
   experimental(bool, UseMemSetInBOT, true,                                  \
           "(Unstable) uses memset in BOT updates in GC code")               \
                                                                             \
@@ -822,6 +822,9 @@
   develop(bool, PrintJVMWarnings, false,                                    \
           "Prints warnings for unimplemented JVM functions")                \
                                                                             \
+  product(bool, PrintWarnings, true,                                        \
+          "Prints JVM warnings to output stream")                           \
+                                                                            \
   notproduct(uintx, WarnOnStalledSpinLock, 0,                               \
           "Prints warnings for stalled SpinLocks")                          \
                                                                             \
@@ -1585,7 +1588,7 @@
           "(Temporary, subject to experimentation)"                         \
           "Nominal minimum work per abortable preclean iteration")          \
                                                                             \
-  product(intx, CMSAbortablePrecleanWaitMillis, 100,                        \
+  manageable(intx, CMSAbortablePrecleanWaitMillis, 100,                     \
           "(Temporary, subject to experimentation)"                         \
           " Time that we sleep between iterations when not given"           \
           " enough work per iteration")                                     \
@@ -1677,7 +1680,7 @@
   product(uintx, CMSWorkQueueDrainThreshold, 10,                            \
           "Don't drain below this size per parallel worker/thief")          \
                                                                             \
-  product(intx, CMSWaitDuration, 2000,                                      \
+  manageable(intx, CMSWaitDuration, 2000,                                   \
           "Time in milliseconds that CMS thread waits for young GC")        \
                                                                             \
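+  /* Editorial note: "manageable" flags (such as CMSWaitDuration above)     \
+   * are writable at run time, e.g. (illustrative) via                      \
+   * "jinfo -flag CMSWaitDuration=1500 <pid>" or via                        \
+   * com.sun.management.HotSpotDiagnosticMXBean.setVMOption(). */           \
+                                                                            \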
   product(bool, CMSYield, true,                                             \
@@ -1786,10 +1789,6 @@
   notproduct(bool, GCALotAtAllSafepoints, false,                            \
           "Enforce ScavengeALot/GCALot at all potential safepoints")        \
                                                                             \
-  product(bool, HandlePromotionFailure, true,                               \
-          "The youngest generation collection does not require "            \
-          "a guarantee of full promotion of all live objects.")             \
-                                                                            \
   product(bool, PrintPromotionFailure, false,                               \
           "Print additional diagnostic information following "              \
           " promotion failure")                                             \
@@ -3003,9 +3002,6 @@
   product(intx, NewRatio, 2,                                                \
           "Ratio of new/old generation sizes")                              \
                                                                             \
-  product(uintx, MaxLiveObjectEvacuationRatio, 100,                         \
-          "Max percent of eden objects that will be live at scavenge")      \
-                                                                            \
   product_pd(uintx, NewSizeThreadIncrease,                                  \
           "Additional size added to desired new generation size per "       \
           "non-daemon thread (in bytes)")                                   \
@@ -3542,7 +3538,7 @@
   product(uintx, SharedDummyBlockSize, 512*M,                               \
           "Size of dummy block used to shift heap addresses (in bytes)")    \
                                                                             \
-  product(uintx, SharedReadWriteSize,  12*M,                                \
+  product(uintx, SharedReadWriteSize,  NOT_LP64(12*M) LP64_ONLY(13*M),      \
           "Size of read-write space in permanent generation (in bytes)")    \
                                                                             \
   product(uintx, SharedReadOnlySize,   10*M,                                \
--- a/hotspot/src/share/vm/runtime/mutex.hpp	Thu Nov 04 15:32:01 2010 -0700
+++ b/hotspot/src/share/vm/runtime/mutex.hpp	Sat Nov 13 18:40:37 2010 -0800
@@ -265,48 +265,3 @@
    }
 };
 
-/*
- * Per-thread blocking support for JSR166. See the Java-level
- * Documentation for rationale. Basically, park acts like wait, unpark
- * like notify.
- *
- * 6271289 --
- * To avoid errors where an os thread expires but the JavaThread still
- * exists, Parkers are immortal (type-stable) and are recycled across
- * new threads.  This parallels the ParkEvent implementation.
- * Because park-unpark allow spurious wakeups it is harmless if an
- * unpark call unparks a new thread using the old Parker reference.
- *
- * In the future we'll want to think about eliminating Parker and using
- * ParkEvent instead.  There's considerable duplication between the two
- * services.
- *
- */
-
-class Parker : public os::PlatformParker {
-private:
-  volatile int _counter ;
-  Parker * FreeNext ;
-  JavaThread * AssociatedWith ; // Current association
-
-public:
-  Parker() : PlatformParker() {
-    _counter       = 0 ;
-    FreeNext       = NULL ;
-    AssociatedWith = NULL ;
-  }
-protected:
-  ~Parker() { ShouldNotReachHere(); }
-public:
-  // For simplicity of interface with Java, all forms of park (indefinite,
-  // relative, and absolute) are multiplexed into one call.
-  void park(bool isAbsolute, jlong time);
-  void unpark();
-
-  // Lifecycle operators
-  static Parker * Allocate (JavaThread * t) ;
-  static void Release (Parker * e) ;
-private:
-  static Parker * volatile FreeList ;
-  static volatile int ListLock ;
-};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/runtime/objectMonitor.cpp	Sat Nov 13 18:40:37 2010 -0800
@@ -0,0 +1,2421 @@
+/*
+ * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_objectMonitor.cpp.incl"
+
+#if defined(__GNUC__) && !defined(IA64)
+  // Need to inhibit inlining for older versions of GCC to avoid build-time failures
+  #define ATTR __attribute__((noinline))
+#else
+  #define ATTR
+#endif
+
+
+#ifdef DTRACE_ENABLED
+
+// Only bother with this argument setup if dtrace is available
+// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
+
+HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
+  jlong, uintptr_t, char*, int);
+HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll,
+  jlong, uintptr_t, char*, int);
+HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter,
+  jlong, uintptr_t, char*, int);
+HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered,
+  jlong, uintptr_t, char*, int);
+HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit,
+  jlong, uintptr_t, char*, int);
+
+#define DTRACE_MONITOR_PROBE_COMMON(klassOop, thread)                      \
+  char* bytes = NULL;                                                      \
+  int len = 0;                                                             \
+  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
+  symbolOop klassname = ((oop)(klassOop))->klass()->klass_part()->name();  \
+  if (klassname != NULL) {                                                 \
+    bytes = (char*)klassname->bytes();                                     \
+    len = klassname->utf8_length();                                        \
+  }
+
+#define DTRACE_MONITOR_WAIT_PROBE(monitor, klassOop, thread, millis)       \
+  {                                                                        \
+    if (DTraceMonitorProbes) {                                            \
+      DTRACE_MONITOR_PROBE_COMMON(klassOop, thread);                       \
+      HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid,                       \
+                       (monitor), bytes, len, (millis));                   \
+    }                                                                      \
+  }
+
+#define DTRACE_MONITOR_PROBE(probe, monitor, klassOop, thread)             \
+  {                                                                        \
+    if (DTraceMonitorProbes) {                                            \
+      DTRACE_MONITOR_PROBE_COMMON(klassOop, thread);                       \
+      HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid,                    \
+                       (uintptr_t)(monitor), bytes, len);                  \
+    }                                                                      \
+  }
+
+#else //  ndef DTRACE_ENABLED
+
+#define DTRACE_MONITOR_WAIT_PROBE(klassOop, thread, millis, mon)    {;}
+#define DTRACE_MONITOR_PROBE(probe, klassOop, thread, mon)          {;}
+
+#endif // ndef DTRACE_ENABLED
+
+// Tunables ...
+// The Knob_* variables are effectively final.  Once set they should
+// never be modified thereafter.  Consider using __read_mostly with GCC.
+
+int ObjectMonitor::Knob_Verbose    = 0 ;
+int ObjectMonitor::Knob_SpinLimit  = 5000 ;    // derived by an external tool -
+static int Knob_LogSpins           = 0 ;       // enable jvmstat tally for spins
+static int Knob_HandOff            = 0 ;
+static int Knob_ReportSettings     = 0 ;
+
+static int Knob_SpinBase           = 0 ;       // Floor AKA SpinMin
+static int Knob_SpinBackOff        = 0 ;       // spin-loop backoff
+static int Knob_CASPenalty         = -1 ;      // Penalty for failed CAS
+static int Knob_OXPenalty          = -1 ;      // Penalty for observed _owner change
+static int Knob_SpinSetSucc        = 1 ;       // spinners set the _succ field
+static int Knob_SpinEarly          = 1 ;
+static int Knob_SuccEnabled        = 1 ;       // futile wake throttling
+static int Knob_SuccRestrict       = 0 ;       // Limit successors + spinners to at-most-one
+static int Knob_MaxSpinners        = -1 ;      // Should be a function of # CPUs
+static int Knob_Bonus              = 100 ;     // spin success bonus
+static int Knob_BonusB             = 100 ;     // spin success bonus
+static int Knob_Penalty            = 200 ;     // spin failure penalty
+static int Knob_Poverty            = 1000 ;
+static int Knob_SpinAfterFutile    = 1 ;       // Spin after returning from park()
+static int Knob_FixedSpin          = 0 ;
+static int Knob_OState             = 3 ;       // Spinner checks thread state of _owner
+static int Knob_UsePause           = 1 ;
+static int Knob_ExitPolicy         = 0 ;
+static int Knob_PreSpin            = 10 ;      // 20-100 likely better
+static int Knob_ResetEvent         = 0 ;
+static int BackOffMask             = 0 ;
+
+static int Knob_FastHSSEC          = 0 ;
+static int Knob_MoveNotifyee       = 2 ;       // notify() - disposition of notifyee
+static int Knob_QMode              = 0 ;       // EntryList-cxq policy - queue discipline
+static volatile int InitDone       = 0 ;
+
+#define TrySpin TrySpin_VaryDuration
+
+// -----------------------------------------------------------------------------
+// Theory of operations -- Monitors lists, thread residency, etc:
+//
+// * A thread acquires ownership of a monitor by successfully
+//   CAS()ing the _owner field from null to non-null.
+//
+// * Invariant: A thread appears on at most one monitor list --
+//   cxq, EntryList or WaitSet -- at any one time.
+//
+// * Contending threads "push" themselves onto the cxq with CAS
+//   and then spin/park.
+//
+// * After a contending thread eventually acquires the lock it must
+//   dequeue itself from either the EntryList or the cxq.
+//
+// * The exiting thread identifies and unparks an "heir presumptive"
+//   tentative successor thread on the EntryList.  Critically, the
+//   exiting thread doesn't unlink the successor thread from the EntryList.
+//   After having been unparked, the wakee will recontend for ownership of
+//   the monitor.   The successor (wakee) will either acquire the lock or
+//   re-park itself.
+//
+//   Succession is provided for by a policy of competitive handoff.
+//   The exiting thread does _not_ grant or pass ownership to the
+//   successor thread.  (This is also referred to as "handoff succession".)
+//   Instead the exiting thread releases ownership and possibly wakes
+//   a successor, so the successor can (re)compete for ownership of the lock.
+//   If the EntryList is empty but the cxq is populated the exiting
+//   thread will drain the cxq into the EntryList.  It does so
+//   by detaching the cxq (installing null with CAS) and folding
+//   the threads from the cxq into the EntryList.  The EntryList is
+//   doubly linked, while the cxq is singly linked because of the
+//   CAS-based "push" used to enqueue recently arrived threads (RATs).
+//
+// * Concurrency invariants:
+//
+//   -- only the monitor owner may access or mutate the EntryList.
+//      The mutex property of the monitor itself protects the EntryList
+//      from concurrent interference.
+//   -- Only the monitor owner may detach the cxq.
+//
+// * The monitor entry list operations avoid locks, but strictly speaking
+//   they're not lock-free.  Enter is lock-free, exit is not.
+//   See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
+//
+// * The cxq can have multiple concurrent "pushers" but only one concurrent
+//   detaching thread.  This mechanism is immune to ABA corruption.
+//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
+//
+// * Taken together, the cxq and the EntryList constitute or form a
+//   single logical queue of threads stalled trying to acquire the lock.
+//   We use two distinct lists to improve the odds of a constant-time
+//   dequeue operation after acquisition (in the ::enter() epilog) and
+//   to reduce heat on the list ends (cf. Michael Scott's "2Q" algorithm).
+//   A key desideratum is to minimize queue & monitor metadata manipulation
+//   that occurs while holding the monitor lock -- that is, we want to
+//   minimize monitor lock holds times.  Note that even a small amount of
+//   fixed spinning will greatly reduce the # of enqueue-dequeue operations
+//   on EntryList|cxq.  That is, spinning relieves contention on the "inner"
+//   locks and monitor metadata.
+//
+//   Cxq points to the set of Recently Arrived Threads attempting entry.
+//   Because we push threads onto _cxq with CAS, the RATs must take the form of
+//   a singly-linked LIFO.  We drain _cxq into the EntryList at unlock-time when
+//   the unlocking thread notices that the EntryList is null but _cxq is non-null.
+//
+//   The EntryList is ordered by the prevailing queue discipline and
+//   can be organized in any convenient fashion, such as a doubly-linked list or
+//   a circular doubly-linked list.  Critically, we want insert and delete operations
+//   to operate in constant-time.  If we need a priority queue then something akin
+//   to Solaris' sleepq would work nicely.  Viz.,
+//   http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
+//   Queue discipline is enforced at ::exit() time, when the unlocking thread
+//   drains the cxq into the EntryList, and orders or reorders the threads on the
+//   EntryList accordingly.
+//
+//   Barring "lock barging", this mechanism provides fair cyclic ordering,
+//   somewhat similar to an elevator-scan.
+//
+// * The monitor synchronization subsystem avoids the use of native
+//   synchronization primitives except for the narrow platform-specific
+//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
+//   the semantics of park-unpark.  Put another way, this monitor implementation
+//   depends only on atomic operations and park-unpark.  The monitor subsystem
+//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
+//   underlying OS manages the READY<->RUN transitions.
+//
+// * Waiting threads reside on the WaitSet list -- wait() puts
+//   the caller onto the WaitSet.
+//
+// * notify() or notifyAll() simply transfers threads from the WaitSet to
+//   either the EntryList or cxq.  Subsequent exit() operations will
+//   unpark the notifyee.  Unparking a notifyee in notify() is inefficient -
+//   it's likely the notifyee would simply impale itself on the lock held
+//   by the notifier.
+//
+// * An interesting alternative is to encode cxq as (List,LockByte) where
+//   the LockByte is 0 iff the monitor is owned.  _owner is simply an auxiliary
+//   variable, like _recursions, in the scheme.  The threads or Events that form
+//   the list would have to be aligned on 256-byte boundaries.  A thread would
+//   try to acquire the lock or enqueue itself with CAS, but exiting threads
+//   could use a 1-0 protocol and simply STB to set the LockByte to 0.
+//   Note that this is *not* word-tearing, but it does presume that full-word
+//   CAS operations remain coherent when intermixed with STB operations.  That's true
+//   on most common processors.
+//
+// * See also http://blogs.sun.com/dave
+
+
+// -----------------------------------------------------------------------------
+// Enter support
+
+bool ObjectMonitor::try_enter(Thread* THREAD) {
+  if (THREAD != _owner) {
+    if (THREAD->is_lock_owned ((address)_owner)) {
+       assert(_recursions == 0, "internal state error");
+       _owner = THREAD ;
+       _recursions = 1 ;
+       OwnerIsThread = 1 ;
+       return true;
+    }
+    if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+      return false;
+    }
+    return true;
+  } else {
+    _recursions++;
+    return true;
+  }
+}
+
+void ATTR ObjectMonitor::enter(TRAPS) {
+  // The following code is ordered to check the most common cases first
+  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
+  Thread * const Self = THREAD ;
+  void * cur ;
+
+  cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
+  if (cur == NULL) {
+     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
+     assert (_recursions == 0   , "invariant") ;
+     assert (_owner      == Self, "invariant") ;
+     // CONSIDER: set or assert OwnerIsThread == 1
+     return ;
+  }
+
+  if (cur == Self) {
+     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
+     _recursions ++ ;
+     return ;
+  }
+
+  if (Self->is_lock_owned ((address)cur)) {
+    assert (_recursions == 0, "internal state error");
+    _recursions = 1 ;
+    // Commute owner from a thread-specific on-stack BasicLock address to
+    // a full-fledged "Thread *".
+    _owner = Self ;
+    OwnerIsThread = 1 ;
+    return ;
+  }
+
+  // We've encountered genuine contention.
+  assert (Self->_Stalled == 0, "invariant") ;
+  Self->_Stalled = intptr_t(this) ;
+
+  // Try one round of spinning *before* enqueueing Self
+  // and before going through the awkward and expensive state
+  // transitions.  The following spin is strictly optional ...
+  // Note that if we acquire the monitor from an initial spin
+  // we forgo posting JVMTI events and firing DTRACE probes.
+  if (Knob_SpinEarly && TrySpin (Self) > 0) {
+     assert (_owner == Self      , "invariant") ;
+     assert (_recursions == 0    , "invariant") ;
+     assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+     Self->_Stalled = 0 ;
+     return ;
+  }
+
+  assert (_owner != Self          , "invariant") ;
+  assert (_succ  != Self          , "invariant") ;
+  assert (Self->is_Java_thread()  , "invariant") ;
+  JavaThread * jt = (JavaThread *) Self ;
+  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
+  assert (jt->thread_state() != _thread_blocked   , "invariant") ;
+  assert (this->object() != NULL  , "invariant") ;
+  assert (_count >= 0, "invariant") ;
+
+  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
+  // Ensure the object-monitor relationship remains stable while there's contention.
+  Atomic::inc_ptr(&_count);
+
+  { // Change java thread status to indicate blocked on monitor enter.
+    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
+
+    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
+    if (JvmtiExport::should_post_monitor_contended_enter()) {
+      JvmtiExport::post_monitor_contended_enter(jt, this);
+    }
+
+    OSThreadContendState osts(Self->osthread());
+    ThreadBlockInVM tbivm(jt);
+
+    Self->set_current_pending_monitor(this);
+
+    // TODO-FIXME: change the following for(;;) loop to straight-line code.
+    for (;;) {
+      jt->set_suspend_equivalent();
+      // cleared by handle_special_suspend_equivalent_condition()
+      // or java_suspend_self()
+
+      EnterI (THREAD) ;
+
+      if (!ExitSuspendEquivalent(jt)) break ;
+
+      //
+      // We have acquired the contended monitor, but while we were
+      // waiting another thread suspended us. We don't want to enter
+      // the monitor while suspended because that would surprise the
+      // thread that suspended us.
+      //
+      _recursions = 0 ;
+      _succ = NULL ;
+      exit (Self) ;
+
+      jt->java_suspend_self();
+    }
+    Self->set_current_pending_monitor(NULL);
+  }
+
+  Atomic::dec_ptr(&_count);
+  assert (_count >= 0, "invariant") ;
+  Self->_Stalled = 0 ;
+
+  // Must either set _recursions = 0 or ASSERT _recursions == 0.
+  assert (_recursions == 0     , "invariant") ;
+  assert (_owner == Self       , "invariant") ;
+  assert (_succ  != Self       , "invariant") ;
+  assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+
+  // The thread -- now the owner -- is back in vm mode.
+  // Report the glorious news via TI,DTrace and jvmstat.
+  // The probe effect is non-trivial.  All the reportage occurs
+  // while we hold the monitor, increasing the length of the critical
+  // section.  Amdahl's parallel speedup law comes vividly into play.
+  //
+  // Another option might be to aggregate the events (thread local or
+  // per-monitor aggregation) and defer reporting until a more opportune
+  // time -- such as next time some thread encounters contention but has
+  // yet to acquire the lock.  While that thread is spinning we could
+  // increment JVMStat counters, etc.
+
+  DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
+  if (JvmtiExport::should_post_monitor_contended_entered()) {
+    JvmtiExport::post_monitor_contended_entered(jt, this);
+  }
+  if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
+     ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
+  }
+}
+
+
+// Caveat: TryLock() is not necessarily serializing if it returns failure.
+// Callers must compensate as needed.
+
+int ObjectMonitor::TryLock (Thread * Self) {
+   for (;;) {
+      void * own = _owner ;
+      if (own != NULL) return 0 ;
+      if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
+         // Either guarantee _recursions == 0 or set _recursions = 0.
+         assert (_recursions == 0, "invariant") ;
+         assert (_owner == Self, "invariant") ;
+         // CONSIDER: set or assert that OwnerIsThread == 1
+         return 1 ;
+      }
+      // The lock had been free momentarily, but we lost the race to the lock.
+      // Interference -- the CAS failed.
+      // We can either return -1 or retry.
+      // Retry doesn't make as much sense because the lock was just acquired.
+      if (true) return -1 ;
+   }
+}
+
+void ATTR ObjectMonitor::EnterI (TRAPS) {
+    Thread * Self = THREAD ;
+    assert (Self->is_Java_thread(), "invariant") ;
+    assert (((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant") ;
+
+    // Try the lock - TATAS
+    if (TryLock (Self) > 0) {
+        assert (_succ != Self              , "invariant") ;
+        assert (_owner == Self             , "invariant") ;
+        assert (_Responsible != Self       , "invariant") ;
+        return ;
+    }
+
+    DeferredInitialize () ;
+
+    // We try one round of spinning *before* enqueueing Self.
+    //
+    // If the _owner is ready but OFFPROC we could use a YieldTo()
+    // operation to donate the remainder of this thread's quantum
+    // to the owner.  This has subtle but beneficial affinity
+    // effects.
+
+    if (TrySpin (Self) > 0) {
+        assert (_owner == Self        , "invariant") ;
+        assert (_succ != Self         , "invariant") ;
+        assert (_Responsible != Self  , "invariant") ;
+        return ;
+    }
+
+    // The Spin failed -- Enqueue and park the thread ...
+    assert (_succ  != Self            , "invariant") ;
+    assert (_owner != Self            , "invariant") ;
+    assert (_Responsible != Self      , "invariant") ;
+
+    // Enqueue "Self" on ObjectMonitor's _cxq.
+    //
+    // Node acts as a proxy for Self.
+    // As an aside, if we were ever to rewrite the synchronization code mostly
+    // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
+    // Java objects.  This would avoid awkward lifecycle and liveness issues,
+    // as well as eliminate a subset of ABA issues.
+    // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
+    //
+
+    ObjectWaiter node(Self) ;
+    Self->_ParkEvent->reset() ;
+    node._prev   = (ObjectWaiter *) 0xBAD ;
+    node.TState  = ObjectWaiter::TS_CXQ ;
+
+    // Push "Self" onto the front of the _cxq.
+    // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
+    // Note that spinning tends to reduce the rate at which threads
+    // enqueue and dequeue on EntryList|cxq.
+    ObjectWaiter * nxt ;
+    for (;;) {
+        node._next = nxt = _cxq ;
+        if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;
+
+        // Interference - the CAS failed because _cxq changed.  Just retry.
+        // As an optional optimization we retry the lock.
+        if (TryLock (Self) > 0) {
+            assert (_succ != Self         , "invariant") ;
+            assert (_owner == Self        , "invariant") ;
+            assert (_Responsible != Self  , "invariant") ;
+            return ;
+        }
+    }
+
+    // Check for cxq|EntryList edge transition to non-null.  This indicates
+    // the onset of contention.  While contention persists exiting threads
+    // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
+    // operations revert to the faster 1-0 mode.  This enter operation may interleave
+    // with (race) a concurrent 1-0 exit operation, resulting in stranding, so we
+    // arrange for one of the contending threads to use a timed park() operation
+    // to detect and recover from the race.  (Stranding is a form of progress failure
+    // where the monitor is unlocked but all the contending threads remain parked.)
+    // That is, at least one of the contending threads will periodically poll _owner.
+    // One of the contending threads will become the designated "Responsible" thread.
+    // The Responsible thread uses a timed park instead of a normal indefinite park
+    // operation -- it periodically wakes and checks for and recovers from potential
+    // strandings admitted by 1-0 exit operations.   We need at most one Responsible
+    // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
+    // be responsible for a monitor.
+    //
+    // Currently, one of the contending threads takes on the added role of "Responsible".
+    // A viable alternative would be to use a dedicated "stranding checker" thread
+    // that periodically iterated over all the threads (or active monitors) and unparked
+    // successors where there was risk of stranding.  This would help eliminate the
+    // timer scalability issues we see on some platforms as we'd only have one thread
+    // -- the checker -- parked on a timer.
+
+    if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
+        // Try to assume the role of responsible thread for the monitor.
+        // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
+        Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
+    }
+
+    // The lock may have been released while this thread was occupied queueing
+    // itself onto _cxq.  To close the race and avoid "stranding" and
+    // progress-liveness failure we must resample-retry _owner before parking.
+    // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
+    // In this case the ST-MEMBAR is accomplished with CAS().
+    //
+    // TODO: Defer all thread state transitions until park-time.
+    // Since state transitions are heavy and inefficient we'd like
+    // to defer the state transitions until absolutely necessary,
+    // and in doing so avoid some transitions ...
+
+    TEVENT (Inflated enter - Contention) ;
+    int nWakeups = 0 ;
+    int RecheckInterval = 1 ;
+
+    for (;;) {
+
+        if (TryLock (Self) > 0) break ;
+        assert (_owner != Self, "invariant") ;
+
+        if ((SyncFlags & 2) && _Responsible == NULL) {
+           Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
+        }
+
+        // park self
+        if (_Responsible == Self || (SyncFlags & 1)) {
+            TEVENT (Inflated enter - park TIMED) ;
+            Self->_ParkEvent->park ((jlong) RecheckInterval) ;
+            // Increase the RecheckInterval, but clamp the value.
+            RecheckInterval *= 8 ;
+            if (RecheckInterval > 1000) RecheckInterval = 1000 ;
+        } else {
+            TEVENT (Inflated enter - park UNTIMED) ;
+            Self->_ParkEvent->park() ;
+        }
+
+        if (TryLock(Self) > 0) break ;
+
+        // The lock is still contested.
+        // Keep a tally of the # of futile wakeups.
+        // Note that the counter is not protected by a lock or updated by atomics.
+        // That is by design - we trade "lossy" counters which are exposed to
+        // races during updates for a lower probe effect.
+        TEVENT (Inflated enter - Futile wakeup) ;
+        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
+           ObjectMonitor::_sync_FutileWakeups->inc() ;
+        }
+        ++ nWakeups ;
+
+        // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
+        // We can defer clearing _succ until after the spin completes
+        // TrySpin() must tolerate being called with _succ == Self.
+        // Try yet another round of adaptive spinning.
+        if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;
+
+        // We can find that we were unpark()ed and redesignated _succ while
+        // we were spinning.  That's harmless.  If we iterate and call park(),
+        // park() will consume the event and return immediately and we'll
+        // just spin again.  This pattern can repeat, leaving _succ to simply
+        // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
+        // Alternately, we can sample fired() here, and if set, forgo spinning
+        // in the next iteration.
+
+        if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
+           Self->_ParkEvent->reset() ;
+           OrderAccess::fence() ;
+        }
+        if (_succ == Self) _succ = NULL ;
+
+        // Invariant: after clearing _succ a thread *must* retry _owner before parking.
+        OrderAccess::fence() ;
+    }
+
+    // Egress :
+    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
+    // Normally we'll find Self on the EntryList .
+    // From the perspective of the lock owner (this thread), the
+    // EntryList is stable and cxq is prepend-only.
+    // The head of cxq is volatile but the interior is stable.
+    // In addition, Self.TState is stable.
+
+    assert (_owner == Self      , "invariant") ;
+    assert (object() != NULL    , "invariant") ;
+    // I'd like to write:
+    //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+    // but as we're at a safepoint that's not safe.
+
+    UnlinkAfterAcquire (Self, &node) ;
+    if (_succ == Self) _succ = NULL ;
+
+    assert (_succ != Self, "invariant") ;
+    if (_Responsible == Self) {
+        _Responsible = NULL ;
+        // Dekker pivot-point.
+        // Consider OrderAccess::storeload() here
+
+        // We may leave threads on cxq|EntryList without a designated
+        // "Responsible" thread.  This is benign.  When this thread subsequently
+        // exits the monitor it can "see" such preexisting "old" threads --
+        // threads that arrived on the cxq|EntryList before the fence, above --
+        // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
+        // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
+        // non-null and elect a new "Responsible" timer thread.
+        //
+        // This thread executes:
+        //    ST Responsible=null; MEMBAR    (in enter epilog - here)
+        //    LD cxq|EntryList               (in subsequent exit)
+        //
+        // Entering threads in the slow/contended path execute:
+        //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
+        //    The (ST cxq; MEMBAR) is accomplished with CAS().
+        //
+        // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
+        // exit operation from floating above the ST Responsible=null.
+        //
+        // In *practice* however, EnterI() is always followed by some atomic
+        // operation such as the decrement of _count in ::enter().  Those atomics
+        // obviate the need for the explicit MEMBAR, above.
+    }
+
+    // We've acquired ownership with CAS().
+    // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
+    // But since the CAS() this thread may have also stored into _succ,
+    // EntryList, cxq or Responsible.  These meta-data updates must be
+    // visible __before this thread subsequently drops the lock.
+    // Consider what could occur if we didn't enforce this constraint --
+    // STs to monitor meta-data and user-data could reorder with (become
+    // visible after) the ST in exit that drops ownership of the lock.
+    // Some other thread could then acquire the lock, but observe inconsistent
+    // or old monitor meta-data and heap data.  That violates the JMM.
+    // To that end, the 1-0 exit() operation must have at least STST|LDST
+    // "release" barrier semantics.  Specifically, there must be at least a
+    // STST|LDST barrier in exit() before the ST of null into _owner that drops
+    // the lock.   The barrier ensures that changes to monitor meta-data and data
+    // protected by the lock will be visible before we release the lock, and
+    // therefore before some other thread (CPU) has a chance to acquire the lock.
+    // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
+    //
+    // Critically, any prior STs to _succ or EntryList must be visible before
+    // the ST of null into _owner in the *subsequent* (following) corresponding
+    // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
+    // execute a serializing instruction.
+
+    if (SyncFlags & 8) {
+       OrderAccess::fence() ;
+    }
+    return ;
+}
+
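+// For reference, the SyncFlags bits consulted in EnterI() above and exit()
+// below (an editorial summary inferred from the uses in this file, not part
+// of this change):
+//   0x01 - use timed park() in the enter/reenter loops
+//   0x02 - re-CAS Self into _Responsible on every loop iteration
+//   0x04 - do not clear _Responsible in exit()
+//   0x08 - issue a trailing fence at the end of EnterI()
+//   0x10 - skip assuming the Responsible role when enqueueing
+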
+// ReenterI() is a specialized inline form of the latter half of the
+// contended slow-path from EnterI().  We use ReenterI() only for
+// monitor reentry in wait().
+//
+// In the future we should reconcile EnterI() and ReenterI(), adding
+// Knob_Reset and Knob_SpinAfterFutile support and restructuring the
+// loop accordingly.
+
+void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
+    assert (Self != NULL                , "invariant") ;
+    assert (SelfNode != NULL            , "invariant") ;
+    assert (SelfNode->_thread == Self   , "invariant") ;
+    assert (_waiters > 0                , "invariant") ;
+    assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
+    assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
+    JavaThread * jt = (JavaThread *) Self ;
+
+    int nWakeups = 0 ;
+    for (;;) {
+        ObjectWaiter::TStates v = SelfNode->TState ;
+        guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
+        assert    (_owner != Self, "invariant") ;
+
+        if (TryLock (Self) > 0) break ;
+        if (TrySpin (Self) > 0) break ;
+
+        TEVENT (Wait Reentry - parking) ;
+
+        // State transition wrappers around park() ...
+        // ReenterI() wisely defers state transitions until
+        // it's clear we must park the thread.
+        {
+           OSThreadContendState osts(Self->osthread());
+           ThreadBlockInVM tbivm(jt);
+
+           // cleared by handle_special_suspend_equivalent_condition()
+           // or java_suspend_self()
+           jt->set_suspend_equivalent();
+           if (SyncFlags & 1) {
+              Self->_ParkEvent->park ((jlong)1000) ;
+           } else {
+              Self->_ParkEvent->park () ;
+           }
+
+           // were we externally suspended while we were waiting?
+           for (;;) {
+              if (!ExitSuspendEquivalent (jt)) break ;
+              if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
+              jt->java_suspend_self();
+              jt->set_suspend_equivalent();
+           }
+        }
+
+        // Try again, but just so we distinguish between futile wakeups and
+        // successful wakeups.  The following test isn't algorithmically
+        // necessary, but it helps us maintain sensible statistics.
+        if (TryLock(Self) > 0) break ;
+
+        // The lock is still contested.
+        // Keep a tally of the # of futile wakeups.
+        // Note that the counter is not protected by a lock or updated by atomics.
+        // That is by design - we trade "lossy" counters which are exposed to
+        // races during updates for a lower probe effect.
+        TEVENT (Wait Reentry - futile wakeup) ;
+        ++ nWakeups ;
+
+        // Assuming this is not a spurious wakeup we'll normally
+        // find that _succ == Self.
+        if (_succ == Self) _succ = NULL ;
+
+        // Invariant: after clearing _succ a contending thread
+        // *must* retry  _owner before parking.
+        OrderAccess::fence() ;
+
+        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
+          ObjectMonitor::_sync_FutileWakeups->inc() ;
+        }
+    }
+
+    // Self has acquired the lock -- Unlink Self from the cxq or EntryList .
+    // Normally we'll find Self on the EntryList.
+    // Unlinking from the EntryList is constant-time and atomic-free.
+    // From the perspective of the lock owner (this thread), the
+    // EntryList is stable and cxq is prepend-only.
+    // The head of cxq is volatile but the interior is stable.
+    // In addition, Self.TState is stable.
+
+    assert (_owner == Self, "invariant") ;
+    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+    UnlinkAfterAcquire (Self, SelfNode) ;
+    if (_succ == Self) _succ = NULL ;
+    assert (_succ != Self, "invariant") ;
+    SelfNode->TState = ObjectWaiter::TS_RUN ;
+    OrderAccess::fence() ;      // see comments at the end of EnterI()
+}
+
+// By convention we unlink a contending thread from the EntryList|cxq immediately
+// after the thread acquires the lock in ::enter().  Equally, we could defer
+// unlinking the thread until ::exit()-time.
+
+void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
+{
+    assert (_owner == Self, "invariant") ;
+    assert (SelfNode->_thread == Self, "invariant") ;
+
+    if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
+        // Normal case: remove Self from the DLL EntryList .
+        // This is a constant-time operation.
+        ObjectWaiter * nxt = SelfNode->_next ;
+        ObjectWaiter * prv = SelfNode->_prev ;
+        if (nxt != NULL) nxt->_prev = prv ;
+        if (prv != NULL) prv->_next = nxt ;
+        if (SelfNode == _EntryList ) _EntryList = nxt ;
+        assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+        assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+        TEVENT (Unlink from EntryList) ;
+    } else {
+        guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ;
+        // Inopportune interleaving -- Self is still on the cxq.
+        // This usually means the enqueue of self raced an exiting thread.
+        // Normally we'll find Self near the front of the cxq, so
+        // dequeueing is typically fast.  If need be we can accelerate
+        // this with some MCS/CHL-like bidirectional list hints and advisory
+        // back-links so dequeueing from the interior will normally operate
+        // in constant-time.
+        // Dequeue Self from either the head (with CAS) or from the interior
+        // with a linear-time scan and normal non-atomic memory operations.
+        // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
+        // and then unlink Self from EntryList.  We have to drain eventually,
+        // so it might as well be now.
+
+        ObjectWaiter * v = _cxq ;
+        assert (v != NULL, "invariant") ;
+        if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
+            // The CAS above can fail from interference IFF a "RAT" arrived.
+            // In that case Self must be in the interior and can no longer be
+            // at the head of cxq.
+            if (v == SelfNode) {
+                assert (_cxq != v, "invariant") ;
+                v = _cxq ;          // CAS above failed - start scan at head of list
+            }
+            ObjectWaiter * p ;
+            ObjectWaiter * q = NULL ;
+            for (p = v ; p != NULL && p != SelfNode; p = p->_next) {
+                q = p ;
+                assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ;
+            }
+            assert (v != SelfNode,  "invariant") ;
+            assert (p == SelfNode,  "Node not found on cxq") ;
+            assert (p != _cxq,      "invariant") ;
+            assert (q != NULL,      "invariant") ;
+            assert (q->_next == p,  "invariant") ;
+            q->_next = p->_next ;
+        }
+        TEVENT (Unlink from cxq) ;
+    }
+
+    // Diagnostic hygiene ...
+    SelfNode->_prev  = (ObjectWaiter *) 0xBAD ;
+    SelfNode->_next  = (ObjectWaiter *) 0xBAD ;
+    SelfNode->TState = ObjectWaiter::TS_RUN ;
+}
+
+// -----------------------------------------------------------------------------
+// Exit support
+//
+// exit()
+// ~~~~~~
+// Note that the collector can't reclaim the objectMonitor or deflate
+// the object out from underneath the thread calling ::exit() as the
+// thread calling ::exit() never transitions to a stable state.
+// This inhibits GC, which in turn inhibits asynchronous (and
+// inopportune) reclamation of "this".
+//
+// We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
+// There's one exception to the claim above, however.  EnterI() can call
+// exit() to drop a lock if the acquirer has been externally suspended.
+// In that case exit() is called with _thread_state as _thread_blocked,
+// but the monitor's _count field is > 0, which inhibits reclamation.
+//
+// 1-0 exit
+// ~~~~~~~~
+// ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
+// the fast-path operators have been optimized so the common ::exit()
+// operation is 1-0.  See i486.ad fast_unlock(), for instance.
+// The code emitted by fast_unlock() elides the usual MEMBAR.  This
+// greatly improves latency -- MEMBAR and CAS having considerable local
+// latency on modern processors -- but at the cost of "stranding".  Absent the
+// MEMBAR, a thread in fast_unlock() can race a thread in the slow
+// ::enter() path, resulting in the entering thread being stranded
+// and a progress-liveness failure.  Stranding is extremely rare.
+// We use timers (timed park operations) & periodic polling to detect
+// and recover from stranding.  Potentially stranded threads periodically
+// wake up and poll the lock.  See the usage of the _Responsible variable.
+//
+// The CAS() in enter provides for safety and exclusion, while the CAS or
+// MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
+// eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
+// We detect and recover from stranding with timers.
+//
+// If a thread transiently strands it'll park until (a) another
+// thread acquires the lock and then drops the lock, at which time the
+// exiting thread will notice and unpark the stranded thread, or, (b)
+// the timer expires.  If the lock is high traffic then the stranding latency
+// will be low due to (a).  If the lock is low traffic then the odds of
+// stranding are lower, although the worst-case stranding latency
+// is longer.  Critically, we don't want to put excessive load in the
+// platform's timer subsystem.  We want to minimize both the timer injection
+// rate (timers created/sec) as well as the number of timers active at
+// any one time.  (more precisely, we want to minimize timer-seconds, which is
+// the integral of the # of active timers at any instant over time).
+// Both impinge on OS scalability.  Given that, at most one thread parked on
+// a monitor will use a timer.
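+//
+// In sketch form (an editorial summary of the above, not part of this
+// change), the two exit protocols differ only in what follows the store
+// that drops the lock:
+//
+//   1-1 exit:  ST _owner = NULL ; MEMBAR ; LD EntryList|cxq ; maybe unpark
+//   1-0 exit:  ST _owner = NULL ;            (no trailing fence)
+//
+// The elided fence is what admits the stranding race that the timed parks
+// above exist to detect and recover from.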
+
+void ATTR ObjectMonitor::exit(TRAPS) {
+   Thread * Self = THREAD ;
+   if (THREAD != _owner) {
+     if (THREAD->is_lock_owned((address) _owner)) {
+       // Transmute _owner from a BasicLock pointer to a Thread address.
+       // We don't need to hold _mutex for this transition.
+       // Non-null to Non-null is safe as long as all readers can
+       // tolerate either flavor.
+       assert (_recursions == 0, "invariant") ;
+       _owner = THREAD ;
+       _recursions = 0 ;
+       OwnerIsThread = 1 ;
+     } else {
+       // NOTE: we need to handle unbalanced monitor enter/exit
+       // in native code by throwing an exception.
+       // TODO: Throw an IllegalMonitorStateException ?
+       TEVENT (Exit - Throw IMSX) ;
+       assert(false, "Non-balanced monitor enter/exit!");
+       if (false) {
+          THROW(vmSymbols::java_lang_IllegalMonitorStateException());
+       }
+       return;
+     }
+   }
+
+   if (_recursions != 0) {
+     _recursions--;        // this is simple recursive enter
+     TEVENT (Inflated exit - recursive) ;
+     return ;
+   }
+
+   // Invariant: after setting Responsible=null a thread must execute
+   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
+   if ((SyncFlags & 4) == 0) {
+      _Responsible = NULL ;
+   }
+
+   for (;;) {
+      assert (THREAD == _owner, "invariant") ;
+
+
+      if (Knob_ExitPolicy == 0) {
+         // release semantics: prior loads and stores from within the critical section
+         // must not float (reorder) past the following store that drops the lock.
+         // On SPARC that requires MEMBAR #loadstore|#storestore.
+         // But of course in TSO #loadstore|#storestore is not required.
+         // I'd like to write one of the following:
+         // A.  OrderAccess::release() ; _owner = NULL
+         // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
+         // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
+         // store into a _dummy variable.  That store is not needed, but can result
+         // in massive wasteful coherency traffic on classic SMP systems.
+         // Instead, I use release_store(), which is implemented as just a simple
+         // ST on x64, x86 and SPARC.
+         OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
+         OrderAccess::storeload() ;                         // See if we need to wake a successor
+         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
+            TEVENT (Inflated exit - simple egress) ;
+            return ;
+         }
+         TEVENT (Inflated exit - complex egress) ;
+
+         // Normally the exiting thread is responsible for ensuring succession,
+         // but if other successors are ready or other entering threads are spinning
+         // then this thread can simply store NULL into _owner and exit without
+         // waking a successor.  The existence of spinners or ready successors
+         // guarantees proper succession (liveness).  Responsibility passes to the
+         // ready or running successors.  The exiting thread delegates the duty.
+         // More precisely, if a successor already exists this thread is absolved
+         // of the responsibility of waking (unparking) one.
+         //
+         // The _succ variable is critical to reducing futile wakeup frequency.
+         // _succ identifies the "heir presumptive" thread that has been made
+         // ready (unparked) but that has not yet run.  We need only one such
+         // successor thread to guarantee progress.
+         // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
+         // section 3.3 "Futile Wakeup Throttling" for details.
+         //
+         // Note that spinners in Enter() also set _succ non-null.
+         // In the current implementation spinners opportunistically set
+         // _succ so that exiting threads might avoid waking a successor.
+         // Another less appealing alternative would be for the exiting thread
+         // to drop the lock and then spin briefly to see if a spinner managed
+         // to acquire the lock.  If so, the exiting thread could exit
+         // immediately without waking a successor, otherwise the exiting
+         // thread would need to dequeue and wake a successor.
+         // (Note that we'd need to make the post-drop spin short, but no
+         // shorter than the worst-case round-trip cache-line migration time.
+         // The dropped lock needs to become visible to the spinner, and then
+         // the acquisition of the lock by the spinner must become visible to
+         // the exiting thread).
+         //
+
+         // It appears that an heir-presumptive (successor) must be made ready.
+         // Only the current lock owner can manipulate the EntryList or
+         // drain _cxq, so we need to reacquire the lock.  If we fail
+         // to reacquire the lock the responsibility for ensuring succession
+         // falls to the new owner.
+         //
+         if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+            return ;
+         }
+         TEVENT (Exit - Reacquired) ;
+      } else {
+         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
+            OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
+            OrderAccess::storeload() ;
+            // Ratify the previously observed values.
+            if (_cxq == NULL || _succ != NULL) {
+                TEVENT (Inflated exit - simple egress) ;
+                return ;
+            }
+
+            // inopportune interleaving -- the exiting thread (this thread)
+            // in the fast-exit path raced an entering thread in the slow-enter
+            // path.
+            // We have two choices:
+            // A.  Try to reacquire the lock.
+            //     If the CAS() fails return immediately, otherwise
+            //     we either restart/rerun the exit operation, or simply
+            //     fall-through into the code below which wakes a successor.
+            // B.  If the elements forming the EntryList|cxq are TSM
+            //     we could simply unpark() the lead thread and return
+            //     without having set _succ.
+            if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+               TEVENT (Inflated exit - reacquire failed) ;
+               return ;
+            }
+            TEVENT (Inflated exit - reacquire succeeded) ;
+         } else {
+            TEVENT (Inflated exit - complex egress) ;
+         }
+      }
+
+      guarantee (_owner == THREAD, "invariant") ;
+
+      ObjectWaiter * w = NULL ;
+      int QMode = Knob_QMode ;
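+      // Summary of the Knob_QMode dispositions implemented below:
+      //   QMode == 2 : wake the thread at the head of the cxq directly.
+      //   QMode == 3 : drain the cxq and append it to the tail of the EntryList.
+      //   QMode == 4 : drain the cxq and prepend it to the head of the EntryList.
+      //   QMode == 1 : when the EntryList is empty, drain the cxq into it,
+      //                reversing the order in the process.
+      //   QMode == 0 : as for QMode == 1, but preserving the cxq's LIFO order.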
+
+      if (QMode == 2 && _cxq != NULL) {
+          // QMode == 2 : cxq has precedence over EntryList.
+          // Try to directly wake a successor from the cxq.
+          // If successful, the successor will need to unlink itself from cxq.
+          w = _cxq ;
+          assert (w != NULL, "invariant") ;
+          assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
+          ExitEpilog (Self, w) ;
+          return ;
+      }
+
+      if (QMode == 3 && _cxq != NULL) {
+          // Aggressively drain cxq into EntryList at the first opportunity.
+          // This policy ensures that recently-run threads live at the head of EntryList.
+          // Drain _cxq into EntryList - bulk transfer.
+          // First, detach _cxq.
+          // The following loop is tantamount to: w = swap (&cxq, NULL)
+          w = _cxq ;
+          for (;;) {
+             assert (w != NULL, "Invariant") ;
+             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
+             if (u == w) break ;
+             w = u ;
+          }
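+          // The loop above is equivalent to an unconditional atomic exchange;
+          // a sketch, assuming Atomic::xchg_ptr is available here:
+          //   w = (ObjectWaiter *) Atomic::xchg_ptr (NULL, &_cxq) ;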
+          assert (w != NULL              , "invariant") ;
+
+          ObjectWaiter * q = NULL ;
+          ObjectWaiter * p ;
+          for (p = w ; p != NULL ; p = p->_next) {
+              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
+              p->TState = ObjectWaiter::TS_ENTER ;
+              p->_prev = q ;
+              q = p ;
+          }
+
+          // Append the RATs to the EntryList
+          // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
+          ObjectWaiter * Tail ;
+          for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
+          if (Tail == NULL) {
+              _EntryList = w ;
+          } else {
+              Tail->_next = w ;
+              w->_prev = Tail ;
+          }
+
+          // Fall thru into code that tries to wake a successor from EntryList
+      }
+
+      if (QMode == 4 && _cxq != NULL) {
+          // Aggressively drain cxq into EntryList at the first opportunity.
+          // This policy ensures that recently-run threads live at the head of EntryList.
+
+          // Drain _cxq into EntryList - bulk transfer.
+          // First, detach _cxq.
+          // The following loop is tantamount to: w = swap (&cxq, NULL)
+          w = _cxq ;
+          for (;;) {
+             assert (w != NULL, "Invariant") ;
+             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
+             if (u == w) break ;
+             w = u ;
+          }
+          assert (w != NULL              , "invariant") ;
+
+          ObjectWaiter * q = NULL ;
+          ObjectWaiter * p ;
+          for (p = w ; p != NULL ; p = p->_next) {
+              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
+              p->TState = ObjectWaiter::TS_ENTER ;
+              p->_prev = q ;
+              q = p ;
+          }
+
+          // Prepend the RATs to the EntryList
+          if (_EntryList != NULL) {
+              q->_next = _EntryList ;
+              _EntryList->_prev = q ;
+          }
+          _EntryList = w ;
+
+          // Fall thru into code that tries to wake a successor from EntryList
+      }
+
+      w = _EntryList  ;
+      if (w != NULL) {
+          // I'd like to write: guarantee (w->_thread != Self).
+          // But in practice an exiting thread may find itself on the EntryList.
+          // Let's say thread T1 calls O.wait().  wait() enqueues T1 on O's waitset and
+          // then calls exit().  exit() releases the lock by setting O._owner to NULL.
+          // Let's say T1 then stalls immediately after the ST of null into _owner.
+          // T2 acquires O and calls O.notify().  The notify() operation moves T1
+          // from O's waitset to O's EntryList.  T2 then releases the lock "O".
+          // T1 now resumes just after its ST of null into _owner, above.  T1
+          // notices that the EntryList is populated, so it reacquires the lock
+          // and then finds itself on the EntryList.
+          // Given all that, we have to tolerate the circumstance where "w" is
+          // associated with Self.
+          assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+          ExitEpilog (Self, w) ;
+          return ;
+      }
+
+      // If we find that both _cxq and EntryList are null then just
+      // re-run the exit protocol from the top.
+      w = _cxq ;
+      if (w == NULL) continue ;
+
+      // Drain _cxq into EntryList - bulk transfer.
+      // First, detach _cxq.
+      // The following loop is tantamount to: w = swap (&cxq, NULL)
+      for (;;) {
+          assert (w != NULL, "Invariant") ;
+          ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
+          if (u == w) break ;
+          w = u ;
+      }
+      TEVENT (Inflated exit - drain cxq into EntryList) ;
+
+      assert (w != NULL              , "invariant") ;
+      assert (_EntryList  == NULL    , "invariant") ;
+
+      // Convert the LIFO SLL anchored by _cxq into a DLL.
+      // The list reorganization step operates in O(LENGTH(w)) time.
+      // It's critical that this step operate quickly as
+      // "Self" still holds the outer-lock, restricting parallelism
+      // and effectively lengthening the critical section.
+      // Invariant: s chases t chases u.
+      // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
+      // we have faster access to the tail.
+
+      if (QMode == 1) {
+         // QMode == 1 : drain cxq to EntryList, reversing the order of the
+         // list in the process.
+         ObjectWaiter * s = NULL ;
+         ObjectWaiter * t = w ;
+         ObjectWaiter * u = NULL ;
+         while (t != NULL) {
+             guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
+             t->TState = ObjectWaiter::TS_ENTER ;
+             u = t->_next ;
+             t->_prev = u ;
+             t->_next = s ;
+             s = t;
+             t = u ;
+         }
+         _EntryList  = s ;
+         assert (s != NULL, "invariant") ;
+      } else {
+         // QMode == 0 or QMode == 2
+         _EntryList = w ;
+         ObjectWaiter * q = NULL ;
+         ObjectWaiter * p ;
+         for (p = w ; p != NULL ; p = p->_next) {
+             guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
+             p->TState = ObjectWaiter::TS_ENTER ;
+             p->_prev = q ;
+             q = p ;
+         }
+      }
+
+      // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
+      // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
+
+      // See if we can abdicate to a spinner instead of waking a thread.
+      // A primary goal of the implementation is to reduce the
+      // context-switch rate.
+      if (_succ != NULL) continue;
+
+      w = _EntryList  ;
+      if (w != NULL) {
+          guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+          ExitEpilog (Self, w) ;
+          return ;
+      }
+   }
+}
+
+// ExitSuspendEquivalent:
+// A faster alternative to handle_special_suspend_equivalent_condition()
+//
+// handle_special_suspend_equivalent_condition() unconditionally
+// acquires the SR_lock.  On some platforms uncontended MutexLocker()
+// operations have high latency.  Note that in ::enter() we call HSSEC
+// while holding the monitor, so we effectively lengthen the critical sections.
+//
+// There are a number of possible solutions:
+//
+// A.  To ameliorate the problem we might also defer state transitions
+//     to as late as possible -- just prior to parking.
+//     Given that, we'd call HSSEC after having returned from park(),
+//     but before attempting to acquire the monitor.  This is only a
+//     partial solution.  It avoids calling HSSEC while holding the
+//     monitor (good), but it still increases successor reacquisition latency --
+//     the interval between unparking a successor and the time the successor
+//     resumes and retries the lock.  See ReenterI(), which defers state transitions.
+//     If we use this technique we can also avoid the EnterI()-exit() loop
+//     in ::enter() where we iteratively drop the lock and then attempt
+//     to reacquire it after suspending.
+//
+// B.  In the future we might fold all the suspend bits into a
+//     composite per-thread suspend flag and then update it with CAS().
+//     Alternately, a Dekker-like mechanism with multiple variables
+//     would suffice:
+//       ST Self->_suspend_equivalent = false
+//       MEMBAR
+//       LD Self->_suspend_flags
+//
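+// A sketch of the Dekker-like fast path from option (B) above, mirroring the
+// Mode == 2 path in the implementation below.  Illustration only --
+// ExitSuspendEquivalentDekker is hypothetical and not compiled:
+#if 0
+bool ObjectMonitor::ExitSuspendEquivalentDekker (JavaThread * jSelf) {
+   jSelf->clear_suspend_equivalent() ;    // ST Self->_suspend_equivalent = false
+   OrderAccess::fence() ;                 // MEMBAR
+   if (!jSelf->is_external_suspend()) {   // LD Self->_suspend_flags
+      return false ;                      // no suspension pending
+   }
+   jSelf->set_suspend_equivalent() ;      // raced a suspension -- take slow path
+   return jSelf->handle_special_suspend_equivalent_condition() ;
+}
+#endif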
+
+
+bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
+   int Mode = Knob_FastHSSEC ;
+   if (Mode && !jSelf->is_external_suspend()) {
+      assert (jSelf->is_suspend_equivalent(), "invariant") ;
+      jSelf->clear_suspend_equivalent() ;
+      if (2 == Mode) OrderAccess::storeload() ;
+      if (!jSelf->is_external_suspend()) return false ;
+      // We raced a suspension -- fall thru into the slow path
+      TEVENT (ExitSuspendEquivalent - raced) ;
+      jSelf->set_suspend_equivalent() ;
+   }
+   return jSelf->handle_special_suspend_equivalent_condition() ;
+}
+
+
+void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
+   assert (_owner == Self, "invariant") ;
+
+   // Exit protocol:
+   // 1. ST _succ = wakee
+   // 2. membar #loadstore|#storestore;
+   // 3. ST _owner = NULL
+   // 4. unpark(wakee)
+
+   _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ;
+   ParkEvent * Trigger = Wakee->_event ;
+
+   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
+   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
+   // out-of-scope (non-extant).
+   Wakee  = NULL ;
+
+   // Drop the lock
+   OrderAccess::release_store_ptr (&_owner, NULL) ;
+   OrderAccess::fence() ;                               // ST _owner vs LD in unpark()
+
+   if (SafepointSynchronize::do_call_back()) {
+      TEVENT (unpark before SAFEPOINT) ;
+   }
+
+   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
+   Trigger->unpark() ;
+
+   // Maintain stats and report events to JVMTI
+   if (ObjectMonitor::_sync_Parks != NULL) {
+      ObjectMonitor::_sync_Parks->inc() ;
+   }
+}
+
+
+// -----------------------------------------------------------------------------
+// Class Loader deadlock handling.
+//
+// complete_exit exits a lock returning recursion count
+// complete_exit/reenter operate as a wait without waiting
+// complete_exit requires an inflated monitor
+// The _owner field is not always the Thread addr even with an
+// inflated monitor, e.g. the monitor can be inflated by a non-owning
+// thread due to contention.
+intptr_t ObjectMonitor::complete_exit(TRAPS) {
+   Thread * const Self = THREAD;
+   assert(Self->is_Java_thread(), "Must be Java thread!");
+   JavaThread *jt = (JavaThread *)THREAD;
+
+   DeferredInitialize();
+
+   if (THREAD != _owner) {
+    if (THREAD->is_lock_owned ((address)_owner)) {
+       assert(_recursions == 0, "internal state error");
+       _owner = THREAD ;   /* Convert from basiclock addr to Thread addr */
+       _recursions = 0 ;
+       OwnerIsThread = 1 ;
+    }
+   }
+
+   guarantee(Self == _owner, "complete_exit not owner");
+   intptr_t save = _recursions; // record the old recursion count
+   _recursions = 0;        // set the recursion level to be 0
+   exit (Self) ;           // exit the monitor
+   guarantee (_owner != Self, "invariant");
+   return save;
+}
+
+// reenter() enters a lock and sets recursion count
+// complete_exit/reenter operate as a wait without waiting
+void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
+   Thread * const Self = THREAD;
+   assert(Self->is_Java_thread(), "Must be Java thread!");
+   JavaThread *jt = (JavaThread *)THREAD;
+
+   guarantee(_owner != Self, "reenter already owner");
+   enter (THREAD);       // enter the monitor
+   guarantee (_recursions == 0, "reenter recursion");
+   _recursions = recursions;
+   return;
+}
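+
+// A sketch of the intended complete_exit()/reenter() pairing.  Illustration
+// only -- the actual call sites live elsewhere in the VM:
+#if 0
+   intptr_t recursions = monitor->complete_exit (THREAD) ; // fully release
+   // ... perform the potentially blocking operation without holding the
+   // monitor, sidestepping the class loader deadlock described above ...
+   monitor->reenter (recursions, THREAD) ;                 // restore ownership
+#endif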
+
+
+// -----------------------------------------------------------------------------
+// A macro is used below because there may already be a pending
+// exception which should not abort the execution of the routines
+// which use this (which is why we don't put this into check_slow and
+// call it with a CHECK argument).
+
+#define CHECK_OWNER()                                                             \
+  do {                                                                            \
+    if (THREAD != _owner) {                                                       \
+      if (THREAD->is_lock_owned((address) _owner)) {                              \
+        _owner = THREAD ;  /* Convert from basiclock addr to Thread addr */       \
+        _recursions = 0;                                                          \
+        OwnerIsThread = 1 ;                                                       \
+      } else {                                                                    \
+        TEVENT (Throw IMSX) ;                                                     \
+        THROW(vmSymbols::java_lang_IllegalMonitorStateException());               \
+      }                                                                           \
+    }                                                                             \
+  } while (false)
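+
+// Note that THROW expands to include a return, so CHECK_OWNER() also returns
+// from the enclosing (void) function when the calling thread is not the owner.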
+
+// check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
+// TODO-FIXME: remove check_slow() -- it's likely dead.
+
+void ObjectMonitor::check_slow(TRAPS) {
+  TEVENT (check_slow - throw IMSX) ;
+  assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
+  THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
+}
+
+static int Adjust (volatile int * adr, int dx) {
+  int v ;
+  for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
+  return v ;
+}
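+
+// Adjust() is an atomic fetch-and-add: it atomically adds dx to *adr and
+// returns the value observed just before the successful update.  A
+// hypothetical call, for illustration only:
+//   int prior = Adjust (&SomeVolatileIntCounter, 1) ;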
+// -----------------------------------------------------------------------------
+// Wait/Notify/NotifyAll
+//
+// Note: a subset of changes to ObjectMonitor::wait()
+// will need to be replicated in complete_exit above
+void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
+   Thread * const Self = THREAD ;
+   assert(Self->is_Java_thread(), "Must be Java thread!");
+   JavaThread *jt = (JavaThread *)THREAD;
+
+   DeferredInitialize () ;
+
+   // Throw IMSX or IEX.
+   CHECK_OWNER();
+
+   // check for a pending interrupt
+   if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
+     // post monitor waited event.  Note that this is past-tense, we are done waiting.
+     if (JvmtiExport::should_post_monitor_waited()) {
+        // Note: 'false' is passed here because this wait ended due to a
+        // thread interrupt rather than a timeout.
+        JvmtiExport::post_monitor_waited(jt, this, false);
+     }
+     TEVENT (Wait - Throw IEX) ;
+     THROW(vmSymbols::java_lang_InterruptedException());
+     return ;
+   }
+   TEVENT (Wait) ;
+
+   assert (Self->_Stalled == 0, "invariant") ;
+   Self->_Stalled = intptr_t(this) ;
+   jt->set_current_waiting_monitor(this);
+
+   // create a node to be put into the queue
+   // Critically, after we reset() the event but prior to park(), we must check
+   // for a pending interrupt.
+   ObjectWaiter node(Self);
+   node.TState = ObjectWaiter::TS_WAIT ;
+   Self->_ParkEvent->reset() ;
+   OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
+
+   // Enter the waiting queue, which is a circular doubly-linked list in this case
+   // but could be a priority queue or any other suitable data structure.
+   // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
+   // by the owner of the monitor *except* in the case where park()
+   // returns because of a timeout or interrupt.  Contention is exceptionally rare
+   // so we use a simple spin-lock instead of a heavier-weight blocking lock.
+
+   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
+   AddWaiter (&node) ;
+   Thread::SpinRelease (&_WaitSetLock) ;
+
+   if ((SyncFlags & 4) == 0) {
+      _Responsible = NULL ;
+   }
+   intptr_t save = _recursions; // record the old recursion count
+   _waiters++;                  // increment the number of waiters
+   _recursions = 0;             // set the recursion level to 0
+   exit (Self) ;                    // exit the monitor
+   guarantee (_owner != Self, "invariant") ;
+
+   // As soon as the ObjectMonitor's ownership is dropped in the exit()
+   // call above, another thread can enter() the ObjectMonitor, do the
+   // notify(), and exit() the ObjectMonitor. If the other thread's
+   // exit() call chooses this thread as the successor and the unpark()
+   // call happens to occur while this thread is posting a
+   // MONITOR_CONTENDED_EXIT event, then we run the risk of the event
+   // handler using RawMonitors and consuming the unpark().
+   //
+   // To avoid the problem, we re-post the event. This does no harm
+   // even if the original unpark() was not consumed because we are the
+   // chosen successor for this monitor.
+   if (node._notified != 0 && _succ == Self) {
+      node._event->unpark();
+   }
+
+   // The thread is on the WaitSet list - now park() it.
+   // On MP systems it's conceivable that a brief spin before we park
+   // could be profitable.
+   //
+   // TODO-FIXME: change the following logic to a loop of the form
+   //   while (!timeout && !interrupted && _notified == 0) park()
+
+   int ret = OS_OK ;
+   int WasNotified = 0 ;
+   { // State transition wrappers
+     OSThread* osthread = Self->osthread();
+     OSThreadWaitState osts(osthread, true);
+     {
+       ThreadBlockInVM tbivm(jt);
+       // Thread is in thread_blocked state and oop access is unsafe.
+       jt->set_suspend_equivalent();
+
+       if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
+           // Intentionally empty
+       } else
+       if (node._notified == 0) {
+         if (millis <= 0) {
+            Self->_ParkEvent->park () ;
+         } else {
+            ret = Self->_ParkEvent->park (millis) ;
+         }
+       }
+
+       // were we externally suspended while we were waiting?
+       if (ExitSuspendEquivalent (jt)) {
+          // TODO-FIXME: add -- if succ == Self then succ = null.
+          jt->java_suspend_self();
+       }
+
+     } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
+
+
+     // Node may be on the WaitSet, the EntryList (or cxq), or in transition
+     // from the WaitSet to the EntryList.
+     // See if we need to remove Node from the WaitSet.
+     // We use double-checked locking to avoid grabbing _WaitSetLock
+     // if the thread is not on the wait queue.
+     //
+     // Note that we don't need a fence before the fetch of TState.
+     // In the worst case we'll fetch an old/stale value of TS_WAIT previously
+     // written by this thread.  (Perhaps the fetch might even be satisfied
+     // by a look-aside into the processor's own store buffer, although given
+     // the length of the code path between the prior ST and this load that's
+     // highly unlikely).  If the following LD fetches a stale TS_WAIT value
+     // then we'll acquire the lock and then re-fetch a fresh TState value.
+     // That is, we fail toward safety.
+
+     if (node.TState == ObjectWaiter::TS_WAIT) {
+         Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
+         if (node.TState == ObjectWaiter::TS_WAIT) {
+            DequeueSpecificWaiter (&node) ;       // unlink from WaitSet
+            assert(node._notified == 0, "invariant");
+            node.TState = ObjectWaiter::TS_RUN ;
+         }
+         Thread::SpinRelease (&_WaitSetLock) ;
+     }
+
+     // The thread is now either off-list (TS_RUN),
+     // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
+     // The Node's TState variable is stable from the perspective of this thread.
+     // No other threads will asynchronously modify TState.
+     guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
+     OrderAccess::loadload() ;
+     if (_succ == Self) _succ = NULL ;
+     WasNotified = node._notified ;
+
+     // Reentry phase -- reacquire the monitor.
+     // re-enter contended monitor after object.wait().
+     // retain OBJECT_WAIT state until re-enter successfully completes
+     // Thread state is thread_in_vm and oop access is again safe,
+     // although the raw address of the object may have changed.
+     // (Don't cache naked oops over safepoints, of course).
+
+     // post monitor waited event. Note that this is past-tense, we are done waiting.
+     if (JvmtiExport::should_post_monitor_waited()) {
+       JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
+     }
+     OrderAccess::fence() ;
+
+     assert (Self->_Stalled != 0, "invariant") ;
+     Self->_Stalled = 0 ;
+
+     assert (_owner != Self, "invariant") ;
+     ObjectWaiter::TStates v = node.TState ;
+     if (v == ObjectWaiter::TS_RUN) {
+         enter (Self) ;
+     } else {
+         guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
+         ReenterI (Self, &node) ;
+         node.wait_reenter_end(this);
+     }
+
+     // Self has reacquired the lock.
+     // Lifecycle - the node representing Self must not appear on any queues.
+     // Node is about to go out-of-scope, but even if it were immortal we wouldn't
+     // want residual elements associated with this thread left on any lists.
+     guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
+     assert    (_owner == Self, "invariant") ;
+     assert    (_succ != Self , "invariant") ;
+   } // OSThreadWaitState()
+
+   jt->set_current_waiting_monitor(NULL);
+
+   guarantee (_recursions == 0, "invariant") ;
+   _recursions = save;     // restore the old recursion count
+   _waiters--;             // decrement the number of waiters
+
+   // Verify a few postconditions
+   assert (_owner == Self       , "invariant") ;
+   assert (_succ  != Self       , "invariant") ;
+   assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+
+   if (SyncFlags & 32) {
+      OrderAccess::fence() ;
+   }
+
+   // check if the notification happened
+   if (!WasNotified) {
+     // no, it could be timeout or Thread.interrupt() or both
+     // check for interrupt event, otherwise it is timeout
+     if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
+       TEVENT (Wait - throw IEX from epilog) ;
+       THROW(vmSymbols::java_lang_InterruptedException());
+     }
+   }
+
+   // NOTE: A spurious wakeup will be treated as a timeout.
+   // Monitor notify has precedence over thread interrupt.
+}
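+
+// A sketch of the loop form suggested by the TODO-FIXME in wait() above.
+// Illustration only -- the termination predicates are named for exposition:
+#if 0
+   while (!timed_out && !interrupted && node._notified == 0) {
+      if (millis <= 0) {
+         Self->_ParkEvent->park () ;
+      } else {
+         ret = Self->_ParkEvent->park (millis) ;
+      }
+      // Recompute timed_out and interrupted here so a spurious wakeup simply
+      // re-parks instead of being reported as a timeout.
+   }
+#endif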
+
+
+// Consider:
+// If the lock is cool (cxq == null && succ == null) and we're on an MP system
+// then instead of transferring a thread from the WaitSet to the EntryList
+// we might just dequeue a thread from the WaitSet and directly unpark() it.
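+
+// Summary of the Knob_MoveNotifyee dispositions implemented in notify() and
+// notifyAll() below:
+//   Policy == 0 : prepend the notified thread to the EntryList.
+//   Policy == 1 : append the notified thread to the EntryList.
+//   Policy == 2 : prepend the notified thread to the cxq.
+//   Policy == 3 : append the notified thread to the cxq.
+//   otherwise   : mark the notified thread TS_RUN and unpark() it directly.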
+
+void ObjectMonitor::notify(TRAPS) {
+  CHECK_OWNER();
+  if (_WaitSet == NULL) {
+     TEVENT (Empty-Notify) ;
+     return ;
+  }
+  DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
+
+  int Policy = Knob_MoveNotifyee ;
+
+  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
+  ObjectWaiter * iterator = DequeueWaiter() ;
+  if (iterator != NULL) {
+     TEVENT (Notify1 - Transfer) ;
+     guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
+     guarantee (iterator->_notified == 0, "invariant") ;
+     if (Policy != 4) {
+        iterator->TState = ObjectWaiter::TS_ENTER ;
+     }
+     iterator->_notified = 1 ;
+
+     ObjectWaiter * List = _EntryList ;
+     if (List != NULL) {
+        assert (List->_prev == NULL, "invariant") ;
+        assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+        assert (List != iterator, "invariant") ;
+     }
+
+     if (Policy == 0) {       // prepend to EntryList
+         if (List == NULL) {
+             iterator->_next = iterator->_prev = NULL ;
+             _EntryList = iterator ;
+         } else {
+             List->_prev = iterator ;
+             iterator->_next = List ;
+             iterator->_prev = NULL ;
+             _EntryList = iterator ;
+         }
+     } else
+     if (Policy == 1) {      // append to EntryList
+         if (List == NULL) {
+             iterator->_next = iterator->_prev = NULL ;
+             _EntryList = iterator ;
+         } else {
+            // CONSIDER:  finding the tail currently requires a linear-time walk of
+            // the EntryList.  We can make tail access constant-time by converting to
+            // a CDLL instead of using our current DLL.
+            ObjectWaiter * Tail ;
+            for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
+            assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
+            Tail->_next = iterator ;
+            iterator->_prev = Tail ;
+            iterator->_next = NULL ;
+         }
+     } else
+     if (Policy == 2) {      // prepend to cxq
+         if (List == NULL) {
+             iterator->_next = iterator->_prev = NULL ;
+             _EntryList = iterator ;
+         } else {
+            iterator->TState = ObjectWaiter::TS_CXQ ;
+            for (;;) {
+                ObjectWaiter * Front = _cxq ;
+                iterator->_next = Front ;
+                if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
+                    break ;
+                }
+            }
+         }
+     } else
+     if (Policy == 3) {      // append to cxq
+        iterator->TState = ObjectWaiter::TS_CXQ ;
+        for (;;) {
+            ObjectWaiter * Tail ;
+            Tail = _cxq ;
+            if (Tail == NULL) {
+                iterator->_next = NULL ;
+                if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
+                   break ;
+                }
+            } else {
+                while (Tail->_next != NULL) Tail = Tail->_next ;
+                Tail->_next = iterator ;
+                iterator->_prev = Tail ;
+                iterator->_next = NULL ;
+                break ;
+            }
+        }
+     } else {
+        ParkEvent * ev = iterator->_event ;
+        iterator->TState = ObjectWaiter::TS_RUN ;
+        OrderAccess::fence() ;
+        ev->unpark() ;
+     }
+
+     if (Policy < 4) {
+       iterator->wait_reenter_begin(this);
+     }
+
+     // _WaitSetLock protects the wait queue, not the EntryList.  We could
+     // move the add-to-EntryList operation, above, outside the critical section
+     // protected by _WaitSetLock.  In practice that's not useful.  With the
+     // exception of  wait() timeouts and interrupts the monitor owner
+     // is the only thread that grabs _WaitSetLock.  There's almost no contention
+     // on _WaitSetLock so it's not profitable to reduce the length of the
+     // critical section.
+  }
+
+  Thread::SpinRelease (&_WaitSetLock) ;
+
+  if (iterator != NULL && ObjectMonitor::_sync_Notifications != NULL) {
+     ObjectMonitor::_sync_Notifications->inc() ;
+  }
+}
+
+
+void ObjectMonitor::notifyAll(TRAPS) {
+  CHECK_OWNER();
+  ObjectWaiter* iterator;
+  if (_WaitSet == NULL) {
+      TEVENT (Empty-NotifyAll) ;
+      return ;
+  }
+  DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
+
+  int Policy = Knob_MoveNotifyee ;
+  int Tally = 0 ;
+  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
+
+  for (;;) {
+     iterator = DequeueWaiter () ;
+     if (iterator == NULL) break ;
+     TEVENT (NotifyAll - Transfer1) ;
+     ++Tally ;
+
+     // Disposition - what might we do with iterator ?
+     // a.  add it directly to the EntryList - either tail or head.
+     // b.  push it onto the front of the _cxq.
+     // For now we use (a).
+
+     guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
+     guarantee (iterator->_notified == 0, "invariant") ;
+     iterator->_notified = 1 ;
+     if (Policy != 4) {
+        iterator->TState = ObjectWaiter::TS_ENTER ;
+     }
+
+     ObjectWaiter * List = _EntryList ;
+     if (List != NULL) {
+        assert (List->_prev == NULL, "invariant") ;
+        assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+        assert (List != iterator, "invariant") ;
+     }
+
+     if (Policy == 0) {       // prepend to EntryList
+         if (List == NULL) {
+             iterator->_next = iterator->_prev = NULL ;
+             _EntryList = iterator ;
+         } else {
+             List->_prev = iterator ;
+             iterator->_next = List ;
+             iterator->_prev = NULL ;
+             _EntryList = iterator ;
+         }
+     } else
+     if (Policy == 1) {      // append to EntryList
+         if (List == NULL) {
+             iterator->_next = iterator->_prev = NULL ;
+             _EntryList = iterator ;
+         } else {
+            // CONSIDER:  finding the tail currently requires a linear-time walk of
+            // the EntryList.  We can make tail access constant-time by converting to
+            // a CDLL instead of using our current DLL.
+            ObjectWaiter * Tail ;
+            for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
+            assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
+            Tail->_next = iterator ;