changeset 5194:ba7f04720557

Merge
author asaha
date Wed, 24 Mar 2010 14:16:57 -0700
parents 44feb685d646 c0e0c9a9d338
children dcc229e35a4e
files hotspot/src/share/vm/gc_implementation/g1/ptrQueue.inline.hpp jdk/make/java/redist/FILES.gmk jdk/make/java/text/FILES_java.gmk jdk/make/sun/nio/FILES_java.gmk jdk/src/share/classes/java/beans/Statement.java jdk/src/share/classes/java/util/zip/Deflater.java jdk/src/share/classes/javax/swing/plaf/synth/DefaultMenuLayout.java jdk/src/share/classes/sun/awt/ComponentAccessor.java jdk/src/share/classes/sun/awt/WindowAccessor.java jdk/src/share/classes/sun/dyn/util/BytecodeSignature.java jdk/src/share/classes/sun/security/provider/IdentityDatabase.java jdk/src/share/classes/sun/security/provider/PolicyFile.java jdk/src/share/classes/sun/security/provider/SystemIdentity.java jdk/src/share/classes/sun/security/provider/SystemSigner.java jdk/src/share/classes/sun/security/x509/X500Signer.java jdk/src/share/classes/sun/security/x509/X509Cert.java jdk/src/share/classes/sun/swing/plaf/synth/SynthUI.java jdk/src/share/classes/sun/tools/jar/JarVerifierStream.java jdk/src/solaris/classes/sun/nio/ch/SctpSocketDispatcher.java jdk/test/java/awt/regtesthelpers/process/ProcessCommunicator.java jdk/test/java/net/Socket/FDClose.java langtools/src/share/classes/com/sun/tools/classfile/ModuleExportTable_attribute.java langtools/src/share/classes/com/sun/tools/classfile/ModuleMemberTable_attribute.java langtools/src/share/classes/com/sun/tools/classfile/Module_attribute.java langtools/src/share/classes/com/sun/tools/javac/file/CloseableURLClassLoader.java
diffstat 1502 files changed, 94108 insertions(+), 17688 deletions(-)
--- a/.hgignore	Sat Mar 06 03:37:53 2010 +0300
+++ b/.hgignore	Wed Mar 24 14:16:57 2010 -0700
@@ -1,3 +1,3 @@
 ^build/
 ^dist/
-^nbproject/private/
+/nbproject/private/
--- a/.hgtags	Sat Mar 06 03:37:53 2010 +0300
+++ b/.hgtags	Wed Mar 24 14:16:57 2010 -0700
@@ -50,3 +50,13 @@
 ce74bd35ce948d629a356e168797f44b593b1578 jdk7-b73
 4e7661eaa211e186674f6cbefec4aef1144ac2a0 jdk7-b74
 946518568340c4e511549318f19f47f06b7f5f9b jdk7-b75
+09e0b33177af2b98a03c9ca19eedf61440bd1cf6 jdk7-b76
+1d0121b741f029dc4b828e4b36ba6fda92907dd7 jdk7-b77
+4061c66ba1af1a2e27c2c839ba887407dd3ce050 jdk7-b78
+e9c98378f6b9256c0595ef2985ca5899f0c0e274 jdk7-b79
+e6abd38682d237306d6c147c17538ec9e7f8e3a7 jdk7-b80
+dcc938ac40cc45f1ef454d76020b5db5d943001c jdk7-b81
+a30062be6d9ca1d48579826f870f85974300004e jdk7-b82
+34c8199936a1682aa8587857f44cfaf37c2b6381 jdk7-b83
+b1e55627a6980b9508854ed0c0f21d4f981b4494 jdk7-b84
+b6f633a93ae0ec4555ff4bf756f5e2150c9bdede jdk7-b85
--- a/.hgtags-top-repo	Sat Mar 06 03:37:53 2010 +0300
+++ b/.hgtags-top-repo	Wed Mar 24 14:16:57 2010 -0700
@@ -50,3 +50,13 @@
 3ac6dcf7823205546fbbc3d4ea59f37358d0b0d4 jdk7-b73
 2c88089b6e1c053597418099a14232182c387edc jdk7-b74
 d1516b9f23954b29b8e76e6f4efc467c08c78133 jdk7-b75
+c8b63075403d53a208104a8a6ea5072c1cb66aab jdk7-b76
+1f17ca8353babb13f4908c1f87d11508232518c8 jdk7-b77
+ab4ae8f4514693a9fe17ca2fec0239d8f8450d2c jdk7-b78
+20aeeb51713990dbea6929a2e100a8bbf5df70d4 jdk7-b79
+a3242906c7747b5d9bcc3d118c7c3c69aa40f4b7 jdk7-b80
+8403096d1fe7ff5318df9708cfec84a3fd3e1cf9 jdk7-b81
+e1176f86805fe07fd9fb9da065dc51b47712ce76 jdk7-b82
+6880a3af9addb41541e80ebe8cde6f79ec402a58 jdk7-b83
+2f3ea057d1ad56cf3b269cdc4de2741411151982 jdk7-b84
+cf26288a114be67c39f2758959ce50b60f5ae330 jdk7-b85
--- a/Makefile	Sat Mar 06 03:37:53 2010 +0300
+++ b/Makefile	Wed Mar 24 14:16:57 2010 -0700
@@ -51,7 +51,7 @@
 
 # For start and finish echo lines
 TITLE_TEXT = Control $(PLATFORM) $(ARCH) $(RELEASE)
-DAYE_STAMP = `$(DATE) '+%y-%m-%d %H:%M'`
+DATE_STAMP = `$(DATE) '+%y-%m-%d %H:%M'`
 START_ECHO  = echo "$(TITLE_TEXT) $@ build started: $(DATE_STAMP)"
 FINISH_ECHO = echo "$(TITLE_TEXT) $@ build finished: $(DATE_STAMP)"
 
@@ -188,7 +188,7 @@
 create_fresh_product_bootdir: FRC
 	@$(START_ECHO)
 	$(MAKE) ALT_OUTPUTDIR=$(ABS_BOOTDIR_OUTPUTDIR) \
-		NO_DOCS=true \
+		GENERATE_DOCS=false \
 		BOOT_CYCLE_SETTINGS= \
 		build_product_image
 	@$(FINISH_ECHO)
@@ -196,7 +196,7 @@
 create_fresh_debug_bootdir: FRC
 	@$(START_ECHO)
 	$(MAKE) ALT_OUTPUTDIR=$(ABS_BOOTDIR_OUTPUTDIR) \
-		NO_DOCS=true \
+		GENERATE_DOCS=false \
 		BOOT_CYCLE_DEBUG_SETTINGS= \
 		build_debug_image
 	@$(FINISH_ECHO)
@@ -204,7 +204,7 @@
 create_fresh_fastdebug_bootdir: FRC
 	@$(START_ECHO)
 	$(MAKE) ALT_OUTPUTDIR=$(ABS_BOOTDIR_OUTPUTDIR) \
-		NO_DOCS=true \
+		GENERATE_DOCS=false \
 		BOOT_CYCLE_DEBUG_SETTINGS= \
 		build_fastdebug_image
 	@$(FINISH_ECHO)
@@ -253,7 +253,7 @@
 	$(MAKE) \
 		ALT_OUTPUTDIR=$(ABS_OUTPUTDIR)-$(DEBUG_NAME) \
 	        DEBUG_NAME=$(DEBUG_NAME) \
-		NO_DOCS=true \
+		GENERATE_DOCS=false \
 	        $(BOOT_CYCLE_DEBUG_SETTINGS) \
 		generic_build_repo_series
 	@$(FINISH_ECHO)
@@ -323,7 +323,7 @@
 	$(MKDIR) -p $(OPENJDK_OUTPUTDIR)
 	($(CD) $(OPENJDK_BUILDDIR) && $(MAKE) \
 	  OPENJDK=true \
-	  NO_DOCS=true \
+	  GENERATE_DOCS=false \
 	  ALT_JDK_DEVTOOLS_DIR=$(JDK_DEVTOOLS_DIR) \
 	  ALT_OUTPUTDIR=$(OPENJDK_OUTPUTDIR) \
 	  ALT_BINARY_PLUGS_PATH=$(OPENJDK_PLUGS) \
--- a/corba/.hgignore	Sat Mar 06 03:37:53 2010 +0300
+++ b/corba/.hgignore	Wed Mar 24 14:16:57 2010 -0700
@@ -1,3 +1,3 @@
 ^build/
 ^dist/
-^nbproject/private/
+/nbproject/private/
--- a/corba/.hgtags	Sat Mar 06 03:37:53 2010 +0300
+++ b/corba/.hgtags	Wed Mar 24 14:16:57 2010 -0700
@@ -50,3 +50,13 @@
 b751c528c55560cf2adeaeef24b39ca1f4d1cbf7 jdk7-b73
 5d0cf59a3203b9f57aceebc33ae656b884987955 jdk7-b74
 0fb137085952c8e47878e240d1cb40f14de463c4 jdk7-b75
+937144222e2219939101b0129d26a872a7956b13 jdk7-b76
+6881f0383f623394b5ec73f27a5f329ff55d0467 jdk7-b77
+a7f7276b48cd74d8eb1baa83fbf3d1ef4a2603c8 jdk7-b78
+ec0421b5703b677e2226cf4bf7ae4eaafd8061c5 jdk7-b79
+0336e70ca0aeabc783cc01658f36cb6e27ea7934 jdk7-b80
+e08a42a2a94d97ea8eedb187a94dbff822c8fbba jdk7-b81
+1e8c1bfad1abb4b81407a0f2645e0fb85764ca48 jdk7-b82
+fde0df7a2384f7fe33204a79678989807d9c2b98 jdk7-b83
+68c8961a82e4a3ad2a67991e5d834192a81eb4cd jdk7-b84
+c67a9df7bc0ca291f08f9a9cc05cb78ea15d25e6 jdk7-b85
--- a/corba/make/common/shared/Platform.gmk	Sat Mar 06 03:37:53 2010 +0300
+++ b/corba/make/common/shared/Platform.gmk	Wed Mar 24 14:16:57 2010 -0700
@@ -187,6 +187,9 @@
                 sparc*) \
                     echo sparc \
                     ;; \
+                arm*) \
+                    echo arm \
+                    ;; \
                 *) \
                     echo $(mach) \
                     ;; \
--- a/corba/src/share/classes/com/sun/tools/corba/se/idl/constExpr/Expression.java	Sat Mar 06 03:37:53 2010 +0300
+++ b/corba/src/share/classes/com/sun/tools/corba/se/idl/constExpr/Expression.java	Wed Mar 24 14:16:57 2010 -0700
@@ -123,7 +123,7 @@
 
   /**
    * Coerces a number to the target type of this expression.
-   * @parm  number  The number to coerce.
+   * @param  obj  The number to coerce.
    * @return  the value of number coerced to the (target) type of
    *  this expression.
    **/
@@ -142,7 +142,7 @@
   /**
    * Coerces an integral value (BigInteger) to its corresponding unsigned
    * representation, if the target type of this expression is unsigned.
-   * @parm b The BigInteger to be coerced.
+   * @param b The BigInteger to be coerced.
    * @return the value of an integral type coerced to its corresponding
    *  unsigned integral type, if the target type of this expression is
    *  unsigned.
@@ -170,7 +170,7 @@
   /**
    * Coerces an integral value (BigInteger) to its corresponding signed
    * representation, if the target type of this expression is signed.
-   * @parm  b  The BigInteger to be coerced.
+   * @param  b  The BigInteger to be coerced.
    * @return  the value of an integral type coerced to its corresponding
    *  signed integral type, if the target type of this expression is
    *  signed.
--- a/corba/src/share/classes/javax/rmi/PortableRemoteObject.java	Sat Mar 06 03:37:53 2010 +0300
+++ b/corba/src/share/classes/javax/rmi/PortableRemoteObject.java	Wed Mar 24 14:16:57 2010 -0700
@@ -161,7 +161,7 @@
      * happens implicitly when the object is sent or received as an argument
      * on a remote method call, but in some circumstances it is useful to
      * perform this action by making an explicit call.  See the
-     * {@link Stub#connect} method for more information.
+     * {@link javax.rmi.CORBA.Stub#connect} method for more information.
      * @param target the object to connect.
      * @param source a previously connected object.
      * @throws RemoteException if <code>source</code> is not connected
--- a/corba/src/share/classes/org/omg/CORBA/SetOverrideType.java	Sat Mar 06 03:37:53 2010 +0300
+++ b/corba/src/share/classes/org/omg/CORBA/SetOverrideType.java	Wed Mar 24 14:16:57 2010 -0700
@@ -31,7 +31,7 @@
  * indicate whether policies should replace the
  * existing policies of an <code>Object</code> or be added to them.
  * <P>
- * The method {@link omg.org.CORBA.Object._set_policy_override} takes
+ * The method {@link org.omg.CORBA.Object#_set_policy_override} takes
  * either <code>SetOverrideType.SET_OVERRIDE</code> or
  * <code>SetOverrideType.ADD_OVERRIDE</code> as its second argument.
  * The method <code>_set_policy_override</code>
--- a/corba/src/share/classes/org/omg/CORBA/TCKind.java	Sat Mar 06 03:37:53 2010 +0300
+++ b/corba/src/share/classes/org/omg/CORBA/TCKind.java	Wed Mar 24 14:16:57 2010 -0700
@@ -545,8 +545,6 @@
     * @param  _value the <code>int</code> to convert.  It must be one of
     *         the <code>int</code> constants in the class
     *         <code>TCKind</code>.
-    * @return  a new <code>TCKind</code> instance whose <code>value</code>
-    * field matches the given <code>int</code>
     */
     @Deprecated
     protected TCKind(int _value){
--- a/corba/src/share/classes/org/omg/CORBA/UnknownUserException.java	Sat Mar 06 03:37:53 2010 +0300
+++ b/corba/src/share/classes/org/omg/CORBA/UnknownUserException.java	Wed Mar 24 14:16:57 2010 -0700
@@ -56,7 +56,7 @@
      * Constructs an <code>UnknownUserException</code> object that contains the given
      * <code>Any</code> object.
      *
-     * @ param a an <code>Any</code> object that contains a user exception returned
+     * @param a an <code>Any</code> object that contains a user exception returned
      *         by the server
      */
     public UnknownUserException(Any a) {
--- a/corba/src/share/classes/org/omg/CORBA/portable/ServantObject.java	Sat Mar 06 03:37:53 2010 +0300
+++ b/corba/src/share/classes/org/omg/CORBA/portable/ServantObject.java	Wed Mar 24 14:16:57 2010 -0700
@@ -43,7 +43,6 @@
     /** The real servant. The local stub may cast this field to the expected type, and then
      * invoke the operation directly. Note, the object may or may not be the actual servant
      * instance.
-     * @return The real servant
      */
     public java.lang.Object servant;
 }
--- a/corba/src/share/classes/org/omg/CosNaming/nameservice.idl	Sat Mar 06 03:37:53 2010 +0300
+++ b/corba/src/share/classes/org/omg/CosNaming/nameservice.idl	Wed Mar 24 14:16:57 2010 -0700
@@ -256,7 +256,7 @@
  * 
  * @param  n Name of the object <p>
  * 
- * @parm obj The Object to rebind with the given name <p>
+ * @param obj The Object to rebind with the given name <p>
  * 
  * @exception org.omg.CosNaming.NamingContextPackage.NotFound Indicates the name does not identify a binding.<p>
  * 
--- a/corba/src/share/classes/org/omg/PortableInterceptor/Interceptors.idl	Sat Mar 06 03:37:53 2010 +0300
+++ b/corba/src/share/classes/org/omg/PortableInterceptor/Interceptors.idl	Wed Mar 24 14:16:57 2010 -0700
@@ -1730,7 +1730,7 @@
      * <p>
      * Any number of components may exist with the same component ID. 
      * 
-     * @param a_component The IOP.TaggedComponent to add.
+     * @param tagged_component The IOP.TaggedComponent to add.
      */
     void add_ior_component 
       (in IOP::TaggedComponent tagged_component);
@@ -1744,7 +1744,7 @@
      * <p>
      * Any number of components may exist with the same component ID. 
      * 
-     * @param a_component The <code>IOP.TaggedComponent</code> to add. 
+     * @param tagged_component The <code>IOP.TaggedComponent</code> to add. 
      * @param profile_id The profile id of the profile to 
      *     which this component will be added.
      * @exception BAD_PARAM thrown, with a standard minor code of 29, if the 
--- a/hotspot/.hgignore	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/.hgignore	Wed Mar 24 14:16:57 2010 -0700
@@ -1,6 +1,6 @@
 ^build/
 ^dist/
-^nbproject/private/
+/nbproject/private/
 ^src/share/tools/hsdis/build/
 ^src/share/tools/IdealGraphVisualizer/[a-zA-Z0-9]*/build/
 ^src/share/tools/IdealGraphVisualizer/build/
--- a/hotspot/.hgtags	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/.hgtags	Wed Mar 24 14:16:57 2010 -0700
@@ -50,3 +50,36 @@
 faf94d94786b621f8e13cbcc941ca69c6d967c3f jdk7-b73
 f4b900403d6e4b0af51447bd13bbe23fe3a1dac7 jdk7-b74
 d8dd291a362acb656026a9c0a9da48501505a1e7 jdk7-b75
+9174bb32e934965288121f75394874eeb1fcb649 jdk7-b76
+455105fc81d941482f8f8056afaa7aa0949c9300 jdk7-b77
+e703499b4b51e3af756ae77c3d5e8b3058a14e4e jdk7-b78
+a5a6adfca6ecefb5894a848debabfe442ff50e25 jdk7-b79
+3003ddd1d4330b06cb4691ae74d600d3685899eb jdk7-b80
+1f9b07674480c224828852ffe137beea36b3cab5 jdk7-b81
+1999f5b12482d66c8b0daf6709daea4f51893a04 jdk7-b82
+a94714c550658fd6741793ef036cb9625dc2ab1a hs17-b01
+faf94d94786b621f8e13cbcc941ca69c6d967c3f hs17-b02
+f4b900403d6e4b0af51447bd13bbe23fe3a1dac7 hs17-b03
+d8dd291a362acb656026a9c0a9da48501505a1e7 hs17-b04
+9174bb32e934965288121f75394874eeb1fcb649 hs17-b05
+a5a6adfca6ecefb5894a848debabfe442ff50e25 hs17-b06
+3003ddd1d4330b06cb4691ae74d600d3685899eb hs17-b07
+1f9b07674480c224828852ffe137beea36b3cab5 hs17-b08
+ff3232b68fbb35185b338d7ff4695b52460243f3 hs17-b09
+981375ca07b7f0605f92f57aad95122e8c385a4d hs16-b01
+f4cbf78110c726919f46b59a3b054c54c7e889b4 hs16-b02
+07c1c01e031513bfe6a7d17c6cf30d2752824ae9 hs16-b03
+08f86fa55a31113df626a75c8a626e66a543a1bd hs16-b04
+32c83fb84370a35344676991a48440378e6b6c8a hs16-b05
+ba313800759b678979434d6da8ed3bf49eb8bea4 hs16-b06
+3c0f729815607e1678bd0c41ae68494c700dcc71 hs16-b07
+ac59d4e6dae51ac5fc31a9a4940d1857f91161b1 hs16-b08
+3f844a28c5f4912bd04043b44f21b25b0805ffc2 hs15-b01
+1605bb4eb5a7a1703b13d5b077a22cc665fe45f7 hs15-b02
+2581d90c6c9b2012da930eb4742add94a03069a0 hs15-b03
+9ab385cb0c42997e16a7761ebcd25c90560a2714 hs15-b04
+fafab5d5349c7c066d677538db67a1ee0fb33bd2 hs15-b05
+3f370a32906eb5ba993fabd7b4279be7f31052b9 jdk7-b83
+ffc8d176b84bcfb5ac21302b4feb3b0c0d69b97c jdk7-b84
+6c9796468b91dcbb39e09dfa1baf9779ac45eb66 jdk7-b85
+418bc80ce13995149eadc9eecbba21d7a9fa02ae hs17-b10
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java	Wed Mar 24 14:16:57 2010 -0700
@@ -63,12 +63,12 @@
     javaSystemLoaderField = type.getOopField("_java_system_loader");
     nofBuckets = db.lookupIntConstant("SystemDictionary::_nof_buckets").intValue();
 
-    objectKlassField = type.getOopField(WK_KLASS("object_klass"));
-    classLoaderKlassField = type.getOopField(WK_KLASS("classloader_klass"));
-    stringKlassField = type.getOopField(WK_KLASS("string_klass"));
-    systemKlassField = type.getOopField(WK_KLASS("system_klass"));
-    threadKlassField = type.getOopField(WK_KLASS("thread_klass"));
-    threadGroupKlassField = type.getOopField(WK_KLASS("threadGroup_klass"));
+    objectKlassField = type.getOopField(WK_KLASS("Object_klass"));
+    classLoaderKlassField = type.getOopField(WK_KLASS("ClassLoader_klass"));
+    stringKlassField = type.getOopField(WK_KLASS("String_klass"));
+    systemKlassField = type.getOopField(WK_KLASS("System_klass"));
+    threadKlassField = type.getOopField(WK_KLASS("Thread_klass"));
+    threadGroupKlassField = type.getOopField(WK_KLASS("ThreadGroup_klass"));
   }
 
   // This WK functions must follow the definitions in systemDictionary.hpp:
--- a/hotspot/make/Makefile	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/make/Makefile	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright 2005-2008 Sun Microsystems, Inc.  All Rights Reserved.
+# Copyright 2005-2010 Sun Microsystems, Inc.  All Rights Reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -312,10 +312,13 @@
 $(EXPORT_LIB_DIR)/%.jar: $(GEN_DIR)/%.jar
 	$(install-file)
 
-# Include files (jvmti.h, jni.h, $(JDK_INCLUDE_SUBDIR)/jni_md.h, jmm.h)
+# Include files (jvmti.h, jvmticmlr.h, jni.h, $(JDK_INCLUDE_SUBDIR)/jni_md.h, jmm.h)
 $(EXPORT_INCLUDE_DIR)/%: $(GEN_DIR)/jvmtifiles/%
 	$(install-file)
 
+$(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/code/%
+	$(install-file)
+
 $(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/prims/%
 	$(install-file)
 
--- a/hotspot/make/defs.make	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/make/defs.make	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright 2006-2008 Sun Microsystems, Inc.  All Rights Reserved.
+# Copyright 2006-2010 Sun Microsystems, Inc.  All Rights Reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -261,6 +261,7 @@
 
 # Common export list of files
 EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jvmti.h
+EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jvmticmlr.h
 EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jni.h
 EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/$(JDK_INCLUDE_SUBDIR)/jni_md.h
 EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h
--- a/hotspot/make/hotspot_version	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/make/hotspot_version	Wed Mar 24 14:16:57 2010 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=17
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=05
+HS_BUILD_NUMBER=10
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/hotspot/make/linux/makefiles/debug.make	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/make/linux/makefiles/debug.make	Wed Mar 24 14:16:57 2010 -0700
@@ -38,7 +38,7 @@
  "Please use 'make jvmg' to build debug JVM.                            \n" \
  "----------------------------------------------------------------------\n")
 
-G_SUFFIX =
+G_SUFFIX = _g
 VERSION = debug
 SYSDEFS += -DASSERT -DDEBUG
 PICFLAGS = DEFAULT
--- a/hotspot/make/linux/makefiles/fastdebug.make	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/make/linux/makefiles/fastdebug.make	Wed Mar 24 14:16:57 2010 -0700
@@ -58,7 +58,7 @@
 # Linker mapfile
 MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
 
-G_SUFFIX =
+G_SUFFIX = _g
 VERSION = optimized
 SYSDEFS += -DASSERT -DFASTDEBUG
 PICFLAGS = DEFAULT
--- a/hotspot/make/linux/makefiles/jsig.make	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/make/linux/makefiles/jsig.make	Wed Mar 24 14:16:57 2010 -0700
@@ -25,9 +25,12 @@
 # Rules to build signal interposition library, used by vm.make
 
 # libjsig[_g].so: signal interposition library
-JSIG = jsig$(G_SUFFIX)
+JSIG = jsig
 LIBJSIG = lib$(JSIG).so
 
+JSIG_G    = $(JSIG)$(G_SUFFIX)
+LIBJSIG_G = lib$(JSIG_G).so
+
 JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
 
 DEST_JSIG  = $(JDK_LIBDIR)/$(LIBJSIG)
@@ -50,6 +53,7 @@
 	@echo Making signal interposition lib...
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
                          $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $< -ldl
+	$(QUIETLY) [ -f $(LIBJSIG_G) ] || { ln -s $@ $(LIBJSIG_G); }
 
 install_jsig: $(LIBJSIG)
 	@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
--- a/hotspot/make/linux/makefiles/jvmg.make	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/make/linux/makefiles/jvmg.make	Wed Mar 24 14:16:57 2010 -0700
@@ -35,7 +35,7 @@
 # Linker mapfile
 MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
 
-G_SUFFIX =
+G_SUFFIX = _g
 VERSION = debug
 SYSDEFS += -DASSERT -DDEBUG
 PICFLAGS = DEFAULT
--- a/hotspot/make/linux/makefiles/launcher.make	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/make/linux/makefiles/launcher.make	Wed Mar 24 14:16:57 2010 -0700
@@ -25,7 +25,9 @@
 # Rules to build gamma launcher, used by vm.make
 
 # gamma[_g]: launcher
-LAUNCHER = gamma$(G_SUFFIX)
+
+LAUNCHER   = gamma
+LAUNCHER_G = $(LAUNCHER)$(G_SUFFIX)
 
 LAUNCHERDIR   = $(GAMMADIR)/src/os/$(Platform_os_family)/launcher
 LAUNCHERFLAGS = $(ARCHFLAG) \
@@ -70,4 +72,5 @@
 	    $(LINK_LAUNCHER/PRE_HOOK) \
 	    $(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(LAUNCHER.o) $(LIBS_LAUNCHER); \
 	    $(LINK_LAUNCHER/POST_HOOK) \
+	    [ -f $(LAUNCHER_G) ] || { ln -s $@ $(LAUNCHER_G); }; \
         }
--- a/hotspot/make/linux/makefiles/saproc.make	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/make/linux/makefiles/saproc.make	Wed Mar 24 14:16:57 2010 -0700
@@ -25,9 +25,13 @@
 # Rules to build serviceability agent library, used by vm.make
 
 # libsaproc[_g].so: serviceability agent
-SAPROC = saproc$(G_SUFFIX)
+
+SAPROC = saproc
 LIBSAPROC = lib$(SAPROC).so
 
+SAPROC_G = $(SAPROC)$(G_SUFFIX)
+LIBSAPROC_G = lib$(SAPROC_G).so
+
 AGENT_DIR = $(GAMMADIR)/agent
 
 SASRCDIR = $(AGENT_DIR)/src/os/$(Platform_os_family)
@@ -75,6 +79,7 @@
 	           $(SA_DEBUG_CFLAGS)                                   \
 	           -o $@                                                \
 	           -lthread_db
+	$(QUIETLY) [ -f $(LIBSAPROC_G) ] || { ln -s $@ $(LIBSAPROC_G); }
 
 install_saproc: checkAndBuildSA
 	$(QUIETLY) if [ -e $(LIBSAPROC) ] ; then             \
--- a/hotspot/make/linux/makefiles/vm.make	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/make/linux/makefiles/vm.make	Wed Mar 24 14:16:57 2010 -0700
@@ -113,8 +113,9 @@
 #----------------------------------------------------------------------
 # JVM
 
-JVM    = jvm$(G_SUFFIX)
-LIBJVM = lib$(JVM).so
+JVM      = jvm
+LIBJVM   = lib$(JVM).so
+LIBJVM_G = lib$(JVM)$(G_SUFFIX).so
 
 JVM_OBJ_FILES = $(Obj_Files)
 
@@ -201,6 +202,7 @@
 		       $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM);       \
 	    $(LINK_LIB.CC/POST_HOOK)                                    \
 	    rm -f $@.1; ln -s $@ $@.1;                                  \
+	    [ -f $(LIBJVM_G) ] || { ln -s $@ $(LIBJVM_G); ln -s $@.1 $(LIBJVM_G).1; }; \
 	    if [ -x /usr/sbin/selinuxenabled ] ; then                   \
 	      /usr/sbin/selinuxenabled;                                 \
               if [ $$? = 0 ] ; then					\
--- a/hotspot/make/solaris/makefiles/debug.make	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/make/solaris/makefiles/debug.make	Wed Mar 24 14:16:57 2010 -0700
@@ -54,7 +54,7 @@
  "Please use 'gnumake jvmg' to build debug JVM.                            \n" \
  "-------------------------------------------------------------------------\n")
 
-G_SUFFIX =
+G_SUFFIX = _g
 VERSION = debug
 SYSDEFS += -DASSERT -DDEBUG
 PICFLAGS = DEFAULT
--- a/hotspot/make/solaris/makefiles/dtrace.make	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/make/solaris/makefiles/dtrace.make	Wed Mar 24 14:16:57 2010 -0700
@@ -24,8 +24,8 @@
 
 # Rules to build jvm_db/dtrace, used by vm.make
 
-# we build libjvm_dtrace/libjvm_db/dtrace for COMPILER1 and COMPILER2
-# but not for CORE configuration
+# We build libjvm_dtrace/libjvm_db/dtrace for COMPILER1 and COMPILER2
+# but not for CORE or KERNEL configurations.
 
 ifneq ("${TYPE}", "CORE")
 ifneq ("${TYPE}", "KERNEL")
@@ -37,12 +37,13 @@
 
 else
 
-
 JVM_DB = libjvm_db
-LIBJVM_DB = libjvm$(G_SUFFIX)_db.so
+LIBJVM_DB = libjvm_db.so
+LIBJVM_DB_G = libjvm$(G_SUFFIX)_db.so
 
 JVM_DTRACE = jvm_dtrace
-LIBJVM_DTRACE = libjvm$(G_SUFFIX)_dtrace.so
+LIBJVM_DTRACE = libjvm_dtrace.so
+LIBJVM_DTRACE_G = libjvm$(G_SUFFIX)_dtrace.so
 
 JVMOFFS = JvmOffsets
 JVMOFFS.o = $(JVMOFFS).o
@@ -77,7 +78,7 @@
 LFLAGS_JVM_DTRACE += -D_REENTRANT $(PICFLAG)
 else
 LFLAGS_JVM_DB += -mt $(PICFLAG) -xnolib
-LFLAGS_JVM_DTRACE += -mt $(PICFLAG) -xnolib
+LFLAGS_JVM_DTRACE += -mt $(PICFLAG) -xnolib -ldl
 endif
 
 ISA = $(subst i386,i486,$(shell isainfo -n))
@@ -86,18 +87,24 @@
 ifneq ("${ISA}","${BUILDARCH}")
 
 XLIBJVM_DB = 64/$(LIBJVM_DB)
+XLIBJVM_DB_G = 64/$(LIBJVM_DB_G)
 XLIBJVM_DTRACE = 64/$(LIBJVM_DTRACE)
+XLIBJVM_DTRACE_G = 64/$(LIBJVM_DTRACE_G)
 
 $(XLIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE)
 	@echo Making $@
 	$(QUIETLY) mkdir -p 64/ ; \
 	$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. -I$(GENERATED) \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
+	[ -f $(XLIBJVM_DB_G) ] || { ln -s $(LIBJVM_DB) $(XLIBJVM_DB_G); }
+
 $(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
 	@echo Making $@
 	$(QUIETLY) mkdir -p 64/ ; \
 	$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
+	[ -f $(XLIBJVM_DTRACE_G) ] || { ln -s $(LIBJVM_DTRACE) $(XLIBJVM_DTRACE_G); }
+
 endif # ifneq ("${ISA}","${BUILDARCH}")
 
 ifdef USE_GCC
@@ -142,11 +149,13 @@
 	@echo Making $@
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. -I$(GENERATED) \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
+	[ -f $(LIBJVM_DB_G) ] || { ln -s $@ $(LIBJVM_DB_G); }
 
 $(LIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
 	@echo Making $@
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I.  \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
+	[ -f $(LIBJVM_DTRACE_G) ] || { ln -s $@ $(LIBJVM_DTRACE_G); }
 
 $(DTRACE).d: $(DTRACE_SRCDIR)/hotspot.d $(DTRACE_SRCDIR)/hotspot_jni.d \
              $(DTRACE_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d
--- a/hotspot/make/solaris/makefiles/fastdebug.make	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/make/solaris/makefiles/fastdebug.make	Wed Mar 24 14:16:57 2010 -0700
@@ -90,7 +90,6 @@
 # for this method for now. (fix this when dtrace bug 6258412 is fixed)
 OPT_CFLAGS/ciEnv.o = $(OPT_CFLAGS) -xinline=no%__1cFciEnvbFpost_compiled_method_load_event6MpnHnmethod__v_
 
-
 # (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files)
 
 # If you set HOTSPARC_GENERIC=yes, you disable all OPT_CFLAGS settings
@@ -115,8 +114,7 @@
 # and mustn't be otherwise.
 MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE)
 
-
-G_SUFFIX =
+G_SUFFIX = _g
 VERSION = optimized
 SYSDEFS += -DASSERT -DFASTDEBUG -DCHECK_UNHANDLED_OOPS
 PICFLAGS = DEFAULT
--- a/hotspot/make/solaris/makefiles/jsig.make	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/make/solaris/makefiles/jsig.make	Wed Mar 24 14:16:57 2010 -0700
@@ -25,8 +25,11 @@
 # Rules to build signal interposition library, used by vm.make
 
 # libjsig[_g].so: signal interposition library
-JSIG = jsig$(G_SUFFIX)
-LIBJSIG = lib$(JSIG).so
+JSIG      = jsig
+LIBJSIG   = lib$(JSIG).so
+
+JSIG_G    = $(JSIG)$(G_SUFFIX)
+LIBJSIG_G = lib$(JSIG_G).so
 
 JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
 
@@ -46,6 +49,7 @@
 	@echo Making signal interposition lib...
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
                          $(LFLAGS_JSIG) -o $@ $< -ldl
+	[ -f $(LIBJSIG_G) ] || { ln -s $@ $(LIBJSIG_G); }
 
 install_jsig: $(LIBJSIG)
 	@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
--- a/hotspot/make/solaris/makefiles/jvmg.make	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/make/solaris/makefiles/jvmg.make	Wed Mar 24 14:16:57 2010 -0700
@@ -51,7 +51,7 @@
 # and mustn't be otherwise.
 MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE)
 
-G_SUFFIX =
+G_SUFFIX = _g
 VERSION = debug
 SYSDEFS += -DASSERT -DDEBUG
 PICFLAGS = DEFAULT
--- a/hotspot/make/solaris/makefiles/launcher.make	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/make/solaris/makefiles/launcher.make	Wed Mar 24 14:16:57 2010 -0700
@@ -25,7 +25,8 @@
 # Rules to build gamma launcher, used by vm.make
 
 # gamma[_g]: launcher
-LAUNCHER = gamma$(G_SUFFIX)
+LAUNCHER   = gamma
+LAUNCHER_G = $(LAUNCHER)$(G_SUFFIX)
 
 LAUNCHERDIR   = $(GAMMADIR)/src/os/$(Platform_os_family)/launcher
 LAUNCHERFLAGS = $(ARCHFLAG) \
@@ -88,5 +89,6 @@
 	    $(LINK_LAUNCHER/PRE_HOOK) \
 	    $(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(LAUNCHER.o) $(LIBS_LAUNCHER); \
 	    $(LINK_LAUNCHER/POST_HOOK) \
+	    [ -f $(LAUNCHER_G) ] || { ln -s $@ $(LAUNCHER_G); }; \
 	    ;; \
 	esac
--- a/hotspot/make/solaris/makefiles/saproc.make	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/make/solaris/makefiles/saproc.make	Wed Mar 24 14:16:57 2010 -0700
@@ -25,9 +25,13 @@
 # Rules to build serviceability agent library, used by vm.make
 
 # libsaproc[_g].so: serviceability agent
-SAPROC = saproc$(G_SUFFIX)
+
+SAPROC = saproc
 LIBSAPROC = lib$(SAPROC).so
 
+SAPROC_G = $(SAPROC)$(G_SUFFIX)
+LIBSAPROC_G = lib$(SAPROC_G).so
+
 AGENT_DIR = $(GAMMADIR)/agent
 
 SASRCDIR = $(AGENT_DIR)/src/os/$(Platform_os_family)/proc
@@ -69,6 +73,7 @@
 	           $(SA_LFLAGS)                                         \
 	           -o $@                                                \
 	           -ldl -ldemangle -lthread -lc
+	[ -f $(LIBSAPROC_G) ] || { ln -s $@ $(LIBSAPROC_G); }
 
 install_saproc: checkAndBuildSA
 	$(QUIETLY) if [ -f $(LIBSAPROC) ] ; then             \
--- a/hotspot/make/solaris/makefiles/sparcWorks.make	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/make/solaris/makefiles/sparcWorks.make	Wed Mar 24 14:16:57 2010 -0700
@@ -281,8 +281,6 @@
 OPT_CFLAGS=-xO4 $(EXTRA_OPT_CFLAGS)
 endif
 
-CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_sparc/vm/solaris_sparc.il
-
 endif # sparc
 
 ifeq ("${Platform_arch_model}", "x86_32")
@@ -293,13 +291,14 @@
 # [phh] Is this still true for 6.1?
 OPT_CFLAGS+=-xO3
 
-CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_x86/vm/solaris_x86_32.il
-
 endif # 32bit x86
 
 # no more exceptions
 CFLAGS/NOEX=-noex
 
+# Inline functions
+CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_${Platform_arch}/vm/solaris_${Platform_arch_model}.il
+
 # Reduce code bloat by reverting back to 5.0 behavior for static initializers
 CFLAGS += -Qoption ccfe -one_static_init
 
@@ -312,6 +311,15 @@
 PICFLAG/BETTER  = $(PICFLAG/DEFAULT)
 PICFLAG/BYFILE  = $(PICFLAG/$@)$(PICFLAG/DEFAULT$(PICFLAG/$@))
 
+# Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file.
+MAPFLAG = -M FILENAME
+
+# Use $(SONAMEFLAG:SONAME=soname) to specify the intrinsic name of a shared obj
+SONAMEFLAG = -h SONAME
+
+# Build shared library
+SHARED_FLAG = -G
+
 # Would be better if these weren't needed, since we link with CC, but
 # at present removing them causes run-time errors
 LFLAGS += -library=Crun
--- a/hotspot/make/solaris/makefiles/vm.make	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/make/solaris/makefiles/vm.make	Wed Mar 24 14:16:57 2010 -0700
@@ -108,11 +108,16 @@
 #   older libm before libCrun, just to make sure it's found and used first.
 LIBS += -lsocket -lsched -ldl $(LIBM) -lCrun -lthread -ldoor -lc
 else
+ifeq ($(COMPILER_REV_NUMERIC), 502)
+# SC6.1 has it's own libm.so: specifying anything else provokes a name conflict.
+LIBS += -ldl -lthread -lsocket -lm -lsched -ldoor
+else
 LIBS += -ldl -lthread -lsocket $(LIBM) -lsched -ldoor
-endif
+endif # 502
+endif # 505
 else
 LIBS += -lsocket -lsched -ldl $(LIBM) -lthread -lc
-endif
+endif # sparcWorks
 
 # By default, link the *.o into the library, not the executable.
 LINK_INTO$(LINK_INTO) = LIBJVM
@@ -126,8 +131,9 @@
 #----------------------------------------------------------------------
 # JVM
 
-JVM    = jvm$(G_SUFFIX)
-LIBJVM = lib$(JVM).so
+JVM      = jvm
+LIBJVM   = lib$(JVM).so
+LIBJVM_G = lib$(JVM)$(G_SUFFIX).so
 
 JVM_OBJ_FILES = $(Obj_Files) $(DTRACE_OBJS)
 
@@ -173,11 +179,12 @@
 	-sbfast|-xsbfast) \
 	    ;; \
 	*) \
-	    echo Linking vm...;                                                  \
-	    $(LINK_LIB.CC/PRE_HOOK)                                              \
-	    $(LINK_VM) $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM);                \
-	    $(LINK_LIB.CC/POST_HOOK)                                             \
-	    rm -f $@.1; ln -s $@ $@.1;                                           \
+	    echo Linking vm...; \
+	    $(LINK_LIB.CC/PRE_HOOK) \
+	    $(LINK_VM) $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM); \
+	    $(LINK_LIB.CC/POST_HOOK) \
+	    rm -f $@.1; ln -s $@ $@.1; \
+	    [ -f $(LIBJVM_G) ] || { ln -s $@ $(LIBJVM_G); ln -s $@.1 $(LIBJVM_G).1; }; \
 	    ;; \
 	esac
 
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -189,14 +189,17 @@
   Register OSR_buf = osrBufferPointer()->as_register();
   { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
     int monitor_offset = BytesPerWord * method()->max_locals() +
-      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
+      (2 * BytesPerWord) * (number_of_locks - 1);
+    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
+    // the OSR buffer using 2 word entries: first the lock and then
+    // the oop.
     for (int i = 0; i < number_of_locks; i++) {
-      int slot_offset = monitor_offset - ((i * BasicObjectLock::size()) * BytesPerWord);
+      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
 #ifdef ASSERT
       // verify the interpreter's monitor has a non-null object
       {
         Label L;
-        __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7);
+        __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
         __ cmp(G0, O7);
         __ br(Assembler::notEqual, false, Assembler::pt, L);
         __ delayed()->nop();
@@ -205,9 +208,9 @@
       }
 #endif // ASSERT
       // Copy the lock field into the compiled activation.
-      __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes(), O7);
+      __ ld_ptr(OSR_buf, slot_offset + 0, O7);
       __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
-      __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7);
+      __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
       __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
     }
   }
@@ -354,7 +357,7 @@
 }
 
 
-void LIR_Assembler::emit_exception_handler() {
+int LIR_Assembler::emit_exception_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
   // must still point into the code area in order to avoid assertion
@@ -370,15 +373,12 @@
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("exception handler overflow");
-    return;
+    return -1;
   }
-#ifdef ASSERT
+
   int offset = code_offset();
-#endif // ASSERT
-  compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset());
-
-
-  if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_exceptions()) {
+
+  if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_on_exceptions()) {
     __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
     __ delayed()->nop();
   }
@@ -387,11 +387,13 @@
   __ delayed()->nop();
   debug_only(__ stop("should have gone to the caller");)
   assert(code_offset() - offset <= exception_handler_size, "overflow");
-
   __ end_a_stub();
+
+  return offset;
 }
 
-void LIR_Assembler::emit_deopt_handler() {
+
+int LIR_Assembler::emit_deopt_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
   // must still point into the code area in order to avoid assertion
@@ -405,23 +407,18 @@
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("deopt handler overflow");
-    return;
+    return -1;
   }
-#ifdef ASSERT
+
   int offset = code_offset();
-#endif // ASSERT
-  compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());
-
   AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
-
   __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
   __ delayed()->nop();
-
   assert(code_offset() - offset <= deopt_handler_size, "overflow");
-
   debug_only(__ stop("should have gone to the caller");)
-
   __ end_a_stub();
+
+  return offset;
 }
 
 
@@ -953,9 +950,11 @@
         } else {
 #ifdef _LP64
           assert(base != to_reg->as_register_lo(), "can't handle this");
+          assert(O7 != to_reg->as_register_lo(), "can't handle this");
           __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
+          __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
           __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
-          __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
+          __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
 #else
           if (base == to_reg->as_register_lo()) {
             __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
@@ -976,8 +975,8 @@
           FloatRegister reg = to_reg->as_double_reg();
           // split unaligned loads
           if (unaligned || PatchALot) {
-            __ ldf(FloatRegisterImpl::S, base, offset + BytesPerWord, reg->successor());
-            __ ldf(FloatRegisterImpl::S, base, offset,                reg);
+            __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
+            __ ldf(FloatRegisterImpl::S, base, offset,     reg);
           } else {
             __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
           }
@@ -2200,6 +2199,7 @@
   Register len     = O2;
 
   __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
+  LP64_ONLY(__ sra(src_pos, 0, src_pos);) //higher 32bits must be null
   if (shift == 0) {
     __ add(src_ptr, src_pos, src_ptr);
   } else {
@@ -2208,6 +2208,7 @@
   }
 
   __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
+  LP64_ONLY(__ sra(dst_pos, 0, dst_pos);) //higher 32bits must be null
   if (shift == 0) {
     __ add(dst_ptr, dst_pos, dst_ptr);
   } else {
@@ -2729,9 +2730,6 @@
   }
 
   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
-  __ lduw(counter_addr, tmp1);
-  __ add(tmp1, DataLayout::counter_increment, tmp1);
-  __ stw(tmp1, counter_addr);
   Bytecodes::Code bc = method->java_code_at_bci(bci);
   // Perform additional virtual call profiling for invokevirtual and
   // invokeinterface bytecodes
@@ -2821,15 +2819,23 @@
         __ set(DataLayout::counter_increment, tmp1);
         __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
                   mdo_offset_bias);
-        if (i < (VirtualCallData::row_limit() - 1)) {
-          __ br(Assembler::always, false, Assembler::pt, update_done);
-          __ delayed()->nop();
-        }
+        __ br(Assembler::always, false, Assembler::pt, update_done);
+        __ delayed()->nop();
         __ bind(next_test);
       }
+      // Receiver did not match any saved receiver and there is no empty row for it.
+      // Increment total counter to indicate polymorphic case.
+      __ lduw(counter_addr, tmp1);
+      __ add(tmp1, DataLayout::counter_increment, tmp1);
+      __ stw(tmp1, counter_addr);
 
       __ bind(update_done);
     }
+  } else {
+    // Static call
+    __ lduw(counter_addr, tmp1);
+    __ add(tmp1, DataLayout::counter_increment, tmp1);
+    __ stw(tmp1, counter_addr);
   }
 }
 
--- a/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -144,17 +144,17 @@
   if (index->is_register()) {
     // apply the shift and accumulate the displacement
     if (shift > 0) {
-      LIR_Opr tmp = new_register(T_INT);
+      LIR_Opr tmp = new_pointer_register();
       __ shift_left(index, shift, tmp);
       index = tmp;
     }
     if (disp != 0) {
-      LIR_Opr tmp = new_register(T_INT);
+      LIR_Opr tmp = new_pointer_register();
       if (Assembler::is_simm13(disp)) {
-        __ add(tmp, LIR_OprFact::intConst(disp), tmp);
+        __ add(tmp, LIR_OprFact::intptrConst(disp), tmp);
         index = tmp;
       } else {
-        __ move(LIR_OprFact::intConst(disp), tmp);
+        __ move(LIR_OprFact::intptrConst(disp), tmp);
         __ add(tmp, index, tmp);
         index = tmp;
       }
@@ -162,8 +162,8 @@
     }
   } else if (disp != 0 && !Assembler::is_simm13(disp)) {
     // index is illegal so replace it with the displacement loaded into a register
-    index = new_register(T_INT);
-    __ move(LIR_OprFact::intConst(disp), index);
+    index = new_pointer_register();
+    __ move(LIR_OprFact::intptrConst(disp), index);
     disp = 0;
   }
 
--- a/hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp	Wed Mar 24 14:16:57 2010 -0700
@@ -22,10 +22,9 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the client compiler.
 // (see c1_globals.hpp)
-//
+
 #ifndef TIERED
 define_pd_global(bool, BackgroundCompilation,        true );
 define_pd_global(bool, CICompileOSR,                 true );
@@ -48,27 +47,24 @@
 define_pd_global(bool, UseTLAB,                      true );
 define_pd_global(bool, ProfileInterpreter,           false);
 define_pd_global(intx, FreqInlineSize,               325  );
-define_pd_global(intx, NewRatio,                     8    ); // Design center runs on 1.3.1
 define_pd_global(bool, ResizeTLAB,                   true );
 define_pd_global(intx, ReservedCodeCacheSize,        32*M );
 define_pd_global(intx, CodeCacheExpansionSize,       32*K );
 define_pd_global(uintx,CodeCacheMinBlockLength,      1);
-define_pd_global(uintx, PermSize,                    12*M );
-define_pd_global(uintx, MaxPermSize,                 64*M );
-define_pd_global(bool, NeverActAsServerClassMachine, true);
+define_pd_global(uintx,PermSize,                     12*M );
+define_pd_global(uintx,MaxPermSize,                  64*M );
+define_pd_global(bool, NeverActAsServerClassMachine, true );
 define_pd_global(intx, NewSizeThreadIncrease,        16*K );
-define_pd_global(uintx, DefaultMaxRAM,               1*G);
+define_pd_global(uint64_t,MaxRAM,                    1ULL*G);
 define_pd_global(intx, InitialCodeCacheSize,         160*K);
-#endif // TIERED
+#endif // !TIERED
 
 define_pd_global(bool, UseTypeProfile,               false);
 define_pd_global(bool, RoundFPResults,               false);
 
-
-define_pd_global(bool, LIRFillDelaySlots,            true);
+define_pd_global(bool, LIRFillDelaySlots,            true );
 define_pd_global(bool, OptimizeSinglePrecision,      false);
-define_pd_global(bool, CSEArrayLength,               true);
+define_pd_global(bool, CSEArrayLength,               true );
 define_pd_global(bool, TwoOperandLIRForm,            false);
 
-
-define_pd_global(intx, SafepointPollOffset, 0);
+define_pd_global(intx, SafepointPollOffset,          0    );
--- a/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp	Wed Mar 24 14:16:57 2010 -0700
@@ -59,7 +59,6 @@
 define_pd_global(intx, FreqInlineSize,               175);
 define_pd_global(intx, INTPRESSURE,                  48);  // large register set
 define_pd_global(intx, InteriorEntryAlignment,       16);  // = CodeEntryAlignment
-define_pd_global(intx, NewRatio,                     2);
 define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
 // The default setting 16/16 seems to work best.
 // (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.)
@@ -83,25 +82,25 @@
 // sequence of instructions to load a 64 bit pointer.
 //
 // InitialCodeCacheSize derived from specjbb2000 run.
-define_pd_global(intx, InitialCodeCacheSize,     2048*K); // Integral multiple of CodeCacheExpansionSize
-define_pd_global(intx, ReservedCodeCacheSize,    48*M);
-define_pd_global(intx, CodeCacheExpansionSize,   64*K);
+define_pd_global(intx, InitialCodeCacheSize,         2048*K); // Integral multiple of CodeCacheExpansionSize
+define_pd_global(intx, ReservedCodeCacheSize,        48*M);
+define_pd_global(intx, CodeCacheExpansionSize,       64*K);
 
 // Ergonomics related flags
-define_pd_global(uintx, DefaultMaxRAM,           32*G);
+define_pd_global(uint64_t,MaxRAM,                    128ULL*G);
 #else
 // InitialCodeCacheSize derived from specjbb2000 run.
-define_pd_global(intx, InitialCodeCacheSize,     1536*K); // Integral multiple of CodeCacheExpansionSize
-define_pd_global(intx, ReservedCodeCacheSize,    32*M);
-define_pd_global(intx, CodeCacheExpansionSize,   32*K);
+define_pd_global(intx, InitialCodeCacheSize,         1536*K); // Integral multiple of CodeCacheExpansionSize
+define_pd_global(intx, ReservedCodeCacheSize,        32*M);
+define_pd_global(intx, CodeCacheExpansionSize,       32*K);
 // Ergonomics related flags
-define_pd_global(uintx, DefaultMaxRAM,           1*G);
+define_pd_global(uint64_t,MaxRAM,                    4ULL*G);
 #endif
-define_pd_global(uintx,CodeCacheMinBlockLength,  4);
+define_pd_global(uintx,CodeCacheMinBlockLength,      4);
 
 // Heap related flags
-define_pd_global(uintx, PermSize,    ScaleForWordSize(16*M));
-define_pd_global(uintx, MaxPermSize, ScaleForWordSize(64*M));
+define_pd_global(uintx,PermSize,    ScaleForWordSize(16*M));
+define_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M));
 
 // Ergonomics related flags
 define_pd_global(bool, NeverActAsServerClassMachine, false);
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -366,8 +366,9 @@
   // as get_original_pc() needs correct value for unextended_sp()
   if (_pc != NULL) {
     _cb = CodeCache::find_blob(_pc);
-    if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
-      _pc = ((nmethod*)_cb)->get_original_pc(this);
+    address original_pc = nmethod::get_deopt_original_pc(this);
+    if (original_pc != NULL) {
+      _pc = original_pc;
       _deopt_state = is_deoptimized;
     } else {
       _deopt_state = not_deoptimized;
@@ -519,9 +520,9 @@
   _cb = CodeCache::find_blob(pc);
   *O7_addr() = pc - pc_return_offset;
   _cb = CodeCache::find_blob(_pc);
-  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
-    address orig = ((nmethod*)_cb)->get_original_pc(this);
-    assert(orig == _pc, "expected original to be stored before patching");
+  address original_pc = nmethod::get_deopt_original_pc(this);
+  if (original_pc != NULL) {
+    assert(original_pc == _pc, "expected original to be stored before patching");
     _deopt_state = is_deoptimized;
   } else {
     _deopt_state = not_deoptimized;
--- a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Wed Mar 24 14:16:57 2010 -0700
@@ -22,10 +22,8 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the runtime system.
 // (see globals.hpp)
-//
 
 // For sparc we do not do call backs when a thread is in the interpreter, because the
 // interpreter dispatch needs at least two instructions - first to load the dispatch address
@@ -41,26 +39,23 @@
 define_pd_global(bool, ImplicitNullChecks,          true);  // Generate code for implicit null checks
 define_pd_global(bool, UncommonNullCast,            true);  // Uncommon-trap NULLs past to check cast
 
-define_pd_global(intx,  CodeEntryAlignment,    32);
-define_pd_global(uintx, TLABSize,              0);
-define_pd_global(uintx, NewSize, ScaleForWordSize((2048 * K) + (2 * (64 * K))));
-define_pd_global(intx,  SurvivorRatio,         8);
-define_pd_global(intx,  InlineFrequencyCount,  50);  // we can use more inlining on the SPARC
-define_pd_global(intx,  InlineSmallCode,       1500);
+define_pd_global(intx, CodeEntryAlignment,    32);
+define_pd_global(intx, InlineFrequencyCount,  50);  // we can use more inlining on the SPARC
+define_pd_global(intx, InlineSmallCode,       1500);
 #ifdef _LP64
 // Stack slots are 2X larger in LP64 than in the 32 bit VM.
-define_pd_global(intx,  ThreadStackSize,       1024);
-define_pd_global(intx,  VMThreadStackSize,     1024);
+define_pd_global(intx, ThreadStackSize,       1024);
+define_pd_global(intx, VMThreadStackSize,     1024);
 #else
-define_pd_global(intx,  ThreadStackSize,       512);
-define_pd_global(intx,  VMThreadStackSize,     512);
+define_pd_global(intx, ThreadStackSize,       512);
+define_pd_global(intx, VMThreadStackSize,     512);
 #endif
 
 define_pd_global(intx, StackYellowPages, 2);
 define_pd_global(intx, StackRedPages, 1);
 define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
 
-define_pd_global(intx,  PreInflateSpin,        40);  // Determined by running design center
+define_pd_global(intx, PreInflateSpin,       40);  // Determined by running design center
 
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1681,11 +1681,8 @@
     // If no method data exists, go to profile_continue.
     test_method_data_pointer(profile_continue);
 
-    // We are making a call.  Increment the count.
-    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
-
     // Record the receiver type.
-    record_klass_in_profile(receiver, scratch);
+    record_klass_in_profile(receiver, scratch, true);
 
     // The method data pointer needs to be updated to reflect the new target.
     update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
@@ -1695,9 +1692,13 @@
 
 void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                         Register receiver, Register scratch,
-                                        int start_row, Label& done) {
-  if (TypeProfileWidth == 0)
+                                        int start_row, Label& done, bool is_virtual_call) {
+  if (TypeProfileWidth == 0) {
+    if (is_virtual_call) {
+      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
+    }
     return;
+  }
 
   int last_row = VirtualCallData::row_limit() - 1;
   assert(start_row <= last_row, "must be work left to do");
@@ -1714,6 +1715,7 @@
     // See if the receiver is receiver[n].
     int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
     test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
+    // delayed()->tst(scratch);
 
     // The receiver is receiver[n].  Increment count[n].
     int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
@@ -1723,20 +1725,31 @@
     bind(next_test);
 
     if (test_for_null_also) {
+      Label found_null;
       // Failed the equality check on receiver[n]...  Test for null.
       if (start_row == last_row) {
         // The only thing left to do is handle the null case.
-        brx(Assembler::notZero, false, Assembler::pt, done);
-        delayed()->nop();
+        if (is_virtual_call) {
+          brx(Assembler::zero, false, Assembler::pn, found_null);
+          delayed()->nop();
+          // Receiver did not match any saved receiver and there is no empty row for it.
+          // Increment total counter to indicate polymorphic case.
+          increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
+          ba(false, done);
+          delayed()->nop();
+          bind(found_null);
+        } else {
+          brx(Assembler::notZero, false, Assembler::pt, done);
+          delayed()->nop();
+        }
         break;
       }
       // Since null is rare, make it be the branch-taken case.
-      Label found_null;
       brx(Assembler::zero, false, Assembler::pn, found_null);
       delayed()->nop();
 
       // Put all the "Case 3" tests here.
-      record_klass_in_profile_helper(receiver, scratch, start_row + 1, done);
+      record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);
 
       // Found a null.  Keep searching for a matching receiver,
       // but remember that this is an empty (unused) slot.
@@ -1753,16 +1766,18 @@
   int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
   mov(DataLayout::counter_increment, scratch);
   set_mdp_data_at(count_offset, scratch);
-  ba(false, done);
-  delayed()->nop();
+  if (start_row > 0) {
+    ba(false, done);
+    delayed()->nop();
+  }
 }
 
 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
-                                                        Register scratch) {
+                                                        Register scratch, bool is_virtual_call) {
   assert(ProfileInterpreter, "must be profiling");
   Label done;
 
-  record_klass_in_profile_helper(receiver, scratch, 0, done);
+  record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);
 
   bind (done);
 }
@@ -1840,7 +1855,7 @@
       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
 
       // Record the object type.
-      record_klass_in_profile(klass, scratch);
+      record_klass_in_profile(klass, scratch, false);
     }
 
     // The method data pointer needs to be updated.
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp	Wed Mar 24 14:16:57 2010 -0700
@@ -290,9 +290,9 @@
   void test_mdp_data_at(int offset, Register value, Label& not_equal_continue,
                         Register scratch);
 
-  void record_klass_in_profile(Register receiver, Register scratch);
+  void record_klass_in_profile(Register receiver, Register scratch, bool is_virtual_call);
   void record_klass_in_profile_helper(Register receiver, Register scratch,
-                                      int start_row, Label& done);
+                                      int start_row, Label& done, bool is_virtual_call);
 
   void update_mdp_by_offset(int offset_of_disp, Register scratch);
   void update_mdp_by_offset(Register reg, int offset_of_disp,
--- a/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -394,6 +394,11 @@
 }
 
 
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  // No special entry points that preclude compilation
+  return true;
+}
+
 // This method tells the deoptimizer how big an interpreted frame must be:
 int AbstractInterpreter::size_activation(methodOop method,
                                          int tempcount,
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -851,10 +851,10 @@
       __ set(reg2offset(r_1) + extraspace + bias, ld_off);
 #else
       int ld_off = reg2offset(r_1) + extraspace + bias;
+#endif // _LP64
 #ifdef ASSERT
       G1_forced = true;
 #endif // ASSERT
-#endif // _LP64
       r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
       if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
       else                  __ ldx(base, ld_off, G1_scratch);
@@ -865,9 +865,11 @@
       if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
         store_c2i_object(r, base, st_off);
       } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
+#ifndef _LP64
         if (TieredCompilation) {
           assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
         }
+#endif // _LP64
         store_c2i_long(r, base, st_off, r_2->is_stack());
       } else {
         store_c2i_int(r, base, st_off);
@@ -1189,7 +1191,8 @@
                                                             // VMReg max_arg,
                                                             int comp_args_on_stack, // VMRegStackSlots
                                                             const BasicType *sig_bt,
-                                                            const VMRegPair *regs) {
+                                                            const VMRegPair *regs,
+                                                            AdapterFingerPrint* fingerprint) {
   address i2c_entry = __ pc();
 
   AdapterGenerator agen(masm);
@@ -1258,7 +1261,7 @@
   agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 
   __ flush();
-  return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry);
+  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 
 }
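
Editorial note: generate_i2c2i_adapters above now receives an AdapterFingerPrint and hands the generated entry points to AdapterHandlerLibrary::new_entry, which ties adapters to a signature fingerprint so they can be shared. A minimal sketch of that caching idea, assuming a string fingerprint and invented AdapterEntry/AdapterCache names (the real code packs basic types into a fingerprint object):

#include <string>
#include <unordered_map>
#include <functional>

struct AdapterEntry {
  const void* i2c_entry;
  const void* c2i_entry;
  const void* c2i_unverified_entry;
};

class AdapterCache {
  std::unordered_map<std::string, AdapterEntry> table_;
 public:
  // Look up an adapter by fingerprint; generate and remember it on a miss.
  const AdapterEntry& get_or_create(const std::string& fingerprint,
                                    const std::function<AdapterEntry()>& generate) {
    auto it = table_.find(fingerprint);
    if (it != table_.end()) return it->second;   // reuse previously generated stubs
    return table_.emplace(fingerprint, generate()).first->second;
  }
};

With such a table, two methods whose calling conventions flatten to the same fingerprint share one i2c/c2i stub pair instead of getting one pair per method.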
 
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+// Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -1885,6 +1885,10 @@
   return RegMask();
 }
 
+const RegMask Matcher::method_handle_invoke_SP_save_mask() {
+  return RegMask();
+}
+
 %}
 
 
@@ -6664,7 +6668,7 @@
   ins_pipe(ialu_imm);
 %}
 
-instruct cmovII_U_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
+instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
   match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
   ins_cost(150);
   size(4);
@@ -6673,7 +6677,7 @@
   ins_pipe(ialu_reg);
 %}
 
-instruct cmovII_U_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{
+instruct cmovIIu_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{
   match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
   ins_cost(140);
   size(4);
@@ -6719,6 +6723,16 @@
   ins_pipe(ialu_reg);
 %}
 
+// This instruction also works with CmpN so we don't need cmovNN_reg.
+instruct cmovNIu_reg(cmpOpU cmp, flagsRegU icc, iRegN dst, iRegN src) %{
+  match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+  size(4);
+  format %{ "MOV$cmp  $icc,$src,$dst" %}
+  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
+  ins_pipe(ialu_reg);
+%}
+
 instruct cmovNF_reg(cmpOpF cmp, flagsRegF fcc, iRegN dst, iRegN src) %{
   match(Set dst (CMoveN (Binary cmp fcc) (Binary dst src)));
   ins_cost(150);
@@ -6756,6 +6770,16 @@
   ins_pipe(ialu_reg);
 %}
 
+instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{
+  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+
+  size(4);
+  format %{ "MOV$cmp  $icc,$src,$dst\t! ptr" %}
+  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
+  ins_pipe(ialu_reg);
+%}
+
 instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{
   match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
   ins_cost(140);
@@ -6766,6 +6790,16 @@
   ins_pipe(ialu_imm);
 %}
 
+instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{
+  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
+  ins_cost(140);
+
+  size(4);
+  format %{ "MOV$cmp  $icc,$src,$dst\t! ptr" %}
+  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
+  ins_pipe(ialu_imm);
+%}
+
 instruct cmovPF_reg(cmpOpF cmp, flagsRegF fcc, iRegP dst, iRegP src) %{
   match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
   ins_cost(150);
@@ -6805,6 +6839,17 @@
   ins_pipe(int_conditional_float_move);
 %}
 
+instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{
+  match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+
+  size(4);
+  format %{ "FMOVS$cmp $icc,$src,$dst" %}
+  opcode(0x101);
+  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
+  ins_pipe(int_conditional_float_move);
+%}
+
 // Conditional move,
 instruct cmovFF_reg(cmpOpF cmp, flagsRegF fcc, regF dst, regF src) %{
   match(Set dst (CMoveF (Binary cmp fcc) (Binary dst src)));
@@ -6838,6 +6883,17 @@
   ins_pipe(int_conditional_double_move);
 %}
 
+instruct cmovDIu_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src) %{
+  match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+
+  size(4);
+  format %{ "FMOVD$cmp $icc,$src,$dst" %}
+  opcode(0x102);
+  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
+  ins_pipe(int_conditional_double_move);
+%}
+
 // Conditional move,
 instruct cmovDF_reg(cmpOpF cmp, flagsRegF fcc, regD dst, regD src) %{
   match(Set dst (CMoveD (Binary cmp fcc) (Binary dst src)));
@@ -6877,6 +6933,17 @@
 %}
 
 
+instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{
+  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+
+  size(4);
+  format %{ "MOV$cmp  $icc,$src,$dst\t! long" %}
+  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
+  ins_pipe(ialu_reg);
+%}
+
+
 instruct cmovLF_reg(cmpOpF cmp, flagsRegF fcc, iRegL dst, iRegL src) %{
   match(Set dst (CMoveL (Binary cmp fcc) (Binary dst src)));
   ins_cost(150);
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2862,6 +2862,9 @@
 
     // arraycopy stubs used by compilers
     generate_arraycopy_stubs();
+
+    // Don't initialize the platform math functions since sparc
+    // doesn't have intrinsics for these operations.
   }
 
 
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -150,8 +150,7 @@
 }
 
 
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, bool unbox) {
-  assert(!unbox, "NYI");//6815692//
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
   address compiled_entry = __ pc();
   Label cont;
 
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -2251,6 +2251,7 @@
   emit_byte(0x9D);
 }
 
+#ifndef _LP64 // no 32bit push/pop on amd64
 void Assembler::popl(Address dst) {
   // NOTE: this will adjust stack by 8byte on 64bits
   InstructionMark im(this);
@@ -2258,6 +2259,7 @@
   emit_byte(0x8F);
   emit_operand(rax, dst);
 }
+#endif
 
 void Assembler::prefetch_prefix(Address src) {
   prefix(src);
@@ -2428,6 +2430,7 @@
   emit_byte(0x9C);
 }
 
+#ifndef _LP64 // no 32bit push/pop on amd64
 void Assembler::pushl(Address src) {
   // Note this will push 64bit on 64bit
   InstructionMark im(this);
@@ -2435,6 +2438,7 @@
   emit_byte(0xFF);
   emit_operand(rsi, src);
 }
+#endif
 
 void Assembler::pxor(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
@@ -5591,7 +5595,12 @@
 }
 
 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
-  andpd(dst, as_Address(src));
+  if (reachable(src)) {
+    andpd(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    andpd(dst, Address(rscratch1, 0));
+  }
 }
 
 void MacroAssembler::andptr(Register dst, int32_t imm32) {
@@ -6078,11 +6087,21 @@
 }
 
 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
-  comisd(dst, as_Address(src));
+  if (reachable(src)) {
+    comisd(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    comisd(dst, Address(rscratch1, 0));
+  }
 }
 
 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
-  comiss(dst, as_Address(src));
+  if (reachable(src)) {
+    comiss(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    comiss(dst, Address(rscratch1, 0));
+  }
 }
 
 
@@ -7647,7 +7666,7 @@
 
 #ifdef ASSERT
   Label L;
-  testl(tmp, tmp);
+  testptr(tmp, tmp);
   jccb(Assembler::notZero, L);
   hlt();
   bind(L);
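
Editorial note: the andpd/comisd/comiss wrappers above all gain the same guard: use the literal operand directly only when it is reachable, otherwise load its address into rscratch1 first and go through the register. A sketch of the underlying test, under the assumption that "reachable" here means the displacement fits a signed 32-bit field (the function name is mine):

#include <cstdint>

// True when 'target' can be addressed from code emitted at 'code_pos' with a
// 32-bit signed displacement, i.e. when a RIP-relative operand is usable.
static bool fits_rel32(int64_t target, int64_t code_pos) {
  int64_t disp = target - code_pos;
  return disp >= INT32_MIN && disp <= INT32_MAX;
}

When the test fails, the emitted fallback in the diff is the two-instruction form: lea rscratch1, literal followed by the SSE op on Address(rscratch1, 0).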
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1244,7 +1244,9 @@
   void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
   void pcmpestri(XMMRegister xmm1, Address src, int imm8);
 
+#ifndef _LP64 // no 32bit push/pop on amd64
   void popl(Address dst);
+#endif
 
 #ifdef _LP64
   void popq(Address dst);
@@ -1285,7 +1287,9 @@
   // Interleave Low Bytes
   void punpcklbw(XMMRegister dst, XMMRegister src);
 
+#ifndef _LP64 // no 32bit push/pop on amd64
   void pushl(Address src);
+#endif
 
   void pushq(Address src);
 
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -301,22 +301,25 @@
   Register OSR_buf = osrBufferPointer()->as_pointer_register();
   { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
     int monitor_offset = BytesPerWord * method()->max_locals() +
-      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
+      (2 * BytesPerWord) * (number_of_locks - 1);
+    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
+    // the OSR buffer using 2 word entries: first the lock and then
+    // the oop.
     for (int i = 0; i < number_of_locks; i++) {
-      int slot_offset = monitor_offset - ((i * BasicObjectLock::size()) * BytesPerWord);
+      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
 #ifdef ASSERT
       // verify the interpreter's monitor has a non-null object
       {
         Label L;
-        __ cmpptr(Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
+        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
         __ jcc(Assembler::notZero, L);
         __ stop("locked object is NULL");
         __ bind(L);
       }
 #endif
-      __ movptr(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes()));
+      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
       __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
-      __ movptr(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()));
+      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
       __ movptr(frame_map()->address_for_monitor_object(i), rbx);
     }
   }
@@ -415,13 +418,12 @@
 }
 
 
-void LIR_Assembler::emit_exception_handler() {
+int LIR_Assembler::emit_exception_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
   // must still point into the code area in order to avoid assertion
   // failures when searching for the corresponding bci => add a nop
   // (was bug 5/14/1999 - gri)
-
   __ nop();
 
   // generate code for exception handler
@@ -429,17 +431,14 @@
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("exception handler overflow");
-    return;
+    return -1;
   }
-#ifdef ASSERT
+
   int offset = code_offset();
-#endif // ASSERT
-
-  compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset());
 
   // if the method does not have an exception handler, then there is
   // no reason to search for one
-  if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_exceptions()) {
+  if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_on_exceptions()) {
     // the exception oop and pc are in rax, and rdx
     // no other registers need to be preserved, so invalidate them
     __ invalidate_registers(false, true, true, false, true, true);
@@ -471,19 +470,19 @@
   // unwind activation and forward exception to caller
   // rax,: exception
   __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
-
   assert(code_offset() - offset <= exception_handler_size, "overflow");
-
   __ end_a_stub();
+
+  return offset;
 }
 
-void LIR_Assembler::emit_deopt_handler() {
+
+int LIR_Assembler::emit_deopt_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
   // must still point into the code area in order to avoid assertion
   // failures when searching for the corresponding bci => add a nop
   // (was bug 5/14/1999 - gri)
-
   __ nop();
 
   // generate code for exception handler
@@ -491,23 +490,17 @@
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("deopt handler overflow");
-    return;
+    return -1;
   }
-#ifdef ASSERT
+
   int offset = code_offset();
-#endif // ASSERT
-
-  compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());
-
   InternalAddress here(__ pc());
   __ pushptr(here.addr());
-
   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
-
   assert(code_offset() - offset <= deopt_handler_size, "overflow");
-
   __ end_a_stub();
 
+  return offset;
 }
 
 
@@ -785,7 +778,13 @@
           ShouldNotReachHere();
           __ movoop(as_Address(addr, noreg), c->as_jobject());
         } else {
+#ifdef _LP64
+          __ movoop(rscratch1, c->as_jobject());
+          null_check_here = code_offset();
+          __ movptr(as_Address_lo(addr), rscratch1);
+#else
           __ movoop(as_Address(addr), c->as_jobject());
+#endif
         }
       }
       break;
@@ -1118,8 +1117,14 @@
       __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
       __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
     } else {
+#ifndef _LP64
       __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
       __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
+#else
+      //no pushl on 64bits
+      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
+      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
+#endif
     }
 
   } else if (src->is_double_stack()) {
@@ -3136,8 +3141,10 @@
 
 #ifdef _LP64
   assert_different_registers(c_rarg0, dst, dst_pos, length);
+  __ movl2ptr(src_pos, src_pos); //higher 32bits must be null
   __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
   assert_different_registers(c_rarg1, length);
+  __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null
   __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
   __ mov(c_rarg2, length);
 
@@ -3202,7 +3209,6 @@
   Register mdo  = op->mdo()->as_register();
   __ movoop(mdo, md->constant_encoding());
   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
-  __ addl(counter_addr, DataLayout::counter_increment);
   Bytecodes::Code bc = method->java_code_at_bci(bci);
   // Perform additional virtual call profiling for invokevirtual and
   // invokeinterface bytecodes
@@ -3269,14 +3275,18 @@
         __ jcc(Assembler::notEqual, next_test);
         __ movptr(recv_addr, recv);
         __ movl(Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))), DataLayout::counter_increment);
-        if (i < (VirtualCallData::row_limit() - 1)) {
-          __ jmp(update_done);
-        }
+        __ jmp(update_done);
         __ bind(next_test);
       }
+      // Receiver did not match any saved receiver and there is no empty row for it.
+      // Increment total counter to indicate polymorphic case.
+      __ addl(counter_addr, DataLayout::counter_increment);
 
       __ bind(update_done);
     }
+  } else {
+    // Static call
+    __ addl(counter_addr, DataLayout::counter_increment);
   }
 }
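
Editorial note: emit_exception_handler and emit_deopt_handler above now return the offset where the handler starts (or -1 when the stub area overflowed) instead of writing CodeOffsets::Exceptions / CodeOffsets::Deopt themselves, so the shared caller can record the values. A small stand-alone model of that contract; names and offsets are illustrative only:

#include <cstdio>

struct Offsets { int exceptions = -1; int deopt = -1; };

static int emit_exception_handler() { /* ... emit handler code ... */ return 128; }
static int emit_deopt_handler()     { /* ... emit handler code ... */ return 192; }

static bool finish_compile(Offsets& off) {
  off.exceptions = emit_exception_handler();
  if (off.exceptions < 0) return false;   // mirrors bailout("exception handler overflow")
  off.deopt = emit_deopt_handler();
  if (off.deopt < 0) return false;        // mirrors bailout("deopt handler overflow")
  return true;
}

int main() {
  Offsets off;
  return finish_compile(off) ? 0 : 1;     // caller records off.exceptions / off.deopt on success
}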
 
--- a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -755,8 +755,19 @@
   }
 
   LIR_Opr addr = new_pointer_register();
-  __ move(obj.result(), addr);
-  __ add(addr, offset.result(), addr);
+  LIR_Address* a;
+  if(offset.result()->is_constant()) {
+    a = new LIR_Address(obj.result(),
+                        NOT_LP64(offset.result()->as_constant_ptr()->as_jint()) LP64_ONLY((int)offset.result()->as_constant_ptr()->as_jlong()),
+                        as_BasicType(type));
+  } else {
+    a = new LIR_Address(obj.result(),
+                        offset.result(),
+                        LIR_Address::times_1,
+                        0,
+                        as_BasicType(type));
+  }
+  __ leal(LIR_OprFact::address(a), addr);
 
   if (type == objectType) {  // Write-barrier needed for Object fields.
     // Do the pre-write barrier, if any.
--- a/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp	Wed Mar 24 14:16:57 2010 -0700
@@ -22,10 +22,8 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the client compiler.
 // (see c1_globals.hpp)
-//
 
 #ifndef TIERED
 define_pd_global(bool, BackgroundCompilation,        true );
@@ -48,27 +46,24 @@
 
 define_pd_global(intx, OnStackReplacePercentage,     933  );
 define_pd_global(intx, FreqInlineSize,               325  );
-define_pd_global(intx, NewRatio,                     12   );
 define_pd_global(intx, NewSizeThreadIncrease,        4*K  );
 define_pd_global(intx, InitialCodeCacheSize,         160*K);
 define_pd_global(intx, ReservedCodeCacheSize,        32*M );
 define_pd_global(bool, ProfileInterpreter,           false);
 define_pd_global(intx, CodeCacheExpansionSize,       32*K );
 define_pd_global(uintx,CodeCacheMinBlockLength,      1);
-define_pd_global(uintx, PermSize,                    12*M );
-define_pd_global(uintx, MaxPermSize,                 64*M );
-define_pd_global(bool, NeverActAsServerClassMachine, true);
-define_pd_global(uintx, DefaultMaxRAM,               1*G);
+define_pd_global(uintx,PermSize,                     12*M );
+define_pd_global(uintx,MaxPermSize,                  64*M );
+define_pd_global(bool, NeverActAsServerClassMachine, true );
+define_pd_global(uint64_t,MaxRAM,                    1ULL*G);
 define_pd_global(bool, CICompileOSR,                 true );
-#endif // TIERED
+#endif // !TIERED
 define_pd_global(bool, UseTypeProfile,               false);
 define_pd_global(bool, RoundFPResults,               true );
 
+define_pd_global(bool, LIRFillDelaySlots,            false);
+define_pd_global(bool, OptimizeSinglePrecision,      true );
+define_pd_global(bool, CSEArrayLength,               false);
+define_pd_global(bool, TwoOperandLIRForm,            true );
 
-define_pd_global(bool, LIRFillDelaySlots,            false);
-define_pd_global(bool, OptimizeSinglePrecision,      true);
-define_pd_global(bool, CSEArrayLength,               false);
-define_pd_global(bool, TwoOperandLIRForm,            true);
-
-
-define_pd_global(intx, SafepointPollOffset, 256);
+define_pd_global(intx, SafepointPollOffset,          256  );
--- a/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp	Wed Mar 24 14:16:57 2010 -0700
@@ -22,7 +22,6 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the server compiler.
 // (see c2_globals.hpp).  Alpha-sorted.
 
@@ -46,8 +45,8 @@
 define_pd_global(intx, CompileThreshold,             10000);
 #endif // TIERED
 define_pd_global(intx, Tier2CompileThreshold,        10000);
-define_pd_global(intx, Tier3CompileThreshold,        20000 );
-define_pd_global(intx, Tier4CompileThreshold,        40000 );
+define_pd_global(intx, Tier3CompileThreshold,        20000);
+define_pd_global(intx, Tier4CompileThreshold,        40000);
 
 define_pd_global(intx, BackEdgeThreshold,            100000);
 define_pd_global(intx, Tier2BackEdgeThreshold,       100000);
@@ -61,7 +60,6 @@
 #ifdef AMD64
 define_pd_global(intx, INTPRESSURE,                  13);
 define_pd_global(intx, InteriorEntryAlignment,       16);
-define_pd_global(intx, NewRatio,                     2);
 define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
 define_pd_global(intx, LoopUnrollLimit,              60);
 // InitialCodeCacheSize derived from specjbb2000 run.
@@ -69,19 +67,18 @@
 define_pd_global(intx, CodeCacheExpansionSize,       64*K);
 
 // Ergonomics related flags
-define_pd_global(uintx, DefaultMaxRAM, 32*G);
+define_pd_global(uint64_t,MaxRAM,                    128ULL*G);
 #else
 define_pd_global(intx, INTPRESSURE,                  6);
 define_pd_global(intx, InteriorEntryAlignment,       4);
-define_pd_global(intx, NewRatio,                     8); // Design center runs on 1.3.1
 define_pd_global(intx, NewSizeThreadIncrease,        4*K);
-define_pd_global(intx, LoopUnrollLimit,              50); // Design center runs on 1.3.1
+define_pd_global(intx, LoopUnrollLimit,              50);     // Design center runs on 1.3.1
 // InitialCodeCacheSize derived from specjbb2000 run.
 define_pd_global(intx, InitialCodeCacheSize,         2304*K); // Integral multiple of CodeCacheExpansionSize
 define_pd_global(intx, CodeCacheExpansionSize,       32*K);
 
 // Ergonomics related flags
-define_pd_global(uintx, DefaultMaxRAM, 1*G);
+define_pd_global(uint64_t,MaxRAM,                    4ULL*G);
 #endif // AMD64
 define_pd_global(intx, OptoLoopAlignment,            16);
 define_pd_global(intx, RegisterCostAreaRatio,        16000);
@@ -97,8 +94,8 @@
 define_pd_global(uintx,CodeCacheMinBlockLength,      4);
 
 // Heap related flags
-define_pd_global(uintx, PermSize,    ScaleForWordSize(16*M));
-define_pd_global(uintx, MaxPermSize, ScaleForWordSize(64*M));
+define_pd_global(uintx,PermSize,    ScaleForWordSize(16*M));
+define_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M));
 
 // Ergonomics related flags
 define_pd_global(bool, NeverActAsServerClassMachine, false);
--- a/hotspot/src/cpu/x86/vm/frame_x86.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -222,9 +222,9 @@
   }
   ((address *)sp())[-1] = pc;
   _cb = CodeCache::find_blob(pc);
-  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
-    address orig = (((nmethod*)_cb)->get_original_pc(this));
-    assert(orig == _pc, "expected original to be stored before patching");
+  address original_pc = nmethod::get_deopt_original_pc(this);
+  if (original_pc != NULL) {
+    assert(original_pc == _pc, "expected original PC to be stored before patching");
     _deopt_state = is_deoptimized;
     // leave _pc as is
   } else {
@@ -323,13 +323,63 @@
   return fr;
 }
 
+
+//------------------------------------------------------------------------------
+// frame::verify_deopt_original_pc
+//
+// Verifies the calculated original PC of a deoptimization PC for the
+// given unextended SP.  The unextended SP might also be the saved SP
+// for MethodHandle call sites.
+#if ASSERT
+void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) {
+  frame fr;
+
+  // This is ugly but it's better than to change {get,set}_original_pc
+  // to take an SP value as argument.  And it's only a debugging
+  // method anyway.
+  fr._unextended_sp = unextended_sp;
+
+  address original_pc = nm->get_original_pc(&fr);
+  assert(nm->code_contains(original_pc), "original PC must be in nmethod");
+  assert(nm->is_method_handle_return(original_pc) == is_method_handle_return, "must be");
+}
+#endif
+
+
+//------------------------------------------------------------------------------
+// frame::sender_for_interpreter_frame
 frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
-  // sp is the raw sp from the sender after adapter or interpreter extension
-  intptr_t* sp = (intptr_t*) addr_at(sender_sp_offset);
+  // SP is the raw SP from the sender after adapter or interpreter
+  // extension.
+  intptr_t* sender_sp = this->sender_sp();
 
   // This is the sp before any possible extension (adapter/locals).
   intptr_t* unextended_sp = interpreter_frame_sender_sp();
 
+  // Stored FP.
+  intptr_t* saved_fp = link();
+
+  address sender_pc = this->sender_pc();
+  CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc);
+  assert(sender_cb, "sanity");
+  nmethod* sender_nm = sender_cb->as_nmethod_or_null();
+
+  if (sender_nm != NULL) {
+    // If the sender PC is a deoptimization point, get the original
+    // PC.  For MethodHandle call site the unextended_sp is stored in
+    // saved_fp.
+    if (sender_nm->is_deopt_mh_entry(sender_pc)) {
+      DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, saved_fp));
+      unextended_sp = saved_fp;
+    }
+    else if (sender_nm->is_deopt_entry(sender_pc)) {
+      DEBUG_ONLY(verify_deopt_original_pc(sender_nm, unextended_sp));
+    }
+    else if (sender_nm->is_method_handle_return(sender_pc)) {
+      unextended_sp = saved_fp;
+    }
+  }
+
   // The interpreter and compiler(s) always save EBP/RBP in a known
   // location on entry. We must record where that location is
   // so this if EBP/RBP was live on callout from c2 we can find
@@ -351,29 +401,52 @@
     }
 #endif // AMD64
   }
-#endif /* COMPILER2 */
-  return frame(sp, unextended_sp, link(), sender_pc());
+#endif // COMPILER2
+
+  return frame(sender_sp, unextended_sp, saved_fp, sender_pc);
 }
 
 
-//------------------------------sender_for_compiled_frame-----------------------
+//------------------------------------------------------------------------------
+// frame::sender_for_compiled_frame
 frame frame::sender_for_compiled_frame(RegisterMap* map) const {
   assert(map != NULL, "map must be set");
-  const bool c1_compiled = _cb->is_compiled_by_c1();
 
   // frame owned by optimizing compiler
-  intptr_t* sender_sp = NULL;
-
   assert(_cb->frame_size() >= 0, "must have non-zero frame size");
-  sender_sp = unextended_sp() + _cb->frame_size();
+  intptr_t* sender_sp = unextended_sp() + _cb->frame_size();
+  intptr_t* unextended_sp = sender_sp;
 
   // On Intel the return_address is always the word on the stack
   address sender_pc = (address) *(sender_sp-1);
 
-  // This is the saved value of ebp which may or may not really be an fp.
-  // it is only an fp if the sender is an interpreter frame (or c1?)
+  // This is the saved value of EBP which may or may not really be an FP.
+  // It is only an FP if the sender is an interpreter frame (or C1?).
+  intptr_t* saved_fp = (intptr_t*) *(sender_sp - frame::sender_sp_offset);
 
-  intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
+  // If we are returning to a compiled MethodHandle call site, the
+  // saved_fp will in fact be a saved value of the unextended SP.  The
+  // simplest way to tell whether we are returning to such a call site
+  // is as follows:
+  CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc);
+  assert(sender_cb, "sanity");
+  nmethod* sender_nm = sender_cb->as_nmethod_or_null();
+
+  if (sender_nm != NULL) {
+    // If the sender PC is a deoptimization point, get the original
+    // PC.  For MethodHandle call site the unextended_sp is stored in
+    // saved_fp.
+    if (sender_nm->is_deopt_mh_entry(sender_pc)) {
+      DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, saved_fp));
+      unextended_sp = saved_fp;
+    }
+    else if (sender_nm->is_deopt_entry(sender_pc)) {
+      DEBUG_ONLY(verify_deopt_original_pc(sender_nm, unextended_sp));
+    }
+    else if (sender_nm->is_method_handle_return(sender_pc)) {
+      unextended_sp = saved_fp;
+    }
+  }
 
   if (map->update_map()) {
     // Tell GC to use argument oopmaps for some runtime stubs that need it.
@@ -383,7 +456,7 @@
     if (_cb->oop_maps() != NULL) {
       OopMapSet::update_register_map(this, map);
     }
-    // Since the prolog does the save and restore of epb there is no oopmap
+    // Since the prolog does the save and restore of EBP there is no oopmap
     // for it so we must fill in its location as if there was an oopmap entry
     // since if our caller was compiled code there could be live jvm state in it.
     map->set_location(rbp->as_VMReg(), (address) (sender_sp - frame::sender_sp_offset));
@@ -399,9 +472,12 @@
   }
 
   assert(sender_sp != sp(), "must have changed");
-  return frame(sender_sp, saved_fp, sender_pc);
+  return frame(sender_sp, unextended_sp, saved_fp, sender_pc);
 }
 
+
+//------------------------------------------------------------------------------
+// frame::sender
 frame frame::sender(RegisterMap* map) const {
   // Default is we don't have to follow them. The sender_for_xxx will
   // update it accordingly
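
Editorial note: both sender_for_interpreter_frame and sender_for_compiled_frame above apply the same rule when walking to the caller. The stand-alone C++ model below keeps only that decision; SenderInfo and the three boolean parameters are invented stand-ins for the nmethod queries used in the diff.

#include <cstdint>

struct SenderInfo {
  intptr_t* sender_sp;
  intptr_t* unextended_sp;
  intptr_t* saved_fp;
};

static void adjust_for_sender(SenderInfo& s,
                              bool is_deopt_mh_entry,        // sender_nm->is_deopt_mh_entry(pc)
                              bool is_deopt_entry,           // sender_nm->is_deopt_entry(pc)
                              bool is_method_handle_return)  // sender_nm->is_method_handle_return(pc)
{
  if (is_deopt_mh_entry || is_method_handle_return) {
    // For compiled MethodHandle call sites the saved "FP" slot actually holds
    // the caller's unextended SP, so use it instead of the computed value.
    s.unextended_sp = s.saved_fp;
  } else if (is_deopt_entry) {
    // Plain deoptimization point: the unextended SP is already correct; the
    // debug build only re-verifies the original PC here.
  }
}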
--- a/hotspot/src/cpu/x86/vm/frame_x86.hpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/frame_x86.hpp	Wed Mar 24 14:16:57 2010 -0700
@@ -163,6 +163,14 @@
     return (intptr_t*) addr_at(offset);
   }
 
+#if ASSERT
+  // Used in frame::sender_for_{interpreter,compiled}_frame
+  static void verify_deopt_original_pc(   nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return = false);
+  static void verify_deopt_mh_original_pc(nmethod* nm, intptr_t* unextended_sp) {
+    verify_deopt_original_pc(nm, unextended_sp, true);
+  }
+#endif
+
  public:
   // Constructors
 
--- a/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,32 +35,35 @@
   _deopt_state = unknown;
 }
 
-inline frame:: frame(intptr_t* sp, intptr_t* fp, address pc) {
+inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
   _sp = sp;
   _unextended_sp = sp;
   _fp = fp;
   _pc = pc;
   assert(pc != NULL, "no pc?");
   _cb = CodeCache::find_blob(pc);
-  _deopt_state = not_deoptimized;
-  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
-    _pc = (((nmethod*)_cb)->get_original_pc(this));
+
+  address original_pc = nmethod::get_deopt_original_pc(this);
+  if (original_pc != NULL) {
+    _pc = original_pc;
     _deopt_state = is_deoptimized;
   } else {
     _deopt_state = not_deoptimized;
   }
 }
 
-inline frame:: frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
+inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
   _sp = sp;
   _unextended_sp = unextended_sp;
   _fp = fp;
   _pc = pc;
   assert(pc != NULL, "no pc?");
   _cb = CodeCache::find_blob(pc);
-  _deopt_state = not_deoptimized;
-  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
-    _pc = (((nmethod*)_cb)->get_original_pc(this));
+
+  address original_pc = nmethod::get_deopt_original_pc(this);
+  if (original_pc != NULL) {
+    _pc = original_pc;
+    assert(((nmethod*)_cb)->code_contains(_pc), "original PC must be in nmethod");
     _deopt_state = is_deoptimized;
   } else {
     _deopt_state = not_deoptimized;
@@ -86,9 +89,9 @@
 
   _cb = CodeCache::find_blob(_pc);
 
-  _deopt_state = not_deoptimized;
-  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
-    _pc = (((nmethod*)_cb)->get_original_pc(this));
+  address original_pc = nmethod::get_deopt_original_pc(this);
+  if (original_pc != NULL) {
+    _pc = original_pc;
     _deopt_state = is_deoptimized;
   } else {
     _deopt_state = not_deoptimized;
@@ -225,11 +228,13 @@
 // top of expression stack
 inline intptr_t* frame::interpreter_frame_tos_address() const {
   intptr_t* last_sp = interpreter_frame_last_sp();
-  if (last_sp == NULL ) {
+  if (last_sp == NULL) {
     return sp();
   } else {
-    // sp() may have been extended by an adapter
-    assert(last_sp < fp() && last_sp >= sp(), "bad tos");
+    // sp() may have been extended or shrunk by an adapter.  At least
+    // check that we don't fall behind the legal region.
+    // For top deoptimized frame last_sp == interpreter_frame_monitor_end.
+    assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
     return last_sp;
   }
 }
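
Editorial note: the three frame constructors in frame_x86.inline.hpp above now share one pattern: ask nmethod::get_deopt_original_pc for a saved original PC and, if one exists, substitute it and mark the frame deoptimized. A stand-alone sketch of that pattern; the types and the helper stub are invented for illustration and the stub simply returns null.

#include <cstddef>

enum DeoptState { not_deoptimized, is_deoptimized, unknown_state };

struct FrameBits {
  const void* pc = nullptr;
  DeoptState  deopt_state = unknown_state;
};

// Stand-in for nmethod::get_deopt_original_pc(this): returns the PC that was
// saved before the deopt patch, or null if 'pc' is not a deopt stub PC.
static const void* get_deopt_original_pc_for(const FrameBits&) { return nullptr; }

static void classify_frame(FrameBits& f) {
  const void* original_pc = get_deopt_original_pc_for(f);
  if (original_pc != nullptr) {
    f.pc = original_pc;                  // report the pre-patch PC
    f.deopt_state = is_deoptimized;
  } else {
    f.deopt_state = not_deoptimized;
  }
}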
--- a/hotspot/src/cpu/x86/vm/globals_x86.hpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/globals_x86.hpp	Wed Mar 24 14:16:57 2010 -0700
@@ -22,17 +22,16 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the runtime system.
 // (see globals.hpp)
-//
 
-define_pd_global(bool,  ConvertSleepToYield,      true);
-define_pd_global(bool,  ShareVtableStubs,         true);
-define_pd_global(bool,  CountInterpCalls,         true);
+define_pd_global(bool, ConvertSleepToYield,      true);
+define_pd_global(bool, ShareVtableStubs,         true);
+define_pd_global(bool, CountInterpCalls,         true);
+define_pd_global(bool, NeedsDeoptSuspend,        false); // only register window machines need this
 
-define_pd_global(bool, ImplicitNullChecks,          true);  // Generate code for implicit null checks
-define_pd_global(bool, UncommonNullCast,            true);  // Uncommon-trap NULLs passed to check cast
+define_pd_global(bool, ImplicitNullChecks,       true);  // Generate code for implicit null checks
+define_pd_global(bool, UncommonNullCast,         true);  // Uncommon-trap NULLs passed to check cast
 
 // See 4827828 for this change. There is no globals_core_i486.hpp. I can't
 // assign a different value for C2 without touching a number of files. Use
@@ -42,29 +41,24 @@
 // the uep and the vep doesn't get real alignment but just slops on by
 // only assured that the entry instruction meets the 5 byte size requirement.
 #ifdef COMPILER2
-define_pd_global(intx,  CodeEntryAlignment,       32);
+define_pd_global(intx, CodeEntryAlignment,       32);
 #else
-define_pd_global(intx,  CodeEntryAlignment,       16);
+define_pd_global(intx, CodeEntryAlignment,       16);
 #endif // COMPILER2
+define_pd_global(intx, InlineFrequencyCount,     100);
+define_pd_global(intx, InlineSmallCode,          1000);
 
-define_pd_global(bool, NeedsDeoptSuspend,           false); // only register window machines need this
-
-define_pd_global(uintx, TLABSize,                 0);
+define_pd_global(intx, StackYellowPages, 2);
+define_pd_global(intx, StackRedPages, 1);
 #ifdef AMD64
-define_pd_global(uintx, NewSize, ScaleForWordSize(2048 * K));
 // Very large C++ stack frames using solaris-amd64 optimized builds
 // due to lack of optimization caused by C++ compiler bugs
 define_pd_global(intx, StackShadowPages, SOLARIS_ONLY(20) NOT_SOLARIS(6) DEBUG_ONLY(+2));
 #else
-define_pd_global(uintx, NewSize,                  1024 * K);
 define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
 #endif // AMD64
-define_pd_global(intx,  InlineFrequencyCount,     100);
-define_pd_global(intx,  InlineSmallCode,          1000);
-define_pd_global(intx,  PreInflateSpin,           10);
 
-define_pd_global(intx, StackYellowPages, 2);
-define_pd_global(intx, StackRedPages, 1);
+define_pd_global(intx, PreInflateSpin,           10);
 
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -196,6 +196,9 @@
   } else {
     assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
     movl(reg, Address(rsi, bcp_offset));
+    // Check if the secondary index definition is still ~x, otherwise
+    // we have to change the following assembler code to calculate the
+    // plain index.
     assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
     notl(reg);  // convert to plain index
   }
@@ -1236,17 +1239,19 @@
     // If no method data exists, go to profile_continue.
     test_method_data_pointer(mdp, profile_continue);
 
-    // We are making a call.  Increment the count.
-    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
-
     Label skip_receiver_profile;
     if (receiver_can_be_null) {
+      Label not_null;
       testptr(receiver, receiver);
-      jcc(Assembler::zero, skip_receiver_profile);
+      jccb(Assembler::notZero, not_null);
+      // We are making a call.  Increment the count for null receiver.
+      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+      jmp(skip_receiver_profile);
+      bind(not_null);
     }
 
     // Record the receiver type.
-    record_klass_in_profile(receiver, mdp, reg2);
+    record_klass_in_profile(receiver, mdp, reg2, true);
     bind(skip_receiver_profile);
 
     // The method data pointer needs to be updated to reflect the new target.
@@ -1260,10 +1265,14 @@
 
 void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                         Register receiver, Register mdp,
-                                        Register reg2,
-                                        int start_row, Label& done) {
-  if (TypeProfileWidth == 0)
+                                        Register reg2, int start_row,
+                                        Label& done, bool is_virtual_call) {
+  if (TypeProfileWidth == 0) {
+    if (is_virtual_call) {
+      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+    }
     return;
+  }
 
   int last_row = VirtualCallData::row_limit() - 1;
   assert(start_row <= last_row, "must be work left to do");
@@ -1291,19 +1300,28 @@
     bind(next_test);
 
     if (row == start_row) {
+      Label found_null;
       // Failed the equality check on receiver[n]...  Test for null.
       testptr(reg2, reg2);
       if (start_row == last_row) {
         // The only thing left to do is handle the null case.
-        jcc(Assembler::notZero, done);
+        if (is_virtual_call) {
+          jccb(Assembler::zero, found_null);
+          // Receiver did not match any saved receiver and there is no empty row for it.
+          // Increment total counter to indicate polymorphic case.
+          increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+          jmp(done);
+          bind(found_null);
+        } else {
+          jcc(Assembler::notZero, done);
+        }
         break;
       }
       // Since null is rare, make it be the branch-taken case.
-      Label found_null;
       jcc(Assembler::zero, found_null);
 
       // Put all the "Case 3" tests here.
-      record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done);
+      record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done, is_virtual_call);
 
       // Found a null.  Keep searching for a matching receiver,
       // but remember that this is an empty (unused) slot.
@@ -1320,16 +1338,18 @@
   int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
   movptr(reg2, (int32_t)DataLayout::counter_increment);
   set_mdp_data_at(mdp, count_offset, reg2);
-  jmp(done);
+  if (start_row > 0) {
+    jmp(done);
+  }
 }
 
 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
-                                                        Register mdp,
-                                                        Register reg2) {
+                                                        Register mdp, Register reg2,
+                                                        bool is_virtual_call) {
   assert(ProfileInterpreter, "must be profiling");
   Label done;
 
-  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);
+  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);
 
   bind (done);
 }
@@ -1422,7 +1442,7 @@
       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
 
       // Record the object type.
-      record_klass_in_profile(klass, mdp, reg2);
+      record_klass_in_profile(klass, mdp, reg2, false);
       assert(reg2 == rdi, "we know how to fix this blown reg");
       restore_locals();         // Restore EDI
     }
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp	Wed Mar 24 14:16:57 2010 -0700
@@ -213,10 +213,10 @@
                         Label& not_equal_continue);
 
   void record_klass_in_profile(Register receiver, Register mdp,
-                               Register reg2);
+                               Register reg2, bool is_virtual_call);
   void record_klass_in_profile_helper(Register receiver, Register mdp,
-                                      Register reg2,
-                                      int start_row, Label& done);
+                                      Register reg2, int start_row,
+                                      Label& done, bool is_virtual_call);
 
   void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
   void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -185,12 +185,30 @@
 }
 
 
+void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
+                                                       int bcp_offset,
+                                                       bool giant_index) {
+  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+  if (!giant_index) {
+    load_unsigned_short(index, Address(r13, bcp_offset));
+  } else {
+    assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
+    movl(index, Address(r13, bcp_offset));
+    // Check if the secondary index definition is still ~x, otherwise
+    // we have to change the following assembler code to calculate the
+    // plain index.
+    assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
+    notl(index);  // convert to plain index
+  }
+}
+
+
 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
                                                            Register index,
-                                                           int bcp_offset) {
-  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+                                                           int bcp_offset,
+                                                           bool giant_index) {
   assert(cache != index, "must use different registers");
-  load_unsigned_short(index, Address(r13, bcp_offset));
+  get_cache_index_at_bcp(index, bcp_offset, giant_index);
   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
   // convert from field index to ConstantPoolCacheEntry index
@@ -200,10 +218,10 @@
 
 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                                Register tmp,
-                                                               int bcp_offset) {
-  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+                                                               int bcp_offset,
+                                                               bool giant_index) {
   assert(cache != tmp, "must use different register");
-  load_unsigned_short(tmp, Address(r13, bcp_offset));
+  get_cache_index_at_bcp(tmp, bcp_offset, giant_index);
   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
   // convert from field index to ConstantPoolCacheEntry index
   // and from word offset to byte offset
@@ -1236,18 +1254,28 @@
 
 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                      Register mdp,
-                                                     Register reg2) {
+                                                     Register reg2,
+                                                     bool receiver_can_be_null) {
   if (ProfileInterpreter) {
     Label profile_continue;
 
     // If no method data exists, go to profile_continue.
     test_method_data_pointer(mdp, profile_continue);
 
-    // We are making a call.  Increment the count.
-    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+    Label skip_receiver_profile;
+    if (receiver_can_be_null) {
+      Label not_null;
+      testptr(receiver, receiver);
+      jccb(Assembler::notZero, not_null);
+      // We are making a call.  Increment the count for null receiver.
+      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+      jmp(skip_receiver_profile);
+      bind(not_null);
+    }
 
     // Record the receiver type.
-    record_klass_in_profile(receiver, mdp, reg2);
+    record_klass_in_profile(receiver, mdp, reg2, true);
+    bind(skip_receiver_profile);
 
     // The method data pointer needs to be updated to reflect the new target.
     update_mdp_by_constant(mdp,
@@ -1270,10 +1298,14 @@
 // See below for example code.
 void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                         Register receiver, Register mdp,
-                                        Register reg2,
-                                        int start_row, Label& done) {
-  if (TypeProfileWidth == 0)
+                                        Register reg2, int start_row,
+                                        Label& done, bool is_virtual_call) {
+  if (TypeProfileWidth == 0) {
+    if (is_virtual_call) {
+      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+    }
     return;
+  }
 
   int last_row = VirtualCallData::row_limit() - 1;
   assert(start_row <= last_row, "must be work left to do");
@@ -1301,19 +1333,28 @@
     bind(next_test);
 
     if (test_for_null_also) {
+      Label found_null;
       // Failed the equality check on receiver[n]...  Test for null.
       testptr(reg2, reg2);
       if (start_row == last_row) {
         // The only thing left to do is handle the null case.
-        jcc(Assembler::notZero, done);
+        if (is_virtual_call) {
+          jccb(Assembler::zero, found_null);
+          // Receiver did not match any saved receiver and there is no empty row for it.
+          // Increment total counter to indicate polymorphic case.
+          increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+          jmp(done);
+          bind(found_null);
+        } else {
+          jcc(Assembler::notZero, done);
+        }
         break;
       }
       // Since null is rare, make it be the branch-taken case.
-      Label found_null;
       jcc(Assembler::zero, found_null);
 
       // Put all the "Case 3" tests here.
-      record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done);
+      record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done, is_virtual_call);
 
       // Found a null.  Keep searching for a matching receiver,
       // but remember that this is an empty (unused) slot.
@@ -1330,7 +1371,9 @@
   int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
   movl(reg2, DataLayout::counter_increment);
   set_mdp_data_at(mdp, count_offset, reg2);
-  jmp(done);
+  if (start_row > 0) {
+    jmp(done);
+  }
 }
 
 // Example state machine code for three profile rows:
@@ -1342,7 +1385,7 @@
 //     if (row[1].rec != NULL) {
 //       // degenerate decision tree, rooted at row[2]
 //       if (row[2].rec == rec) { row[2].incr(); goto done; }
-//       if (row[2].rec != NULL) { goto done; } // overflow
+//       if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
 //       row[2].init(rec); goto done;
 //     } else {
 //       // remember row[1] is empty
@@ -1355,14 +1398,15 @@
 //     if (row[2].rec == rec) { row[2].incr(); goto done; }
 //     row[0].init(rec); goto done;
 //   }
+//   done:
 
 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
-                                                        Register mdp,
-                                                        Register reg2) {
+                                                        Register mdp, Register reg2,
+                                                        bool is_virtual_call) {
   assert(ProfileInterpreter, "must be profiling");
   Label done;
 
-  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);
+  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);
 
   bind (done);
 }
@@ -1458,7 +1502,7 @@
       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
 
       // Record the object type.
-      record_klass_in_profile(klass, mdp, reg2);
+      record_klass_in_profile(klass, mdp, reg2, false);
     }
     update_mdp_by_constant(mdp, mdp_delta);
 
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp	Wed Mar 24 14:16:57 2010 -0700
@@ -95,9 +95,10 @@
 
   void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
   void get_cache_and_index_at_bcp(Register cache, Register index,
-                                  int bcp_offset);
+                                  int bcp_offset, bool giant_index = false);
   void get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
-                                      int bcp_offset);
+                                      int bcp_offset, bool giant_index = false);
+  void get_cache_index_at_bcp(Register index, int bcp_offset, bool giant_index = false);
 
 
   void pop_ptr(Register r = rax);
@@ -221,10 +222,10 @@
                         Label& not_equal_continue);
 
   void record_klass_in_profile(Register receiver, Register mdp,
-                               Register reg2);
+                               Register reg2, bool is_virtual_call);
   void record_klass_in_profile_helper(Register receiver, Register mdp,
-                                      Register reg2,
-                                      int start_row, Label& done);
+                                      Register reg2, int start_row,
+                                      Label& done, bool is_virtual_call);
 
   void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
   void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
@@ -236,7 +237,8 @@
   void profile_call(Register mdp);
   void profile_final_call(Register mdp);
   void profile_virtual_call(Register receiver, Register mdp,
-                            Register scratch2);
+                            Register scratch2,
+                            bool receiver_can_be_null = false);
   void profile_ret(Register return_bci, Register mdp);
   void profile_null_seen(Register mdp);
   void profile_typecheck(Register mdp, Register klass, Register scratch);
--- a/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -277,12 +277,11 @@
   address entry_point = __ pc();
 
   // abstract method entry
-  // remove return address. Not really needed, since exception
-  // handling throws away expression stack
-  __ pop(rbx);
 
-  // adjust stack to what a normal return would do
-  __ mov(rsp, r13);
+  //  pop return address, reset last_sp to NULL
+  __ empty_expression_stack();
+  __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
+  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
 
   // throw exception
   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
@@ -300,7 +299,10 @@
   if (!EnableMethodHandles) {
     return generate_abstract_entry();
   }
-  return generate_abstract_entry(); //6815692//
+
+  address entry_point = MethodHandles::generate_method_handle_interpreter_entry(_masm);
+
+  return entry_point;
 }
 
 
--- a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -65,9 +65,9 @@
   // Verify that argslot lies within (rsp, rbp].
   Label L_ok, L_bad;
   __ cmpptr(rax_argslot, rbp);
-  __ jcc(Assembler::above, L_bad);
+  __ jccb(Assembler::above, L_bad);
   __ cmpptr(rsp, rax_argslot);
-  __ jcc(Assembler::below, L_ok);
+  __ jccb(Assembler::below, L_ok);
   __ bind(L_bad);
   __ stop(error_message);
   __ bind(L_ok);
@@ -136,9 +136,9 @@
   if (arg_slots.is_register()) {
     Label L_ok, L_bad;
     __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
-    __ jcc(Assembler::greater, L_bad);
+    __ jccb(Assembler::greater, L_bad);
     __ testl(arg_slots.as_register(), -stack_move_unit() - 1);
-    __ jcc(Assembler::zero, L_ok);
+    __ jccb(Assembler::zero, L_ok);
     __ bind(L_bad);
     __ stop("assert arg_slots <= 0 and clear low bits");
     __ bind(L_ok);
@@ -173,7 +173,7 @@
     __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
     __ addptr(rdx_temp, wordSize);
     __ cmpptr(rdx_temp, rax_argslot);
-    __ jcc(Assembler::less, loop);
+    __ jccb(Assembler::less, loop);
   }
 
   // Now move the argslot down, to point to the opened-up space.
@@ -211,9 +211,9 @@
     Label L_ok, L_bad;
     __ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr));
     __ cmpptr(rbx_temp, rbp);
-    __ jcc(Assembler::above, L_bad);
+    __ jccb(Assembler::above, L_bad);
     __ cmpptr(rsp, rax_argslot);
-    __ jcc(Assembler::below, L_ok);
+    __ jccb(Assembler::below, L_ok);
     __ bind(L_bad);
     __ stop("deleted argument(s) must fall within current frame");
     __ bind(L_ok);
@@ -221,9 +221,9 @@
   if (arg_slots.is_register()) {
     Label L_ok, L_bad;
     __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
-    __ jcc(Assembler::less, L_bad);
+    __ jccb(Assembler::less, L_bad);
     __ testl(arg_slots.as_register(), -stack_move_unit() - 1);
-    __ jcc(Assembler::zero, L_ok);
+    __ jccb(Assembler::zero, L_ok);
     __ bind(L_bad);
     __ stop("assert arg_slots >= 0 and clear low bits");
     __ bind(L_ok);
@@ -258,7 +258,7 @@
     __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
     __ addptr(rdx_temp, -wordSize);
     __ cmpptr(rdx_temp, rsp);
-    __ jcc(Assembler::greaterEqual, loop);
+    __ jccb(Assembler::greaterEqual, loop);
   }
 
   // Now move the argslot up, to point to the just-copied block.
@@ -268,8 +268,9 @@
 }
 
 #ifndef PRODUCT
+extern "C" void print_method_handle(oop mh);
 void trace_method_handle_stub(const char* adaptername,
-                              oopDesc* mh,
+                              oop mh,
                               intptr_t* entry_sp,
                               intptr_t* saved_sp,
                               intptr_t* saved_bp) {
@@ -280,6 +281,7 @@
          adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
   if (last_sp != saved_sp)
     printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
+  if (Verbose)  print_method_handle(mh);
 }
 #endif //PRODUCT
 
@@ -382,11 +384,11 @@
       // FIXME: fill in _raise_exception_method with a suitable sun.dyn method
       __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
       __ testptr(rbx_method, rbx_method);
-      __ jcc(Assembler::zero, no_method);
+      __ jccb(Assembler::zero, no_method);
       int jobject_oop_offset = 0;
       __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset));  // dereference the jobject
       __ testptr(rbx_method, rbx_method);
-      __ jcc(Assembler::zero, no_method);
+      __ jccb(Assembler::zero, no_method);
       __ verify_oop(rbx_method);
       __ push(rdi_pc);          // and restore caller PC
       __ jmp(rbx_method_fie);
@@ -448,7 +450,7 @@
                                 rbx_index, Address::times_ptr,
                                 base + vtableEntry::method_offset_in_bytes());
       Register rbx_method = rbx_temp;
-      __ movl(rbx_method, vtable_entry_addr);
+      __ movptr(rbx_method, vtable_entry_addr);
 
       __ verify_oop(rbx_method);
       __ jmp(rbx_method_fie);
@@ -533,16 +535,15 @@
       if (arg_type == T_OBJECT) {
         __ movptr(Address(rax_argslot, 0), rbx_temp);
       } else {
-        __ load_sized_value(rbx_temp, prim_value_addr,
+        __ load_sized_value(rdx_temp, prim_value_addr,
                             type2aelembytes(arg_type), is_signed_subword_type(arg_type));
-        __ movptr(Address(rax_argslot, 0), rbx_temp);
+        __ movptr(Address(rax_argslot, 0), rdx_temp);
 #ifndef _LP64
         if (arg_slots == 2) {
-          __ movl(rbx_temp, prim_value_addr.plus_disp(wordSize));
-          __ movl(Address(rax_argslot, Interpreter::stackElementSize()), rbx_temp);
+          __ movl(rdx_temp, prim_value_addr.plus_disp(wordSize));
+          __ movl(Address(rax_argslot, Interpreter::stackElementSize()), rdx_temp);
         }
 #endif //_LP64
-        break;
       }
 
       if (direct_to_method) {
@@ -584,7 +585,7 @@
       Label done;
       __ movptr(rdx_temp, vmarg);
       __ testl(rdx_temp, rdx_temp);
-      __ jcc(Assembler::zero, done);          // no cast if null
+      __ jccb(Assembler::zero, done);         // no cast if null
       __ load_klass(rdx_temp, rdx_temp);
 
       // live at this point:
@@ -675,24 +676,24 @@
       // (now we are done with the old MH)
 
       // original 32-bit vmdata word must be of this form:
-      //    | MBZ:16 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
-      __ xchgl(rcx, rbx_vminfo);                // free rcx for shifts
+      //    | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
+      __ xchgptr(rcx, rbx_vminfo);                // free rcx for shifts
       __ shll(rdx_temp /*, rcx*/);
       Label zero_extend, done;
       __ testl(rcx, CONV_VMINFO_SIGN_FLAG);
-      __ jcc(Assembler::zero, zero_extend);
+      __ jccb(Assembler::zero, zero_extend);
 
       // this path is taken for int->byte, int->short
       __ sarl(rdx_temp /*, rcx*/);
-      __ jmp(done);
+      __ jmpb(done);
 
       __ bind(zero_extend);
       // this is taken for int->char
       __ shrl(rdx_temp /*, rcx*/);
 
       __ bind(done);
-      __ movptr(vmarg, rdx_temp);
-      __ xchgl(rcx, rbx_vminfo);                // restore rcx_recv
+      __ movl(vmarg, rdx_temp);
+      __ xchgptr(rcx, rbx_vminfo);                // restore rcx_recv
 
       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
     }
@@ -861,7 +862,7 @@
             // Verify that argslot > destslot, by at least swap_bytes.
             Label L_ok;
             __ cmpptr(rax_argslot, rbx_destslot);
-            __ jcc(Assembler::aboveEqual, L_ok);
+            __ jccb(Assembler::aboveEqual, L_ok);
             __ stop("source must be above destination (upward rotation)");
             __ bind(L_ok);
           }
@@ -877,7 +878,7 @@
           __ movptr(Address(rax_argslot, swap_bytes), rdx_temp);
           __ addptr(rax_argslot, -wordSize);
           __ cmpptr(rax_argslot, rbx_destslot);
-          __ jcc(Assembler::aboveEqual, loop);
+          __ jccb(Assembler::aboveEqual, loop);
         } else {
           __ addptr(rax_argslot, swap_bytes);
 #ifdef ASSERT
@@ -885,7 +886,7 @@
             // Verify that argslot < destslot, by at least swap_bytes.
             Label L_ok;
             __ cmpptr(rax_argslot, rbx_destslot);
-            __ jcc(Assembler::belowEqual, L_ok);
+            __ jccb(Assembler::belowEqual, L_ok);
             __ stop("source must be below destination (downward rotation)");
             __ bind(L_ok);
           }
@@ -901,7 +902,7 @@
           __ movptr(Address(rax_argslot, -swap_bytes), rdx_temp);
           __ addptr(rax_argslot, wordSize);
           __ cmpptr(rax_argslot, rbx_destslot);
-          __ jcc(Assembler::belowEqual, loop);
+          __ jccb(Assembler::belowEqual, loop);
         }
 
         // pop the original first chunk into the destination slot, now free
@@ -967,7 +968,7 @@
       __ addptr(rax_argslot, wordSize);
       __ addptr(rdx_newarg, wordSize);
       __ cmpptr(rdx_newarg, rbx_oldarg);
-      __ jcc(Assembler::less, loop);
+      __ jccb(Assembler::less, loop);
 
       __ pop(rdi);              // restore temp
 
@@ -1119,7 +1120,7 @@
         }
         __ addptr(rax_argslot, Interpreter::stackElementSize());
         __ cmpptr(rax_argslot, rdx_argslot_limit);
-        __ jcc(Assembler::less, loop);
+        __ jccb(Assembler::less, loop);
       } else if (length_constant == 0) {
         __ bind(skip_array_check);
         // nothing to copy
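
Most of the edits in this file swap jcc/jmp for jccb/jmpb. The b-suffixed emitters produce the 2-byte short form with an 8-bit displacement instead of the longer near form with a 32-bit displacement, which is only safe because every target label here is bound within about a hundred bytes of the branch. A minimal sketch of the frame-bounds assertion idiom these hunks touch, with an assumed helper name (the real code lives inline in this file):

#define __ _masm->

// Hypothetical helper mirroring the verify-argslot pattern above.
static void verify_slot_in_frame(MacroAssembler* _masm, Register slot,
                                 const char* error_message) {
  Label L_ok, L_bad;
  __ cmpptr(slot, rbp);              // the slot must not lie above the frame pointer
  __ jccb(Assembler::above, L_bad);  // short jump: L_bad is only a few bytes away
  __ cmpptr(rsp, slot);              // and rsp must not lie above the slot
  __ jccb(Assembler::below, L_ok);
  __ bind(L_bad);
  __ stop(error_message);            // debug-time halt, as in the original assertions
  __ bind(L_ok);
}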
--- a/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -43,11 +43,11 @@
 // This code is entered with a jmp.
 //
 // Arguments:
-//   rax,: exception oop
+//   rax: exception oop
 //   rdx: exception pc
 //
 // Results:
-//   rax,: exception oop
+//   rax: exception oop
 //   rdx: exception pc in caller or ???
 //   destination: exception handler of caller
 //
@@ -113,17 +113,17 @@
   __ addptr(rsp, return_off * wordSize);   // Epilog!
   __ pop(rdx); // Exception pc
 
+  // rax: exception handler for given <exception oop/exception pc>
 
-  // rax,: exception handler for given <exception oop/exception pc>
+  // Restore SP from BP if the exception PC is a MethodHandle call.
+  __ cmpl(Address(rcx, JavaThread::is_method_handle_exception_offset()), 0);
+  __ cmovptr(Assembler::notEqual, rsp, rbp);
 
   // We have a handler in rax, (could be deopt blob)
   // rdx - throwing pc, deopt blob will need it.
 
   __ push(rax);
 
-  // rcx contains handler address
-
-  __ get_thread(rcx);           // TLS
   // Get the exception
   __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
   // Get the exception pc in case we are deoptimized
@@ -137,7 +137,7 @@
 
   __ pop(rcx);
 
-  // rax,: exception oop
+  // rax: exception oop
   // rcx: exception handler
   // rdx: exception pc
   __ jmp (rcx);
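
The added cmpl/cmovptr pair rewinds RSP to the value parked in RBP whenever the throwing PC belongs to a MethodHandle call site (the preserve_SP encoding added to the .ad files further down stores it there). A C-level sketch of the condition, with the thread flag spelled as a plain boolean (the accessor is assumed; the real code reads the field at JavaThread::is_method_handle_exception_offset()):

#include <cstdint>

static intptr_t* exception_handler_sp(bool is_method_handle_exception,
                                      intptr_t* rsp, intptr_t* rbp) {
  // cmpl(Address(thread, is_method_handle_exception_offset()), 0);
  // cmovptr(Assembler::notEqual, rsp, rbp);
  return is_method_handle_exception ? rbp : rsp;
}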
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -907,7 +907,8 @@
                                                             int total_args_passed,
                                                             int comp_args_on_stack,
                                                             const BasicType *sig_bt,
-                                                            const VMRegPair *regs) {
+                                                            const VMRegPair *regs,
+                                                            AdapterFingerPrint* fingerprint) {
   address i2c_entry = __ pc();
 
   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
@@ -954,7 +955,7 @@
   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 
   __ flush();
-  return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry);
+  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 }
 
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
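
Both adapter generators now take an AdapterFingerPrint and hand the generated entries to AdapterHandlerLibrary::new_entry instead of allocating an AdapterHandlerEntry directly, so i2c/c2i adapters can be cached and shared by signature fingerprint rather than emitted once per method. A self-contained sketch of the sharing idea (container and names are illustrative, not HotSpot's own data structures):

#include <cstdint>
#include <map>

struct AdapterEntry { void* i2c_entry; void* c2i_entry; void* c2i_unverified_entry; };

// One adapter per distinct calling-convention fingerprint, shared by every
// method whose signature reduces to that fingerprint.
static std::map<uint64_t, AdapterEntry*> adapter_table;

AdapterEntry* lookup_or_generate(uint64_t fingerprint,
                                 AdapterEntry* (*generate)(uint64_t)) {
  std::map<uint64_t, AdapterEntry*>::iterator it = adapter_table.find(fingerprint);
  if (it != adapter_table.end())
    return it->second;                            // reuse the existing stub pair
  AdapterEntry* entry = generate(fingerprint);    // emit the i2c/c2i code once
  adapter_table[fingerprint] = entry;
  return entry;
}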
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -638,6 +638,10 @@
 
   __ movptr(rax, Address(rsp, 0));
 
+  // Must preserve original SP for loading incoming arguments because
+  // we need to align the outgoing SP for compiled code.
+  __ movptr(r11, rsp);
+
   // Cut-out for having no stack args.  Since up to 2 int/oop args are passed
   // in registers, we will occasionally have no stack args.
   int comp_words_on_stack = 0;
@@ -661,6 +665,10 @@
   // as far as the placement of the call instruction
   __ push(rax);
 
+  // Put saved SP in another register
+  const Register saved_sp = rax;
+  __ movptr(saved_sp, r11);
+
   // Will jump to the compiled code just as if compiled code was doing it.
   // Pre-load the register-jump target early, to schedule it better.
   __ movptr(r11, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset())));
@@ -680,11 +688,7 @@
     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
             "scrambled load targets?");
     // Load in argument order going down.
-    // int ld_off = (total_args_passed + comp_words_on_stack -i)*wordSize;
-    // base ld_off on r13 (sender_sp) as the stack alignment makes offsets from rsp
-    // unpredictable
-    int ld_off = ((total_args_passed - 1) - i)*Interpreter::stackElementSize();
-
+    int ld_off = (total_args_passed - i)*Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
     // Point to interpreter value (vs. tag)
     int next_off = ld_off - Interpreter::stackElementSize();
     //
@@ -699,10 +703,14 @@
     if (r_1->is_stack()) {
       // Convert stack slot to an SP offset (+ wordSize to account for return address )
       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
+
+      // We can use r13 as a temp here because compiled code doesn't need r13 as an input,
+      // and if we end up going through a c2i adapter because of a miss, a reasonable
+      // value of r13 will be generated.
       if (!r_2->is_valid()) {
         // sign extend???
-        __ movl(rax, Address(r13, ld_off));
-        __ movptr(Address(rsp, st_off), rax);
+        __ movl(r13, Address(saved_sp, ld_off));
+        __ movptr(Address(rsp, st_off), r13);
       } else {
         //
         // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
@@ -715,9 +723,9 @@
         // ld_off is MSW so get LSW
         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                            next_off : ld_off;
-        __ movq(rax, Address(r13, offset));
+        __ movq(r13, Address(saved_sp, offset));
         // st_off is LSW (i.e. reg.first())
-        __ movq(Address(rsp, st_off), rax);
+        __ movq(Address(rsp, st_off), r13);
       }
     } else if (r_1->is_Register()) {  // Register argument
       Register r = r_1->as_Register();
@@ -732,16 +740,16 @@
                            next_off : ld_off;
 
         // this can be a misaligned move
-        __ movq(r, Address(r13, offset));
+        __ movq(r, Address(saved_sp, offset));
       } else {
         // sign extend and use a full word?
-        __ movl(r, Address(r13, ld_off));
+        __ movl(r, Address(saved_sp, ld_off));
       }
     } else {
       if (!r_2->is_valid()) {
-        __ movflt(r_1->as_XMMRegister(), Address(r13, ld_off));
+        __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
       } else {
-        __ movdbl(r_1->as_XMMRegister(), Address(r13, next_off));
+        __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
       }
     }
   }
@@ -770,7 +778,8 @@
                                                             int total_args_passed,
                                                             int comp_args_on_stack,
                                                             const BasicType *sig_bt,
-                                                            const VMRegPair *regs) {
+                                                            const VMRegPair *regs,
+                                                            AdapterFingerPrint* fingerprint) {
   address i2c_entry = __ pc();
 
   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
@@ -816,7 +825,7 @@
   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 
   __ flush();
-  return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry);
+  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 }
 
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
@@ -3319,6 +3328,10 @@
 
   // rax: exception handler
 
+  // Restore SP from BP if the exception PC is a MethodHandle call.
+  __ cmpl(Address(r15_thread, JavaThread::is_method_handle_exception_offset()), 0);
+  __ cmovptr(Assembler::notEqual, rsp, rbp);
+
   // We have a handler in rax (could be deopt blob).
   __ mov(r8, rax);
 
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -718,10 +718,8 @@
       case BarrierSet::G1SATBCTLogging:
         {
           __ pusha();                      // push registers
-          __ push(count);
-          __ push(start);
-          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)));
-          __ addptr(rsp, 2*wordSize);
+          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre),
+                          start, count);
           __ popa();
         }
         break;
@@ -752,10 +750,8 @@
       case BarrierSet::G1SATBCTLogging:
         {
           __ pusha();                      // push registers
-          __ push(count);
-          __ push(start);
-          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
-          __ addptr(rsp, 2*wordSize);
+          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post),
+                          start, count);
           __ popa();
         }
         break;
@@ -2030,6 +2026,54 @@
                                entry_checkcast_arraycopy);
   }
 
+  void generate_math_stubs() {
+    {
+      StubCodeMark mark(this, "StubRoutines", "log");
+      StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();
+
+      __ fld_d(Address(rsp, 4));
+      __ flog();
+      __ ret(0);
+    }
+    {
+      StubCodeMark mark(this, "StubRoutines", "log10");
+      StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();
+
+      __ fld_d(Address(rsp, 4));
+      __ flog10();
+      __ ret(0);
+    }
+    {
+      StubCodeMark mark(this, "StubRoutines", "sin");
+      StubRoutines::_intrinsic_sin = (double (*)(double))  __ pc();
+
+      __ fld_d(Address(rsp, 4));
+      __ trigfunc('s');
+      __ ret(0);
+    }
+    {
+      StubCodeMark mark(this, "StubRoutines", "cos");
+      StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();
+
+      __ fld_d(Address(rsp, 4));
+      __ trigfunc('c');
+      __ ret(0);
+    }
+    {
+      StubCodeMark mark(this, "StubRoutines", "tan");
+      StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();
+
+      __ fld_d(Address(rsp, 4));
+      __ trigfunc('t');
+      __ ret(0);
+    }
+
+    // The intrinsic versions of these seem to return the same values as
+    // the strict versions.
+    StubRoutines::_intrinsic_exp = SharedRuntime::dexp;
+    StubRoutines::_intrinsic_pow = SharedRuntime::dpow;
+  }
+
  public:
   // Information about frame layout at time of blocking runtime call.
   // Note that we only have to preserve callee-saved registers since
@@ -2228,6 +2272,8 @@
         MethodHandles::generate_method_handle_stub(_masm, ek);
       }
     }
+
+    generate_math_stubs();
   }
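
The new 32-bit math stubs lean on the IA-32 cdecl convention for a double-returning function: the argument sits on the stack at [esp+4], just past the return address (hence fld_d(Address(rsp, 4))), and the result is returned in ST(0), which is exactly where flog/flog10/trigfunc leave it. Each stub is therefore directly callable through an ordinary function pointer. A usage sketch; the call site below is illustrative, not an actual HotSpot consumer:

#include <cmath>

typedef double (*unary_math_fn)(double);

// 'intrinsic_log' stands in for StubRoutines::_intrinsic_log after the
// generator above has run; fall back to libm if the stub was never installed.
double fast_log(double x, unary_math_fn intrinsic_log) {
  return intrinsic_log != 0 ? intrinsic_log(x) : std::log(x);
}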
 
 
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1172,7 +1172,7 @@
             __ movptr(c_rarg0, addr);
             __ movptr(c_rarg1, count);
           }
-          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)));
+          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
           __ popa();
         }
         break;
@@ -1212,7 +1212,7 @@
           __ shrptr(scratch, LogBytesPerHeapOop);  // convert to element count
           __ mov(c_rarg0, start);
           __ mov(c_rarg1, scratch);
-          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
+          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
           __ popa();
         }
         break;
@@ -2731,6 +2731,79 @@
     StubRoutines::_arrayof_oop_arraycopy             = StubRoutines::_oop_arraycopy;
   }
 
+  void generate_math_stubs() {
+    {
+      StubCodeMark mark(this, "StubRoutines", "log");
+      StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();
+
+      __ subq(rsp, 8);
+      __ movdbl(Address(rsp, 0), xmm0);
+      __ fld_d(Address(rsp, 0));
+      __ flog();
+      __ fstp_d(Address(rsp, 0));
+      __ movdbl(xmm0, Address(rsp, 0));
+      __ addq(rsp, 8);
+      __ ret(0);
+    }
+    {
+      StubCodeMark mark(this, "StubRoutines", "log10");
+      StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();
+
+      __ subq(rsp, 8);
+      __ movdbl(Address(rsp, 0), xmm0);
+      __ fld_d(Address(rsp, 0));
+      __ flog10();
+      __ fstp_d(Address(rsp, 0));
+      __ movdbl(xmm0, Address(rsp, 0));
+      __ addq(rsp, 8);
+      __ ret(0);
+    }
+    {
+      StubCodeMark mark(this, "StubRoutines", "sin");
+      StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();
+
+      __ subq(rsp, 8);
+      __ movdbl(Address(rsp, 0), xmm0);
+      __ fld_d(Address(rsp, 0));
+      __ trigfunc('s');
+      __ fstp_d(Address(rsp, 0));
+      __ movdbl(xmm0, Address(rsp, 0));
+      __ addq(rsp, 8);
+      __ ret(0);
+    }
+    {
+      StubCodeMark mark(this, "StubRoutines", "cos");
+      StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();
+
+      __ subq(rsp, 8);
+      __ movdbl(Address(rsp, 0), xmm0);
+      __ fld_d(Address(rsp, 0));
+      __ trigfunc('c');
+      __ fstp_d(Address(rsp, 0));
+      __ movdbl(xmm0, Address(rsp, 0));
+      __ addq(rsp, 8);
+      __ ret(0);
+    }
+    {
+      StubCodeMark mark(this, "StubRoutines", "tan");
+      StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();
+
+      __ subq(rsp, 8);
+      __ movdbl(Address(rsp, 0), xmm0);
+      __ fld_d(Address(rsp, 0));
+      __ trigfunc('t');
+      __ fstp_d(Address(rsp, 0));
+      __ movdbl(xmm0, Address(rsp, 0));
+      __ addq(rsp, 8);
+      __ ret(0);
+    }
+
+    // The intrinsic versions of these seem to return the same values as
+    // the strict versions.
+    StubRoutines::_intrinsic_exp = SharedRuntime::dexp;
+    StubRoutines::_intrinsic_pow = SharedRuntime::dpow;
+  }
+
 #undef __
 #define __ masm->
 
@@ -2935,6 +3008,18 @@
 
     // arraycopy stubs used by compilers
     generate_arraycopy_stubs();
+
+    // generic method handle stubs
+    if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
+      for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
+           ek < MethodHandles::_EK_LIMIT;
+           ek = MethodHandles::EntryKind(1 + (int)ek)) {
+        StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
+        MethodHandles::generate_method_handle_stub(_masm, ek);
+      }
+    }
+
+    generate_math_stubs();
   }
 
  public:
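
The 64-bit variants need the extra subq/movdbl/fld_d and fstp_d/movdbl/addq bracketing because the SysV AMD64 ABI passes and returns doubles in xmm0, while the x87 transcendental instructions behind flog/trigfunc only operate on the FPU register stack, so the value is bounced through an 8-byte stack slot in each direction. A rough inline-assembly sketch of the log stub's data flow (GCC/Clang extended asm, x86-64 only; flog() is assumed to expand to fldln2/fxch/fyl2x):

static double log_stub_sketch(double x) {
  double r;
  __asm__ ("fldl   %1\n\t"   // load the argument onto the x87 stack
           "fldln2\n\t"      // st0 = ln(2), st1 = x
           "fxch\n\t"        // st0 = x,     st1 = ln(2)
           "fyl2x\n\t"       // st0 = ln(2) * log2(x) = ln(x), one slot popped
           "fstpl  %0"       // store the result back to memory and pop
           : "=m"(r)
           : "m"(x));
  return r;
}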
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -155,15 +155,8 @@
 }
 
 
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, bool unbox) {
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
   TosState incoming_state = state;
-  if (EnableInvokeDynamic) {
-    if (unbox) {
-      incoming_state = atos;
-    }
-  } else {
-    assert(!unbox, "old behavior");
-  }
 
   Label interpreter_entry;
   address compiled_entry = __ pc();
@@ -216,46 +209,6 @@
   __ restore_bcp();
   __ restore_locals();
 
-  Label L_fail;
-
-  if (unbox && state != atos) {
-    // cast and unbox
-    BasicType type = as_BasicType(state);
-    if (type == T_BYTE)  type = T_BOOLEAN; // FIXME
-    KlassHandle boxk = SystemDictionaryHandles::box_klass(type);
-    __ mov32(rbx, ExternalAddress((address) boxk.raw_value()));
-    __ testl(rax, rax);
-    Label L_got_value, L_get_value;
-    // convert nulls to zeroes (avoid NPEs here)
-    if (!(type == T_FLOAT || type == T_DOUBLE)) {
-      // if rax already contains zero bits, forge ahead
-      __ jcc(Assembler::zero, L_got_value);
-    } else {
-      __ jcc(Assembler::notZero, L_get_value);
-      __ fldz();
-      __ jmp(L_got_value);
-    }
-    __ bind(L_get_value);
-    __ cmp32(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
-    __ jcc(Assembler::notEqual, L_fail);
-    int offset = java_lang_boxing_object::value_offset_in_bytes(type);
-    // Cf. TemplateTable::getfield_or_static
-    switch (type) {
-      case T_BYTE:     // fall through:
-      case T_BOOLEAN:  __ load_signed_byte(rax, Address(rax, offset));    break;
-      case T_CHAR:     __ load_unsigned_short(rax, Address(rax, offset)); break;
-      case T_SHORT:    __ load_signed_short(rax, Address(rax, offset));   break;
-      case T_INT:      __ movl(rax, Address(rax, offset));                break;
-      case T_FLOAT:    __ fld_s(Address(rax, offset));                    break;
-      case T_DOUBLE:   __ fld_d(Address(rax, offset));                    break;
-      // Access to java.lang.Double.value does not need to be atomic:
-      case T_LONG:   { __ movl(rdx, Address(rax, offset + 4));
-                       __ movl(rax, Address(rax, offset + 0));  }         break;
-      default: ShouldNotReachHere();
-    }
-    __ bind(L_got_value);
-  }
-
   Label L_got_cache, L_giant_index;
   if (EnableInvokeDynamic) {
     __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
@@ -263,32 +216,6 @@
   }
   __ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
   __ bind(L_got_cache);
-  if (unbox && state == atos) {
-    // insert a casting conversion, to keep verifier sane
-    Label L_ok, L_ok_pops;
-    __ testl(rax, rax);
-    __ jcc(Assembler::zero, L_ok);
-    __ push(rax);               // save the object to check
-    __ push(rbx);               // save CP cache reference
-    __ movl(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
-    __ movl(rbx, Address(rbx, rcx,
-                      Address::times_4, constantPoolCacheOopDesc::base_offset() +
-                      ConstantPoolCacheEntry::f1_offset()));
-    __ movl(rbx, Address(rbx, __ delayed_value(sun_dyn_CallSiteImpl::type_offset_in_bytes, rcx)));
-    __ movl(rbx, Address(rbx, __ delayed_value(java_dyn_MethodType::rtype_offset_in_bytes, rcx)));
-    __ movl(rax, Address(rbx, __ delayed_value(java_lang_Class::klass_offset_in_bytes, rcx)));
-    __ check_klass_subtype(rdx, rax, rbx, L_ok_pops);
-    __ pop(rcx);                // pop and discard CP cache
-    __ mov(rbx, rax);           // target supertype into rbx for L_fail
-    __ pop(rax);                // failed object into rax for L_fail
-    __ jmp(L_fail);
-
-    __ bind(L_ok_pops);
-    // restore pushed temp regs:
-    __ pop(rbx);
-    __ pop(rax);
-    __ bind(L_ok);
-  }
   __ movl(rbx, Address(rbx, rcx,
                     Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
                     ConstantPoolCacheEntry::flags_offset()));
@@ -301,14 +228,6 @@
     __ bind(L_giant_index);
     __ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
     __ jmp(L_got_cache);
-
-    if (unbox) {
-      __ bind(L_fail);
-      __ push(rbx);             // missed klass (required)
-      __ push(rax);             // bad object (actual)
-      __ movptr(rdx, ExternalAddress((address) &Interpreter::_throw_WrongMethodType_entry));
-      __ call(rdx);
-    }
   }
 
   return entry;
@@ -1512,6 +1431,23 @@
 
 }
 
+// These should never be compiled: otherwise the interpreter would prefer
+// the compiled version to the intrinsic version.
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  switch (method_kind(m)) {
+    case Interpreter::java_lang_math_sin     : // fall thru
+    case Interpreter::java_lang_math_cos     : // fall thru
+    case Interpreter::java_lang_math_tan     : // fall thru
+    case Interpreter::java_lang_math_abs     : // fall thru
+    case Interpreter::java_lang_math_log     : // fall thru
+    case Interpreter::java_lang_math_log10   : // fall thru
+    case Interpreter::java_lang_math_sqrt    :
+      return false;
+    default:
+      return true;
+  }
+}
+
 // How much stack a method activation needs in words.
 int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
 
@@ -1569,7 +1505,10 @@
 
   if (interpreter_frame != NULL) {
 #ifdef ASSERT
-    assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
+    if (!EnableMethodHandles)
+      // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
+      // Probably, since deoptimization doesn't work yet.
+      assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
     assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
 #endif
 
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -100,21 +100,26 @@
   return entry;
 }
 
-// Arguments are: required type in rarg1, failing object (or NULL) in rarg2
+// Arguments are: required type at TOS+8, failing object (or NULL) at TOS.
 address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
   address entry = __ pc();
 
   __ pop(c_rarg2);              // failing object is at TOS
   __ pop(c_rarg1);              // required type is at TOS+8
 
-  // expression stack must be empty before entering the VM if an
-  // exception happened
+  __ verify_oop(c_rarg1);
+  __ verify_oop(c_rarg2);
+
+  // Various method handle types use interpreter registers as temps.
+  __ restore_bcp();
+  __ restore_locals();
+
+  // Expression stack must be empty before entering the VM for an exception.
   __ empty_expression_stack();
 
   __ call_VM(noreg,
              CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::
-                              throw_WrongMethodTypeException),
+                              InterpreterRuntime::throw_WrongMethodTypeException),
              // pass required type, failing object (or NULL)
              c_rarg1, c_rarg2);
   return entry;
@@ -166,8 +171,7 @@
 
 
 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
-                                                                int step, bool unbox) {
-  assert(!unbox, "NYI");//6815692//
+                                                                int step) {
 
   // amd64 doesn't need to do anything special about compiled returns
   // to the interpreter so the code that exists on x86 to place a sentinel
@@ -183,15 +187,29 @@
   __ restore_bcp();
   __ restore_locals();
 
-  __ get_cache_and_index_at_bcp(rbx, rcx, 1);
+  Label L_got_cache, L_giant_index;
+  if (EnableInvokeDynamic) {
+    __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
+    __ jcc(Assembler::equal, L_giant_index);
+  }
+  __ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
+  __ bind(L_got_cache);
   __ movl(rbx, Address(rbx, rcx,
-                       Address::times_8,
+                       Address::times_ptr,
                        in_bytes(constantPoolCacheOopDesc::base_offset()) +
                        3 * wordSize));
   __ andl(rbx, 0xFF);
   if (TaggedStackInterpreter) __ shll(rbx, 1); // 2 slots per parameter.
   __ lea(rsp, Address(rsp, rbx, Address::times_8));
   __ dispatch_next(state, step);
+
+  // out of the main line of code...
+  if (EnableInvokeDynamic) {
+    __ bind(L_giant_index);
+    __ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
+    __ jmp(L_got_cache);
+  }
+
   return entry;
 }
 
@@ -431,8 +449,12 @@
   __ addptr(rax, stack_base);
   __ subptr(rax, stack_size);
 
+  // Use the maximum number of pages we might bang.
+  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
+                                                                              (StackRedPages+StackYellowPages);
+
   // add in the red and yellow zone sizes
-  __ addptr(rax, (StackRedPages + StackYellowPages) * page_size);
+  __ addptr(rax, max_pages * page_size);
 
   // check against the current stack bottom
   __ cmpptr(rsp, rax);
@@ -1434,6 +1456,23 @@
                                 generate_normal_entry(synchronized);
 }
 
+// These should never be compiled: otherwise the interpreter would prefer
+// the compiled version to the intrinsic version.
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  switch (method_kind(m)) {
+    case Interpreter::java_lang_math_sin     : // fall thru
+    case Interpreter::java_lang_math_cos     : // fall thru
+    case Interpreter::java_lang_math_tan     : // fall thru
+    case Interpreter::java_lang_math_abs     : // fall thru
+    case Interpreter::java_lang_math_log     : // fall thru
+    case Interpreter::java_lang_math_log10   : // fall thru
+    case Interpreter::java_lang_math_sqrt    :
+      return false;
+    default:
+      return true;
+  }
+}
+
 // How much stack a method activation needs in words.
 int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
   const int entry_size = frame::interpreter_frame_monitor_size();
@@ -1484,8 +1523,10 @@
          tempcount* Interpreter::stackElementWords() + popframe_extra_args;
   if (interpreter_frame != NULL) {
 #ifdef ASSERT
-    assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(),
-           "Frame not properly walkable");
+    if (!EnableMethodHandles)
+      // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
+      // Probably, since deoptimization doesn't work yet.
+      assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
     assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
 #endif
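
The stack-bang hunk above widens the headroom estimate from just the red and yellow zones to whichever is larger of the shadow zone and the red-plus-yellow zones; the ternary is needed because these page counts are runtime VM flags, not compile-time constants. Spelled out as a helper, for illustration only:

// max(StackShadowPages, StackRedPages + StackYellowPages)
static int bang_page_count(int shadow_pages, int red_pages, int yellow_pages) {
  const int guard_pages = red_pages + yellow_pages;
  return shadow_pages > guard_pages ? shadow_pages : guard_pages;
}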
 
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -2890,9 +2890,6 @@
 
 
 void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
-  bool is_invdyn_bootstrap = (byte_no < 0);
-  if (is_invdyn_bootstrap)  byte_no = -byte_no;
-
   // determine flags
   Bytecodes::Code code = bytecode();
   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
@@ -2907,8 +2904,6 @@
   const Register flags  = rdx;
   assert_different_registers(method, index, recv, flags);
 
-  assert(!is_invdyn_bootstrap || is_invokedynamic, "byte_no<0 hack only for invdyn");
-
   // save 'interpreter return address'
   __ save_bcp();
 
@@ -2944,9 +2939,7 @@
   // load return address
   {
     address table_addr;
-    if (is_invdyn_bootstrap)
-      table_addr = (address)Interpreter::return_5_unbox_addrs_by_index_table();
-    else if (is_invokeinterface || is_invokedynamic)
+    if (is_invokeinterface || is_invokedynamic)
       table_addr = (address)Interpreter::return_5_addrs_by_index_table();
     else
       table_addr = (address)Interpreter::return_3_addrs_by_index_table();
@@ -3153,54 +3146,10 @@
     __ profile_call(rsi);
   }
 
-  Label handle_unlinked_site;
-  __ movptr(rcx, Address(rax, __ delayed_value(sun_dyn_CallSiteImpl::target_offset_in_bytes, rcx)));
-  __ testptr(rcx, rcx);
-  __ jcc(Assembler::zero, handle_unlinked_site);
-
+  __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
+  __ null_check(rcx);
   __ prepare_to_jump_from_interpreted();
   __ jump_to_method_handle_entry(rcx, rdx);
-
-  // Initial calls come here...
-  __ bind(handle_unlinked_site);
-  __ pop(rcx);                 // remove return address pushed by prepare_invoke
-
-  // box stacked arguments into an array for the bootstrap method
-  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::bootstrap_invokedynamic);
-  __ restore_bcp();      // rsi must be correct for call_VM
-  __ call_VM(rax, entry, rax);
-  __ movl(rdi, rax);            // protect bootstrap MH from prepare_invoke
-
-  // recompute return address
-  __ restore_bcp();      // rsi must be correct for prepare_invoke
-  prepare_invoke(rax, rbx, -byte_no);  // smashes rcx, rdx
-  // rax: CallSite object (f1)
-  // rbx: unused (f2)
-  // rdi: bootstrap MH
-  // rdx: flags
-
-  // now load up the arglist, which has been neatly boxed
-  __ get_thread(rcx);
-  __ movptr(rdx, Address(rcx, JavaThread::vm_result_2_offset()));
-  __ movptr(Address(rcx, JavaThread::vm_result_2_offset()), NULL_WORD);
-  __ verify_oop(rdx);
-  // rdx = arglist
-
-  // save SP now, before we add the bootstrap call to the stack
-  // We must preserve a fiction that the original arguments are outgoing,
-  // because the return sequence will reset the stack to this point
-  // and then pop all those arguments.  It seems error-prone to use
-  // a different argument list size just for bootstrapping.
-  __ prepare_to_jump_from_interpreted();
-
-  // Now let's play adapter, pushing the real arguments on the stack.
-  __ pop(rbx);                  // return PC
-  __ push(rdi);                 // boot MH
-  __ push(rax);                 // call site
-  __ push(rdx);                 // arglist
-  __ push(rbx);                 // return PC, again
-  __ mov(rcx, rdi);
-  __ jump_to_method_handle_entry(rcx, rdx);
 }
 
 //----------------------------------------------------------------------------------------------------
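
With the bootstrap-on-first-call machinery removed, invokedynamic now assumes linkage happened at resolution time: the CallSite oop arrives in f1, its target MethodHandle is loaded and null-checked, and control transfers through the method-handle interpreter entry. A plain rendering of that load with stand-in types (the real code stays in generated assembly and uses HotSpot's oop and field offsets):

struct MethodHandleRef { /* opaque */ };
struct CallSiteRef { MethodHandleRef* target; };   // java_dyn_CallSite.target

MethodHandleRef* invokedynamic_target(CallSiteRef* call_site) {
  MethodHandleRef* target = call_site->target;  // movptr(rcx, Address(rax, target_offset))
  // __ null_check(rcx): a null target raises NullPointerException here.
  // The interpreter then jumps through the MethodHandle's interpreted entry
  // (jump_to_method_handle_entry) rather than a vtable or itable slot.
  return target;
}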
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -203,18 +203,15 @@
     __ jcc(Assembler::notEqual, fast_patch);
     __ get_method(scratch);
     // Let breakpoint table handling rewrite to quicker bytecode
-    __ call_VM(noreg,
-               CAST_FROM_FN_PTR(address,
-                                InterpreterRuntime::set_original_bytecode_at),
-               scratch, r13, bc);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, r13, bc);
 #ifndef ASSERT
     __ jmpb(patch_done);
+#else
+    __ jmp(patch_done);
+#endif
     __ bind(fast_patch);
   }
-#else
-    __ jmp(patch_done);
-    __ bind(fast_patch);
-  }
+#ifdef ASSERT
   Label okay;
   __ load_unsigned_byte(scratch, at_bcp(0));
   __ cmpl(scratch, (int) Bytecodes::java_code(bytecode));
@@ -2054,26 +2051,28 @@
   }
 }
 
-void TemplateTable::resolve_cache_and_index(int byte_no,
-                                            Register Rcache,
-                                            Register index) {
+void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
   assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
+  bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
 
   const Register temp = rbx;
   assert_different_registers(Rcache, index, temp);
 
   const int shift_count = (1 + byte_no) * BitsPerByte;
   Label resolved;
-  __ get_cache_and_index_at_bcp(Rcache, index, 1);
-  __ movl(temp, Address(Rcache,
-                        index, Address::times_8,
-                        constantPoolCacheOopDesc::base_offset() +
-                        ConstantPoolCacheEntry::indices_offset()));
-  __ shrl(temp, shift_count);
-  // have we resolved this bytecode?
-  __ andl(temp, 0xFF);
-  __ cmpl(temp, (int) bytecode());
-  __ jcc(Assembler::equal, resolved);
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
+  if (is_invokedynamic) {
+    // we are resolved if the f1 field contains a non-null CallSite object
+    __ cmpptr(Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()), (int32_t) NULL_WORD);
+    __ jcc(Assembler::notEqual, resolved);
+  } else {
+    __ movl(temp, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+    __ shrl(temp, shift_count);
+    // have we resolved this bytecode?
+    __ andl(temp, 0xFF);
+    __ cmpl(temp, (int) bytecode());
+    __ jcc(Assembler::equal, resolved);
+  }
 
   // resolve first time through
   address entry;
@@ -2090,6 +2089,9 @@
   case Bytecodes::_invokeinterface:
     entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
     break;
+  case Bytecodes::_invokedynamic:
+    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
+    break;
   default:
     ShouldNotReachHere();
     break;
@@ -2098,7 +2100,7 @@
   __ call_VM(noreg, entry, temp);
 
   // Update registers with resolved info
-  __ get_cache_and_index_at_bcp(Rcache, index, 1);
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
   __ bind(resolved);
 }
 
@@ -2832,15 +2834,14 @@
   ShouldNotReachHere();
 }
 
-void TemplateTable::prepare_invoke(Register method,
-                                   Register index,
-                                   int byte_no,
-                                   Bytecodes::Code code) {
+void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
   // determine flags
+  Bytecodes::Code code = bytecode();
   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
+  const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
-  const bool load_receiver       = code != Bytecodes::_invokestatic;
+  const bool load_receiver      = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
   const bool receiver_null_check = is_invokespecial;
   const bool save_flags = is_invokeinterface || is_invokevirtual;
   // setup registers & access constant pool cache
@@ -2858,9 +2859,13 @@
     __ movl(recv, flags);
     __ andl(recv, 0xFF);
     if (TaggedStackInterpreter) __ shll(recv, 1);  // index*2
-    __ movptr(recv, Address(rsp, recv, Address::times_8,
-                                 -Interpreter::expr_offset_in_bytes(1)));
-    __ verify_oop(recv);
+    Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
+    if (is_invokedynamic) {
+      __ lea(recv, recv_addr);
+    } else {
+      __ movptr(recv, recv_addr);
+      __ verify_oop(recv);
+    }
   }
 
   // do null check if needed
@@ -2878,10 +2883,14 @@
   ConstantPoolCacheEntry::verify_tosBits();
   // load return address
   {
-    ExternalAddress return_5((address)Interpreter::return_5_addrs_by_index_table());
-    ExternalAddress return_3((address)Interpreter::return_3_addrs_by_index_table());
-    __ lea(rscratch1, (is_invokeinterface ? return_5 : return_3));
-    __ movptr(flags, Address(rscratch1, flags, Address::times_8));
+    address table_addr;
+    if (is_invokeinterface || is_invokedynamic)
+      table_addr = (address)Interpreter::return_5_addrs_by_index_table();
+    else
+      table_addr = (address)Interpreter::return_3_addrs_by_index_table();
+    ExternalAddress table(table_addr);
+    __ lea(rscratch1, table);
+    __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
   }
 
   // push return address
@@ -2947,7 +2956,7 @@
 
 void TemplateTable::invokevirtual(int byte_no) {
   transition(vtos, vtos);
-  prepare_invoke(rbx, noreg, byte_no, bytecode());
+  prepare_invoke(rbx, noreg, byte_no);
 
   // rbx: index
   // rcx: receiver
@@ -2959,7 +2968,7 @@
 
 void TemplateTable::invokespecial(int byte_no) {
   transition(vtos, vtos);
-  prepare_invoke(rbx, noreg, byte_no, bytecode());
+  prepare_invoke(rbx, noreg, byte_no);
   // do the call
   __ verify_oop(rbx);
   __ profile_call(rax);
@@ -2969,7 +2978,7 @@
 
 void TemplateTable::invokestatic(int byte_no) {
   transition(vtos, vtos);
-  prepare_invoke(rbx, noreg, byte_no, bytecode());
+  prepare_invoke(rbx, noreg, byte_no);
   // do the call
   __ verify_oop(rbx);
   __ profile_call(rax);
@@ -2983,7 +2992,7 @@
 
 void TemplateTable::invokeinterface(int byte_no) {
   transition(vtos, vtos);
-  prepare_invoke(rax, rbx, byte_no, bytecode());
+  prepare_invoke(rax, rbx, byte_no);
 
   // rax: Interface
   // rbx: index
@@ -3072,7 +3081,24 @@
     return;
   }
 
-  __ stop("invokedynamic NYI");//6815692//
+  prepare_invoke(rax, rbx, byte_no);
+
+  // rax: CallSite object (f1)
+  // rbx: unused (f2)
+  // rcx: receiver address
+  // rdx: flags (unused)
+
+  if (ProfileInterpreter) {
+    Label L;
+    // %%% should make a type profile for any invokedynamic that takes a ref argument
+    // profile this call
+    __ profile_call(r13);
+  }
+
+  __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
+  __ null_check(rcx);
+  __ prepare_to_jump_from_interpreted();
+  __ jump_to_method_handle_entry(rcx, rdx);
 }
 
 
@@ -3212,17 +3238,19 @@
     __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
     __ store_klass_gap(rax, rcx);  // zero klass gap for compressed oops
     __ store_klass(rax, rsi);      // store klass last
+
+    {
+      SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
+      // Trigger dtrace event for fastpath
+      __ push(atos); // save the return value
+      __ call_VM_leaf(
+           CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
+      __ pop(atos); // restore the return value
+
+    }
     __ jmp(done);
   }
 
-  {
-    SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
-    // Trigger dtrace event for fastpath
-    __ push(atos); // save the return value
-    __ call_VM_leaf(
-         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
-    __ pop(atos); // restore the return value
-  }
 
   // slow case
   __ bind(slow_case);
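
The resolved-ness test in resolve_cache_and_index also changed for invokedynamic: instead of comparing the rewritten bytecode stored in the indices field, the entry counts as resolved as soon as f1 holds a non-null CallSite oop. A boolean rendering of the two branches (the struct below is a stand-in; the real ConstantPoolCacheEntry fields are read directly by the generated code):

struct CacheEntrySketch {
  void*    f1;       // CallSite oop for invokedynamic, once resolved
  unsigned indices;  // packed (rewritten) bytecodes for the other invokes
};

static bool is_resolved(const CacheEntrySketch& e, int bytecode,
                        int shift_count, bool is_invokedynamic) {
  if (is_invokedynamic)
    return e.f1 != 0;                                                 // cmpptr(f1, NULL_WORD)
  return ((e.indices >> shift_count) & 0xFF) == (unsigned) bytecode;  // old test
}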
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.hpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.hpp	Wed Mar 24 14:16:57 2010 -0700
@@ -22,8 +22,7 @@
  *
  */
 
-  static void prepare_invoke(Register method, Register index, int byte_no,
-                             Bytecodes::Code code);
+  static void prepare_invoke(Register method, Register index, int byte_no);
   static void invokevirtual_helper(Register index, Register recv,
                                    Register flags);
   static void volatile_barrier(Assembler::Membar_mask_bits order_constraint);
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -255,6 +255,8 @@
   if (!VM_Version::supports_sse2()) {
     vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
   }
+  // In 64-bit mode, SSE2 is the required minimum.
+  if (UseSSE < 2) UseSSE = 2;
 #endif
 
   // If the OS doesn't support SSE, we can't use this feature even if the HW does
--- a/hotspot/src/cpu/x86/vm/x86_32.ad	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad	Wed Mar 24 14:16:57 2010 -0700
@@ -235,6 +235,11 @@
 //----------SOURCE BLOCK-------------------------------------------------------
 // This is a block of C++ code which provides values, functions, and
 // definitions necessary in the rest of the architecture description
+source_hpp %{
+// Must be visible to the DFA in dfa_x86_32.cpp
+extern bool is_operand_hi32_zero(Node* n);
+%}
+
 source %{
 #define   RELOC_IMM32    Assembler::imm_operand
 #define   RELOC_DISP32   Assembler::disp32_operand
@@ -268,22 +273,36 @@
 static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
 static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
 
+// Offset hacking within calls.
+static int pre_call_FPU_size() {
+  if (Compile::current()->in_24_bit_fp_mode())
+    return 6; // fldcw
+  return 0;
+}
+
+static int preserve_SP_size() {
+  return LP64_ONLY(1 +) 2;  // [rex,] op, rm(reg/reg)
+}
+
 // !!!!! Special hack to get all type of calls to specify the byte offset
 //       from the start of the call to the point where the return address
 //       will point.
 int MachCallStaticJavaNode::ret_addr_offset() {
-  return 5 + (Compile::current()->in_24_bit_fp_mode() ? 6 : 0);  // 5 bytes from start of call to where return address points
+  int offset = 5 + pre_call_FPU_size();  // 5 bytes from start of call to where return address points
+  if (_method_handle_invoke)
+    offset += preserve_SP_size();
+  return offset;
 }
 
 int MachCallDynamicJavaNode::ret_addr_offset() {
-  return 10 + (Compile::current()->in_24_bit_fp_mode() ? 6 : 0);  // 10 bytes from start of call to where return address points
+  return 10 + pre_call_FPU_size();  // 10 bytes from start of call to where return address points
 }
 
 static int sizeof_FFree_Float_Stack_All = -1;
 
 int MachCallRuntimeNode::ret_addr_offset() {
   assert(sizeof_FFree_Float_Stack_All != -1, "must have been emitted already");
-  return sizeof_FFree_Float_Stack_All + 5 + (Compile::current()->in_24_bit_fp_mode() ? 6 : 0);
+  return sizeof_FFree_Float_Stack_All + 5 + pre_call_FPU_size();
 }
 
 // Indicate if the safepoint node needs the polling page as an input.
@@ -299,8 +318,16 @@
 // The address of the call instruction needs to be 4-byte aligned to
 // ensure that it does not span a cache line so that it can be patched.
 int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
-  if (Compile::current()->in_24_bit_fp_mode())
-    current_offset += 6;    // skip fldcw in pre_call_FPU, if any
+  current_offset += pre_call_FPU_size();  // skip fldcw, if any
+  current_offset += 1;      // skip call opcode byte
+  return round_to(current_offset, alignment_required()) - current_offset;
+}
+
+// The address of the call instruction needs to be 4-byte aligned to
+// ensure that it does not span a cache line so that it can be patched.
+int CallStaticJavaHandleNode::compute_padding(int current_offset) const {
+  current_offset += pre_call_FPU_size();  // skip fldcw, if any
+  current_offset += preserve_SP_size();   // skip mov rbp, rsp
   current_offset += 1;      // skip call opcode byte
   return round_to(current_offset, alignment_required()) - current_offset;
 }
@@ -308,8 +335,7 @@
 // The address of the call instruction needs to be 4-byte aligned to
 // ensure that it does not span a cache line so that it can be patched.
 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
-  if (Compile::current()->in_24_bit_fp_mode())
-    current_offset += 6;    // skip fldcw in pre_call_FPU, if any
+  current_offset += pre_call_FPU_size();  // skip fldcw, if any
   current_offset += 5;      // skip MOV instruction
   current_offset += 1;      // skip call opcode byte
   return round_to(current_offset, alignment_required()) - current_offset;
@@ -1460,6 +1486,25 @@
   return RegMask();
 }
 
+const RegMask Matcher::method_handle_invoke_SP_save_mask() {
+  return EBP_REG_mask;
+}
+
+// Returns true if the high 32 bits of the value is known to be zero.
+bool is_operand_hi32_zero(Node* n) {
+  int opc = n->Opcode();
+  if (opc == Op_LoadUI2L) {
+    return true;
+  }
+  if (opc == Op_AndL) {
+    Node* o2 = n->in(2);
+    if (o2->is_Con() && (o2->get_long() & 0xFFFFFFFF00000000LL) == 0LL) {
+      return true;
+    }
+  }
+  return false;
+}
+
 %}
 
 //----------ENCODING BLOCK-----------------------------------------------------
@@ -1772,10 +1817,13 @@
 
   enc_class pre_call_FPU %{
     // If method sets FPU control word restore it here
+    debug_only(int off0 = cbuf.code_size());
     if( Compile::current()->in_24_bit_fp_mode() ) {
       MacroAssembler masm(&cbuf);
       masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
     }
+    debug_only(int off1 = cbuf.code_size());
+    assert(off1 - off0 == pre_call_FPU_size(), "correct size prediction");
   %}
 
   enc_class post_call_FPU %{
@@ -1786,6 +1834,21 @@
     }
   %}
 
+  enc_class preserve_SP %{
+    debug_only(int off0 = cbuf.code_size());
+    MacroAssembler _masm(&cbuf);
+    // RBP is preserved across all calls, even compiled calls.
+    // Use it to preserve RSP in places where the callee might change the SP.
+    __ movptr(rbp, rsp);
+    debug_only(int off1 = cbuf.code_size());
+    assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
+  %}
+
+  enc_class restore_SP %{
+    MacroAssembler _masm(&cbuf);
+    __ movptr(rsp, rbp);
+  %}
+
   enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
     // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
     // who we intended to call.
@@ -8556,6 +8619,63 @@
   ins_pipe( pipe_slow );
 %}
 
+// Multiply Register Long where the left operand's high 32 bits are zero
+instruct mulL_eReg_lhi0(eADXRegL dst, eRegL src, eRegI tmp, eFlagsReg cr) %{
+  predicate(is_operand_hi32_zero(n->in(1)));
+  match(Set dst (MulL dst src));
+  effect(KILL cr, TEMP tmp);
+  ins_cost(2*100+2*400);
+// Basic idea: lo(result) = lo(x_lo * y_lo)
+//             hi(result) = hi(x_lo * y_lo) + lo(x_lo * y_hi) where lo(x_hi * y_lo) = 0 because x_hi = 0
+  format %{ "MOV    $tmp,$src.hi\n\t"
+            "IMUL   $tmp,EAX\n\t"
+            "MUL    EDX:EAX,$src.lo\n\t"
+            "ADD    EDX,$tmp" %}
+  ins_encode %{
+    __ movl($tmp$$Register, HIGH_FROM_LOW($src$$Register));
+    __ imull($tmp$$Register, rax);
+    __ mull($src$$Register);
+    __ addl(rdx, $tmp$$Register);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+// Multiply Register Long where the right operand's high 32 bits are zero
+instruct mulL_eReg_rhi0(eADXRegL dst, eRegL src, eRegI tmp, eFlagsReg cr) %{
+  predicate(is_operand_hi32_zero(n->in(2)));
+  match(Set dst (MulL dst src));
+  effect(KILL cr, TEMP tmp);
+  ins_cost(2*100+2*400);
+// Basic idea: lo(result) = lo(x_lo * y_lo)
+//             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) where lo(x_lo * y_hi) = 0 because y_hi = 0
+  format %{ "MOV    $tmp,$src.lo\n\t"
+            "IMUL   $tmp,EDX\n\t"
+            "MUL    EDX:EAX,$src.lo\n\t"
+            "ADD    EDX,$tmp" %}
+  ins_encode %{
+    __ movl($tmp$$Register, $src$$Register);
+    __ imull($tmp$$Register, rdx);
+    __ mull($src$$Register);
+    __ addl(rdx, $tmp$$Register);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+// Multiply Register Long where the left and the right operands' high 32 bits are zero
+instruct mulL_eReg_hi0(eADXRegL dst, eRegL src, eFlagsReg cr) %{
+  predicate(is_operand_hi32_zero(n->in(1)) && is_operand_hi32_zero(n->in(2)));
+  match(Set dst (MulL dst src));
+  effect(KILL cr);
+  ins_cost(1*400);
+// Basic idea: lo(result) = lo(x_lo * y_lo)
+//             hi(result) = hi(x_lo * y_lo) where lo(x_hi * y_lo) = 0 and lo(x_lo * y_hi) = 0 because x_hi = 0 and y_hi = 0
+  format %{ "MUL    EDX:EAX,$src.lo\n\t" %}
+  ins_encode %{
+    __ mull($src$$Register);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 // Multiply Register Long by small constant
 instruct mulL_eReg_con(eADXRegL dst, immL_127 src, eRegI tmp, eFlagsReg cr) %{
   match(Set dst (MulL dst src));
@@ -13406,6 +13526,7 @@
 //       compute_padding() functions will have to be adjusted.
 instruct CallStaticJavaDirect(method meth) %{
   match(CallStaticJava);
+  predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
   effect(USE meth);
 
   ins_cost(300);
@@ -13420,6 +13541,30 @@
   ins_alignment(4);
 %}
 
+// Call Java Static Instruction (method handle version)
+// Note: If this code changes, the corresponding ret_addr_offset() and
+//       compute_padding() functions will have to be adjusted.
+instruct CallStaticJavaHandle(method meth, eBPRegP ebp) %{
+  match(CallStaticJava);
+  predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
+  effect(USE meth);
+  // EBP is saved by all callees (for interpreter stack correction).
+  // We use it here for a similar purpose, in {preserve,restore}_SP.
+
+  ins_cost(300);
+  format %{ "CALL,static/MethodHandle " %}
+  opcode(0xE8); /* E8 cd */
+  ins_encode( pre_call_FPU,
+              preserve_SP,
+              Java_Static_Call( meth ),
+              restore_SP,
+              call_epilog,
+              post_call_FPU );
+  ins_pipe( pipe_slow );
+  ins_pc_relative(1);
+  ins_alignment(4);
+%}
+
 // Call Java Dynamic Instruction
 // Note: If this code changes, the corresponding ret_addr_offset() and
 //       compute_padding() functions will have to be adjusted.
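
The three new mulL rules exploit the usual decomposition of a 64-bit product into 32-bit halves: lo64(x*y) = x_lo*y_lo + ((x_lo*y_hi + x_hi*y_lo) << 32) (mod 2^64), so whenever one operand's high half is known to be zero one cross term vanishes and a single MUL plus one truncating IMUL/ADD suffices. A self-contained check of the left-operand case, mirroring mulL_eReg_lhi0 (variable names are illustrative):

#include <cassert>
#include <cstdint>

static uint64_t mul_lhi0(uint64_t x, uint64_t y) {   // requires hi32(x) == 0
  uint32_t x_lo = (uint32_t) x;
  uint32_t y_lo = (uint32_t) y;
  uint32_t y_hi = (uint32_t)(y >> 32);
  uint64_t lo_prod = (uint64_t) x_lo * y_lo;                 // MUL  EDX:EAX, $src.lo
  uint32_t hi_fix  = (uint32_t)((uint64_t) x_lo * y_hi);     // IMUL tmp, EAX (truncating)
  return lo_prod + ((uint64_t) hi_fix << 32);                // ADD  EDX, tmp
}

int main() {
  uint64_t x = 0x00000000FFFFFFFFULL;          // high 32 bits are zero
  uint64_t y = 0x123456789ABCDEF0ULL;
  assert(mul_lhi0(x, y) == x * y);             // agrees with the full 64x64 multiply
  return 0;
}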
--- a/hotspot/src/cpu/x86/vm/x86_64.ad	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad	Wed Mar 24 14:16:57 2010 -0700
@@ -551,12 +551,19 @@
 
 #define __ _masm.
 
+static int preserve_SP_size() {
+  return LP64_ONLY(1 +) 2;  // [rex,] op, rm(reg/reg)
+}
+
 // !!!!! Special hack to get all types of calls to specify the byte offset
 //       from the start of the call to the point where the return address
 //       will point.
 int MachCallStaticJavaNode::ret_addr_offset()
 {
-  return 5; // 5 bytes from start of call to where return address points
+  int offset = 5; // 5 bytes from start of call to where return address points
+  if (_method_handle_invoke)
+    offset += preserve_SP_size();
+  return offset;
 }
 
 int MachCallDynamicJavaNode::ret_addr_offset()
@@ -589,6 +596,15 @@
 
 // The address of the call instruction needs to be 4-byte aligned to
 // ensure that it does not span a cache line so that it can be patched.
+int CallStaticJavaHandleNode::compute_padding(int current_offset) const
+{
+  current_offset += preserve_SP_size();   // skip mov rbp, rsp
+  current_offset += 1; // skip call opcode byte
+  return round_to(current_offset, alignment_required()) - current_offset;
+}
+
+// The address of the call instruction needs to be 4-byte aligned to
+// ensure that it does not span a cache line so that it can be patched.
 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
 {
   current_offset += 11; // skip movq instruction + call opcode byte
@@ -2113,6 +2129,10 @@
   return LONG_RDX_REG_mask;
 }
 
+const RegMask Matcher::method_handle_invoke_SP_save_mask() {
+  return PTR_RBP_REG_mask;
+}
+
 static Address build_address(int b, int i, int s, int d) {
   Register index = as_Register(i);
   Address::ScaleFactor scale = (Address::ScaleFactor)s;
@@ -2608,6 +2628,21 @@
                    RELOC_DISP32);
   %}
 
+  enc_class preserve_SP %{
+    debug_only(int off0 = cbuf.code_size());
+    MacroAssembler _masm(&cbuf);
+    // RBP is preserved across all calls, even compiled calls.
+    // Use it to preserve RSP in places where the callee might change the SP.
+    __ movptr(rbp, rsp);
+    debug_only(int off1 = cbuf.code_size());
+    assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
+  %}
+
+  enc_class restore_SP %{
+    MacroAssembler _masm(&cbuf);
+    __ movptr(rsp, rbp);
+  %}
+
   enc_class Java_Static_Call(method meth)
   %{
     // JAVA STATIC CALL
@@ -12526,9 +12561,9 @@
 // Call Java Static Instruction
 // Note: If this code changes, the corresponding ret_addr_offset() and
 //       compute_padding() functions will have to be adjusted.
-instruct CallStaticJavaDirect(method meth)
-%{
+instruct CallStaticJavaDirect(method meth) %{
   match(CallStaticJava);
+  predicate(!((CallStaticJavaNode*) n)->is_method_handle_invoke());
   effect(USE meth);
 
   ins_cost(300);
@@ -12540,6 +12575,28 @@
   ins_alignment(4);
 %}
 
+// Call Java Static Instruction (method handle version)
+// Note: If this code changes, the corresponding ret_addr_offset() and
+//       compute_padding() functions will have to be adjusted.
+instruct CallStaticJavaHandle(method meth, rbp_RegP rbp) %{
+  match(CallStaticJava);
+  predicate(((CallStaticJavaNode*) n)->is_method_handle_invoke());
+  effect(USE meth);
+  // RBP is saved by all callees (for interpreter stack correction).
+  // We use it here for a similar purpose, in {preserve,restore}_SP.
+
+  ins_cost(300);
+  format %{ "call,static/MethodHandle " %}
+  opcode(0xE8); /* E8 cd */
+  ins_encode(preserve_SP,
+             Java_Static_Call(meth),
+             restore_SP,
+             call_epilog);
+  ins_pipe(pipe_slow);
+  ins_pc_relative(1);
+  ins_alignment(4);
+%}
+
 // Call Java Dynamic Instruction
 // Note: If this code changes, the corresponding ret_addr_offset() and
 //       compute_padding() functions will have to be adjusted.
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008, 2009 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -145,7 +145,7 @@
     }
     else if (istate->msg() == BytecodeInterpreter::return_from_method) {
       // Copy the result into the caller's frame
-      result_slots = type2size[method->result_type()];
+      result_slots = type2size[result_type_of(method)];
       assert(result_slots >= 0 && result_slots <= 2, "what?");
       result = istate->stack() + result_slots;
       break;
@@ -204,6 +204,20 @@
     goto unwind_and_return;
   }
 
+  // Update the invocation counter
+  if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
+    thread->set_do_not_unlock();
+    InvocationCounter *counter = method->invocation_counter();
+    counter->increment();
+    if (counter->reached_InvocationLimit()) {
+      CALL_VM_NOCHECK(
+        InterpreterRuntime::frequency_counter_overflow(thread, NULL));
+      if (HAS_PENDING_EXCEPTION)
+        goto unwind_and_return;
+    }
+    thread->clr_do_not_unlock();
+  }
+
   // Lock if necessary
   BasicObjectLock *monitor;
   monitor = NULL;
@@ -231,7 +245,7 @@
     if (handlerAddr == NULL) {
       CALL_VM_NOCHECK(InterpreterRuntime::prepare_native_call(thread, method));
       if (HAS_PENDING_EXCEPTION)
-        goto unwind_and_return;
+        goto unlock_unwind_and_return;
 
       handlerAddr = method->signature_handler();
       assert(handlerAddr != NULL, "eh?");
@@ -240,7 +254,7 @@
       CALL_VM_NOCHECK(handlerAddr =
         InterpreterRuntime::slow_signature_handler(thread, method, NULL,NULL));
       if (HAS_PENDING_EXCEPTION)
-        goto unwind_and_return;
+        goto unlock_unwind_and_return;
     }
     handler = \
       InterpreterRuntime::SignatureHandler::from_handlerAddr(handlerAddr);
@@ -351,10 +365,10 @@
   // Reset handle block
   thread->active_handles()->clear();
 
-  // Unlock if necessary.  It seems totally wrong that this
-  // is skipped in the event of an exception but apparently
-  // the template interpreter does this so we do too.
-  if (monitor && !HAS_PENDING_EXCEPTION) {
+ unlock_unwind_and_return:
+
+  // Unlock if necessary
+  if (monitor) {
     BasicLock *lock = monitor->lock();
     markOop header = lock->displaced_header();
     oop rcvr = monitor->obj();
@@ -380,9 +394,10 @@
 
   // Push our result
   if (!HAS_PENDING_EXCEPTION) {
-    stack->set_sp(stack->sp() - type2size[method->result_type()]);
+    BasicType type = result_type_of(method);
+    stack->set_sp(stack->sp() - type2size[type]);
 
-    switch (method->result_type()) {
+    switch (type) {
     case T_VOID:
       break;
 
@@ -693,6 +708,26 @@
   return i;
 }
 
+BasicType CppInterpreter::result_type_of(methodOop method) {
+  BasicType t;
+  switch (method->result_index()) {
+    case 0 : t = T_BOOLEAN; break;
+    case 1 : t = T_CHAR;    break;
+    case 2 : t = T_BYTE;    break;
+    case 3 : t = T_SHORT;   break;
+    case 4 : t = T_INT;     break;
+    case 5 : t = T_LONG;    break;
+    case 6 : t = T_VOID;    break;
+    case 7 : t = T_FLOAT;   break;
+    case 8 : t = T_DOUBLE;  break;
+    case 9 : t = T_OBJECT;  break;
+    default: ShouldNotReachHere();
+  }
+  assert(AbstractInterpreter::BasicType_as_index(t) == method->result_index(),
+         "out of step with AbstractInterpreter::BasicType_as_index");
+  return t;
+}
+
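Put differently, result_type_of() inverts AbstractInterpreter::BasicType_as_index(): method->result_index() already carries that index (a method returning int, for instance, carries index 4), so the switch recovers the BasicType from the cached index instead of recomputing it from the method signature, which is what the two call sites above now rely on. The assert keeps the two tables from drifting apart.
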
 address InterpreterGenerator::generate_empty_entry() {
   if (!UseFastEmptyMethods)
     return NULL;
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.hpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.hpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,3 +41,7 @@
  private:
   // Stack overflow checks
   static bool stack_overflow_imminent(JavaThread *thread);
+
+ private:
+  // Fast result type determination
+  static BasicType result_type_of(methodOop method);
--- a/hotspot/src/cpu/zero/vm/frame_zero.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/zero/vm/frame_zero.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -36,11 +36,8 @@
   return zeroframe()->is_interpreter_frame();
 }
 
-bool frame::is_fake_stub_frame() const {
-  return zeroframe()->is_fake_stub_frame();
-}
-
 frame frame::sender_for_entry_frame(RegisterMap *map) const {
+  assert(zeroframe()->is_entry_frame(), "wrong type of frame");
   assert(map != NULL, "map must be set");
   assert(!entry_frame_is_first(), "next Java fp must be non zero");
   assert(entry_frame_call_wrapper()->anchor()->last_Java_sp() == sender_sp(),
@@ -50,15 +47,10 @@
   return frame(sender_sp(), sp() + 1);
 }
 
-frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
-  return frame(sender_sp(), sp() + 1);
-}
-
-frame frame::sender_for_compiled_frame(RegisterMap *map) const {
-  return frame(sender_sp(), sp() + 1);
-}
-
-frame frame::sender_for_fake_stub_frame(RegisterMap *map) const {
+frame frame::sender_for_nonentry_frame(RegisterMap *map) const {
+  assert(zeroframe()->is_interpreter_frame() ||
+         zeroframe()->is_shark_frame() ||
+         zeroframe()->is_fake_stub_frame(), "wrong type of frame");
   return frame(sender_sp(), sp() + 1);
 }
 
@@ -69,17 +61,8 @@
 
   if (is_entry_frame())
     return sender_for_entry_frame(map);
-
-  if (is_interpreted_frame())
-    return sender_for_interpreter_frame(map);
-
-  if (is_compiled_frame())
-    return sender_for_compiled_frame(map);
-
-  if (is_fake_stub_frame())
-    return sender_for_fake_stub_frame(map);
-
-  ShouldNotReachHere();
+  else
+    return sender_for_nonentry_frame(map);
 }
 
 #ifdef CC_INTERP
--- a/hotspot/src/cpu/zero/vm/frame_zero.hpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/zero/vm/frame_zero.hpp	Wed Mar 24 14:16:57 2010 -0700
@@ -65,10 +65,7 @@
   }
 
  public:
-  bool is_fake_stub_frame() const;
-
- public:
-  frame sender_for_fake_stub_frame(RegisterMap* map) const;
+  frame sender_for_nonentry_frame(RegisterMap* map) const;
 
  public:
   void zero_print_on_error(int           index,
--- a/hotspot/src/cpu/zero/vm/globals_zero.hpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/zero/vm/globals_zero.hpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008, 2009 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,10 +23,8 @@
  *
  */
 
-//
 // Set the default values for platform dependent flags used by the
 // runtime system.  See globals.hpp for details of what they do.
-//
 
 define_pd_global(bool,  ConvertSleepToYield,  true);
 define_pd_global(bool,  ShareVtableStubs,     true);
@@ -37,19 +35,12 @@
 define_pd_global(bool,  UncommonNullCast,     true);
 
 define_pd_global(intx,  CodeEntryAlignment,   32);
-define_pd_global(uintx, TLABSize,             0);
-#ifdef _LP64
-define_pd_global(uintx, NewSize,              ScaleForWordSize(2048 * K));
-#else
-define_pd_global(uintx, NewSize,              ScaleForWordSize(1024 * K));
-#endif // _LP64
 define_pd_global(intx,  InlineFrequencyCount, 100);
-define_pd_global(intx,  InlineSmallCode,      1000);
 define_pd_global(intx,  PreInflateSpin,       10);
 
 define_pd_global(intx,  StackYellowPages,     2);
 define_pd_global(intx,  StackRedPages,        1);
-define_pd_global(intx,  StackShadowPages,     3 LP64_ONLY(+3) DEBUG_ONLY(+3));
+define_pd_global(intx,  StackShadowPages,     5 LP64_ONLY(+1) DEBUG_ONLY(+3));
 
 define_pd_global(bool,  RewriteBytecodes,     true);
 define_pd_global(bool,  RewriteFrequentPairs, true);
--- a/hotspot/src/cpu/zero/vm/interpreter_zero.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/zero/vm/interpreter_zero.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,10 @@
   return ShouldNotCallThisEntry();
 }
 
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  return true;
+}
+
 int AbstractInterpreter::size_activation(methodOop method,
                                          int tempcount,
                                          int popframe_extra_args,
--- a/hotspot/src/cpu/zero/vm/sharedRuntime_zero.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/zero/vm/sharedRuntime_zero.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,8 +47,10 @@
                         int total_args_passed,
                         int comp_args_on_stack,
                         const BasicType *sig_bt,
-                        const VMRegPair *regs) {
-  return new AdapterHandlerEntry(
+                        const VMRegPair *regs,
+                        AdapterFingerPrint *fingerprint) {
+  return AdapterHandlerLibrary::new_entry(
+    fingerprint,
     ShouldNotCallThisStub(),
     ShouldNotCallThisStub(),
     ShouldNotCallThisStub());
@@ -61,7 +63,14 @@
                                                 BasicType *in_sig_bt,
                                                 VMRegPair *in_regs,
                                                 BasicType ret_type) {
+#ifdef SHARK
+  return SharkCompiler::compiler()->generate_native_wrapper(masm,
+                                                            method,
+                                                            in_sig_bt,
+                                                            ret_type);
+#else
   ShouldNotCallThis();
+#endif // SHARK
 }
 
 int Deoptimization::last_frame_adjust(int callee_parameters,
--- a/hotspot/src/cpu/zero/vm/sharkFrame_zero.hpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/cpu/zero/vm/sharkFrame_zero.hpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2008 Red Hat, Inc.
+ * Copyright 2008, 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
 // |  ...               |
 
 class SharkFrame : public ZeroFrame {
-  friend class SharkFunction;
+  friend class SharkStack;
 
  private:
   SharkFrame() : ZeroFrame() {
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -223,8 +223,8 @@
                      "environment on Linux when /proc filesystem is not mounted.";
 
 void os::Linux::initialize_system_info() {
-  _processor_count = sysconf(_SC_NPROCESSORS_CONF);
-  if (_processor_count == 1) {
+  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
+  if (processor_count() == 1) {
     pid_t pid = os::Linux::gettid();
     char fname[32];
     jio_snprintf(fname, sizeof(fname), "/proc/%d", pid);
@@ -236,7 +236,7 @@
     }
   }
   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
-  assert(_processor_count > 0, "linux error");
+  assert(processor_count() > 0, "linux error");
 }
 
 void os::init_system_properties_values() {
@@ -4683,6 +4683,7 @@
   // Return immediately if a permit is available.
   if (_counter > 0) {
       _counter = 0 ;
+      OrderAccess::fence();
       return ;
   }
 
@@ -4725,6 +4726,7 @@
     _counter = 0;
     status = pthread_mutex_unlock(_mutex);
     assert (status == 0, "invariant") ;
+    OrderAccess::fence();
     return;
   }
 
@@ -4765,6 +4767,7 @@
     jt->java_suspend_self();
   }
 
+  OrderAccess::fence();
 }
 
 void Parker::unpark() {
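
The three OrderAccess::fence() calls added above give every return path out of Parker::park() a full memory barrier after _counter is written. A minimal sketch of the same pattern in portable C++ (a hypothetical ToyParker, not the HotSpot Parker, which uses pthread primitives and OrderAccess directly):

    #include <atomic>

    class ToyParker {
      std::atomic<int> _counter{0};
     public:
      void unpark() { _counter.store(1, std::memory_order_release); }
      void park() {
        // Consume a permit if one is available; the sequentially consistent
        // exchange plays the role of the fence above, keeping the _counter
        // update ordered with whatever the caller does after park() returns.
        if (_counter.exchange(0, std::memory_order_seq_cst) > 0) {
          return;
        }
        // Otherwise block on a condition variable (omitted in this sketch).
      }
    };
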
--- a/hotspot/src/os/solaris/dtrace/libjvm_db.c	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/os/solaris/dtrace/libjvm_db.c	Wed Mar 24 14:16:57 2010 -0700
@@ -937,54 +937,56 @@
   return err;
 }
 
-static int
-scopeDesc_chain(Nmethod_t *N)
-{
+static int scopeDesc_chain(Nmethod_t *N) {
   int32_t decode_offset = 0;
   int32_t err;
 
-  if (debug > 2)
-      fprintf(stderr, "\t scopeDesc_chain: BEGIN\n");
+  if (debug > 2) {
+    fprintf(stderr, "\t scopeDesc_chain: BEGIN\n");
+  }
 
   err = ps_pread(N->J->P, N->pc_desc + OFFSET_PcDesc_scope_decode_offset,
                  &decode_offset, SZ32);
   CHECK_FAIL(err);
 
   while (decode_offset > 0) {
-      if (debug > 2)
-          fprintf(stderr, "\t scopeDesc_chain: decode_offset: %#x\n", decode_offset);
+    Vframe_t *vf = &N->vframes[N->vf_cnt];
 
-      Vframe_t *vf = &N->vframes[N->vf_cnt];
+    if (debug > 2) {
+      fprintf(stderr, "\t scopeDesc_chain: decode_offset: %#x\n", decode_offset);
+    }
 
-      err = scope_desc_at(N, decode_offset, vf);
+    err = scope_desc_at(N, decode_offset, vf);
+    CHECK_FAIL(err);
+
+    if (vf->methodIdx > N->oops_len) {
+      fprintf(stderr, "\t scopeDesc_chain: (methodIdx > oops_len) !\n");
+      return -1;
+    }
+    err = read_pointer(N->J, N->nm + N->oops_beg + (vf->methodIdx-1)*POINTER_SIZE,
+                       &vf->methodOop);
+    CHECK_FAIL(err);
+
+    if (vf->methodOop) {
+      N->vf_cnt++;
+      err = line_number_from_bci(N->J, vf);
       CHECK_FAIL(err);
-
-      if (vf->methodIdx > N->oops_len) {
-          fprintf(stderr, "\t scopeDesc_chain: (methodIdx > oops_len) !\n");
-          return -1;
+      if (debug > 2) {
+        fprintf(stderr, "\t scopeDesc_chain: methodOop: %#8llx, line: %ld\n",
+                vf->methodOop, vf->line);
       }
-      err = read_pointer(N->J, N->nm + N->oops_beg + (vf->methodIdx-1)*POINTER_SIZE,
-                               &vf->methodOop);
-      CHECK_FAIL(err);
-
-      if (vf->methodOop) {
-          N->vf_cnt++;
-          err = line_number_from_bci(N->J, vf);
-          CHECK_FAIL(err);
-          if (debug > 2) {
-              fprintf(stderr, "\t scopeDesc_chain: methodOop: %#8llx, line: %ld\n",
-                              vf->methodOop, vf->line);
-          }
-      }
-      decode_offset = vf->sender_decode_offset;
+    }
+    decode_offset = vf->sender_decode_offset;
   }
-  if (debug > 2)
-      fprintf(stderr, "\t scopeDesc_chain: END \n\n");
+  if (debug > 2) {
+    fprintf(stderr, "\t scopeDesc_chain: END \n\n");
+  }
   return PS_OK;
 
  fail:
-  if (debug)
-      fprintf(stderr, "\t scopeDesc_chain: FAIL \n\n");
+  if (debug) {
+    fprintf(stderr, "\t scopeDesc_chain: FAIL \n\n");
+  }
   return err;
 }
 
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -457,7 +457,7 @@
 
 
 void os::Solaris::initialize_system_info() {
-  _processor_count = sysconf(_SC_NPROCESSORS_CONF);
+  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
   _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
 }
@@ -5803,6 +5803,7 @@
   // Return immediately if a permit is available.
   if (_counter > 0) {
       _counter = 0 ;
+      OrderAccess::fence();
       return ;
   }
 
@@ -5846,6 +5847,7 @@
     _counter = 0;
     status = os::Solaris::mutex_unlock(_mutex);
     assert (status == 0, "invariant") ;
+    OrderAccess::fence();
     return;
   }
 
@@ -5892,6 +5894,7 @@
     jt->java_suspend_self();
   }
 
+  OrderAccess::fence();
 }
 
 void Parker::unpark() {
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -142,6 +142,9 @@
 }
 
 #ifndef _WIN64
+// previous UnhandledExceptionFilter, if there is one
+static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
+
 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 #endif
 void os::init_system_properties_values() {
@@ -260,7 +263,8 @@
   }
 
 #ifndef _WIN64
-  SetUnhandledExceptionFilter(Handle_FLT_Exception);
+  // set our UnhandledExceptionFilter and save any previous one
+  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 #endif
 
   // Done
@@ -1969,7 +1973,7 @@
 #ifndef  _WIN64
 //-----------------------------------------------------------------------------
 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
-  // handle exception caused by native mothod modifying control word
+  // handle exception caused by native method modifying control word
   PCONTEXT ctx = exceptionInfo->ContextRecord;
   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
 
@@ -1990,6 +1994,13 @@
         return EXCEPTION_CONTINUE_EXECUTION;
       }
   }
+
+  if (prev_uef_handler != NULL) {
+    // We didn't handle this exception, so pass it to the previous
+    // UnhandledExceptionFilter.
+    return (prev_uef_handler)(exceptionInfo);
+  }
+
   return EXCEPTION_CONTINUE_SEARCH;
 }
 #else //_WIN64
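
The chaining added above follows the usual Win32 pattern: SetUnhandledExceptionFilter() returns the filter that was installed before, and a well-behaved filter forwards anything it does not recognize to that predecessor. A small standalone sketch of the pattern (hypothetical names, not the HotSpot code):

    #include <windows.h>

    static LPTOP_LEVEL_EXCEPTION_FILTER prev_filter = NULL;

    static LONG WINAPI my_filter(EXCEPTION_POINTERS* info) {
      DWORD code = info->ExceptionRecord->ExceptionCode;
      if (code == EXCEPTION_FLT_INVALID_OPERATION) {
        // Fix up the FP control word here, then resume (elided).
        return EXCEPTION_CONTINUE_EXECUTION;
      }
      // Not ours: defer to whichever filter was installed before us, if any.
      return prev_filter != NULL ? prev_filter(info)
                                 : EXCEPTION_CONTINUE_SEARCH;
    }

    void install_filter() {
      // SetUnhandledExceptionFilter returns the previously installed filter.
      prev_filter = SetUnhandledExceptionFilter(my_filter);
    }
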
@@ -3150,7 +3161,7 @@
   _vm_allocation_granularity = si.dwAllocationGranularity;
   _processor_type  = si.dwProcessorType;
   _processor_level = si.wProcessorLevel;
-  _processor_count = si.dwNumberOfProcessors;
+  set_processor_count(si.dwNumberOfProcessors);
 
   MEMORYSTATUSEX ms;
   ms.dwLength = sizeof(ms);
--- a/hotspot/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp	Wed Mar 24 14:16:57 2010 -0700
@@ -22,10 +22,9 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the runtime system.
 // (see globals.hpp)
-//
+
 define_pd_global(bool, DontYieldALot,            false);
 #ifdef AMD64
 define_pd_global(intx, ThreadStackSize,          1024); // 0 => use system default
@@ -39,11 +38,10 @@
 #endif // AMD64
 
 define_pd_global(intx, CompilerThreadStackSize,  0);
-define_pd_global(intx, SurvivorRatio,            8);
 
-define_pd_global(uintx, JVMInvokeMethodSlack,    8192);
+define_pd_global(uintx,JVMInvokeMethodSlack,     8192);
 
 // Only used on 64 bit platforms
-define_pd_global(uintx, HeapBaseMinAddress,      2*G);
+define_pd_global(uintx,HeapBaseMinAddress,       2*G);
 // Only used on 64 bit Windows platforms
 define_pd_global(bool, UseVectoredExceptions,    false);
--- a/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Sat Mar 06 03:37:53 2010 +0300
+++ b/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Wed Mar 24 14:16:57 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -239,7 +239,21 @@
 }
 
 bool os::is_allocatable(size_t bytes) {
-  ShouldNotCallThis();
+#ifdef _LP64
+  return true;
+#else
+  if (bytes < 2 * G) {
+    return true;
+  }
+
+  char* addr = reserve_memory(bytes, NULL);
+
+  if (addr != NULL) {
+    release_memory(addr, bytes);
+  }
+
+