changeset 5878:257524d78651

Merge
author lana
date Tue, 29 Jun 2010 10:48:02 -0700
parents e14fbdea5504 72382b63f2eb
children b429d27185e0
files
diffstat 222 files changed, 5880 insertions(+), 3859 deletions(-)
--- a/.hgtags	Mon Jun 21 11:00:15 2010 -0700
+++ b/.hgtags	Tue Jun 29 10:48:02 2010 -0700
@@ -71,3 +71,5 @@
 8bb281f0f91582104d65d032be22522bfd2d8110 jdk7-b94
 654298d26561b76dfe3cfcffbbd7078080837300 jdk7-b95
 d260f892491e040ae385a8e6df59557a7d721abf jdk7-b96
+7e406ebed9a5968b584f3c3e6b60893b5d6d9741 jdk7-b97
+db6e660120446c407e2d908d52ec046592b21726 jdk7-b98
--- a/.hgtags-top-repo	Mon Jun 21 11:00:15 2010 -0700
+++ b/.hgtags-top-repo	Tue Jun 29 10:48:02 2010 -0700
@@ -72,3 +72,5 @@
 fd3663286e77b9f13c39eee124db2beb079b3ca6 jdk7-b95
 cf71cb5151166f35433afebaf67dbf34a704a170 jdk7-b96
 5e197c942c6ebd8b92f324a31049c5f1d26d40ef jdk7-b97
+6cea9984d73d74de0cd01f30d07ac0a1ed196117 jdk7-b98
+e7f18db469a3e947b7096bfd12e87380e5a042cd jdk7-b99
--- a/README-builds.html	Mon Jun 21 11:00:15 2010 -0700
+++ b/README-builds.html	Tue Jun 29 10:48:02 2010 -0700
@@ -65,8 +65,9 @@
                             <li><a href="#cacerts">Certificate Authority File (cacert)</a> </li>
                             <li><a href="#compilers">Compilers</a> 
                                 <ul>
-                                    <li><a href="#msvc">Microsoft Visual Studio</a> </li>
-                                    <li><a href="#mssdk">Microsoft Platform SDK</a> </li>
+                                    <li><a href="#msvc32">Microsoft Visual Studio Professional/Express for 32 bit</a> </li>
+                                    <li><a href="#msvc64">Microsoft Visual Studio Professional for 64 bit</a> </li>
+                                    <li><a href="#mssdk64">Microsoft Windows SDK for 64 bit</a> </li>
                                     <li><a href="#gcc">Linux gcc/binutils</a> </li>
                                     <li><a href="#studio">Sun Studio</a> </li>
                                 </ul>
@@ -789,11 +790,11 @@
                 </li>
                 <li>
                     Install the
-                    <a href="#msvc">Microsoft Visual Studio Compilers</a>).
+                    <a href="#msvc32">Microsoft Visual Studio Compilers</a>.
                 </li>
                 <li>
                     Setup all environment variables for compilers 
-                    (see <a href="#msvc">compilers</a>).
+                    (see <a href="#msvc32">compilers</a>).
                 </li>
                 <li>
                     Install 
@@ -958,7 +959,7 @@
                     are also an option, although these compilers have not
                     been extensively used yet.
                 </blockquote>
-                <strong><a name="msvc">Windows i586: Microsoft Visual Studio Compilers</a></strong>
+                <strong><a name="msvc32">Windows i586: Microsoft Visual Studio 2010 Compilers</a></strong>
                 <blockquote>
 <p>
 <b>BEGIN WARNING</b>: At this time (Spring/Summer 2010) JDK 7 is starting a transition to
@@ -971,14 +972,13 @@
 We do not guarantee that VS2008 will work, although there is sufficient
 makefile support to make at least basic JDK builds plausible.
 Visual Studio 2010 Express compilers are now able to build all the
-open source repositories, but this is 32 bit only, since
-we have not yet seen the 7.1 Windows SDK with the 64 bit
-compilers. <b>END WARNING.</b>
+open source repositories, but this is 32 bit only. To build 64 bit
+Windows binaries, use the 7.1 Windows SDK. <b>END WARNING.</b>
 <p>
                     The 32-bit OpenJDK Windows build
                     requires 
                     Microsoft Visual Studio C++ 2010 (VS2010) Professional
-                    Edition compiler. 
+                    Edition or Express compiler.
                     The compiler and other tools are expected to reside
                     in the location defined by the variable 
                     <tt>VS100COMNTOOLS</tt> which
@@ -1001,14 +1001,33 @@
                     The path <tt>/usr/bin</tt> must be after the path to the
                     Visual Studio product.
                 </blockquote>
-                <strong><a name="mssdk">Windows x64: Microsoft Visual Studio Compilers</a></strong>
+                <strong><a name="msvc64">Windows x64: Microsoft Visual Studio 2010 Professional Compiler</a></strong>
                 <blockquote>
-                    On <b>X64</b>, the set up is much the same in VS2010
+                    For <b>X64</b> builds using the VS2010 Professional
+                    compiler, the 64 bit setup is much the same as for 32 bit
                     except that you run <tt>amd64\VCVARS64.BAT</tt>
                     to set the compiler environment variables.
-                    Previously 64 builds had used the 64 bit compiler in
-                    an unbundled Windows SDK but this is no longer necessary.
+                    Previously, 64 bit builds used the 64 bit compiler in
+                    an unbundled Windows SDK, but this is no longer necessary
+                    if you have VS2010 Professional.
                 </blockquote>
+                <strong><a name="mssdk64">Windows x64: Microsoft Windows 7.1 SDK 64 bit compilers</a></strong>
+                   As a free alternative for 64 bit builds, use the 7.1 SDK.
+                   Microsoft says that to set up your paths for this, run:
+<pre>
+    c:\Program Files\Microsoft SDKs\Windows\v7.1\bin\setenv.cmd /x64
+</pre>
+                   What was tested was setting LIB, INCLUDE, and PATH
+                   directly, based on the installation directories and using
+                   the DOS short names appropriate for the system (you will
+                   need to set them for your own installation, not just copy this), e.g.:
+<pre>
+    set VSINSTALLDIR=c:\PROGRA~2\MICROS~1.0
+    set WindowsSdkDir=c:\PROGRA~1\MICROS~1\Windows\v7.1
+    set PATH=%VSINSTALLDIR%\vc\bin\amd64;%VSINSTALLDIR%\Common7\IDE;%WindowsSdkDir%\bin;%PATH%
+    set INCLUDE=%VSINSTALLDIR%\vc\include;%WindowsSdkDir%\include
+    set LIB=%VSINSTALLDIR%\vc\lib\amd64;%WindowsSdkDir%\lib\x64
+</pre>
             </blockquote>
             <!-- ------------------------------------------------------ --> 
             <h4><a name="zip">Zip and Unzip</a></h4>
--- a/corba/.hgtags	Mon Jun 21 11:00:15 2010 -0700
+++ b/corba/.hgtags	Tue Jun 29 10:48:02 2010 -0700
@@ -71,3 +71,5 @@
 533c11186b44e3a02d6c5fe69a73260505fcfe5e jdk7-b94
 06dbf406818c789bb586c1de4c002024cd26ecd2 jdk7-b95
 edc2a2659c77dabc55cb55bb617bad89e3a05bb3 jdk7-b96
+4ec9d59374caa1e5d72fa802291b4d66955a4936 jdk7-b97
+3b99409057e4c255da946f9f540d051a5ef4ab23 jdk7-b98
--- a/hotspot/.hgtags	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/.hgtags	Tue Jun 29 10:48:02 2010 -0700
@@ -100,3 +100,5 @@
 91d861ba858daca645993a1ab6ba2fa06a8f4a5b jdk7-b95
 573e8ea5fd68e8e51eb6308d283ac3b3889d15e0 jdk7-b96
 573e8ea5fd68e8e51eb6308d283ac3b3889d15e0 hs19-b02
+5f42499e57adc16380780f40541e1a66cd601891 jdk7-b97
+8a045b3f5c13eaad92ff4baf15ca671845fcad1a jdk7-b98
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java	Tue Jun 29 10:48:02 2010 -0700
@@ -42,8 +42,6 @@
   private static CIntegerField instructionsOffsetField;
   private static CIntegerField frameCompleteOffsetField;
   private static CIntegerField dataOffsetField;
-  private static CIntegerField oopsOffsetField;
-  private static CIntegerField oopsLengthField;
   private static CIntegerField frameSizeField;
   private static AddressField  oopMapsField;
 
@@ -72,8 +70,6 @@
     frameCompleteOffsetField = type.getCIntegerField("_frame_complete_offset");
     instructionsOffsetField  = type.getCIntegerField("_instructions_offset");
     dataOffsetField          = type.getCIntegerField("_data_offset");
-    oopsOffsetField          = type.getCIntegerField("_oops_offset");
-    oopsLengthField          = type.getCIntegerField("_oops_length");
     frameSizeField           = type.getCIntegerField("_frame_size");
     oopMapsField             = type.getAddressField("_oop_maps");
 
@@ -131,19 +127,10 @@
     return headerBegin().addOffsetTo(sizeField.getValue(addr));
   }
 
-  public Address oopsBegin() {
-    return headerBegin().addOffsetTo(oopsOffsetField.getValue(addr));
-  }
-
-  public Address oopsEnd() {
-    return oopsBegin().addOffsetTo(getOopsLength());
-  }
-
   // Offsets
   public int getRelocationOffset()   { return (int) headerSizeField.getValue(addr);         }
   public int getInstructionsOffset() { return (int) instructionsOffsetField.getValue(addr); }
   public int getDataOffset()         { return (int) dataOffsetField.getValue(addr);         }
-  public int getOopsOffset()         { return (int) oopsOffsetField.getValue(addr);         }
 
   // Sizes
   public int getSize()             { return (int) sizeField.getValue(addr);                     }
@@ -157,19 +144,9 @@
   // FIXME: add relocationContains
   public boolean instructionsContains(Address addr) { return instructionsBegin().lessThanOrEqual(addr) && instructionsEnd().greaterThan(addr); }
   public boolean dataContains(Address addr)         { return dataBegin().lessThanOrEqual(addr) && dataEnd().greaterThan(addr);                 }
-  public boolean oopsContains(Address addr)         { return oopsBegin().lessThanOrEqual(addr) && oopsEnd().greaterThan(addr);                 }
   public boolean contains(Address addr)             { return instructionsContains(addr);                                                       }
   public boolean isFrameCompleteAt(Address a)       { return instructionsContains(a) && a.minus(instructionsBegin()) >= frameCompleteOffsetField.getValue(addr); }
 
-  /** Support for oops in scopes and relocs. Note: index 0 is reserved for null. */
-  public OopHandle getOopAt(int index) {
-    if (index == 0) return null;
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(index > 0 && index <= getOopsLength(), "must be a valid non-zero index");
-    }
-    return oopsBegin().getOopHandleAt((index - 1) * VM.getVM().getOopSize());
-  }
-
   // Reclamation support (really only used by the nmethods, but in order to get asserts to work
   // in the CodeCache they are defined virtual here)
   public boolean isZombie()             { return false; }
@@ -223,18 +200,8 @@
   }
 
   protected void printComponentsOn(PrintStream tty) {
-    // FIXME: add relocation information
     tty.println(" instructions: [" + instructionsBegin() + ", " + instructionsEnd() + "), " +
                 " data: [" + dataBegin() + ", " + dataEnd() + "), " +
-                " oops: [" + oopsBegin() + ", " + oopsEnd() + "), " +
                 " frame size: " + getFrameSize());
   }
-
-  //--------------------------------------------------------------------------------
-  // Internals only below this point
-  //
-
-  private int getOopsLength() {
-    return (int) oopsLengthField.getValue(addr);
-  }
 }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,6 +49,7 @@
   private static CIntegerField deoptOffsetField;
   private static CIntegerField origPCOffsetField;
   private static CIntegerField stubOffsetField;
+  private static CIntegerField oopsOffsetField;
   private static CIntegerField scopesDataOffsetField;
   private static CIntegerField scopesPCsOffsetField;
   private static CIntegerField dependenciesOffsetField;
@@ -98,6 +99,7 @@
     deoptOffsetField            = type.getCIntegerField("_deoptimize_offset");
     origPCOffsetField           = type.getCIntegerField("_orig_pc_offset");
     stubOffsetField             = type.getCIntegerField("_stub_offset");
+    oopsOffsetField             = type.getCIntegerField("_oops_offset");
     scopesDataOffsetField       = type.getCIntegerField("_scopes_data_offset");
     scopesPCsOffsetField        = type.getCIntegerField("_scopes_pcs_offset");
     dependenciesOffsetField     = type.getCIntegerField("_dependencies_offset");
@@ -141,7 +143,9 @@
   public Address exceptionBegin()       { return headerBegin().addOffsetTo(getExceptionOffset());    }
   public Address deoptBegin()           { return headerBegin().addOffsetTo(getDeoptOffset());        }
   public Address stubBegin()            { return headerBegin().addOffsetTo(getStubOffset());         }
-  public Address stubEnd()              { return headerBegin().addOffsetTo(getScopesDataOffset());   }
+  public Address stubEnd()              { return headerBegin().addOffsetTo(getOopsOffset());         }
+  public Address oopsBegin()            { return headerBegin().addOffsetTo(getOopsOffset());         }
+  public Address oopsEnd()              { return headerBegin().addOffsetTo(getScopesDataOffset());   }
   public Address scopesDataBegin()      { return headerBegin().addOffsetTo(getScopesDataOffset());   }
   public Address scopesDataEnd()        { return headerBegin().addOffsetTo(getScopesPCsOffset());    }
   public Address scopesPCsBegin()       { return headerBegin().addOffsetTo(getScopesPCsOffset());    }
@@ -156,6 +160,7 @@
   public int constantsSize()            { return (int) constantsEnd()   .minus(constantsBegin());    }
   public int codeSize()                 { return (int) codeEnd()        .minus(codeBegin());         }
   public int stubSize()                 { return (int) stubEnd()        .minus(stubBegin());         }
+  public int oopsSize()                 { return (int) oopsEnd()        .minus(oopsBegin());         }
   public int scopesDataSize()           { return (int) scopesDataEnd()  .minus(scopesDataBegin());   }
   public int scopesPCsSize()            { return (int) scopesPCsEnd()   .minus(scopesPCsBegin());    }
   public int dependenciesSize()         { return (int) dependenciesEnd().minus(dependenciesBegin()); }
@@ -178,6 +183,7 @@
   public boolean constantsContains   (Address addr) { return constantsBegin()   .lessThanOrEqual(addr) && constantsEnd()   .greaterThan(addr); }
   public boolean codeContains        (Address addr) { return codeBegin()        .lessThanOrEqual(addr) && codeEnd()        .greaterThan(addr); }
   public boolean stubContains        (Address addr) { return stubBegin()        .lessThanOrEqual(addr) && stubEnd()        .greaterThan(addr); }
+  public boolean oopsContains        (Address addr) { return oopsBegin()        .lessThanOrEqual(addr) && oopsEnd()        .greaterThan(addr); }
   public boolean scopesDataContains  (Address addr) { return scopesDataBegin()  .lessThanOrEqual(addr) && scopesDataEnd()  .greaterThan(addr); }
   public boolean scopesPCsContains   (Address addr) { return scopesPCsBegin()   .lessThanOrEqual(addr) && scopesPCsEnd()   .greaterThan(addr); }
   public boolean handlerTableContains(Address addr) { return handlerTableBegin().lessThanOrEqual(addr) && handlerTableEnd().greaterThan(addr); }
@@ -187,6 +193,15 @@
   public Address getEntryPoint()         { return entryPointField.getValue(addr);         }
   public Address getVerifiedEntryPoint() { return verifiedEntryPointField.getValue(addr); }
 
+  /** Support for oops in scopes and relocs. Note: index 0 is reserved for null. */
+  public OopHandle getOopAt(int index) {
+    if (index == 0) return null;
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(index > 0 && index <= oopsSize(), "must be a valid non-zero index");
+    }
+    return oopsBegin().getOopHandleAt((index - 1) * VM.getVM().getOopSize());
+  }
+
   // FIXME: add interpreter_entry_point()
   // FIXME: add lazy_interpreter_entry_point() for C2
 
@@ -338,6 +353,14 @@
     printOn(System.out);
   }
 
+  protected void printComponentsOn(PrintStream tty) {
+    // FIXME: add relocation information
+    tty.println(" instructions: [" + instructionsBegin() + ", " + instructionsEnd() + "), " +
+                " data: [" + dataBegin() + ", " + dataEnd() + "), " +
+                " oops: [" + oopsBegin() + ", " + oopsEnd() + "), " +
+                " frame size: " + getFrameSize());
+  }
+
   public String toString() {
     Method method = getMethod();
     return "NMethod for " +
@@ -367,6 +390,7 @@
   private int getExceptionOffset()    { return (int) exceptionOffsetField   .getValue(addr); }
   private int getDeoptOffset()        { return (int) deoptOffsetField       .getValue(addr); }
   private int getStubOffset()         { return (int) stubOffsetField        .getValue(addr); }
+  private int getOopsOffset()         { return (int) oopsOffsetField        .getValue(addr); }
   private int getScopesDataOffset()   { return (int) scopesDataOffsetField  .getValue(addr); }
   private int getScopesPCsOffset()    { return (int) scopesPCsOffsetField   .getValue(addr); }
   private int getDependenciesOffset() { return (int) dependenciesOffsetField.getValue(addr); }
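
The oops section now lives in nmethod rather than CodeBlob, and getOopAt uses 1-based indexing with index 0 reserved for null. A minimal sketch of that address arithmetic, in plain Java with hypothetical addresses (not the SA API itself):

    // Slot i lives at oopsBegin() + (i - 1) * oopSize; index 0 denotes null.
    static long oopSlotAddress(long oopsBegin, int index, int oopSize) {
        if (index == 0) return 0L;                       // reserved for null
        return oopsBegin + (long) (index - 1) * oopSize;
    }
    // oopSlotAddress(0x1000, 1, 8) == 0x1000  (first oop)
    // oopSlotAddress(0x1000, 3, 8) == 0x1010  (third oop)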
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java	Tue Jun 29 10:48:02 2010 -0700
@@ -73,18 +73,11 @@
 
    public CompactibleFreeListSpace(Address addr) {
       super(addr);
-      if ( VM.getVM().isLP64() ) {
-         heapWordSize = 8;
-         IndexSetStart = 1;
-         IndexSetStride = 1;
-      }
-      else {
-         heapWordSize = 4;
-         IndexSetStart = 2;
-         IndexSetStride = 2;
-      }
-
-      IndexSetSize = 257;
+      VM vm = VM.getVM();
+      heapWordSize   = vm.getHeapWordSize();
+      IndexSetStart  = vm.getMinObjAlignmentInBytes() / heapWordSize;
+      IndexSetStride = IndexSetStart;
+      IndexSetSize   = 257;
    }
 
    // Accessing block offset table
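
The constructor above now derives the IndexSet constants from the VM's heap word size and object alignment instead of hard-coding them per data model. A small sketch of the arithmetic, assuming the default 8 byte object alignment that the removed branches also assumed:

    // Reproduce the removed hard-coded values from the derived formula.
    int minObjAlignmentInBytes = 8;                // assumed default alignment
    int lp64Start  = minObjAlignmentInBytes / 8;   // heapWordSize 8 (LP64)   -> 1
    int ilp32Start = minObjAlignmentInBytes / 4;   // heapWordSize 4 (32-bit) -> 2
    // IndexSetStride equals IndexSetStart in both cases, matching the old code.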
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Oop.java	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Oop.java	Tue Jun 29 10:48:02 2010 -0700
@@ -128,7 +128,7 @@
 
   // Align the object size.
   public static long alignObjectSize(long size) {
-    return VM.getVM().alignUp(size, VM.getVM().getMinObjAlignment());
+    return VM.getVM().alignUp(size, VM.getVM().getMinObjAlignmentInBytes());
   }
 
   // All vm's align longs, so pad out certain offsets.
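
alignObjectSize above rounds a size up to the minimum object alignment, now expressed in bytes. A sketch of the round-up arithmetic this relies on, assuming VM.alignUp has the usual power-of-two semantics:

    // Assumed semantics of VM.alignUp: round value up to a power-of-two alignment.
    static long alignUp(long value, long alignment) {
        return (value + alignment - 1) & ~(alignment - 1);
    }
    // alignUp(13, 8) == 16,  alignUp(16, 8) == 16,  alignUp(17, 16) == 32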
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Tue Jun 29 10:48:02 2010 -0700
@@ -93,6 +93,7 @@
   /** alignment constants */
   private boolean      isLP64;
   private int          bytesPerLong;
+  private int          objectAlignmentInBytes;
   private int          minObjAlignmentInBytes;
   private int          logMinObjAlignmentInBytes;
   private int          heapWordSize;
@@ -313,9 +314,6 @@
       isLP64 = debugger.getMachineDescription().isLP64();
     }
     bytesPerLong = db.lookupIntConstant("BytesPerLong").intValue();
-    minObjAlignmentInBytes = db.lookupIntConstant("MinObjAlignmentInBytes").intValue();
-    // minObjAlignment = db.lookupIntConstant("MinObjAlignment").intValue();
-    logMinObjAlignmentInBytes = db.lookupIntConstant("LogMinObjAlignmentInBytes").intValue();
     heapWordSize = db.lookupIntConstant("HeapWordSize").intValue();
     oopSize  = db.lookupIntConstant("oopSize").intValue();
 
@@ -323,6 +321,15 @@
     uintxType = db.lookupType("uintx");
     boolType = (CIntegerType) db.lookupType("bool");
 
+    minObjAlignmentInBytes = getObjectAlignmentInBytes();
+    if (minObjAlignmentInBytes == 8) {
+      logMinObjAlignmentInBytes = 3;
+    } else if (minObjAlignmentInBytes == 16) {
+      logMinObjAlignmentInBytes = 4;
+    } else {
+      throw new RuntimeException("Object alignment " + minObjAlignmentInBytes + " not yet supported");
+    }
+
     if (isCompressedOopsEnabled()) {
       // Size info for oops within java objects is fixed
       heapOopSize = (int)getIntSize();
@@ -492,10 +499,6 @@
   }
 
   /** Get minimum object alignment in bytes. */
-  public int getMinObjAlignment() {
-    return minObjAlignmentInBytes;
-  }
-
   public int getMinObjAlignmentInBytes() {
     return minObjAlignmentInBytes;
   }
@@ -754,6 +757,14 @@
     return compressedOopsEnabled.booleanValue();
   }
 
+  public int getObjectAlignmentInBytes() {
+    if (objectAlignmentInBytes == 0) {
+        Flag flag = getCommandLineFlag("ObjectAlignmentInBytes");
+        objectAlignmentInBytes = (flag == null) ? 8 : (int)flag.getIntx();
+    }
+    return objectAlignmentInBytes;
+  }
+
   // returns null, if not available.
   public Flag[] getCommandLineFlags() {
     if (commandLineFlags == null) {
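
The VM now reads the alignment from the ObjectAlignmentInBytes flag (defaulting to 8 when the flag is unavailable) and special-cases the log values for 8 and 16. The general form is just log2 of a power-of-two alignment; a sketch:

    // General form of the special-cased computation above (8 -> 3, 16 -> 4),
    // assuming the alignment is a power of two.
    static int logAlignment(int minObjAlignmentInBytes) {
        int log = Integer.numberOfTrailingZeros(minObjAlignmentInBytes);
        assert (1 << log) == minObjAlignmentInBytes : "power of two expected";
        return log;
    }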
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerFinder.java	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerFinder.java	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -98,7 +98,12 @@
         }
         loc.inBlobInstructions = loc.blob.instructionsContains(a);
         loc.inBlobData         = loc.blob.dataContains(a);
-        loc.inBlobOops         = loc.blob.oopsContains(a);
+
+        if (loc.blob.isNMethod()) {
+            NMethod nm = (NMethod) loc.blob;
+            loc.inBlobOops = nm.oopsContains(a);
+        }
+
         loc.inBlobUnknownLocation = (!(loc.inBlobInstructions ||
                                        loc.inBlobData ||
                                        loc.inBlobOops));
--- a/hotspot/make/hotspot_version	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/make/hotspot_version	Tue Jun 29 10:48:02 2010 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=19
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=02
+HS_BUILD_NUMBER=03
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/hotspot/make/solaris/makefiles/defs.make	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/make/solaris/makefiles/defs.make	Tue Jun 29 10:48:02 2010 -0700
@@ -80,12 +80,10 @@
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.so 
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_db.so 
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_dtrace.so 
-  ifeq ($(ARCH),sparc)
-    EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_db.so
-    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_db.so
-    EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_dtrace.so
-    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_dtrace.so
-  endif
+  EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_db.so
+  EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_db.so
+  EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_dtrace.so
+  EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_dtrace.so
 endif
 
 EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.so
--- a/hotspot/make/windows/makefiles/defs.make	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/make/windows/makefiles/defs.make	Tue Jun 29 10:48:02 2010 -0700
@@ -69,8 +69,20 @@
   MAKE_ARGS += Platform_arch_model=x86_64
 endif
 
+ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) EM64T),)
+  ARCH_DATA_MODEL=64
+  PLATFORM=windows-amd64
+  VM_PLATFORM=windows_amd64
+  HS_ARCH=x86
+  MAKE_ARGS += LP64=1
+  MAKE_ARGS += ARCH=x86
+  MAKE_ARGS += BUILDARCH=amd64
+  MAKE_ARGS += Platform_arch=x86
+  MAKE_ARGS += Platform_arch_model=x86_64
+endif
+
 # NB later OS versions than 2003 may report "Intel64"
-ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) "EM64T\|Intel64"),)
+ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) Intel64),)
   ARCH_DATA_MODEL=64
   PLATFORM=windows-amd64
   VM_PLATFORM=windows_amd64
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -87,6 +87,7 @@
 // JSR 292 fixed register usages:
 REGISTER_DECLARATION(Register, G5_method_type        , G5);
 REGISTER_DECLARATION(Register, G3_method_handle      , G3);
+REGISTER_DECLARATION(Register, L7_mh_SP_save         , L7);
 
 // The compiler requires that G5_megamorphic_method is G5_inline_cache_klass,
 // because a single patchable "set" instruction (NativeMovConstReg,
--- a/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -181,8 +181,8 @@
 }
 
 
-void FrameMap::init () {
-  if (_init_done) return;
+void FrameMap::initialize() {
+  assert(!_init_done, "once");
 
   int i=0;
   // Register usage:
@@ -345,6 +345,13 @@
 }
 
 
+// JSR 292
+LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
+  assert(L7 == L7_mh_SP_save, "must be same register");
+  return L7_opr;
+}
+
+
 bool FrameMap::validate_frame() {
   int max_offset = in_bytes(framesize_in_bytes());
   int java_index = 0;
--- a/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -143,6 +143,3 @@
 
   static bool is_caller_save_register (LIR_Opr  reg);
   static bool is_caller_save_register (Register r);
-
-  // JSR 292
-  static LIR_Opr& method_handle_invoke_SP_save_opr() { return L7_opr; }
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -736,7 +736,8 @@
 
 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
   __ call(op->addr(), rtype);
-  // the peephole pass fills the delay slot
+  // The peephole pass fills the delay slot; add_call_info is done in
+  // LIR_Assembler::emit_delay.
 }
 
 
@@ -745,7 +746,8 @@
   __ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
   __ relocate(rspec);
   __ call(op->addr(), relocInfo::none);
-  // the peephole pass fills the delay slot
+  // The peephole pass fills the delay slot; add_call_info is done in
+  // LIR_Assembler::emit_delay.
 }
 
 
@@ -766,16 +768,6 @@
 }
 
 
-void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
-  Unimplemented();
-}
-
-
-void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
-  Unimplemented();
-}
-
-
 // load with 32-bit displacement
 int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) {
   int load_offset = code_offset();
@@ -2934,7 +2926,7 @@
 
   // we may also be emitting the call info for the instruction
   // which we are the delay slot of.
-  CodeEmitInfo * call_info = op->call_info();
+  CodeEmitInfo* call_info = op->call_info();
   if (call_info) {
     add_call_info(code_offset(), call_info);
   }
@@ -3159,6 +3151,7 @@
               tty->print_cr("delayed");
               inst->at(i - 1)->print();
               inst->at(i)->print();
+              tty->cr();
             }
 #endif
             continue;
@@ -3174,8 +3167,8 @@
       case lir_static_call:
       case lir_virtual_call:
       case lir_icvirtual_call:
-      case lir_optvirtual_call: {
-        LIR_Op* delay_op = NULL;
+      case lir_optvirtual_call:
+      case lir_dynamic_call: {
         LIR_Op* prev = inst->at(i - 1);
         if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
             (op->code() != lir_virtual_call ||
@@ -3192,15 +3185,14 @@
             tty->print_cr("delayed");
             inst->at(i - 1)->print();
             inst->at(i)->print();
+            tty->cr();
           }
 #endif
           continue;
         }
 
-        if (!delay_op) {
-          delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
-          inst->insert_before(i + 1, delay_op);
-        }
+        LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
+        inst->insert_before(i + 1, delay_op);
         break;
       }
     }
--- a/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -221,7 +221,7 @@
   if (needs_card_mark) {
     LIR_Opr ptr = new_pointer_register();
     __ add(base_opr, LIR_OprFact::intptrConst(offset), ptr);
-    return new LIR_Address(ptr, 0, type);
+    return new LIR_Address(ptr, type);
   } else {
     return new LIR_Address(base_opr, offset, type);
   }
@@ -231,7 +231,7 @@
 void LIRGenerator::increment_counter(address counter, int step) {
   LIR_Opr pointer = new_pointer_register();
   __ move(LIR_OprFact::intptrConst(counter), pointer);
-  LIR_Address* addr = new LIR_Address(pointer, 0, T_INT);
+  LIR_Address* addr = new LIR_Address(pointer, T_INT);
   increment_counter(addr, step);
 }
 
@@ -1159,7 +1159,7 @@
       if (type == T_ARRAY || type == T_OBJECT) {
         LIR_Opr tmp = new_pointer_register();
         __ add(base_op, index_op, tmp);
-        addr = new LIR_Address(tmp, 0, type);
+        addr = new LIR_Address(tmp, type);
       } else {
         addr = new LIR_Address(base_op, index_op, type);
       }
--- a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -679,8 +679,15 @@
         __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                         G2_thread, Oissuing_pc->after_save());
         __ verify_not_null_oop(Oexception->after_save());
-        __ jmp(O0, 0);
-        __ delayed()->restore();
+
+        // Restore SP from L7 if the exception PC is a MethodHandle call site.
+        __ mov(O0, G5);  // Save the target address.
+        __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
+        __ tst(L0);  // Condition codes are preserved over the restore.
+        __ restore();
+
+        __ jmp(G5, 0);
+        __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);  // Restore SP if required.
       }
       break;
 
--- a/hotspot/src/cpu/sparc/vm/copy_sparc.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/copy_sparc.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -154,7 +154,7 @@
 }
 
 static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
-  assert(MinObjAlignmentInBytes == BytesPerLong, "need alternate implementation");
+  assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
 
    julong* to = (julong*)tohw;
    julong  v  = ((julong)value << 32) | value;
@@ -162,7 +162,7 @@
    // and be equal to 0 on 64-bit platform.
    size_t odd = count % (BytesPerLong / HeapWordSize) ;
 
-   size_t aligned_count = align_object_size(count - odd) / HeapWordsPerLong;
+   size_t aligned_count = align_object_offset(count - odd) / HeapWordsPerLong;
    julong* end = ((julong*)tohw) + aligned_count - 1;
    while (to <= end) {
      DEBUG_ONLY(count -= BytesPerLong / HeapWordSize ;)
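
The fill loop above widens a 32-bit pattern so each 64-bit store writes it twice, per the ((julong)value << 32) | value line. A sketch of that replication in plain Java:

    // Widen a 32-bit fill pattern into a 64-bit double-store value.
    static long widen(int value) {
        long v = value & 0xFFFFFFFFL;   // avoid sign extension
        return (v << 32) | v;
    }
    // widen(0xDEADBEEF) == 0xDEADBEEFDEADBEEFL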
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -336,9 +336,11 @@
 #endif // ASSERT
 }
 
-frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_adjusted_stack) {
-  _sp = sp;
-  _younger_sp = younger_sp;
+frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpreted) :
+  _sp(sp),
+  _younger_sp(younger_sp),
+  _deopt_state(unknown),
+  _sp_adjustment_by_callee(0) {
   if (younger_sp == NULL) {
     // make a deficient frame which doesn't know where its PC is
     _pc = NULL;
@@ -352,20 +354,32 @@
     // wrong.  (the _last_native_pc will have the right value)
     // So do not put add any asserts on the _pc here.
   }
-  if (younger_frame_adjusted_stack) {
-    // compute adjustment to this frame's SP made by its interpreted callee
-    _sp_adjustment_by_callee = (intptr_t*)((intptr_t)younger_sp[I5_savedSP->sp_offset_in_saved_window()] +
-                                             STACK_BIAS) - sp;
-  } else {
-    _sp_adjustment_by_callee = 0;
+
+  if (_pc != NULL)
+    _cb = CodeCache::find_blob(_pc);
+
+  // Check for MethodHandle call sites.
+  if (_cb != NULL) {
+    nmethod* nm = _cb->as_nmethod_or_null();
+    if (nm != NULL) {
+      if (nm->is_deopt_mh_entry(_pc) || nm->is_method_handle_return(_pc)) {
+        _sp_adjustment_by_callee = (intptr_t*) ((intptr_t) sp[L7_mh_SP_save->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
+        // The SP is already adjusted by this MH call site; don't
+        // overwrite this value with the wrong interpreter value.
+        younger_frame_is_interpreted = false;
+      }
+    }
   }
 
-  _deopt_state = unknown;
+  if (younger_frame_is_interpreted) {
+    // compute adjustment to this frame's SP made by its interpreted callee
+    _sp_adjustment_by_callee = (intptr_t*) ((intptr_t) younger_sp[I5_savedSP->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
+  }
 
-  // It is important that frame be fully construct when we do this lookup
-  // as get_original_pc() needs correct value for unextended_sp()
+  // It is important that the frame is fully constructed when we do
+  // this lookup as get_deopt_original_pc() needs a correct value for
+  // unextended_sp() which uses _sp_adjustment_by_callee.
   if (_pc != NULL) {
-    _cb = CodeCache::find_blob(_pc);
     address original_pc = nmethod::get_deopt_original_pc(this);
     if (original_pc != NULL) {
       _pc = original_pc;
@@ -462,9 +476,8 @@
 
   if (is_entry_frame()) return sender_for_entry_frame(map);
 
-  intptr_t* younger_sp     = sp();
-  intptr_t* sp             = sender_sp();
-  bool      adjusted_stack = false;
+  intptr_t* younger_sp = sp();
+  intptr_t* sp         = sender_sp();
 
   // Note:  The version of this operation on any platform with callee-save
   //        registers must update the register map (if not null).
@@ -483,8 +496,8 @@
   // interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be
   // explicitly recognized.
 
-  adjusted_stack = is_interpreted_frame();
-  if (adjusted_stack) {
+  bool frame_is_interpreted = is_interpreted_frame();
+  if (frame_is_interpreted) {
     map->make_integer_regs_unsaved();
     map->shift_window(sp, younger_sp);
   } else if (_cb != NULL) {
@@ -503,7 +516,7 @@
       }
     }
   }
-  return frame(sp, younger_sp, adjusted_stack);
+  return frame(sp, younger_sp, frame_is_interpreted);
 }
 
 
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -720,25 +720,30 @@
 
 
 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register cache, Register tmp,
-                                                       int bcp_offset, bool giant_index) {
+                                                       int bcp_offset, size_t index_size) {
   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
-  if (!giant_index) {
+  if (index_size == sizeof(u2)) {
     get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
-  } else {
+  } else if (index_size == sizeof(u4)) {
     assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
     get_4_byte_integer_at_bcp(bcp_offset, cache, tmp);
     assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
     xor3(tmp, -1, tmp);  // convert to plain index
+  } else if (index_size == sizeof(u1)) {
+    assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles");
+    ldub(Lbcp, bcp_offset, tmp);
+  } else {
+    ShouldNotReachHere();
   }
 }
 
 
 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
-                                                           int bcp_offset, bool giant_index) {
+                                                           int bcp_offset, size_t index_size) {
   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
   assert_different_registers(cache, tmp);
   assert_not_delayed();
-  get_cache_index_at_bcp(cache, tmp, bcp_offset, giant_index);
+  get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size);
   // convert from field index to ConstantPoolCacheEntry index and from
   // word index to byte offset
   sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
@@ -747,12 +752,15 @@
 
 
 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
-                                                               int bcp_offset, bool giant_index) {
+                                                               int bcp_offset, size_t index_size) {
   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
   assert_different_registers(cache, tmp);
   assert_not_delayed();
-  assert(!giant_index,"NYI");
-  get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
+  if (index_size == sizeof(u2)) {
+    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
+  } else {
+    ShouldNotReachHere();  // other sizes not supported here
+  }
               // convert from field index to ConstantPoolCacheEntry index
               // and from word index to byte offset
   sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
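
get_cache_index_at_bcp now dispatches on the index size: u2 indices are read directly, while the u4 invokedynamic index is stored as a one's complement "secondary index" and recovered with an xor, per the decode_secondary_index(~123) == 123 assert. A sketch of that decode:

    // xor3(tmp, -1, tmp) above undoes the one's-complement encoding.
    static int decodeSecondaryIndex(int encoded) {
        return encoded ^ -1;    // same as ~encoded
    }
    // decodeSecondaryIndex(~123) == 123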
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -182,9 +182,9 @@
                                   Register   Rdst,
                                   setCCOrNot should_set_CC = dont_set_CC );
 
-  void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
-  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
-  void get_cache_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
+  void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_index_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
 
 
   // common code
--- a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -375,10 +375,10 @@
       Register O0_scratch = O0_argslot;
       int stackElementSize = Interpreter::stackElementSize;
 
-      // Make space on the stack for the arguments.
-      __ sub(SP,    4*stackElementSize, SP);
-      __ sub(Gargs, 3*stackElementSize, Gargs);
-      //__ sub(Lesp,  3*stackElementSize, Lesp);
+      // Make space on the stack for the arguments and set Gargs
+      // correctly.
+      __ sub(SP, 4*stackElementSize, SP);  // Keep stack aligned.
+      __ add(SP, (frame::varargs_offset)*wordSize - 1*Interpreter::stackElementSize + STACK_BIAS + BytesPerWord, Gargs);
 
       // void raiseException(int code, Object actual, Object required)
       __ st(    O1_scratch, Address(Gargs, 2*stackElementSize));  // code
--- a/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -321,7 +321,8 @@
   set_long_at(add_offset,   set_data32_simm13( long_at(add_offset),   x));
 
   // also store the value into an oop_Relocation cell, if any
-  CodeBlob* nm = CodeCache::find_blob(instruction_address());
+  CodeBlob* cb = CodeCache::find_blob(instruction_address());
+  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
   if (nm != NULL) {
     RelocIterator iter(nm, instruction_address(), next_instruction_address());
     oop* oop_addr = NULL;
@@ -430,7 +431,8 @@
   set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));
 
   // also store the value into an oop_Relocation cell, if any
-  CodeBlob* nm = CodeCache::find_blob(instruction_address());
+  CodeBlob* cb = CodeCache::find_blob(instruction_address());
+  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
   if (nm != NULL) {
     RelocIterator iter(nm, instruction_address(), next_instruction_address());
     oop* oop_addr = NULL;
--- a/hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -142,9 +142,12 @@
 REGISTER_DEFINITION(Register, G3_scratch);
 REGISTER_DEFINITION(Register, G4_scratch);
 REGISTER_DEFINITION(Register, Gtemp);
+REGISTER_DEFINITION(Register, Lentry_args);
+
+// JSR 292
 REGISTER_DEFINITION(Register, G5_method_type);
 REGISTER_DEFINITION(Register, G3_method_handle);
-REGISTER_DEFINITION(Register, Lentry_args);
+REGISTER_DEFINITION(Register, L7_mh_SP_save);
 
 #ifdef CC_INTERP
 REGISTER_DEFINITION(Register, Lstate);
--- a/hotspot/src/cpu/sparc/vm/runtime_sparc.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/runtime_sparc.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -116,6 +116,11 @@
   __ mov(O0, G3_scratch);             // Move handler address to temp
   __ restore();
 
+  // Restore SP from L7 if the exception PC is a MethodHandle call site.
+  __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), O7);
+  __ tst(O7);
+  __ movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);
+
   // G3_scratch contains handler address
   // Since this may be the deopt blob we must set O7 to look like we returned
   // from the original pc that threw the exception
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -908,26 +908,13 @@
   // O0-O5          - Outgoing args in compiled layout
   // O6             - Adjusted or restored SP
   // O7             - Valid return address
-  // L0-L7, I0-I7    - Caller's temps (no frame pushed yet)
+  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
   // F0-F7          - more outgoing args
 
 
   // Gargs is the incoming argument base, and also an outgoing argument.
   __ sub(Gargs, BytesPerWord, Gargs);
 
-#ifdef ASSERT
-  {
-    // on entry OsavedSP and SP should be equal
-    Label ok;
-    __ cmp(O5_savedSP, SP);
-    __ br(Assembler::equal, false, Assembler::pt, ok);
-    __ delayed()->nop();
-    __ stop("I5_savedSP not set");
-    __ should_not_reach_here();
-    __ bind(ok);
-  }
-#endif
-
   // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
   // WITH O7 HOLDING A VALID RETURN PC
   //
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Tue Jun 29 10:48:02 2010 -0700
@@ -534,7 +534,10 @@
 //       The "return address" is the address of the call instruction, plus 8.
 
 int MachCallStaticJavaNode::ret_addr_offset() {
-  return NativeCall::instruction_size;  // call; delay slot
+  int offset = NativeCall::instruction_size;  // call; delay slot
+  if (_method_handle_invoke)
+    offset += 4;  // restore SP
+  return offset;
 }
 
 int MachCallDynamicJavaNode::ret_addr_offset() {
@@ -818,6 +821,10 @@
           !(n->ideal_Opcode()==Op_ConvI2D   && ld_op==Op_LoadF) &&
           !(n->ideal_Opcode()==Op_PrefetchRead  && ld_op==Op_LoadI) &&
           !(n->ideal_Opcode()==Op_PrefetchWrite && ld_op==Op_LoadI) &&
+          !(n->ideal_Opcode()==Op_Load2I    && ld_op==Op_LoadD) &&
+          !(n->ideal_Opcode()==Op_Load4C    && ld_op==Op_LoadD) &&
+          !(n->ideal_Opcode()==Op_Load4S    && ld_op==Op_LoadD) &&
+          !(n->ideal_Opcode()==Op_Load8B    && ld_op==Op_LoadD) &&
           !(n->rule() == loadUB_rule)) {
         verify_oops_warning(n, n->ideal_Opcode(), ld_op);
       }
@@ -829,6 +836,9 @@
           !(n->ideal_Opcode()==Op_StoreI && st_op==Op_StoreF) &&
           !(n->ideal_Opcode()==Op_StoreF && st_op==Op_StoreI) &&
           !(n->ideal_Opcode()==Op_StoreL && st_op==Op_StoreI) &&
+          !(n->ideal_Opcode()==Op_Store2I && st_op==Op_StoreD) &&
+          !(n->ideal_Opcode()==Op_Store4C && st_op==Op_StoreD) &&
+          !(n->ideal_Opcode()==Op_Store8B && st_op==Op_StoreD) &&
           !(n->ideal_Opcode()==Op_StoreD && st_op==Op_StoreI && n->rule() == storeD0_rule)) {
         verify_oops_warning(n, n->ideal_Opcode(), st_op);
       }
@@ -1750,6 +1760,12 @@
 // registers?  True for Intel but false for most RISCs
 const bool Matcher::clone_shift_expressions = false;
 
+bool Matcher::narrow_oop_use_complex_address() {
+  NOT_LP64(ShouldNotCallThis());
+  assert(UseCompressedOops, "only for compressed oops code");
+  return false;
+}
+
 // Is it better to copy float constants, or load them directly from memory?
 // Intel can load a float constant from a direct address, requiring no
 // extra registers.  Most RISCs will have to materialize an address into a
@@ -1858,7 +1874,7 @@
 }
 
 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
-  return RegMask();
+  return L7_REGP_mask;
 }
 
 %}
@@ -2441,6 +2457,16 @@
                     /*preserve_g2=*/true, /*force far call*/true);
   %}
 
+  enc_class preserve_SP %{
+    MacroAssembler _masm(&cbuf);
+    __ mov(SP, L7_mh_SP_save);
+  %}
+
+  enc_class restore_SP %{
+    MacroAssembler _masm(&cbuf);
+    __ mov(L7_mh_SP_save, SP);
+  %}
+
   enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
     // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
     // who we intended to call.
@@ -9213,6 +9239,7 @@
 // Call Java Static Instruction
 instruct CallStaticJavaDirect( method meth ) %{
   match(CallStaticJava);
+  predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
   effect(USE meth);
 
   size(8);
@@ -9223,6 +9250,20 @@
   ins_pipe(simple_call);
 %}
 
+// Call Java Static Instruction (method handle version)
+instruct CallStaticJavaHandle(method meth, l7RegP l7_mh_SP_save) %{
+  match(CallStaticJava);
+  predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
+  effect(USE meth, KILL l7_mh_SP_save);
+
+  size(8);
+  ins_cost(CALL_COST);
+  format %{ "CALL,static/MethodHandle" %}
+  ins_encode(preserve_SP, Java_Static_Call(meth), restore_SP, call_epilog);
+  ins_pc_relative(1);
+  ins_pipe(simple_call);
+%}
+
 // Call Java Dynamic Instruction
 instruct CallDynamicJavaDirect( method meth ) %{
   match(CallDynamicJava);
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -2911,16 +2911,6 @@
     // arraycopy stubs used by compilers
     generate_arraycopy_stubs();
 
-    // generic method handle stubs
-    if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
-      for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
-           ek < MethodHandles::_EK_LIMIT;
-           ek = MethodHandles::EntryKind(1 + (int)ek)) {
-        StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
-        MethodHandles::generate_method_handle_stub(_masm, ek);
-      }
-    }
-
     // Don't initialize the platform math functions since sparc
     // doesn't have intrinsics for these operations.
   }
--- a/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -43,7 +43,7 @@
 
 // MethodHandles adapters
 enum method_handles_platform_dependent_constants {
-  method_handles_adapters_code_size = 5000
+  method_handles_adapters_code_size = 6000
 };
 
 class Sparc {
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -204,7 +204,7 @@
   // out of the main line of code...
   if (EnableInvokeDynamic) {
     __ bind(L_giant_index);
-    __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, true);
+    __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4));
     __ ba(false, L_got_cache);
     __ delayed()->nop();
   }
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1949,23 +1949,30 @@
 }
 
 // ----------------------------------------------------------------------------
-void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
-  assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
-  bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
-
+void TemplateTable::resolve_cache_and_index(int byte_no,
+                                            Register result,
+                                            Register Rcache,
+                                            Register index,
+                                            size_t index_size) {
   // Depends on cpCacheOop layout!
-  const int shift_count = (1 + byte_no)*BitsPerByte;
   Label resolved;
 
-  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
-  if (is_invokedynamic) {
-    // We are resolved if the f1 field contains a non-null CallSite object.
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
+  if (byte_no == f1_oop) {
+    // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
+    // This kind of CP cache entry does not need to match the flags byte, because
+    // there is a 1-1 relation between bytecode type and CP entry type.
+    assert_different_registers(result, Rcache);
     __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
-              ConstantPoolCacheEntry::f1_offset(), Lbyte_code);
-    __ tst(Lbyte_code);
+              ConstantPoolCacheEntry::f1_offset(), result);
+    __ tst(result);
     __ br(Assembler::notEqual, false, Assembler::pt, resolved);
     __ delayed()->set((int)bytecode(), O1);
   } else {
+    assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+    assert(result == noreg, "");  //else change code for setting result
+    const int shift_count = (1 + byte_no)*BitsPerByte;
+
     __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
               ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
 
@@ -1992,7 +1999,10 @@
   // first time invocation - must resolve first
   __ call_VM(noreg, entry, O1);
   // Update registers with resolved info
-  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
+  if (result != noreg)
+    __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
+              ConstantPoolCacheEntry::f1_offset(), result);
   __ bind(resolved);
 }
 
@@ -2001,7 +2011,8 @@
                                                Register Ritable_index,
                                                Register Rflags,
                                                bool is_invokevirtual,
-                                               bool is_invokevfinal) {
+                                               bool is_invokevfinal,
+                                               bool is_invokedynamic) {
   // Uses both G3_scratch and G4_scratch
   Register Rcache = G3_scratch;
   Register Rscratch = G4_scratch;
@@ -2025,11 +2036,15 @@
 
   if (is_invokevfinal) {
     __ get_cache_and_index_at_bcp(Rcache, Rscratch, 1);
+    __ ld_ptr(Rcache, method_offset, Rmethod);
+  } else if (byte_no == f1_oop) {
+    // Resolved f1_oop goes directly into 'method' register.
+    resolve_cache_and_index(byte_no, Rmethod, Rcache, Rscratch, sizeof(u4));
   } else {
-    resolve_cache_and_index(byte_no, Rcache, Rscratch);
+    resolve_cache_and_index(byte_no, noreg, Rcache, Rscratch, sizeof(u2));
+    __ ld_ptr(Rcache, method_offset, Rmethod);
   }
 
-  __ ld_ptr(Rcache, method_offset, Rmethod);
   if (Ritable_index != noreg) {
     __ ld_ptr(Rcache, index_offset, Ritable_index);
   }
@@ -2110,7 +2125,7 @@
   Register Rflags = G1_scratch;
   ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
 
-  resolve_cache_and_index(byte_no, Rcache, index);
+  resolve_cache_and_index(byte_no, noreg, Rcache, index, sizeof(u2));
   jvmti_post_field_access(Rcache, index, is_static, false);
   load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
 
@@ -2475,7 +2490,7 @@
   Register Rflags = G1_scratch;
   ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
 
-  resolve_cache_and_index(byte_no, Rcache, index);
+  resolve_cache_and_index(byte_no, noreg, Rcache, index, sizeof(u2));
   jvmti_post_field_mod(Rcache, index, is_static);
   load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
 
@@ -2816,6 +2831,7 @@
 
 void TemplateTable::invokevirtual(int byte_no) {
   transition(vtos, vtos);
+  assert(byte_no == f2_byte, "use this argument");
 
   Register Rscratch = G3_scratch;
   Register Rtemp = G4_scratch;
@@ -2823,7 +2839,7 @@
   Register Rrecv = G5_method;
   Label notFinal;
 
-  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true);
+  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
   __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
 
   // Check for vfinal
@@ -2864,9 +2880,10 @@
 
 void TemplateTable::fast_invokevfinal(int byte_no) {
   transition(vtos, vtos);
+  assert(byte_no == f2_byte, "use this argument");
 
   load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
-                             /*is_invokevfinal*/true);
+                             /*is_invokevfinal*/true, false);
   __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
   invokevfinal_helper(G3_scratch, Lscratch);
 }
@@ -2901,12 +2918,13 @@
 
 void TemplateTable::invokespecial(int byte_no) {
   transition(vtos, vtos);
+  assert(byte_no == f1_byte, "use this argument");
 
   Register Rscratch = G3_scratch;
   Register Rtemp = G4_scratch;
   Register Rret = Lscratch;
 
-  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, false);
+  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
   __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
 
   __ verify_oop(G5_method);
@@ -2934,12 +2952,13 @@
 
 void TemplateTable::invokestatic(int byte_no) {
   transition(vtos, vtos);
+  assert(byte_no == f1_byte, "use this argument");
 
   Register Rscratch = G3_scratch;
   Register Rtemp = G4_scratch;
   Register Rret = Lscratch;
 
-  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, false);
+  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
   __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
 
   __ verify_oop(G5_method);
@@ -2992,6 +3011,7 @@
 
 void TemplateTable::invokeinterface(int byte_no) {
   transition(vtos, vtos);
+  assert(byte_no == f1_byte, "use this argument");
 
   Register Rscratch = G4_scratch;
   Register Rret = G3_scratch;
@@ -3001,7 +3021,7 @@
   Register Rflags = O1;
   assert_different_registers(Rscratch, G5_method);
 
-  load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, false);
+  load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, /*virtual*/ false, false, false);
   __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
 
   // get receiver
@@ -3118,6 +3138,7 @@
 
 void TemplateTable::invokedynamic(int byte_no) {
   transition(vtos, vtos);
+  assert(byte_no == f1_oop, "use this argument");
 
   if (!EnableInvokeDynamic) {
     // We should not encounter this bytecode if !EnableInvokeDynamic.
@@ -3132,7 +3153,6 @@
 
   // G5: CallSite object (f1)
   // XX: unused (f2)
-  // G3: receiver address
   // XX: flags (unused)
 
   Register G5_callsite = G5_method;
@@ -3140,7 +3160,8 @@
   Register Rtemp       = G1_scratch;
   Register Rret        = Lscratch;
 
-  load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret, false);
+  load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret,
+                             /*virtual*/ false, /*vfinal*/ false, /*indy*/ true);
   __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
 
   __ verify_oop(G5_callsite);
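
The SPARC hunks above add a third resolution state for constant-pool cache
entries. For f1_byte/f2_byte entries, "resolved" means the matching bytecode
has been recorded in the entry's indices word; for the new f1_oop kind,
"resolved" means f1 holds a non-null object (the invokedynamic CallSite),
which is itself the payload loaded into the method register. A minimal C++
model of the distinction, with stand-in field names and a stand-in f1_oop
marker value rather than HotSpot's actual layout:

    #include <cstdint>

    enum { f1_byte = 1, f2_byte = 2, f1_oop = -1 };  // marker values assumed
    const int BitsPerByte = 8;

    struct CpCacheEntry {      // stand-in for ConstantPoolCacheEntry
      uintptr_t indices;       // resolved bytecodes at (1 + byte_no) * 8 bits
      void*     f1;            // f1_oop payload, e.g. a CallSite oop
    };

    bool is_resolved(const CpCacheEntry& e, int byte_no, uint8_t bytecode) {
      if (byte_no == f1_oop) {
        // No flags byte to match: bytecode type and entry type map 1-1,
        // so a non-null f1 alone means "resolved" (and f1 is the result).
        return e.f1 != nullptr;
      }
      int shift = (1 + byte_no) * BitsPerByte;
      return uint8_t(e.indices >> shift) == bytecode;
    }
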
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -65,13 +65,6 @@
       FLAG_SET_DEFAULT(UseInlineCaches, false);
     }
 #ifdef _LP64
-    // Single issue niagara1 is slower for CompressedOops
-    // but niagaras after that it's fine.
-    if (!is_niagara1_plus()) {
-      if (FLAG_IS_DEFAULT(UseCompressedOops)) {
-        FLAG_SET_ERGO(bool, UseCompressedOops, false);
-      }
-    }
     // 32-bit oops don't make sense for the 64-bit VM on sparc
     // since the 32-bit VM has the same registers and smaller objects.
     Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -7643,6 +7643,9 @@
   // Pass register number to verify_oop_subroutine
   char* b = new char[strlen(s) + 50];
   sprintf(b, "verify_oop: %s: %s", reg->name(), s);
+#ifdef _LP64
+  push(rscratch1);                    // save r10, trashed by movptr()
+#endif
   push(rax);                          // save rax,
   push(reg);                          // pass register argument
   ExternalAddress buffer((address) b);
@@ -7653,6 +7656,7 @@
   // call indirectly to solve generation ordering problem
   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
   call(rax);
+  // Caller pops the arguments (oop, message) and restores rax, r10.
 }
 
 
@@ -7767,6 +7771,9 @@
   char* b = new char[strlen(s) + 50];
   sprintf(b, "verify_oop_addr: %s", s);
 
+#ifdef _LP64
+  push(rscratch1);                    // save r10, trashed by movptr()
+#endif
   push(rax);                          // save rax,
   // addr may contain rsp so we will have to adjust it based on the push
   // we just did
@@ -7789,7 +7796,7 @@
   // call indirectly to solve generation ordering problem
   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
   call(rax);
-  // Caller pops the arguments and restores rax, from the stack
+  // Caller pops the arguments (addr, message) and restores rax, r10.
 }
 
 void MacroAssembler::verify_tlab() {
@@ -8185,9 +8192,14 @@
     assert (Universe::heap() != NULL, "java heap should be initialized");
     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
     if (Universe::narrow_oop_shift() != 0) {
-      assert(Address::times_8 == LogMinObjAlignmentInBytes &&
-             Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
-      movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+      assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+      if (LogMinObjAlignmentInBytes == Address::times_8) {
+        movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+      } else {
+        // OK to use shift since we don't need to preserve flags.
+        shlq(dst, LogMinObjAlignmentInBytes);
+        movq(dst, Address(r12_heapbase, dst, Address::times_1, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+      }
     } else {
       movq(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
     }
@@ -8361,31 +8373,43 @@
 }
 
 void  MacroAssembler::decode_heap_oop_not_null(Register r) {
+  // Note: this clobbers the condition flags.
   assert (UseCompressedOops, "should only be used for compressed headers");
   assert (Universe::heap() != NULL, "java heap should be initialized");
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
   if (Universe::narrow_oop_shift() != 0) {
-    assert (Address::times_8 == LogMinObjAlignmentInBytes &&
-            Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
-    // Don't use Shift since it modifies flags.
-    leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
+    assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+    shlq(r, LogMinObjAlignmentInBytes);
+    if (Universe::narrow_oop_base() != NULL) {
+      addq(r, r12_heapbase);
+    }
   } else {
     assert (Universe::narrow_oop_base() == NULL, "sanity");
   }
 }
 
 void  MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
+  // Note: this clobbers the condition flags.
   assert (UseCompressedOops, "should only be used for compressed headers");
   assert (Universe::heap() != NULL, "java heap should be initialized");
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
   if (Universe::narrow_oop_shift() != 0) {
-    assert (Address::times_8 == LogMinObjAlignmentInBytes &&
-            Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
-    leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
+    assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+    if (LogMinObjAlignmentInBytes == Address::times_8) {
+      leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
+    } else {
+      if (dst != src) {
+        movq(dst, src);
+      }
+      shlq(dst, LogMinObjAlignmentInBytes);
+      if (Universe::narrow_oop_base() != NULL) {
+        addq(dst, r12_heapbase);
+      }
+    }
   } else if (dst != src) {
     assert (Universe::narrow_oop_base() == NULL, "sanity");
     movq(dst, src);
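
The decode_heap_oop_not_null rewrite stops assuming an 8-byte heap alignment.
The underlying arithmetic is oop = heap_base + (narrow << shift); the old code
folded it into one leaq with an Address::times_8 scale, which only works while
the shift is at most 3, so larger object alignments now take an explicit
shlq/addq sequence (which, unlike leaq, changes the condition flags, hence the
new flag notes). A sketch of the computation, with hypothetical names:

    #include <cstdint>

    // narrow is the 32-bit compressed oop; heap_base may be 0 for
    // zero-based compressed oops (narrow_oop_base() == NULL).
    uintptr_t decode_heap_oop(uint32_t narrow, uintptr_t heap_base, int shift) {
      uintptr_t oop = uintptr_t(narrow) << shift;  // shlq
      if (heap_base != 0)
        oop += heap_base;                          // addq r, r12_heapbase
      return oop;
    }
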
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -135,6 +135,9 @@
 
 #endif // _LP64
 
+// JSR 292 fixed register usages:
+REGISTER_DECLARATION(Register, rbp_mh_SP_save, rbp);
+
 // Address is an abstraction used to represent a memory location
 // using any of the amd64 addressing modes with one object.
 //
--- a/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -136,8 +136,8 @@
 //               FrameMap
 //--------------------------------------------------------
 
-void FrameMap::init() {
-  if (_init_done) return;
+void FrameMap::initialize() {
+  assert(!_init_done, "once");
 
   assert(nof_cpu_regs == LP64_ONLY(16) NOT_LP64(8), "wrong number of CPU registers");
   map_register(0, rsi);  rsi_opr = LIR_OprFact::single_cpu(0);
@@ -309,6 +309,13 @@
 }
 
 
+// JSR 292
+LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
+  assert(rbp == rbp_mh_SP_save, "must be same register");
+  return rbp_opr;
+}
+
+
 bool FrameMap::validate_frame() {
   return true;
 }
--- a/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -126,6 +126,3 @@
     assert(i >= 0 && i < nof_caller_save_xmm_regs, "out of bounds");
     return _caller_save_xmm_regs[i];
   }
-
-  // JSR 292
-  static LIR_Opr& method_handle_invoke_SP_save_opr() { return rbp_opr; }
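
rbp_mh_SP_save gives a name to an existing convention: around a method-handle
invoke, which may let the callee move SP, the caller parks SP in RBP (a
register preserved across calls), and the exception-return paths conditionally
restore SP from it when the throwing PC is a MethodHandle call site. A
schematic of the convention in plain C++, purely illustrative:

    #include <cstdint>

    struct CpuState {
      uintptr_t rsp, rbp;             // rbp doubles as rbp_mh_SP_save here
      bool is_method_handle_return;   // thread flag tested by the cmovptr
    };

    void preserve_SP(CpuState& s) { s.rbp = s.rsp; }  // before the MH call
    void restore_SP(CpuState& s)  { s.rsp = s.rbp; }  // after the MH call

    void unwind_to_handler(CpuState& s) {
      if (s.is_method_handle_return)  // cmovptr(notEqual, rsp, rbp_mh_SP_save)
        s.rsp = s.rbp;
    }
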
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -2462,9 +2462,18 @@
       }
 #endif // _LP64
     } else {
+#ifdef _LP64
+      Register r_lo;
+      if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
+        r_lo = right->as_register();
+      } else {
+        r_lo = right->as_register_lo();
+      }
+#else
       Register r_lo = right->as_register_lo();
       Register r_hi = right->as_register_hi();
       assert(l_lo != r_hi, "overwriting registers");
+#endif
       switch (code) {
         case lir_logic_and:
           __ andptr(l_lo, r_lo);
@@ -2784,7 +2793,7 @@
   assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
          "must be aligned");
   __ call(AddressLiteral(op->addr(), rtype));
-  add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
+  add_call_info(code_offset(), op->info());
 }
 
 
@@ -2795,7 +2804,7 @@
          (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
          "must be aligned");
   __ call(AddressLiteral(op->addr(), rh));
-  add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
+  add_call_info(code_offset(), op->info());
 }
 
 
@@ -2805,16 +2814,6 @@
 }
 
 
-void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
-  __ movptr(FrameMap::method_handle_invoke_SP_save_opr()->as_register(), rsp);
-}
-
-
-void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
-  __ movptr(rsp, FrameMap::method_handle_invoke_SP_save_opr()->as_register());
-}
-
-
 void LIR_Assembler::emit_static_call_stub() {
   address call_pc = __ pc();
   address stub = __ start_a_stub(call_stub_size);
--- a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -175,7 +175,7 @@
     // store and again for the card mark.
     LIR_Opr tmp = new_pointer_register();
     __ leal(LIR_OprFact::address(addr), tmp);
-    return new LIR_Address(tmp, 0, type);
+    return new LIR_Address(tmp, type);
   } else {
     return addr;
   }
@@ -185,7 +185,7 @@
 void LIRGenerator::increment_counter(address counter, int step) {
   LIR_Opr pointer = new_pointer_register();
   __ move(LIR_OprFact::intptrConst(counter), pointer);
-  LIR_Address* addr = new LIR_Address(pointer, 0, T_INT);
+  LIR_Address* addr = new LIR_Address(pointer, T_INT);
   increment_counter(addr, step);
 }
 
--- a/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -782,7 +782,7 @@
   // Restore SP from BP if the exception PC is a MethodHandle call site.
   NOT_LP64(__ get_thread(thread);)
   __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
-  __ cmovptr(Assembler::notEqual, rsp, rbp);
+  __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
 
   // continue at exception handler (return address removed)
   // note: do *not* remove arguments when unwinding the
@@ -1581,7 +1581,6 @@
           __ should_not_reach_here();
           break;
         }
-
         __ push(rax);
         __ push(rdx);
 
@@ -1605,8 +1604,8 @@
 
         // Can we store original value in the thread's buffer?
 
-        LP64_ONLY(__ movslq(tmp, queue_index);)
 #ifdef _LP64
+        __ movslq(tmp, queue_index);
         __ cmpq(tmp, 0);
 #else
         __ cmpl(queue_index, 0);
@@ -1628,13 +1627,33 @@
         __ jmp(done);
 
         __ bind(runtime);
+        __ push(rcx);
+#ifdef _LP64
+        __ push(r8);
+        __ push(r9);
+        __ push(r10);
+        __ push(r11);
+#  ifndef _WIN64
+        __ push(rdi);
+        __ push(rsi);
+#  endif
+#endif
         // load the pre-value
-        __ push(rcx);
         f.load_argument(0, rcx);
         __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
+#ifdef _LP64
+#  ifndef _WIN64
+        __ pop(rsi);
+        __ pop(rdi);
+#  endif
+        __ pop(r11);
+        __ pop(r10);
+        __ pop(r9);
+        __ pop(r8);
+#endif
         __ pop(rcx);
+        __ bind(done);
 
-        __ bind(done);
         __ pop(rdx);
         __ pop(rax);
       }
@@ -1664,13 +1683,13 @@
                                         PtrQueue::byte_offset_of_buf()));
 
         __ push(rax);
-        __ push(rdx);
+        __ push(rcx);
 
         NOT_LP64(__ get_thread(thread);)
         ExternalAddress cardtable((address)ct->byte_map_base);
         assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
 
-        const Register card_addr = rdx;
+        const Register card_addr = rcx;
 #ifdef _LP64
         const Register tmp = rscratch1;
         f.load_argument(0, card_addr);
@@ -1679,7 +1698,7 @@
         // get the address of the card
         __ addq(card_addr, tmp);
 #else
-        const Register card_index = rdx;
+        const Register card_index = rcx;
         f.load_argument(0, card_index);
         __ shrl(card_index, CardTableModRefBS::card_shift);
 
@@ -1716,12 +1735,32 @@
         __ jmp(done);
 
         __ bind(runtime);
-        NOT_LP64(__ push(rcx);)
+        __ push(rdx);
+#ifdef _LP64
+        __ push(r8);
+        __ push(r9);
+        __ push(r10);
+        __ push(r11);
+#  ifndef _WIN64
+        __ push(rdi);
+        __ push(rsi);
+#  endif
+#endif
         __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
-        NOT_LP64(__ pop(rcx);)
+#ifdef _LP64
+#  ifndef _WIN64
+        __ pop(rsi);
+        __ pop(rdi);
+#  endif
+        __ pop(r11);
+        __ pop(r10);
+        __ pop(r9);
+        __ pop(r8);
+#endif
+        __ pop(rdx);
+        __ bind(done);
 
-        __ bind(done);
-        __ pop(rdx);
+        __ pop(rcx);
         __ pop(rax);
 
       }
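
Two things happen in the Runtime1 G1 hunks: the barrier scratch register moves
from rdx to rcx, and the slow paths now save every register the C calling
convention lets SharedRuntime::g1_wb_pre/g1_wb_post clobber (rdi/rsi only off
Windows, where the x64 ABI makes them callee-saved). The fast path itself is a
thread-local queue enqueue; a model of that logic, with stand-in names for the
PtrQueue fields:

    #include <cstddef>

    struct PtrQueue {      // stand-in for the thread's SATB/dirty-card queue
      ptrdiff_t index;     // byte offset of next free slot; 0 means full
      void**    buf;
    };

    // Mirrors the generated code: test index, back it off by a word,
    // store, and fall into the runtime call only when the buffer is full.
    bool try_enqueue(PtrQueue& q, void* value) {
      if (q.index == 0) return false;            // full -> runtime path
      q.index -= sizeof(void*);
      q.buf[q.index / sizeof(void*)] = value;
      return true;
    }
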
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -189,11 +189,11 @@
 }
 
 
-void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_offset, bool giant_index) {
+void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_offset, size_t index_size) {
   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
-  if (!giant_index) {
+  if (index_size == sizeof(u2)) {
     load_unsigned_short(reg, Address(rsi, bcp_offset));
-  } else {
+  } else if (index_size == sizeof(u4)) {
     assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
     movl(reg, Address(rsi, bcp_offset));
     // Check if the secondary index definition is still ~x, otherwise
@@ -201,14 +201,19 @@
     // plain index.
     assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
     notl(reg);  // convert to plain index
+  } else if (index_size == sizeof(u1)) {
+    assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles");
+    load_unsigned_byte(reg, Address(rsi, bcp_offset));
+  } else {
+    ShouldNotReachHere();
   }
 }
 
 
 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register index,
-                                                           int bcp_offset, bool giant_index) {
+                                                           int bcp_offset, size_t index_size) {
   assert(cache != index, "must use different registers");
-  get_cache_index_at_bcp(index, bcp_offset, giant_index);
+  get_cache_index_at_bcp(index, bcp_offset, index_size);
   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
   assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
   shlptr(index, 2); // convert from field index to ConstantPoolCacheEntry index
@@ -216,9 +221,9 @@
 
 
 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
-                                                               int bcp_offset, bool giant_index) {
+                                                               int bcp_offset, size_t index_size) {
   assert(cache != tmp, "must use different register");
-  get_cache_index_at_bcp(tmp, bcp_offset, giant_index);
+  get_cache_index_at_bcp(tmp, bcp_offset, index_size);
   assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
                                // convert from field index to ConstantPoolCacheEntry index
                                // and from word offset to byte offset
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -76,9 +76,9 @@
   void get_cpool_and_tags(Register cpool, Register tags)   { get_constant_pool(cpool); movptr(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes()));
   }
   void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
-  void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, bool giant_index = false);
-  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
-  void get_cache_index_at_bcp(Register index, int bcp_offset, bool giant_index = false);
+  void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
 
   // Expression stack
   void f2ieee();                                           // truncate ftos to 32bits
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -187,11 +187,11 @@
 
 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
                                                        int bcp_offset,
-                                                       bool giant_index) {
+                                                       size_t index_size) {
   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
-  if (!giant_index) {
+  if (index_size == sizeof(u2)) {
     load_unsigned_short(index, Address(r13, bcp_offset));
-  } else {
+  } else if (index_size == sizeof(u4)) {
     assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
     movl(index, Address(r13, bcp_offset));
     // Check if the secondary index definition is still ~x, otherwise
@@ -199,6 +199,11 @@
     // plain index.
     assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
     notl(index);  // convert to plain index
+  } else if (index_size == sizeof(u1)) {
+    assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles");
+    load_unsigned_byte(index, Address(r13, bcp_offset));
+  } else {
+    ShouldNotReachHere();
   }
 }
 
@@ -206,9 +211,9 @@
 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
                                                            Register index,
                                                            int bcp_offset,
-                                                           bool giant_index) {
+                                                           size_t index_size) {
   assert(cache != index, "must use different registers");
-  get_cache_index_at_bcp(index, bcp_offset, giant_index);
+  get_cache_index_at_bcp(index, bcp_offset, index_size);
   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
   // convert from field index to ConstantPoolCacheEntry index
@@ -219,9 +224,9 @@
 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                                Register tmp,
                                                                int bcp_offset,
-                                                               bool giant_index) {
+                                                               size_t index_size) {
   assert(cache != tmp, "must use different register");
-  get_cache_index_at_bcp(tmp, bcp_offset, giant_index);
+  get_cache_index_at_bcp(tmp, bcp_offset, index_size);
   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
   // convert from field index to ConstantPoolCacheEntry index
   // and from word offset to byte offset
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -95,10 +95,10 @@
 
   void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
   void get_cache_and_index_at_bcp(Register cache, Register index,
-                                  int bcp_offset, bool giant_index = false);
+                                  int bcp_offset, size_t index_size = sizeof(u2));
   void get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
-                                      int bcp_offset, bool giant_index = false);
-  void get_cache_index_at_bcp(Register index, int bcp_offset, bool giant_index = false);
+                                      int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
 
 
   void pop_ptr(Register r = rax);
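
Across all four interp_masm files the boolean giant_index becomes a byte
width, because three encodings now exist: the normal two-byte cache index,
invokedynamic's four-byte index (stored bitwise-inverted, hence the notl), and
a one-byte index reserved for method handles. A host-C++ rendering of the
dispatch, assuming little-endian loads as on x86:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    typedef uint8_t u1; typedef uint16_t u2; typedef uint32_t u4;

    uint32_t cache_index_at_bcp(const uint8_t* bcp, int offset,
                                size_t index_size) {
      assert(offset > 0 && "bcp still points at the bytecode itself");
      if (index_size == sizeof(u2)) {
        u2 v; std::memcpy(&v, bcp + offset, sizeof v);  // load_unsigned_short
        return v;
      } else if (index_size == sizeof(u4)) {
        u4 v; std::memcpy(&v, bcp + offset, sizeof v);  // movl
        return ~v;                                  // notl: ~x -> plain index
      } else if (index_size == sizeof(u1)) {
        return bcp[offset];                             // load_unsigned_byte
      }
      assert(false && "ShouldNotReachHere");
      return 0;
    }
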
--- a/hotspot/src/cpu/x86/vm/register_definitions_x86.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/register_definitions_x86.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -115,3 +115,6 @@
 REGISTER_DEFINITION(MMXRegister, mmx5 );
 REGISTER_DEFINITION(MMXRegister, mmx6 );
 REGISTER_DEFINITION(MMXRegister, mmx7 );
+
+// JSR 292
+REGISTER_DEFINITION(Register, rbp_mh_SP_save);
--- a/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -117,7 +117,7 @@
 
   // Restore SP from BP if the exception PC is a MethodHandle call site.
   __ cmpl(Address(rcx, JavaThread::is_method_handle_return_offset()), 0);
-  __ cmovptr(Assembler::notEqual, rsp, rbp);
+  __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
 
   // We have a handler in rax, (could be deopt blob)
   // rdx - throwing pc, deopt blob will need it.
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -3305,7 +3305,7 @@
 
   // Restore SP from BP if the exception PC is a MethodHandle call site.
   __ cmpl(Address(r15_thread, JavaThread::is_method_handle_return_offset()), 0);
-  __ cmovptr(Assembler::notEqual, rsp, rbp);
+  __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
 
   // We have a handler in rax (could be deopt blob).
   __ mov(r8, rax);
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -914,6 +914,7 @@
   //  * [tos + 5]: error message (char*)
   //  * [tos + 6]: object to verify (oop)
   //  * [tos + 7]: saved rax - saved by caller and bashed
+  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
   //  * = popped on exit
   address generate_verify_oop() {
     StubCodeMark mark(this, "StubRoutines", "verify_oop");
@@ -934,6 +935,7 @@
            // After previous pushes.
            oop_to_verify = 6 * wordSize,
            saved_rax     = 7 * wordSize,
+           saved_r10     = 8 * wordSize,
 
            // Before the call to MacroAssembler::debug(), see below.
            return_addr   = 16 * wordSize,
@@ -983,15 +985,17 @@
     // return if everything seems ok
     __ bind(exit);
     __ movptr(rax, Address(rsp, saved_rax));     // get saved rax back
+    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
     __ pop(c_rarg3);                             // restore c_rarg3
     __ pop(c_rarg2);                             // restore c_rarg2
     __ pop(r12);                                 // restore r12
     __ popf();                                   // restore flags
-    __ ret(3 * wordSize);                        // pop caller saved stuff
+    __ ret(4 * wordSize);                        // pop caller saved stuff
 
     // handle errors
     __ bind(error);
     __ movptr(rax, Address(rsp, saved_rax));     // get saved rax back
+    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
     __ pop(c_rarg3);                             // get saved c_rarg3 back
     __ pop(c_rarg2);                             // get saved c_rarg2 back
     __ pop(r12);                                 // get saved r12 back
@@ -1009,6 +1013,7 @@
     //   * [tos + 17] error message (char*)
     //   * [tos + 18] object to verify (oop)
     //   * [tos + 19] saved rax - saved by caller and bashed
+    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
     //   * = popped on exit
 
     __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
@@ -1021,7 +1026,7 @@
     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
     __ mov(rsp, r12);                               // restore rsp
     __ popa();                                      // pop registers (includes r12)
-    __ ret(3 * wordSize);                           // pop caller saved stuff
+    __ ret(4 * wordSize);                           // pop caller saved stuff
 
     return start;
   }
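
With r10 added, the verify_oop stub's frame bookkeeping shifts by one word
throughout: the caller now pushes four words (message, oop, saved rax, saved
r10), and ret(4 * wordSize) discards exactly those after the registers have
been reloaded from their slots. The word offsets from rsp inside the stub,
reconstructed here for reference and illustrative only:

    // After the call (1 word) plus the stub's own pushf/r12/c_rarg2/c_rarg3:
    enum VerifyOopFrame {             // offsets in words from rsp
      saved_c_rarg3 = 0, saved_c_rarg2 = 1, saved_r12 = 2, saved_flags = 3,
      return_addr   = 4,
      error_msg     = 5,              // pushed by the caller, popped by ret
      oop_to_verify = 6,
      saved_rax     = 7,
      saved_r10     = 8               // the new slot
    };
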
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -214,7 +214,7 @@
     __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
     __ jcc(Assembler::equal, L_giant_index);
   }
-  __ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
+  __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
   __ bind(L_got_cache);
   __ movl(rbx, Address(rbx, rcx,
                     Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
@@ -226,7 +226,7 @@
   // out of the main line of code...
   if (EnableInvokeDynamic) {
     __ bind(L_giant_index);
-    __ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
+    __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
     __ jmp(L_got_cache);
   }
 
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -192,7 +192,7 @@
     __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
     __ jcc(Assembler::equal, L_giant_index);
   }
-  __ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
+  __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
   __ bind(L_got_cache);
   __ movl(rbx, Address(rbx, rcx,
                        Address::times_ptr,
@@ -205,7 +205,7 @@
   // out of the main line of code...
   if (EnableInvokeDynamic) {
     __ bind(L_giant_index);
-    __ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
+    __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
     __ jmp(L_got_cache);
   }
 
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -2012,22 +2012,29 @@
   __ membar(order_constraint);
 }
 
-void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
-  assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
-  bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
-
+void TemplateTable::resolve_cache_and_index(int byte_no,
+                                            Register result,
+                                            Register Rcache,
+                                            Register index,
+                                            size_t index_size) {
   Register temp = rbx;
 
-  assert_different_registers(Rcache, index, temp);
-
-  const int shift_count = (1 + byte_no)*BitsPerByte;
+  assert_different_registers(result, Rcache, index, temp);
+
   Label resolved;
-  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
-  if (is_invokedynamic) {
-    // we are resolved if the f1 field contains a non-null CallSite object
-    __ cmpptr(Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()), (int32_t) NULL_WORD);
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
+  if (byte_no == f1_oop) {
+    // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
+    // This kind of CP cache entry does not need to match the flags byte, because
+    // there is a 1-1 relation between bytecode type and CP entry type.
+    assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
+    __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
+    __ testptr(result, result);
     __ jcc(Assembler::notEqual, resolved);
   } else {
+    assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+    assert(result == noreg, "");  //else change code for setting result
+    const int shift_count = (1 + byte_no)*BitsPerByte;
     __ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
     __ shrl(temp, shift_count);
     // have we resolved this bytecode?
@@ -2053,7 +2060,9 @@
   __ movl(temp, (int)bytecode());
   __ call_VM(noreg, entry, temp);
   // Update registers with resolved info
-  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
+  if (result != noreg)
+    __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
   __ bind(resolved);
 }
 
@@ -2087,7 +2096,8 @@
                                                Register itable_index,
                                                Register flags,
                                                bool is_invokevirtual,
-                                               bool is_invokevfinal /*unused*/) {
+                                               bool is_invokevfinal /*unused*/,
+                                               bool is_invokedynamic) {
   // setup registers
   const Register cache = rcx;
   const Register index = rdx;
@@ -2109,13 +2119,18 @@
   const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
                                     ConstantPoolCacheEntry::f2_offset());
 
-  resolve_cache_and_index(byte_no, cache, index);
-
-  __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
+  if (byte_no == f1_oop) {
+    // Resolved f1_oop goes directly into the 'method' register.
+    assert(is_invokedynamic, "");
+    resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
+  } else {
+    resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+    __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
+  }
   if (itable_index != noreg) {
     __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
   }
-  __ movl(flags , Address(cache, index, Address::times_ptr, flags_offset ));
+  __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
 }
 
 
@@ -2169,7 +2184,7 @@
   const Register off   = rbx;
   const Register flags = rax;
 
-  resolve_cache_and_index(byte_no, cache, index);
+  resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
   jvmti_post_field_access(cache, index, is_static, false);
   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
 
@@ -2378,7 +2393,7 @@
   const Register off   = rbx;
   const Register flags = rax;
 
-  resolve_cache_and_index(byte_no, cache, index);
+  resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
   jvmti_post_field_mod(cache, index, is_static);
   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
 
@@ -2815,10 +2830,11 @@
   // save 'interpreter return address'
   __ save_bcp();
 
-  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual);
+  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
 
   // load receiver if needed (note: no return address pushed yet)
   if (load_receiver) {
+    assert(!is_invokedynamic, "");
     __ movl(recv, flags);
     __ andl(recv, 0xFF);
     // recv count is 0 based?
@@ -2910,6 +2926,7 @@
 
 void TemplateTable::invokevirtual(int byte_no) {
   transition(vtos, vtos);
+  assert(byte_no == f2_byte, "use this argument");
   prepare_invoke(rbx, noreg, byte_no);
 
   // rbx,: index
@@ -2922,6 +2939,7 @@
 
 void TemplateTable::invokespecial(int byte_no) {
   transition(vtos, vtos);
+  assert(byte_no == f1_byte, "use this argument");
   prepare_invoke(rbx, noreg, byte_no);
   // do the call
   __ verify_oop(rbx);
@@ -2932,6 +2950,7 @@
 
 void TemplateTable::invokestatic(int byte_no) {
   transition(vtos, vtos);
+  assert(byte_no == f1_byte, "use this argument");
   prepare_invoke(rbx, noreg, byte_no);
   // do the call
   __ verify_oop(rbx);
@@ -2942,12 +2961,14 @@
 
 void TemplateTable::fast_invokevfinal(int byte_no) {
   transition(vtos, vtos);
+  assert(byte_no == f2_byte, "use this argument");
   __ stop("fast_invokevfinal not used on x86");
 }
 
 
 void TemplateTable::invokeinterface(int byte_no) {
   transition(vtos, vtos);
+  assert(byte_no == f1_byte, "use this argument");
   prepare_invoke(rax, rbx, byte_no);
 
   // rax,: Interface
@@ -3036,11 +3057,11 @@
     return;
   }
 
+  assert(byte_no == f1_oop, "use this argument");
   prepare_invoke(rax, rbx, byte_no);
 
   // rax: CallSite object (f1)
   // rbx: unused (f2)
-  // rcx: receiver address
   // rdx: flags (unused)
 
   if (ProfileInterpreter) {
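
The reworked resolve_cache_and_index keeps the usual check/call/recheck shape:
probe the cache entry, on a miss call the runtime resolver, then re-read the
cache coordinates (and, for f1_oop, the result) once the call returns. Sketched
in plain C++, where the callback stands in for the call_VM above:

    #include <functional>

    struct CacheEntry { void* payload; };   // payload models f1

    void* resolve_payload(CacheEntry& e,
                          const std::function<void(CacheEntry&)>& vm_resolve) {
      if (e.payload != nullptr)   // fast path: already resolved
        return e.payload;
      vm_resolve(e);              // slow path fills in the entry
      return e.payload;           // reload after the call, as the code does
    }
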
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -2015,21 +2015,28 @@
   }
 }
 
-void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
-  assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
-  bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
-
+void TemplateTable::resolve_cache_and_index(int byte_no,
+                                            Register result,
+                                            Register Rcache,
+                                            Register index,
+                                            size_t index_size) {
   const Register temp = rbx;
-  assert_different_registers(Rcache, index, temp);
-
-  const int shift_count = (1 + byte_no) * BitsPerByte;
+  assert_different_registers(result, Rcache, index, temp);
+
   Label resolved;
-  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
-  if (is_invokedynamic) {
-    // we are resolved if the f1 field contains a non-null CallSite object
-    __ cmpptr(Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()), (int32_t) NULL_WORD);
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
+  if (byte_no == f1_oop) {
+    // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
+    // This kind of CP cache entry does not need to match the flags byte, because
+    // there is a 1-1 relation between bytecode type and CP entry type.
+    assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
+    __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
+    __ testptr(result, result);
     __ jcc(Assembler::notEqual, resolved);
   } else {
+    assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+    assert(result == noreg, "");  //else change code for setting result
+    const int shift_count = (1 + byte_no) * BitsPerByte;
     __ movl(temp, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
     __ shrl(temp, shift_count);
     // have we resolved this bytecode?
@@ -2064,7 +2071,9 @@
   __ call_VM(noreg, entry, temp);
 
   // Update registers with resolved info
-  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
+  if (result != noreg)
+    __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
   __ bind(resolved);
 }
 
@@ -2100,7 +2109,8 @@
                                                Register itable_index,
                                                Register flags,
                                                bool is_invokevirtual,
-                                               bool is_invokevfinal /*unused*/) {
+                                               bool is_invokevfinal /*unused*/,
+                                               bool is_invokedynamic) {
   // setup registers
   const Register cache = rcx;
   const Register index = rdx;
@@ -2120,15 +2130,18 @@
   const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
                                     ConstantPoolCacheEntry::f2_offset());
 
-  resolve_cache_and_index(byte_no, cache, index);
-
-  assert(wordSize == 8, "adjust code below");
-  __ movptr(method, Address(cache, index, Address::times_8, method_offset));
+  if (byte_no == f1_oop) {
+    // Resolved f1_oop goes directly into the 'method' register.
+    assert(is_invokedynamic, "");
+    resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
+  } else {
+    resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+    __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
+  }
   if (itable_index != noreg) {
-    __ movptr(itable_index,
-            Address(cache, index, Address::times_8, index_offset));
+    __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
   }
-  __ movl(flags , Address(cache, index, Address::times_8, flags_offset));
+  __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
 }
 
 
@@ -2187,7 +2200,7 @@
   const Register flags = rax;
   const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
 
-  resolve_cache_and_index(byte_no, cache, index);
+  resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
   jvmti_post_field_access(cache, index, is_static, false);
   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
 
@@ -2390,7 +2403,7 @@
   const Register flags = rax;
   const Register bc    = c_rarg3;
 
-  resolve_cache_and_index(byte_no, cache, index);
+  resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
   jvmti_post_field_mod(cache, index, is_static);
   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
 
@@ -2815,10 +2828,11 @@
   // save 'interpreter return address'
   __ save_bcp();
 
-  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual);
+  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
 
   // load receiver if needed (note: no return address pushed yet)
   if (load_receiver) {
+    assert(!is_invokedynamic, "");
     __ movl(recv, flags);
     __ andl(recv, 0xFF);
     Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
@@ -2914,6 +2928,7 @@
 
 void TemplateTable::invokevirtual(int byte_no) {
   transition(vtos, vtos);
+  assert(byte_no == f2_byte, "use this argument");
   prepare_invoke(rbx, noreg, byte_no);
 
   // rbx: index
@@ -2926,6 +2941,7 @@
 
 void TemplateTable::invokespecial(int byte_no) {
   transition(vtos, vtos);
+  assert(byte_no == f1_byte, "use this argument");
   prepare_invoke(rbx, noreg, byte_no);
   // do the call
   __ verify_oop(rbx);
@@ -2936,6 +2952,7 @@
 
 void TemplateTable::invokestatic(int byte_no) {
   transition(vtos, vtos);
+  assert(byte_no == f1_byte, "use this argument");
   prepare_invoke(rbx, noreg, byte_no);
   // do the call
   __ verify_oop(rbx);
@@ -2945,11 +2962,13 @@
 
 void TemplateTable::fast_invokevfinal(int byte_no) {
   transition(vtos, vtos);
+  assert(byte_no == f2_byte, "use this argument");
   __ stop("fast_invokevfinal not used on amd64");
 }
 
 void TemplateTable::invokeinterface(int byte_no) {
   transition(vtos, vtos);
+  assert(byte_no == f1_byte, "use this argument");
   prepare_invoke(rax, rbx, byte_no);
 
   // rax: Interface
@@ -3027,6 +3046,7 @@
 
 void TemplateTable::invokedynamic(int byte_no) {
   transition(vtos, vtos);
+  assert(byte_no == f1_oop, "use this argument");
 
   if (!EnableInvokeDynamic) {
     // We should not encounter this bytecode if !EnableInvokeDynamic.
@@ -3039,6 +3059,7 @@
     return;
   }
 
+  assert(byte_no == f1_oop, "use this argument");
   prepare_invoke(rax, rbx, byte_no);
 
   // rax: CallSite object (f1)
--- a/hotspot/src/cpu/x86/vm/x86_32.ad	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad	Tue Jun 29 10:48:02 2010 -0700
@@ -1377,6 +1377,12 @@
 // registers?  True for Intel but false for most RISCs
 const bool Matcher::clone_shift_expressions = true;
 
+bool Matcher::narrow_oop_use_complex_address() {
+  ShouldNotCallThis();
+  return true;
+}
+
+
 // Is it better to copy float constants, or load them directly from memory?
 // Intel can load a float constant from a direct address, requiring no
 // extra registers.  Most RISCs will have to materialize an address into a
@@ -1841,14 +1847,14 @@
     MacroAssembler _masm(&cbuf);
     // RBP is preserved across all calls, even compiled calls.
     // Use it to preserve RSP in places where the callee might change the SP.
-    __ movptr(rbp, rsp);
+    __ movptr(rbp_mh_SP_save, rsp);
     debug_only(int off1 = cbuf.code_size());
     assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
   %}
 
   enc_class restore_SP %{
     MacroAssembler _masm(&cbuf);
-    __ movptr(rsp, rbp);
+    __ movptr(rsp, rbp_mh_SP_save);
   %}
 
   enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
@@ -13570,7 +13576,7 @@
 // Call Java Static Instruction (method handle version)
 // Note: If this code changes, the corresponding ret_addr_offset() and
 //       compute_padding() functions will have to be adjusted.
-instruct CallStaticJavaHandle(method meth, eBPRegP ebp) %{
+instruct CallStaticJavaHandle(method meth, eBPRegP ebp_mh_SP_save) %{
   match(CallStaticJava);
   predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
   effect(USE meth);
--- a/hotspot/src/cpu/x86/vm/x86_64.ad	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad	Tue Jun 29 10:48:02 2010 -0700
@@ -1851,29 +1851,24 @@
 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 {
   if (UseCompressedOops) {
-    st->print_cr("movl    rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t", oopDesc::klass_offset_in_bytes());
+    st->print_cr("movl    rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
     if (Universe::narrow_oop_shift() != 0) {
-      st->print_cr("leaq    rscratch1, [r12_heapbase, r, Address::times_8, 0]");
-    }
-    st->print_cr("cmpq    rax, rscratch1\t # Inline cache check");
+      st->print_cr("\tdecode_heap_oop_not_null rscratch1, rscratch1");
+    }
+    st->print_cr("\tcmpq    rax, rscratch1\t # Inline cache check");
   } else {
-    st->print_cr("cmpq    rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t"
-                 "# Inline cache check", oopDesc::klass_offset_in_bytes());
+    st->print_cr("\tcmpq    rax, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t"
+                 "# Inline cache check");
   }
   st->print_cr("\tjne     SharedRuntime::_ic_miss_stub");
-  st->print_cr("\tnop");
-  if (!OptoBreakpoint) {
-    st->print_cr("\tnop");
-  }
+  st->print_cr("\tnop\t# nops to align entry point");
 }
 #endif
 
 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
 {
   MacroAssembler masm(&cbuf);
-#ifdef ASSERT
   uint code_size = cbuf.code_size();
-#endif
   if (UseCompressedOops) {
     masm.load_klass(rscratch1, j_rarg0);
     masm.cmpptr(rax, rscratch1);
@@ -1884,33 +1879,21 @@
   masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 
   /* WARNING these NOPs are critical so that verified entry point is properly
-     aligned for patching by NativeJump::patch_verified_entry() */
-  int nops_cnt = 1;
-  if (!OptoBreakpoint) {
+     4-byte aligned for patching by NativeJump::patch_verified_entry() */
+  int nops_cnt = 4 - ((cbuf.code_size() - code_size) & 0x3);
+  if (OptoBreakpoint) {
     // Leave space for int3
-     nops_cnt += 1;
+    nops_cnt -= 1;
   }
-  if (UseCompressedOops) {
-    // ??? divisible by 4 is aligned?
-    nops_cnt += 1;
-  }
-  masm.nop(nops_cnt);
-
-  assert(cbuf.code_size() - code_size == size(ra_),
-         "checking code size of inline cache node");
+  nops_cnt &= 0x3; // Do not add nops if code is aligned.
+  if (nops_cnt > 0)
+    masm.nop(nops_cnt);
 }
 
 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
 {
-  if (UseCompressedOops) {
-    if (Universe::narrow_oop_shift() == 0) {
-      return OptoBreakpoint ? 15 : 16;
-    } else {
-      return OptoBreakpoint ? 19 : 20;
-    }
-  } else {
-    return OptoBreakpoint ? 11 : 12;
-  }
+  return MachNode::size(ra_); // too many variables; just compute it
+                              // the hard way
 }
 
 
@@ -2054,6 +2037,11 @@
 // into registers?  True for Intel but false for most RISCs
 const bool Matcher::clone_shift_expressions = true;
 
+bool Matcher::narrow_oop_use_complex_address() {
+  assert(UseCompressedOops, "only for compressed oops code");
+  return (LogMinObjAlignmentInBytes <= 3);
+}
+
 // Is it better to copy float constants, or load them directly from
 // memory?  Intel can load a float constant from a direct address,
 // requiring no extra registers.  Most RISCs will have to materialize
@@ -2635,14 +2623,14 @@
     MacroAssembler _masm(&cbuf);
     // RBP is preserved across all calls, even compiled calls.
     // Use it to preserve RSP in places where the callee might change the SP.
-    __ movptr(rbp, rsp);
+    __ movptr(rbp_mh_SP_save, rsp);
     debug_only(int off1 = cbuf.code_size());
     assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
   %}
 
   enc_class restore_SP %{
     MacroAssembler _masm(&cbuf);
-    __ movptr(rsp, rbp);
+    __ movptr(rsp, rbp_mh_SP_save);
   %}
 
   enc_class Java_Static_Call(method meth)
@@ -5127,7 +5115,7 @@
 // Note: x86 architecture doesn't support "scale * index + offset" without a base
 // we can't free r12 even with Universe::narrow_oop_base() == NULL.
 operand indCompressedOopOffset(rRegN reg, immL32 off) %{
-  predicate(UseCompressedOops && (Universe::narrow_oop_shift() != 0));
+  predicate(UseCompressedOops && (Universe::narrow_oop_shift() == Address::times_8));
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) off);
 
@@ -7742,10 +7730,11 @@
   ins_pipe(ialu_reg_long);
 %}
 
-instruct decodeHeapOop_not_null(rRegP dst, rRegN src) %{
+instruct decodeHeapOop_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{
   predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
             n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
   match(Set dst (DecodeN src));
+  effect(KILL cr);
   format %{ "decode_heap_oop_not_null $dst,$src" %}
   ins_encode %{
     Register s = $src$$Register;
@@ -12604,7 +12593,7 @@
 // Call Java Static Instruction (method handle version)
 // Note: If this code changes, the corresponding ret_addr_offset() and
 //       compute_padding() functions will have to be adjusted.
-instruct CallStaticJavaHandle(method meth, rbp_RegP rbp) %{
+instruct CallStaticJavaHandle(method meth, rbp_RegP rbp_mh_SP_save) %{
   match(CallStaticJava);
   predicate(((CallStaticJavaNode*) n)->is_method_handle_invoke());
   effect(USE meth);
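
MachUEPNode::emit no longer hard-codes per-configuration nop counts; it
measures the bytes emitted so far and pads to the next 4-byte boundary,
leaving one byte for int3 under OptoBreakpoint (and size() consequently falls
back to the generic MachNode::size computation). The padding arithmetic,
extracted:

    int alignment_nops(int bytes_emitted, bool opto_breakpoint) {
      int n = 4 - (bytes_emitted & 0x3);  // distance to next 4-byte boundary
      if (opto_breakpoint)
        n -= 1;                           // leave space for int3
      return n & 0x3;                     // already aligned -> emit no nops
    }
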
--- a/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -232,12 +232,11 @@
   GEN_OFFS(CodeBlob, _header_size);
   GEN_OFFS(CodeBlob, _instructions_offset);
   GEN_OFFS(CodeBlob, _data_offset);
-  GEN_OFFS(CodeBlob, _oops_offset);
-  GEN_OFFS(CodeBlob, _oops_length);
   GEN_OFFS(CodeBlob, _frame_size);
   printf("\n");
 
   GEN_OFFS(nmethod, _method);
+  GEN_OFFS(nmethod, _oops_offset);
   GEN_OFFS(nmethod, _scopes_data_offset);
   GEN_OFFS(nmethod, _scopes_pcs_offset);
   GEN_OFFS(nmethod, _handler_table_offset);
--- a/hotspot/src/os/solaris/dtrace/libjvm_db.c	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/os/solaris/dtrace/libjvm_db.c	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -130,7 +130,7 @@
   int32_t  scopes_data_beg;     /* _scopes_data_offset */
   int32_t  scopes_data_end;
   int32_t  oops_beg;            /* _oops_offset */
-  int32_t  oops_len;            /* _oops_length */
+  int32_t  oops_end;
   int32_t  scopes_pcs_beg;      /* _scopes_pcs_offset */
   int32_t  scopes_pcs_end;
 
@@ -597,9 +597,9 @@
   CHECK_FAIL(err);
 
   /* Oops */
-  err = ps_pread(J->P, nm + OFFSET_CodeBlob_oops_offset, &N->oops_beg, SZ32);
+  err = ps_pread(J->P, nm + OFFSET_nmethod_oops_offset, &N->oops_beg, SZ32);
   CHECK_FAIL(err);
-  err = ps_pread(J->P, nm + OFFSET_CodeBlob_oops_length, &N->oops_len, SZ32);
+  err = ps_pread(J->P, nm + OFFSET_nmethod_scopes_data_offset, &N->oops_end, SZ32);
   CHECK_FAIL(err);
 
   /* scopes_pcs */
@@ -624,8 +624,8 @@
       fprintf(stderr, "\t nmethod_info: orig_pc_offset: %#x \n",
                        N->orig_pc_offset);
 
-      fprintf(stderr, "\t nmethod_info: oops_beg: %#x, oops_len: %#x\n",
-                       N->oops_beg, N->oops_len);
+      fprintf(stderr, "\t nmethod_info: oops_beg: %#x, oops_end: %#x\n",
+                       N->oops_beg, N->oops_end);
 
       fprintf(stderr, "\t nmethod_info: scopes_data_beg: %#x, scopes_data_end: %#x\n",
                        N->scopes_data_beg, N->scopes_data_end);
@@ -959,8 +959,8 @@
     err = scope_desc_at(N, decode_offset, vf);
     CHECK_FAIL(err);
 
-    if (vf->methodIdx > N->oops_len) {
-      fprintf(stderr, "\t scopeDesc_chain: (methodIdx > oops_len) !\n");
+    if (vf->methodIdx > ((N->oops_end - N->oops_beg) / POINTER_SIZE)) {
+      fprintf(stderr, "\t scopeDesc_chain: (methodIdx > oops length) !\n");
       return -1;
     }
     err = read_pointer(N->J, N->nm + N->oops_beg + (vf->methodIdx-1)*POINTER_SIZE,
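
With _oops_length gone from CodeBlob, libjvm_db derives the oops count from
the nmethod section boundaries instead: the oops section ends where
scopes_data begins, so the length falls out of the two offsets:

    #include <cstdint>

    int oops_count(int32_t oops_beg, int32_t oops_end, int pointer_size) {
      return (oops_end - oops_beg) / pointer_size;  // bound in the new check
    }
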
--- a/hotspot/src/share/vm/asm/codeBuffer.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/asm/codeBuffer.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -510,9 +510,9 @@
     copy_relocations_to(blob);
     copy_code_to(blob);
   }
-  void copy_oops_to(CodeBlob* blob) {
+  void copy_oops_to(nmethod* nm) {
     if (!oop_recorder()->is_unused()) {
-      oop_recorder()->copy_to(blob);
+      oop_recorder()->copy_to(nm);
     }
   }
 
--- a/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -26,9 +26,11 @@
 #include "incls/_c1_Canonicalizer.cpp.incl"
 
 
-static void do_print_value(Value* vp) {
-  (*vp)->print_line();
-}
+class PrintValueVisitor: public ValueVisitor {
+  void visit(Value* vp) {
+    (*vp)->print_line();
+  }
+};
 
 void Canonicalizer::set_canonical(Value x) {
   assert(x != NULL, "value must exist");
@@ -37,10 +39,11 @@
   // in the instructions).
   if (canonical() != x) {
     if (PrintCanonicalization) {
-      canonical()->input_values_do(do_print_value);
+      PrintValueVisitor do_print_value;
+      canonical()->input_values_do(&do_print_value);
       canonical()->print_line();
       tty->print_cr("canonicalized to:");
-      x->input_values_do(do_print_value);
+      x->input_values_do(&do_print_value);
       x->print_line();
       tty->cr();
     }
@@ -202,7 +205,7 @@
     // limit this optimization to current block
     if (value != NULL && in_current_block(conv)) {
       set_canonical(new StoreField(x->obj(), x->offset(), x->field(), value, x->is_static(),
-                                   x->lock_stack(), x->state_before(), x->is_loaded(), x->is_initialized()));
+                                       x->lock_stack(), x->state_before(), x->is_loaded(), x->is_initialized()));
       return;
     }
   }
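
Replacing the do_print_value function pointer with a ValueVisitor object is
the standard move when a callback may later need state or virtual dispatch;
input_values_do now takes a visitor rather than a bare function. Minimal shape
of the pattern, with Value and print_line as stand-ins:

    struct Value { void print_line() const { /* ... */ } };

    struct ValueVisitor {               // interface assumed by input_values_do
      virtual void visit(Value** vp) = 0;
      virtual ~ValueVisitor() {}
    };

    struct PrintValueVisitor : ValueVisitor {
      void visit(Value** vp) override { (*vp)->print_line(); }
    };
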
--- a/hotspot/src/share/vm/c1/c1_Compilation.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compilation.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -66,9 +66,6 @@
   }
 };
 
-Arena* Compilation::_arena = NULL;
-Compilation* Compilation::_compilation = NULL;
-
 // Implementation of Compilation
 
 
@@ -238,9 +235,23 @@
 }
 
 
+void Compilation::setup_code_buffer(CodeBuffer* code, int call_stub_estimate) {
+  // Preinitialize the consts section to some large size:
+  int locs_buffer_size = 20 * (relocInfo::length_limit + sizeof(relocInfo));
+  char* locs_buffer = NEW_RESOURCE_ARRAY(char, locs_buffer_size);
+  code->insts()->initialize_shared_locs((relocInfo*)locs_buffer,
+                                        locs_buffer_size / sizeof(relocInfo));
+  code->initialize_consts_size(Compilation::desired_max_constant_size());
+  // Call stubs + deopt/exception handler
+  code->initialize_stubs_size((call_stub_estimate * LIR_Assembler::call_stub_size) +
+                              LIR_Assembler::exception_handler_size +
+                              LIR_Assembler::deopt_handler_size);
+}
+
+
 int Compilation::emit_code_body() {
   // emit code
-  Runtime1::setup_code_buffer(code(), allocator()->num_calls());
+  setup_code_buffer(code(), allocator()->num_calls());
   code()->initialize_oop_recorder(env()->oop_recorder());
 
   _masm = new C1_MacroAssembler(code());
@@ -422,7 +433,8 @@
 }
 
 
-Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* method, int osr_bci)
+Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* method,
+                         int osr_bci, BufferBlob* buffer_blob)
 : _compiler(compiler)
 , _env(env)
 , _method(method)
@@ -437,8 +449,10 @@
 , _bailout_msg(NULL)
 , _exception_info_list(NULL)
 , _allocator(NULL)
-, _code(Runtime1::get_buffer_blob()->instructions_begin(),
-        Runtime1::get_buffer_blob()->instructions_size())
+, _next_id(0)
+, _next_block_id(0)
+, _code(buffer_blob->instructions_begin(),
+        buffer_blob->instructions_size())
 , _current_instruction(NULL)
 #ifndef PRODUCT
 , _last_instruction_printed(NULL)
@@ -446,17 +460,15 @@
 {
   PhaseTraceTime timeit(_t_compile);
 
-  assert(_arena == NULL, "shouldn't only one instance of Compilation in existence at a time");
   _arena = Thread::current()->resource_area();
-  _compilation = this;
+  _env->set_compiler_data(this);
   _exception_info_list = new ExceptionInfoList();
   _implicit_exception_table.set_size(0);
   compile_method();
 }
 
 Compilation::~Compilation() {
-  _arena = NULL;
-  _compilation = NULL;
+  _env->set_compiler_data(NULL);
 }
 
 
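The constructor/destructor hunks above retire the static `_arena`/`_compilation` pair in favor of stashing the active compilation in the compile-time environment. A minimal sketch of that lookup scheme, assuming a thread-local current environment; the names are illustrative, not the real ciEnv API:

#include <cassert>
#include <cstddef>

class Env {
  void* _compiler_data;
 public:
  Env() : _compiler_data(NULL) {}
  void  set_compiler_data(void* d) { _compiler_data = d; }
  void* compiler_data() const      { return _compiler_data; }
  static Env* current();  // the real ciEnv resolves this per thread
};

static thread_local Env* g_current_env = NULL;
Env* Env::current() { return g_current_env; }

class Compilation {
  Env* _env;
 public:
  explicit Compilation(Env* env) : _env(env) { _env->set_compiler_data(this); }
  ~Compilation()                             { _env->set_compiler_data(NULL); }
  static Compilation* current() {
    return static_cast<Compilation*>(Env::current()->compiler_data());
  }
};

int main() {
  Env env;
  g_current_env = &env;
  Compilation c(&env);
  assert(Compilation::current() == &c);  // scoped lookup, no global Compilation
  return 0;
}

Because the lookup goes through the per-thread environment, two compiler threads can each have a live Compilation without tripping over a shared static.
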
--- a/hotspot/src/share/vm/c1/c1_Compilation.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compilation.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -54,14 +54,10 @@
 class Compilation: public StackObj {
   friend class CompilationResourceObj;
  private:
-
-  static Arena* _arena;
-  static Arena* arena() { return _arena; }
-
-  static Compilation* _compilation;
-
- private:
   // compilation specifics
+  Arena* _arena;
+  int _next_id;
+  int _next_block_id;
   AbstractCompiler*  _compiler;
   ciEnv*             _env;
   ciMethod*          _method;
@@ -108,10 +104,14 @@
 
  public:
   // creation
-  Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* method, int osr_bci);
+  Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* method,
+              int osr_bci, BufferBlob* buffer_blob);
   ~Compilation();
 
-  static Compilation* current_compilation()      { return _compilation; }
+
+  static Compilation* current() {
+    return (Compilation*) ciEnv::current()->compiler_data();
+  }
 
   // accessors
   ciEnv* env() const                             { return _env; }
@@ -128,6 +128,15 @@
   CodeBuffer* code()                             { return &_code; }
   C1_MacroAssembler* masm() const                { return _masm; }
   CodeOffsets* offsets()                         { return &_offsets; }
+  Arena* arena()                                 { return _arena; }
+
+  // Instruction ids
+  int get_next_id()                              { return _next_id++; }
+  int number_of_instructions() const             { return _next_id; }
+
+  // BlockBegin ids
+  int get_next_block_id()                        { return _next_block_id++; }
+  int number_of_blocks() const                   { return _next_block_id; }
 
   // setters
   void set_has_exception_handlers(bool f)        { _has_exception_handlers = f; }
@@ -158,6 +167,15 @@
   bool bailed_out() const                        { return _bailout_msg != NULL; }
   const char* bailout_msg() const                { return _bailout_msg; }
 
+  static int desired_max_code_buffer_size() {
+    return (int) NMethodSizeLimit;  // default 256K or 512K
+  }
+  static int desired_max_constant_size() {
+    return (int) NMethodSizeLimit / 10;  // about 25K
+  }
+
+  static void setup_code_buffer(CodeBuffer* cb, int call_stub_estimate);
+
   // timers
   static void print_timers();
 
@@ -203,7 +221,10 @@
 // Base class for objects allocated by the compiler in the compilation arena
 class CompilationResourceObj ALLOCATION_SUPER_CLASS_SPEC {
  public:
-  void* operator new(size_t size) { return Compilation::arena()->Amalloc(size); }
+  void* operator new(size_t size) { return Compilation::current()->arena()->Amalloc(size); }
+  void* operator new(size_t size, Arena* arena) {
+    return arena->Amalloc(size);
+  }
   void  operator delete(void* p) {} // nothing to do
 };
 
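`CompilationResourceObj` now offers two allocation paths: the default one pulls from the current compilation's arena, while the placement form takes an explicit `Arena*`. A toy bump-allocator sketch of the two overloads (the real Arena grows in chunks; this one is fixed-size for brevity):

#include <cassert>
#include <cstddef>
#include <cstdlib>

// Toy bump allocator standing in for the VM's Arena.
class Arena {
  char*  _buf;
  size_t _used, _cap;
 public:
  explicit Arena(size_t cap) : _buf((char*)std::malloc(cap)), _used(0), _cap(cap) {}
  ~Arena() { std::free(_buf); }
  void* Amalloc(size_t n) {
    assert(_used + n <= _cap && "toy arena is fixed-size");
    void* p = _buf + _used;
    _used += n;
    return p;
  }
};

static Arena* g_current_arena;  // stand-in for Compilation::current()->arena()

class CompilationResourceObj {
 public:
  // Default: allocate in the current compilation's arena.
  void* operator new(size_t size) { return g_current_arena->Amalloc(size); }
  // Placement form: pin the object in an explicitly chosen arena.
  void* operator new(size_t size, Arena* arena) { return arena->Amalloc(size); }
  void  operator delete(void*) {}  // arena-owned: freed wholesale with the arena
};

struct Range : CompilationResourceObj { int from, to; };

int main() {
  Arena startup(1024), per_compile(1024);
  g_current_arena = &per_compile;
  Range* r1 = new Range();             // lives only as long as this compilation
  Range* r2 = new (&startup) Range();  // outlives it, e.g. a shared sentinel
  (void)r1; (void)r2;
  return 0;
}

The placement form is what lets the later `Range::initialize(Arena*)`/`Interval::initialize(Arena*)` hunks build their sentinels in an arena that outlives any single compilation.
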
--- a/hotspot/src/share/vm/c1/c1_Compiler.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compiler.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -27,9 +27,6 @@
 
 volatile int Compiler::_runtimes = uninitialized;
 
-volatile bool Compiler::_compiling = false;
-
-
 Compiler::Compiler() {
 }
 
@@ -39,47 +36,62 @@
 }
 
 
+void Compiler::initialize_all() {
+  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
+  Arena* arena = new Arena();
+  Runtime1::initialize(buffer_blob);
+  FrameMap::initialize();
+  // initialize data structures
+  ValueType::initialize(arena);
+  // Instruction::initialize();
+  // BlockBegin::initialize();
+  GraphBuilder::initialize();
+  // note: to use more than one instance of LinearScan at a time this function call has to
+  //       be moved somewhere else (it used to live in the LinearScan constructor):
+  Interval::initialize(arena);
+}
+
+
 void Compiler::initialize() {
   if (_runtimes != initialized) {
-    initialize_runtimes( Runtime1::initialize, &_runtimes);
+    initialize_runtimes( initialize_all, &_runtimes);
   }
   mark_initialized();
 }
 
 
+BufferBlob* Compiler::build_buffer_blob() {
+  // setup CodeBuffer.  Preallocate a BufferBlob of size
+  // NMethodSizeLimit plus some extra space for constants.
+  int code_buffer_size = Compilation::desired_max_code_buffer_size() +
+    Compilation::desired_max_constant_size();
+  BufferBlob* blob = BufferBlob::create("Compiler1 temporary CodeBuffer",
+                                        code_buffer_size);
+  guarantee(blob != NULL, "must create initial code buffer");
+  return blob;
+}
+
+
 void Compiler::compile_method(ciEnv* env, ciMethod* method, int entry_bci) {
+  // Allocate the buffer blob once per compiler thread, since allocating it
+  // for each compilation seems to be too expensive (at least on Intel win32).
+  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
+  if (buffer_blob == NULL) {
+    buffer_blob = build_buffer_blob();
+    CompilerThread::current()->set_buffer_blob(buffer_blob);
+  }
 
   if (!is_initialized()) {
     initialize();
   }
   // invoke compilation
-#ifdef TIERED
-  // We are thread in native here...
-  CompilerThread* thread = CompilerThread::current();
-  {
-    ThreadInVMfromNative tv(thread);
-    MutexLocker only_one (C1_lock, thread);
-    while ( _compiling) {
-      C1_lock->wait();
-    }
-    _compiling = true;
-  }
-#endif // TIERED
   {
    // We are nested here because we need the destructor
    // of Compilation to occur before we release any
    // competing compiler thread
     ResourceMark rm;
-    Compilation c(this, env, method, entry_bci);
+    Compilation c(this, env, method, entry_bci, buffer_blob);
   }
-#ifdef TIERED
-  {
-    ThreadInVMfromNative tv(thread);
-    MutexLocker only_one (C1_lock, thread);
-    _compiling = false;
-    C1_lock->notify();
-  }
-#endif // TIERED
 }
 
 
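The hunk above caches the scratch `BufferBlob` on the compiler thread, so it is built on first use and reused across compilations (and the old TIERED-only `_compiling` handshake can go away). A self-contained sketch of that lazy per-thread caching, with toy types standing in for `BufferBlob`/`CompilerThread`:

#include <cassert>
#include <cstddef>
#include <cstdlib>

struct BufferBlob {
  char*  mem;
  size_t size;
};

static BufferBlob* build_buffer_blob(size_t size) {
  BufferBlob* b = new BufferBlob();
  b->mem  = (char*)std::malloc(size);
  b->size = size;
  assert(b->mem != NULL && "must create initial code buffer");
  return b;
}

struct CompilerThread {
  BufferBlob* _buffer_blob;
  CompilerThread() : _buffer_blob(NULL) {}
  BufferBlob* get_buffer_blob() const        { return _buffer_blob; }
  void        set_buffer_blob(BufferBlob* b) { _buffer_blob = b; }
};

static void compile_method(CompilerThread* t) {
  BufferBlob* blob = t->get_buffer_blob();
  if (blob == NULL) {               // first compilation on this thread
    blob = build_buffer_blob(256 * 1024);
    t->set_buffer_blob(blob);
  }
  // ... run the compilation against blob->mem ...
}

int main() {
  CompilerThread t;
  compile_method(&t);
  compile_method(&t);               // second call reuses the cached blob
  assert(t.get_buffer_blob() != NULL);
  return 0;
}
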
--- a/hotspot/src/share/vm/c1/c1_Compiler.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compiler.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -31,10 +31,6 @@
  // Tracks whether runtime has been initialized
  static volatile int _runtimes;
 
- // In tiered it is possible for multiple threads to want to do compilation
- // only one can enter c1 at a time
- static volatile bool _compiling;
-
  public:
   // Creation
   Compiler();
@@ -47,6 +43,7 @@
   virtual bool is_c1() { return true; };
 #endif // TIERED
 
+  BufferBlob* build_buffer_blob();
 
   // Missing feature tests
   virtual bool supports_native()                 { return true; }
@@ -58,6 +55,7 @@
 
   // Initialization
   virtual void initialize();
+  static  void initialize_all();
 
   // Compilation entry point for methods
   virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci);
--- a/hotspot/src/share/vm/c1/c1_FrameMap.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_FrameMap.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -153,7 +153,7 @@
 
 
 FrameMap::FrameMap(ciMethod* method, int monitors, int reserved_argument_area_size) {
-  if (!_init_done) init();
+  assert(_init_done, "should already be completed");
 
   _framesize = -1;
   _num_spills = -1;
--- a/hotspot/src/share/vm/c1/c1_FrameMap.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_FrameMap.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -150,6 +150,9 @@
   // Opr representing the stack_pointer on this platform
   static LIR_Opr stack_pointer();
 
+  // JSR 292
+  static LIR_Opr method_handle_invoke_SP_save_opr();
+
   static BasicTypeArray*     signature_type_array_for(const ciMethod* method);
   static BasicTypeArray*     signature_type_array_for(const char * signature);
 
@@ -232,7 +235,7 @@
     return _caller_save_fpu_regs[i];
   }
 
-  static void init();
+  static void initialize();
 };
 
 //               CallingConvention
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -2438,13 +2438,13 @@
       case Bytecodes::_invokestatic   : // fall through
       case Bytecodes::_invokedynamic  : // fall through
       case Bytecodes::_invokeinterface: invoke(code); break;
-      case Bytecodes::_new            : new_instance(s.get_index_big()); break;
+      case Bytecodes::_new            : new_instance(s.get_index_u2()); break;
       case Bytecodes::_newarray       : new_type_array(); break;
       case Bytecodes::_anewarray      : new_object_array(); break;
       case Bytecodes::_arraylength    : ipush(append(new ArrayLength(apop(), lock_stack()))); break;
       case Bytecodes::_athrow         : throw_op(s.cur_bci()); break;
-      case Bytecodes::_checkcast      : check_cast(s.get_index_big()); break;
-      case Bytecodes::_instanceof     : instance_of(s.get_index_big()); break;
+      case Bytecodes::_checkcast      : check_cast(s.get_index_u2()); break;
+      case Bytecodes::_instanceof     : instance_of(s.get_index_u2()); break;
       // Note: we do not have special handling for the monitorenter bytecode if DeoptC1 && DeoptOnAsyncException
       case Bytecodes::_monitorenter   : monitorenter(apop(), s.cur_bci()); break;
       case Bytecodes::_monitorexit    : monitorexit (apop(), s.cur_bci()); break;
@@ -2530,16 +2530,10 @@
 }
 
 
-bool GraphBuilder::_is_initialized = false;
 bool GraphBuilder::_can_trap      [Bytecodes::number_of_java_codes];
 bool GraphBuilder::_is_async[Bytecodes::number_of_java_codes];
 
 void GraphBuilder::initialize() {
-  // make sure initialization happens only once (need a
-  // lock here, if we allow the compiler to be re-entrant)
-  if (is_initialized()) return;
-  _is_initialized = true;
-
   // the following bytecodes are assumed to potentially
   // throw exceptions in compiled code - note that e.g.
   // monitorexit & the return bytecodes do not throw
@@ -2855,7 +2849,6 @@
   BlockList* bci2block = blm.bci2block();
   BlockBegin* start_block = bci2block->at(0);
 
-  assert(is_initialized(), "GraphBuilder must have been initialized");
   push_root_scope(scope, bci2block, start_block);
 
   // setup state for std entry
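
With initialization hoisted into `Compiler::initialize_all`, the `_is_initialized` guard becomes unnecessary: the trap tables are filled exactly once at compiler startup. A tiny sketch of a once-at-startup lookup table, using a toy bytecode set and hypothetical names:

#include <cassert>

enum Bytecode { bc_nop, bc_athrow, bc_getfield, bc_return, number_of_codes };

static bool can_trap[number_of_codes];

// Run once from the compiler's startup path; in the patch this happens in
// Compiler::initialize_all under the runtime-initialization handshake, so no
// per-compilation "already done?" check is needed.
static void initialize_tables() {
  can_trap[bc_athrow]   = true;
  can_trap[bc_getfield] = true;  // may raise NullPointerException
}

int main() {
  initialize_tables();
  assert(can_trap[bc_athrow] && !can_trap[bc_return]);
  return 0;
}
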
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -162,7 +162,6 @@
   };
 
   // for all GraphBuilders
-  static bool       _is_initialized;             // true if trap tables were initialized, false otherwise
   static bool       _can_trap[Bytecodes::number_of_java_codes];
   static bool       _is_async[Bytecodes::number_of_java_codes];
 
@@ -268,7 +267,6 @@
   Instruction* append_split(StateSplit* instr);
 
   // other helpers
-  static bool is_initialized()                   { return _is_initialized; }
   static bool is_async(Bytecodes::Code code) {
     assert(0 <= code && code < Bytecodes::number_of_java_codes, "illegal bytecode");
     return _is_async[code];
--- a/hotspot/src/share/vm/c1/c1_IR.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_IR.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -230,7 +230,8 @@
   , _stack(stack)
   , _exception_handlers(exception_handlers)
   , _next(NULL)
-  , _id(-1) {
+  , _id(-1)
+  , _is_method_handle_invoke(false) {
   assert(_stack != NULL, "must be non null");
   assert(_bci == SynchronizationEntryBCI || Bytecodes::is_defined(scope()->method()->java_code_at_bci(_bci)), "make sure bci points at a real bytecode");
 }
@@ -241,7 +242,8 @@
   , _exception_handlers(NULL)
   , _bci(info->_bci)
   , _scope_debug_info(NULL)
-  , _oop_map(NULL) {
+  , _oop_map(NULL)
+  , _is_method_handle_invoke(info->_is_method_handle_invoke) {
   if (lock_stack_only) {
     if (info->_stack != NULL) {
       _stack = info->_stack->copy_locks();
@@ -259,10 +261,10 @@
 }
 
 
-void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke) {
+void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset) {
   // record the safepoint before recording the debug info for enclosing scopes
   recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
-  _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, is_method_handle_invoke);
+  _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, _is_method_handle_invoke);
   recorder->end_safepoint(pc_offset);
 }
 
@@ -285,11 +287,6 @@
 IR::IR(Compilation* compilation, ciMethod* method, int osr_bci) :
     _locals_size(in_WordSize(-1))
   , _num_loops(0) {
-  // initialize data structures
-  ValueType::initialize();
-  Instruction::initialize();
-  BlockBegin::initialize();
-  GraphBuilder::initialize();
   // setup IR fields
   _compilation = compilation;
   _top_scope   = new IRScope(compilation, NULL, -1, method, osr_bci, true);
@@ -379,15 +376,15 @@
 }
 
 
-class UseCountComputer: public AllStatic {
+class UseCountComputer: public ValueVisitor, BlockClosure {
  private:
-  static void update_use_count(Value* n) {
+  void visit(Value* n) {
     // Local instructions and Phis for expression stack values at the
     // start of basic blocks are not added to the instruction list
     if ((*n)->bci() == -99 && (*n)->as_Local() == NULL &&
         (*n)->as_Phi() == NULL) {
       assert(false, "a node was not appended to the graph");
-      Compilation::current_compilation()->bailout("a node was not appended to the graph");
+      Compilation::current()->bailout("a node was not appended to the graph");
     }
     // use n's input if not visited before
     if (!(*n)->is_pinned() && !(*n)->has_uses()) {
@@ -400,31 +397,31 @@
     (*n)->_use_count++;
   }
 
-  static Values* worklist;
-  static int depth;
+  Values* worklist;
+  int depth;
   enum {
     max_recurse_depth = 20
   };
 
-  static void uses_do(Value* n) {
+  void uses_do(Value* n) {
     depth++;
     if (depth > max_recurse_depth) {
       // don't allow the traversal to recurse too deeply
       worklist->push(*n);
     } else {
-      (*n)->input_values_do(update_use_count);
+      (*n)->input_values_do(this);
       // special handling for some instructions
       if ((*n)->as_BlockEnd() != NULL) {
         // note on BlockEnd:
        //   must 'use' the stack only if the method doesn't
        //   terminate; however, in those cases the stack is empty
-        (*n)->state_values_do(update_use_count);
+        (*n)->state_values_do(this);
       }
     }
     depth--;
   }
 
-  static void basic_compute_use_count(BlockBegin* b) {
+  void block_do(BlockBegin* b) {
     depth = 0;
     // process all pinned nodes as the roots of expression trees
     for (Instruction* n = b; n != NULL; n = n->next()) {
@@ -447,18 +444,19 @@
     assert(depth == 0, "should have counted back down");
   }
 
+  UseCountComputer() {
+    worklist = new Values();
+    depth = 0;
+  }
+
  public:
   static void compute(BlockList* blocks) {
-    worklist = new Values();
-    blocks->blocks_do(basic_compute_use_count);
-    worklist = NULL;
+    UseCountComputer ucc;
+    blocks->iterate_backward(&ucc);
   }
 };
 
 
-Values* UseCountComputer::worklist = NULL;
-int UseCountComputer::depth = 0;
-
 // helper macro for short definition of trace-output inside code
 #ifndef PRODUCT
   #define TRACE_LINEAR_SCAN(level, code)       \
@@ -1300,7 +1298,7 @@
 
 #endif // PRODUCT
 
-void SubstitutionResolver::substitute(Value* v) {
+void SubstitutionResolver::visit(Value* v) {
   Value v0 = *v;
   if (v0) {
     Value vs = v0->subst();
@@ -1311,20 +1309,22 @@
 }
 
 #ifdef ASSERT
-void check_substitute(Value* v) {
-  Value v0 = *v;
-  if (v0) {
-    Value vs = v0->subst();
-    assert(vs == v0, "missed substitution");
+class SubstitutionChecker: public ValueVisitor {
+  void visit(Value* v) {
+    Value v0 = *v;
+    if (v0) {
+      Value vs = v0->subst();
+      assert(vs == v0, "missed substitution");
+    }
   }
-}
+};
 #endif
 
 
 void SubstitutionResolver::block_do(BlockBegin* block) {
   Instruction* last = NULL;
   for (Instruction* n = block; n != NULL;) {
-    n->values_do(substitute);
+    n->values_do(this);
     // need to remove this instruction from the instruction stream
     if (n->subst() != n) {
       assert(last != NULL, "must have last");
@@ -1336,8 +1336,9 @@
   }
 
 #ifdef ASSERT
-  if (block->state()) block->state()->values_do(check_substitute);
-  block->block_values_do(check_substitute);
-  if (block->end() && block->end()->state()) block->end()->state()->values_do(check_substitute);
+  SubstitutionChecker check_substitute;
+  if (block->state()) block->state()->values_do(&check_substitute);
+  block->block_values_do(&check_substitute);
+  if (block->end() && block->end()->state()) block->end()->state()->values_do(&check_substitute);
 #endif
 }
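
`UseCountComputer` keeps its depth-capped traversal but moves `worklist` and `depth` from statics into instance fields of a stack-allocated visitor. The sketch below shows the underlying pattern, recursion up to a fixed depth with an explicit worklist fallback so deep expression trees cannot overflow the native stack, using a toy `Node` type:

#include <cstdio>
#include <vector>

struct Node {
  std::vector<Node*> inputs;
  int use_count;
  Node() : use_count(0) {}
};

class UseCounter {
  std::vector<Node*> worklist;
  int depth;
  enum { max_recurse_depth = 20 };

  void count(Node* n) {
    if (n->use_count++ == 0) uses_do(n);  // descend only on the first visit
  }
  void uses_do(Node* n) {
    depth++;
    if (depth > max_recurse_depth) {
      worklist.push_back(n);              // too deep: defer instead of recursing
    } else {
      for (size_t i = 0; i < n->inputs.size(); i++) count(n->inputs[i]);
    }
    depth--;
  }

 public:
  UseCounter() : depth(0) {}
  void compute(Node* root) {
    uses_do(root);
    while (!worklist.empty()) {           // drain deferred subtrees iteratively
      Node* n = worklist.back();
      worklist.pop_back();
      uses_do(n);
    }
  }
};

int main() {
  std::vector<Node> chain(100);           // deep chain to force deferral
  for (size_t i = 0; i + 1 < chain.size(); i++)
    chain[i].inputs.push_back(&chain[i + 1]);
  UseCounter().compute(&chain[0]);
  std::printf("last use_count = %d\n", chain.back().use_count);  // 1
  return 0;
}
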
--- a/hotspot/src/share/vm/c1/c1_IR.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_IR.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -269,6 +269,7 @@
   int               _bci;
   CodeEmitInfo*     _next;
   int               _id;
+  bool              _is_method_handle_invoke;    // true if the associated call site is a MethodHandle call site.
 
   FrameMap*     frame_map() const                { return scope()->compilation()->frame_map(); }
   Compilation*  compilation() const              { return scope()->compilation(); }
@@ -287,7 +288,8 @@
     , _stack(NULL)
     , _exception_handlers(NULL)
     , _next(NULL)
-    , _id(-1) {
+    , _id(-1)
+    , _is_method_handle_invoke(false) {
   }
 
   // make a copy
@@ -302,13 +304,16 @@
   int bci() const                                { return _bci; }
 
   void add_register_oop(LIR_Opr opr);
-  void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke = false);
+  void record_debug_info(DebugInformationRecorder* recorder, int pc_offset);
 
   CodeEmitInfo* next() const        { return _next; }
   void set_next(CodeEmitInfo* next) { _next = next; }
 
   int id() const      { return _id; }
   void set_id(int id) { _id = id; }
+
+  bool     is_method_handle_invoke() const { return _is_method_handle_invoke;     }
+  void set_is_method_handle_invoke(bool x) {        _is_method_handle_invoke = x; }
 };
 
 
@@ -366,8 +371,8 @@
 // instructions from the instruction list.
 //
 
-class SubstitutionResolver: public BlockClosure {
-  static void substitute(Value* v);
+class SubstitutionResolver: public BlockClosure, ValueVisitor {
+  virtual void visit(Value* v);
 
  public:
   SubstitutionResolver(IR* hir) {
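
`SubstitutionResolver` now inherits both `BlockClosure` and `ValueVisitor`, so a single object can be handed to block iteration and receive each value slot back. A minimal sketch of such a dual-interface closure, with toy types rather than the C1 hierarchy:

#include <cstddef>
#include <cstdio>
#include <vector>

struct Value;
struct Block;

class ValueVisitor {
 public:
  virtual ~ValueVisitor() {}
  virtual void visit(Value** v) = 0;
};

class BlockClosure {
 public:
  virtual ~BlockClosure() {}
  virtual void block_do(Block* b) = 0;
};

struct Value { int id; Value* subst; };
struct Block { std::vector<Value*> values; };

// One object implements both interfaces, so the block walk can feed each
// value slot straight back into the same visitor instance.
class SubstitutionResolver : public BlockClosure, public ValueVisitor {
  virtual void visit(Value** v) {
    if (*v != NULL && (*v)->subst != NULL) *v = (*v)->subst;  // rewrite slot
  }
 public:
  virtual void block_do(Block* b) {
    for (size_t i = 0; i < b->values.size(); i++) visit(&b->values[i]);
  }
};

int main() {
  Value replacement = {2, NULL};
  Value old_value   = {1, &replacement};
  Block b;
  b.values.push_back(&old_value);
  SubstitutionResolver sr;
  sr.block_do(&b);
  std::printf("id after substitution: %d\n", b.values[0]->id);  // prints 2
  return 0;
}
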
--- a/hotspot/src/share/vm/c1/c1_Instruction.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Instruction.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -29,8 +29,6 @@
 // Implementation of Instruction
 
 
-int Instruction::_next_id = 0;
-
 #ifdef ASSERT
 void Instruction::create_hi_word() {
   assert(type()->is_double_word() && _hi_word == NULL, "only double word has high word");
@@ -193,22 +191,22 @@
 }
 
 
-void ArithmeticOp::other_values_do(void f(Value*)) {
+void ArithmeticOp::other_values_do(ValueVisitor* f) {
   if (lock_stack() != NULL) lock_stack()->values_do(f);
 }
 
-void NullCheck::other_values_do(void f(Value*)) {
+void NullCheck::other_values_do(ValueVisitor* f) {
   lock_stack()->values_do(f);
 }
 
-void AccessArray::other_values_do(void f(Value*)) {
+void AccessArray::other_values_do(ValueVisitor* f) {
   if (lock_stack() != NULL) lock_stack()->values_do(f);
 }
 
 
 // Implementation of AccessField
 
-void AccessField::other_values_do(void f(Value*)) {
+void AccessField::other_values_do(ValueVisitor* f) {
   if (state_before() != NULL) state_before()->values_do(f);
   if (lock_stack() != NULL) lock_stack()->values_do(f);
 }
@@ -270,7 +268,7 @@
 
 // Implementation of CompareOp
 
-void CompareOp::other_values_do(void f(Value*)) {
+void CompareOp::other_values_do(ValueVisitor* f) {
   if (state_before() != NULL) state_before()->values_do(f);
 }
 
@@ -302,12 +300,12 @@
 }
 
 
-void StateSplit::state_values_do(void f(Value*)) {
+void StateSplit::state_values_do(ValueVisitor* f) {
   if (state() != NULL) state()->values_do(f);
 }
 
 
-void BlockBegin::state_values_do(void f(Value*)) {
+void BlockBegin::state_values_do(ValueVisitor* f) {
   StateSplit::state_values_do(f);
 
   if (is_set(BlockBegin::exception_entry_flag)) {
@@ -318,13 +316,13 @@
 }
 
 
-void MonitorEnter::state_values_do(void f(Value*)) {
+void MonitorEnter::state_values_do(ValueVisitor* f) {
   StateSplit::state_values_do(f);
   _lock_stack_before->values_do(f);
 }
 
 
-void Intrinsic::state_values_do(void f(Value*)) {
+void Intrinsic::state_values_do(ValueVisitor* f) {
   StateSplit::state_values_do(f);
   if (lock_stack() != NULL) lock_stack()->values_do(f);
 }
@@ -349,8 +347,9 @@
 
   assert(args != NULL, "args must exist");
 #ifdef ASSERT
-  values_do(assert_value);
-#endif // ASSERT
+  AssertValues assert_value;
+  values_do(&assert_value);
+#endif
 
   // provide an initial guess of signature size.
   _signature = new BasicTypeList(number_of_arguments() + (has_receiver() ? 1 : 0));
@@ -368,7 +367,7 @@
 }
 
 
-void Invoke::state_values_do(void f(Value*)) {
+void Invoke::state_values_do(ValueVisitor* f) {
   StateSplit::state_values_do(f);
   if (state_before() != NULL) state_before()->values_do(f);
   if (state()        != NULL) state()->values_do(f);
@@ -500,30 +499,27 @@
 }
 
 
-void Constant::other_values_do(void f(Value*)) {
+void Constant::other_values_do(ValueVisitor* f) {
   if (state() != NULL) state()->values_do(f);
 }
 
 
 // Implementation of NewArray
 
-void NewArray::other_values_do(void f(Value*)) {
+void NewArray::other_values_do(ValueVisitor* f) {
   if (state_before() != NULL) state_before()->values_do(f);
 }
 
 
 // Implementation of TypeCheck
 
-void TypeCheck::other_values_do(void f(Value*)) {
+void TypeCheck::other_values_do(ValueVisitor* f) {
   if (state_before() != NULL) state_before()->values_do(f);
 }
 
 
 // Implementation of BlockBegin
 
-int BlockBegin::_next_block_id = 0;
-
-
 void BlockBegin::set_end(BlockEnd* end) {
   assert(end != NULL, "should not reset block end to NULL");
   BlockEnd* old_end = _end;
@@ -738,7 +734,7 @@
 }
 
 
-void BlockBegin::block_values_do(void f(Value*)) {
+void BlockBegin::block_values_do(ValueVisitor* f) {
   for (Instruction* n = this; n != NULL; n = n->next()) n->values_do(f);
 }
 
@@ -930,7 +926,7 @@
 }
 
 
-void BlockList::values_do(void f(Value*)) {
+void BlockList::values_do(ValueVisitor* f) {
   for (int i = length() - 1; i >= 0; i--) at(i)->block_values_do(f);
 }
 
@@ -973,7 +969,7 @@
 }
 
 
-void BlockEnd::other_values_do(void f(Value*)) {
+void BlockEnd::other_values_do(ValueVisitor* f) {
   if (state_before() != NULL) state_before()->values_do(f);
 }
 
@@ -1012,6 +1008,6 @@
 
 // Implementation of Throw
 
-void Throw::state_values_do(void f(Value*)) {
+void Throw::state_values_do(ValueVisitor* f) {
   BlockEnd::state_values_do(f);
 }
--- a/hotspot/src/share/vm/c1/c1_Instruction.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Instruction.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -116,6 +116,13 @@
 };
 
 
+// A simple closure class for visiting the values of an Instruction
+class ValueVisitor: public StackObj {
+ public:
+  virtual void visit(Value* v) = 0;
+};
+
+
 // Some array and list classes
 define_array(BlockBeginArray, BlockBegin*)
 define_stack(_BlockList, BlockBeginArray)
@@ -129,7 +136,7 @@
   void iterate_forward(BlockClosure* closure);
   void iterate_backward(BlockClosure* closure);
   void blocks_do(void f(BlockBegin*));
-  void values_do(void f(Value*));
+  void values_do(ValueVisitor* f);
   void print(bool cfg_only = false, bool live_only = false) PRODUCT_RETURN;
 };
 
@@ -264,8 +271,6 @@
 
 class Instruction: public CompilationResourceObj {
  private:
-  static int   _next_id;                         // the node counter
-
   int          _id;                              // the unique instruction id
   int          _bci;                             // the instruction bci
   int          _use_count;                       // the number of instructions referring to this value (w/o prev/next); only roots can have use count = 0 or > 1
@@ -283,6 +288,7 @@
 #endif
 
   friend class UseCountComputer;
+  friend class BlockBegin;
 
  protected:
   void set_bci(int bci)                          { assert(bci == SynchronizationEntryBCI || bci >= 0, "illegal bci"); _bci = bci; }
@@ -292,6 +298,13 @@
   }
 
  public:
+  void* operator new(size_t size) {
+    Compilation* c = Compilation::current();
+    void* res = c->arena()->Amalloc(size);
+    ((Instruction*)res)->_id = c->get_next_id();
+    return res;
+  }
+
   enum InstructionFlag {
     NeedsNullCheckFlag = 0,
     CanTrapFlag,
@@ -338,13 +351,13 @@
   static Condition negate(Condition cond);
 
   // initialization
-  static void initialize()                       { _next_id = 0; }
-  static int number_of_instructions()            { return _next_id; }
+  static int number_of_instructions() {
+    return Compilation::current()->number_of_instructions();
+  }
 
   // creation
   Instruction(ValueType* type, bool type_is_constant = false, bool create_hi = true)
-  : _id(_next_id++)
-  , _bci(-99)
+  : _bci(-99)
   , _use_count(0)
   , _pin_state(0)
   , _type(type)
@@ -479,10 +492,10 @@
 
   virtual bool can_trap() const                  { return false; }
 
-  virtual void input_values_do(void f(Value*))   = 0;
-  virtual void state_values_do(void f(Value*))   { /* usually no state - override on demand */ }
-  virtual void other_values_do(void f(Value*))   { /* usually no other - override on demand */ }
-          void       values_do(void f(Value*))   { input_values_do(f); state_values_do(f); other_values_do(f); }
+  virtual void input_values_do(ValueVisitor* f)   = 0;
+  virtual void state_values_do(ValueVisitor* f)   { /* usually no state - override on demand */ }
+  virtual void other_values_do(ValueVisitor* f)   { /* usually no other - override on demand */ }
+          void       values_do(ValueVisitor* f)   { input_values_do(f); state_values_do(f); other_values_do(f); }
 
   virtual ciType* exact_type() const             { return NULL; }
   virtual ciType* declared_type() const          { return NULL; }
@@ -517,9 +530,12 @@
 
 // Debugging support
 
+
 #ifdef ASSERT
-  static void assert_value(Value* x)             { assert((*x) != NULL, "value must exist"); }
-  #define ASSERT_VALUES                          values_do(assert_value);
+class AssertValues: public ValueVisitor {
+  void visit(Value* x)             { assert((*x) != NULL, "value must exist"); }
+};
+  #define ASSERT_VALUES                          { AssertValues assert_value; values_do(&assert_value); }
 #else
   #define ASSERT_VALUES
 #endif // ASSERT
@@ -555,7 +571,7 @@
   void make_illegal()                            { set_type(illegalType); }
 
   // generic
-  virtual void input_values_do(void f(Value*))   { ShouldNotReachHere(); }
+  virtual void input_values_do(ValueVisitor* f)   { ShouldNotReachHere(); }
 };
 
 
@@ -615,7 +631,7 @@
   }
 
   // generic
-  virtual void input_values_do(void f(Value*)) {
+  virtual void input_values_do(ValueVisitor* f) {
   }
 };
 
@@ -635,7 +651,7 @@
   int java_index() const                         { return _java_index; }
 
   // generic
-  virtual void input_values_do(void f(Value*))   { /* no values */ }
+  virtual void input_values_do(ValueVisitor* f)   { /* no values */ }
 };
 
 
@@ -663,8 +679,8 @@
 
   // generic
   virtual bool can_trap() const                  { return state() != NULL; }
-  virtual void input_values_do(void f(Value*))   { /* no values */ }
-  virtual void other_values_do(void f(Value*));
+  virtual void input_values_do(ValueVisitor* f)   { /* no values */ }
+  virtual void other_values_do(ValueVisitor* f);
 
   virtual intx hash() const;
   virtual bool is_equal(Value v) const;
@@ -734,8 +750,8 @@
 
   // generic
   virtual bool can_trap() const                  { return needs_null_check() || needs_patching(); }
-  virtual void input_values_do(void f(Value*))   { f(&_obj); }
-  virtual void other_values_do(void f(Value*));
+  virtual void input_values_do(ValueVisitor* f)   { f->visit(&_obj); }
+  virtual void other_values_do(ValueVisitor* f);
 };
 
 
@@ -776,7 +792,7 @@
   bool needs_write_barrier() const               { return check_flag(NeedsWriteBarrierFlag); }
 
   // generic
-  virtual void input_values_do(void f(Value*))   { AccessField::input_values_do(f); f(&_value); }
+  virtual void input_values_do(ValueVisitor* f)   { AccessField::input_values_do(f); f->visit(&_value); }
 };
 
 
@@ -804,8 +820,8 @@
 
   // generic
   virtual bool can_trap() const                  { return needs_null_check(); }
-  virtual void input_values_do(void f(Value*))   { f(&_array); }
-  virtual void other_values_do(void f(Value*));
+  virtual void input_values_do(ValueVisitor* f)   { f->visit(&_array); }
+  virtual void other_values_do(ValueVisitor* f);
 };
 
 
@@ -857,7 +873,7 @@
   bool compute_needs_range_check();
 
   // generic
-  virtual void input_values_do(void f(Value*))   { AccessArray::input_values_do(f); f(&_index); if (_length != NULL) f(&_length); }
+  virtual void input_values_do(ValueVisitor* f)   { AccessArray::input_values_do(f); f->visit(&_index); if (_length != NULL) f->visit(&_length); }
 };
 
 
@@ -909,7 +925,7 @@
   bool needs_store_check() const                 { return check_flag(NeedsStoreCheckFlag); }
 
   // generic
-  virtual void input_values_do(void f(Value*))   { AccessIndexed::input_values_do(f); f(&_value); }
+  virtual void input_values_do(ValueVisitor* f)   { AccessIndexed::input_values_do(f); f->visit(&_value); }
 };
 
 
@@ -927,7 +943,7 @@
   Value x() const                                { return _x; }
 
   // generic
-  virtual void input_values_do(void f(Value*))   { f(&_x); }
+  virtual void input_values_do(ValueVisitor* f)   { f->visit(&_x); }
 };
 
 
@@ -956,7 +972,7 @@
 
   // generic
   virtual bool is_commutative() const            { return false; }
-  virtual void input_values_do(void f(Value*))   { f(&_x); f(&_y); }
+  virtual void input_values_do(ValueVisitor* f)   { f->visit(&_x); f->visit(&_y); }
 };
 
 
@@ -982,7 +998,7 @@
   // generic
   virtual bool is_commutative() const;
   virtual bool can_trap() const;
-  virtual void other_values_do(void f(Value*));
+  virtual void other_values_do(ValueVisitor* f);
   HASHING3(Op2, true, op(), x()->subst(), y()->subst())
 };
 
@@ -1023,7 +1039,7 @@
 
   // generic
   HASHING3(Op2, true, op(), x()->subst(), y()->subst())
-  virtual void other_values_do(void f(Value*));
+  virtual void other_values_do(ValueVisitor* f);
 };
 
 
@@ -1051,7 +1067,7 @@
   Value fval() const                             { return _fval; }
 
   // generic
-  virtual void input_values_do(void f(Value*))   { Op2::input_values_do(f); f(&_tval); f(&_fval); }
+  virtual void input_values_do(ValueVisitor* f)   { Op2::input_values_do(f); f->visit(&_tval); f->visit(&_fval); }
 };
 
 
@@ -1071,7 +1087,7 @@
   Value value() const                            { return _value; }
 
   // generic
-  virtual void input_values_do(void f(Value*))   { f(&_value); }
+  virtual void input_values_do(ValueVisitor* f)   { f->visit(&_value); }
   HASHING2(Convert, true, op(), value()->subst())
 };
 
@@ -1100,8 +1116,8 @@
 
   // generic
   virtual bool can_trap() const                  { return check_flag(CanTrapFlag); /* null-check elimination sets to false */ }
-  virtual void input_values_do(void f(Value*))   { f(&_obj); }
-  virtual void other_values_do(void f(Value*));
+  virtual void input_values_do(ValueVisitor* f)   { f->visit(&_obj); }
+  virtual void other_values_do(ValueVisitor* f);
   HASHING1(NullCheck, true, obj()->subst())
 };
 
@@ -1127,8 +1143,8 @@
   void set_state(ValueStack* state)              { _state = state; }
 
   // generic
-  virtual void input_values_do(void f(Value*))   { /* no values */ }
-  virtual void state_values_do(void f(Value*));
+  virtual void input_values_do(ValueVisitor* f)   { /* no values */ }
+  virtual void state_values_do(ValueVisitor* f);
 };
 
 
@@ -1169,12 +1185,12 @@
 
   // generic
   virtual bool can_trap() const                  { return true; }
-  virtual void input_values_do(void f(Value*)) {
+  virtual void input_values_do(ValueVisitor* f) {
     StateSplit::input_values_do(f);
-    if (has_receiver()) f(&_recv);
-    for (int i = 0; i < _args->length(); i++) f(_args->adr_at(i));
+    if (has_receiver()) f->visit(&_recv);
+    for (int i = 0; i < _args->length(); i++) f->visit(_args->adr_at(i));
   }
-  virtual void state_values_do(void f(Value*));
+  virtual void state_values_do(ValueVisitor* f);
 };
 
 
@@ -1212,8 +1228,8 @@
 
   // generic
   virtual bool can_trap() const                  { return true; }
-  virtual void input_values_do(void f(Value*))   { StateSplit::input_values_do(f); f(&_length); }
-  virtual void other_values_do(void f(Value*));
+  virtual void input_values_do(ValueVisitor* f)   { StateSplit::input_values_do(f); f->visit(&_length); }
+  virtual void other_values_do(ValueVisitor* f);
 };
 
 
@@ -1262,7 +1278,7 @@
   int rank() const                               { return dims()->length(); }
 
   // generic
-  virtual void input_values_do(void f(Value*)) {
+  virtual void input_values_do(ValueVisitor* f) {
     // NOTE: we do not call NewArray::input_values_do since "length"
     // is meaningless for a multi-dimensional array; passing the
     // zeroth element down to NewArray as its length is a bad idea
@@ -1270,7 +1286,7 @@
     // get updated, and the value must not be traversed twice. Was bug
     // - kbr 4/10/2001
     StateSplit::input_values_do(f);
-    for (int i = 0; i < _dims->length(); i++) f(_dims->adr_at(i));
+    for (int i = 0; i < _dims->length(); i++) f->visit(_dims->adr_at(i));
   }
 };
 
@@ -1300,8 +1316,8 @@
 
   // generic
   virtual bool can_trap() const                  { return true; }
-  virtual void input_values_do(void f(Value*))   { StateSplit::input_values_do(f); f(&_obj); }
-  virtual void other_values_do(void f(Value*));
+  virtual void input_values_do(ValueVisitor* f)   { StateSplit::input_values_do(f); f->visit(&_obj); }
+  virtual void other_values_do(ValueVisitor* f);
 };
 
 
@@ -1366,7 +1382,7 @@
   int monitor_no() const                         { return _monitor_no; }
 
   // generic
-  virtual void input_values_do(void f(Value*))   { StateSplit::input_values_do(f); f(&_obj); }
+  virtual void input_values_do(ValueVisitor* f)   { StateSplit::input_values_do(f); f->visit(&_obj); }
 };
 
 
@@ -1385,7 +1401,7 @@
 
   // accessors
   ValueStack* lock_stack_before() const          { return _lock_stack_before; }
-  virtual void state_values_do(void f(Value*));
+  virtual void state_values_do(ValueVisitor* f);
 
   // generic
   virtual bool can_trap() const                  { return true; }
@@ -1454,11 +1470,11 @@
 
   // generic
   virtual bool can_trap() const                  { return check_flag(CanTrapFlag); }
-  virtual void input_values_do(void f(Value*)) {
+  virtual void input_values_do(ValueVisitor* f) {
     StateSplit::input_values_do(f);
-    for (int i = 0; i < _args->length(); i++) f(_args->adr_at(i));
+    for (int i = 0; i < _args->length(); i++) f->visit(_args->adr_at(i));
   }
-  virtual void state_values_do(void f(Value*));
+  virtual void state_values_do(ValueVisitor* f);
 
 };
 
@@ -1467,8 +1483,6 @@
 
 LEAF(BlockBegin, StateSplit)
  private:
-  static int _next_block_id;                     // the block counter
-
   int        _block_id;                          // the unique block id
   int        _depth_first_number;                // number of this block in a depth-first ordering
   int        _linear_scan_number;                // number of this block in linear-scan ordering
@@ -1510,14 +1524,22 @@
   friend class SuxAndWeightAdjuster;
 
  public:
+   void* operator new(size_t size) {
+    Compilation* c = Compilation::current();
+    void* res = c->arena()->Amalloc(size);
+    ((BlockBegin*)res)->_id = c->get_next_id();
+    ((BlockBegin*)res)->_block_id = c->get_next_block_id();
+    return res;
+  }
+
   // initialization/counting
-  static void initialize()                       { _next_block_id = 0; }
-  static int  number_of_blocks()                 { return _next_block_id; }
+  static int  number_of_blocks() {
+    return Compilation::current()->number_of_blocks();
+  }
 
   // creation
   BlockBegin(int bci)
   : StateSplit(illegalType)
-  , _block_id(_next_block_id++)
   , _depth_first_number(-1)
   , _linear_scan_number(-1)
   , _loop_depth(0)
@@ -1592,7 +1614,7 @@
   void init_stores_to_locals(int locals_count)   { _stores_to_locals = BitMap(locals_count); _stores_to_locals.clear(); }
 
   // generic
-  virtual void state_values_do(void f(Value*));
+  virtual void state_values_do(ValueVisitor* f);
 
   // successors and predecessors
   int number_of_sux() const;
@@ -1646,7 +1668,7 @@
   void iterate_preorder   (BlockClosure* closure);
   void iterate_postorder  (BlockClosure* closure);
 
-  void block_values_do(void f(Value*));
+  void block_values_do(ValueVisitor* f);
 
   // loops
   void set_loop_index(int ix)                    { _loop_index = ix;        }
@@ -1698,7 +1720,7 @@
   void set_begin(BlockBegin* begin);
 
   // generic
-  virtual void other_values_do(void f(Value*));
+  virtual void other_values_do(ValueVisitor* f);
 
   // successors
   int number_of_sux() const                      { return _sux != NULL ? _sux->length() : 0; }
@@ -1787,7 +1809,7 @@
   void set_profiled_bci(int bci)                  { _profiled_bci = bci;       }
 
   // generic
-  virtual void input_values_do(void f(Value*))   { BlockEnd::input_values_do(f); f(&_x); f(&_y); }
+  virtual void input_values_do(ValueVisitor* f)   { BlockEnd::input_values_do(f); f->visit(&_x); f->visit(&_y); }
 };
 
 
@@ -1841,7 +1863,7 @@
   }
 
   // generic
-  virtual void input_values_do(void f(Value*))   { BlockEnd::input_values_do(f); f(&_obj); }
+  virtual void input_values_do(ValueVisitor* f)   { BlockEnd::input_values_do(f); f->visit(&_obj); }
 };
 
 
@@ -1863,7 +1885,7 @@
   int length() const                             { return number_of_sux() - 1; }
 
   // generic
-  virtual void input_values_do(void f(Value*))   { BlockEnd::input_values_do(f); f(&_tag); }
+  virtual void input_values_do(ValueVisitor* f)   { BlockEnd::input_values_do(f); f->visit(&_tag); }
 };
 
 
@@ -1916,9 +1938,9 @@
   bool has_result() const                        { return result() != NULL; }
 
   // generic
-  virtual void input_values_do(void f(Value*)) {
+  virtual void input_values_do(ValueVisitor* f) {
     BlockEnd::input_values_do(f);
-    if (has_result()) f(&_result);
+    if (has_result()) f->visit(&_result);
   }
 };
 
@@ -1938,8 +1960,8 @@
 
   // generic
   virtual bool can_trap() const                  { return true; }
-  virtual void input_values_do(void f(Value*))   { BlockEnd::input_values_do(f); f(&_exception); }
-  virtual void state_values_do(void f(Value*));
+  virtual void input_values_do(ValueVisitor* f)   { BlockEnd::input_values_do(f); f->visit(&_exception); }
+  virtual void state_values_do(ValueVisitor* f);
 };
 
 
@@ -1971,7 +1993,7 @@
 #endif
 
   // generic
-  virtual void input_values_do(void f(Value*))   { }
+  virtual void input_values_do(ValueVisitor* f)   { }
 };
 
 
@@ -1984,7 +2006,7 @@
   }
 
   // generic
-  virtual void input_values_do(void f(Value*))   { }
+  virtual void input_values_do(ValueVisitor* f)   { }
 };
 
 
@@ -2008,7 +2030,7 @@
   Value input() const                            { return _input; }
 
   // generic
-  virtual void input_values_do(void f(Value*))   { f(&_input); }
+  virtual void input_values_do(ValueVisitor* f)   { f->visit(&_input); }
 };
 
 
@@ -2033,8 +2055,8 @@
   BasicType basic_type()                         { return _basic_type; }
 
   // generic
-  virtual void input_values_do(void f(Value*))   { }
-  virtual void other_values_do(void f(Value*))   { }
+  virtual void input_values_do(ValueVisitor* f)   { }
+  virtual void other_values_do(ValueVisitor* f)   { }
 };
 
 
@@ -2078,9 +2100,9 @@
   void set_log2_scale(int log2_scale)            { _log2_scale = log2_scale; }
 
   // generic
-  virtual void input_values_do(void f(Value*))   { UnsafeOp::input_values_do(f);
-                                                   f(&_base);
-                                                   if (has_index()) f(&_index); }
+  virtual void input_values_do(ValueVisitor* f)   { UnsafeOp::input_values_do(f);
+                                                   f->visit(&_base);
+                                                   if (has_index()) f->visit(&_index); }
 };
 
 
@@ -2128,8 +2150,8 @@
   Value value()                                  { return _value; }
 
   // generic
-  virtual void input_values_do(void f(Value*))   { UnsafeRawOp::input_values_do(f);
-                                                   f(&_value); }
+  virtual void input_values_do(ValueVisitor* f)   { UnsafeRawOp::input_values_do(f);
+                                                   f->visit(&_value); }
 };
 
 
@@ -2149,9 +2171,9 @@
   Value offset()                                 { return _offset; }
   bool  is_volatile()                            { return _is_volatile; }
   // generic
-  virtual void input_values_do(void f(Value*))   { UnsafeOp::input_values_do(f);
-                                                   f(&_object);
-                                                   f(&_offset); }
+  virtual void input_values_do(ValueVisitor* f)   { UnsafeOp::input_values_do(f);
+                                                   f->visit(&_object);
+                                                   f->visit(&_offset); }
 };
 
 
@@ -2180,8 +2202,8 @@
   Value value()                                  { return _value; }
 
   // generic
-  virtual void input_values_do(void f(Value*))   { UnsafeObjectOp::input_values_do(f);
-                                                   f(&_value); }
+  virtual void input_values_do(ValueVisitor* f)   { UnsafeObjectOp::input_values_do(f);
+                                                   f->visit(&_value); }
 };
 
 
@@ -2238,7 +2260,7 @@
   Value recv()            { return _recv; }
   ciKlass* known_holder() { return _known_holder; }
 
-  virtual void input_values_do(void f(Value*))   { if (_recv != NULL) f(&_recv); }
+  virtual void input_values_do(ValueVisitor* f)   { if (_recv != NULL) f->visit(&_recv); }
 };
 
 
@@ -2266,7 +2288,7 @@
   int offset()     { return _offset; }
   int increment()  { return _increment; }
 
-  virtual void input_values_do(void f(Value*))   { f(&_mdo); }
+  virtual void input_values_do(ValueVisitor* f)   { f->visit(&_mdo); }
 };
 
 
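With the `_next_id`/`_next_block_id` statics gone, ids are stamped in `operator new`, before the constructor runs, from counters held by the current compilation. A sketch of the trick; note it writes into raw storage and relies on `_id`'s layout, just as the patch does (malloc stands in for the compilation arena):

#include <cassert>
#include <cstddef>
#include <cstdlib>

struct Compilation {
  int _next_id;
  static Compilation* current_;  // stand-in for Compilation::current()
  int get_next_id() { return _next_id++; }
};
Compilation* Compilation::current_ = NULL;

class Instruction {
 public:
  int _id;
  void* operator new(size_t size) {
    void* res = std::malloc(size);  // the real code takes arena memory here
    // Stamp the id into the raw storage; the constructor must not reset it.
    ((Instruction*)res)->_id = Compilation::current_->get_next_id();
    return res;
  }
  void operator delete(void* p) { std::free(p); }
  Instruction() { /* note: no _id in the initializer list */ }
  int id() const { return _id; }
};

int main() {
  Compilation c = { 0 };
  Compilation::current_ = &c;
  Instruction* a = new Instruction();
  Instruction* b = new Instruction();
  assert(a->id() == 0 && b->id() == 1);  // ids are now per compilation
  delete a;
  delete b;
  return 0;
}
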
--- a/hotspot/src/share/vm/c1/c1_LIR.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIR.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -715,7 +715,10 @@
       }
 
       if (opJavaCall->_info)                     do_info(opJavaCall->_info);
-      if (opJavaCall->is_method_handle_invoke()) do_temp(FrameMap::method_handle_invoke_SP_save_opr());
+      if (opJavaCall->is_method_handle_invoke()) {
+        opJavaCall->_method_handle_invoke_SP_save_opr = FrameMap::method_handle_invoke_SP_save_opr();
+        do_temp(opJavaCall->_method_handle_invoke_SP_save_opr);
+      }
       do_call();
       if (opJavaCall->_result->is_valid())       do_output(opJavaCall->_result);
 
--- a/hotspot/src/share/vm/c1/c1_LIR.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIR.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -505,15 +505,22 @@
      , _type(type)
      , _disp(0) { verify(); }
 
-  LIR_Address(LIR_Opr base, int disp, BasicType type):
+  LIR_Address(LIR_Opr base, intx disp, BasicType type):
        _base(base)
      , _index(LIR_OprDesc::illegalOpr())
      , _scale(times_1)
      , _type(type)
      , _disp(disp) { verify(); }
 
+  LIR_Address(LIR_Opr base, BasicType type):
+       _base(base)
+     , _index(LIR_OprDesc::illegalOpr())
+     , _scale(times_1)
+     , _type(type)
+     , _disp(0) { verify(); }
+
 #ifdef X86
-  LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, int disp, BasicType type):
+  LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
        _base(base)
      , _index(index)
      , _scale(scale)
@@ -1033,8 +1040,9 @@
  friend class LIR_OpVisitState;
 
  private:
-  ciMethod*       _method;
-  LIR_Opr         _receiver;
+  ciMethod* _method;
+  LIR_Opr   _receiver;
+  LIR_Opr   _method_handle_invoke_SP_save_opr;  // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.
 
  public:
   LIR_OpJavaCall(LIR_Code code, ciMethod* method,
@@ -1043,14 +1051,18 @@
                  CodeEmitInfo* info)
   : LIR_OpCall(code, addr, result, arguments, info)
   , _receiver(receiver)
-  , _method(method)          { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
+  , _method(method)
+  , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
+  { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
 
   LIR_OpJavaCall(LIR_Code code, ciMethod* method,
                  LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
                  LIR_OprList* arguments, CodeEmitInfo* info)
   : LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
   , _receiver(receiver)
-  , _method(method)          { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
+  , _method(method)
+  , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
+  { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
 
   LIR_Opr receiver() const                       { return _receiver; }
   ciMethod* method() const                       { return _method;   }
--- a/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -301,9 +301,9 @@
 }
 
 
-void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke) {
+void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
   flush_debug_info(pc_offset);
-  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, is_method_handle_invoke);
+  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
   if (cinfo->exception_handlers() != NULL) {
     compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
   }
@@ -413,12 +413,6 @@
 void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
   verify_oop_map(op->info());
 
-  // JSR 292
-  // Preserve the SP over MethodHandle call sites.
-  if (op->is_method_handle_invoke()) {
-    preserve_SP(op);
-  }
-
   if (os::is_MP()) {
     // must align calls sites, otherwise they can't be updated atomically on MP hardware
     align_call(op->code());
@@ -444,10 +438,6 @@
   default: ShouldNotReachHere();
   }
 
-  if (op->is_method_handle_invoke()) {
-    restore_SP(op);
-  }
-
 #if defined(X86) && defined(TIERED)
   // C2 leaves the FPU stack dirty; clean it
   if (UseSSE < 2) {
--- a/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -84,7 +84,7 @@
   Address as_Address_hi(LIR_Address* addr);
 
   // debug information
-  void add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke = false);
+  void add_call_info(int pc_offset, CodeEmitInfo* cinfo);
   void add_debug_info_for_branch(CodeEmitInfo* info);
   void add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo);
   void add_debug_info_for_div0_here(CodeEmitInfo* info);
@@ -212,10 +212,6 @@
   void ic_call(     LIR_OpJavaCall* op);
   void vtable_call( LIR_OpJavaCall* op);
 
-  // JSR 292
-  void preserve_SP(LIR_OpJavaCall* op);
-  void restore_SP( LIR_OpJavaCall* op);
-
   void osr_entry();
 
   void build_frame();
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -304,7 +304,7 @@
   __ branch_destination(block->label());
 
   if (LIRTraceExecution &&
-      Compilation::current_compilation()->hir()->start()->block_id() != block->block_id() &&
+      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
       !block->is_set(BlockBegin::exception_entry_flag)) {
     assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
     trace_block_entry(block);
@@ -1309,7 +1309,7 @@
   __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
   if (!addr_opr->is_address()) {
     assert(addr_opr->is_register(), "must be");
-    addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, 0, T_OBJECT));
+    addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
   }
   CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code,
                                         info);
@@ -1325,7 +1325,7 @@
       new_val->as_constant_ptr()->as_jobject() == NULL) return;
 
   if (!new_val->is_register()) {
-    LIR_Opr new_val_reg = new_pointer_register();
+    LIR_Opr new_val_reg = new_register(T_OBJECT);
     if (new_val->is_constant()) {
       __ move(new_val, new_val_reg);
     } else {
@@ -1337,7 +1337,7 @@
 
   if (addr->is_address()) {
     LIR_Address* address = addr->as_address_ptr();
-    LIR_Opr ptr = new_pointer_register();
+    LIR_Opr ptr = new_register(T_OBJECT);
     if (!address->index()->is_valid() && address->disp() == 0) {
       __ move(address->base(), ptr);
     } else {
@@ -1350,7 +1350,6 @@
 
   LIR_Opr xor_res = new_pointer_register();
   LIR_Opr xor_shift_res = new_pointer_register();
-
   if (TwoOperandLIRForm ) {
     __ move(addr, xor_res);
     __ logical_xor(xor_res, new_val, xor_res);
@@ -1368,7 +1367,7 @@
   }
 
   if (!new_val->is_register()) {
-    LIR_Opr new_val_reg = new_pointer_register();
+    LIR_Opr new_val_reg = new_register(T_OBJECT);
     __ leal(new_val, new_val_reg);
     new_val = new_val_reg;
   }
@@ -1377,7 +1376,7 @@
   __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
 
   CodeStub* slow = new G1PostBarrierStub(addr, new_val);
-  __ branch(lir_cond_notEqual, T_INT, slow);
+  __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
   __ branch_destination(slow->continuation());
 }
 
@@ -2371,9 +2370,17 @@
   bool optimized = x->target_is_loaded() && x->target_is_final();
   assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
 
+  // JSR 292
+  // Preserve the SP over MethodHandle call sites.
+  ciMethod* target = x->target();
+  if (target->is_method_handle_invoke()) {
+    info->set_is_method_handle_invoke(true);
+    __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
+  }
+
   switch (x->code()) {
     case Bytecodes::_invokestatic:
-      __ call_static(x->target(), result_register,
+      __ call_static(target, result_register,
                      SharedRuntime::get_resolve_static_call_stub(),
                      arg_list, info);
       break;
@@ -2383,17 +2390,17 @@
       // for final target we still produce an inline cache, in order
       // to be able to call mixed mode
       if (x->code() == Bytecodes::_invokespecial || optimized) {
-        __ call_opt_virtual(x->target(), receiver, result_register,
+        __ call_opt_virtual(target, receiver, result_register,
                             SharedRuntime::get_resolve_opt_virtual_call_stub(),
                             arg_list, info);
       } else if (x->vtable_index() < 0) {
-        __ call_icvirtual(x->target(), receiver, result_register,
+        __ call_icvirtual(target, receiver, result_register,
                           SharedRuntime::get_resolve_virtual_call_stub(),
                           arg_list, info);
       } else {
         int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
         int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
-        __ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
+        __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
       }
       break;
     case Bytecodes::_invokedynamic: {
@@ -2432,7 +2439,7 @@
       // Load target MethodHandle from CallSite object.
       __ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
 
-      __ call_dynamic(x->target(), receiver, result_register,
+      __ call_dynamic(target, receiver, result_register,
                       SharedRuntime::get_resolve_opt_virtual_call_stub(),
                       arg_list, info);
       break;
@@ -2442,6 +2449,12 @@
       break;
   }
 
+  // JSR 292
+  // Restore the SP after MethodHandle call sites.
+  if (target->is_method_handle_invoke()) {
+    __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
+  }
+
   if (x->type()->is_float() || x->type()->is_double()) {
     // Force rounding of results from non-strictfp when in strictfp
     // scope (or when we don't know the strictness of the callee, to
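
The generator hunks above emit the SP save before a MethodHandle call and the restore after it, replacing the assembler-side `preserve_SP`/`restore_SP` pair removed earlier in the patch. A toy sketch of emitting such bracketing moves around special call sites; the string "LIR" list and register names are illustration only, not real LIR:

#include <cstdio>
#include <string>
#include <vector>

// A string "LIR list": just enough to show the shape of the emitted code.
struct LIRList {
  std::vector<std::string> ops;
  void move(const std::string& src, const std::string& dst) {
    ops.push_back("move " + src + " -> " + dst);
  }
  void call(const std::string& target) { ops.push_back("call " + target); }
};

static void do_invoke(LIRList& lir, const std::string& target,
                      bool is_method_handle_invoke) {
  if (is_method_handle_invoke)
    lir.move("SP", "mh_SP_save");  // preserve SP across the MH call site
  lir.call(target);
  if (is_method_handle_invoke)
    lir.move("mh_SP_save", "SP");  // restore it once the call returns
}

int main() {
  LIRList lir;
  do_invoke(lir, "MethodHandle.invoke", true);
  for (size_t i = 0; i < lir.ops.size(); i++)
    std::printf("%s\n", lir.ops[i].c_str());
  return 0;
}
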
--- a/hotspot/src/share/vm/c1/c1_LinearScan.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_LinearScan.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -84,10 +84,6 @@
  , _fpu_stack_allocator(NULL)
 #endif
 {
-  // note: to use more than on instance of LinearScan at a time this function call has to
-  //       be moved somewhere outside of this constructor:
-  Interval::initialize();
-
   assert(this->ir() != NULL,          "check if valid");
   assert(this->compilation() != NULL, "check if valid");
   assert(this->gen() != NULL,         "check if valid");
@@ -3929,8 +3925,8 @@
 
 // initialize sentinel
 Range* Range::_end = NULL;
-void Range::initialize() {
-  _end = new Range(max_jint, max_jint, NULL);
+void Range::initialize(Arena* arena) {
+  _end = new (arena) Range(max_jint, max_jint, NULL);
 }
 
 int Range::intersects_at(Range* r2) const {
@@ -3976,9 +3972,9 @@
 
 // initialize sentinel
 Interval* Interval::_end = NULL;
-void Interval::initialize() {
-  Range::initialize();
-  _end = new Interval(-1);
+void Interval::initialize(Arena* arena) {
+  Range::initialize(arena);
+  _end = new (arena) Interval(-1);
 }
 
 Interval::Interval(int reg_num) :
--- a/hotspot/src/share/vm/c1/c1_LinearScan.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_LinearScan.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -462,7 +462,7 @@
  public:
   Range(int from, int to, Range* next);
 
-  static void      initialize();
+  static void      initialize(Arena* arena);
   static Range*    end()                         { return _end; }
 
   int              from() const                  { return _from; }
@@ -529,7 +529,7 @@
  public:
   Interval(int reg_num);
 
-  static void      initialize();
+  static void      initialize(Arena* arena);
   static Interval* end()                         { return _end; }
 
   // accessors
--- a/hotspot/src/share/vm/c1/c1_Optimizer.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Optimizer.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -437,11 +437,8 @@
 // Because of a static contained within (for the purpose of iteration
 // over instructions), it is only valid to have one of these active at
 // a time
-class NullCheckEliminator {
+class NullCheckEliminator: public ValueVisitor {
  private:
-  static NullCheckEliminator* _static_nce;
-  static void                 do_value(Value* vp);
-
   Optimizer*        _opt;
 
   ValueSet*         _visitable_instructions;        // Visit each instruction only once per basic block
@@ -504,6 +501,8 @@
   // Process a graph
   void iterate(BlockBegin* root);
 
+  void visit(Value* f);
+
   // In some situations (like NullCheck(x); getfield(x)) the debug
   // information from the explicit NullCheck can be used to populate
   // the getfield, even if the two instructions are in different
@@ -602,14 +601,11 @@
 void NullCheckVisitor::do_ProfileCounter (ProfileCounter*  x) {}
 
 
-NullCheckEliminator* NullCheckEliminator::_static_nce = NULL;
-
-
-void NullCheckEliminator::do_value(Value* p) {
+void NullCheckEliminator::visit(Value* p) {
   assert(*p != NULL, "should not find NULL instructions");
-  if (_static_nce->visitable(*p)) {
-    _static_nce->mark_visited(*p);
-    (*p)->visit(&_static_nce->_visitor);
+  if (visitable(*p)) {
+    mark_visited(*p);
+    (*p)->visit(&_visitor);
   }
 }
 
@@ -637,7 +633,6 @@
 
 
 void NullCheckEliminator::iterate_one(BlockBegin* block) {
-  _static_nce = this;
   clear_visitable_state();
   // clear out any old explicit null checks
   set_last_explicit_null_check(NULL);
@@ -712,7 +707,7 @@
     mark_visitable(instr);
     if (instr->is_root() || instr->can_trap() || (instr->as_NullCheck() != NULL)) {
       mark_visited(instr);
-      instr->input_values_do(&NullCheckEliminator::do_value);
+      instr->input_values_do(this);
       instr->visit(&_visitor);
     }
   }
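
This hunk converts NullCheckEliminator's static do_value()/_static_nce back-channel into a virtual ValueVisitor, so per-traversal state lives in the object and two eliminators can be active at once (the class comment about "a static contained within" is now historical). A minimal sketch of the pattern under invented names:

    #include <iostream>
    #include <vector>

    struct Value { int id; };

    // The callback is now an object, so per-traversal state lives in the
    // visitor itself instead of in a static back-channel.
    class ValueVisitor {
     public:
      virtual ~ValueVisitor() {}
      virtual void visit(Value** p) = 0;
    };

    struct Instruction {
      std::vector<Value*> inputs;
      // The walker takes a visitor instead of a bare void (*f)(Value**),
      // which is what makes the traversal re-entrant.
      void input_values_do(ValueVisitor* f) {
        for (Value*& v : inputs) f->visit(&v);
      }
    };

    class EliminatorSketch : public ValueVisitor {
      int _visited = 0;                    // instance state; no global needed
     public:
      void visit(Value** p) override {
        if (*p != nullptr) ++_visited;
      }
      int visited() const { return _visited; }
    };

    int main() {
      Value a{1}, b{2};
      Instruction instr;
      instr.inputs = {&a, &b, nullptr};
      EliminatorSketch nce;
      instr.input_values_do(&nce);
      std::cout << nce.visited() << "\n";  // prints 2
    }
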
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -60,7 +60,6 @@
 
 // Implementation of Runtime1
 
-bool      Runtime1::_is_initialized = false;
 CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];
 const char *Runtime1::_blob_names[] = {
   RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)
@@ -89,8 +88,6 @@
 int Runtime1::_throw_count = 0;
 #endif
 
-BufferBlob* Runtime1::_buffer_blob  = NULL;
-
 // Simple helper to see if the caller of a runtime stub which
 // entered the VM has been deoptimized
 
@@ -117,43 +114,14 @@
 }
 
 
-BufferBlob* Runtime1::get_buffer_blob() {
-  // Allocate code buffer space only once
-  BufferBlob* blob = _buffer_blob;
-  if (blob == NULL) {
-    // setup CodeBuffer.  Preallocate a BufferBlob of size
-    // NMethodSizeLimit plus some extra space for constants.
-    int code_buffer_size = desired_max_code_buffer_size() + desired_max_constant_size();
-    blob = BufferBlob::create("Compiler1 temporary CodeBuffer",
-                              code_buffer_size);
-    guarantee(blob != NULL, "must create initial code buffer");
-    _buffer_blob = blob;
-  }
-  return _buffer_blob;
-}
-
-void Runtime1::setup_code_buffer(CodeBuffer* code, int call_stub_estimate) {
-  // Preinitialize the consts section to some large size:
-  int locs_buffer_size = 20 * (relocInfo::length_limit + sizeof(relocInfo));
-  char* locs_buffer = NEW_RESOURCE_ARRAY(char, locs_buffer_size);
-  code->insts()->initialize_shared_locs((relocInfo*)locs_buffer,
-                                        locs_buffer_size / sizeof(relocInfo));
-  code->initialize_consts_size(desired_max_constant_size());
-  // Call stubs + deopt/exception handler
-  code->initialize_stubs_size((call_stub_estimate * LIR_Assembler::call_stub_size) +
-                              LIR_Assembler::exception_handler_size +
-                              LIR_Assembler::deopt_handler_size);
-}
-
-
-void Runtime1::generate_blob_for(StubID id) {
+void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
   assert(0 <= id && id < number_of_ids, "illegal stub id");
   ResourceMark rm;
   // create code buffer for code storage
-  CodeBuffer code(get_buffer_blob()->instructions_begin(),
-                  get_buffer_blob()->instructions_size());
+  CodeBuffer code(buffer_blob->instructions_begin(),
+                  buffer_blob->instructions_size());
 
-  setup_code_buffer(&code, 0);
+  Compilation::setup_code_buffer(&code, 0);
 
   // create assembler for code generation
   StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
@@ -204,35 +172,28 @@
 }
 
 
-void Runtime1::initialize() {
-  // Warning: If we have more than one compilation running in parallel, we
-  //          need a lock here with the current setup (lazy initialization).
-  if (!is_initialized()) {
-    _is_initialized = true;
-
-    // platform-dependent initialization
-    initialize_pd();
-    // generate stubs
-    for (int id = 0; id < number_of_ids; id++) generate_blob_for((StubID)id);
-    // printing
+void Runtime1::initialize(BufferBlob* blob) {
+  // platform-dependent initialization
+  initialize_pd();
+  // generate stubs
+  for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
+  // printing
 #ifndef PRODUCT
-    if (PrintSimpleStubs) {
-      ResourceMark rm;
-      for (int id = 0; id < number_of_ids; id++) {
-        _blobs[id]->print();
-        if (_blobs[id]->oop_maps() != NULL) {
-          _blobs[id]->oop_maps()->print();
-        }
+  if (PrintSimpleStubs) {
+    ResourceMark rm;
+    for (int id = 0; id < number_of_ids; id++) {
+      _blobs[id]->print();
+      if (_blobs[id]->oop_maps() != NULL) {
+        _blobs[id]->oop_maps()->print();
       }
     }
+  }
 #endif
-  }
 }
 
 
 CodeBlob* Runtime1::blob_for(StubID id) {
   assert(0 <= id && id < number_of_ids, "illegal stub id");
-  if (!is_initialized()) initialize();
   return _blobs[id];
 }
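
The deleted comment in initialize() itself warned that lazy initialization would need a lock once compilations run in parallel; the patch resolves that by initializing eagerly, once, with the caller supplying the scratch BufferBlob, and blob_for() no longer self-initializes. A hedged sketch of the resulting contract (names invented):

    class BufferBlob {};  // opaque caller-owned scratch space, as in the patch

    class StubsSketch {
      static bool _initialized;
     public:
      // Eager form: called exactly once from single-threaded VM startup,
      // before any compiler thread exists, so no lock is needed. Two
      // threads racing through the old 'if (!_is_initialized)' gate could
      // both have run the stub generator.
      static void initialize(BufferBlob* scratch) {
        _initialized = true;
        (void)scratch;  // ... generate all stubs into 'scratch' here ...
      }
      // Lookup no longer self-initializes; it just relies on the contract.
      static bool is_initialized() { return _initialized; }
    };
    bool StubsSketch::_initialized = false;

    int main() {
      BufferBlob scratch;
      StubsSketch::initialize(&scratch);   // during startup
      return StubsSketch::is_initialized() ? 0 : 1;
    }
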
 
--- a/hotspot/src/share/vm/c1/c1_Runtime1.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -70,18 +70,6 @@
 class Runtime1: public AllStatic {
   friend class VMStructs;
   friend class ArrayCopyStub;
- private:
-  static int desired_max_code_buffer_size() {
-    return (int) NMethodSizeLimit;  // default 256K or 512K
-  }
-  static int desired_max_constant_size() {
-    return (int) NMethodSizeLimit / 10;  // about 25K
-  }
-
-  // Note: This buffers is allocated once at startup since allocation
-  // for each compilation seems to be too expensive (at least on Intel
-  // win32).
-  static BufferBlob* _buffer_blob;
 
  public:
   enum StubID {
@@ -115,12 +103,11 @@
 #endif
 
  private:
-  static bool      _is_initialized;
   static CodeBlob* _blobs[number_of_ids];
   static const char* _blob_names[];
 
   // stub generation
-  static void generate_blob_for(StubID id);
+  static void generate_blob_for(BufferBlob* blob, StubID id);
   static OopMapSet* generate_code_for(StubID id, StubAssembler* masm);
   static OopMapSet* generate_exception_throw(StubAssembler* sasm, address target, bool has_argument);
   static void generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps, OopMap* oop_map, bool ignore_fpu_registers = false);
@@ -162,12 +149,8 @@
   static void patch_code(JavaThread* thread, StubID stub_id);
 
  public:
-  static BufferBlob* get_buffer_blob();
-  static void setup_code_buffer(CodeBuffer* cb, int call_stub_estimate);
-
   // initialization
-  static bool is_initialized()                   { return _is_initialized; }
-  static void initialize();
+  static void initialize(BufferBlob* blob);
   static void initialize_pd();
 
   // stubs
--- a/hotspot/src/share/vm/c1/c1_ValueStack.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_ValueStack.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -119,14 +119,14 @@
 
 
 // apply function to all values of a list; factored out from values_do(f)
-void ValueStack::apply(Values list, void f(Value*)) {
+void ValueStack::apply(Values list, ValueVisitor* f) {
   for (int i = 0; i < list.length(); i++) {
     Value* va = list.adr_at(i);
     Value v0 = *va;
     if (v0 != NULL) {
       if (!v0->type()->is_illegal()) {
         assert(v0->as_HiWord() == NULL, "should never see HiWord during traversal");
-        f(va);
+        f->visit(va);
 #ifdef ASSERT
         Value v1 = *va;
         if (v0 != v1) {
@@ -143,7 +143,7 @@
 }
 
 
-void ValueStack::values_do(void f(Value*)) {
+void ValueStack::values_do(ValueVisitor* f) {
   apply(_stack, f);
   apply(_locks, f);
 
--- a/hotspot/src/share/vm/c1/c1_ValueStack.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_ValueStack.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -41,7 +41,7 @@
   }
 
   // helper routine
-  static void apply(Values list, void f(Value*));
+  static void apply(Values list, ValueVisitor* f);
 
  public:
   // creation
@@ -143,7 +143,7 @@
   void pin_stack_for_linear_scan();
 
   // iteration
-  void values_do(void f(Value*));
+  void values_do(ValueVisitor* f);
 
   // untyped manipulation (for dup_x1, etc.)
   void clear_stack()                             { _stack.clear(); }
--- a/hotspot/src/share/vm/c1/c1_ValueType.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_ValueType.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -46,27 +46,26 @@
 ObjectConstant* objectNull   = NULL;
 
 
-void ValueType::initialize() {
+void ValueType::initialize(Arena* arena) {
   // Note: Must initialize all types for each compilation
   //       as they are allocated within a ResourceMark!
 
   // types
-  voidType     = new VoidType();
-  intType      = new IntType();
-  longType     = new LongType();
-  floatType    = new FloatType();
-  doubleType   = new DoubleType();
-  objectType   = new ObjectType();
-  arrayType    = new ArrayType();
-  instanceType = new InstanceType();
-  classType    = new ClassType();
-  addressType  = new AddressType();
-  illegalType  = new IllegalType();
+  voidType     = new (arena) VoidType();
+  intType      = new (arena) IntType();
+  longType     = new (arena) LongType();
+  floatType    = new (arena) FloatType();
+  doubleType   = new (arena) DoubleType();
+  objectType   = new (arena) ObjectType();
+  arrayType    = new (arena) ArrayType();
+  instanceType = new (arena) InstanceType();
+  classType    = new (arena) ClassType();
+  addressType  = new (arena) AddressType();
+  illegalType  = new (arena) IllegalType();
 
-  // constants
-  intZero     = new IntConstant(0);
-  intOne      = new IntConstant(1);
-  objectNull  = new ObjectConstant(ciNullObject::make());
+  intZero     = new (arena) IntConstant(0);
+  intOne      = new (arena) IntConstant(1);
+  objectNull  = new (arena) ObjectConstant(ciNullObject::make());
 };
 
 
--- a/hotspot/src/share/vm/c1/c1_ValueType.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_ValueType.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -94,7 +94,7 @@
 
  public:
   // initialization
-  static void initialize();
+  static void initialize(Arena* arena);
 
   // accessors
   virtual ValueType* base() const                = 0; // the 'canonical' type (e.g., intType for an IntConstant)
--- a/hotspot/src/share/vm/ci/ciMethod.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -690,20 +690,32 @@
 
 // ------------------------------------------------------------------
 // invokedynamic support
+
+// ------------------------------------------------------------------
+// ciMethod::is_method_handle_invoke
 //
+// Return true if the method is a MethodHandle target.
 bool ciMethod::is_method_handle_invoke() const {
-  check_is_loaded();
-  bool flag = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS);
+  bool flag = (holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
+               methodOopDesc::is_method_handle_invoke_name(name()->sid()));
 #ifdef ASSERT
-  {
-    VM_ENTRY_MARK;
-    bool flag2 = get_methodOop()->is_method_handle_invoke();
-    assert(flag == flag2, "consistent");
+  if (is_loaded()) {
+    bool flag2 = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS);
+    {
+      VM_ENTRY_MARK;
+      bool flag3 = get_methodOop()->is_method_handle_invoke();
+      assert(flag2 == flag3, "consistent");
+      assert(flag  == flag3, "consistent");
+    }
   }
 #endif //ASSERT
   return flag;
 }
 
+// ------------------------------------------------------------------
+// ciMethod::is_method_handle_adapter
+//
+// Return true if the method is a generated MethodHandle adapter.
 bool ciMethod::is_method_handle_adapter() const {
   check_is_loaded();
   VM_ENTRY_MARK;
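
The rewritten ciMethod::is_method_handle_invoke() answers from names alone (holder is java.dyn.MethodHandle and the method name is one of the invoke names), so it no longer requires the method to be loaded; the access-flag check is demoted to a debug-only cross-check. A sketch of that shape; the exact method-name set below is an assumption for illustration:

    #include <string>

    // Hedged sketch: the predicate is derived purely from names, so it
    // works for unloaded methods too. The name set here is illustrative,
    // not copied from the VM's symbol tables.
    bool is_method_handle_invoke_sketch(const std::string& holder,
                                        const std::string& name) {
      return holder == "java.dyn.MethodHandle" &&
             (name == "invoke" || name == "invokeExact" ||
              name == "invokeGeneric");
    }
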
--- a/hotspot/src/share/vm/ci/ciStreams.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/ci/ciStreams.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -81,27 +81,21 @@
 // providing accessors for constant pool items.
 
 // ------------------------------------------------------------------
-// ciBytecodeStream::wide
-//
-// Special handling for the wide bytcode
-Bytecodes::Code ciBytecodeStream::wide()
-{
-  // Get following bytecode; do not return wide
-  Bytecodes::Code bc = (Bytecodes::Code)_pc[1];
-  _pc += 2;                     // Skip both bytecodes
-  _pc += 2;                     // Skip index always
-  if( bc == Bytecodes::_iinc )
-    _pc += 2;                   // Skip optional constant
-  _was_wide = _pc;              // Flag last wide bytecode found
-  return bc;
-}
-
-// ------------------------------------------------------------------
-// ciBytecodeStream::table
+// ciBytecodeStream::next_wide_or_table
 //
 // Special handling for switch ops
-Bytecodes::Code ciBytecodeStream::table( Bytecodes::Code bc ) {
-  switch( bc ) {                // Check for special bytecode handling
+Bytecodes::Code ciBytecodeStream::next_wide_or_table(Bytecodes::Code bc) {
+  switch (bc) {                // Check for special bytecode handling
+  case Bytecodes::_wide:
+    // Special handling for the wide bytecode
+    // Get following bytecode; do not return wide
+    assert(Bytecodes::Code(_pc[0]) == Bytecodes::_wide, "");
+    bc = Bytecodes::java_code(_raw_bc = (Bytecodes::Code)_pc[1]);
+    assert(Bytecodes::wide_length_for(bc) > 2, "must make progress");
+    _pc += Bytecodes::wide_length_for(bc);
+    _was_wide = _pc;              // Flag last wide bytecode found
+    assert(is_wide(), "accessor works right");
+    break;
 
   case Bytecodes::_lookupswitch:
     _pc++;                      // Skip wide bytecode
@@ -164,7 +158,7 @@
 int ciBytecodeStream::get_klass_index() const {
   switch(cur_bc()) {
   case Bytecodes::_ldc:
-    return get_index();
+    return get_index_u1();
   case Bytecodes::_ldc_w:
   case Bytecodes::_ldc2_w:
   case Bytecodes::_checkcast:
@@ -173,7 +167,7 @@
   case Bytecodes::_multianewarray:
   case Bytecodes::_new:
   case Bytecodes::_newarray:
-    return get_index_big();
+    return get_index_u2();
   default:
     ShouldNotReachHere();
     return 0;
@@ -199,10 +193,10 @@
 int ciBytecodeStream::get_constant_index() const {
   switch(cur_bc()) {
   case Bytecodes::_ldc:
-    return get_index();
+    return get_index_u1();
   case Bytecodes::_ldc_w:
   case Bytecodes::_ldc2_w:
-    return get_index_big();
+    return get_index_u2();
   default:
     ShouldNotReachHere();
     return 0;
@@ -239,7 +233,7 @@
          cur_bc() == Bytecodes::_putfield ||
          cur_bc() == Bytecodes::_getstatic ||
          cur_bc() == Bytecodes::_putstatic, "wrong bc");
-  return get_index_big();
+  return get_index_u2_cpcache();
 }
 
 
@@ -319,7 +313,9 @@
     ShouldNotReachHere();
   }
 #endif
-  return get_index_int();
+  if (has_index_u4())
+    return get_index_u4();  // invokedynamic
+  return get_index_u2_cpcache();
 }
 
 // ------------------------------------------------------------------
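
next_wide_or_table() folds the old wide() helper into the switch: after a _wide prefix the following byte is the real opcode, the widened index is two bytes, and wide iinc carries an extra two-byte constant. A small sketch of that length arithmetic (opcode values are the standard JVM ones):

    #include <cassert>
    #include <cstdint>

    enum : uint8_t { OP_WIDE = 0xC4, OP_IINC = 0x84 };

    // Total instruction length including the wide prefix: 6 bytes for
    // wide iinc (prefix, opcode, 2-byte index, 2-byte constant), 4 bytes
    // for the other widened forms (prefix, opcode, 2-byte index).
    int wide_length_for(uint8_t real_opcode) {
      return real_opcode == OP_IINC ? 6 : 4;
    }

    // Advance pc past a wide instruction and report the real opcode.
    const uint8_t* skip_wide(const uint8_t* pc, uint8_t* real_opcode) {
      assert(pc[0] == OP_WIDE);
      *real_opcode = pc[1];
      return pc + wide_length_for(pc[1]);
    }
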
--- a/hotspot/src/share/vm/ci/ciStreams.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/ci/ciStreams.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,15 +31,19 @@
 // their original form during iteration.
 class ciBytecodeStream : StackObj {
 private:
- // Handling for the weird bytecodes
-  Bytecodes::Code wide();       // Handle wide bytecode
-  Bytecodes::Code table(Bytecodes::Code); // Handle complicated inline table
+  // Handling for the weird bytecodes
+  Bytecodes::Code next_wide_or_table(Bytecodes::Code); // Handle _wide & complicated inline table
 
   static Bytecodes::Code check_java(Bytecodes::Code c) {
     assert(Bytecodes::is_java_code(c), "should not return _fast bytecodes");
     return c;
   }
 
+  static Bytecodes::Code check_defined(Bytecodes::Code c) {
+    assert(Bytecodes::is_defined(c), "");
+    return c;
+  }
+
   ciMethod* _method;           // the method
   ciInstanceKlass* _holder;
   address _bc_start;            // Start of current bytecode for table
@@ -50,11 +54,21 @@
   address _end;                    // Past end of bytecodes
   address _pc;                     // Current PC
   Bytecodes::Code _bc;             // Current bytecode
+  Bytecodes::Code _raw_bc;         // Current bytecode, raw form
 
   void reset( address base, unsigned int size ) {
     _bc_start =_was_wide = 0;
     _start = _pc = base; _end = base + size; }
 
+  void assert_wide(bool require_wide) const {
+    if (require_wide)
+         { assert(is_wide(),  "must be a wide instruction"); }
+    else { assert(!is_wide(), "must not be a wide instruction"); }
+  }
+
+  Bytecode* bytecode() const { return Bytecode_at(_bc_start); }
+  Bytecode* next_bytecode() const { return Bytecode_at(_pc); }
+
 public:
   // End-Of-Bytecodes
   static Bytecodes::Code EOBC() {
@@ -92,11 +106,12 @@
   }
 
   address cur_bcp() const       { return _bc_start; }  // Returns bcp to current instruction
-  int next_bci() const          { return _pc -_start; }
+  int next_bci() const          { return _pc - _start; }
   int cur_bci() const           { return _bc_start - _start; }
   int instruction_size() const  { return _pc - _bc_start; }
 
   Bytecodes::Code cur_bc() const{ return check_java(_bc); }
+  Bytecodes::Code cur_bc_raw() const { return check_defined(_raw_bc); }
   Bytecodes::Code next_bc()     { return Bytecodes::java_code((Bytecodes::Code)* _pc); }
 
   // Return current ByteCode and increment PC to next bytecode, skipping all
@@ -109,85 +124,76 @@
 
     // Fetch Java bytecode
     // All rewritten bytecodes maintain the size of original bytecode.
-    _bc = Bytecodes::java_code((Bytecodes::Code)*_pc);
+    _bc = Bytecodes::java_code(_raw_bc = (Bytecodes::Code)*_pc);
     int csize = Bytecodes::length_for(_bc); // Expected size
-
-    if( _bc == Bytecodes::_wide ) {
-      _bc=wide();                           // Handle wide bytecode
-    } else if( csize == 0 ) {
-      _bc=table(_bc);                       // Handle inline tables
-    } else {
-      _pc += csize;                         // Bump PC past bytecode
+    _pc += csize;                           // Bump PC past bytecode
+    if (csize == 0) {
+      _bc = next_wide_or_table(_bc);
     }
     return check_java(_bc);
   }
 
   bool is_wide() const { return ( _pc == _was_wide ); }
 
+  // Does this instruction contain an index which refers into the CP cache?
+  bool uses_cp_cache() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); }
+
+  int get_index_u1() const {
+    return bytecode()->get_index_u1(cur_bc_raw());
+  }
+
   // Get a byte index following this bytecode.
   // If prefixed with a wide bytecode, get a wide index.
   int get_index() const {
-    assert_index_size(is_wide() ? 2 : 1);
     return (_pc == _was_wide)   // was widened?
-      ? Bytes::get_Java_u2(_bc_start+2) // yes, return wide index
-      : _bc_start[1];           // no, return narrow index
+      ? get_index_u2(true)      // yes, return wide index
+      : get_index_u1();         // no, return narrow index
   }
 
-  // Get 2-byte index (getfield/putstatic/etc)
-  int get_index_big() const {
-    assert_index_size(2);
-    return Bytes::get_Java_u2(_bc_start+1);
+  // Get 2-byte index (byte swapping depending on which bytecode)
+  int get_index_u2(bool is_wide = false) const {
+    return bytecode()->get_index_u2(cur_bc_raw(), is_wide);
   }
 
-  // Get 2-byte index (or 4-byte, for invokedynamic)
-  int get_index_int() const {
-    return has_giant_index() ? get_index_giant() : get_index_big();
+  // Get 2-byte index in native byte order.  (Rewriter::rewrite makes these.)
+  int get_index_u2_cpcache() const {
+    return bytecode()->get_index_u2_cpcache(cur_bc_raw());
   }
 
   // Get 4-byte index, for invokedynamic.
-  int get_index_giant() const {
-    assert_index_size(4);
-    return Bytes::get_native_u4(_bc_start+1);
+  int get_index_u4() const {
+    return bytecode()->get_index_u4(cur_bc_raw());
   }
 
-  bool has_giant_index() const { return (cur_bc() == Bytecodes::_invokedynamic); }
+  bool has_index_u4() const {
+    return bytecode()->has_index_u4(cur_bc_raw());
+  }
 
   // Get dimensions byte (multinewarray)
   int get_dimensions() const { return *(unsigned char*)(_pc-1); }
 
   // Sign-extended index byte/short, no widening
-  int get_byte() const { return (int8_t)(_pc[-1]); }
-  int get_short() const { return (int16_t)Bytes::get_Java_u2(_pc-2); }
-  int get_long() const  { return (int32_t)Bytes::get_Java_u4(_pc-4); }
+  int get_constant_u1()                     const { return bytecode()->get_constant_u1(instruction_size()-1, cur_bc_raw()); }
+  int get_constant_u2(bool is_wide = false) const { return bytecode()->get_constant_u2(instruction_size()-2, cur_bc_raw(), is_wide); }
 
   // Get a byte signed constant for "iinc".  Invalid for other bytecodes.
   // If prefixed with a wide bytecode, get a wide constant
-  int get_iinc_con() const {return (_pc==_was_wide) ? get_short() :get_byte();}
+  int get_iinc_con() const {return (_pc==_was_wide) ? (jshort) get_constant_u2(true) : (jbyte) get_constant_u1();}
 
   // 2-byte branch offset from current pc
-  int get_dest( ) const {
-    assert( Bytecodes::length_at(_bc_start) == sizeof(jshort)+1,  "get_dest called with bad bytecode" );
-    return _bc_start-_start + (short)Bytes::get_Java_u2(_pc-2);
+  int get_dest() const {
+    return cur_bci() + bytecode()->get_offset_s2(cur_bc_raw());
   }
 
   // 2-byte branch offset from next pc
-  int next_get_dest( ) const {
-    address next_bc_start = _pc;
-    assert( _pc < _end, "" );
-    Bytecodes::Code next_bc = (Bytecodes::Code)*_pc;
-    assert( next_bc != Bytecodes::_wide, "");
-    int next_csize = Bytecodes::length_for(next_bc);
-    assert( next_csize != 0, "" );
-    assert( next_bc <= Bytecodes::_jsr_w, "");
-    address next_pc = _pc + next_csize;
-    assert( Bytecodes::length_at(next_bc_start) == sizeof(jshort)+1,  "next_get_dest called with bad bytecode" );
-    return next_bc_start-_start + (short)Bytes::get_Java_u2(next_pc-2);
+  int next_get_dest() const {
+    assert(_pc < _end, "");
+    return next_bci() + next_bytecode()->get_offset_s2(Bytecodes::_ifeq);
   }
 
   // 4-byte branch offset from current pc
-  int get_far_dest( ) const {
-    assert( Bytecodes::length_at(_bc_start) == sizeof(jint)+1, "dest4 called with bad bytecode" );
-    return _bc_start-_start + (int)Bytes::get_Java_u4(_pc-4);
+  int get_far_dest() const {
+    return cur_bci() + bytecode()->get_offset_s4(cur_bc_raw());
   }
 
   // For a lookup or switch table, return target destination
@@ -234,22 +240,6 @@
 
   ciCPCache*  get_cpcache();
   ciCallSite* get_call_site();
-
- private:
-  void assert_index_size(int required_size) const {
-#ifdef ASSERT
-    int isize = instruction_size() - (is_wide() ? 1 : 0) - 1;
-    if (isize == 2 &&  cur_bc() == Bytecodes::_iinc)
-      isize = 1;
-    else if (isize <= 2)
-      ;                         // no change
-    else if (has_giant_index())
-      isize = 4;
-    else
-      isize = 2;
-    assert(isize = required_size, "wrong index size");
-#endif
-  }
 };
 
 
--- a/hotspot/src/share/vm/ci/ciTypeFlow.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/ci/ciTypeFlow.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2132,6 +2132,7 @@
   if (!Bytecodes::can_trap(str.cur_bc()))  return false;
 
   switch (str.cur_bc()) {
+    // %%% FIXME: ldc of Class can generate an exception
     case Bytecodes::_ldc:
     case Bytecodes::_ldc_w:
     case Bytecodes::_ldc2_w:
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -25,10 +25,10 @@
 #include "incls/_precompiled.incl"
 #include "incls/_classFileParser.cpp.incl"
 
-// We generally try to create the oops directly when parsing, rather than allocating
-// temporary data structures and copying the bytes twice. A temporary area is only
-// needed when parsing utf8 entries in the constant pool and when parsing line number
-// tables.
+// We generally try to create the oops directly when parsing, rather than
+// allocating temporary data structures and copying the bytes twice. A
+// temporary area is only needed when parsing utf8 entries in the constant
+// pool and when parsing line number tables.
 
 // We add asserts in debug mode when the class format is not checked.
 
@@ -47,6 +47,10 @@
 // - also used as the max version when running in jdk6
 #define JAVA_6_VERSION                    50
 
+// Used for backward compatibility reasons:
+// - to check NameAndType_info signatures more aggressively
+#define JAVA_7_VERSION                    51
+
 
 void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int length, TRAPS) {
   // Use a local copy of ClassFileStream. It helps the C++ compiler to optimize
@@ -384,6 +388,20 @@
         verify_legal_class_name(class_name, CHECK_(nullHandle));
         break;
       }
+      case JVM_CONSTANT_NameAndType: {
+        if (_need_verify && _major_version >= JAVA_7_VERSION) {
+          int sig_index = cp->signature_ref_index_at(index);
+          int name_index = cp->name_ref_index_at(index);
+          symbolHandle name(THREAD, cp->symbol_at(name_index));
+          symbolHandle sig(THREAD, cp->symbol_at(sig_index));
+          if (sig->byte_at(0) == JVM_SIGNATURE_FUNC) {
+            verify_legal_method_signature(name, sig, CHECK_(nullHandle));
+          } else {
+            verify_legal_field_signature(name, sig, CHECK_(nullHandle));
+          }
+        }
+        break;
+      }
       case JVM_CONSTANT_Fieldref:
       case JVM_CONSTANT_Methodref:
       case JVM_CONSTANT_InterfaceMethodref: {
@@ -396,10 +414,28 @@
         symbolHandle signature(THREAD, cp->symbol_at(signature_ref_index));
         if (tag == JVM_CONSTANT_Fieldref) {
           verify_legal_field_name(name, CHECK_(nullHandle));
-          verify_legal_field_signature(name, signature, CHECK_(nullHandle));
+          if (_need_verify && _major_version >= JAVA_7_VERSION) {
+            // Signature is verified above, when iterating NameAndType_info.
+            // Need only to be sure it's the right type.
+            if (signature->byte_at(0) == JVM_SIGNATURE_FUNC) {
+              throwIllegalSignature(
+                  "Field", name, signature, CHECK_(nullHandle));
+            }
+          } else {
+            verify_legal_field_signature(name, signature, CHECK_(nullHandle));
+          }
         } else {
           verify_legal_method_name(name, CHECK_(nullHandle));
-          verify_legal_method_signature(name, signature, CHECK_(nullHandle));
+          if (_need_verify && _major_version >= JAVA_7_VERSION) {
+            // Signature is verified above, when iterating NameAndType_info.
+            // Need only to be sure it's the right type.
+            if (signature->byte_at(0) != JVM_SIGNATURE_FUNC) {
+              throwIllegalSignature(
+                  "Method", name, signature, CHECK_(nullHandle));
+            }
+          } else {
+            verify_legal_method_signature(name, signature, CHECK_(nullHandle));
+          }
           if (tag == JVM_CONSTANT_Methodref) {
             // 4509014: If a class method name begins with '<', it must be "<init>".
             assert(!name.is_null(), "method name in constant pool is null");
@@ -1313,6 +1349,14 @@
   return checked_exceptions_start;
 }
 
+void ClassFileParser::throwIllegalSignature(
+    const char* type, symbolHandle name, symbolHandle sig, TRAPS) {
+  ResourceMark rm(THREAD);
+  Exceptions::fthrow(THREAD_AND_LOCATION,
+      vmSymbols::java_lang_ClassFormatError(),
+      "%s \"%s\" in class %s has illegal signature \"%s\"", type,
+      name->as_C_string(), _class_name->as_C_string(), sig->as_C_string());
+}
 
 #define MAX_ARGS_SIZE 255
 #define MAX_CODE_SIZE 65535
@@ -4058,14 +4102,7 @@
   char* p = skip_over_field_signature(bytes, false, length, CHECK);
 
   if (p == NULL || (p - bytes) != (int)length) {
-    ResourceMark rm(THREAD);
-    Exceptions::fthrow(
-      THREAD_AND_LOCATION,
-      vmSymbolHandles::java_lang_ClassFormatError(),
-      "Field \"%s\" in class %s has illegal signature \"%s\"",
-      name->as_C_string(), _class_name->as_C_string(), bytes
-    );
-    return;
+    throwIllegalSignature("Field", name, signature, CHECK);
   }
 }
 
@@ -4116,13 +4153,7 @@
     }
   }
   // Report error
-  ResourceMark rm(THREAD);
-  Exceptions::fthrow(
-    THREAD_AND_LOCATION,
-    vmSymbolHandles::java_lang_ClassFormatError(),
-    "Method \"%s\" in class %s has illegal signature \"%s\"",
-    name->as_C_string(),  _class_name->as_C_string(), p
-  );
+  throwIllegalSignature("Method", name, signature, CHECK_0);
   return 0;
 }
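
For class files of major version 51 and up, the parser now verifies NameAndType signatures eagerly, dispatching on the descriptor's first byte: '(' (JVM_SIGNATURE_FUNC) marks a method descriptor, anything else a field descriptor, so a later Fieldref/Methodref entry only has to agree on that first byte. A minimal sketch of the dispatch:

    #include <string>

    // Sketch of the version >= 51 dispatch: classify by first byte, verify
    // once at the NameAndType entry, and let Fieldref/Methodref entries
    // merely re-check the kind instead of re-verifying the whole descriptor.
    enum class DescKind { Field, Method };

    DescKind classify(const std::string& sig) {
      return (!sig.empty() && sig[0] == '(') ? DescKind::Method
                                             : DescKind::Field;
    }

    bool fieldref_consistent(const std::string& sig) {
      return classify(sig) == DescKind::Field;   // '(' would be illegal here
    }
    bool methodref_consistent(const std::string& sig) {
      return classify(sig) == DescKind::Method;  // must start with '('
    }
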
 
--- a/hotspot/src/share/vm/classfile/classFileParser.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -195,6 +195,9 @@
     if (!b) { classfile_parse_error(msg, index, name, CHECK); }
   }
 
+  void throwIllegalSignature(
+      const char* type, symbolHandle name, symbolHandle sig, TRAPS);
+
   bool is_supported_version(u2 major, u2 minor);
   bool has_illegal_visibility(jint flags);
 
--- a/hotspot/src/share/vm/classfile/verifier.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/classfile/verifier.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,8 @@
 # include "incls/_precompiled.incl"
 # include "incls/_verifier.cpp.incl"
 
+#define NOFAILOVER_MAJOR_VERSION 51
+
 // Access to external entry for VerifyClassCodes - old byte code verifier
 
 extern "C" {
@@ -91,7 +93,8 @@
           klass, message_buffer, message_buffer_len, THREAD);
         split_verifier.verify_class(THREAD);
         exception_name = split_verifier.result();
-      if (FailOverToOldVerifier && !HAS_PENDING_EXCEPTION &&
+      if (klass->major_version() < NOFAILOVER_MAJOR_VERSION &&
+          FailOverToOldVerifier && !HAS_PENDING_EXCEPTION &&
           (exception_name == vmSymbols::java_lang_VerifyError() ||
            exception_name == vmSymbols::java_lang_ClassFormatError())) {
         if (TraceClassInitialization) {
@@ -254,6 +257,9 @@
   int num_methods = methods->length();
 
   for (int index = 0; index < num_methods; index++) {
+    // Check for recursive re-verification before each method.
+    if (was_recursively_verified())  return;
+
     methodOop m = (methodOop)methods->obj_at(index);
     if (m->is_native() || m->is_abstract()) {
       // If m is native or abstract, skip it.  It is checked in class file
@@ -262,6 +268,12 @@
     }
     verify_method(methodHandle(THREAD, m), CHECK_VERIFY(this));
   }
+
+  if (_verify_verbose || TraceClassInitialization) {
+    if (was_recursively_verified())
+      tty->print_cr("Recursive verification detected for: %s",
+          _klass->external_name());
+  }
 }
 
 void ClassVerifier::verify_method(methodHandle m, TRAPS) {
@@ -326,6 +338,9 @@
                                 // instruction in sequence
   Bytecodes::Code opcode;
   while (!bcs.is_last_bytecode()) {
+    // Check for recursive re-verification before each bytecode.
+    if (was_recursively_verified())  return;
+
     opcode = bcs.raw_next();
     u2 bci = bcs.bci();
 
@@ -410,13 +425,13 @@
           no_control_flow = false; break;
         case Bytecodes::_ldc :
           verify_ldc(
-            opcode, bcs.get_index(), &current_frame,
+            opcode, bcs.get_index_u1(), &current_frame,
             cp, bci, CHECK_VERIFY(this));
           no_control_flow = false; break;
         case Bytecodes::_ldc_w :
         case Bytecodes::_ldc2_w :
           verify_ldc(
-            opcode, bcs.get_index_big(), &current_frame,
+            opcode, bcs.get_index_u2(), &current_frame,
             cp, bci, CHECK_VERIFY(this));
           no_control_flow = false; break;
         case Bytecodes::_iload :
@@ -1182,7 +1197,7 @@
           no_control_flow = false; break;
         case Bytecodes::_new :
         {
-          index = bcs.get_index_big();
+          index = bcs.get_index_u2();
           verify_cp_class_type(index, cp, CHECK_VERIFY(this));
           VerificationType new_class_type =
             cp_index_to_type(index, cp, CHECK_VERIFY(this));
@@ -1202,7 +1217,7 @@
           no_control_flow = false; break;
         case Bytecodes::_anewarray :
           verify_anewarray(
-            bcs.get_index_big(), cp, &current_frame, CHECK_VERIFY(this));
+            bcs.get_index_u2(), cp, &current_frame, CHECK_VERIFY(this));
           no_control_flow = false; break;
         case Bytecodes::_arraylength :
           type = current_frame.pop_stack(
@@ -1215,7 +1230,7 @@
           no_control_flow = false; break;
         case Bytecodes::_checkcast :
         {
-          index = bcs.get_index_big();
+          index = bcs.get_index_u2();
           verify_cp_class_type(index, cp, CHECK_VERIFY(this));
           current_frame.pop_stack(
             VerificationType::reference_check(), CHECK_VERIFY(this));
@@ -1225,7 +1240,7 @@
           no_control_flow = false; break;
         }
         case Bytecodes::_instanceof : {
-          index = bcs.get_index_big();
+          index = bcs.get_index_u2();
           verify_cp_class_type(index, cp, CHECK_VERIFY(this));
           current_frame.pop_stack(
             VerificationType::reference_check(), CHECK_VERIFY(this));
@@ -1240,7 +1255,7 @@
           no_control_flow = false; break;
         case Bytecodes::_multianewarray :
         {
-          index = bcs.get_index_big();
+          index = bcs.get_index_u2();
           u2 dim = *(bcs.bcp()+3);
           verify_cp_class_type(index, cp, CHECK_VERIFY(this));
           VerificationType new_array_type =
@@ -1299,7 +1314,7 @@
   while (!bcs.is_last_bytecode()) {
     if (bcs.raw_next() != Bytecodes::_illegal) {
       int bci = bcs.bci();
-      if (bcs.code() == Bytecodes::_new) {
+      if (bcs.raw_code() == Bytecodes::_new) {
         code_data[bci] = NEW_OFFSET;
       } else {
         code_data[bci] = BYTECODE_OFFSET;
@@ -1470,20 +1485,9 @@
 
   // In some situations, bytecode rewriting may occur while we're verifying.
   // In this case, a constant pool cache exists and some indices refer to that
-  // instead.  Get the original index for the tag check
-  constantPoolCacheOop cache = cp->cache();
-  if (cache != NULL &&
-       ((types == (1 <<  JVM_CONSTANT_InterfaceMethodref)) ||
-        (types == (1 <<  JVM_CONSTANT_Methodref)) ||
-        (types == (1 <<  JVM_CONSTANT_Fieldref)))) {
-    int native_index = index;
-    if (Bytes::is_Java_byte_ordering_different()) {
-      native_index = Bytes::swap_u2(index);
-    }
-    assert((native_index >= 0) && (native_index < cache->length()),
-      "Must be a legal index into the cp cache");
-    index = cache->entry_at(native_index)->constant_pool_index();
-  }
+  // instead.  Be sure we don't pick up such indices by accident.
+  // We must check was_recursively_verified() before we get here.
+  guarantee(cp->cache() == NULL, "not rewritten yet");
 
   verify_cp_index(cp, index, CHECK_VERIFY(this));
   unsigned int tag = cp->tag_at(index).value();
@@ -1654,7 +1658,7 @@
   int keys, delta;
   current_frame->pop_stack(
     VerificationType::integer_type(), CHECK_VERIFY(this));
-  if (bcs->code() == Bytecodes::_tableswitch) {
+  if (bcs->raw_code() == Bytecodes::_tableswitch) {
     jint low = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
     jint high = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
     if (low > high) {
@@ -1710,7 +1714,7 @@
                                               StackMapFrame* current_frame,
                                               constantPoolHandle cp,
                                               TRAPS) {
-  u2 index = bcs->get_index_big();
+  u2 index = bcs->get_index_u2();
   verify_cp_type(index, cp, 1 << JVM_CONSTANT_Fieldref, CHECK_VERIFY(this));
 
   // Get field name and signature
@@ -1750,7 +1754,7 @@
     &sig_stream, field_type, CHECK_VERIFY(this));
   u2 bci = bcs->bci();
   bool is_assignable;
-  switch (bcs->code()) {
+  switch (bcs->raw_code()) {
     case Bytecodes::_getstatic: {
       for (int i = 0; i < n; i++) {
         current_frame->push_stack(field_type[i], CHECK_VERIFY(this));
@@ -1870,7 +1874,7 @@
         ref_class_type.name(), CHECK_VERIFY(this));
       methodOop m = instanceKlass::cast(ref_klass)->uncached_lookup_method(
         vmSymbols::object_initializer_name(),
-        cp->signature_ref_at(bcs->get_index_big()));
+        cp->signature_ref_at(bcs->get_index_u2()));
       instanceKlassHandle mh(THREAD, m->method_holder());
       if (m->is_protected() && !mh->is_same_class_package(_klass())) {
         bool assignable = current_type().is_assignable_from(
@@ -1893,8 +1897,8 @@
     bool *this_uninit, VerificationType return_type,
     constantPoolHandle cp, TRAPS) {
   // Make sure the constant pool item is the right type
-  u2 index = bcs->get_index_big();
-  Bytecodes::Code opcode = bcs->code();
+  u2 index = bcs->get_index_u2();
+  Bytecodes::Code opcode = bcs->raw_code();
   unsigned int types = (opcode == Bytecodes::_invokeinterface
                                 ? 1 << JVM_CONSTANT_InterfaceMethodref
                       : opcode == Bytecodes::_invokedynamic
--- a/hotspot/src/share/vm/classfile/verifier.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/classfile/verifier.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -158,6 +158,16 @@
   methodHandle        _method; // current method being verified
   VerificationType    _this_type; // the verification type of the current class
 
+  // Some recursive calls from the verifier to the name resolver
+  // can cause the current class to be re-verified and rewritten.
+  // If this happens, the original verification should not continue,
+  // because constant pool indexes will have changed.
+  // The rewriter is preceded by the verifier.  If the verifier throws
+  // an error, rewriting is prevented.  Also, rewriting always precedes
+  // bytecode execution or compilation.  Thus, is_rewritten implies
+  // that a class has been verified and prepared for execution.
+  bool was_recursively_verified() { return _klass->is_rewritten(); }
+
  public:
   enum {
     BYTECODE_OFFSET = 1,
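
The was_recursively_verified() guard leans on the ordering invariant spelled out in the comment above: rewriting only ever follows successful verification, so is_rewritten doubles as "this class was already re-verified underneath us". A minimal sketch of the bail-out pattern the patch threads through the verifier loops (names invented):

    // Before each method and before each bytecode, check whether nested
    // resolution has already re-verified and rewritten this class; if so,
    // stop, because the constant pool indices may have changed under us.
    struct KlassState { bool rewritten; };

    void verify_all_methods(KlassState* k, int num_methods) {
      for (int i = 0; i < num_methods; i++) {
        if (k->rewritten) return;   // was_recursively_verified()
        // ... verify_method(i), which can recurse into class resolution
        //     and, on success there, leave the class rewritten ...
      }
    }
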
--- a/hotspot/src/share/vm/code/codeBlob.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/code/codeBlob.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -66,8 +66,6 @@
   _relocation_size       = locs_size;
   _instructions_offset   = align_code_offset(header_size + locs_size);
   _data_offset           = size;
-  _oops_offset           = size;
-  _oops_length           =  0;
   _frame_size            =  0;
   set_oop_maps(NULL);
 }
@@ -94,9 +92,6 @@
   _relocation_size       = round_to(cb->total_relocation_size(), oopSize);
   _instructions_offset   = align_code_offset(header_size + _relocation_size);
   _data_offset           = _instructions_offset + round_to(cb->total_code_size(), oopSize);
-  _oops_offset           = _size - round_to(cb->total_oop_size(), oopSize);
-  _oops_length           = 0;  // temporary, until the copy_oops handshake
-  assert(_oops_offset >=   _data_offset, "codeBlob is too small");
   assert(_data_offset <= size, "codeBlob is too small");
 
   cb->copy_code_and_locs_to(this);
@@ -131,99 +126,6 @@
 }
 
 
-// Promote one word from an assembly-time handle to a live embedded oop.
-inline void CodeBlob::initialize_immediate_oop(oop* dest, jobject handle) {
-  if (handle == NULL ||
-      // As a special case, IC oops are initialized to 1 or -1.
-      handle == (jobject) Universe::non_oop_word()) {
-    (*dest) = (oop)handle;
-  } else {
-    (*dest) = JNIHandles::resolve_non_null(handle);
-  }
-}
-
-
-void CodeBlob::copy_oops(GrowableArray<jobject>* array) {
-  assert(_oops_length == 0, "do this handshake just once, please");
-  int length = array->length();
-  assert((address)(oops_begin() + length) <= data_end(), "oops big enough");
-  oop* dest = oops_begin();
-  for (int index = 0 ; index < length; index++) {
-    initialize_immediate_oop(&dest[index], array->at(index));
-  }
-  _oops_length = length;
-
-  // Now we can fix up all the oops in the code.
-  // We need to do this in the code because
-  // the assembler uses jobjects as placeholders.
-  // The code and relocations have already been
-  // initialized by the CodeBlob constructor,
-  // so it is valid even at this early point to
-  // iterate over relocations and patch the code.
-  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
-}
-
-
-relocInfo::relocType CodeBlob::reloc_type_for_address(address pc) {
-  RelocIterator iter(this, pc, pc+1);
-  while (iter.next()) {
-    return (relocInfo::relocType) iter.type();
-  }
-  // No relocation info found for pc
-  ShouldNotReachHere();
-  return relocInfo::none; // dummy return value
-}
-
-
-bool CodeBlob::is_at_poll_return(address pc) {
-  RelocIterator iter(this, pc, pc+1);
-  while (iter.next()) {
-    if (iter.type() == relocInfo::poll_return_type)
-      return true;
-  }
-  return false;
-}
-
-
-bool CodeBlob::is_at_poll_or_poll_return(address pc) {
-  RelocIterator iter(this, pc, pc+1);
-  while (iter.next()) {
-    relocInfo::relocType t = iter.type();
-    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
-      return true;
-  }
-  return false;
-}
-
-
-void CodeBlob::fix_oop_relocations(address begin, address end,
-                                   bool initialize_immediates) {
-  // re-patch all oop-bearing instructions, just in case some oops moved
-  RelocIterator iter(this, begin, end);
-  while (iter.next()) {
-    if (iter.type() == relocInfo::oop_type) {
-      oop_Relocation* reloc = iter.oop_reloc();
-      if (initialize_immediates && reloc->oop_is_immediate()) {
-        oop* dest = reloc->oop_addr();
-        initialize_immediate_oop(dest, (jobject) *dest);
-      }
-      // Refresh the oop-related bits of this instruction.
-      reloc->fix_oop_relocation();
-    }
-
-    // There must not be any interfering patches or breakpoints.
-    assert(!(iter.type() == relocInfo::breakpoint_type
-             && iter.breakpoint_reloc()->active()),
-           "no active breakpoint");
-  }
-}
-
-void CodeBlob::do_unloading(BoolObjectClosure* is_alive,
-                            OopClosure* keep_alive,
-                            bool unloading_occurred) {
-  ShouldNotReachHere();
-}
-
 OopMap* CodeBlob::oop_map_for_return_address(address return_address) {
   address pc = return_address ;
   assert (oop_maps() != NULL, "nope");
--- a/hotspot/src/share/vm/code/codeBlob.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/code/codeBlob.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -54,17 +54,12 @@
                                                  // that range. There is a similar range(s) on returns
                                                  // which we don't detect.
   int        _data_offset;                       // offset to where data region begins
-  int        _oops_offset;                       // offset to where embedded oop table begins (inside data)
-  int        _oops_length;                       // number of embedded oops
   int        _frame_size;                        // size of stack frame
   OopMapSet* _oop_maps;                          // OopMap for this CodeBlob
   CodeComments _comments;
 
   friend class OopRecorder;
 
-  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
-  inline void initialize_immediate_oop(oop* dest, jobject handle);
-
  public:
   // Returns the space needed for CodeBlob
   static unsigned int allocation_size(CodeBuffer* cb, int header_size);
@@ -115,14 +110,11 @@
   address    instructions_end() const            { return (address)    header_begin() + _data_offset; }
   address    data_begin() const                  { return (address)    header_begin() + _data_offset; }
   address    data_end() const                    { return (address)    header_begin() + _size; }
-  oop*       oops_begin() const                  { return (oop*)      (header_begin() + _oops_offset); }
-  oop*       oops_end() const                    { return                oops_begin() + _oops_length; }
 
   // Offsets
   int relocation_offset() const                  { return _header_size; }
   int instructions_offset() const                { return _instructions_offset; }
   int data_offset() const                        { return _data_offset; }
-  int oops_offset() const                        { return _oops_offset; }
 
   // Sizes
   int size() const                               { return _size; }
@@ -130,40 +122,16 @@
   int relocation_size() const                    { return (address) relocation_end() - (address) relocation_begin(); }
   int instructions_size() const                  { return instructions_end() - instructions_begin();  }
   int data_size() const                          { return data_end() - data_begin(); }
-  int oops_size() const                          { return (address) oops_end() - (address) oops_begin(); }
 
   // Containment
   bool blob_contains(address addr) const         { return header_begin()       <= addr && addr < data_end(); }
   bool relocation_contains(relocInfo* addr) const{ return relocation_begin()   <= addr && addr < relocation_end(); }
   bool instructions_contains(address addr) const { return instructions_begin() <= addr && addr < instructions_end(); }
   bool data_contains(address addr) const         { return data_begin()         <= addr && addr < data_end(); }
-  bool oops_contains(oop* addr) const            { return oops_begin()         <= addr && addr < oops_end(); }
   bool contains(address addr) const              { return instructions_contains(addr); }
   bool is_frame_complete_at(address addr) const  { return instructions_contains(addr) &&
                                                           addr >= instructions_begin() + _frame_complete_offset; }
 
-  // Relocation support
-  void fix_oop_relocations(address begin, address end) {
-    fix_oop_relocations(begin, end, false);
-  }
-  void fix_oop_relocations() {
-    fix_oop_relocations(NULL, NULL, false);
-  }
-  relocInfo::relocType reloc_type_for_address(address pc);
-  bool is_at_poll_return(address pc);
-  bool is_at_poll_or_poll_return(address pc);
-
-  // Support for oops in scopes and relocs:
-  // Note: index 0 is reserved for null.
-  oop  oop_at(int index) const                   { return index == 0? (oop)NULL: *oop_addr_at(index); }
-  oop* oop_addr_at(int index) const{             // for GC
-    // relocation indexes are biased by 1 (because 0 is reserved)
-    assert(index > 0 && index <= _oops_length, "must be a valid non-zero index");
-    return &oops_begin()[index-1];
-  }
-
-  void copy_oops(GrowableArray<jobject>* oops);
-
   // CodeCache support: really only used by the nmethods, but in order to get
   // asserts and certain bookkeeping to work in the CodeCache they are defined
   // virtual here.
@@ -175,12 +143,6 @@
 
   // GC support
   virtual bool is_alive() const                  = 0;
-  virtual void do_unloading(BoolObjectClosure* is_alive,
-                            OopClosure* keep_alive,
-                            bool unloading_occurred);
-  virtual void oops_do(OopClosure* f) = 0;
-  // (All CodeBlob subtypes other than NMethod currently have
-  // an empty oops_do() method.
 
   // OopMap for frame
   OopMapSet* oop_maps() const                    { return _oop_maps; }
@@ -245,11 +207,6 @@
   // GC/Verification support
   void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f)  { /* nothing to do */ }
   bool is_alive() const                          { return true; }
-  void do_unloading(BoolObjectClosure* is_alive,
-                    OopClosure* keep_alive,
-                    bool unloading_occurred)     { /* do nothing */ }
-
-  void oops_do(OopClosure* f)                    { /* do nothing*/ }
 
   void verify();
   void print() const                             PRODUCT_RETURN;
@@ -334,10 +291,6 @@
   // GC/Verification support
   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f)  { /* nothing to do */ }
   bool is_alive() const                          { return true; }
-  void do_unloading(BoolObjectClosure* is_alive,
-                    OopClosure* keep_alive,
-                    bool unloading_occurred)     { /* do nothing */ }
-  void oops_do(OopClosure* f) { /* do-nothing*/ }
 
   void verify();
   void print() const                             PRODUCT_RETURN;
@@ -363,9 +316,6 @@
    {};
 
    bool is_alive() const                         { return true; }
-   void do_unloading(BoolObjectClosure* is_alive,
-                     OopClosure* keep_alive,
-                     bool unloading_occurred)    { /* do-nothing*/ }
 
    void verify(); // does nothing
    void print() const                            PRODUCT_RETURN;
@@ -423,9 +373,6 @@
   // GC for args
   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ }
 
-  // Iteration
-  void oops_do(OopClosure* f) {}
-
   // Printing
   void print_value_on(outputStream* st) const PRODUCT_RETURN;
 
@@ -477,9 +424,6 @@
 
   // Typing
   bool is_uncommon_trap_stub() const             { return true; }
-
-  // Iteration
-  void oops_do(OopClosure* f) {}
 };
 
 
@@ -512,9 +456,6 @@
 
   // Typing
   bool is_exception_stub() const                 { return true; }
-
-  // Iteration
-  void oops_do(OopClosure* f) {}
 };
 #endif // COMPILER2
 
@@ -548,7 +489,4 @@
 
   // Typing
   bool is_safepoint_stub() const                 { return true; }
-
-  // Iteration
-  void oops_do(OopClosure* f) {}
 };
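
Taken together, the codeBlob changes pull embedded-oop storage and the GC hooks that walk it down from the shared CodeBlob base class into nmethod, the only subtype that embeds oops, which is why every stub class above simply loses its empty oops_do()/do_unloading() bodies. A rough sketch of the shape of that refactor (invented class names, not the real hierarchy):

    // Move-members-down refactor: base keeps only what all blobs share,
    // the oop bookkeeping becomes an nmethod-only concern.
    class CodeBlobSketch {
     public:
      virtual ~CodeBlobSketch() {}
      virtual bool is_alive() const = 0;      // still shared by all blobs
    };

    class NmethodSketch : public CodeBlobSketch {
      int _oops_offset;                       // pulled down from the base
     public:
      NmethodSketch() : _oops_offset(0) {}
      bool is_alive() const override { return true; }
      void fix_oop_relocations() {}           // nmethod-only concern now
    };
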
--- a/hotspot/src/share/vm/code/codeCache.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -74,12 +74,12 @@
     total_size       += cb->size();
     header_size      += cb->header_size();
     relocation_size  += cb->relocation_size();
-    scopes_oop_size  += cb->oops_size();
     if (cb->is_nmethod()) {
-      nmethod *nm = (nmethod*)cb;
+      nmethod* nm = cb->as_nmethod_or_null();
       code_size        += nm->code_size();
       stub_size        += nm->stub_size();
 
+      scopes_oop_size  += nm->oops_size();
       scopes_data_size += nm->scopes_data_size();
       scopes_pcs_size  += nm->scopes_pcs_size();
     } else {
@@ -262,14 +262,14 @@
 }
 
 
-// Mark code blobs for unloading if they contain otherwise
-// unreachable oops.
+// Mark nmethods for unloading if they contain otherwise unreachable
+// oops.
 void CodeCache::do_unloading(BoolObjectClosure* is_alive,
                              OopClosure* keep_alive,
                              bool unloading_occurred) {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_ALIVE_BLOBS(cb) {
-    cb->do_unloading(is_alive, keep_alive, unloading_occurred);
+  FOR_ALL_ALIVE_NMETHODS(nm) {
+    nm->do_unloading(is_alive, keep_alive, unloading_occurred);
   }
 }
 
@@ -509,9 +509,9 @@
       if (needs_cache_clean()) {
         nm->cleanup_inline_caches();
       }
-      debug_only(nm->verify();)
+      DEBUG_ONLY(nm->verify());
+      nm->fix_oop_relocations();
     }
-    cb->fix_oop_relocations();
   }
   set_needs_cache_clean(false);
   prune_scavenge_root_nmethods();
--- a/hotspot/src/share/vm/code/compiledIC.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/code/compiledIC.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -441,11 +441,11 @@
 }
 
 
-inline static RelocIterator parse_ic(CodeBlob* code, address ic_call, oop* &_oop_addr, bool *is_optimized) {
+inline static RelocIterator parse_ic(nmethod* nm, address ic_call, oop* &_oop_addr, bool *is_optimized) {
    address  first_oop = NULL;
    // Mergers please note: Sun SC5.x CC insists on an lvalue for a reference parameter.
-   CodeBlob *code1 = code;
-   return virtual_call_Relocation::parse_ic(code1, ic_call, first_oop, _oop_addr, is_optimized);
+   nmethod* tmp_nm = nm;
+   return virtual_call_Relocation::parse_ic(tmp_nm, ic_call, first_oop, _oop_addr, is_optimized);
 }
 
 CompiledIC::CompiledIC(NativeCall* ic_call)
--- a/hotspot/src/share/vm/code/nmethod.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -99,12 +99,12 @@
     code_size           += nm->code_size();
     stub_size           += nm->stub_size();
     consts_size         += nm->consts_size();
+    oops_size           += nm->oops_size();
     scopes_data_size    += nm->scopes_data_size();
     scopes_pcs_size     += nm->scopes_pcs_size();
     dependencies_size   += nm->dependencies_size();
     handler_table_size  += nm->handler_table_size();
     nul_chk_table_size  += nm->nul_chk_table_size();
-    oops_size += nm->oops_size();
   }
   void print_nmethod_stats() {
     if (nmethod_count == 0)  return;
@@ -114,12 +114,12 @@
     if (code_size != 0)           tty->print_cr(" main code      = %d", code_size);
     if (stub_size != 0)           tty->print_cr(" stub code      = %d", stub_size);
     if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
+    if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
     if (scopes_data_size != 0)    tty->print_cr(" scopes data    = %d", scopes_data_size);
     if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs     = %d", scopes_pcs_size);
     if (dependencies_size != 0)   tty->print_cr(" dependencies   = %d", dependencies_size);
     if (handler_table_size != 0)  tty->print_cr(" handler table  = %d", handler_table_size);
     if (nul_chk_table_size != 0)  tty->print_cr(" nul chk table  = %d", nul_chk_table_size);
-    if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
   }
 
   int native_nmethod_count;
@@ -600,7 +600,8 @@
 #endif // def HAVE_DTRACE_H
     _stub_offset             = data_offset();
     _consts_offset           = data_offset();
-    _scopes_data_offset      = data_offset();
+    _oops_offset             = data_offset();
+    _scopes_data_offset      = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
     _scopes_pcs_offset       = _scopes_data_offset;
     _dependencies_offset     = _scopes_pcs_offset;
     _handler_table_offset    = _dependencies_offset;
@@ -690,7 +691,8 @@
     _orig_pc_offset          = 0;
     _stub_offset             = data_offset();
     _consts_offset           = data_offset();
-    _scopes_data_offset      = data_offset();
+    _oops_offset             = data_offset();
+    _scopes_data_offset      = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
     _scopes_pcs_offset       = _scopes_data_offset;
     _dependencies_offset     = _scopes_pcs_offset;
     _handler_table_offset    = _dependencies_offset;
@@ -805,8 +807,9 @@
       _unwind_handler_offset   = -1;
     }
     _consts_offset           = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start());
-    _scopes_data_offset      = data_offset();
-    _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size         (), oopSize);
+    _oops_offset             = data_offset();
+    _scopes_data_offset      = _oops_offset          + round_to(code_buffer->total_oop_size (), oopSize);
+    _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size       (), oopSize);
     _dependencies_offset     = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
     _handler_table_offset    = _dependencies_offset  + round_to(dependencies->size_in_bytes (), oopSize);
     _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
@@ -990,6 +993,79 @@
 }
 
 
+// Promote one word from an assembly-time handle to a live embedded oop.
+inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
+  if (handle == NULL ||
+      // As a special case, IC oops are initialized to 1 or -1.
+      handle == (jobject) Universe::non_oop_word()) {
+    (*dest) = (oop) handle;
+  } else {
+    (*dest) = JNIHandles::resolve_non_null(handle);
+  }
+}
+
+
+void nmethod::copy_oops(GrowableArray<jobject>* array) {
+  //assert(oops_size() == 0, "do this handshake just once, please");
+  int length = array->length();
+  assert((address)(oops_begin() + length) <= data_end(), "oops big enough");
+  oop* dest = oops_begin();
+  for (int index = 0 ; index < length; index++) {
+    initialize_immediate_oop(&dest[index], array->at(index));
+  }
+
+  // Now we can fix up all the oops in the code.  We need to do this
+  // in the code because the assembler uses jobjects as placeholders.
+  // The code and relocations have already been initialized by the
+  // CodeBlob constructor, so it is valid even at this early point to
+  // iterate over relocations and patch the code.
+  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
+}
+
+
+bool nmethod::is_at_poll_return(address pc) {
+  RelocIterator iter(this, pc, pc+1);
+  while (iter.next()) {
+    if (iter.type() == relocInfo::poll_return_type)
+      return true;
+  }
+  return false;
+}
+
+
+bool nmethod::is_at_poll_or_poll_return(address pc) {
+  RelocIterator iter(this, pc, pc+1);
+  while (iter.next()) {
+    relocInfo::relocType t = iter.type();
+    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
+      return true;
+  }
+  return false;
+}
+
+
+void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
+  // re-patch all oop-bearing instructions, just in case some oops moved
+  RelocIterator iter(this, begin, end);
+  while (iter.next()) {
+    if (iter.type() == relocInfo::oop_type) {
+      oop_Relocation* reloc = iter.oop_reloc();
+      if (initialize_immediates && reloc->oop_is_immediate()) {
+        oop* dest = reloc->oop_addr();
+        initialize_immediate_oop(dest, (jobject) *dest);
+      }
+      // Refresh the oop-related bits of this instruction.
+      reloc->fix_oop_relocation();
+    }
+
+    // There must not be any interfering patches or breakpoints.
+    assert(!(iter.type() == relocInfo::breakpoint_type
+             && iter.breakpoint_reloc()->active()),
+           "no active breakpoint");
+  }
+}
+
+
 ScopeDesc* nmethod::scope_desc_at(address pc) {
   PcDesc* pd = pc_desc_at(pc);
   guarantee(pd != NULL, "scope must be present");
@@ -1266,19 +1342,7 @@
   // and it hasn't already been reported for this nmethod then report it now.
  // (the event may have been reported earlier if the GC marked it for unloading).
   if (state == zombie) {
-
-    DTRACE_METHOD_UNLOAD_PROBE(method());
-
-    if (JvmtiExport::should_post_compiled_method_unload() &&
-        !unload_reported()) {
-      assert(method() != NULL, "checking");
-      {
-        HandleMark hm;
-        JvmtiExport::post_compiled_method_unload_at_safepoint(
-            method()->jmethod_id(), code_begin());
-      }
-      set_unload_reported();
-    }
+    post_compiled_method_unload();
   }
 
 
@@ -1430,6 +1494,12 @@
 }
 
 void nmethod::post_compiled_method_unload() {
+  if (unload_reported()) {
+    // During unloading we transition to unloaded and then to zombie
+    // and the unloading is reported during the first transition.
+    return;
+  }
+
   assert(_method != NULL && !is_unloaded(), "just checking");
   DTRACE_METHOD_UNLOAD_PROBE(method());
 
@@ -1439,8 +1509,7 @@
   if (JvmtiExport::should_post_compiled_method_unload()) {
     assert(!unload_reported(), "already unloaded");
     HandleMark hm;
-    JvmtiExport::post_compiled_method_unload_at_safepoint(
-                      method()->jmethod_id(), code_begin());
+    JvmtiExport::post_compiled_method_unload(method()->jmethod_id(), code_begin());
   }
 
   // The JVMTI CompiledMethodUnload event can be enabled or disabled at
@@ -2282,6 +2351,10 @@
                                               consts_begin(),
                                               consts_end(),
                                               consts_size());
+  if (oops_size         () > 0) tty->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              oops_begin(),
+                                              oops_end(),
+                                              oops_size());
   if (scopes_data_size  () > 0) tty->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                               scopes_data_begin(),
                                               scopes_data_end(),
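
The new copy_oops/initialize_immediate_oop pair above fills the nmethod's embedded oop table from the assembler's jobject handles, passing NULL and the non-oop IC sentinel through verbatim and resolving everything else. A self-contained toy model of that decision; oop_t, handle_t, and NON_OOP_WORD are stand-ins, not the HotSpot definitions:

    #include <cassert>
    #include <cstddef>

    // Toy model: a "handle" is a pointer to a slot holding the real object
    // pointer, the way JNI handles indirect to oops.
    typedef void*  oop_t;
    typedef oop_t* handle_t;

    static oop_t NON_OOP_WORD = reinterpret_cast<oop_t>(-1);  // stand-in sentinel

    static void initialize_immediate(oop_t* dest, handle_t h) {
        if (h == NULL || reinterpret_cast<oop_t>(h) == NON_OOP_WORD) {
            *dest = reinterpret_cast<oop_t>(h);   // keep NULL / sentinel verbatim
        } else {
            *dest = *h;                           // resolve handle to real pointer
        }
    }

    int main() {
        int obj = 42;
        oop_t slot = &obj;
        handle_t handles[3] =
            { &slot, NULL, reinterpret_cast<handle_t>(NON_OOP_WORD) };
        oop_t table[3];
        for (int i = 0; i < 3; i++) initialize_immediate(&table[i], handles[i]);
        assert(table[0] == &obj && table[1] == NULL && table[2] == NON_OOP_WORD);
        return 0;
    }
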
--- a/hotspot/src/share/vm/code/nmethod.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -105,6 +105,7 @@
 //  [Relocation]
 //  - relocation information
 //  - constant part          (doubles, longs and floats used in nmethod)
+//  - oop table
 //  [Code]
 //  - code body
 //  - exception handler
@@ -161,6 +162,7 @@
 #endif // def HAVE_DTRACE_H
   int _stub_offset;
   int _consts_offset;
+  int _oops_offset;                       // offset to where embedded oop table begins (inside data)
   int _scopes_data_offset;
   int _scopes_pcs_offset;
   int _dependencies_offset;
@@ -347,7 +349,10 @@
   address stub_begin            () const          { return           header_begin() + _stub_offset          ; }
   address stub_end              () const          { return           header_begin() + _consts_offset        ; }
   address consts_begin          () const          { return           header_begin() + _consts_offset        ; }
-  address consts_end            () const          { return           header_begin() + _scopes_data_offset   ; }
+  address consts_end            () const          { return           header_begin() + _oops_offset          ; }
+  oop*    oops_begin            () const          { return (oop*)   (header_begin() + _oops_offset)         ; }
+  oop*    oops_end              () const          { return (oop*)   (header_begin() + _scopes_data_offset)  ; }
+
   address scopes_data_begin     () const          { return           header_begin() + _scopes_data_offset   ; }
   address scopes_data_end       () const          { return           header_begin() + _scopes_pcs_offset    ; }
   PcDesc* scopes_pcs_begin      () const          { return (PcDesc*)(header_begin() + _scopes_pcs_offset   ); }
@@ -359,20 +364,24 @@
   address nul_chk_table_begin   () const          { return           header_begin() + _nul_chk_table_offset ; }
   address nul_chk_table_end     () const          { return           header_begin() + _nmethod_end_offset   ; }
 
-  int code_size         () const                  { return      code_end         () -      code_begin         (); }
-  int stub_size         () const                  { return      stub_end         () -      stub_begin         (); }
-  int consts_size       () const                  { return      consts_end       () -      consts_begin       (); }
-  int scopes_data_size  () const                  { return      scopes_data_end  () -      scopes_data_begin  (); }
-  int scopes_pcs_size   () const                  { return (intptr_t)scopes_pcs_end   () - (intptr_t)scopes_pcs_begin   (); }
-  int dependencies_size () const                  { return      dependencies_end () -      dependencies_begin (); }
-  int handler_table_size() const                  { return      handler_table_end() -      handler_table_begin(); }
-  int nul_chk_table_size() const                  { return      nul_chk_table_end() -      nul_chk_table_begin(); }
+  // Sizes
+  int code_size         () const                  { return            code_end         () -            code_begin         (); }
+  int stub_size         () const                  { return            stub_end         () -            stub_begin         (); }
+  int consts_size       () const                  { return            consts_end       () -            consts_begin       (); }
+  int oops_size         () const                  { return (address)  oops_end         () - (address)  oops_begin         (); }
+  int scopes_data_size  () const                  { return            scopes_data_end  () -            scopes_data_begin  (); }
+  int scopes_pcs_size   () const                  { return (intptr_t) scopes_pcs_end   () - (intptr_t) scopes_pcs_begin   (); }
+  int dependencies_size () const                  { return            dependencies_end () -            dependencies_begin (); }
+  int handler_table_size() const                  { return            handler_table_end() -            handler_table_begin(); }
+  int nul_chk_table_size() const                  { return            nul_chk_table_end() -            nul_chk_table_begin(); }
 
   int total_size        () const;
 
+  // Containment
   bool code_contains         (address addr) const { return code_begin         () <= addr && addr < code_end         (); }
   bool stub_contains         (address addr) const { return stub_begin         () <= addr && addr < stub_end         (); }
   bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
+  bool oops_contains         (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
   bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
   bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
   bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
@@ -431,6 +440,29 @@
   int   version() const                           { return flags.version; }
   void  set_version(int v);
 
+  // Support for oops in scopes and relocs:
+  // Note: index 0 is reserved for null.
+  oop   oop_at(int index) const                   { return index == 0 ? (oop) NULL: *oop_addr_at(index); }
+  oop*  oop_addr_at(int index) const {  // for GC
+    // relocation indexes are biased by 1 (because 0 is reserved)
+    assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
+    return &oops_begin()[index - 1];
+  }
+
+  void copy_oops(GrowableArray<jobject>* oops);
+
+  // Relocation support
+private:
+  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
+  inline void initialize_immediate_oop(oop* dest, jobject handle);
+
+public:
+  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
+  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }
+
+  bool is_at_poll_return(address pc);
+  bool is_at_poll_or_poll_return(address pc);
+
   // Non-perm oop support
   bool  on_scavenge_root_list() const                  { return (_scavenge_root_state & 1) != 0; }
  protected:
@@ -511,8 +543,8 @@
 
   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
                                      OopClosure* f);
-  virtual void oops_do(OopClosure* f) { oops_do(f, false); }
-  void         oops_do(OopClosure* f, bool do_strong_roots_only);
+  void oops_do(OopClosure* f) { oops_do(f, false); }
+  void oops_do(OopClosure* f, bool do_strong_roots_only);
   bool detect_scavenge_root_oops();
   void verify_scavenge_root_oops() PRODUCT_RETURN;
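
The accessor scheme above stores one integer offset per section and derives every begin/end/size/contains from adjacent offsets, which is why adding the oop table costs only the single _oops_offset field. A minimal sketch of that bookkeeping; Blob and its field values are invented for illustration:

    #include <cassert>

    struct Blob {
        char data[64];
        int  consts_offset;       // each section's end is the next one's begin
        int  oops_offset;
        int  scopes_data_offset;

        char* base()         { return data; }
        char* consts_begin() { return base() + consts_offset; }
        char* consts_end()   { return base() + oops_offset; }
        char* oops_begin()   { return base() + oops_offset; }
        char* oops_end()     { return base() + scopes_data_offset; }
        int   oops_size()    { return (int)(oops_end() - oops_begin()); }
        bool  oops_contains(char* p) { return oops_begin() <= p && p < oops_end(); }
    };

    int main() {
        Blob b = {};
        b.consts_offset = 8; b.oops_offset = 24; b.scopes_data_offset = 40;
        assert(b.oops_size() == 16);                      // 40 - 24
        assert(b.oops_contains(b.base() + 24));           // inclusive begin
        assert(!b.oops_contains(b.base() + 40));          // exclusive end
        return 0;
    }
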
 
--- a/hotspot/src/share/vm/code/oopRecorder.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/code/oopRecorder.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,10 +50,10 @@
   return _handles->length() * sizeof(oop);
 }
 
-void OopRecorder::copy_to(CodeBlob* code) {
+void OopRecorder::copy_to(nmethod* nm) {
   assert(_complete, "must be frozen");
   maybe_initialize();  // get non-null handles, even if we have no oops
-  code->copy_oops(_handles);
+  nm->copy_oops(_handles);
 }
 
 void OopRecorder::maybe_initialize() {
--- a/hotspot/src/share/vm/code/oopRecorder.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/code/oopRecorder.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,8 +70,8 @@
     return _handles->length() + first_index;
   }
 
-  // copy the generated oop table to CodeBlob
-  void copy_to(CodeBlob* code);  // => code->copy_oops(_handles)
+  // copy the generated oop table to nmethod
+  void copy_to(nmethod* nm);  // => nm->copy_oops(_handles)
 
   bool is_unused() { return _handles == NULL && !_complete; }
 #ifdef ASSERT
--- a/hotspot/src/share/vm/code/relocInfo.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/code/relocInfo.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -115,24 +115,25 @@
 // ----------------------------------------------------------------------------------------------------
 // Implementation of RelocIterator
 
-void RelocIterator::initialize(CodeBlob* cb, address begin, address limit) {
+void RelocIterator::initialize(nmethod* nm, address begin, address limit) {
   initialize_misc();
 
-  if (cb == NULL && begin != NULL) {
-    // allow CodeBlob to be deduced from beginning address
-    cb = CodeCache::find_blob(begin);
+  if (nm == NULL && begin != NULL) {
+    // allow nmethod to be deduced from beginning address
+    CodeBlob* cb = CodeCache::find_blob(begin);
+    nm = cb->as_nmethod_or_null();
   }
-  assert(cb != NULL, "must be able to deduce nmethod from other arguments");
+  assert(nm != NULL, "must be able to deduce nmethod from other arguments");
 
-  _code    = cb;
-  _current = cb->relocation_begin()-1;
-  _end     = cb->relocation_end();
-  _addr    = (address) cb->instructions_begin();
+  _code    = nm;
+  _current = nm->relocation_begin() - 1;
+  _end     = nm->relocation_end();
+  _addr    = (address) nm->instructions_begin();
 
   assert(!has_current(), "just checking");
-  address code_end = cb->instructions_end();
+  address code_end = nm->instructions_end();
 
-  assert(begin == NULL || begin >= cb->instructions_begin(), "in bounds");
+  assert(begin == NULL || begin >= nm->instructions_begin(), "in bounds");
  // FIX THIS  assert(limit == NULL || limit <= code_end,     "in bounds");
   set_limits(begin, limit);
 }
@@ -754,7 +755,7 @@
     // oop is stored in the code stream
     return (oop*) pd_address_in_code();
   } else {
-    // oop is stored in table at CodeBlob::oops_begin
+    // oop is stored in table at nmethod::oops_begin
     return code()->oop_addr_at(n);
   }
 }
@@ -776,26 +777,28 @@
 }
 
 
-RelocIterator virtual_call_Relocation::parse_ic(CodeBlob* &code, address &ic_call, address &first_oop,
+RelocIterator virtual_call_Relocation::parse_ic(nmethod* &nm, address &ic_call, address &first_oop,
                                                 oop* &oop_addr, bool *is_optimized) {
   assert(ic_call != NULL, "ic_call address must be set");
   assert(ic_call != NULL || first_oop != NULL, "must supply a non-null input");
-  if (code == NULL) {
+  if (nm == NULL) {
+    CodeBlob* code;
     if (ic_call != NULL) {
       code = CodeCache::find_blob(ic_call);
     } else if (first_oop != NULL) {
       code = CodeCache::find_blob(first_oop);
     }
-    assert(code != NULL, "address to parse must be in CodeBlob");
+    nm = code->as_nmethod_or_null();
+    assert(nm != NULL, "address to parse must be in nmethod");
   }
-  assert(ic_call   == NULL || code->contains(ic_call),   "must be in CodeBlob");
-  assert(first_oop == NULL || code->contains(first_oop), "must be in CodeBlob");
+  assert(ic_call   == NULL || nm->contains(ic_call),   "must be in nmethod");
+  assert(first_oop == NULL || nm->contains(first_oop), "must be in nmethod");
 
   address oop_limit = NULL;
 
   if (ic_call != NULL) {
     // search for the ic_call at the given address
-    RelocIterator iter(code, ic_call, ic_call+1);
+    RelocIterator iter(nm, ic_call, ic_call+1);
     bool ret = iter.next();
     assert(ret == true, "relocInfo must exist at this address");
     assert(iter.addr() == ic_call, "must find ic_call");
@@ -814,7 +817,7 @@
   }
 
   // search for the first_oop, to get its oop_addr
-  RelocIterator all_oops(code, first_oop);
+  RelocIterator all_oops(nm, first_oop);
   RelocIterator iter = all_oops;
   iter.set_limit(first_oop+1);
   bool found_oop = false;
@@ -842,7 +845,7 @@
       }
     }
     guarantee(!did_reset, "cannot find ic_call");
-    iter = RelocIterator(code); // search the whole CodeBlob
+    iter = RelocIterator(nm); // search the whole nmethod
     did_reset = true;
   }
 
@@ -1175,9 +1178,9 @@
 
 // For the debugger:
 extern "C"
-void print_blob_locs(CodeBlob* cb) {
-  cb->print();
-  RelocIterator iter(cb);
+void print_blob_locs(nmethod* nm) {
+  nm->print();
+  RelocIterator iter(nm);
   iter.print();
 }
 extern "C"
--- a/hotspot/src/share/vm/code/relocInfo.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/code/relocInfo.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -512,7 +512,7 @@
   address    _limit;   // stop producing relocations after this _addr
   relocInfo* _current; // the current relocation information
   relocInfo* _end;     // end marker; we're done iterating when _current == _end
-  CodeBlob*  _code;    // compiled method containing _addr
+  nmethod*   _code;    // compiled method containing _addr
   address    _addr;    // instruction to which the relocation applies
   short      _databuf; // spare buffer for compressed data
   short*     _data;    // pointer to the relocation's data
@@ -549,7 +549,7 @@
 
   address compute_section_start(int n) const;  // out-of-line helper
 
-  void initialize(CodeBlob* nm, address begin, address limit);
+  void initialize(nmethod* nm, address begin, address limit);
 
   friend class PatchingRelocIterator;
   // make an uninitialized one, for PatchingRelocIterator:
@@ -557,7 +557,7 @@
 
  public:
   // constructor
-  RelocIterator(CodeBlob* cb,    address begin = NULL, address limit = NULL);
+  RelocIterator(nmethod* nm,     address begin = NULL, address limit = NULL);
   RelocIterator(CodeSection* cb, address begin = NULL, address limit = NULL);
 
   // get next reloc info, return !eos
@@ -592,7 +592,7 @@
   relocType    type()         const { return current()->type(); }
   int          format()       const { return (relocInfo::have_format) ? current()->format() : 0; }
   address      addr()         const { return _addr; }
-  CodeBlob*    code()         const { return _code; }
+  nmethod*     code()         const { return _code; }
   short*       data()         const { return _data; }
   int          datalen()      const { return _datalen; }
   bool     has_current()      const { return _datalen >= 0; }
@@ -790,9 +790,9 @@
 
  public:
   // accessors which only make sense for a bound Relocation
-  address   addr()         const { return binding()->addr(); }
-  CodeBlob* code()         const { return binding()->code(); }
-  bool      addr_in_const() const { return binding()->addr_in_const(); }
+  address  addr()         const { return binding()->addr(); }
+  nmethod* code()         const { return binding()->code(); }
+  bool     addr_in_const() const { return binding()->addr_in_const(); }
  protected:
   short*   data()         const { return binding()->data(); }
   int      datalen()      const { return binding()->datalen(); }
@@ -982,12 +982,12 @@
 
   // Figure out where an ic_call is hiding, given a set-oop or call.
   // Either ic_call or first_oop must be non-null; the other is deduced.
-  // Code if non-NULL must be the CodeBlob, else it is deduced.
+  // Code if non-NULL must be the nmethod, else it is deduced.
   // The address of the patchable oop is also deduced.
   // The returned iterator will enumerate over the oops and the ic_call,
   // as well as any other relocations that happen to be in that span of code.
   // Recognize relevant set_oops with:  oop_reloc()->oop_addr() == oop_addr.
-  static RelocIterator parse_ic(CodeBlob* &code, address &ic_call, address &first_oop, oop* &oop_addr, bool *is_optimized);
+  static RelocIterator parse_ic(nmethod* &nm, address &ic_call, address &first_oop, oop* &oop_addr, bool *is_optimized);
 };
 
 
@@ -1304,8 +1304,8 @@
 APPLY_TO_RELOCATIONS(EACH_CASE);
 #undef EACH_CASE
 
-inline RelocIterator::RelocIterator(CodeBlob* cb, address begin, address limit) {
-  initialize(cb, begin, limit);
+inline RelocIterator::RelocIterator(nmethod* nm, address begin, address limit) {
+  initialize(nm, begin, limit);
 }
 
 // if you are going to patch code, you should use this subclass of
@@ -1323,8 +1323,8 @@
   void        operator=(const RelocIterator&);
 
  public:
-  PatchingRelocIterator(CodeBlob* cb, address begin =NULL, address limit =NULL)
-    : RelocIterator(cb, begin, limit)                { prepass();  }
+  PatchingRelocIterator(nmethod* nm, address begin = NULL, address limit = NULL)
+    : RelocIterator(nm, begin, limit)                { prepass();  }
 
   ~PatchingRelocIterator()                           { postpass(); }
 };
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1651,14 +1651,15 @@
       log->stamp();
       log->end_elem();
     }
-  #ifndef PRODUCT
-    warning("CodeCache is full. Compiler has been disabled");
+    warning("CodeCache is full. Compiler has been disabled.");
+    warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
+#ifndef PRODUCT
     if (CompileTheWorld || ExitOnFullCodeCache) {
       before_exit(JavaThread::current());
       exit_globals(); // will delete tty
       vm_direct_exit(CompileTheWorld ? 0 : 1);
     }
-  #endif
+#endif
     if (UseCodeCacheFlushing) {
       NMethodSweeper::handle_full_code_cache(true);
     } else {
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -32,6 +32,23 @@
 // highest ranked  free list lock rank
 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
 
+// Defaults are 0 so things will break badly if incorrectly initialized.
+int CompactibleFreeListSpace::IndexSetStart  = 0;
+int CompactibleFreeListSpace::IndexSetStride = 0;
+
+size_t MinChunkSize = 0;
+
+void CompactibleFreeListSpace::set_cms_values() {
+  // Set CMS global values
+  assert(MinChunkSize == 0, "already set");
+  #define numQuanta(x,y) ((x+y-1)/y)
+  MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
+
+  assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
+  IndexSetStart  = MinObjAlignment;
+  IndexSetStride = MinObjAlignment;
+}
+
 // Constructor
 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
   MemRegion mr, bool use_adaptive_freelists,
@@ -302,7 +319,7 @@
 
 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
   size_t count = 0;
-  for (int i = MinChunkSize; i < IndexSetSize; i++) {
+  for (int i = (int)MinChunkSize; i < IndexSetSize; i++) {
     debug_only(
       ssize_t total_list_count = 0;
       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
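
set_cms_values() above computes MinChunkSize by ceiling-dividing the FreeChunk header size into alignment quanta. A standalone check of that arithmetic, using hypothetical sizes (a 17-byte header with 8-byte, one-word alignment) rather than real build-time values:

    #include <cassert>
    #include <cstddef>

    // Ceiling division, as in the numQuanta macro above.
    static size_t num_quanta(size_t x, size_t y) { return (x + y - 1) / y; }

    int main() {
        size_t sizeof_free_chunk      = 17;  // hypothetical
        size_t min_obj_align_in_bytes = 8;   // hypothetical
        size_t min_obj_align_in_words = 1;   // hypothetical
        size_t min_chunk_size =
            num_quanta(sizeof_free_chunk, min_obj_align_in_bytes)
            * min_obj_align_in_words;
        assert(min_chunk_size == 3);         // ceil(17/8) = 3 quanta of 1 word
        return 0;
    }
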
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -91,10 +91,10 @@
   enum SomeConstants {
     SmallForLinearAlloc = 16,        // size < this then use _sLAB
     SmallForDictionary  = 257,       // size < this then use _indexedFreeList
-    IndexSetSize        = SmallForDictionary,  // keep this odd-sized
-    IndexSetStart       = MinObjAlignment,
-    IndexSetStride      = MinObjAlignment
+    IndexSetSize        = SmallForDictionary  // keep this odd-sized
   };
+  static int IndexSetStart;
+  static int IndexSetStride;
 
  private:
   enum FitStrategyOptions {
@@ -278,6 +278,9 @@
   HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
   void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
 
+  // Set CMS global values
+  static void set_cms_values();
+
   // Return the free chunk at the end of the space.  If no such
   // chunk exists, return NULL.
   FreeChunk* find_chunk_at_end();
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -159,7 +159,7 @@
      CardTableRS* ct, bool use_adaptive_freelists,
      FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
   CardGeneration(rs, initial_byte_size, level, ct),
-  _dilatation_factor(((double)MinChunkSize)/((double)(oopDesc::header_size()))),
+  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
   _debug_collection_type(Concurrent_collection_type)
 {
   HeapWord* bottom = (HeapWord*) _virtual_space.low();
@@ -222,7 +222,7 @@
   // promoting generation, we'll instead just use the minimum
   // object size (which today is a header's worth of space);
   // note that all arithmetic is in units of HeapWords.
-  assert(MinChunkSize >= oopDesc::header_size(), "just checking");
+  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
   assert(_dilatation_factor >= 1.0, "from previous assert");
 }
 
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -133,9 +133,5 @@
   void print_on(outputStream* st);
 };
 
-// Alignment helpers etc.
-#define numQuanta(x,y) ((x+y-1)/y)
-enum AlignmentConstants {
-  MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment
-};
+extern size_t MinChunkSize;
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -3644,7 +3644,7 @@
   do {
     free_words = r->free()/HeapWordSize;
     // If there's too little space, no one can allocate, so we're done.
-    if (free_words < (size_t)oopDesc::header_size()) return;
+    if (free_words < CollectedHeap::min_fill_size()) return;
     // Otherwise, try to claim it.
     block = r->par_allocate(free_words);
   } while (block == NULL);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -2523,14 +2523,14 @@
   }
   if (ParallelGCThreads > 0) {
     const size_t OverpartitionFactor = 4;
-    const size_t MinChunkSize = 8;
-    const size_t ChunkSize =
+    const size_t MinWorkUnit = 8;
+    const size_t WorkUnit =
       MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
-           MinChunkSize);
+           MinWorkUnit);
     _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
-                                                             ChunkSize);
+                                                             WorkUnit);
     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
-                                            (int) ChunkSize);
+                                            (int) WorkUnit);
     _g1->workers()->run_task(&parKnownGarbageTask);
 
     assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -81,20 +81,24 @@
 
   remove_expired_entries(end);
   if (_no_entries == QueueLength) {
-    // OK, right now when we fill up we bomb out
-    // there are a few ways of dealing with this "gracefully"
+    // OK, we've filled up the queue. There are a few ways
+    // of dealing with this "gracefully"
     //   increase the array size (:-)
     //   remove the oldest entry (this might allow more GC time for
-    //     the time slice than what's allowed)
+    //     the time slice than what's allowed) - this is what we
+    //     currently do
     //   consolidate the two entries with the minimum gap between them
     //     (this might allow less GC time than what's allowed)
-    guarantee(NOT_PRODUCT(ScavengeALot ||) G1UseFixedWindowMMUTracker,
-              "array full, currently we can't recover unless +G1UseFixedWindowMMUTracker");
+
     // In the case where ScavengeALot is true, such overflow is not
     // uncommon; in such cases, we can, without much loss of precision
     // or performance (we are GC'ing most of the time anyway!),
-    // simply overwrite the oldest entry in the tracker: this
-    // is also the behaviour when G1UseFixedWindowMMUTracker is enabled.
+    // simply overwrite the oldest entry in the tracker.
+
+    if (G1PolicyVerbose > 1) {
+      warning("MMU Tracker Queue overflow. Replacing earliest entry.");
+    }
+
     _head_index = trim_index(_head_index + 1);
     assert(_head_index == _tail_index, "Because we have a full circular buffer");
     _tail_index = trim_index(_tail_index + 1);
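
With the guarantee removed, an overflowing tracker now always recovers by overwriting its oldest entry, advancing both indices of the circular buffer in step. A toy ring buffer showing that policy; QueueLength and the int payload are illustrative:

    #include <cassert>

    static const int QueueLength = 4;

    struct Ring {
        int data[QueueLength];
        int head, tail, count;            // head = oldest, tail = next free slot

        Ring() : head(0), tail(0), count(0) {}
        static int trim(int i) { return i % QueueLength; }

        void add(int v) {
            if (count == QueueLength) {   // full: drop the oldest entry
                head = trim(head + 1);
                count--;
            }
            data[tail] = v;
            tail = trim(tail + 1);
            count++;
        }
    };

    int main() {
        Ring r;
        for (int i = 1; i <= 6; i++) r.add(i);   // 5 and 6 evict 1 and 2
        assert(r.count == QueueLength && r.data[r.head] == 3);
        return 0;
    }
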
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -254,9 +254,6 @@
           "If non-0 is the size of the G1 survivor space, "                 \
           "otherwise SurvivorRatio is used to determine the size")          \
                                                                             \
-  product(bool, G1UseFixedWindowMMUTracker, false,                          \
-          "If the MMU tracker's memory is full, forget the oldest entry")   \
-                                                                            \
   product(uintx, G1HeapRegionSize, 0,                                       \
           "Size of the G1 regions.")                                        \
                                                                             \
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -711,6 +711,7 @@
   // object in the region.
   if (region_ptr->data_size() == RegionSize) {
     result += pointer_delta(addr, region_addr);
+    DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);)
     return result;
   }
 
@@ -1487,13 +1488,14 @@
       space->set_top_for_allocations();
     }
 
-    size_t obj_len = 8;
+    size_t min_size = CollectedHeap::min_fill_size();
+    size_t obj_len = min_size;
     while (b + obj_len <= t) {
       CollectedHeap::fill_with_object(b, obj_len);
       mark_bitmap()->mark_obj(b, obj_len);
       summary_data().add_obj(b, obj_len);
       b += obj_len;
-      obj_len = (obj_len & 0x18) + 8; // 8 16 24 32 8 16 24 32 ...
+      obj_len = (obj_len & (min_size*3)) + min_size; // 8 16 24 32 8 16 24 32 ...
     }
     if (b < t) {
       // The loop didn't completely fill to t (top); adjust top downward.
@@ -1680,11 +1682,13 @@
     //                          +-------+
 
     // Initially assume case a, c or e will apply.
-    size_t obj_len = (size_t)oopDesc::header_size();
+    size_t obj_len = CollectedHeap::min_fill_size();
     HeapWord* obj_beg = dense_prefix_end - obj_len;
 
 #ifdef  _LP64
-    if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
+    if (MinObjAlignment > 1) { // object alignment > heap word size
+      // Cases a, c or e.
+    } else if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
       // Case b above.
       obj_beg = dense_prefix_end - 1;
     } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
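
The filler loop above now derives its size cycle from min_fill_size() instead of a hard-coded 8: when min_size is a power of two, (obj_len & (min_size*3)) + min_size steps through min, 2*min, 3*min, 4*min and wraps. A quick check of that recurrence, assuming min_size = 8 (the common 64-bit value) purely for illustration:

    #include <cassert>
    #include <cstddef>

    int main() {
        size_t min_size = 8;      // stand-in for CollectedHeap::min_fill_size()
        size_t expected[8] = { 8, 16, 24, 32, 8, 16, 24, 32 };
        size_t obj_len = min_size;
        for (int i = 0; i < 8; i++) {
            assert(obj_len == expected[i]);
            // mask keeps the two bits encoding 1..4 quanta, then step by min
            obj_len = (obj_len & (min_size * 3)) + min_size;
        }
        return 0;
    }
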
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1414,6 +1414,8 @@
 {
   assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
          "must move left or to a different space");
+  assert(is_object_aligned((intptr_t)old_addr) && is_object_aligned((intptr_t)new_addr),
+         "checking alignment");
 }
 #endif // ASSERT
 
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -761,7 +761,7 @@
 
   if (p != NULL) {
     size_t remainder = s->free_in_words();
-    if (remainder < (size_t)oopDesc::header_size() && remainder > 0) {
+    if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
       s->set_top(s->top() - size);
       p = NULL;
     }
@@ -803,7 +803,7 @@
   HeapWord *p = s->cas_allocate(size);
   if (p != NULL) {
     size_t remainder = pointer_delta(s->end(), p + size);
-    if (remainder < (size_t)oopDesc::header_size() && remainder > 0) {
+    if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
       if (s->cas_deallocate(p, size)) {
         // We were the last to allocate and created a fragment less than
         // a minimal object.
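
Both allocation paths above now compare the leftover tail against CollectedHeap::min_fill_size(): a nonzero remainder smaller than the minimum fillable object cannot be described to the GC with a filler, so the allocation is undone and retried. A toy predicate capturing that rule; min_fill = 2 words is an assumed value:

    #include <cassert>
    #include <cstddef>

    static const size_t min_fill = 2;   // words; illustrative stand-in

    // True if the allocation may stand, false if it must be backed out.
    static bool allocation_ok(size_t free_before, size_t request) {
        size_t remainder = free_before - request;   // assumes request <= free
        return !(remainder < min_fill && remainder > 0);
    }

    int main() {
        assert( allocation_ok(10, 10));  // exact fit: no gap at all
        assert( allocation_ok(10,  8));  // gap of 2: big enough to fill
        assert(!allocation_ok(10,  9));  // gap of 1: unfillable, back out
        return 0;
    }
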
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -239,11 +239,11 @@
 }
 
 size_t CollectedHeap::filler_array_hdr_size() {
-  return size_t(arrayOopDesc::header_size(T_INT));
+  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
 }
 
 size_t CollectedHeap::filler_array_min_size() {
-  return align_object_size(filler_array_hdr_size());
+  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
 }
 
 size_t CollectedHeap::filler_array_max_size() {
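
filler_array_hdr_size() above now rounds the T_INT array header up to a jlong boundary before the body is sized. The underlying align-up is the usual power-of-two mask arithmetic, sketched here with illustrative word counts:

    #include <cassert>
    #include <cstddef>

    // Align x up to a power-of-two boundary.
    static size_t align_up(size_t x, size_t boundary) {
        return (x + boundary - 1) & ~(boundary - 1);
    }

    int main() {
        assert(align_up(3, 2) == 4);   // e.g. a 3-word header padded to 4 words
        assert(align_up(4, 2) == 4);   // already aligned: unchanged
        return 0;
    }
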
--- a/hotspot/src/share/vm/includeDB_compiler1	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/includeDB_compiler1	Tue Jun 29 10:48:02 2010 -0700
@@ -71,8 +71,8 @@
 c1_Compilation.cpp                      c1_MacroAssembler.hpp
 c1_Compilation.cpp                      c1_ValueMap.hpp
 c1_Compilation.cpp                      c1_ValueStack.hpp
-c1_Compilation.cpp                      ciEnv.hpp
 c1_Compilation.cpp                      debugInfoRec.hpp
+c1_Compilation.hpp                      ciEnv.hpp
 c1_Compilation.hpp                      exceptionHandlerTable.hpp
 c1_Compilation.hpp                      resourceArea.hpp
 
@@ -82,6 +82,8 @@
 c1_Compiler.cpp                         c1_Compilation.hpp
 c1_Compiler.cpp                         c1_Compiler.hpp
 c1_Compiler.cpp                         c1_FrameMap.hpp
+c1_Compiler.cpp                         c1_GraphBuilder.hpp
+c1_Compiler.cpp                         c1_LinearScan.hpp
 c1_Compiler.cpp                         c1_MacroAssembler.hpp
 c1_Compiler.cpp                         c1_Runtime1.hpp
 c1_Compiler.cpp                         c1_ValueType.hpp
--- a/hotspot/src/share/vm/includeDB_core	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/includeDB_core	Tue Jun 29 10:48:02 2010 -0700
@@ -827,6 +827,7 @@
 ciStreams.cpp                           ciStreams.hpp
 ciStreams.cpp                           ciUtilities.hpp
 
+ciStreams.hpp                           bytecode.hpp
 ciStreams.hpp                           ciClassList.hpp
 ciStreams.hpp                           ciExceptionHandler.hpp
 ciStreams.hpp                           ciInstanceKlass.hpp
@@ -3635,6 +3636,7 @@
 rewriter.cpp                            gcLocker.hpp
 rewriter.cpp                            generateOopMap.hpp
 rewriter.cpp                            interpreter.hpp
+rewriter.cpp                            methodComparator.hpp
 rewriter.cpp                            objArrayOop.hpp
 rewriter.cpp                            oop.inline.hpp
 rewriter.cpp                            oopFactory.hpp
--- a/hotspot/src/share/vm/interpreter/bytecode.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecode.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,19 +26,12 @@
 #include "incls/_bytecode.cpp.incl"
 
 // Implementation of Bytecode
-// Should eventually get rid of these functions and use ThisRelativeObj methods instead
 
-void Bytecode::set_code(Bytecodes::Code code) {
-  Bytecodes::check(code);
-  *addr_at(0) = u_char(code);
-}
-
-
-bool Bytecode::check_must_rewrite() const {
-  assert(Bytecodes::can_rewrite(code()), "post-check only");
+bool Bytecode::check_must_rewrite(Bytecodes::Code code) const {
+  assert(Bytecodes::can_rewrite(code), "post-check only");
 
   // Some codes are rewritten only conditionally.  Look closely at them.
-  switch (code()) {
+  switch (code) {
   case Bytecodes::_aload_0:
     // Even if RewriteFrequentPairs is turned on,
     // the _aload_0 code might delay its rewrite until
@@ -58,14 +51,85 @@
 }
 
 
+#ifdef ASSERT
+
+void Bytecode::assert_same_format_as(Bytecodes::Code testbc, bool is_wide) const {
+  Bytecodes::Code thisbc = Bytecodes::cast(byte_at(0));
+  if (thisbc == Bytecodes::_breakpoint)  return;  // let the assertion fail silently
+  if (is_wide) {
+    assert(thisbc == Bytecodes::_wide, "expected a wide instruction");
+    thisbc = Bytecodes::cast(byte_at(1));
+    if (thisbc == Bytecodes::_breakpoint)  return;
+  }
+  int thisflags = Bytecodes::flags(testbc, is_wide) & Bytecodes::_all_fmt_bits;
+  int testflags = Bytecodes::flags(thisbc, is_wide) & Bytecodes::_all_fmt_bits;
+  if (thisflags != testflags)
+    tty->print_cr("assert_same_format_as(%d) failed on bc=%d%s; %d != %d",
+                  (int)testbc, (int)thisbc, (is_wide?"/wide":""), testflags, thisflags);
+  assert(thisflags == testflags, "expected format");
+}
+
+void Bytecode::assert_index_size(int size, Bytecodes::Code bc, bool is_wide) {
+  int have_fmt = (Bytecodes::flags(bc, is_wide)
+                  & (Bytecodes::_fmt_has_u2 | Bytecodes::_fmt_has_u4 |
+                     Bytecodes::_fmt_not_simple |
+                     // Not an offset field:
+                     Bytecodes::_fmt_has_o));
+  int need_fmt = -1;
+  switch (size) {
+  case 1: need_fmt = 0;                      break;
+  case 2: need_fmt = Bytecodes::_fmt_has_u2; break;
+  case 4: need_fmt = Bytecodes::_fmt_has_u4; break;
+  }
+  if (is_wide)  need_fmt |= Bytecodes::_fmt_not_simple;
+  if (have_fmt != need_fmt) {
+    tty->print_cr("assert_index_size %d: bc=%d%s %d != %d", size, bc, (is_wide?"/wide":""), have_fmt, need_fmt);
+    assert(have_fmt == need_fmt, "assert_index_size");
+  }
+}
+
+void Bytecode::assert_offset_size(int size, Bytecodes::Code bc, bool is_wide) {
+  int have_fmt = Bytecodes::flags(bc, is_wide) & Bytecodes::_all_fmt_bits;
+  int need_fmt = -1;
+  switch (size) {
+  case 2: need_fmt = Bytecodes::_fmt_bo2; break;
+  case 4: need_fmt = Bytecodes::_fmt_bo4; break;
+  }
+  if (is_wide)  need_fmt |= Bytecodes::_fmt_not_simple;
+  if (have_fmt != need_fmt) {
+    tty->print_cr("assert_offset_size %d: bc=%d%s %d != %d", size, bc, (is_wide?"/wide":""), have_fmt, need_fmt);
+    assert(have_fmt == need_fmt, "assert_offset_size");
+  }
+}
+
+void Bytecode::assert_constant_size(int size, int where, Bytecodes::Code bc, bool is_wide) {
+  int have_fmt = Bytecodes::flags(bc, is_wide) & (Bytecodes::_all_fmt_bits
+                                                  // Ignore any 'i' field (for iinc):
+                                                  & ~Bytecodes::_fmt_has_i);
+  int need_fmt = -1;
+  switch (size) {
+  case 1: need_fmt = Bytecodes::_fmt_bc;                          break;
+  case 2: need_fmt = Bytecodes::_fmt_bc | Bytecodes::_fmt_has_u2; break;
+  }
+  if (is_wide)  need_fmt |= Bytecodes::_fmt_not_simple;
+  int length = is_wide ? Bytecodes::wide_length_for(bc) : Bytecodes::length_for(bc);
+  if (have_fmt != need_fmt || where + size != length) {
+    tty->print_cr("assert_constant_size %d @%d: bc=%d%s %d != %d", size, where, bc, (is_wide?"/wide":""), have_fmt, need_fmt);
+  }
+  assert(have_fmt == need_fmt, "assert_constant_size");
+  assert(where + size == length, "assert_constant_size oob");
+}
+
+void Bytecode::assert_native_index(Bytecodes::Code bc, bool is_wide) {
+  assert((Bytecodes::flags(bc, is_wide) & Bytecodes::_fmt_has_nbo) != 0, "native index");
+}
+
+#endif //ASSERT
 
 // Implementation of Bytecode_tableswitch
 
 int Bytecode_tableswitch::dest_offset_at(int i) const {
-  address x = aligned_addr_at(1);
-  int x2 = aligned_offset(1 + (3 + i)*jintSize);
-  int val = java_signed_word_at(x2);
-  return java_signed_word_at(aligned_offset(1 + (3 + i)*jintSize));
+  return get_Java_u4_at(aligned_offset(1 + (3 + i)*jintSize));
 }
 
 
@@ -74,6 +138,7 @@
 void Bytecode_invoke::verify() const {
   Bytecodes::Code bc = adjusted_invoke_code();
   assert(is_valid(), "check invoke");
+  assert(method()->constants()->cache() != NULL, "do not call this from verifier or rewriter");
 }
 
 
@@ -116,27 +181,12 @@
 int Bytecode_invoke::index() const {
   // Note:  Rewriter::rewrite changes the Java_u2 of an invokedynamic to a native_u4,
   // at the same time it allocates per-call-site CP cache entries.
-  if (has_giant_index())
-    return Bytes::get_native_u4(bcp() + 1);
+  Bytecodes::Code stdc = Bytecodes::java_code(code());
+  Bytecode* invoke = Bytecode_at(bcp());
+  if (invoke->has_index_u4(stdc))
+    return invoke->get_index_u4(stdc);
   else
-    return Bytes::get_Java_u2(bcp() + 1);
-}
-
-
-// Implementation of Bytecode_static
-
-void Bytecode_static::verify() const {
-  assert(Bytecodes::java_code(code()) == Bytecodes::_putstatic
-      || Bytecodes::java_code(code()) == Bytecodes::_getstatic, "check static");
-}
-
-
-BasicType Bytecode_static::result_type(methodOop method) const {
-  int index = java_hwrd_at(1);
-  constantPoolOop constants = method->constants();
-  symbolOop field_type = constants->signature_ref_at(index);
-  BasicType basic_type = FieldType::basic_type(field_type);
-  return basic_type;
+    return invoke->get_index_u2_cpcache(stdc);
 }
 
 
@@ -156,7 +206,8 @@
 
 
 int Bytecode_field::index() const {
-  return java_hwrd_at(1);
+  Bytecode* invoke = Bytecode_at(bcp());
+  return invoke->get_index_u2_cpcache(Bytecodes::_getfield);
 }
 
 
@@ -164,7 +215,14 @@
 
 int Bytecode_loadconstant::index() const {
   Bytecodes::Code stdc = Bytecodes::java_code(code());
-  return stdc == Bytecodes::_ldc ? java_byte_at(1) : java_hwrd_at(1);
+  if (stdc != Bytecodes::_wide) {
+    if (Bytecodes::java_code(stdc) == Bytecodes::_ldc)
+      return get_index_u1(stdc);
+    else
+      return get_index_u2(stdc, false);
+  }
+  stdc = Bytecodes::code_at(addr_at(1));
+  return get_index_u2(stdc, true);
 }
 
 //------------------------------------------------------------------------------
--- a/hotspot/src/share/vm/interpreter/bytecode.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecode.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,92 +26,100 @@
 // relative to an object's 'this' pointer.
 
 class ThisRelativeObj VALUE_OBJ_CLASS_SPEC {
- private:
-  int     sign_extend        (int x, int size)   const     { const int s = (BytesPerInt - size)*BitsPerByte; return (x << s) >> s; }
-
  public:
   // Address computation
   address addr_at            (int offset)        const     { return (address)this + offset; }
+  int     byte_at            (int offset)        const     { return *(addr_at(offset)); }
   address aligned_addr_at    (int offset)        const     { return (address)round_to((intptr_t)addr_at(offset), jintSize); }
   int     aligned_offset     (int offset)        const     { return aligned_addr_at(offset) - addr_at(0); }
 
-  // Java unsigned accessors (using Java spec byte ordering)
-  int     java_byte_at       (int offset)        const     { return *(jubyte*)addr_at(offset); }
-  int     java_hwrd_at       (int offset)        const     { return java_byte_at(offset) << (1 * BitsPerByte) | java_byte_at(offset + 1); }
-  int     java_word_at       (int offset)        const     { return java_hwrd_at(offset) << (2 * BitsPerByte) | java_hwrd_at(offset + 2); }
-
-  // Java signed accessors (using Java spec byte ordering)
-  int     java_signed_byte_at(int offset)        const     { return sign_extend(java_byte_at(offset), 1); }
-  int     java_signed_hwrd_at(int offset)        const     { return sign_extend(java_hwrd_at(offset), 2); }
-  int     java_signed_word_at(int offset)        const     { return             java_word_at(offset)    ; }
-
-  // Fast accessors (using the machine's natural byte ordering)
-  int     fast_byte_at       (int offset)        const     { return *(jubyte *)addr_at(offset); }
-  int     fast_hwrd_at       (int offset)        const     { return *(jushort*)addr_at(offset); }
-  int     fast_word_at       (int offset)        const     { return *(juint  *)addr_at(offset); }
-
-  // Fast signed accessors (using the machine's natural byte ordering)
-  int     fast_signed_byte_at(int offset)        const     { return *(jbyte *)addr_at(offset); }
-  int     fast_signed_hwrd_at(int offset)        const     { return *(jshort*)addr_at(offset); }
-  int     fast_signed_word_at(int offset)        const     { return *(jint  *)addr_at(offset); }
-
-  // Fast manipulators (using the machine's natural byte ordering)
-  void    set_fast_byte_at   (int offset, int x) const     { *(jbyte *)addr_at(offset) = (jbyte )x; }
-  void    set_fast_hwrd_at   (int offset, int x) const     { *(jshort*)addr_at(offset) = (jshort)x; }
-  void    set_fast_word_at   (int offset, int x) const     { *(jint  *)addr_at(offset) = (jint  )x; }
+  // Word access:
+  int     get_Java_u2_at     (int offset)        const     { return Bytes::get_Java_u2(addr_at(offset)); }
+  int     get_Java_u4_at     (int offset)        const     { return Bytes::get_Java_u4(addr_at(offset)); }
+  int     get_native_u2_at   (int offset)        const     { return Bytes::get_native_u2(addr_at(offset)); }
+  int     get_native_u4_at   (int offset)        const     { return Bytes::get_native_u4(addr_at(offset)); }
 };
 
 
 // The base class for different kinds of bytecode abstractions.
 // Provides the primitive operations to manipulate code relative
 // to an object's 'this' pointer.
+// FIXME: Make this a ResourceObj, include the enclosing methodOop, and cache the opcode.
 
 class Bytecode: public ThisRelativeObj {
  protected:
   u_char byte_at(int offset) const               { return *addr_at(offset); }
-  bool check_must_rewrite() const;
+  bool check_must_rewrite(Bytecodes::Code bc) const;
 
  public:
   // Attributes
   address bcp() const                            { return addr_at(0); }
-  address next_bcp() const                       { return addr_at(0) + Bytecodes::length_at(bcp()); }
   int instruction_size() const                   { return Bytecodes::length_at(bcp()); }
 
+  // Warning: Use code() with caution on live bytecode streams.  4926272
   Bytecodes::Code code() const                   { return Bytecodes::code_at(addr_at(0)); }
   Bytecodes::Code java_code() const              { return Bytecodes::java_code(code()); }
-  bool must_rewrite() const                      { return Bytecodes::can_rewrite(code()) && check_must_rewrite(); }
-  bool is_active_breakpoint() const              { return Bytecodes::is_active_breakpoint_at(bcp()); }
-
-  int     one_byte_index() const                 { assert_index_size(1); return byte_at(1); }
-  int     two_byte_index() const                 { assert_index_size(2); return (byte_at(1) << 8) + byte_at(2); }
-
-  int     offset() const                         { return (two_byte_index() << 16) >> 16; }
-  address destination() const                    { return bcp() + offset(); }
-
-  // Attribute modification
-  void    set_code(Bytecodes::Code code);
+  bool must_rewrite(Bytecodes::Code code) const  { return Bytecodes::can_rewrite(code) && check_must_rewrite(code); }
 
   // Creation
   inline friend Bytecode* Bytecode_at(address bcp);
 
- private:
-  void assert_index_size(int required_size) const {
-#ifdef ASSERT
-    int isize = instruction_size() - 1;
-    if (isize == 2 && code() == Bytecodes::_iinc)
-      isize = 1;
-    else if (isize <= 2)
-      ;                         // no change
-    else if (code() == Bytecodes::_invokedynamic)
-      isize = 4;
-    else
-      isize = 2;
-    assert(isize = required_size, "wrong index size");
-#endif
+  // Static functions for parsing bytecodes in place.
+  int get_index_u1(Bytecodes::Code bc) const {
+    assert_same_format_as(bc); assert_index_size(1, bc);
+    return *(jubyte*)addr_at(1);
+  }
+  int get_index_u2(Bytecodes::Code bc, bool is_wide = false) const {
+    assert_same_format_as(bc, is_wide); assert_index_size(2, bc, is_wide);
+    address p = addr_at(is_wide ? 2 : 1);
+    if (can_use_native_byte_order(bc, is_wide))
+          return Bytes::get_native_u2(p);
+    else  return Bytes::get_Java_u2(p);
+  }
+  int get_index_u2_cpcache(Bytecodes::Code bc) const {
+    assert_same_format_as(bc); assert_index_size(2, bc); assert_native_index(bc);
+    return Bytes::get_native_u2(addr_at(1)) DEBUG_ONLY(+ constantPoolOopDesc::CPCACHE_INDEX_TAG);
+  }
+  int get_index_u4(Bytecodes::Code bc) const {
+    assert_same_format_as(bc); assert_index_size(4, bc);
+    assert(can_use_native_byte_order(bc), "");
+    return Bytes::get_native_u4(addr_at(1));
+  }
+  bool has_index_u4(Bytecodes::Code bc) const {
+    return bc == Bytecodes::_invokedynamic;
+  }
+
+  int get_offset_s2(Bytecodes::Code bc) const {
+    assert_same_format_as(bc); assert_offset_size(2, bc);
+    return (jshort) Bytes::get_Java_u2(addr_at(1));
+  }
+  int get_offset_s4(Bytecodes::Code bc) const {
+    assert_same_format_as(bc); assert_offset_size(4, bc);
+    return (jint) Bytes::get_Java_u4(addr_at(1));
+  }
+
+  int get_constant_u1(int offset, Bytecodes::Code bc) const {
+    assert_same_format_as(bc); assert_constant_size(1, offset, bc);
+    return *(jbyte*)addr_at(offset);
+  }
+  int get_constant_u2(int offset, Bytecodes::Code bc, bool is_wide = false) const {
+    assert_same_format_as(bc, is_wide); assert_constant_size(2, offset, bc, is_wide);
+    return (jshort) Bytes::get_Java_u2(addr_at(offset));
+  }
+
+  // These are used locally and also from bytecode streams.
+  void assert_same_format_as(Bytecodes::Code testbc, bool is_wide = false) const NOT_DEBUG_RETURN;
+  static void assert_index_size(int required_size, Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
+  static void assert_offset_size(int required_size, Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
+  static void assert_constant_size(int required_size, int where, Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
+  static void assert_native_index(Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
+  static bool can_use_native_byte_order(Bytecodes::Code bc, bool is_wide = false) {
+    return (!Bytes::is_Java_byte_ordering_different() || Bytecodes::native_byte_order(bc /*, is_wide*/));
   }
 };
 
 inline Bytecode* Bytecode_at(address bcp) {
+  // Warning: Use with caution on live bytecode streams.  4926272
   return (Bytecode*)bcp;
 }
 
@@ -124,8 +132,8 @@
   int  _offset;
 
  public:
-  int  match() const                             { return java_signed_word_at(0 * jintSize); }
-  int  offset() const                            { return java_signed_word_at(1 * jintSize); }
+  int  match() const                             { return get_Java_u4_at(0 * jintSize); }
+  int  offset() const                            { return get_Java_u4_at(1 * jintSize); }
 };
 
 
@@ -134,8 +142,8 @@
   void verify() const PRODUCT_RETURN;
 
   // Attributes
-  int  default_offset() const                    { return java_signed_word_at(aligned_offset(1 + 0*jintSize)); }
-  int  number_of_pairs() const                   { return java_signed_word_at(aligned_offset(1 + 1*jintSize)); }
+  int  default_offset() const                    { return get_Java_u4_at(aligned_offset(1 + 0*jintSize)); }
+  int  number_of_pairs() const                   { return get_Java_u4_at(aligned_offset(1 + 1*jintSize)); }
   LookupswitchPair* pair_at(int i) const         { assert(0 <= i && i < number_of_pairs(), "pair index out of bounds");
                                                    return (LookupswitchPair*)aligned_addr_at(1 + (1 + i)*2*jintSize); }
   // Creation
@@ -154,9 +162,9 @@
   void verify() const PRODUCT_RETURN;
 
   // Attributes
-  int  default_offset() const                    { return java_signed_word_at(aligned_offset(1 + 0*jintSize)); }
-  int  low_key() const                           { return java_signed_word_at(aligned_offset(1 + 1*jintSize)); }
-  int  high_key() const                          { return java_signed_word_at(aligned_offset(1 + 2*jintSize)); }
+  int  default_offset() const                    { return get_Java_u4_at(aligned_offset(1 + 0*jintSize)); }
+  int  low_key() const                           { return get_Java_u4_at(aligned_offset(1 + 1*jintSize)); }
+  int  high_key() const                          { return get_Java_u4_at(aligned_offset(1 + 2*jintSize)); }
   int  dest_offset_at(int i) const;
   int  length()                                  { return high_key()-low_key()+1; }
 
@@ -206,7 +214,6 @@
   bool is_invokedynamic() const                  { return adjusted_invoke_code() == Bytecodes::_invokedynamic; }
 
   bool has_receiver() const                      { return !is_invokestatic() && !is_invokedynamic(); }
-  bool has_giant_index() const                   { return is_invokedynamic(); }
 
   bool is_valid() const                          { return is_invokeinterface() ||
                                                           is_invokevirtual()   ||
@@ -252,26 +259,6 @@
 }
 
 
-// Abstraction for {get,put}static
-
-class Bytecode_static: public Bytecode {
- public:
-  void verify() const;
-
-  // Returns the result type of the send by inspecting the field ref
-  BasicType result_type(methodOop method) const;
-
-  // Creation
-  inline friend Bytecode_static* Bytecode_static_at(const methodOop method, address bcp);
-};
-
-inline Bytecode_static* Bytecode_static_at(const methodOop method, address bcp) {
-  Bytecode_static* b = (Bytecode_static*)bcp;
-  debug_only(b->verify());
-  return b;
-}
-
-
 // Abstraction for checkcast
 
 class Bytecode_checkcast: public Bytecode {
@@ -279,7 +266,7 @@
   void verify() const { assert(Bytecodes::java_code(code()) == Bytecodes::_checkcast, "check checkcast"); }
 
   // Returns index
-  long index() const   { return java_hwrd_at(1); };
+  long index() const   { return get_index_u2(Bytecodes::_checkcast); };
 
   // Creation
   inline friend Bytecode_checkcast* Bytecode_checkcast_at(address bcp);
@@ -299,7 +286,7 @@
   void verify() const { assert(code() == Bytecodes::_instanceof, "check instanceof"); }
 
   // Returns index
-  long index() const   { return java_hwrd_at(1); };
+  long index() const   { return get_index_u2(Bytecodes::_instanceof); };
 
   // Creation
   inline friend Bytecode_instanceof* Bytecode_instanceof_at(address bcp);
@@ -317,7 +304,7 @@
   void verify() const { assert(java_code() == Bytecodes::_new, "check new"); }
 
   // Returns index
-  long index() const   { return java_hwrd_at(1); };
+  long index() const   { return get_index_u2(Bytecodes::_new); };
 
   // Creation
   inline friend Bytecode_new* Bytecode_new_at(address bcp);
@@ -335,7 +322,7 @@
   void verify() const { assert(java_code() == Bytecodes::_multianewarray, "check new"); }
 
   // Returns index
-  long index() const   { return java_hwrd_at(1); };
+  long index() const   { return get_index_u2(Bytecodes::_multianewarray); };
 
   // Creation
   inline friend Bytecode_multianewarray* Bytecode_multianewarray_at(address bcp);
@@ -353,7 +340,7 @@
   void verify() const { assert(java_code() == Bytecodes::_anewarray, "check anewarray"); }
 
   // Returns index
-  long index() const   { return java_hwrd_at(1); };
+  long index() const   { return get_index_u2(Bytecodes::_anewarray); };
 
   // Creation
   inline friend Bytecode_anewarray* Bytecode_anewarray_at(address bcp);
--- a/hotspot/src/share/vm/interpreter/bytecodeStream.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodeStream.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,6 +48,25 @@
       }
     }
   }
-  _code = code;
+  _raw_code = code;
   return code;
 }
+
+#ifdef ASSERT
+void BaseBytecodeStream::assert_raw_index_size(int size) const {
+  if (raw_code() == Bytecodes::_invokedynamic && is_raw()) {
+    // in raw mode, pretend indy is "bJJ__"
+    assert(size == 2, "raw invokedynamic instruction has 2-byte index only");
+  } else {
+    bytecode()->assert_index_size(size, raw_code(), is_wide());
+  }
+}
+
+void BaseBytecodeStream::assert_raw_stream(bool want_raw) const {
+  if (want_raw) {
+    assert( is_raw(), "this function only works on raw streams");
+  } else {
+    assert(!is_raw(), "this function only works on non-raw streams");
+  }
+}
+#endif //ASSERT
--- a/hotspot/src/share/vm/interpreter/bytecodeStream.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodeStream.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,13 +32,13 @@
 // while ((c = s.next()) >= 0) {
 //   ...
 // }
-//
+
 // A RawBytecodeStream is a simple version of BytecodeStream.
 // It is used ONLY when we know the bytecodes haven't been rewritten
-// yet, such as in the rewriter or the verifier. Currently only the
-// verifier uses this class.
+// yet, such as in the rewriter or the verifier.
 
-class RawBytecodeStream: StackObj {
+// Here is the common base class for both RawBytecodeStream and BytecodeStream:
+class BaseBytecodeStream: StackObj {
  protected:
   // stream buffer
   methodHandle    _method;                       // read from method directly
@@ -49,15 +49,17 @@
   int             _end_bci;                      // bci after the current iteration interval
 
   // last bytecode read
-  Bytecodes::Code _code;
+  Bytecodes::Code _raw_code;
   bool            _is_wide;
+  bool            _is_raw;                       // false in 'cooked' BytecodeStream
+
+  // Construction
+  BaseBytecodeStream(methodHandle method) : _method(method) {
+    set_interval(0, _method->code_size());
+    _is_raw = false;
+  }
 
  public:
-  // Construction
-  RawBytecodeStream(methodHandle method) : _method(method) {
-    set_interval(0, _method->code_size());
-  }
-
   // Iteration control
   void set_interval(int beg_bci, int end_bci) {
     // iterate over the interval [beg_bci, end_bci)
@@ -72,6 +74,46 @@
     set_interval(beg_bci, _method->code_size());
   }
 
+  bool is_raw() const { return _is_raw; }
+
+  // Stream attributes
+  methodHandle    method() const                 { return _method; }
+
+  int             bci() const                    { return _bci; }
+  int             next_bci() const               { return _next_bci; }
+  int             end_bci() const                { return _end_bci; }
+
+  Bytecodes::Code raw_code() const               { return _raw_code; }
+  bool            is_wide() const                { return _is_wide; }
+  int             instruction_size() const       { return (_next_bci - _bci); }
+  bool            is_last_bytecode() const       { return _next_bci >= _end_bci; }
+
+  address         bcp() const                    { return method()->code_base() + _bci; }
+  Bytecode*       bytecode() const               { return Bytecode_at(bcp()); }
+
+  // State changes
+  void            set_next_bci(int bci)          { assert(0 <= bci && bci <= method()->code_size(), "illegal bci"); _next_bci = bci; }
+
+  // Bytecode-specific attributes
+  int             dest() const                   { return bci() + bytecode()->get_offset_s2(raw_code()); }
+  int             dest_w() const                 { return bci() + bytecode()->get_offset_s4(raw_code()); }
+
+  // One-byte indices.
+  int             get_index_u1() const           { assert_raw_index_size(1); return *(jubyte*)(bcp()+1); }
+
+ protected:
+  void assert_raw_index_size(int size) const NOT_DEBUG_RETURN;
+  void assert_raw_stream(bool want_raw) const NOT_DEBUG_RETURN;
+};
+
+class RawBytecodeStream: public BaseBytecodeStream {
+ public:
+  // Construction
+  RawBytecodeStream(methodHandle method) : BaseBytecodeStream(method) {
+    _is_raw = true;
+  }
+
+ public:
   // Iteration
   // Use raw_next() rather than next() for faster method reference
   Bytecodes::Code raw_next() {
@@ -80,7 +122,7 @@
     _bci = _next_bci;
     assert(!is_last_bytecode(), "caller should check is_last_bytecode()");
 
-    address bcp = RawBytecodeStream::bcp();
+    address bcp = this->bcp();
     code        = Bytecodes::code_or_bp_at(bcp);
 
     // set next bytecode position
@@ -90,84 +132,49 @@
              && code != Bytecodes::_lookupswitch, "can't be special bytecode");
       _is_wide = false;
       _next_bci += l;
-      _code = code;
+      _raw_code = code;
       return code;
-    } else if (code == Bytecodes::_wide && _bci + 1 >= _end_bci) {
-      return Bytecodes::_illegal;
     } else {
       return raw_next_special(code);
     }
   }
   Bytecodes::Code raw_next_special(Bytecodes::Code code);
 
-  // Stream attributes
-  methodHandle    method() const                 { return _method; }
-
-  int             bci() const                    { return _bci; }
-  int             next_bci() const               { return _next_bci; }
-  int             end_bci() const                { return _end_bci; }
-
-  Bytecodes::Code code() const                   { return _code; }
-  bool            is_wide() const                { return _is_wide; }
-  int             instruction_size() const       { return (_next_bci - _bci); }
-  bool            is_last_bytecode() const       { return _next_bci >= _end_bci; }
-
-  address         bcp() const                    { return method()->code_base() + _bci; }
-  address         next_bcp()                     { return method()->code_base() + _next_bci; }
-
-  // State changes
-  void            set_next_bci(int bci)          { assert(0 <= bci && bci <= method()->code_size(), "illegal bci"); _next_bci = bci; }
-
-  // Bytecode-specific attributes
-  int             dest() const                   { return bci() + (short)Bytes::get_Java_u2(bcp() + 1); }
-  int             dest_w() const                 { return bci() + (int  )Bytes::get_Java_u4(bcp() + 1); }
-
-  // Unsigned indices, widening
-  int             get_index() const              { assert_index_size(is_wide() ? 2 : 1);
-                                                   return (is_wide()) ? Bytes::get_Java_u2(bcp() + 2) : bcp()[1]; }
-  int             get_index_big() const          { assert_index_size(2);
-                                                   return (int)Bytes::get_Java_u2(bcp() + 1);  }
-  int             get_index_int() const          { return has_giant_index() ? get_index_giant() : get_index_big(); }
-  int             get_index_giant() const        { assert_index_size(4); return Bytes::get_native_u4(bcp() + 1); }
-  int             has_giant_index() const        { return (code() == Bytecodes::_invokedynamic); }
+  // Unsigned indices, widening, with no swapping of bytes
+  int             get_index() const          { return (is_wide()) ? get_index_u2_raw(bcp() + 2) : get_index_u1(); }
+  // Get an unsigned 2-byte index, with no swapping of bytes.
+  int             get_index_u2() const       { assert(!is_wide(), ""); return get_index_u2_raw(bcp() + 1);  }
 
  private:
-  void assert_index_size(int required_size) const {
-#ifdef ASSERT
-    int isize = instruction_size() - (int)_is_wide - 1;
-    if (isize == 2 && code() == Bytecodes::_iinc)
-      isize = 1;
-    else if (isize <= 2)
-      ;                         // no change
-    else if (has_giant_index())
-      isize = 4;
-    else
-      isize = 2;
-    assert(isize == required_size, "wrong index size");
-#endif
+  int get_index_u2_raw(address p) const {
+    assert_raw_index_size(2); assert_raw_stream(true);
+    return Bytes::get_Java_u2(p);
   }
 };
 
 // In BytecodeStream, non-java bytecodes will be translated into the
 // corresponding java bytecodes.
 
-class BytecodeStream: public RawBytecodeStream {
+class BytecodeStream: public BaseBytecodeStream {
+  Bytecodes::Code _code;
+
  public:
   // Construction
-  BytecodeStream(methodHandle method) : RawBytecodeStream(method) { }
+  BytecodeStream(methodHandle method) : BaseBytecodeStream(method) { }
 
   // Iteration
   Bytecodes::Code next() {
-    Bytecodes::Code code;
+    Bytecodes::Code raw_code, code;
     // set reading position
     _bci = _next_bci;
     if (is_last_bytecode()) {
       // indicate end of bytecode stream
-      code = Bytecodes::_illegal;
+      raw_code = code = Bytecodes::_illegal;
     } else {
       // get bytecode
-      address bcp = BytecodeStream::bcp();
-      code        = Bytecodes::java_code_at(bcp);
+      address bcp = this->bcp();
+      raw_code = Bytecodes::code_at(bcp);
+      code = Bytecodes::java_code(raw_code);
       // set next bytecode position
       //
       // note that we cannot advance before having the
@@ -181,14 +188,29 @@
       _is_wide      = false;
       // check for special (uncommon) cases
       if (code == Bytecodes::_wide) {
-        code = (Bytecodes::Code)bcp[1];
+        raw_code = (Bytecodes::Code)bcp[1];
+        code = raw_code;  // wide BCs are always Java-normal
         _is_wide = true;
       }
       assert(Bytecodes::is_java_code(code), "sanity check");
     }
+    _raw_code = raw_code;
     _code = code;
     return _code;
   }
 
   bool            is_active_breakpoint() const   { return Bytecodes::is_active_breakpoint_at(bcp()); }
+  Bytecodes::Code code() const                   { return _code; }
+
+  // Unsigned indices, widening
+  int             get_index() const              { return is_wide() ? bytecode()->get_index_u2(raw_code(), true) : get_index_u1(); }
+  // Get an unsigned 2-byte index, swapping the bytes if necessary.
+  int             get_index_u2() const           { assert_raw_stream(false);
+                                                   return bytecode()->get_index_u2(raw_code(), false); }
+  // Get an unsigned 2-byte index in native order.
+  int             get_index_u2_cpcache() const   { assert_raw_stream(false);
+                                                   return bytecode()->get_index_u2_cpcache(raw_code()); }
+  int             get_index_u4() const           { assert_raw_stream(false);
+                                                   return bytecode()->get_index_u4(raw_code()); }
+  bool            has_index_u4() const           { return bytecode()->has_index_u4(raw_code()); }
 };
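
For illustration, the iteration idiom from the header comment above, written out as a minimal sketch (assumes a valid methodHandle and HotSpot's internal headers; BytecodeStream::next() yields Java-normalized codes, while RawBytecodeStream::raw_next() would yield the raw, possibly rewritten codes):

    void list_bytecodes(methodHandle mh) {
      BytecodeStream s(mh);          // 'cooked' stream over [0, code_size)
      Bytecodes::Code c;
      while ((c = s.next()) >= 0) {  // next() returns _illegal (negative) at the end
        tty->print_cr("%4d: %s", s.bci(), Bytecodes::name(c));
      }
    }
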
--- a/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,7 @@
   // (Also, ensure that occasional false positives are benign.)
   methodOop _current_method;
   bool      _is_wide;
+  Bytecodes::Code _code;
   address   _next_pc;                // current decoding position
 
   void      align()                  { _next_pc = (address)round_to((intptr_t)_next_pc, sizeof(jint)); }
@@ -46,23 +47,26 @@
   short     get_short()              { short i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
   int       get_int()                { int i=Bytes::get_Java_u4(_next_pc); _next_pc+=4; return i; }
 
-  int       get_index()              { return *(address)_next_pc++; }
-  int       get_big_index()          { int i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
-  int       get_giant_index()        { int i=Bytes::get_native_u4(_next_pc); _next_pc+=4; return i; }
-  int       get_index_special()      { return (is_wide()) ? get_big_index() : get_index(); }
+  int       get_index_u1()           { return *(address)_next_pc++; }
+  int       get_index_u2()           { int i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
+  int       get_index_u2_cpcache()   { int i=Bytes::get_native_u2(_next_pc); _next_pc+=2; return i + constantPoolOopDesc::CPCACHE_INDEX_TAG; }
+  int       get_index_u4()           { int i=Bytes::get_native_u4(_next_pc); _next_pc+=4; return i; }
+  int       get_index_special()      { return (is_wide()) ? get_index_u2() : get_index_u1(); }
   methodOop method()                 { return _current_method; }
   bool      is_wide()                { return _is_wide; }
+  Bytecodes::Code raw_code()         { return Bytecodes::Code(_code); }
 
 
-  bool      check_index(int i, bool in_cp_cache, int& cp_index, outputStream* st = tty);
+  bool      check_index(int i, int& cp_index, outputStream* st = tty);
   void      print_constant(int i, outputStream* st = tty);
   void      print_field_or_method(int i, outputStream* st = tty);
-  void      print_attributes(Bytecodes::Code code, int bci, outputStream* st = tty);
+  void      print_attributes(int bci, outputStream* st = tty);
   void      bytecode_epilog(int bci, outputStream* st = tty);
 
  public:
   BytecodePrinter() {
     _is_wide = false;
+    _code = Bytecodes::_illegal;
   }
 
   // This method is called while executing the raw bytecodes, so none of
@@ -89,7 +93,8 @@
     } else {
       code = Bytecodes::code_at(bcp);
     }
-    int bci = bcp - method->code_base();
+    _code = code;
+    int bci = bcp - method->code_base();
     st->print("[%d] ", (int) Thread::current()->osthread()->thread_id());
     if (Verbose) {
       st->print("%8d  %4d  " INTPTR_FORMAT " " INTPTR_FORMAT " %s",
@@ -99,10 +104,11 @@
            BytecodeCounter::counter_value(), bci, Bytecodes::name(code));
     }
     _next_pc = is_wide() ? bcp+2 : bcp+1;
-    print_attributes(code, bci);
+    print_attributes(bci);
     // Set is_wide for the next one, since the caller of this doesn't skip
     // the next bytecode.
     _is_wide = (code == Bytecodes::_wide);
+    _code = Bytecodes::_illegal;
   }
 
   // Used for methodOop::print_codes().  The input bcp comes from
@@ -116,6 +122,7 @@
     if (is_wide()) {
       code = Bytecodes::code_at(bcp+1);
     }
+    _code = code;
     int bci = bcp - method->code_base();
     // Print bytecode index and name
     if (is_wide()) {
@@ -124,7 +131,7 @@
       st->print("%d %s", bci, Bytecodes::name(code));
     }
     _next_pc = is_wide() ? bcp+2 : bcp+1;
-    print_attributes(code, bci, st);
+    print_attributes(bci, st);
     bytecode_epilog(bci, st);
   }
 };
@@ -185,12 +192,13 @@
   }
 }
 
-bool BytecodePrinter::check_index(int i, bool in_cp_cache, int& cp_index, outputStream* st) {
+bool BytecodePrinter::check_index(int i, int& cp_index, outputStream* st) {
   constantPoolOop constants = method()->constants();
   int ilimit = constants->length(), climit = 0;
+  Bytecodes::Code code = raw_code();
 
   constantPoolCacheOop cache = NULL;
-  if (in_cp_cache) {
+  if (Bytecodes::uses_cp_cache(code)) {
     cache = constants->cache();
     if (cache != NULL) {
       //climit = cache->length();  // %%% private!
@@ -201,7 +209,7 @@
     }
   }
 
-  if (in_cp_cache && constantPoolCacheOopDesc::is_secondary_index(i)) {
+  if (cache != NULL && constantPoolCacheOopDesc::is_secondary_index(i)) {
     i = constantPoolCacheOopDesc::decode_secondary_index(i);
     st->print(" secondary cache[%d] of", i);
     if (i >= 0 && i < climit) {
@@ -218,8 +226,6 @@
   }
 
   if (cache != NULL) {
-    i = Bytes::swap_u2(i);
-    if (WizardMode)  st->print(" (swap=%d)", i);
     goto check_cache_index;
   }
 
@@ -234,6 +240,17 @@
   return false;
 
  check_cache_index:
+#ifdef ASSERT
+  {
+    const int CPCACHE_INDEX_TAG = constantPoolOopDesc::CPCACHE_INDEX_TAG;
+    if (i >= CPCACHE_INDEX_TAG && i < climit + CPCACHE_INDEX_TAG) {
+      i -= CPCACHE_INDEX_TAG;
+    } else {
+      st->print_cr(" CP[%d] missing bias?", i);
+      return false;
+    }
+  }
+#endif //ASSERT
   if (i >= 0 && i < climit) {
     if (cache->entry_at(i)->is_secondary_entry()) {
       st->print_cr(" secondary entry?");
@@ -248,7 +265,7 @@
 
 void BytecodePrinter::print_constant(int i, outputStream* st) {
   int orig_i = i;
-  if (!check_index(orig_i, false, i, st))  return;
+  if (!check_index(orig_i, i, st))  return;
 
   constantPoolOop constants = method()->constants();
   constantTag tag = constants->tag_at(i);
@@ -279,7 +296,7 @@
 
 void BytecodePrinter::print_field_or_method(int i, outputStream* st) {
   int orig_i = i;
-  if (!check_index(orig_i, true, i, st))  return;
+  if (!check_index(orig_i, i, st))  return;
 
   constantPoolOop constants = method()->constants();
   constantTag tag = constants->tag_at(i);
@@ -303,9 +320,9 @@
 }
 
 
-void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci, outputStream* st) {
+void BytecodePrinter::print_attributes(int bci, outputStream* st) {
   // Show attributes of pre-rewritten codes
-  code = Bytecodes::java_code(code);
+  Bytecodes::Code code = Bytecodes::java_code(raw_code());
   // If the code doesn't have any fields there's nothing to print.
   // note this is ==1 because the tableswitch and lookupswitch are
   // zero size (for some reason) and we want to print stuff out for them.
@@ -323,12 +340,12 @@
       st->print_cr(" " INT32_FORMAT, get_short());
       break;
     case Bytecodes::_ldc:
-      print_constant(get_index(), st);
+      print_constant(get_index_u1(), st);
       break;
 
     case Bytecodes::_ldc_w:
     case Bytecodes::_ldc2_w:
-      print_constant(get_big_index(), st);
+      print_constant(get_index_u2(), st);
       break;
 
     case Bytecodes::_iload:
@@ -352,7 +369,7 @@
       break;
 
     case Bytecodes::_newarray: {
-        BasicType atype = (BasicType)get_index();
+        BasicType atype = (BasicType)get_index_u1();
         const char* str = type2name(atype);
         if (str == NULL || atype == T_OBJECT || atype == T_ARRAY) {
           assert(false, "Unidentified basic type");
@@ -361,15 +378,15 @@
       }
       break;
     case Bytecodes::_anewarray: {
-        int klass_index = get_big_index();
+        int klass_index = get_index_u2();
         constantPoolOop constants = method()->constants();
         symbolOop name = constants->klass_name_at(klass_index);
         st->print_cr(" %s ", name->as_C_string());
       }
       break;
     case Bytecodes::_multianewarray: {
-        int klass_index = get_big_index();
-        int nof_dims = get_index();
+        int klass_index = get_index_u2();
+        int nof_dims = get_index_u1();
         constantPoolOop constants = method()->constants();
         symbolOop name = constants->klass_name_at(klass_index);
         st->print_cr(" %s %d", name->as_C_string(), nof_dims);
@@ -451,31 +468,31 @@
     case Bytecodes::_getstatic:
     case Bytecodes::_putfield:
     case Bytecodes::_getfield:
-      print_field_or_method(get_big_index(), st);
+      print_field_or_method(get_index_u2_cpcache(), st);
       break;
 
     case Bytecodes::_invokevirtual:
     case Bytecodes::_invokespecial:
     case Bytecodes::_invokestatic:
-      print_field_or_method(get_big_index(), st);
+      print_field_or_method(get_index_u2_cpcache(), st);
       break;
 
     case Bytecodes::_invokeinterface:
-      { int i = get_big_index();
-        int n = get_index();
-        get_index();            // ignore zero byte
+      { int i = get_index_u2_cpcache();
+        int n = get_index_u1();
+        get_byte();            // ignore zero byte
         print_field_or_method(i, st);
       }
       break;
 
     case Bytecodes::_invokedynamic:
-      print_field_or_method(get_giant_index(), st);
+      print_field_or_method(get_index_u4(), st);
       break;
 
     case Bytecodes::_new:
     case Bytecodes::_checkcast:
     case Bytecodes::_instanceof:
-      { int i = get_big_index();
+      { int i = get_index_u2();
         constantPoolOop constants = method()->constants();
         symbolOop name = constants->klass_name_at(i);
         st->print_cr(" %d <%s>", i, name->as_C_string());
--- a/hotspot/src/share/vm/interpreter/bytecodes.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodes.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,14 +37,11 @@
 
 bool            Bytecodes::_is_initialized = false;
 const char*     Bytecodes::_name          [Bytecodes::number_of_codes];
-const char*     Bytecodes::_format        [Bytecodes::number_of_codes];
-const char*     Bytecodes::_wide_format   [Bytecodes::number_of_codes];
 BasicType       Bytecodes::_result_type   [Bytecodes::number_of_codes];
 s_char          Bytecodes::_depth         [Bytecodes::number_of_codes];
-u_char          Bytecodes::_length        [Bytecodes::number_of_codes];
-bool            Bytecodes::_can_trap      [Bytecodes::number_of_codes];
+u_char          Bytecodes::_lengths       [Bytecodes::number_of_codes];
 Bytecodes::Code Bytecodes::_java_code     [Bytecodes::number_of_codes];
-bool            Bytecodes::_can_rewrite   [Bytecodes::number_of_codes];
+u_short         Bytecodes::_flags         [(1<<BitsPerByte)*2];
 
 
 Bytecodes::Code Bytecodes::code_at(methodOop method, int bci) {
@@ -91,6 +88,7 @@
       return (len > 0 && len == (int)len) ? len : -1;
     }
   }
+  // Note: Length functions must return <=0 for invalid bytecodes.
   return 0;
 }
 
@@ -124,15 +122,22 @@
 
 void Bytecodes::def(Code code, const char* name, const char* format, const char* wide_format, BasicType result_type, int depth, bool can_trap, Code java_code) {
   assert(wide_format == NULL || format != NULL, "short form must exist if there's a wide form");
+  int len  = (format      != NULL ? (int) strlen(format)      : 0);
+  int wlen = (wide_format != NULL ? (int) strlen(wide_format) : 0);
   _name          [code] = name;
-  _format        [code] = format;
-  _wide_format   [code] = wide_format;
   _result_type   [code] = result_type;
   _depth         [code] = depth;
-  _can_trap      [code] = can_trap;
-  _length        [code] = format != NULL ? (u_char)strlen(format) : 0;
+  _lengths       [code] = (wlen << 4) | (len & 0xF);
   _java_code     [code] = java_code;
-  if (java_code != code)  _can_rewrite[java_code] = true;
+  int bc_flags = 0;
+  if (can_trap)           bc_flags |= _bc_can_trap;
+  if (java_code != code)  bc_flags |= _bc_can_rewrite;
+  _flags[(u1)code+0*(1<<BitsPerByte)] = compute_flags(format,      bc_flags);
+  _flags[(u1)code+1*(1<<BitsPerByte)] = compute_flags(wide_format, bc_flags);
+  assert(is_defined(code)      == (format != NULL),      "");
+  assert(wide_is_defined(code) == (wide_format != NULL), "");
+  assert(length_for(code)      == len, "");
+  assert(wide_length_for(code) == wlen, "");
 }
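
To make the nibble packing above concrete: iload is defined with format "bi" (length 2) and wide format "wbii" (length 4), so def() stores (4 << 4) | 2 == 0x42 in _lengths, and the accessors unpack it. A sketch, assuming Bytecodes::initialize() has run:

    assert(Bytecodes::length_for(Bytecodes::_iload)      == 2, "low nibble: strlen(\"bi\")");
    assert(Bytecodes::wide_length_for(Bytecodes::_iload) == 4, "high nibble: strlen(\"wbii\")");
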
 
 
@@ -140,23 +145,92 @@
 //
 // b: bytecode
 // c: signed constant, Java byte-ordering
-// i: unsigned index , Java byte-ordering
-// j: unsigned index , native byte-ordering
-// o: branch offset  , Java byte-ordering
+// i: unsigned local index, Java byte-ordering (I = native byte ordering)
+// j: unsigned CP cache index, Java byte-ordering (J = native byte ordering)
+// k: unsigned CP index, Java byte-ordering
+// o: branch offset, Java byte-ordering
 // _: unused/ignored
 // w: wide bytecode
 //
-// Note: Right now the format strings are used for 2 purposes:
+// Note: The format strings are used for 2 purposes:
 //       1. to specify the length of the bytecode
 //          (= number of characters in format string)
-//       2. to specify the bytecode attributes
-//
-//       The bytecode attributes are currently used only for bytecode tracing
-//       (see BytecodeTracer); thus if more specific format information is
-//       used, one would also have to adjust the bytecode tracer.
+//       2. to derive bytecode format flags (_fmt_has_k, etc.)
 //
 // Note: For bytecodes with variable length, the format string is the empty string.
 
+int Bytecodes::compute_flags(const char* format, int more_flags) {
+  if (format == NULL)  return 0;  // not even more_flags
+  int flags = more_flags;
+  const char* fp = format;
+  switch (*fp) {
+  case '\0':
+    flags |= _fmt_not_simple; // but variable
+    break;
+  case 'b':
+    flags |= _fmt_not_variable;  // but simple
+    ++fp;  // skip 'b'
+    break;
+  case 'w':
+    flags |= _fmt_not_variable | _fmt_not_simple;
+    ++fp;  // skip 'w'
+    guarantee(*fp == 'b', "wide format must start with 'wb'");
+    ++fp;  // skip 'b'
+    break;
+  }
+
+  int has_nbo = 0, has_jbo = 0, has_size = 0;
+  for (;;) {
+    int this_flag = 0;
+    char fc = *fp++;
+    switch (fc) {
+    case '\0':  // end of string
+      assert(flags == (jchar)flags, "change _format_flags");
+      return flags;
+
+    case '_': continue;         // ignore these
+
+    case 'j': this_flag = _fmt_has_j; has_jbo = 1; break;
+    case 'k': this_flag = _fmt_has_k; has_jbo = 1; break;
+    case 'i': this_flag = _fmt_has_i; has_jbo = 1; break;
+    case 'c': this_flag = _fmt_has_c; has_jbo = 1; break;
+    case 'o': this_flag = _fmt_has_o; has_jbo = 1; break;
+
+    // uppercase versions mark native byte order (from Rewriter)
+    // actually, only the 'J' case happens currently
+    case 'J': this_flag = _fmt_has_j; has_nbo = 1; break;
+    case 'K': this_flag = _fmt_has_k; has_nbo = 1; break;
+    case 'I': this_flag = _fmt_has_i; has_nbo = 1; break;
+    case 'C': this_flag = _fmt_has_c; has_nbo = 1; break;
+    case 'O': this_flag = _fmt_has_o; has_nbo = 1; break;
+    default:  guarantee(false, "bad char in format");
+    }
+
+    flags |= this_flag;
+
+    guarantee(!(has_jbo && has_nbo), "mixed byte orders in format");
+    if (has_nbo)
+      flags |= _fmt_has_nbo;
+
+    int this_size = 1;
+    if (*fp == fc) {
+      // advance beyond run of the same characters
+      this_size = 2;
+      while (*++fp == fc)  this_size++;
+      switch (this_size) {
+      case 2: flags |= _fmt_has_u2; break;
+      case 4: flags |= _fmt_has_u4; break;
+      default: guarantee(false, "bad rep count in format");
+      }
+    }
+    guarantee(has_size == 0 ||                     // no field yet
+              this_size == has_size ||             // same size
+              (this_size < has_size && *fp == '\0'), // last field can be short
+              "mixed field sizes in format");
+    has_size = this_size;
+  }
+}
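
As a worked example of the derivation above (illustrative only, not part of the changeset): "bJJ" contributes _fmt_not_variable for the leading 'b', then _fmt_has_j and _fmt_has_nbo for the uppercase fields plus _fmt_has_u2 for the run of two, which is exactly the _fmt_bJJ syndrome declared in bytecodes.hpp; "bkk" likewise reduces to _fmt_bkk:

    assert(Bytecodes::compute_flags("bJJ") == Bytecodes::_fmt_bJJ, "getfield-style format");
    assert(Bytecodes::compute_flags("bkk") == Bytecodes::_fmt_bkk, "checkcast-style format");
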
+
 void Bytecodes::initialize() {
   if (_is_initialized) return;
   assert(number_of_codes <= 256, "too many bytecodes");
@@ -191,9 +265,9 @@
   def(_dconst_1            , "dconst_1"            , "b"    , NULL    , T_DOUBLE ,  2, false);
   def(_bipush              , "bipush"              , "bc"   , NULL    , T_INT    ,  1, false);
   def(_sipush              , "sipush"              , "bcc"  , NULL    , T_INT    ,  1, false);
-  def(_ldc                 , "ldc"                 , "bi"   , NULL    , T_ILLEGAL,  1, true );
-  def(_ldc_w               , "ldc_w"               , "bii"  , NULL    , T_ILLEGAL,  1, true );
-  def(_ldc2_w              , "ldc2_w"              , "bii"  , NULL    , T_ILLEGAL,  2, true );
+  def(_ldc                 , "ldc"                 , "bk"   , NULL    , T_ILLEGAL,  1, true );
+  def(_ldc_w               , "ldc_w"               , "bkk"  , NULL    , T_ILLEGAL,  1, true );
+  def(_ldc2_w              , "ldc2_w"              , "bkk"  , NULL    , T_ILLEGAL,  2, true );
   def(_iload               , "iload"               , "bi"   , "wbii"  , T_INT    ,  1, false);
   def(_lload               , "lload"               , "bi"   , "wbii"  , T_LONG   ,  2, false);
   def(_fload               , "fload"               , "bi"   , "wbii"  , T_FLOAT  ,  1, false);
@@ -351,26 +425,26 @@
   def(_dreturn             , "dreturn"             , "b"    , NULL    , T_DOUBLE , -2, true);
   def(_areturn             , "areturn"             , "b"    , NULL    , T_OBJECT , -1, true);
   def(_return              , "return"              , "b"    , NULL    , T_VOID   ,  0, true);
-  def(_getstatic           , "getstatic"           , "bjj"  , NULL    , T_ILLEGAL,  1, true );
-  def(_putstatic           , "putstatic"           , "bjj"  , NULL    , T_ILLEGAL, -1, true );
-  def(_getfield            , "getfield"            , "bjj"  , NULL    , T_ILLEGAL,  0, true );
-  def(_putfield            , "putfield"            , "bjj"  , NULL    , T_ILLEGAL, -2, true );
-  def(_invokevirtual       , "invokevirtual"       , "bjj"  , NULL    , T_ILLEGAL, -1, true);
-  def(_invokespecial       , "invokespecial"       , "bjj"  , NULL    , T_ILLEGAL, -1, true);
-  def(_invokestatic        , "invokestatic"        , "bjj"  , NULL    , T_ILLEGAL,  0, true);
-  def(_invokeinterface     , "invokeinterface"     , "bjj__", NULL    , T_ILLEGAL, -1, true);
-  def(_invokedynamic       , "invokedynamic"       , "bjjjj", NULL    , T_ILLEGAL,  0, true );
-  def(_new                 , "new"                 , "bii"  , NULL    , T_OBJECT ,  1, true );
+  def(_getstatic           , "getstatic"           , "bJJ"  , NULL    , T_ILLEGAL,  1, true );
+  def(_putstatic           , "putstatic"           , "bJJ"  , NULL    , T_ILLEGAL, -1, true );
+  def(_getfield            , "getfield"            , "bJJ"  , NULL    , T_ILLEGAL,  0, true );
+  def(_putfield            , "putfield"            , "bJJ"  , NULL    , T_ILLEGAL, -2, true );
+  def(_invokevirtual       , "invokevirtual"       , "bJJ"  , NULL    , T_ILLEGAL, -1, true);
+  def(_invokespecial       , "invokespecial"       , "bJJ"  , NULL    , T_ILLEGAL, -1, true);
+  def(_invokestatic        , "invokestatic"        , "bJJ"  , NULL    , T_ILLEGAL,  0, true);
+  def(_invokeinterface     , "invokeinterface"     , "bJJ__", NULL    , T_ILLEGAL, -1, true);
+  def(_invokedynamic       , "invokedynamic"       , "bJJJJ", NULL    , T_ILLEGAL,  0, true );
+  def(_new                 , "new"                 , "bkk"  , NULL    , T_OBJECT ,  1, true );
   def(_newarray            , "newarray"            , "bc"   , NULL    , T_OBJECT ,  0, true );
-  def(_anewarray           , "anewarray"           , "bii"  , NULL    , T_OBJECT ,  0, true );
+  def(_anewarray           , "anewarray"           , "bkk"  , NULL    , T_OBJECT ,  0, true );
   def(_arraylength         , "arraylength"         , "b"    , NULL    , T_VOID   ,  0, true );
   def(_athrow              , "athrow"              , "b"    , NULL    , T_VOID   , -1, true );
-  def(_checkcast           , "checkcast"           , "bii"  , NULL    , T_OBJECT ,  0, true );
-  def(_instanceof          , "instanceof"          , "bii"  , NULL    , T_INT    ,  0, true );
+  def(_checkcast           , "checkcast"           , "bkk"  , NULL    , T_OBJECT ,  0, true );
+  def(_instanceof          , "instanceof"          , "bkk"  , NULL    , T_INT    ,  0, true );
   def(_monitorenter        , "monitorenter"        , "b"    , NULL    , T_VOID   , -1, true );
   def(_monitorexit         , "monitorexit"         , "b"    , NULL    , T_VOID   , -1, true );
   def(_wide                , "wide"                , ""     , NULL    , T_VOID   ,  0, false);
-  def(_multianewarray      , "multianewarray"      , "biic" , NULL    , T_OBJECT ,  1, true );
+  def(_multianewarray      , "multianewarray"      , "bkkc" , NULL    , T_OBJECT ,  1, true );
   def(_ifnull              , "ifnull"              , "boo"  , NULL    , T_VOID   , -1, false);
   def(_ifnonnull           , "ifnonnull"           , "boo"  , NULL    , T_VOID   , -1, false);
   def(_goto_w              , "goto_w"              , "boooo", NULL    , T_VOID   ,  0, false);
@@ -380,35 +454,35 @@
   //  JVM bytecodes
   //  bytecode               bytecode name           format   wide f.   result tp  stk traps  std code
 
-  def(_fast_agetfield      , "fast_agetfield"      , "bjj"  , NULL    , T_OBJECT ,  0, true , _getfield       );
-  def(_fast_bgetfield      , "fast_bgetfield"      , "bjj"  , NULL    , T_INT    ,  0, true , _getfield       );
-  def(_fast_cgetfield      , "fast_cgetfield"      , "bjj"  , NULL    , T_CHAR   ,  0, true , _getfield       );
-  def(_fast_dgetfield      , "fast_dgetfield"      , "bjj"  , NULL    , T_DOUBLE ,  0, true , _getfield       );
-  def(_fast_fgetfield      , "fast_fgetfield"      , "bjj"  , NULL    , T_FLOAT  ,  0, true , _getfield       );
-  def(_fast_igetfield      , "fast_igetfield"      , "bjj"  , NULL    , T_INT    ,  0, true , _getfield       );
-  def(_fast_lgetfield      , "fast_lgetfield"      , "bjj"  , NULL    , T_LONG   ,  0, true , _getfield       );
-  def(_fast_sgetfield      , "fast_sgetfield"      , "bjj"  , NULL    , T_SHORT  ,  0, true , _getfield       );
+  def(_fast_agetfield      , "fast_agetfield"      , "bJJ"  , NULL    , T_OBJECT ,  0, true , _getfield       );
+  def(_fast_bgetfield      , "fast_bgetfield"      , "bJJ"  , NULL    , T_INT    ,  0, true , _getfield       );
+  def(_fast_cgetfield      , "fast_cgetfield"      , "bJJ"  , NULL    , T_CHAR   ,  0, true , _getfield       );
+  def(_fast_dgetfield      , "fast_dgetfield"      , "bJJ"  , NULL    , T_DOUBLE ,  0, true , _getfield       );
+  def(_fast_fgetfield      , "fast_fgetfield"      , "bJJ"  , NULL    , T_FLOAT  ,  0, true , _getfield       );
+  def(_fast_igetfield      , "fast_igetfield"      , "bJJ"  , NULL    , T_INT    ,  0, true , _getfield       );
+  def(_fast_lgetfield      , "fast_lgetfield"      , "bJJ"  , NULL    , T_LONG   ,  0, true , _getfield       );
+  def(_fast_sgetfield      , "fast_sgetfield"      , "bJJ"  , NULL    , T_SHORT  ,  0, true , _getfield       );
 
-  def(_fast_aputfield      , "fast_aputfield"      , "bjj"  , NULL    , T_OBJECT ,  0, true , _putfield       );
-  def(_fast_bputfield      , "fast_bputfield"      , "bjj"  , NULL    , T_INT    ,  0, true , _putfield       );
-  def(_fast_cputfield      , "fast_cputfield"      , "bjj"  , NULL    , T_CHAR   ,  0, true , _putfield       );
-  def(_fast_dputfield      , "fast_dputfield"      , "bjj"  , NULL    , T_DOUBLE ,  0, true , _putfield       );
-  def(_fast_fputfield      , "fast_fputfield"      , "bjj"  , NULL    , T_FLOAT  ,  0, true , _putfield       );
-  def(_fast_iputfield      , "fast_iputfield"      , "bjj"  , NULL    , T_INT    ,  0, true , _putfield       );
-  def(_fast_lputfield      , "fast_lputfield"      , "bjj"  , NULL    , T_LONG   ,  0, true , _putfield       );
-  def(_fast_sputfield      , "fast_sputfield"      , "bjj"  , NULL    , T_SHORT  ,  0, true , _putfield       );
+  def(_fast_aputfield      , "fast_aputfield"      , "bJJ"  , NULL    , T_OBJECT ,  0, true , _putfield       );
+  def(_fast_bputfield      , "fast_bputfield"      , "bJJ"  , NULL    , T_INT    ,  0, true , _putfield       );
+  def(_fast_cputfield      , "fast_cputfield"      , "bJJ"  , NULL    , T_CHAR   ,  0, true , _putfield       );
+  def(_fast_dputfield      , "fast_dputfield"      , "bJJ"  , NULL    , T_DOUBLE ,  0, true , _putfield       );
+  def(_fast_fputfield      , "fast_fputfield"      , "bJJ"  , NULL    , T_FLOAT  ,  0, true , _putfield       );
+  def(_fast_iputfield      , "fast_iputfield"      , "bJJ"  , NULL    , T_INT    ,  0, true , _putfield       );
+  def(_fast_lputfield      , "fast_lputfield"      , "bJJ"  , NULL    , T_LONG   ,  0, true , _putfield       );
+  def(_fast_sputfield      , "fast_sputfield"      , "bJJ"  , NULL    , T_SHORT  ,  0, true , _putfield       );
 
   def(_fast_aload_0        , "fast_aload_0"        , "b"    , NULL    , T_OBJECT ,  1, true , _aload_0        );
-  def(_fast_iaccess_0      , "fast_iaccess_0"      , "b_jj" , NULL    , T_INT    ,  1, true , _aload_0        );
-  def(_fast_aaccess_0      , "fast_aaccess_0"      , "b_jj" , NULL    , T_OBJECT ,  1, true , _aload_0        );
-  def(_fast_faccess_0      , "fast_faccess_0"      , "b_jj" , NULL    , T_OBJECT ,  1, true , _aload_0        );
+  def(_fast_iaccess_0      , "fast_iaccess_0"      , "b_JJ" , NULL    , T_INT    ,  1, true , _aload_0        );
+  def(_fast_aaccess_0      , "fast_aaccess_0"      , "b_JJ" , NULL    , T_OBJECT ,  1, true , _aload_0        );
+  def(_fast_faccess_0      , "fast_faccess_0"      , "b_JJ" , NULL    , T_OBJECT ,  1, true , _aload_0        );
 
   def(_fast_iload          , "fast_iload"          , "bi"   , NULL    , T_INT    ,  1, false, _iload);
   def(_fast_iload2         , "fast_iload2"         , "bi_i" , NULL    , T_INT    ,  2, false, _iload);
   def(_fast_icaload        , "fast_icaload"        , "bi_"  , NULL    , T_INT    ,  0, false, _iload);
 
   // Faster method invocation.
-  def(_fast_invokevfinal   , "fast_invokevfinal"   , "bjj"  , NULL    , T_ILLEGAL, -1, true, _invokevirtual   );
+  def(_fast_invokevfinal   , "fast_invokevfinal"   , "bJJ"  , NULL    , T_ILLEGAL, -1, true, _invokevirtual   );
 
   def(_fast_linearswitch   , "fast_linearswitch"   , ""     , NULL    , T_VOID   , -1, false, _lookupswitch   );
   def(_fast_binaryswitch   , "fast_binaryswitch"   , ""     , NULL    , T_VOID   , -1, false, _lookupswitch   );
--- a/hotspot/src/share/vm/interpreter/bytecodes.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodes.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -280,17 +280,43 @@
     number_of_codes
   };
 
+  // Flag bits derived from format strings, can_trap, can_rewrite, etc.:
+  enum Flags {
+    // semantic flags:
+    _bc_can_trap      = 1<<0,     // bytecode execution can trap or block
+    _bc_can_rewrite   = 1<<1,     // bytecode execution has an alternate form
+
+    // format bits (determined only by the format string):
+    _fmt_has_c        = 1<<2,     // constant, such as sipush "bcc"
+    _fmt_has_j        = 1<<3,     // constant pool cache index, such as getfield "bjj"
+    _fmt_has_k        = 1<<4,     // constant pool index, such as ldc "bk"
+    _fmt_has_i        = 1<<5,     // local index, such as iload
+    _fmt_has_o        = 1<<6,     // offset, such as ifeq
+    _fmt_has_nbo      = 1<<7,     // contains native-order field(s)
+    _fmt_has_u2       = 1<<8,     // contains double-byte field(s)
+    _fmt_has_u4       = 1<<9,     // contains quad-byte field
+    _fmt_not_variable = 1<<10,    // not of variable length (simple or wide)
+    _fmt_not_simple   = 1<<11,    // either wide or variable length
+    _all_fmt_bits     = (_fmt_not_simple*2 - _fmt_has_c),
+
+    // Example derived format syndromes:
+    _fmt_b      = _fmt_not_variable,
+    _fmt_bc     = _fmt_b | _fmt_has_c,
+    _fmt_bi     = _fmt_b | _fmt_has_i,
+    _fmt_bkk    = _fmt_b | _fmt_has_k | _fmt_has_u2,
+    _fmt_bJJ    = _fmt_b | _fmt_has_j | _fmt_has_u2 | _fmt_has_nbo,
+    _fmt_bo2    = _fmt_b | _fmt_has_o | _fmt_has_u2,
+    _fmt_bo4    = _fmt_b | _fmt_has_o | _fmt_has_u4
+  };
+
  private:
   static bool        _is_initialized;
   static const char* _name          [number_of_codes];
-  static const char* _format        [number_of_codes];
-  static const char* _wide_format   [number_of_codes];
   static BasicType   _result_type   [number_of_codes];
   static s_char      _depth         [number_of_codes];
-  static u_char      _length        [number_of_codes];
-  static bool        _can_trap      [number_of_codes];
+  static u_char      _lengths       [number_of_codes];
   static Code        _java_code     [number_of_codes];
-  static bool        _can_rewrite   [number_of_codes];
+  static u_short     _flags         [(1<<BitsPerByte)*2]; // all second page for wide formats
 
   static void        def(Code code, const char* name, const char* format, const char* wide_format, BasicType result_type, int depth, bool can_trap);
   static void        def(Code code, const char* name, const char* format, const char* wide_format, BasicType result_type, int depth, bool can_trap, Code java_code);
@@ -322,24 +348,20 @@
    static Code       non_breakpoint_code_at(address bcp, methodOop method = NULL);
 
   // Bytecode attributes
-  static bool        is_defined     (int  code)    { return 0 <= code && code < number_of_codes && _format[code] != NULL; }
-  static bool        wide_is_defined(int  code)    { return is_defined(code) && _wide_format[code] != NULL; }
+  static bool        is_defined     (int  code)    { return 0 <= code && code < number_of_codes && flags(code, false) != 0; }
+  static bool        wide_is_defined(int  code)    { return is_defined(code) && flags(code, true) != 0; }
   static const char* name           (Code code)    { check(code);      return _name          [code]; }
-  static const char* format         (Code code)    { check(code);      return _format        [code]; }
-  static const char* wide_format    (Code code)    { return _wide_format[code]; }
   static BasicType   result_type    (Code code)    { check(code);      return _result_type   [code]; }
   static int         depth          (Code code)    { check(code);      return _depth         [code]; }
-  static int         length_for     (Code code)    { return _length[code]; }
-  static bool        can_trap       (Code code)    { check(code);      return _can_trap      [code]; }
+  // Note: Length functions must return <=0 for invalid bytecodes.
+  // Calling check(code) in length functions would throw an unwanted assert.
+  static int         length_for     (Code code)    { /*no check*/      return _lengths       [code] & 0xF; }
+  static int         wide_length_for(Code code)    { /*no check*/      return _lengths       [code] >> 4; }
+  static bool        can_trap       (Code code)    { check(code);      return has_all_flags(code, _bc_can_trap, false); }
   static Code        java_code      (Code code)    { check(code);      return _java_code     [code]; }
-  static bool        can_rewrite    (Code code)    { check(code);      return _can_rewrite   [code]; }
-  static int         wide_length_for(Code code)    {
-    if (!is_defined(code)) {
-      return 0;
-    }
-    const char* wf = wide_format(code);
-    return (wf == NULL) ? 0 : (int)strlen(wf);
-  }
+  static bool        can_rewrite    (Code code)    { check(code);      return has_all_flags(code, _bc_can_rewrite, false); }
+  static bool        native_byte_order(Code code)  { check(code);      return has_all_flags(code, _fmt_has_nbo, false); }
+  static bool        uses_cp_cache  (Code code)    { check(code);      return has_all_flags(code, _fmt_has_j, false); }
   // if 'end' is provided, it indicates the end of the code buffer which
   // should not be read past when parsing.
   static int         special_length_at(address bcp, address end = NULL);
@@ -355,6 +377,16 @@
 
   static bool        is_zero_const  (Code code)    { return (code == _aconst_null || code == _iconst_0
                                                            || code == _fconst_0 || code == _dconst_0); }
+  static int         compute_flags  (const char* format, int more_flags = 0);  // compute the flags
+  static int         flags          (int code, bool is_wide) {
+    assert(code == (u_char)code, "must be a byte");
+    return _flags[code + (is_wide ? (1<<BitsPerByte) : 0)];
+  }
+  static int         format_bits    (Code code, bool is_wide) { return flags(code, is_wide) & _all_fmt_bits; }
+  static bool        has_all_flags  (Code code, int test_flags, bool is_wide) {
+    return (flags(code, is_wide) & test_flags) == test_flags;
+  }
+
   // Initialization
   static void        initialize     ();
 };
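
For illustration, how the flag table is meant to be queried once initialize() has run (a sketch; the second page of _flags, selected by is_wide, carries the wide-format bits):

    assert(Bytecodes::uses_cp_cache(Bytecodes::_getfield),     "format \"bJJ\" has a CP-cache index");
    assert(Bytecodes::native_byte_order(Bytecodes::_getfield), "'J' fields are native-ordered after rewriting");
    assert(!Bytecodes::uses_cp_cache(Bytecodes::_ldc),         "format \"bk\" indexes the constant pool directly");
    assert(Bytecodes::wide_is_defined(Bytecodes::_iload),      "wide page populated from \"wbii\"");
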
--- a/hotspot/src/share/vm/interpreter/interpreter.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreter.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -226,8 +226,9 @@
 // not yet been executed (in Java semantics, not in actual operation).
 bool AbstractInterpreter::is_not_reached(methodHandle method, int bci) {
   address bcp = method->bcp_from(bci);
+  Bytecodes::Code code = Bytecodes::code_at(bcp, method());
 
-  if (!Bytecode_at(bcp)->must_rewrite()) {
+  if (!Bytecode_at(bcp)->must_rewrite(code)) {
     // might have been reached
     return false;
   }
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -63,7 +63,7 @@
 IRT_ENTRY(void, InterpreterRuntime::ldc(JavaThread* thread, bool wide))
   // access constant pool
   constantPoolOop pool = method(thread)->constants();
-  int index = wide ? two_byte_index(thread) : one_byte_index(thread);
+  int index = wide ? get_index_u2(thread, Bytecodes::_ldc_w) : get_index_u1(thread, Bytecodes::_ldc);
   constantTag tag = pool->tag_at(index);
 
   if (tag.is_unresolved_klass() || tag.is_klass()) {
@@ -135,7 +135,7 @@
 IRT_ENTRY(void, InterpreterRuntime::multianewarray(JavaThread* thread, jint* first_size_address))
   // We may want to pass in more arguments - could make this slightly faster
   constantPoolOop constants = method(thread)->constants();
-  int          i = two_byte_index(thread);
+  int          i = get_index_u2(thread, Bytecodes::_multianewarray);
   klassOop klass = constants->klass_at(i, CHECK);
   int   nof_dims = number_of_dimensions(thread);
   assert(oop(klass)->is_klass(), "not a class");
@@ -169,7 +169,7 @@
 // Quicken instance-of and check-cast bytecodes
 IRT_ENTRY(void, InterpreterRuntime::quicken_io_cc(JavaThread* thread))
   // Force resolving; quicken the bytecode
-  int which = two_byte_index(thread);
+  int which = get_index_u2(thread, Bytecodes::_checkcast);
   constantPoolOop cpool = method(thread)->constants();
   // We'd expect to assert that we're only here to quicken bytecodes, but in a multithreaded
   // program we might have seen an unquick'd bytecode in the interpreter but have another
@@ -463,7 +463,7 @@
 
   {
     JvmtiHideSingleStepping jhss(thread);
-    LinkResolver::resolve_field(info, pool, two_byte_index(thread),
+    LinkResolver::resolve_field(info, pool, get_index_u2_cpcache(thread, bytecode),
                                 bytecode, false, CHECK);
   } // end JvmtiHideSingleStepping
 
@@ -634,7 +634,7 @@
   {
     JvmtiHideSingleStepping jhss(thread);
     LinkResolver::resolve_invoke(info, receiver, pool,
-                                 two_byte_index(thread), bytecode, CHECK);
+                                 get_index_u2_cpcache(thread, bytecode), bytecode, CHECK);
     if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
       int retry_count = 0;
       while (info.resolved_method()->is_old()) {
@@ -645,7 +645,7 @@
                   "Could not resolve to latest version of redefined method");
         // method is redefined in the middle of resolve so re-try.
         LinkResolver::resolve_invoke(info, receiver, pool,
-                                     two_byte_index(thread), bytecode, CHECK);
+                                     get_index_u2_cpcache(thread, bytecode), bytecode, CHECK);
       }
     }
   } // end JvmtiHideSingleStepping
@@ -704,7 +704,7 @@
     caller_bci = caller_method->bci_from(caller_bcp);
     site_index = Bytes::get_native_u4(caller_bcp+1);
   }
-  assert(site_index == four_byte_index(thread), "");
+  assert(site_index == InterpreterRuntime::bytecode(thread)->get_index_u4(bytecode), "");
   assert(constantPoolCacheOopDesc::is_secondary_index(site_index), "proper format");
   // there is a second CPC entries that is of interest; it caches signature info:
   int main_index = pool->cache()->secondary_entry_at(site_index)->main_entry_index();
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,9 +40,13 @@
     return Bytecodes::code_at(bcp(thread), method(thread));
   }
   static bool      already_resolved(JavaThread *thread) { return cache_entry(thread)->is_resolved(code(thread)); }
-  static int       one_byte_index(JavaThread *thread)   { return bcp(thread)[1]; }
-  static int       two_byte_index(JavaThread *thread)   { return Bytes::get_Java_u2(bcp(thread) + 1); }
-  static int       four_byte_index(JavaThread *thread)  { return Bytes::get_native_u4(bcp(thread) + 1); }
+  static Bytecode* bytecode(JavaThread *thread)      { return Bytecode_at(bcp(thread)); }
+  static int       get_index_u1(JavaThread *thread, Bytecodes::Code bc)
+                                                        { return bytecode(thread)->get_index_u1(bc); }
+  static int       get_index_u2(JavaThread *thread, Bytecodes::Code bc)
+                                                        { return bytecode(thread)->get_index_u2(bc); }
+  static int       get_index_u2_cpcache(JavaThread *thread, Bytecodes::Code bc)
+                                                        { return bytecode(thread)->get_index_u2_cpcache(bc); }
   static int       number_of_dimensions(JavaThread *thread)  { return bcp(thread)[3]; }
 
   static ConstantPoolCacheEntry* cache_entry_at(JavaThread *thread, int i)  { return method(thread)->constants()->cache()->entry_at(i); }
--- a/hotspot/src/share/vm/interpreter/rewriter.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/rewriter.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -103,16 +103,15 @@
 
 
 // Rewrite a classfile-order CP index into a native-order CPC index.
-int Rewriter::rewrite_member_reference(address bcp, int offset) {
+void Rewriter::rewrite_member_reference(address bcp, int offset) {
   address p = bcp + offset;
   int  cp_index    = Bytes::get_Java_u2(p);
   int  cache_index = cp_entry_to_cp_cache(cp_index);
   Bytes::put_native_u2(p, cache_index);
-  return cp_index;
 }
 
 
-void Rewriter::rewrite_invokedynamic(address bcp, int offset, int delete_me) {
+void Rewriter::rewrite_invokedynamic(address bcp, int offset) {
   address p = bcp + offset;
   assert(p[-1] == Bytecodes::_invokedynamic, "");
   int cp_index = Bytes::get_Java_u2(p);
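
For illustration, the byte-order flip at the heart of rewrite_member_reference above, shown on a free-standing two-byte buffer (a sketch; the real code additionally maps the CP index to a CP-cache index via cp_entry_to_cp_cache first):

    u1 buf[2] = { 0x12, 0x34 };              // classfile (big-endian) order: index 0x1234
    int cp_index = Bytes::get_Java_u2(buf);  // always reads big-endian => 0x1234
    Bytes::put_native_u2(buf, cp_index);     // stores host order; on x86 buf becomes { 0x34, 0x12 }
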
@@ -178,7 +177,7 @@
         case Bytecodes::_lookupswitch   : {
 #ifndef CC_INTERP
           Bytecode_lookupswitch* bc = Bytecode_lookupswitch_at(bcp);
-          bc->set_code(
+          (*bcp) = (
             bc->number_of_pairs() < BinarySwitchThreshold
             ? Bytecodes::_fast_linearswitch
             : Bytecodes::_fast_binaryswitch
@@ -197,7 +196,7 @@
           rewrite_member_reference(bcp, prefix_length+1);
           break;
         case Bytecodes::_invokedynamic:
-          rewrite_invokedynamic(bcp, prefix_length+1, int(sizeof"@@@@DELETE ME"));
+          rewrite_invokedynamic(bcp, prefix_length+1);
           break;
         case Bytecodes::_jsr            : // fall through
         case Bytecodes::_jsr_w          : nof_jsrs++;                   break;
@@ -308,5 +307,19 @@
 
     // Set up method entry points for compiler and interpreter.
     m->link_method(m, CHECK);
+
+#ifdef ASSERT
+    if (StressMethodComparator) {
+      static int nmc = 0;
+      for (int j = i; j >= 0 && j >= i-4; j--) {
+        if ((++nmc % 1000) == 0)  tty->print_cr("Have run MethodComparator %d times...", nmc);
+        bool z = MethodComparator::methods_EMCP(m(), (methodOop)_methods->obj_at(j));
+        if (j == i && !z) {
+          tty->print("MethodComparator FAIL: "); m->print(); m->print_codes();
+          assert(z, "method must compare equal to itself");
+        }
+      }
+    }
+#endif //ASSERT
   }
 }
--- a/hotspot/src/share/vm/interpreter/rewriter.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/rewriter.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,8 +64,8 @@
   void scan_method(methodOop m);
   methodHandle rewrite_jsrs(methodHandle m, TRAPS);
   void rewrite_Object_init(methodHandle m, TRAPS);
-  int  rewrite_member_reference(address bcp, int offset);
-  void rewrite_invokedynamic(address bcp, int offset, int cp_index);
+  void rewrite_member_reference(address bcp, int offset);
+  void rewrite_invokedynamic(address bcp, int offset);
 
  public:
   // Driver routine:
--- a/hotspot/src/share/vm/interpreter/templateTable.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/templateTable.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -434,15 +434,15 @@
   def(Bytecodes::_dreturn             , ____|disp|clvm|____, dtos, dtos, _return             , dtos         );
   def(Bytecodes::_areturn             , ____|disp|clvm|____, atos, atos, _return             , atos         );
   def(Bytecodes::_return              , ____|disp|clvm|____, vtos, vtos, _return             , vtos         );
-  def(Bytecodes::_getstatic           , ubcp|____|clvm|____, vtos, vtos, getstatic           ,  1           );
-  def(Bytecodes::_putstatic           , ubcp|____|clvm|____, vtos, vtos, putstatic           ,  2           );
-  def(Bytecodes::_getfield            , ubcp|____|clvm|____, vtos, vtos, getfield            ,  1           );
-  def(Bytecodes::_putfield            , ubcp|____|clvm|____, vtos, vtos, putfield            ,  2           );
-  def(Bytecodes::_invokevirtual       , ubcp|disp|clvm|____, vtos, vtos, invokevirtual       ,  2           );
-  def(Bytecodes::_invokespecial       , ubcp|disp|clvm|____, vtos, vtos, invokespecial       ,  1           );
-  def(Bytecodes::_invokestatic        , ubcp|disp|clvm|____, vtos, vtos, invokestatic        ,  1           );
-  def(Bytecodes::_invokeinterface     , ubcp|disp|clvm|____, vtos, vtos, invokeinterface     ,  1           );
-  def(Bytecodes::_invokedynamic       , ubcp|disp|clvm|____, vtos, vtos, invokedynamic       ,  1           );
+  def(Bytecodes::_getstatic           , ubcp|____|clvm|____, vtos, vtos, getstatic           , f1_byte      );
+  def(Bytecodes::_putstatic           , ubcp|____|clvm|____, vtos, vtos, putstatic           , f2_byte      );
+  def(Bytecodes::_getfield            , ubcp|____|clvm|____, vtos, vtos, getfield            , f1_byte      );
+  def(Bytecodes::_putfield            , ubcp|____|clvm|____, vtos, vtos, putfield            , f2_byte      );
+  def(Bytecodes::_invokevirtual       , ubcp|disp|clvm|____, vtos, vtos, invokevirtual       , f2_byte      );
+  def(Bytecodes::_invokespecial       , ubcp|disp|clvm|____, vtos, vtos, invokespecial       , f1_byte      );
+  def(Bytecodes::_invokestatic        , ubcp|disp|clvm|____, vtos, vtos, invokestatic        , f1_byte      );
+  def(Bytecodes::_invokeinterface     , ubcp|disp|clvm|____, vtos, vtos, invokeinterface     , f1_byte      );
+  def(Bytecodes::_invokedynamic       , ubcp|disp|clvm|____, vtos, vtos, invokedynamic       , f1_oop       );
   def(Bytecodes::_new                 , ubcp|____|clvm|____, vtos, atos, _new                ,  _           );
   def(Bytecodes::_newarray            , ubcp|____|clvm|____, itos, atos, newarray            ,  _           );
   def(Bytecodes::_anewarray           , ubcp|____|clvm|____, itos, atos, anewarray           ,  _           );
@@ -502,7 +502,7 @@
   def(Bytecodes::_fast_iload2         , ubcp|____|____|____, vtos, itos, fast_iload2         ,  _       );
   def(Bytecodes::_fast_icaload        , ubcp|____|____|____, vtos, itos, fast_icaload        ,  _       );
 
-  def(Bytecodes::_fast_invokevfinal   , ubcp|disp|clvm|____, vtos, vtos, fast_invokevfinal   ,  2           );
+  def(Bytecodes::_fast_invokevfinal   , ubcp|disp|clvm|____, vtos, vtos, fast_invokevfinal   , f2_byte      );
 
   def(Bytecodes::_fast_linearswitch   , ubcp|disp|____|____, itos, vtos, fast_linearswitch   ,  _           );
   def(Bytecodes::_fast_binaryswitch   , ubcp|disp|____|____, itos, vtos, fast_binaryswitch   ,  _           );
--- a/hotspot/src/share/vm/interpreter/templateTable.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/templateTable.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -73,6 +73,7 @@
  public:
   enum Operation { add, sub, mul, div, rem, _and, _or, _xor, shl, shr, ushr };
   enum Condition { equal, not_equal, less, less_equal, greater, greater_equal };
+  enum CacheByte { f1_byte = 1, f2_byte = 2, f1_oop = 0x11 };  // byte_no codes
 
  private:
   static bool            _is_initialized;        // true if TemplateTable has been initialized
@@ -244,13 +245,18 @@
 
   static void _return(TosState state);
 
-  static void resolve_cache_and_index(int byte_no, Register cache, Register index);
+  static void resolve_cache_and_index(int byte_no,       // one of 1,2,11
+                                      Register result,   // either noreg or output for f1/f2
+                                      Register cache,    // output for CP cache
+                                      Register index,    // output for CP index
+                                      size_t index_size); // one of 1,2,4
   static void load_invoke_cp_cache_entry(int byte_no,
                                          Register method,
                                          Register itable_index,
                                          Register flags,
-                                         bool is_invokevirtual = false,
-                                         bool is_virtual_final = false);
+                                         bool is_invokevirtual,
+                                         bool is_virtual_final,
+                                         bool is_invokedynamic);
   static void load_field_cp_cache_entry(Register obj,
                                         Register cache,
                                         Register index,
--- a/hotspot/src/share/vm/memory/iterator.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/memory/iterator.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,8 +58,8 @@
 }
 
 void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
-  if (!cb->is_nmethod())  return;
-  nmethod* nm = (nmethod*) cb;
+  nmethod* nm = cb->as_nmethod_or_null();
+  if (nm == NULL)  return;
   if (!nm->test_set_oops_do_mark()) {
     NOT_PRODUCT(if (TraceScavenge)  nm->print_on(tty, "oops_do, 1st visit\n"));
     do_newly_marked_nmethod(nm);
@@ -74,11 +74,14 @@
 
 void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
   if (!_do_marking) {
-    NOT_PRODUCT(if (TraceScavenge && Verbose && cb->is_nmethod())  ((nmethod*)cb)->print_on(tty, "oops_do, unmarked visit\n"));
+    nmethod* nm = cb->as_nmethod_or_null();
+    NOT_PRODUCT(if (TraceScavenge && Verbose && nm != NULL)  nm->print_on(tty, "oops_do, unmarked visit\n"));
     // This assert won't work, since there are lots of mini-passes
     // (mostly in debug mode) that co-exist with marking phases.
     //assert(!(cb->is_nmethod() && ((nmethod*)cb)->test_oops_do_mark()), "found marked nmethod during mark-free phase");
-    cb->oops_do(_cl);
+    if (nm != NULL) {
+      nm->oops_do(_cl);
+    }
   } else {
     MarkingCodeBlobClosure::do_code_blob(cb);
   }
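
The change above replaces a manual is_nmethod()/cast pair with as_nmethod_or_null. A hedged sketch of that checked-downcast idiom, using illustrative stand-in types rather than the real CodeBlob hierarchy:

    #include <cassert>

    // Fold the type test and the cast into one call that yields nullptr on
    // mismatch, so a blob can never be cast without being checked.
    struct nmethod;
    struct CodeBlob {
      virtual ~CodeBlob() {}
      virtual bool is_nmethod() const { return false; }
      nmethod* as_nmethod_or_null();
    };
    struct nmethod : CodeBlob {
      bool is_nmethod() const override { return true; }
    };
    nmethod* CodeBlob::as_nmethod_or_null() {
      return is_nmethod() ? static_cast<nmethod*>(this) : nullptr;
    }

    int main() {
      CodeBlob blob;
      nmethod  nm;
      assert(blob.as_nmethod_or_null() == nullptr);  // rejected, no raw cast needed
      assert(nm.as_nmethod_or_null()   == &nm);      // accepted and already typed
      return 0;
    }
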
--- a/hotspot/src/share/vm/memory/space.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/memory/space.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -861,9 +861,9 @@
   }
   size = align_object_size(size);
 
-  const size_t min_int_array_size = typeArrayOopDesc::header_size(T_INT);
-  if (size >= min_int_array_size) {
-    size_t length = (size - min_int_array_size) * (HeapWordSize / sizeof(jint));
+  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
+  if (size >= (size_t)align_object_size(array_header_size)) {
+    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
     // allocate uninitialized int array
     typeArrayOop t = (typeArrayOop) allocate(size);
     assert(t != NULL, "allocation should succeed");
@@ -871,7 +871,7 @@
     t->set_klass(Universe::intArrayKlassObj());
     t->set_length((int)length);
   } else {
-    assert((int) size == instanceOopDesc::header_size(),
+    assert(size == CollectedHeap::min_fill_size(),
            "size for smallest fake object doesn't match");
     instanceOop obj = (instanceOop) allocate(size);
     obj->set_mark(markOopDesc::prototype());
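
The filler-object math above packs a gap of heap words as a fake int array. A small arithmetic sketch of the length computation, under stated assumptions (64-bit heap words and a 2-word int-array header; both values are illustrative, not taken from the source):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main() {
      const size_t HeapWordSize      = 8;  // assumption: 64-bit heap words
      const size_t array_header_size = 2;  // assumption: int-array header, in words
      const size_t jints_per_word    = HeapWordSize / sizeof(int32_t);  // 2

      size_t gap = 5;                      // words to fill
      assert(gap >= array_header_size);
      size_t length = (gap - array_header_size) * jints_per_word;  // 6 elements
      // header (2 words) + 6 * 4 bytes (3 words) covers the 5-word gap exactly
      assert(array_header_size + (length * sizeof(int32_t)) / HeapWordSize == gap);
      return 0;
    }
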
--- a/hotspot/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -31,7 +31,7 @@
     // Skip mangling the space corresponding to the object header to
     // ensure that the returned space is not considered parsable by
     // any concurrent GC thread.
-    size_t hdr_size = CollectedHeap::min_fill_size();
+    size_t hdr_size = oopDesc::header_size();
     Copy::fill_to_words(obj + hdr_size, size - hdr_size, badHeapWordVal);
 #endif // ASSERT
     // This addition is safe because we know that top is
--- a/hotspot/src/share/vm/memory/universe.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/memory/universe.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -748,7 +748,7 @@
 // 4Gb
 static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
 // 32Gb
-static const uint64_t OopEncodingHeapMax = NarrowOopHeapMax << LogMinObjAlignmentInBytes;
+// OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
 
 char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
   size_t base = 0;
@@ -1261,7 +1261,7 @@
 
   // decide which low-order bits we require to be clear:
   size_t alignSize = MinObjAlignmentInBytes;
-  size_t min_object_size = oopDesc::header_size();
+  size_t min_object_size = CollectedHeap::min_fill_size();
 
   // make an inclusive limit:
   uintptr_t max = (uintptr_t)high_boundary - min_object_size*wordSize;
--- a/hotspot/src/share/vm/oops/arrayOop.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/oops/arrayOop.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -92,7 +92,7 @@
   static int header_size(BasicType type) {
     size_t typesize_in_bytes = header_size_in_bytes();
     return (int)(Universe::element_type_should_be_aligned(type)
-      ? align_object_size(typesize_in_bytes/HeapWordSize)
+      ? align_object_offset(typesize_in_bytes/HeapWordSize)
       : typesize_in_bytes/HeapWordSize);
   }
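
The fix above swaps align_object_size for align_object_offset; both are round-up-to-a-power-of-two operations, differing only in which granularity they use now that object alignment is configurable. A minimal sketch of that round-up, with illustrative values:

    #include <cassert>
    #include <cstddef>

    // Round value up to a power-of-two alignment (in words here).
    static size_t align_up(size_t value, size_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of 2");
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      assert(align_up(3, 2) == 4);  // a 3-word header padded to a 2-word boundary
      assert(align_up(4, 2) == 4);  // already aligned, unchanged
      assert(align_up(5, 4) == 8);  // a coarser alignment pads further
      return 0;
    }
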
 
--- a/hotspot/src/share/vm/oops/constantPoolKlass.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/oops/constantPoolKlass.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -310,15 +310,12 @@
   Klass::oop_print_on(obj, st);
   constantPoolOop cp = constantPoolOop(obj);
   if (cp->flags() != 0) {
-    st->print(" - flags : 0x%x", cp->flags());
+    st->print(" - flags: 0x%x", cp->flags());
     if (cp->has_pseudo_string()) st->print(" has_pseudo_string");
     if (cp->has_invokedynamic()) st->print(" has_invokedynamic");
     st->cr();
   }
-
-  // Temp. remove cache so we can do lookups with original indicies.
-  constantPoolCacheHandle cache (THREAD, cp->cache());
-  cp->set_cache(NULL);
+  st->print_cr(" - cache: " INTPTR_FORMAT, cp->cache());
 
   for (int index = 1; index < cp->length(); index++) {      // Index 0 is unused
     st->print(" - %3d : ", index);
@@ -334,8 +331,8 @@
       case JVM_CONSTANT_Fieldref :
       case JVM_CONSTANT_Methodref :
       case JVM_CONSTANT_InterfaceMethodref :
-        st->print("klass_index=%d", cp->klass_ref_index_at(index));
-        st->print(" name_and_type_index=%d", cp->name_and_type_ref_index_at(index));
+        st->print("klass_index=%d", cp->uncached_klass_ref_index_at(index));
+        st->print(" name_and_type_index=%d", cp->uncached_name_and_type_ref_index_at(index));
         break;
       case JVM_CONSTANT_UnresolvedString :
       case JVM_CONSTANT_String :
@@ -382,9 +379,6 @@
     st->cr();
   }
   st->cr();
-
-  // Restore cache
-  cp->set_cache(cache());
 }
 
 #endif
@@ -398,6 +392,9 @@
   cp->print_address_on(st);
   st->print(" for ");
   cp->pool_holder()->print_value_on(st);
+  if (cp->cache() != NULL) {
+    st->print(" cache=" PTR_FORMAT, cp->cache());
+  }
 }
 
 const char* constantPoolKlass::internal_name() const {
--- a/hotspot/src/share/vm/oops/constantPoolOop.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/oops/constantPoolOop.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -297,11 +297,9 @@
 
 
 int constantPoolOopDesc::remap_instruction_operand_from_cache(int operand) {
-  // Operand was fetched by a stream using get_Java_u2, yet was stored
-  // by Rewriter::rewrite_member_reference in native order.
-  // So now we have to fix the damage by swapping back to native order.
-  assert((int)(u2)operand == operand, "clean u2");
-  int cpc_index = Bytes::swap_u2(operand);
+  int cpc_index = operand;
+  DEBUG_ONLY(cpc_index -= CPCACHE_INDEX_TAG);
+  assert((int)(u2)cpc_index == cpc_index, "clean u2");
   int member_index = cache()->entry_at(cpc_index)->constant_pool_index();
   return member_index;
 }
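
CPCACHE_INDEX_TAG (defined in constantPoolOop.hpp below) biases cache indices in debug builds so they can never be mistaken for raw constant-pool indices. A sketch of the idea; the add_tag/strip_tag helper names are illustrative, not from the source:

    #include <cassert>

    enum { CPCACHE_INDEX_TAG = 0x10000 };  // pushes cache indices out of u2 range

    static int add_tag(int cpc_index) { return cpc_index + CPCACHE_INDEX_TAG; }

    static int strip_tag(int operand) {
      int cpc_index = operand - CPCACHE_INDEX_TAG;
      // after stripping, a genuine cache index fits in a u2 again
      assert((int)(unsigned short)cpc_index == cpc_index && "clean u2");
      return cpc_index;
    }

    int main() {
      int biased = add_tag(7);
      assert(biased == 0x10007);       // an unbiased raw CP index would assert
      assert(strip_tag(biased) == 7);
      return 0;
    }
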
--- a/hotspot/src/share/vm/oops/constantPoolOop.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/oops/constantPoolOop.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -434,6 +434,10 @@
   // Debugging
   const char* printable_name_at(int which) PRODUCT_RETURN0;
 
+#ifdef ASSERT
+  enum { CPCACHE_INDEX_TAG = 0x10000 };  // helps keep CP cache indices distinct from CP indices
+#endif //ASSERT
+
  private:
 
   symbolOop impl_name_ref_at(int which, bool uncached);
@@ -441,7 +445,7 @@
   int       impl_klass_ref_index_at(int which, bool uncached);
   int       impl_name_and_type_ref_index_at(int which, bool uncached);
 
-  int remap_instruction_operand_from_cache(int operand);
+  int remap_instruction_operand_from_cache(int operand);  // operand must be biased by CPCACHE_INDEX_TAG
 
   // Used while constructing constant pool (only by ClassFileParser)
   jint klass_index_at(int which) {
--- a/hotspot/src/share/vm/oops/generateOopMap.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/oops/generateOopMap.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1254,7 +1254,7 @@
       case Bytecodes::_invokestatic:
       case Bytecodes::_invokedynamic:
       case Bytecodes::_invokeinterface:
-        int idx = currentBC->get_index_int();
+        int idx = currentBC->has_index_u4() ? currentBC->get_index_u4() : currentBC->get_index_u2();
         constantPoolOop cp    = method()->constants();
         int nameAndTypeIdx    = cp->name_and_type_ref_index_at(idx);
         int signatureIdx      = cp->signature_ref_index_at(nameAndTypeIdx);
@@ -1286,7 +1286,7 @@
       case Bytecodes::_invokestatic:
       case Bytecodes::_invokedynamic:
       case Bytecodes::_invokeinterface:
-        int idx = currentBC->get_index_int();
+        int idx = currentBC->has_index_u4() ? currentBC->get_index_u4() : currentBC->get_index_u2();
         constantPoolOop cp    = method()->constants();
         int nameAndTypeIdx    = cp->name_and_type_ref_index_at(idx);
         int signatureIdx      = cp->signature_ref_index_at(nameAndTypeIdx);
@@ -1356,8 +1356,8 @@
 
     case Bytecodes::_ldc2_w:            ppush(vvCTS);               break;
 
-    case Bytecodes::_ldc:               do_ldc(itr->get_index(), itr->bci());    break;
-    case Bytecodes::_ldc_w:             do_ldc(itr->get_index_big(), itr->bci());break;
+    case Bytecodes::_ldc:               do_ldc(itr->get_index(),    itr->bci()); break;
+    case Bytecodes::_ldc_w:             do_ldc(itr->get_index_u2(), itr->bci()); break;
 
     case Bytecodes::_iload:
     case Bytecodes::_fload:             ppload(vCTS, itr->get_index()); break;
@@ -1550,17 +1550,17 @@
     case Bytecodes::_jsr_w:             do_jsr(itr->dest_w());       break;
 
     case Bytecodes::_getstatic:         do_field(true,  true,
-                                                 itr->get_index_big(),
+                                                 itr->get_index_u2_cpcache(),
                                                  itr->bci()); break;
-    case Bytecodes::_putstatic:         do_field(false, true,  itr->get_index_big(), itr->bci()); break;
-    case Bytecodes::_getfield:          do_field(true,  false, itr->get_index_big(), itr->bci()); break;
-    case Bytecodes::_putfield:          do_field(false, false, itr->get_index_big(), itr->bci()); break;
+    case Bytecodes::_putstatic:         do_field(false, true,  itr->get_index_u2_cpcache(), itr->bci()); break;
+    case Bytecodes::_getfield:          do_field(true,  false, itr->get_index_u2_cpcache(), itr->bci()); break;
+    case Bytecodes::_putfield:          do_field(false, false, itr->get_index_u2_cpcache(), itr->bci()); break;
 
     case Bytecodes::_invokevirtual:
-    case Bytecodes::_invokespecial:     do_method(false, false, itr->get_index_big(), itr->bci()); break;
-    case Bytecodes::_invokestatic:      do_method(true,  false, itr->get_index_big(), itr->bci()); break;
-    case Bytecodes::_invokedynamic:     do_method(true,  false, itr->get_index_int(), itr->bci()); break;
-    case Bytecodes::_invokeinterface:   do_method(false, true,  itr->get_index_big(), itr->bci()); break;
+    case Bytecodes::_invokespecial:     do_method(false, false, itr->get_index_u2_cpcache(), itr->bci()); break;
+    case Bytecodes::_invokestatic:      do_method(true,  false, itr->get_index_u2_cpcache(), itr->bci()); break;
+    case Bytecodes::_invokedynamic:     do_method(true,  false, itr->get_index_u4(),         itr->bci()); break;
+    case Bytecodes::_invokeinterface:   do_method(false, true,  itr->get_index_u2_cpcache(), itr->bci()); break;
     case Bytecodes::_newarray:
     case Bytecodes::_anewarray:         pp_new_ref(vCTS, itr->bci()); break;
     case Bytecodes::_checkcast:         do_checkcast(); break;
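
The has_index_u4()/get_index_u4() pattern above reflects that a rewritten invokedynamic carries a 4-byte operand while the other invokes keep 2 bytes. A loose sketch of just the width dispatch, with byte order deliberately ignored (both operands are read in host order here, unlike the real bytecode stream accessors):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Read a bytecode operand whose width depends on the instruction.
    static int32_t read_index(const unsigned char* p, bool has_index_u4) {
      if (has_index_u4) {                           // invokedynamic
        uint32_t v; std::memcpy(&v, p, 4); return (int32_t)v;
      }
      uint16_t v; std::memcpy(&v, p, 2); return v;  // every other invoke
    }

    int main() {
      unsigned char ops[4];
      uint32_t wide = 0x12345;                      // does not fit in a u2
      std::memcpy(ops, &wide, 4);
      assert(read_index(ops, true) == 0x12345);
      uint16_t narrow = 42;
      std::memcpy(ops, &narrow, 2);
      assert(read_index(ops, false) == 42);
      return 0;
    }
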
--- a/hotspot/src/share/vm/oops/methodKlass.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/oops/methodKlass.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -237,7 +237,7 @@
   Klass::oop_print_on(obj, st);
   methodOop m = methodOop(obj);
   // get the effect of PrintOopAddress, always, for methods:
-  st->print   (" - this oop:          "INTPTR_FORMAT, (intptr_t)m);
+  st->print_cr(" - this oop:          "INTPTR_FORMAT, (intptr_t)m);
   st->print   (" - method holder:     ");    m->method_holder()->print_value_on(st); st->cr();
   st->print   (" - constants:         "INTPTR_FORMAT" ", (address)m->constants());
   m->constants()->print_value_on(st); st->cr();
--- a/hotspot/src/share/vm/oops/oop.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/oops/oop.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -149,10 +149,6 @@
   // Need this as public for garbage collection.
   template <class T> T* obj_field_addr(int offset) const;
 
-  // Oop encoding heap max
-  static const uint64_t OopEncodingHeapMax =
-              (uint64_t(max_juint) + 1) << LogMinObjAlignmentInBytes;
-
   static bool is_null(oop obj);
   static bool is_null(narrowOop obj);
 
--- a/hotspot/src/share/vm/oops/oop.inline.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -146,8 +146,13 @@
 // offset from the heap base.  Saving the check for null can save instructions
 // in inner GC loops so these are separated.
 
+inline bool check_obj_alignment(oop obj) {
+  return (intptr_t)obj % MinObjAlignmentInBytes == 0;
+}
+
 inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
   assert(!is_null(v), "oop value can never be zero");
+  assert(check_obj_alignment(v), "Address not aligned");
   assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
   address base = Universe::narrow_oop_base();
   int    shift = Universe::narrow_oop_shift();
@@ -167,7 +172,9 @@
   assert(!is_null(v), "narrow oop value can never be zero");
   address base = Universe::narrow_oop_base();
   int    shift = Universe::narrow_oop_shift();
-  return (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
+  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
+  assert(check_obj_alignment(result), "Address not aligned");
+  return result;
 }
 
 inline oop oopDesc::decode_heap_oop(narrowOop v) {
@@ -522,10 +529,6 @@
   return mark()->has_bias_pattern();
 }
 
-inline bool check_obj_alignment(oop obj) {
-  return (intptr_t)obj % MinObjAlignmentInBytes == 0;
-}
-
 
 // used only for asserts
 inline bool oopDesc::is_oop(bool ignore_mark_word) const {
@@ -600,6 +603,8 @@
 
 // Used by scavengers
 inline void oopDesc::forward_to(oop p) {
+  assert(check_obj_alignment(p),
+         "forwarding to something not aligned");
   assert(Universe::heap()->is_in_reserved(p),
          "forwarding to something not in heap");
   markOop m = markOopDesc::encode_pointer_as_mark(p);
@@ -609,6 +614,8 @@
 
 // Used by parallel scavengers
 inline bool oopDesc::cas_forward_to(oop p, markOop compare) {
+  assert(check_obj_alignment(p),
+         "forwarding to something not aligned");
   assert(Universe::heap()->is_in_reserved(p),
          "forwarding to something not in heap");
   markOop m = markOopDesc::encode_pointer_as_mark(p);
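
The new check_obj_alignment asserts guard the compressed-oop arithmetic: encoding shifts an (aligned) offset down by the alignment, decoding shifts it back. A sketch with illustrative values; in HotSpot the base and shift come from Universe::narrow_oop_base()/narrow_oop_shift():

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t base  = 0;      // assumption: zero-based encoding mode
      const unsigned  shift = 3;      // log2 of an 8-byte object alignment
      const unsigned  align = 1u << shift;

      uintptr_t oop_addr = 0x200000;  // some properly aligned heap address
      assert(oop_addr % align == 0);  // what check_obj_alignment verifies

      uint32_t  narrow  = (uint32_t)((oop_addr - base) >> shift);  // encode
      uintptr_t decoded = base + ((uintptr_t)narrow << shift);     // decode
      assert(decoded == oop_addr && decoded % align == 0);
      return 0;
    }
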
--- a/hotspot/src/share/vm/opto/bytecodeInfo.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/opto/bytecodeInfo.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -188,8 +188,8 @@
     return NULL;
   }
 
-  // Always inline MethodHandle methods.
-  if (callee_method->is_method_handle_invoke())
+  // Always inline MethodHandle methods and generated MethodHandle adapters.
+  if (callee_method->is_method_handle_invoke() || callee_method->is_method_handle_adapter())
     return NULL;
 
   // First check all inlining restrictions which are required for correctness
@@ -340,7 +340,7 @@
     Bytecodes::Code call_bc = iter.cur_bc();
     // An invokedynamic instruction does not have a klass.
     if (call_bc != Bytecodes::_invokedynamic) {
-      int index = iter.get_index_int();
+      int index = iter.get_index_u2_cpcache();
       if (!caller_method->is_klass_loaded(index, true)) {
         return false;
       }
--- a/hotspot/src/share/vm/opto/compile.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -2176,14 +2176,14 @@
 
 #ifdef _LP64
   case Op_CastPP:
-    if (n->in(1)->is_DecodeN() && Universe::narrow_oop_use_implicit_null_checks()) {
+    if (n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) {
       Compile* C = Compile::current();
       Node* in1 = n->in(1);
       const Type* t = n->bottom_type();
       Node* new_in1 = in1->clone();
       new_in1->as_DecodeN()->set_type(t);
 
-      if (!Matcher::clone_shift_expressions) {
+      if (!Matcher::narrow_oop_use_complex_address()) {
         //
         // x86, ARM and friends can handle 2 adds in addressing mode
         // and Matcher can fold a DecodeN node into address by using
@@ -2231,8 +2231,12 @@
         new_in2 = in2->in(1);
       } else if (in2->Opcode() == Op_ConP) {
         const Type* t = in2->bottom_type();
-        if (t == TypePtr::NULL_PTR && Universe::narrow_oop_use_implicit_null_checks()) {
-          new_in2 = ConNode::make(C, TypeNarrowOop::NULL_PTR);
+        if (t == TypePtr::NULL_PTR) {
+          // Don't convert a CmpP null check into CmpN if the compressed
+          // oops implicit null check is not generated.
+          // This allows a normal oop implicit null check to be generated.
+          if (Matcher::gen_narrow_oop_implicit_null_checks())
+            new_in2 = ConNode::make(C, TypeNarrowOop::NULL_PTR);
           //
           // This transformation together with CastPP transformation above
           // will generate code for implicit NULL checks for compressed oops.
@@ -2289,9 +2293,9 @@
 
   case Op_DecodeN:
     assert(!n->in(1)->is_EncodeP(), "should be optimized out");
-    // DecodeN could be pinned on Sparc where it can't be fold into
+    // DecodeN could be pinned when it can't be folded into
     // an address expression, see the code for Op_CastPP above.
-    assert(n->in(0) == NULL || !Matcher::clone_shift_expressions, "no control except on sparc");
+    assert(n->in(0) == NULL || !Matcher::narrow_oop_use_complex_address(), "no control");
     break;
 
   case Op_EncodeP: {
@@ -2496,6 +2500,10 @@
     }
   }
 
+  // Skip next transformation if compressed oops are not used.
+  if (!UseCompressedOops || !Matcher::gen_narrow_oop_implicit_null_checks())
+    return;
+
+  // Go over safepoint nodes to skip DecodeN nodes for debug edges.
+  // It could be done for uncommon traps or any safepoints/calls
+  // if the DecodeN node is referenced only in debug info.
--- a/hotspot/src/share/vm/opto/connode.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/opto/connode.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -437,7 +437,7 @@
 // If not converting int->oop, throw away cast after constant propagation
 Node *CastPPNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
   const Type *t = ccp->type(in(1));
-  if (!t->isa_oop_ptr() || (in(1)->is_DecodeN() && Universe::narrow_oop_use_implicit_null_checks())) {
+  if (!t->isa_oop_ptr() || (in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks())) {
     return NULL; // do not transform raw pointers or narrow oops
   }
   return ConstraintCastNode::Ideal_DU_postCCP(ccp);
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -3487,7 +3487,6 @@
 
   Node* tls = __ thread(); // ThreadLocalStorage
 
-  Node* no_ctrl = NULL;
   Node* no_base = __ top();
   float likely  = PROB_LIKELY(0.999);
   float unlikely  = PROB_UNLIKELY(0.999);
@@ -3511,10 +3510,10 @@
   Node* index_adr =  __ AddP(no_base, tls, __ ConX(index_offset));
 
   // Now some values
-
-  Node* index  = __ load(no_ctrl, index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
-  Node* buffer = __ load(no_ctrl, buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
-
+  // Use ctrl to avoid hoisting these values past a safepoint, which could
+  // potentially reset these fields in the JavaThread.
+  Node* index  = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
+  Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
 
   // Convert the store obj pointer to an int prior to doing math on it
   // Must use ctrl to prevent "integerized oop" existing across safepoint
--- a/hotspot/src/share/vm/opto/lcm.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -32,7 +32,8 @@
 // with suitable memory ops nearby.  Use the memory op to do the NULL check.
 // I can generate a memory op if there is not one nearby.
 // The proj is the control projection for the not-null case.
-// The val is the pointer being checked for nullness.
+// The val is the pointer being checked for nullness, or a
+// decodeHeapOop_not_null node if it did not fold into the address.
 void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons) {
   // Assume if null check need for 0 offset then always needed
   // Intel solaris doesn't support any null checks yet and no
@@ -96,6 +97,13 @@
     }
   }
 
+  // Check for a decodeHeapOop_not_null node which did not fold into the address
+  bool is_decoden = ((intptr_t)val) & 1;
+  val = (Node*)(((intptr_t)val) & ~1);
+
+  assert(!is_decoden || (val->in(0) == NULL) && val->is_Mach() &&
+         (val->as_Mach()->ideal_Opcode() == Op_DecodeN), "sanity");
+
   // Search the successor block for a load or store whose base value is also
   // the tested value.  There may be several.
   Node_List *out = new Node_List(Thread::current()->resource_area());
@@ -148,7 +156,8 @@
       if( !mach->needs_anti_dependence_check() )
        continue;               // Not a memory op; skip it
       {
-        // Check that value is used in memory address.
+        // Check that value is used in memory address in
+        // instructions with embedded load (CmpP val1,(val2+off)).
         Node* base;
         Node* index;
         const MachOper* oper = mach->memory_inputs(base, index);
@@ -213,7 +222,11 @@
     uint vidx = 0;              // Capture index of value into memop
     uint j;
     for( j = mach->req()-1; j > 0; j-- ) {
-      if( mach->in(j) == val ) vidx = j;
+      if( mach->in(j) == val ) {
+        vidx = j;
+        // Ignore a DecodeN val, which could be hoisted to where it is needed.
+        if( is_decoden ) continue;
+      }
       // Block of memory-op input
       Block *inb = cfg->_bbs[mach->in(j)->_idx];
       Block *b = this;          // Start from nul check
@@ -270,6 +283,26 @@
   extern int implicit_null_checks;
   implicit_null_checks++;
 
+  if( is_decoden ) {
+    // Check if we need to hoist decodeHeapOop_not_null first.
+    Block *valb = cfg->_bbs[val->_idx];
+    if( this != valb && this->_dom_depth < valb->_dom_depth ) {
+      // Hoist it up to the end of the test block.
+      valb->find_remove(val);
+      this->add_inst(val);
+      cfg->_bbs.map(val->_idx,this);
+      // DecodeN on x86 may kill flags. Check for flag-killing projections
+      // that also need to be hoisted.
+      for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
+        Node* n = val->fast_out(j);
+        if( n->Opcode() == Op_MachProj ) {
+          cfg->_bbs[n->_idx]->find_remove(n);
+          this->add_inst(n);
+          cfg->_bbs.map(n->_idx,this);
+        }
+      }
+    }
+  }
   // Hoist the memory candidate up to the end of the test block.
   Block *old_block = cfg->_bbs[best->_idx];
   old_block->find_remove(best);
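
Both this file and matcher.cpp mark a DecodeN candidate by setting the low bit of its Node pointer (val = (Node*)(((intptr_t)val) | 1)). A minimal sketch of that low-bit tagging, which works because Node pointers are at least word-aligned, leaving bit 0 free; the helper names are illustrative:

    #include <cassert>
    #include <cstdint>

    struct Node { int dummy; };

    static Node* tag(Node* n)       { return (Node*)(((intptr_t)n) | 1); }
    static bool  is_tagged(Node* n) { return (((intptr_t)n) & 1) != 0; }
    static Node* untag(Node* n)     { return (Node*)(((intptr_t)n) & ~(intptr_t)1); }

    int main() {
      Node n;
      Node* val = tag(&n);       // mark: "this is the DecodeN special case"
      assert(is_tagged(val));
      assert(untag(val) == &n);  // the real pointer is recovered before use
      return 0;
    }
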
--- a/hotspot/src/share/vm/opto/matcher.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/opto/matcher.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1334,7 +1334,7 @@
       if( j == max_scan )       // No post-domination before scan end?
         return true;            // Then break the match tree up
     }
-    if (m->is_DecodeN() && Matcher::clone_shift_expressions) {
+    if (m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) {
       // These are commonly used in address expressions and can
       // efficiently fold into them on X64 in some cases.
       return false;
@@ -2110,8 +2110,8 @@
         _null_check_tests.push(proj);
         Node* val = cmp->in(1);
 #ifdef _LP64
-        if (UseCompressedOops && !Matcher::clone_shift_expressions &&
-            val->bottom_type()->isa_narrowoop()) {
+        if (val->bottom_type()->isa_narrowoop() &&
+            !Matcher::narrow_oop_use_complex_address()) {
           //
           // Look for DecodeN node which should be pinned to orig_proj.
           // On platforms (Sparc) which can not handle 2 adds
@@ -2127,6 +2127,9 @@
             if (d->is_DecodeN() && d->in(1) == val) {
               val = d;
               val->set_req(0, NULL); // Unpin now.
+              // Mark this as special case to distinguish from
+              // a regular case: CmpP(DecodeN, NULL).
+              val = (Node*)(((intptr_t)val) | 1);
               break;
             }
           }
@@ -2146,9 +2149,21 @@
   for( uint i=0; i < cnt; i+=2 ) {
     Node *test = _null_check_tests[i];
     Node *val = _null_check_tests[i+1];
+    bool is_decoden = ((intptr_t)val) & 1;
+    val = (Node*)(((intptr_t)val) & ~1);
     if (has_new_node(val)) {
+      Node* new_val = new_node(val);
+      if (is_decoden) {
+        assert(val->is_DecodeN() && val->in(0) == NULL, "sanity");
+        // Note: new_val may have a control edge if
+        // the original ideal node DecodeN was matched before
+        // it was unpinned in Matcher::collect_null_checks().
+        // Unpin the mach node and mark it.
+        new_val->set_req(0, NULL);
+        new_val = (Node*)(((intptr_t)new_val) | 1);
+      }
       // Is a match-tree root, so replace with the matched value
-      _null_check_tests.map(i+1, new_node(val));
+      _null_check_tests.map(i+1, new_val);
     } else {
       // Yank from candidate list
       _null_check_tests.map(i+1,_null_check_tests[--cnt]);
--- a/hotspot/src/share/vm/opto/matcher.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/opto/matcher.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -352,6 +352,38 @@
   // registers?  True for Intel but false for most RISCs
   static const bool clone_shift_expressions;
 
+  static bool narrow_oop_use_complex_address();
+
+  // Generate implicit null check for narrow oops if it can fold
+  // into address expression (x64).
+  //
+  // [R12 + narrow_oop_reg<<3 + offset] // fold into address expression
+  // NullCheck narrow_oop_reg
+  //
+  // When narrow oops can't fold into the address expression (Sparc) and
+  // the base is not null, use decode_not_null and a normal implicit null check.
+  // Note: the decode_not_null node can be used here since it is referenced
+  // only on the non-null path, but it requires special handling; see
+  // collect_null_checks():
+  //
+  // decode_not_null narrow_oop_reg, oop_reg // 'shift' and 'add base'
+  // [oop_reg + offset]
+  // NullCheck oop_reg
+  //
+  // With a zero base, when narrow oops cannot fold into the address
+  // expression, use a normal implicit null check, since only a shift
+  // is needed to decode the narrow oop.
+  //
+  // decode narrow_oop_reg, oop_reg // only 'shift'
+  // [oop_reg + offset]
+  // NullCheck oop_reg
+  //
+  inline static bool gen_narrow_oop_implicit_null_checks() {
+    return Universe::narrow_oop_use_implicit_null_checks() &&
+           (narrow_oop_use_complex_address() ||
+            Universe::narrow_oop_base() != NULL);
+  }
+
   // Is it better to copy float constants, or load them directly from memory?
   // Intel can load a float constant from a direct address, requiring no
   // extra registers.  Most RISCs will have to materialize an address into a
--- a/hotspot/src/share/vm/opto/parse2.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/opto/parse2.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1317,8 +1317,8 @@
   case Bytecodes::_iconst_3: push(intcon( 3)); break;
   case Bytecodes::_iconst_4: push(intcon( 4)); break;
   case Bytecodes::_iconst_5: push(intcon( 5)); break;
-  case Bytecodes::_bipush:   push(intcon( iter().get_byte())); break;
-  case Bytecodes::_sipush:   push(intcon( iter().get_short())); break;
+  case Bytecodes::_bipush:   push(intcon(iter().get_constant_u1())); break;
+  case Bytecodes::_sipush:   push(intcon(iter().get_constant_u2())); break;
   case Bytecodes::_aconst_null: push(null());  break;
   case Bytecodes::_ldc:
   case Bytecodes::_ldc_w:
--- a/hotspot/src/share/vm/opto/superword.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/opto/superword.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -514,6 +514,13 @@
 bool SuperWord::are_adjacent_refs(Node* s1, Node* s2) {
   if (!s1->is_Mem() || !s2->is_Mem()) return false;
   if (!in_bb(s1)    || !in_bb(s2))    return false;
+
+  // Do not use superword for non-primitives
+  if (!is_java_primitive(s1->as_Mem()->memory_type()) ||
+      !is_java_primitive(s2->as_Mem()->memory_type())) {
+    return false;
+  }
+
   // FIXME - co_locate_pack fails on Stores in different mem-slices, so
   // only pack memops that are in the same alias set until that's fixed.
   if (_phase->C->get_alias_index(s1->as_Mem()->adr_type()) !=
--- a/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -638,7 +638,7 @@
 
     // length of bytecode (mnemonic + operands)
     address bcp = bs.bcp();
-    int len = bs.next_bcp() - bcp;
+    int     len = bs.instruction_size();
     assert(len > 0, "length must be > 0");
 
     // copy the bytecodes
--- a/hotspot/src/share/vm/prims/jvmtiExport.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiExport.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -726,6 +726,32 @@
 GrowableArray<const void *>* JvmtiExport::_pending_compiled_method_unload_code_begins;
 JavaThread* JvmtiExport::_current_poster;
 
+void JvmtiExport::post_compiled_method_unload_internal(JavaThread* self, jmethodID method, const void *code_begin) {
+  EVT_TRIG_TRACE(JVMTI_EVENT_COMPILED_METHOD_UNLOAD,
+                 ("JVMTI [%s] method compile unload event triggered",
+                  JvmtiTrace::safe_get_thread_name(self)));
+
+  // post the event for each environment that has this event enabled.
+  JvmtiEnvIterator it;
+  for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+    if (env->is_enabled(JVMTI_EVENT_COMPILED_METHOD_UNLOAD)) {
+
+      EVT_TRACE(JVMTI_EVENT_COMPILED_METHOD_UNLOAD,
+                ("JVMTI [%s] class compile method unload event sent jmethodID " PTR_FORMAT,
+                 JvmtiTrace::safe_get_thread_name(self), method));
+
+      ResourceMark rm(self);
+
+      JvmtiEventMark jem(self);
+      JvmtiJavaThreadEventTransition jet(self);
+      jvmtiEventCompiledMethodUnload callback = env->callbacks()->CompiledMethodUnload;
+      if (callback != NULL) {
+        (*callback)(env->jvmti_external(), method, code_begin);
+      }
+    }
+  }
+}
+
 // post any pending CompiledMethodUnload events
 
 void JvmtiExport::post_pending_compiled_method_unload_events() {
@@ -788,26 +814,7 @@
   // flag, cleanup _current_poster to indicate that no thread is now servicing the
   // pending events list, and finally notify any thread that might be waiting.
   for (;;) {
-    EVT_TRIG_TRACE(JVMTI_EVENT_COMPILED_METHOD_UNLOAD,
-                   ("JVMTI [%s] method compile unload event triggered",
-                   JvmtiTrace::safe_get_thread_name(self)));
-
-    // post the event for each environment that has this event enabled.
-    JvmtiEnvIterator it;
-    for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
-      if (env->is_enabled(JVMTI_EVENT_COMPILED_METHOD_UNLOAD)) {
-        EVT_TRACE(JVMTI_EVENT_COMPILED_METHOD_UNLOAD,
-                  ("JVMTI [%s] class compile method unload event sent jmethodID " PTR_FORMAT,
-                  JvmtiTrace::safe_get_thread_name(self), method));
-
-        JvmtiEventMark jem(self);
-        JvmtiJavaThreadEventTransition jet(self);
-        jvmtiEventCompiledMethodUnload callback = env->callbacks()->CompiledMethodUnload;
-        if (callback != NULL) {
-          (*callback)(env->jvmti_external(), method, code_begin);
-        }
-      }
-    }
+    post_compiled_method_unload_internal(self, method, code_begin);
 
     // event posted, now re-grab monitor and get the next event
     // If there's no next event then we are done. If this is the first
@@ -1864,17 +1871,25 @@
 }
 
 // used to post a CompiledMethodUnload event
-void JvmtiExport::post_compiled_method_unload_at_safepoint(jmethodID mid, const void *code_begin) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
-
-  // create list lazily
-  if (_pending_compiled_method_unload_method_ids == NULL) {
-    _pending_compiled_method_unload_method_ids = new (ResourceObj::C_HEAP) GrowableArray<jmethodID>(10,true);
-    _pending_compiled_method_unload_code_begins = new (ResourceObj::C_HEAP) GrowableArray<const void *>(10,true);
+void JvmtiExport::post_compiled_method_unload(jmethodID mid, const void *code_begin) {
+  if (SafepointSynchronize::is_at_safepoint()) {
+    // Class unloading can cause nmethod unloading, which is reported
+    // by the VMThread.  These must be batched to be processed later.
+    if (_pending_compiled_method_unload_method_ids == NULL) {
+      // create list lazily
+      _pending_compiled_method_unload_method_ids = new (ResourceObj::C_HEAP) GrowableArray<jmethodID>(10,true);
+      _pending_compiled_method_unload_code_begins = new (ResourceObj::C_HEAP) GrowableArray<const void *>(10,true);
+    }
+    _pending_compiled_method_unload_method_ids->append(mid);
+    _pending_compiled_method_unload_code_begins->append(code_begin);
+    _have_pending_compiled_method_unload_events = true;
+  } else {
+    // Unloading caused by the sweeper can be reported synchronously.
+    if (have_pending_compiled_method_unload_events()) {
+      post_pending_compiled_method_unload_events();
+    }
+    post_compiled_method_unload_internal(JavaThread::current(), mid, code_begin);
   }
-  _pending_compiled_method_unload_method_ids->append(mid);
-  _pending_compiled_method_unload_code_begins->append(code_begin);
-  _have_pending_compiled_method_unload_events = true;
 }
 
 void JvmtiExport::post_dynamic_code_generated_internal(const char *name, const void *code_begin, const void *code_end) {
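
The restructuring above boils down to one rule: unload events raised at a safepoint are queued for later, while events raised on a normal thread first drain the backlog (preserving order) and then post synchronously. A schematic sketch of that pattern with stand-in types, not the JVMTI API:

    #include <cassert>
    #include <vector>

    struct Event { int id; };
    static std::vector<Event> pending;  // filled only at safepoints
    static std::vector<Event> posted;

    static void post_internal(const Event& e) { posted.push_back(e); }

    static void post_unload(Event e, bool at_safepoint) {
      if (at_safepoint) {
        pending.push_back(e);            // VMThread: batch for later processing
      } else {
        for (const Event& p : pending) post_internal(p);  // drain backlog first
        pending.clear();
        post_internal(e);                // sweeper thread: report synchronously
      }
    }

    int main() {
      post_unload({1}, true);
      post_unload({2}, true);
      post_unload({3}, false);           // drains 1 and 2, then posts 3
      assert(posted.size() == 3 && posted[0].id == 1 && posted[2].id == 3);
      return 0;
    }
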
--- a/hotspot/src/share/vm/prims/jvmtiExport.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiExport.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -144,6 +144,9 @@
   // posts any pending CompiledMethodUnload events.
   static void post_pending_compiled_method_unload_events();
 
+  // Perform the actual notification to interested JvmtiEnvs.
+  static void post_compiled_method_unload_internal(JavaThread* self, jmethodID mid, const void* code_begin);
+
   // posts a DynamicCodeGenerated event (internal/private implementation).
   // The public post_dynamic_code_generated* functions make use of the
   // internal implementation.
@@ -299,8 +302,8 @@
   static void post_compiled_method_load(nmethod *nm) KERNEL_RETURN;
   static void post_dynamic_code_generated(const char *name, const void *code_begin, const void *code_end) KERNEL_RETURN;
 
-  // used at a safepoint to post a CompiledMethodUnload event
-  static void post_compiled_method_unload_at_safepoint(jmethodID mid, const void *code_begin) KERNEL_RETURN;
+  // used to post a CompiledMethodUnload event
+  static void post_compiled_method_unload(jmethodID mid, const void *code_begin) KERNEL_RETURN;
 
   // similar to post_dynamic_code_generated except that it can be used to
   // post a DynamicCodeGenerated event while holding locks in the VM. Any event
--- a/hotspot/src/share/vm/prims/methodComparator.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/prims/methodComparator.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -130,8 +130,8 @@
   case Bytecodes::_multianewarray : // fall through
   case Bytecodes::_checkcast      : // fall through
   case Bytecodes::_instanceof     : {
-    u2 cpi_old = _s_old->get_index_big();
-    u2 cpi_new = _s_new->get_index_big();
+    u2 cpi_old = _s_old->get_index_u2();
+    u2 cpi_new = _s_new->get_index_u2();
     if ((_old_cp->klass_at_noresolve(cpi_old) != _new_cp->klass_at_noresolve(cpi_new)))
         return false;
     if (c_old == Bytecodes::_multianewarray &&
@@ -147,9 +147,10 @@
   case Bytecodes::_invokevirtual   : // fall through
   case Bytecodes::_invokespecial   : // fall through
   case Bytecodes::_invokestatic    : // fall through
+  case Bytecodes::_invokedynamic   : // fall through
   case Bytecodes::_invokeinterface : {
-    u2 cpci_old = _s_old->get_index_int();
-    u2 cpci_new = _s_new->get_index_int();
+    int cpci_old = _s_old->has_index_u4() ? _s_old->get_index_u4() : _s_old->get_index_u2_cpcache();
+    int cpci_new = _s_new->has_index_u4() ? _s_new->get_index_u4() : _s_new->get_index_u2_cpcache();
     // Check if the names of classes, field/method names and signatures at these indexes
     // are the same. Indices which are really into constantpool cache (rather than constant
     // pool itself) are accepted by the constantpool query routines below.
@@ -162,14 +163,10 @@
 
   case Bytecodes::_ldc   : // fall through
   case Bytecodes::_ldc_w : {
-    u2 cpi_old, cpi_new;
-    if (c_old == Bytecodes::_ldc) {
-      cpi_old = _s_old->bcp()[1];
-      cpi_new = _s_new->bcp()[1];
-    } else {
-      cpi_old = _s_old->get_index_big();
-      cpi_new = _s_new->get_index_big();
-    }
+    Bytecode_loadconstant* ldc_old = Bytecode_loadconstant_at(_s_old->method()(), _s_old->bcp());
+    Bytecode_loadconstant* ldc_new = Bytecode_loadconstant_at(_s_new->method()(), _s_new->bcp());
+    int cpi_old = ldc_old->index();
+    int cpi_new = ldc_new->index();
     constantTag tag_old = _old_cp->tag_at(cpi_old);
     constantTag tag_new = _new_cp->tag_at(cpi_new);
     if (tag_old.is_int() || tag_old.is_float()) {
@@ -179,7 +176,9 @@
         if (_old_cp->int_at(cpi_old) != _new_cp->int_at(cpi_new))
           return false;
       } else {
-        if (_old_cp->float_at(cpi_old) != _new_cp->float_at(cpi_new))
+        // Use jint_cast to compare the bits rather than numerical values.
+        // This makes a difference for NaN constants.
+        if (jint_cast(_old_cp->float_at(cpi_old)) != jint_cast(_new_cp->float_at(cpi_new)))
           return false;
       }
     } else if (tag_old.is_string() || tag_old.is_unresolved_string()) {
@@ -199,8 +198,8 @@
   }
 
   case Bytecodes::_ldc2_w : {
-    u2 cpi_old = _s_old->get_index_big();
-    u2 cpi_new = _s_new->get_index_big();
+    u2 cpi_old = _s_old->get_index_u2();
+    u2 cpi_new = _s_new->get_index_u2();
     constantTag tag_old = _old_cp->tag_at(cpi_old);
     constantTag tag_new = _new_cp->tag_at(cpi_new);
     if (tag_old.value() != tag_new.value())
@@ -209,7 +208,9 @@
       if (_old_cp->long_at(cpi_old) != _new_cp->long_at(cpi_new))
         return false;
     } else {
-      if (_old_cp->double_at(cpi_old) != _new_cp->double_at(cpi_new))
+      // Use jlong_cast to compare the bits rather than numerical values.
+      // This makes a difference for NaN constants.
+      if (jlong_cast(_old_cp->double_at(cpi_old)) != jlong_cast(_new_cp->double_at(cpi_new)))
         return false;
     }
     break;
@@ -221,7 +222,7 @@
     break;
 
   case Bytecodes::_sipush    :
-    if (_s_old->get_index_big() != _s_new->get_index_big())
+    if (_s_old->get_index_u2() != _s_new->get_index_u2())
       return false;
     break;
 
@@ -260,8 +261,8 @@
   case Bytecodes::_ifnonnull : // fall through
   case Bytecodes::_ifnull    : // fall through
   case Bytecodes::_jsr       : {
-    short old_ofs = (short) _s_old->get_index_big();
-    short new_ofs = (short) _s_new->get_index_big();
+    int old_ofs = _s_old->bytecode()->get_offset_s2(c_old);
+    int new_ofs = _s_new->bytecode()->get_offset_s2(c_new);
     if (_switchable_test) {
       int old_dest = _s_old->bci() + old_ofs;
       int new_dest = _s_new->bci() + new_ofs;
@@ -285,9 +286,11 @@
     if (_s_old->is_wide() != _s_new->is_wide())
       return false;
     if (! _s_old->is_wide()) {
-      if (_s_old->get_index_big() != _s_new->get_index_big())
+      // We could use get_index_u1 and get_constant_u1, but it's simpler to grab both bytes at once:
+      if (Bytes::get_Java_u2(_s_old->bcp() + 1) != Bytes::get_Java_u2(_s_new->bcp() + 1))
         return false;
     } else {
+      // We could use get_index_u2 and get_constant_u2, but it's simpler to grab all four bytes at once:
       if (Bytes::get_Java_u4(_s_old->bcp() + 1) != Bytes::get_Java_u4(_s_new->bcp() + 1))
         return false;
     }
@@ -295,8 +298,8 @@
 
   case Bytecodes::_goto_w : // fall through
   case Bytecodes::_jsr_w  : {
-    int old_ofs = (int) Bytes::get_Java_u4(_s_old->bcp() + 1);
-    int new_ofs = (int) Bytes::get_Java_u4(_s_new->bcp() + 1);
+    int old_ofs = _s_old->bytecode()->get_offset_s4(c_old);
+    int new_ofs = _s_new->bytecode()->get_offset_s4(c_new);
     if (_switchable_test) {
       int old_dest = _s_old->bci() + old_ofs;
       int new_dest = _s_new->bci() + new_ofs;
@@ -357,8 +360,8 @@
         }
       }
     } else { // !_switchable_test, can use fast rough compare
-      int len_old = _s_old->next_bcp() - _s_old->bcp();
-      int len_new = _s_new->next_bcp() - _s_new->bcp();
+      int len_old = _s_old->instruction_size();
+      int len_new = _s_new->instruction_size();
       if (len_old != len_new)
         return false;
       if (memcmp(_s_old->bcp(), _s_new->bcp(), len_old) != 0)
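
The jint_cast/jlong_cast change above matters because NaN breaks value comparison: two bit-identical NaN constants compare unequal numerically, which would make otherwise identical methods look different. A tiny demonstration (jint_cast modeled here with memcpy rather than HotSpot's cast helpers):

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    static int32_t jint_cast(float f) {
      int32_t bits; std::memcpy(&bits, &f, sizeof bits); return bits;
    }

    int main() {
      float a = std::nanf("");
      float b = a;                           // the same constant-pool bits
      assert(!(a == b));                     // value comparison rejects them...
      assert(jint_cast(a) == jint_cast(b));  // ...bit comparison accepts them
      return 0;
    }
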
--- a/hotspot/src/share/vm/prims/methodHandleWalk.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/prims/methodHandleWalk.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -732,7 +732,7 @@
   case Bytecodes::_dreturn:
   case Bytecodes::_areturn:
   case Bytecodes::_return:
-    assert(strcmp(Bytecodes::format(op), "b") == 0, "wrong bytecode format");
+    assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_b, "wrong bytecode format");
     _bytecode.push(op);
     break;
 
@@ -748,7 +748,7 @@
   case Bytecodes::_fstore:
   case Bytecodes::_dstore:
   case Bytecodes::_astore:
-    assert(strcmp(Bytecodes::format(op), "bi") == 0, "wrong bytecode format");
+    assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bi, "wrong bytecode format");
     assert((char) index == index, "index does not fit in 8-bit");
     _bytecode.push(op);
     _bytecode.push(index);
@@ -757,18 +757,18 @@
   // bii
   case Bytecodes::_ldc2_w:
   case Bytecodes::_checkcast:
-    assert(strcmp(Bytecodes::format(op), "bii") == 0, "wrong bytecode format");
+    assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bkk, "wrong bytecode format");
     assert((short) index == index, "index does not fit in 16-bit");
     _bytecode.push(op);
     _bytecode.push(index >> 8);
     _bytecode.push(index);
     break;
 
-  // bjj
+  // bJJ
   case Bytecodes::_invokestatic:
   case Bytecodes::_invokespecial:
   case Bytecodes::_invokevirtual:
-    assert(strcmp(Bytecodes::format(op), "bjj") == 0, "wrong bytecode format");
+    assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bJJ, "wrong bytecode format");
     assert((short) index == index, "index does not fit in 16-bit");
     _bytecode.push(op);
     _bytecode.push(index >> 8);
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1211,8 +1211,44 @@
 }
 #endif // KERNEL
 
+void set_object_alignment() {
+  // Object alignment.
+  assert(is_power_of_2(ObjectAlignmentInBytes), "ObjectAlignmentInBytes must be power of 2");
+  MinObjAlignmentInBytes     = ObjectAlignmentInBytes;
+  assert(MinObjAlignmentInBytes >= HeapWordsPerLong * HeapWordSize, "ObjectAlignmentInBytes value is too small");
+  MinObjAlignment            = MinObjAlignmentInBytes / HeapWordSize;
+  assert(MinObjAlignmentInBytes == MinObjAlignment * HeapWordSize, "ObjectAlignmentInBytes value is incorrect");
+  MinObjAlignmentInBytesMask = MinObjAlignmentInBytes - 1;
+
+  LogMinObjAlignmentInBytes  = exact_log2(ObjectAlignmentInBytes);
+  LogMinObjAlignment         = LogMinObjAlignmentInBytes - LogHeapWordSize;
+
+  // Oop encoding heap max
+  OopEncodingHeapMax = (uint64_t(max_juint) + 1) << LogMinObjAlignmentInBytes;
+
+#ifndef KERNEL
+  // Set CMS global values
+  CompactibleFreeListSpace::set_cms_values();
+#endif // KERNEL
+}
+
+bool verify_object_alignment() {
+  // Object alignment.
+  if (!is_power_of_2(ObjectAlignmentInBytes)) {
+    jio_fprintf(defaultStream::error_stream(),
+                "error: ObjectAlignmentInBytes=%d must be a power of 2", (int)ObjectAlignmentInBytes);
+    return false;
+  }
+  if ((int)ObjectAlignmentInBytes < BytesPerLong) {
+    jio_fprintf(defaultStream::error_stream(),
+                "error: ObjectAlignmentInBytes=%d must be greater than or equal to %d", (int)ObjectAlignmentInBytes, BytesPerLong);
+    return false;
+  }
+  return true;
+}
+
 inline uintx max_heap_for_compressed_oops() {
-  LP64_ONLY(return oopDesc::OopEncodingHeapMax - MaxPermSize - os::vm_page_size());
+  LP64_ONLY(return OopEncodingHeapMax - MaxPermSize - os::vm_page_size());
   NOT_LP64(ShouldNotReachHere(); return 0);
 }
 
@@ -1776,6 +1812,8 @@
   status = status && verify_interval(TLABWasteTargetPercent,
                                      1, 100, "TLABWasteTargetPercent");
 
+  status = status && verify_object_alignment();
+
   return status;
 }
 
@@ -2848,6 +2886,9 @@
   UseCompressedOops = false;
 #endif
 
+  // Set object alignment values.
+  set_object_alignment();
+
 #ifdef SERIALGC
   force_serial_gc();
 #endif // SERIALGC
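
set_object_alignment above derives OopEncodingHeapMax at run time: a 32-bit narrow oop addresses 4G alignment units, so the encodable heap is 4Gb shifted left by log2(ObjectAlignmentInBytes). A quick arithmetic check (the 16-byte case is a hypothetical -XX:ObjectAlignmentInBytes setting):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t NarrowOopHeapMax = (uint64_t)0xFFFFFFFFu + 1;  // 4G units

      int log_align_8  = 3;  // ObjectAlignmentInBytes = 8, the default
      int log_align_16 = 4;  // ObjectAlignmentInBytes = 16, hypothetical

      assert((NarrowOopHeapMax << log_align_8)  == 32ULL * 1024 * 1024 * 1024);
      assert((NarrowOopHeapMax << log_align_16) == 64ULL * 1024 * 1024 * 1024);
      return 0;
    }
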
--- a/hotspot/src/share/vm/runtime/globals.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -321,6 +321,9 @@
   diagnostic(bool, PrintCompressedOopsMode, false,                          \
             "Print compressed oops base address and encoding mode")         \
                                                                             \
+  lp64_product(intx, ObjectAlignmentInBytes, 8,                             \
+          "Default object alignment in bytes, 8 is minimum")                \
+                                                                            \
   /* UseMembar is theoretically a temp flag used for memory barrier         \
    * removal testing.  It was supposed to be removed before FCS but has     \
    * been re-added (see 6401008) */                                         \
@@ -920,6 +923,10 @@
                                                                             \
   product(intx, AlwaysInflate, 0, "(Unstable) Force inflation")             \
                                                                             \
+  product(intx, MonitorBound, 0, "Bound Monitor population")                \
+                                                                            \
+  product(bool, MonitorInUseLists, false, "Track Monitors for Deflation")   \
+                                                                            \
   product(intx, Atomics, 0,                                                 \
           "(Unsafe,Unstable) Diagnostic - Controls emission of atomics")    \
                                                                             \
@@ -1117,6 +1124,9 @@
   product(intx, TraceRedefineClasses, 0,                                    \
           "Trace level for JVMTI RedefineClasses")                          \
                                                                             \
+  develop(bool, StressMethodComparator, false,                              \
+          "run the MethodComparator on all loaded methods")                 \
+                                                                            \
   /* change to false by default sometime after Mustang */                   \
   product(bool, VerifyMergedCPBytecodes, true,                              \
           "Verify bytecodes after RedefineClasses constant pool merging")   \
--- a/hotspot/src/share/vm/runtime/mutexLocker.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -82,9 +82,6 @@
 Mutex*   DerivedPointerTableGC_lock   = NULL;
 Mutex*   Compile_lock                 = NULL;
 Monitor* MethodCompileQueue_lock      = NULL;
-#ifdef TIERED
-Monitor* C1_lock                      = NULL;
-#endif // TIERED
 Monitor* CompileThread_lock           = NULL;
 Mutex*   CompileTaskAlloc_lock        = NULL;
 Mutex*   CompileStatistics_lock       = NULL;
@@ -255,11 +252,6 @@
   def(Debug3_lock                  , Mutex  , nonleaf+4,   true );
   def(ProfileVM_lock               , Monitor, nonleaf+4,   false); // used for profiling of the VMThread
   def(CompileThread_lock           , Monitor, nonleaf+5,   false );
-#ifdef TIERED
-  def(C1_lock                      , Monitor, nonleaf+5,   false );
-#endif // TIERED
-
-
 }
 
 GCMutexLocker::GCMutexLocker(Monitor * mutex) {
--- a/hotspot/src/share/vm/runtime/mutexLocker.hpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/runtime/mutexLocker.hpp	Tue Jun 29 10:48:02 2010 -0700
@@ -84,9 +84,6 @@
 extern Mutex*   EvacFailureStack_lock;           // guards the evac failure scan stack
 extern Mutex*   Compile_lock;                    // a lock held when Compilation is updating code (used to block CodeCache traversal, CHA updates, etc)
 extern Monitor* MethodCompileQueue_lock;         // a lock held when method compilations are enqueued, dequeued
-#ifdef TIERED
-extern Monitor* C1_lock;                         // a lock to ensure on single c1 compile is ever active
-#endif // TIERED
 extern Monitor* CompileThread_lock;              // a lock held by compile threads during compilation system initialization
 extern Mutex*   CompileTaskAlloc_lock;           // a lock held when CompileTasks are allocated
 extern Mutex*   CompileStatistics_lock;          // a lock held when updating compilation statistics
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -1435,7 +1435,7 @@
       // for the rest of its life! Just another racing bug in the life of
       // fixup_callers_callsite ...
       //
-      RelocIterator iter(cb, call->instruction_address(), call->next_instruction_address());
+      RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
       iter.next();
       assert(iter.has_current(), "must have a reloc at java call site");
       relocInfo::relocType typ = iter.reloc()->type();
@@ -2055,11 +2055,11 @@
   void scan() {
     while (_index < _table->table_size()) {
       AdapterHandlerEntry* a = _table->bucket(_index);
+      _index++;
       if (a != NULL) {
         _current = a;
         return;
       }
-      _index++;
     }
   }
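
The one-line move above fixes a subtle iterator bug: incrementing _index only on a miss meant a successful scan() left _index pointing at the bucket it had just returned, so the next call would hand back the same entry again. A reduced sketch of the corrected loop with stand-in types:

    #include <cassert>
    #include <cstddef>

    struct Entry { int v; };

    struct Iter {
      Entry** table; size_t size; size_t index; Entry* current;
      void scan() {
        current = nullptr;
        while (index < size) {
          Entry* e = table[index];
          index++;                        // advance before the hit test (the fix)
          if (e != nullptr) { current = e; return; }
        }
      }
    };

    int main() {
      Entry a{1}, b{2};
      Entry* tbl[4] = { nullptr, &a, nullptr, &b };
      Iter it{tbl, 4, 0, nullptr};
      it.scan(); assert(it.current == &a);
      it.scan(); assert(it.current == &b);    // the old code returned &a forever
      it.scan(); assert(it.current == nullptr);
      return 0;
    }
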
 
--- a/hotspot/src/share/vm/runtime/synchronizer.cpp	Mon Jun 21 11:00:15 2010 -0700
+++ b/hotspot/src/share/vm/runtime/synchronizer.cpp	Tue Jun 29 10:48:02 2010 -0700
@@ -185,6 +185,8 @@
 } ;
 
 static SharedGlobals GVars ;
+static int MonitorScavengeThreshold = 1000000 ;
+static volatile int ForceMonitorScavenge = 0 ; // Scavenge required and pending
 
 
 // Tunabl