changeset 3836:8aaef2cee3b2

Merge
author minqi
date Thu, 08 Nov 2012 16:48:01 -0800
parents 64812523d72e 18fb7da42534
children 4efcd79826f2 e26ce0e8b666
files src/share/vm/prims/jvmtiClassFileReconstituter.cpp test/runtime/7158800/BadUtf8.java test/runtime/7158800/InternTest.java test/runtime/7158800/Test7158800.sh test/runtime/7158800/badstrings.txt
diffstat 161 files changed, 11544 insertions(+), 33939 deletions(-)
--- a/.hgtags	Wed Oct 31 16:20:03 2012 -0700
+++ b/.hgtags	Thu Nov 08 16:48:01 2012 -0800
@@ -287,3 +287,7 @@
 b261523fe66c40a02968f0aa7e73602491bb3386 hs25-b05
 4547dc71db765276e027b0c2780b724bae0a07d3 jdk8-b61
 d0337c31c8be7716369b4e7c3bd5f352983c6a06 hs25-b06
+dccd40de8db1fa96f186e6179907818d75320440 jdk8-b62
+dc16fe422c535ecd4e9f80fb814a1bb9704da6f5 hs25-b07
+acabb5c282f59be7e3238920b2ea06b684ab68f7 jdk8-b63
+8cb93eadfb6dcab88d91b8e2cd3e0e07d0ac4048 hs25-b08
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Wed Oct 31 16:20:03 2012 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Thu Nov 08 16:48:01 2012 -0800
@@ -121,7 +121,7 @@
     Address addr = cache.getValue(getAddress());
     return (ConstantPoolCache) VMObjectFactory.newObject(ConstantPoolCache.class, addr);
   }
-  public Klass             getPoolHolder() { return (Klass)             poolHolder.getValue(this); }
+  public InstanceKlass     getPoolHolder() { return (InstanceKlass)poolHolder.getValue(this); }
   public int               getLength()     { return (int)length.getValue(getAddress()); }
   public Oop               getResolvedReferences() {
     Address handle = resolvedReferences.getValue(getAddress());
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java	Wed Oct 31 16:20:03 2012 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java	Thu Nov 08 16:48:01 2012 -0800
@@ -177,7 +177,7 @@
       bci. It is required that there is currently a bytecode at this
       bci. */
   public int getOrigBytecodeAt(int bci) {
-    BreakpointInfo bp = ((InstanceKlass) getMethodHolder()).getBreakpoints();
+    BreakpointInfo bp = getMethodHolder().getBreakpoints();
     for (; bp != null; bp = bp.getNext()) {
       if (bp.match(this, bci)) {
         return bp.getOrigBytecode();
@@ -238,7 +238,7 @@
   }
 
   // Method holder (the Klass holding this method)
-  public Klass   getMethodHolder()  { return getConstants().getPoolHolder();                           }
+  public InstanceKlass   getMethodHolder()  { return getConstants().getPoolHolder();                   }
 
   // Access flags
   public boolean isPublic()         { return getAccessFlagsObj().isPublic();                           }
--- a/make/Makefile	Wed Oct 31 16:20:03 2012 -0700
+++ b/make/Makefile	Thu Nov 08 16:48:01 2012 -0800
@@ -453,14 +453,30 @@
     ifeq ($(JVM_VARIANT_ZEROSHARK), true)
         $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX)
 		$(install-file)
+        $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo:	$(SHARK_DIR)/%.debuginfo
+		$(install-file)
+        $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz:		$(SHARK_DIR)/%.diz
+		$(install-file)
         $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX):       $(SHARK_DIR)/%.$(LIBRARY_SUFFIX)
 		$(install-file)
+        $(EXPORT_SERVER_DIR)/%.debuginfo:		$(SHARK_DIR)/%.debuginfo
+		$(install-file)
+        $(EXPORT_SERVER_DIR)/%.diz:			$(SHARK_DIR)/%.diz
+		$(install-file)
     endif
     ifeq ($(JVM_VARIANT_ZERO), true)
         $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
 		$(install-file)
+        $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo:		$(ZERO_DIR)/%.debuginfo
+		$(install-file)
+        $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz:		$(ZERO_DIR)/%.diz
+		$(install-file)
         $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX):       $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
 		$(install-file)
+        $(EXPORT_SERVER_DIR)/%.debuginfo:		$(ZERO_DIR)/%.debuginfo
+		$(install-file)
+        $(EXPORT_SERVER_DIR)/%.diz:			$(ZERO_DIR)/%.diz
+		$(install-file)
     endif
     ifeq ($(JVM_VARIANT_MINIMAL1), true)
         $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX):	$(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX)
--- a/make/excludeSrc.make	Wed Oct 31 16:20:03 2012 -0700
+++ b/make/excludeSrc.make	Thu Nov 08 16:48:01 2012 -0800
@@ -79,10 +79,10 @@
       CXXFLAGS += -DSERIALGC
       CFLAGS += -DSERIALGC
       Src_Files_EXCLUDE += \
-	binaryTreeDictionary.cpp cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
+	cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
 	cmsGCAdaptivePolicyCounters.cpp cmsLockVerifier.cpp cmsPermGen.cpp compactibleFreeListSpace.cpp \
-	concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp freeBlockDictionary.cpp \
-	freeChunk.cpp freeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \
+	concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp \
+	freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \
 	concurrentG1Refine.cpp concurrentG1RefineThread.cpp concurrentMark.cpp concurrentMarkThread.cpp \
 	dirtyCardQueue.cpp g1AllocRegion.cpp g1BlockOffsetTable.cpp g1CollectedHeap.cpp g1GCPhaseTimes.cpp \
 	g1CollectorPolicy.cpp g1ErgoVerbose.cpp g1_globals.cpp g1HRPrinter.cpp g1MarkSweep.cpp \
--- a/make/hotspot_version	Wed Oct 31 16:20:03 2012 -0700
+++ b/make/hotspot_version	Thu Nov 08 16:48:01 2012 -0800
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=07
+HS_BUILD_NUMBER=09
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -2322,7 +2322,7 @@
   // Pre-load a static method's oop into O1.  Used both by locking code and
   // the normal JNI call code.
   if (method->is_static() && !is_critical_native) {
-    __ set_oop_constant(JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()), O1);
+    __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), O1);
 
     // Now handlize the static class mirror in O1.  It's known not-null.
     __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
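The pattern behind these call-site simplifications: ConstantPool's pool holder and Method's method holder are now typed as InstanceKlass rather than Klass (a constant pool always belongs to an instance class), so the Klass::cast downcast at each caller drops out. A minimal standalone C++ analogue, using hypothetical stand-in types rather than the real HotSpot classes:

    #include <cstdio>

    // Hypothetical stand-ins for the HotSpot hierarchy (illustration only).
    struct Klass { virtual ~Klass() {} };
    struct InstanceKlass : Klass {
      const char* java_mirror() const { return "mirror"; }  // placeholder
    };

    struct ConstantPool {
      InstanceKlass* _holder;
      // Returning InstanceKlass* (instead of Klass*) moves the downcast
      // knowledge into the type, so callers need no Klass::cast.
      InstanceKlass* pool_holder() const { return _holder; }
    };

    struct Method {
      ConstantPool* _cp;
      InstanceKlass* method_holder() const { return _cp->pool_holder(); }
    };

    int main() {
      InstanceKlass ik;
      ConstantPool cp{&ik};
      Method m{&cp};
      // Equivalent of method->method_holder()->java_mirror(), no cast needed.
      std::printf("%s\n", m.method_holder()->java_mirror());
    }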
--- a/src/cpu/x86/vm/assembler_x86.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -1007,6 +1007,67 @@
   emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
 }
 
+void Assembler::aesdec(XMMRegister dst, Address src) {
+  assert(VM_Version::supports_aes(), "");
+  InstructionMark im(this);
+  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xde);
+  emit_operand(dst, src);
+}
+
+void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_aes(), "");
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xde);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::aesdeclast(XMMRegister dst, Address src) {
+  assert(VM_Version::supports_aes(), "");
+  InstructionMark im(this);
+  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xdf);
+  emit_operand(dst, src);
+}
+
+void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_aes(), "");
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xdf);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::aesenc(XMMRegister dst, Address src) {
+  assert(VM_Version::supports_aes(), "");
+  InstructionMark im(this);
+  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xdc);
+  emit_operand(dst, src);
+}
+
+void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_aes(), "");
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xdc);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::aesenclast(XMMRegister dst, Address src) {
+  assert(VM_Version::supports_aes(), "");
+  InstructionMark im(this);
+  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xdd);
+  emit_operand(dst, src);
+}
+
+void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_aes(), "");
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xdd);
+  emit_byte(0xC0 | encode);
+}
+
+
 void Assembler::andl(Address dst, int32_t imm32) {
   InstructionMark im(this);
   prefix(dst);
@@ -2307,6 +2368,22 @@
   a_byte(p);
 }
 
+void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_ssse3(), "");
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0x00);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::pshufb(XMMRegister dst, Address src) {
+  assert(VM_Version::supports_ssse3(), "");
+  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
+  InstructionMark im(this);
+  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0x00);
+  emit_operand(dst, src);
+}
+
 void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
   assert(isByte(mode), "invalid value");
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
@@ -8067,6 +8144,15 @@
   LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
 }
 
+void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src) {
+  if (reachable(src)) {
+    Assembler::movdqu(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    Assembler::movdqu(dst, Address(rscratch1, 0));
+  }
+}
+
 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
   if (reachable(src)) {
     Assembler::movsd(dst, as_Address(src));
@@ -8357,6 +8443,17 @@
   }
 }
 
+void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
+  // Used in sign-bit flipping with aligned address.
+  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
+  if (reachable(src)) {
+    Assembler::pshufb(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    Assembler::pshufb(dst, Address(rscratch1, 0));
+  }
+}
+
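The AddressLiteral overloads above follow HotSpot's reachability pattern: a RIP-relative operand can only encode a signed 32-bit displacement, so when the target is out of range the address is first materialized into rscratch1 with lea and then used indirectly. A small standalone sketch of the displacement test (a hypothetical helper, not the VM's actual reachable()):

    #include <cstdint>

    // A RIP-relative memory operand encodes disp32, so the target must lie
    // within +/-2GB of the next instruction's address.
    static bool fits_rip_relative(uint64_t next_rip, uint64_t target) {
      int64_t disp = static_cast<int64_t>(target) - static_cast<int64_t>(next_rip);
      return disp == static_cast<int64_t>(static_cast<int32_t>(disp));
    }

    int main() {
      return fits_rip_relative(0x100000, 0x200000) &&      // in range
            !fits_rip_relative(0, 0x100000000ULL) ? 0 : 1; // out of range
    }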
 // AVX 3-operands instructions
 
 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
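The new aesenc/aesenclast/aesdec/aesdeclast encodings (opcodes 0xDC/0xDD/0xDE/0xDF in the 0F 38 map with a 66 prefix) compute the same operations as the AES-NI compiler intrinsics. A minimal sketch of one AES-128 block encryption in those terms, assuming a pre-expanded 11-word round key schedule and a compiler/CPU with AES-NI (compile with -maes):

    #include <wmmintrin.h>   // AES-NI intrinsics (_mm_aesenc_si128 etc.)

    // rk[0..10]: the 11 round keys of an expanded AES-128 key schedule.
    static __m128i aes128_encrypt_block(__m128i block, const __m128i rk[11]) {
      block = _mm_xor_si128(block, rk[0]);          // whitening, like pxor(xmm_result, key0)
      for (int r = 1; r <= 9; r++)
        block = _mm_aesenc_si128(block, rk[r]);     // aesenc rounds 1-9
      return _mm_aesenclast_si128(block, rk[10]);   // aesenclast with the final key
    }

Decryption mirrors this with _mm_aesdec_si128 and _mm_aesdeclast_si128 over the inverse key schedule, which Java's AESCrypt already provides in expanded form.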
--- a/src/cpu/x86/vm/assembler_x86.hpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/x86/vm/assembler_x86.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -875,6 +875,17 @@
   void addss(XMMRegister dst, Address src);
   void addss(XMMRegister dst, XMMRegister src);
 
+  // AES instructions
+  void aesdec(XMMRegister dst, Address src);
+  void aesdec(XMMRegister dst, XMMRegister src);
+  void aesdeclast(XMMRegister dst, Address src);
+  void aesdeclast(XMMRegister dst, XMMRegister src);
+  void aesenc(XMMRegister dst, Address src);
+  void aesenc(XMMRegister dst, XMMRegister src);
+  void aesenclast(XMMRegister dst, Address src);
+  void aesenclast(XMMRegister dst, XMMRegister src);
+
+
   void andl(Address  dst, int32_t imm32);
   void andl(Register dst, int32_t imm32);
   void andl(Register dst, Address src);
@@ -1424,6 +1435,10 @@
   void prefetcht2(Address src);
   void prefetchw(Address src);
 
+  // Shuffle Bytes
+  void pshufb(XMMRegister dst, XMMRegister src);
+  void pshufb(XMMRegister dst, Address src);
+
   // Shuffle Packed Doublewords
   void pshufd(XMMRegister dst, XMMRegister src, int mode);
   void pshufd(XMMRegister dst, Address src,     int mode);
@@ -2611,6 +2626,12 @@
   void divss(XMMRegister dst, Address src)        { Assembler::divss(dst, src); }
   void divss(XMMRegister dst, AddressLiteral src);
 
+  // Move Unaligned Double Quadword
+  void movdqu(Address     dst, XMMRegister src)   { Assembler::movdqu(dst, src); }
+  void movdqu(XMMRegister dst, Address src)       { Assembler::movdqu(dst, src); }
+  void movdqu(XMMRegister dst, XMMRegister src)   { Assembler::movdqu(dst, src); }
+  void movdqu(XMMRegister dst, AddressLiteral src);
+
   void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
   void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
   void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
@@ -2658,6 +2679,10 @@
   void xorps(XMMRegister dst, Address src)     { Assembler::xorps(dst, src); }
   void xorps(XMMRegister dst, AddressLiteral src);
 
+  // Shuffle Bytes
+  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
+  void pshufb(XMMRegister dst, Address src)     { Assembler::pshufb(dst, src); }
+  void pshufb(XMMRegister dst, AddressLiteral src);
   // AVX 3-operands instructions
 
   void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -1936,7 +1936,7 @@
   if (method->is_static() && !is_critical_native) {
 
     //  load oop into a register
-    __ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));
+    __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
 
     // Now handlize the static class mirror; it's known not-null.
     __ movptr(Address(rsp, klass_offset), oop_handle_reg);
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -2179,7 +2179,7 @@
   if (method->is_static() && !is_critical_native) {
 
     //  load oop into a register
-    __ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));
+    __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
 
     // Now handlize the static class mirror; it's known not-null.
     __ movptr(Address(rsp, klass_offset), oop_handle_reg);
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -2137,6 +2137,529 @@
     }
   }
 
+  // AES intrinsic stubs
+  enum {AESBlockSize = 16};
+
+  address generate_key_shuffle_mask() {
+    __ align(16);
+    StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask");
+    address start = __ pc();
+    __ emit_data(0x00010203, relocInfo::none, 0 );
+    __ emit_data(0x04050607, relocInfo::none, 0 );
+    __ emit_data(0x08090a0b, relocInfo::none, 0 );
+    __ emit_data(0x0c0d0e0f, relocInfo::none, 0 );
+    return start;
+  }
+
+  // Utility routine for loading a 128-bit key word in little endian format
+  // can optionally specify that the shuffle mask is already in an xmm register
+  void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
+    __ movdqu(xmmdst, Address(key, offset));
+    if (xmm_shuf_mask != NULL) {
+      __ pshufb(xmmdst, xmm_shuf_mask);
+    } else {
+      __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    }
+  }
+
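What the mask from generate_key_shuffle_mask does: emit_data stores each 32-bit constant little-endian, so the 16 mask bytes read 03 02 01 00 07 06 05 04 0b 0a 09 08 0f 0e 0d 0c, and pshufb with that mask byte-reverses each 32-bit lane. That is how load_key turns the Java int[] expanded key (each int loaded low-byte-first by movdqu) into the byte order the AES round instructions consume. A standalone sketch of the shuffle, assuming SSSE3 (compile with -mssse3):

    #include <tmmintrin.h>   // SSSE3: _mm_shuffle_epi8 (pshufb)
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Same constant the stub emits: bytes 03 02 01 00 07 06 05 04 ... in memory.
      const uint8_t mask_bytes[16] = { 3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12 };
      uint8_t in[16], out[16];
      for (int i = 0; i < 16; i++) in[i] = (uint8_t)i;

      __m128i mask = _mm_loadu_si128((const __m128i*)mask_bytes);
      __m128i v    = _mm_loadu_si128((const __m128i*)in);
      _mm_storeu_si128((__m128i*)out, _mm_shuffle_epi8(v, mask));  // pshufb

      for (int i = 0; i < 16; i++) std::printf("%02x ", out[i]);   // 03 02 01 00 07 ...
      std::printf("\n");
    }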
+  // aesenc using specified key+offset
+  // can optionally specify that the shuffle mask is already in an xmm register
+  void aes_enc_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
+    load_key(xmmtmp, key, offset, xmm_shuf_mask);
+    __ aesenc(xmmdst, xmmtmp);
+  }
+
+  // aesdec using specified key+offset
+  // can optionally specify that the shuffle mask is already in an xmm register
+  void aes_dec_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
+    load_key(xmmtmp, key, offset, xmm_shuf_mask);
+    __ aesdec(xmmdst, xmmtmp);
+  }
+
+
+  // Arguments:
+  //
+  // Inputs:
+  //   c_rarg0   - source byte array address
+  //   c_rarg1   - destination byte array address
+  //   c_rarg2   - K (key) in little endian int array
+  //
+  address generate_aescrypt_encryptBlock() {
+    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
+    Label L_doLast;
+    address start = __ pc();
+
+    const Register from        = rsi;      // source array address
+    const Register to          = rdx;      // destination array address
+    const Register key         = rcx;      // key array address
+    const Register keylen      = rax;
+    const Address  from_param(rbp, 8+0);
+    const Address  to_param  (rbp, 8+4);
+    const Address  key_param (rbp, 8+8);
+
+    const XMMRegister xmm_result = xmm0;
+    const XMMRegister xmm_temp   = xmm1;
+    const XMMRegister xmm_key_shuf_mask = xmm2;
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+    __ push(rsi);
+    __ movptr(from , from_param);
+    __ movptr(to   , to_param);
+    __ movptr(key  , key_param);
+
+    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+    // keylen = # of 32-bit words, convert to 128-bit words
+    __ shrl(keylen, 2);
+    __ subl(keylen, 11);   // every key has at least 11 128-bit words, some have more
+
+    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    __ movdqu(xmm_result, Address(from, 0));  // get 16 bytes of input
+
+    // For encryption, the java expanded key ordering is just what we need
+
+    load_key(xmm_temp, key, 0x00, xmm_key_shuf_mask);
+    __ pxor(xmm_result, xmm_temp);
+    for (int offset = 0x10; offset <= 0x90; offset += 0x10) {
+      aes_enc_key(xmm_result, xmm_temp, key, offset, xmm_key_shuf_mask);
+    }
+    load_key  (xmm_temp, key, 0xa0, xmm_key_shuf_mask);
+    __ cmpl(keylen, 0);
+    __ jcc(Assembler::equal, L_doLast);
+    __ aesenc(xmm_result, xmm_temp);                   // only in 192 and 256 bit keys
+    aes_enc_key(xmm_result, xmm_temp, key, 0xb0, xmm_key_shuf_mask);
+    load_key(xmm_temp, key, 0xc0, xmm_key_shuf_mask);
+    __ subl(keylen, 2);
+    __ jcc(Assembler::equal, L_doLast);
+    __ aesenc(xmm_result, xmm_temp);                   // only in 256 bit keys
+    aes_enc_key(xmm_result, xmm_temp, key, 0xd0, xmm_key_shuf_mask);
+    load_key(xmm_temp, key, 0xe0, xmm_key_shuf_mask);
+
+    __ BIND(L_doLast);
+    __ aesenclast(xmm_result, xmm_temp);
+    __ movdqu(Address(to, 0), xmm_result);        // store the result
+    __ xorptr(rax, rax); // return 0
+    __ pop(rsi);
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+    return start;
+  }
+
+
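The keylen arithmetic above is easier to see with concrete numbers. Java expands a key into 4*(rounds+1) ints: 44 for AES-128 (10 rounds), 52 for AES-192 (12), 60 for AES-256 (14). After shrl(keylen, 2) and subl(keylen, 11) the register holds 0, 2, or 4, exactly the number of extra aesenc rounds that the two equal-branches to L_doLast peel off. A standalone restatement, with a hypothetical helper name:

    #include <cassert>

    // ints = length of the expanded key array, as the stub reads it.
    static int extra_rounds(int ints) {
      int keylen = ints / 4;   // shrl(keylen, 2): 32-bit words -> 128-bit words
      return keylen - 11;      // subl(keylen, 11): words beyond the AES-128 set
    }

    int main() {
      assert(extra_rounds(44) == 0);  // AES-128: 10 rounds, 11 round keys
      assert(extra_rounds(52) == 2);  // AES-192: 12 rounds, 13 round keys
      assert(extra_rounds(60) == 4);  // AES-256: 14 rounds, 15 round keys
      return 0;
    }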
+  // Arguments:
+  //
+  // Inputs:
+  //   c_rarg0   - source byte array address
+  //   c_rarg1   - destination byte array address
+  //   c_rarg2   - K (key) in little endian int array
+  //
+  address generate_aescrypt_decryptBlock() {
+    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
+    Label L_doLast;
+    address start = __ pc();
+
+    const Register from        = rsi;      // source array address
+    const Register to          = rdx;      // destination array address
+    const Register key         = rcx;      // key array address
+    const Register keylen      = rax;
+    const Address  from_param(rbp, 8+0);
+    const Address  to_param  (rbp, 8+4);
+    const Address  key_param (rbp, 8+8);
+
+    const XMMRegister xmm_result = xmm0;
+    const XMMRegister xmm_temp   = xmm1;
+    const XMMRegister xmm_key_shuf_mask = xmm2;
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+    __ push(rsi);
+    __ movptr(from , from_param);
+    __ movptr(to   , to_param);
+    __ movptr(key  , key_param);
+
+    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+    // keylen = # of 32-bit words, convert to 128-bit words
+    __ shrl(keylen, 2);
+    __ subl(keylen, 11);   // every key has at least 11 128-bit words, some have more
+
+    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    __ movdqu(xmm_result, Address(from, 0));
+
+    // for decryption java expanded key ordering is rotated one position from what we want
+    // so we start from 0x10 here and hit 0x00 last
+    // we don't know if the key is aligned, hence not using load-execute form
+    load_key(xmm_temp, key, 0x10, xmm_key_shuf_mask);
+    __ pxor  (xmm_result, xmm_temp);
+    for (int offset = 0x20; offset <= 0xa0; offset += 0x10) {
+      aes_dec_key(xmm_result, xmm_temp, key, offset, xmm_key_shuf_mask);
+    }
+    __ cmpl(keylen, 0);
+    __ jcc(Assembler::equal, L_doLast);
+    // only in 192 and 256 bit keys
+    aes_dec_key(xmm_result, xmm_temp, key, 0xb0, xmm_key_shuf_mask);
+    aes_dec_key(xmm_result, xmm_temp, key, 0xc0, xmm_key_shuf_mask);
+    __ subl(keylen, 2);
+    __ jcc(Assembler::equal, L_doLast);
+    // only in 256 bit keys
+    aes_dec_key(xmm_result, xmm_temp, key, 0xd0, xmm_key_shuf_mask);
+    aes_dec_key(xmm_result, xmm_temp, key, 0xe0, xmm_key_shuf_mask);
+
+    __ BIND(L_doLast);
+    // for decryption the aesdeclast operation is always on key+0x00
+    load_key(xmm_temp, key, 0x00, xmm_key_shuf_mask);
+    __ aesdeclast(xmm_result, xmm_temp);
+
+    __ movdqu(Address(to, 0), xmm_result);  // store the result
+
+    __ xorptr(rax, rax); // return 0
+    __ pop(rsi);
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+    return start;
+  }
+
+  void handleSOERegisters(bool saving) {
+    const int saveFrameSizeInBytes = 4 * wordSize;
+    const Address saved_rbx     (rbp, -3 * wordSize);
+    const Address saved_rsi     (rbp, -2 * wordSize);
+    const Address saved_rdi     (rbp, -1 * wordSize);
+
+    if (saving) {
+      __ subptr(rsp, saveFrameSizeInBytes);
+      __ movptr(saved_rsi, rsi);
+      __ movptr(saved_rdi, rdi);
+      __ movptr(saved_rbx, rbx);
+    } else {
+      // restoring
+      __ movptr(rsi, saved_rsi);
+      __ movptr(rdi, saved_rdi);
+      __ movptr(rbx, saved_rbx);
+    }
+  }
+
+  // Arguments:
+  //
+  // Inputs:
+  //   c_rarg0   - source byte array address
+  //   c_rarg1   - destination byte array address
+  //   c_rarg2   - K (key) in little endian int array
+  //   c_rarg3   - r vector byte array address
+  //   c_rarg4   - input length
+  //
+  address generate_cipherBlockChaining_encryptAESCrypt() {
+    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
+    address start = __ pc();
+
+    Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256;
+    const Register from        = rsi;      // source array address
+    const Register to          = rdx;      // destination array address
+    const Register key         = rcx;      // key array address
+    const Register rvec        = rdi;      // r byte array initialized from initvector array address
+                                           // and left with the results of the last encryption block
+    const Register len_reg     = rbx;      // src len (must be multiple of blocksize 16)
+    const Register pos         = rax;
+
+    // xmm register assignments for the loops below
+    const XMMRegister xmm_result = xmm0;
+    const XMMRegister xmm_temp   = xmm1;
+    // first 6 keys preloaded into xmm2-xmm7
+    const int XMM_REG_NUM_KEY_FIRST = 2;
+    const int XMM_REG_NUM_KEY_LAST  = 7;
+    const XMMRegister xmm_key0   = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+    handleSOERegisters(true /*saving*/);
+
+    // load registers from incoming parameters
+    const Address  from_param(rbp, 8+0);
+    const Address  to_param  (rbp, 8+4);
+    const Address  key_param (rbp, 8+8);
+    const Address  rvec_param (rbp, 8+12);
+    const Address  len_param  (rbp, 8+16);
+    __ movptr(from , from_param);
+    __ movptr(to   , to_param);
+    __ movptr(key  , key_param);
+    __ movptr(rvec , rvec_param);
+    __ movptr(len_reg , len_param);
+
+    const XMMRegister xmm_key_shuf_mask = xmm_temp;  // used temporarily to swap key bytes up front
+    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    // load up xmm regs 2 thru 7 with keys 0-5
+    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum  <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
+      offset += 0x10;
+    }
+
+    __ movdqu(xmm_result, Address(rvec, 0x00));   // initialize xmm_result with r vec
+
+    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
+    __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+    __ cmpl(rax, 44);
+    __ jcc(Assembler::notEqual, L_key_192_256);
+
+    // 128 bit code follows here
+    __ movptr(pos, 0);
+    __ align(OptoLoopAlignment);
+    __ BIND(L_loopTop_128);
+    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
+    __ pxor  (xmm_result, xmm_temp);                                // xor with the current r vector
+
+    __ pxor  (xmm_result, xmm_key0);                                // do the aes rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      __ aesenc(xmm_result, as_XMMRegister(rnum));
+    }
+    for (int key_offset = 0x60; key_offset <= 0x90; key_offset += 0x10) {
+      aes_enc_key(xmm_result, xmm_temp, key, key_offset);
+    }
+    load_key(xmm_temp, key, 0xa0);
+    __ aesenclast(xmm_result, xmm_temp);
+
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual, L_loopTop_128);
+
+    __ BIND(L_exit);
+    __ movdqu(Address(rvec, 0), xmm_result);     // final value of r stored in rvec of CipherBlockChaining object
+
+    handleSOERegisters(false /*restoring*/);
+    __ movl(rax, 0);                             // return 0 (why?)
+    __ leave();                                  // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+  __ BIND(L_key_192_256);
+  // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
+    __ cmpl(rax, 52);
+    __ jcc(Assembler::notEqual, L_key_256);
+
+    // 192-bit code follows here (could be changed to use more xmm registers)
+    __ movptr(pos, 0);
+  __ align(OptoLoopAlignment);
+  __ BIND(L_loopTop_192);
+    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
+    __ pxor  (xmm_result, xmm_temp);                                // xor with the current r vector
+
+    __ pxor  (xmm_result, xmm_key0);                                // do the aes rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      __ aesenc(xmm_result, as_XMMRegister(rnum));
+    }
+    for (int key_offset = 0x60; key_offset <= 0xb0; key_offset += 0x10) {
+      aes_enc_key(xmm_result, xmm_temp, key, key_offset);
+    }
+    load_key(xmm_temp, key, 0xc0);
+    __ aesenclast(xmm_result, xmm_temp);
+
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);   // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual, L_loopTop_192);
+    __ jmp(L_exit);
+
+  __ BIND(L_key_256);
+    // 256-bit code follows here (could be changed to use more xmm registers)
+    __ movptr(pos, 0);
+  __ align(OptoLoopAlignment);
+  __ BIND(L_loopTop_256);
+    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
+    __ pxor  (xmm_result, xmm_temp);                                // xor with the current r vector
+
+    __ pxor  (xmm_result, xmm_key0);                                // do the aes rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      __ aesenc(xmm_result, as_XMMRegister(rnum));
+    }
+    for (int key_offset = 0x60; key_offset <= 0xd0; key_offset += 0x10) {
+      aes_enc_key(xmm_result, xmm_temp, key, key_offset);
+    }
+    load_key(xmm_temp, key, 0xe0);
+    __ aesenclast(xmm_result, xmm_temp);
+
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);   // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual, L_loopTop_256);
+    __ jmp(L_exit);
+
+    return start;
+  }
+
+
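The loop structure above is plain CBC encryption: each plaintext block is xored with the previous ciphertext block (the r vector) before the block cipher runs, and the final ciphertext block is written back as the new r vector. A minimal sketch with AES-NI intrinsics, assuming an expanded AES-128 schedule rk[11] and a 16-byte-multiple length as the stub requires (hypothetical helper, not the stub itself):

    #include <wmmintrin.h>
    #include <cstddef>

    // CBC encrypt: c[i] = E(p[i] ^ c[i-1]); rvec holds c[-1] on entry and
    // the last ciphertext block on exit, like the stub's r vector contract.
    static void cbc_encrypt(const unsigned char* from, unsigned char* to,
                            const __m128i rk[11], unsigned char* rvec, size_t len) {
      __m128i r = _mm_loadu_si128((const __m128i*)rvec);
      for (size_t pos = 0; pos < len; pos += 16) {
        __m128i b = _mm_loadu_si128((const __m128i*)(from + pos));
        b = _mm_xor_si128(b, r);                    // xor with the current r vector
        b = _mm_xor_si128(b, rk[0]);
        for (int rnd = 1; rnd <= 9; rnd++) b = _mm_aesenc_si128(b, rk[rnd]);
        r = _mm_aesenclast_si128(b, rk[10]);
        _mm_storeu_si128((__m128i*)(to + pos), r);  // store the ciphertext block
      }
      _mm_storeu_si128((__m128i*)rvec, r);          // final r back to rvec
    }

Because each block's input depends on the previous block's output, CBC encryption is inherently serial; only the decrypt side gets a parallel variant (in the 64-bit stub further below).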
+  // CBC AES Decryption.
+  // In the 32-bit stub we do not try to parallelize 4 blocks at a time, because of the lack of registers.
+  //
+  // Arguments:
+  //
+  // Inputs:
+  //   c_rarg0   - source byte array address
+  //   c_rarg1   - destination byte array address
+  //   c_rarg2   - K (key) in little endian int array
+  //   c_rarg3   - r vector byte array address
+  //   c_rarg4   - input length
+  //
+
+  address generate_cipherBlockChaining_decryptAESCrypt() {
+    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
+    address start = __ pc();
+
+    Label L_exit, L_key_192_256, L_key_256;
+    Label L_singleBlock_loopTop_128;
+    Label L_singleBlock_loopTop_192, L_singleBlock_loopTop_256;
+    const Register from        = rsi;      // source array address
+    const Register to          = rdx;      // destination array address
+    const Register key         = rcx;      // key array address
+    const Register rvec        = rdi;      // r byte array initialized from initvector array address
+                                           // and left with the results of the last encryption block
+    const Register len_reg     = rbx;      // src len (must be multiple of blocksize 16)
+    const Register pos         = rax;
+
+    // xmm register assignments for the loops below
+    const XMMRegister xmm_result = xmm0;
+    const XMMRegister xmm_temp   = xmm1;
+    // first 6 keys preloaded into xmm2-xmm7
+    const int XMM_REG_NUM_KEY_FIRST = 2;
+    const int XMM_REG_NUM_KEY_LAST  = 7;
+    const int FIRST_NON_REG_KEY_offset = 0x70;
+    const XMMRegister xmm_key_first   = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+    handleSOERegisters(true /*saving*/);
+
+    // load registers from incoming parameters
+    const Address  from_param(rbp, 8+0);
+    const Address  to_param  (rbp, 8+4);
+    const Address  key_param (rbp, 8+8);
+    const Address  rvec_param (rbp, 8+12);
+    const Address  len_param  (rbp, 8+16);
+    __ movptr(from , from_param);
+    __ movptr(to   , to_param);
+    __ movptr(key  , key_param);
+    __ movptr(rvec , rvec_param);
+    __ movptr(len_reg , len_param);
+
+    // the java expanded key ordering is rotated one position from what we want
+    // so we start from 0x10 here and hit 0x00 last
+    const XMMRegister xmm_key_shuf_mask = xmm1;  // used temporarily to swap key bytes up front
+    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    // load up xmm regs 2 thru 7 with keys at offsets 0x10 thru 0x60
+    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum  <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
+      offset += 0x10;
+    }
+
+    // inside here, use the rvec register to point to previous block cipher
+    // with which we xor at the end of each newly decrypted block
+    const Register  prev_block_cipher_ptr = rvec;
+
+    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
+    __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+    __ cmpl(rax, 44);
+    __ jcc(Assembler::notEqual, L_key_192_256);
+
+
+    // 128-bit code follows here (single-block loop)
+    __ movptr(pos, 0);
+  __ align(OptoLoopAlignment);
+  __ BIND(L_singleBlock_loopTop_128);
+    __ cmpptr(len_reg, 0);           // any blocks left??
+    __ jcc(Assembler::equal, L_exit);
+    __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
+    __ pxor  (xmm_result, xmm_key_first);                             // do the aes dec rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      __ aesdec(xmm_result, as_XMMRegister(rnum));
+    }
+    for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xa0; key_offset += 0x10) {   // 128-bit runs up to key offset a0
+      aes_dec_key(xmm_result, xmm_temp, key, key_offset);
+    }
+    load_key(xmm_temp, key, 0x00);                                     // final key is stored in java expanded array at offset 0
+    __ aesdeclast(xmm_result, xmm_temp);
+    __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
+    __ pxor  (xmm_result, xmm_temp);                                  // xor with the current r vector
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0));     // set up new ptr
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jmp(L_singleBlock_loopTop_128);
+
+
+    __ BIND(L_exit);
+    __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
+    __ movptr(rvec , rvec_param);                                     // restore this since used in loop
+    __ movdqu(Address(rvec, 0), xmm_temp);                            // final value of r stored in rvec of CipherBlockChaining object
+    handleSOERegisters(false /*restoring*/);
+    __ movl(rax, 0);                                                  // return 0 (why?)
+    __ leave();                                                       // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+
+    __ BIND(L_key_192_256);
+    // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
+    __ cmpl(rax, 52);
+    __ jcc(Assembler::notEqual, L_key_256);
+
+    // 192-bit code follows here (could be optimized to use parallelism)
+    __ movptr(pos, 0);
+    __ align(OptoLoopAlignment);
+    __ BIND(L_singleBlock_loopTop_192);
+    __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
+    __ pxor  (xmm_result, xmm_key_first);                             // do the aes dec rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      __ aesdec(xmm_result, as_XMMRegister(rnum));
+    }
+    for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xc0; key_offset += 0x10) {   // 192-bit runs up to key offset c0
+      aes_dec_key(xmm_result, xmm_temp, key, key_offset);
+    }
+    load_key(xmm_temp, key, 0x00);                                     // final key is stored in java expanded array at offset 0
+    __ aesdeclast(xmm_result, xmm_temp);
+    __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
+    __ pxor  (xmm_result, xmm_temp);                                  // xor with the current r vector
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0));     // set up new ptr
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual,L_singleBlock_loopTop_192);
+    __ jmp(L_exit);
+
+    __ BIND(L_key_256);
+    // 256-bit code follows here (could be optimized to use parallelism)
+    __ movptr(pos, 0);
+    __ align(OptoLoopAlignment);
+    __ BIND(L_singleBlock_loopTop_256);
+    __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
+    __ pxor  (xmm_result, xmm_key_first);                             // do the aes dec rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      __ aesdec(xmm_result, as_XMMRegister(rnum));
+    }
+    for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xe0; key_offset += 0x10) {   // 256-bit runs up to key offset e0
+      aes_dec_key(xmm_result, xmm_temp, key, key_offset);
+    }
+    load_key(xmm_temp, key, 0x00);                                     // final key is stored in java expanded array at offset 0
+    __ aesdeclast(xmm_result, xmm_temp);
+    __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
+    __ pxor  (xmm_result, xmm_temp);                                  // xor with the current r vector
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0));     // set up new ptr
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual,L_singleBlock_loopTop_256);
+    __ jmp(L_exit);
+
+    return start;
+  }
+
+
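Decryption inverts the chaining: p[i] = D(c[i]) ^ c[i-1], which is why prev_block_cipher_ptr trails one block behind in the source array, and why the key order differs (the aesdec rounds consume java key offsets 0x10 upward, with offset 0x00 reserved for aesdeclast). Unlike encryption, every D(c[i]) depends only on ciphertext already in hand, which the parallel 64-bit variant below exploits. A single-block sketch under the same AES-128 assumptions as before:

    #include <wmmintrin.h>
    #include <cstddef>

    // CBC decrypt, one block at a time: p[i] = D(c[i]) ^ c[i-1].
    // drk[0..9]: inverse-cipher round keys (java offsets 0x10 - 0xa0);
    // drk[10]: the aesdeclast key (java offset 0x00).
    static void cbc_decrypt(const unsigned char* from, unsigned char* to,
                            const __m128i drk[11], unsigned char* rvec, size_t len) {
      __m128i prev = _mm_loadu_si128((const __m128i*)rvec);   // c[-1]
      for (size_t pos = 0; pos < len; pos += 16) {
        __m128i c = _mm_loadu_si128((const __m128i*)(from + pos));
        __m128i b = _mm_xor_si128(c, drk[0]);
        for (int rnd = 1; rnd <= 9; rnd++) b = _mm_aesdec_si128(b, drk[rnd]);
        b = _mm_aesdeclast_si128(b, drk[10]);
        b = _mm_xor_si128(b, prev);                            // ^ c[i-1]
        _mm_storeu_si128((__m128i*)(to + pos), b);
        prev = c;                                              // c[i] chains forward
      }
      _mm_storeu_si128((__m128i*)rvec, prev);                  // final c back to rvec
    }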
  public:
   // Information about frame layout at time of blocking runtime call.
   // Note that we only have to preserve callee-saved registers since
@@ -2332,6 +2855,16 @@
     generate_arraycopy_stubs();
 
     generate_math_stubs();
+
+    // don't bother generating these AES intrinsic stubs unless global flag is set
+    if (UseAESIntrinsics) {
+      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask();  // might be needed by the others
+
+      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
+      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
+      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
+      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
+    }
   }
 
 
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -2941,6 +2941,548 @@
     }
   }
 
+  // AES intrinsic stubs
+  enum {AESBlockSize = 16};
+
+  address generate_key_shuffle_mask() {
+    __ align(16);
+    StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask");
+    address start = __ pc();
+    __ emit_data64( 0x0405060700010203, relocInfo::none );
+    __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none );
+    return start;
+  }
+
+  // Utility routine for loading a 128-bit key word in little endian format
+  // can optionally specify that the shuffle mask is already in an xmm register
+  void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
+    __ movdqu(xmmdst, Address(key, offset));
+    if (xmm_shuf_mask != NULL) {
+      __ pshufb(xmmdst, xmm_shuf_mask);
+    } else {
+      __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    }
+  }
+
+  // aesenc using specified key+offset
+  // can optionally specify that the shuffle mask is already in an xmm register
+  void aes_enc_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
+    load_key(xmmtmp, key, offset, xmm_shuf_mask);
+    __ aesenc(xmmdst, xmmtmp);
+  }
+
+  // aesdec using specified key+offset
+  // can optionally specify that the shuffle mask is already in an xmm register
+  void aes_dec_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
+    load_key(xmmtmp, key, offset, xmm_shuf_mask);
+    __ aesdec(xmmdst, xmmtmp);
+  }
+
+
+  // Arguments:
+  //
+  // Inputs:
+  //   c_rarg0   - source byte array address
+  //   c_rarg1   - destination byte array address
+  //   c_rarg2   - K (key) in little endian int array
+  //
+  address generate_aescrypt_encryptBlock() {
+    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
+    Label L_doLast;
+    address start = __ pc();
+
+    const Register from        = c_rarg0;  // source array address
+    const Register to          = c_rarg1;  // destination array address
+    const Register key         = c_rarg2;  // key array address
+    const Register keylen      = rax;
+
+    const XMMRegister xmm_result = xmm0;
+    const XMMRegister xmm_temp   = xmm1;
+    const XMMRegister xmm_key_shuf_mask = xmm2;
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+
+    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+    // keylen = # of 32-bit words, convert to 128-bit words
+    __ shrl(keylen, 2);
+    __ subl(keylen, 11);   // every key has at least 11 128-bit words, some have more
+
+    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    __ movdqu(xmm_result, Address(from, 0));  // get 16 bytes of input
+
+    // For encryption, the java expanded key ordering is just what we need
+    // we don't know if the key is aligned, hence not using load-execute form
+
+    load_key(xmm_temp, key, 0x00, xmm_key_shuf_mask);
+    __ pxor(xmm_result, xmm_temp);
+    for (int offset = 0x10; offset <= 0x90; offset += 0x10) {
+      aes_enc_key(xmm_result, xmm_temp, key, offset, xmm_key_shuf_mask);
+    }
+    load_key  (xmm_temp, key, 0xa0, xmm_key_shuf_mask);
+    __ cmpl(keylen, 0);
+    __ jcc(Assembler::equal, L_doLast);
+    __ aesenc(xmm_result, xmm_temp);                   // only in 192 and 256 bit keys
+    aes_enc_key(xmm_result, xmm_temp, key, 0xb0, xmm_key_shuf_mask);
+    load_key(xmm_temp, key, 0xc0, xmm_key_shuf_mask);
+    __ subl(keylen, 2);
+    __ jcc(Assembler::equal, L_doLast);
+    __ aesenc(xmm_result, xmm_temp);                   // only in 256 bit keys
+    aes_enc_key(xmm_result, xmm_temp, key, 0xd0, xmm_key_shuf_mask);
+    load_key(xmm_temp, key, 0xe0, xmm_key_shuf_mask);
+
+    __ BIND(L_doLast);
+    __ aesenclast(xmm_result, xmm_temp);
+    __ movdqu(Address(to, 0), xmm_result);        // store the result
+    __ xorptr(rax, rax); // return 0
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+    return start;
+  }
+
+
+  // Arguments:
+  //
+  // Inputs:
+  //   c_rarg0   - source byte array address
+  //   c_rarg1   - destination byte array address
+  //   c_rarg2   - K (key) in little endian int array
+  //
+  address generate_aescrypt_decryptBlock() {
+    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
+    Label L_doLast;
+    address start = __ pc();
+
+    const Register from        = c_rarg0;  // source array address
+    const Register to          = c_rarg1;  // destination array address
+    const Register key         = c_rarg2;  // key array address
+    const Register keylen      = rax;
+
+    const XMMRegister xmm_result = xmm0;
+    const XMMRegister xmm_temp   = xmm1;
+    const XMMRegister xmm_key_shuf_mask = xmm2;
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+
+    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+    // keylen = # of 32-bit words, convert to 128-bit words
+    __ shrl(keylen, 2);
+    __ subl(keylen, 11);   // every key has at least 11 128-bit words, some have more
+
+    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    __ movdqu(xmm_result, Address(from, 0));
+
+    // for decryption java expanded key ordering is rotated one position from what we want
+    // so we start from 0x10 here and hit 0x00 last
+    // we don't know if the key is aligned, hence not using load-execute form
+    load_key(xmm_temp, key, 0x10, xmm_key_shuf_mask);
+    __ pxor  (xmm_result, xmm_temp);
+    for (int offset = 0x20; offset <= 0xa0; offset += 0x10) {
+      aes_dec_key(xmm_result, xmm_temp, key, offset, xmm_key_shuf_mask);
+    }
+    __ cmpl(keylen, 0);
+    __ jcc(Assembler::equal, L_doLast);
+    // only in 192 and 256 bit keys
+    aes_dec_key(xmm_result, xmm_temp, key, 0xb0, xmm_key_shuf_mask);
+    aes_dec_key(xmm_result, xmm_temp, key, 0xc0, xmm_key_shuf_mask);
+    __ subl(keylen, 2);
+    __ jcc(Assembler::equal, L_doLast);
+    // only in 256 bit keys
+    aes_dec_key(xmm_result, xmm_temp, key, 0xd0, xmm_key_shuf_mask);
+    aes_dec_key(xmm_result, xmm_temp, key, 0xe0, xmm_key_shuf_mask);
+
+    __ BIND(L_doLast);
+    // for decryption the aesdeclast operation is always on key+0x00
+    load_key(xmm_temp, key, 0x00, xmm_key_shuf_mask);
+    __ aesdeclast(xmm_result, xmm_temp);
+
+    __ movdqu(Address(to, 0), xmm_result);  // store the result
+
+    __ xorptr(rax, rax); // return 0
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+    return start;
+  }
+
+
+  // Arguments:
+  //
+  // Inputs:
+  //   c_rarg0   - source byte array address
+  //   c_rarg1   - destination byte array address
+  //   c_rarg2   - K (key) in little endian int array
+  //   c_rarg3   - r vector byte array address
+  //   c_rarg4   - input length
+  //
+  address generate_cipherBlockChaining_encryptAESCrypt() {
+    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
+    address start = __ pc();
+
+    Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256;
+    const Register from        = c_rarg0;  // source array address
+    const Register to          = c_rarg1;  // destination array address
+    const Register key         = c_rarg2;  // key array address
+    const Register rvec        = c_rarg3;  // r byte array initialized from initvector array address
+                                           // and left with the results of the last encryption block
+#ifndef _WIN64
+    const Register len_reg     = c_rarg4;  // src len (must be multiple of blocksize 16)
+#else
+    const Address  len_mem(rsp, 6 * wordSize);  // length is on stack on Win64
+    const Register len_reg     = r10;      // pick the first volatile windows register
+#endif
+    const Register pos         = rax;
+
+    // xmm register assignments for the loops below
+    const XMMRegister xmm_result = xmm0;
+    const XMMRegister xmm_temp   = xmm1;
+    // keys 0-10 preloaded into xmm2-xmm12
+    const int XMM_REG_NUM_KEY_FIRST = 2;
+    const int XMM_REG_NUM_KEY_LAST  = 12;
+    const XMMRegister xmm_key0   = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
+    const XMMRegister xmm_key10  = as_XMMRegister(XMM_REG_NUM_KEY_LAST);
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+
+#ifdef _WIN64
+    // on win64, fill len_reg from stack position
+    __ movl(len_reg, len_mem);
+    // save the xmm registers which must be preserved 6-12
+    __ subptr(rsp, -rsp_after_call_off * wordSize);
+    for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
+      __ movdqu(xmm_save(i), as_XMMRegister(i));
+    }
+#endif
+
+    const XMMRegister xmm_key_shuf_mask = xmm_temp;  // used temporarily to swap key bytes up front
+    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    // load up xmm regs 2 thru 12 with keys 0x00 - 0xa0
+    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
+      offset += 0x10;
+    }
+
+    __ movdqu(xmm_result, Address(rvec, 0x00));   // initialize xmm_result with r vec
+
+    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
+    __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+    __ cmpl(rax, 44);
+    __ jcc(Assembler::notEqual, L_key_192_256);
+
+    // 128 bit code follows here
+    __ movptr(pos, 0);
+    __ align(OptoLoopAlignment);
+    __ BIND(L_loopTop_128);
+    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
+    __ pxor  (xmm_result, xmm_temp);               // xor with the current r vector
+
+    __ pxor  (xmm_result, xmm_key0);               // do the aes rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
+      __ aesenc(xmm_result, as_XMMRegister(rnum));
+    }
+    __ aesenclast(xmm_result, xmm_key10);
+
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual, L_loopTop_128);
+
+    __ BIND(L_exit);
+    __ movdqu(Address(rvec, 0), xmm_result);     // final value of r stored in rvec of CipherBlockChaining object
+
+#ifdef _WIN64
+    // restore xmm regs belonging to calling function
+    for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
+      __ movdqu(as_XMMRegister(i), xmm_save(i));
+    }
+#endif
+    __ movl(rax, 0); // return 0 (why?)
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+    __ BIND(L_key_192_256);
+    // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
+    __ cmpl(rax, 52);
+    __ jcc(Assembler::notEqual, L_key_256);
+
+    // 192-bit code follows here (could be changed to use more xmm registers)
+    __ movptr(pos, 0);
+    __ align(OptoLoopAlignment);
+    __ BIND(L_loopTop_192);
+    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
+    __ pxor  (xmm_result, xmm_temp);               // xor with the current r vector
+
+    __ pxor  (xmm_result, xmm_key0);               // do the aes rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      __ aesenc(xmm_result, as_XMMRegister(rnum));
+    }
+    aes_enc_key(xmm_result, xmm_temp, key, 0xb0);
+    load_key(xmm_temp, key, 0xc0);
+    __ aesenclast(xmm_result, xmm_temp);
+
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual, L_loopTop_192);
+    __ jmp(L_exit);
+
+    __ BIND(L_key_256);
+    // 256-bit code follows here (could be changed to use more xmm registers)
+    __ movptr(pos, 0);
+    __ align(OptoLoopAlignment);
+    __ BIND(L_loopTop_256);
+    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
+    __ pxor  (xmm_result, xmm_temp);               // xor with the current r vector
+
+    __ pxor  (xmm_result, xmm_key0);               // do the aes rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      __ aesenc(xmm_result, as_XMMRegister(rnum));
+    }
+    aes_enc_key(xmm_result, xmm_temp, key, 0xb0);
+    aes_enc_key(xmm_result, xmm_temp, key, 0xc0);
+    aes_enc_key(xmm_result, xmm_temp, key, 0xd0);
+    load_key(xmm_temp, key, 0xe0);
+    __ aesenclast(xmm_result, xmm_temp);
+
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual, L_loopTop_256);
+    __ jmp(L_exit);
+
+    return start;
+  }
+
+
+
+  // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time
+  // to hide instruction latency
+  //
+  // Arguments:
+  //
+  // Inputs:
+  //   c_rarg0   - source byte array address
+  //   c_rarg1   - destination byte array address
+  //   c_rarg2   - K (key) in little endian int array
+  //   c_rarg3   - r vector byte array address
+  //   c_rarg4   - input length
+  //
+
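The DoFour pattern in the stub below interleaves four independent aesdec chains, so while one aesdec is still in flight the other three can issue, hiding the instruction's multi-cycle latency. A sketch of the same 4-block round structure (AES-128 and inverse-key assumptions as above; remainder blocks would fall back to the single-block loop):

    #include <wmmintrin.h>

    // One decryption round sweep over four independent blocks b0..b3,
    // mirroring the DoFour(aesdec, ...) expansion in the stub.
    static void aes128_dec4(__m128i b[4], const __m128i drk[11]) {
      for (int i = 0; i < 4; i++) b[i] = _mm_xor_si128(b[i], drk[0]);
      for (int rnd = 1; rnd <= 9; rnd++)
        for (int i = 0; i < 4; i++)               // four independent dependency chains
          b[i] = _mm_aesdec_si128(b[i], drk[rnd]);
      for (int i = 0; i < 4; i++) b[i] = _mm_aesdeclast_si128(b[i], drk[10]);
    }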
+  address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
+    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
+    address start = __ pc();
+
+    Label L_exit, L_key_192_256, L_key_256;
+    Label L_singleBlock_loopTop_128, L_multiBlock_loopTop_128;
+    Label L_singleBlock_loopTop_192, L_singleBlock_loopTop_256;
+    const Register from        = c_rarg0;  // source array address
+    const Register to          = c_rarg1;  // destination array address
+    const Register key         = c_rarg2;  // key array address
+    const Register rvec        = c_rarg3;  // r byte array initialized from initvector array address
+                                           // and left with the results of the last encryption block
+#ifndef _WIN64
+    const Register len_reg     = c_rarg4;  // src len (must be multiple of blocksize 16)
+#else
+    const Address  len_mem(rsp, 6 * wordSize);  // length is on stack on Win64
+    const Register len_reg     = r10;      // pick the first volatile windows register
+#endif
+    const Register pos         = rax;
+
+    // xmm register assignments for the loops below
+    const XMMRegister xmm_result = xmm0;
+    // keys 0-10 preloaded into xmm5-xmm15
+    const int XMM_REG_NUM_KEY_FIRST = 5;
+    const int XMM_REG_NUM_KEY_LAST  = 15;
+    const XMMRegister xmm_key_first   = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
+    const XMMRegister xmm_key_last  = as_XMMRegister(XMM_REG_NUM_KEY_LAST);
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+
+#ifdef _WIN64
+    // on win64, fill len_reg from stack position
+    __ movl(len_reg, len_mem);
+    // save the xmm registers which must be preserved 6-15
+    __ subptr(rsp, -rsp_after_call_off * wordSize);
+    for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
+      __ movdqu(xmm_save(i), as_XMMRegister(i));
+    }
+#endif
+    // the java expanded key ordering is rotated one position from what we want
+    // so we start from 0x10 here and hit 0x00 last
+    const XMMRegister xmm_key_shuf_mask = xmm1;  // used temporarily to swap key bytes up front
+    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    // load up xmm regs 5 thru 15 with keys at offsets 0x10 - 0xa0, then 0x00 last
+    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      if (rnum == XMM_REG_NUM_KEY_LAST) offset = 0x00;
+      load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
+      offset += 0x10;
+    }
+
+    const XMMRegister xmm_prev_block_cipher = xmm1;  // holds cipher of previous block
+    // registers holding the four results in the parallelized loop
+    const XMMRegister xmm_result0 = xmm0;
+    const XMMRegister xmm_result1 = xmm2;
+    const XMMRegister xmm_result2 = xmm3;
+    const XMMRegister xmm_result3 = xmm4;
+
+    __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00));   // initialize with initial rvec
+
+    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
+    __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+    __ cmpl(rax, 44);
+    __ jcc(Assembler::notEqual, L_key_192_256);
+
+
+    // 128-bit code follows here, parallelized
+    __ movptr(pos, 0);
+    __ align(OptoLoopAlignment);
+    __ BIND(L_multiBlock_loopTop_128);
+    __ cmpptr(len_reg, 4*AESBlockSize);           // see if at least 4 blocks left
+    __ jcc(Assembler::less, L_singleBlock_loopTop_128);
+
+    __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0*AESBlockSize));   // get next 4 blocks into xmmresult registers
+    __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1*AESBlockSize));
+    __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2*AESBlockSize));
+    __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3*AESBlockSize));
+
+#define DoFour(opc, src_reg)                    \
+    __ opc(xmm_result0, src_reg);               \
+    __ opc(xmm_result1, src_reg);               \
+    __ opc(xmm_result2, src_reg);               \
+    __ opc(xmm_result3, src_reg);
+
+    DoFour(pxor, xmm_key_first);
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
+      DoFour(aesdec, as_XMMRegister(rnum));
+    }
+    DoFour(aesdeclast, xmm_key_last);
+    // for each result, xor with the r vector of previous cipher block
+    __ pxor(xmm_result0, xmm_prev_block_cipher);
+    __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0*AESBlockSize));
+    __ pxor(xmm_result1, xmm_prev_block_cipher);
+    __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1*AESBlockSize));
+    __ pxor(xmm_result2, xmm_prev_block_cipher);
+    __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2*AESBlockSize));
+    __ pxor(xmm_result3, xmm_prev_block_cipher);
+    __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3*AESBlockSize));   // this will carry over to next set of blocks
+
+    __ movdqu(Address(to, pos, Address::times_1, 0*AESBlockSize), xmm_result0);     // store 4 results into the next 64 bytes of output
+    __ movdqu(Address(to, pos, Address::times_1, 1*AESBlockSize), xmm_result1);
+    __ movdqu(Address(to, pos, Address::times_1, 2*AESBlockSize), xmm_result2);
+    __ movdqu(Address(to, pos, Address::times_1, 3*AESBlockSize), xmm_result3);
+
+    __ addptr(pos, 4*AESBlockSize);
+    __ subptr(len_reg, 4*AESBlockSize);
+    __ jmp(L_multiBlock_loopTop_128);
+
+    // registers used in the non-parallelized loops
+    const XMMRegister xmm_prev_block_cipher_save = xmm2;
+    const XMMRegister xmm_temp   = xmm3;
+
+    __ align(OptoLoopAlignment);
+    __ BIND(L_singleBlock_loopTop_128);
+    __ cmpptr(len_reg, 0);           // any blocks left?
+    __ jcc(Assembler::equal, L_exit);
+    __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
+    __ movdqa(xmm_prev_block_cipher_save, xmm_result);              // save for next r vector
+    __ pxor  (xmm_result, xmm_key_first);               // do the aes dec rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
+      __ aesdec(xmm_result, as_XMMRegister(rnum));
+    }
+    __ aesdeclast(xmm_result, xmm_key_last);
+    __ pxor  (xmm_result, xmm_prev_block_cipher);               // xor with the current r vector
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save);              // set up next r vector with cipher input from this block
+
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jmp(L_singleBlock_loopTop_128);
+
+
+    __ BIND(L_exit);
+    __ movdqu(Address(rvec, 0), xmm_prev_block_cipher);     // final value of r stored in rvec of CipherBlockChaining object
+#ifdef _WIN64
+    // restore regs belonging to calling function
+    for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
+      __ movdqu(as_XMMRegister(i), xmm_save(i));
+    }
+#endif
+    __ movl(rax, 0); // return 0 (why?)
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+
+    __ BIND(L_key_192_256);
+    // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
+    __ cmpl(rax, 52);
+    __ jcc(Assembler::notEqual, L_key_256);
+
+    // 192-bit code follows here (could be optimized to use parallelism)
+    __ movptr(pos, 0);
+    __ align(OptoLoopAlignment);
+    __ BIND(L_singleBlock_loopTop_192);
+    __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
+    __ movdqa(xmm_prev_block_cipher_save, xmm_result);              // save for next r vector
+    __ pxor  (xmm_result, xmm_key_first);               // do the aes dec rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
+      __ aesdec(xmm_result, as_XMMRegister(rnum));
+    }
+    aes_dec_key(xmm_result, xmm_temp, key, 0xb0);     // 192-bit key goes up to c0
+    aes_dec_key(xmm_result, xmm_temp, key, 0xc0);
+    __ aesdeclast(xmm_result, xmm_key_last);                    // xmm15 always came from key+0
+    __ pxor  (xmm_result, xmm_prev_block_cipher);               // xor with the current r vector
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save);              // set up next r vector with cipher input from this block
+
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual, L_singleBlock_loopTop_192);
+    __ jmp(L_exit);
+
+    __ BIND(L_key_256);
+    // 256-bit code follows here (could be optimized to use parallelism)
+    __ movptr(pos, 0);
+    __ align(OptoLoopAlignment);
+    __ BIND(L_singleBlock_loopTop_256);
+    __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
+    __ movdqa(xmm_prev_block_cipher_save, xmm_result);              // save for next r vector
+    __ pxor  (xmm_result, xmm_key_first);               // do the aes dec rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
+      __ aesdec(xmm_result, as_XMMRegister(rnum));
+    }
+    aes_dec_key(xmm_result, xmm_temp, key, 0xb0);     // 256-bit key goes up to e0
+    aes_dec_key(xmm_result, xmm_temp, key, 0xc0);
+    aes_dec_key(xmm_result, xmm_temp, key, 0xd0);
+    aes_dec_key(xmm_result, xmm_temp, key, 0xe0);
+    __ aesdeclast(xmm_result, xmm_key_last);             // xmm15 came from key+0
+    __ pxor  (xmm_result, xmm_prev_block_cipher);               // xor with the current r vector
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save);              // set up next r vector with cipher input from this block
+
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual, L_singleBlock_loopTop_256);
+    __ jmp(L_exit);
+
+    return start;
+  }
+
+
+
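The 4-way unrolling above is possible only for CBC decryption: each plaintext block is D(cipher[i]) ^ cipher[i-1], and every D() input is ciphertext already in memory, so consecutive AESDEC chains are independent (CBC encryption, by contrast, is a serial chain through the previous output). A scalar C++ reference model of the same dataflow follows; the per-block primitive is a stand-in identity copy, since the real rounds are the stub's job:

    #include <cstdint>
    #include <cstring>

    // Stand-in for the real per-block primitive: the stub above runs the
    // AESDEC round sequence here. An identity copy keeps the model runnable.
    static void aes128_decrypt_block(const uint8_t* /*key*/,
                                     const uint8_t in[16], uint8_t out[16]) {
      std::memcpy(out, in, 16);
    }

    // CBC decryption: plain[i] = D(cipher[i]) ^ cipher[i-1], with cipher[-1]
    // being the initial r vector. Every D() input is ciphertext already in
    // memory, so consecutive blocks can be decrypted in parallel -- the
    // property the xmm_result0..3 loop exploits four at a time.
    static void cbc_decrypt_model(const uint8_t* key, uint8_t rvec[16],
                                  const uint8_t* from, uint8_t* to, size_t len) {
      uint8_t prev[16], cur[16];
      std::memcpy(prev, rvec, 16);
      for (size_t pos = 0; pos < len; pos += 16) {
        std::memcpy(cur, from + pos, 16);        // save cipher for next r vector
        aes128_decrypt_block(key, cur, to + pos);
        for (int i = 0; i < 16; i++)
          to[pos + i] ^= prev[i];                // xor with previous cipher block
        std::memcpy(prev, cur, 16);
      }
      std::memcpy(rvec, prev, 16);               // final r back to the caller
    }
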
 #undef __
 #define __ masm->
 
@@ -3135,6 +3677,16 @@
     generate_arraycopy_stubs();
 
     generate_math_stubs();
+
+    // don't bother generating these AES intrinsic stubs unless global flag is set
+    if (UseAESIntrinsics) {
+      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask();  // needed by the others
+
+      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
+      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
+      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
+      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
+    }
   }
 
  public:
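Assuming the two new switches behave like ordinary product flags, the gated stub registration above can be exercised from the command line on AES-capable hardware. The flag names are taken from this diff; the grep is just one way to inspect the resolved values:

    java -XX:+UseAES -XX:+UseAESIntrinsics -version
    java -XX:+PrintFlagsFinal -version | grep AES
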
--- a/src/cpu/x86/vm/stubRoutines_x86_32.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/x86/vm/stubRoutines_x86_32.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -44,3 +44,4 @@
 
 address StubRoutines::x86::_verify_mxcsr_entry         = NULL;
 address StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = NULL;
+address StubRoutines::x86::_key_shuffle_mask_addr = NULL;
--- a/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -41,10 +41,14 @@
  private:
   static address _verify_mxcsr_entry;
   static address _verify_fpu_cntrl_wrd_entry;
+  // shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers
+  static address _key_shuffle_mask_addr;
 
  public:
   static address verify_mxcsr_entry()                        { return _verify_mxcsr_entry; }
   static address verify_fpu_cntrl_wrd_entry()                { return _verify_fpu_cntrl_wrd_entry; }
+  static address key_shuffle_mask_addr()                     { return _key_shuffle_mask_addr; }
+
 };
 
   static bool    returns_to_call_stub(address return_pc)     { return return_pc == _call_stub_return_address; }
--- a/src/cpu/x86/vm/stubRoutines_x86_64.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/x86/vm/stubRoutines_x86_64.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -56,3 +56,4 @@
 address StubRoutines::x86::_double_sign_mask = NULL;
 address StubRoutines::x86::_double_sign_flip = NULL;
 address StubRoutines::x86::_mxcsr_std = NULL;
+address StubRoutines::x86::_key_shuffle_mask_addr = NULL;
--- a/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -54,6 +54,8 @@
   static address _double_sign_mask;
   static address _double_sign_flip;
   static address _mxcsr_std;
+  // shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers
+  static address _key_shuffle_mask_addr;
 
  public:
 
@@ -116,6 +118,9 @@
   {
     return _mxcsr_std;
   }
+
+  static address key_shuffle_mask_addr()                     { return _key_shuffle_mask_addr; }
+
 };
 
 #endif // CPU_X86_VM_STUBROUTINES_X86_64_HPP
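The mask behind _key_shuffle_mask_addr feeds PSHUFB to byte-reverse each 32-bit lane of a 128-bit word, turning the big-endian ints of the Java key schedule into the layout AESENC/AESDEC expect. A scalar model of that shuffle; the 16 mask bytes are inferred from the header comment, not copied from the stub generator:

    #include <cstdint>

    // PSHUFB semantics: dst[i] = (mask[i] & 0x80) ? 0 : src[mask[i] & 0x0f].
    // This mask byte-reverses each 32-bit lane, which is what "fixing up
    // 128-bit words consisting of big-endian 32-bit integers" amounts to.
    // (Mask bytes inferred from the comment above, not copied from the stub.)
    static const uint8_t kKeyShuffleMask[16] = {
       3,  2,  1,  0,   7,  6,  5,  4,
      11, 10,  9,  8,  15, 14, 13, 12
    };

    static void pshufb_model(uint8_t dst[16], const uint8_t src[16],
                             const uint8_t mask[16]) {
      for (int i = 0; i < 16; i++)
        dst[i] = (mask[i] & 0x80) ? 0 : src[mask[i] & 0x0f];
    }
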
--- a/src/cpu/x86/vm/vm_version_x86.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/x86/vm/vm_version_x86.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -419,13 +419,16 @@
   if (UseAVX < 1)
     _cpuFeatures &= ~CPU_AVX;
 
+  if (!UseAES && !FLAG_IS_DEFAULT(UseAES))
+    _cpuFeatures &= ~CPU_AES;
+
   if (logical_processors_per_package() == 1) {
     // HT processor could be installed on a system which doesn't support HT.
     _cpuFeatures &= ~CPU_HT;
   }
 
   char buf[256];
-  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
                cores_per_cpu(), threads_per_core(),
                cpu_family(), _model, _stepping,
                (supports_cmov() ? ", cmov" : ""),
@@ -441,6 +444,7 @@
                (supports_popcnt() ? ", popcnt" : ""),
                (supports_avx()    ? ", avx" : ""),
                (supports_avx2()   ? ", avx2" : ""),
+               (supports_aes()    ? ", aes" : ""),
                (supports_mmx_ext() ? ", mmxext" : ""),
                (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
                (supports_lzcnt()   ? ", lzcnt": ""),
@@ -472,6 +476,29 @@
   if (!supports_avx ()) // Drop to 0 if no AVX  support
     UseAVX = 0;
 
+  // Use AES instructions if available.
+  if (supports_aes()) {
+    if (FLAG_IS_DEFAULT(UseAES)) {
+      UseAES = true;
+    }
+  } else if (UseAES) {
+    if (!FLAG_IS_DEFAULT(UseAES))
+      warning("AES instructions not available on this CPU");
+    FLAG_SET_DEFAULT(UseAES, false);
+  }
+
+  // The AES intrinsic stubs require AES instruction support (of course)
+  // but also require AVX mode for misaligned SSE access
+  if (UseAES && (UseAVX > 0)) {
+    if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+      UseAESIntrinsics = true;
+    }
+  } else if (UseAESIntrinsics) {
+    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
+      warning("AES intrinsics not available on this CPU");
+    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+  }
+
 #ifdef COMPILER2
   if (UseFPUForSpilling) {
     if (UseSSE < 2) {
@@ -714,6 +741,9 @@
     if (UseAVX > 0) {
       tty->print("  UseAVX=%d",UseAVX);
     }
+    if (UseAES) {
+      tty->print("  UseAES=1");
+    }
     tty->cr();
     tty->print("Allocation");
     if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow_prefetch()) {
--- a/src/cpu/x86/vm/vm_version_x86.hpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/x86/vm/vm_version_x86.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -78,7 +78,9 @@
                sse4_2   : 1,
                         : 2,
                popcnt   : 1,
-                        : 3,
+                        : 1,
+               aes      : 1,
+                        : 1,
                osxsave  : 1,
                avx      : 1,
                         : 3;
@@ -244,7 +246,8 @@
     CPU_TSC    = (1 << 15),
     CPU_TSCINV = (1 << 16),
     CPU_AVX    = (1 << 17),
-    CPU_AVX2   = (1 << 18)
+    CPU_AVX2   = (1 << 18),
+    CPU_AES    = (1 << 19)
   } cpuFeatureFlags;
 
   enum {
@@ -420,6 +423,8 @@
       result |= CPU_TSC;
     if (_cpuid_info.ext_cpuid7_edx.bits.tsc_invariance != 0)
       result |= CPU_TSCINV;
+    if (_cpuid_info.std_cpuid1_ecx.bits.aes != 0)
+      result |= CPU_AES;
 
     // AMD features.
     if (is_amd()) {
@@ -544,6 +549,7 @@
   static bool supports_avx()      { return (_cpuFeatures & CPU_AVX) != 0; }
   static bool supports_avx2()     { return (_cpuFeatures & CPU_AVX2) != 0; }
   static bool supports_tsc()      { return (_cpuFeatures & CPU_TSC)    != 0; }
+  static bool supports_aes()      { return (_cpuFeatures & CPU_AES) != 0; }
 
   // Intel features
   static bool is_intel_family_core() { return is_intel() &&
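The new std_cpuid1_ecx.bits.aes bitfield decodes CPUID leaf 1, ECX bit 25 (AES-NI). Outside the VM the same probe looks roughly like this; a sketch for GCC/Clang on x86, not HotSpot code:

    #include <cpuid.h>   // GCC/Clang intrinsic wrapper for CPUID
    #include <cstdio>

    // CPUID leaf 1, ECX bit 25 advertises AES-NI -- the same bit the new
    // std_cpuid1_ecx.bits.aes field decodes inside the VM.
    static bool cpu_supports_aes() {
      unsigned eax, ebx, ecx, edx;
      if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
        return false;
      return (ecx & (1u << 25)) != 0;
    }

    int main() {
      std::printf("aes: %s\n", cpu_supports_aes() ? "yes" : "no");
      return 0;
    }
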
--- a/src/cpu/x86/vm/x86.ad	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/x86/vm/x86.ad	Thu Nov 08 16:48:01 2012 -0800
@@ -4102,9 +4102,158 @@
 
 // ----------------------- LogicalRightShift -----------------------------------
 
-// Shorts/Chars vector logical right shift produces incorrect Java result
+// Shorts vector logical right shift produces incorrect Java result
 // for negative data because Java code converts short values into int with
-// sign extension before a shift.
+// sign extension before a shift. But char vectors are fine since chars are
+// unsigned values.
+
+instruct vsrl2S(vecS dst, vecS shift) %{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (URShiftVS dst shift));
+  format %{ "psrlw   $dst,$shift\t! logical right shift packed2S" %}
+  ins_encode %{
+    __ psrlw($dst$$XMMRegister, $shift$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl2S_imm(vecS dst, immI8 shift) %{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (URShiftVS dst shift));
+  format %{ "psrlw   $dst,$shift\t! logical right shift packed2S" %}
+  ins_encode %{
+    __ psrlw($dst$$XMMRegister, (int)$shift$$constant);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl2S_reg(vecS dst, vecS src, vecS shift) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
+  match(Set dst (URShiftVS src shift));
+  format %{ "vpsrlw  $dst,$src,$shift\t! logical right shift packed2S" %}
+  ins_encode %{
+    bool vector256 = false;
+    __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl2S_reg_imm(vecS dst, vecS src, immI8 shift) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
+  match(Set dst (URShiftVS src shift));
+  format %{ "vpsrlw  $dst,$src,$shift\t! logical right shift packed2S" %}
+  ins_encode %{
+    bool vector256 = false;
+    __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl4S(vecD dst, vecS shift) %{
+  predicate(n->as_Vector()->length() == 4);
+  match(Set dst (URShiftVS dst shift));
+  format %{ "psrlw   $dst,$shift\t! logical right shift packed4S" %}
+  ins_encode %{
+    __ psrlw($dst$$XMMRegister, $shift$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl4S_imm(vecD dst, immI8 shift) %{
+  predicate(n->as_Vector()->length() == 4);
+  match(Set dst (URShiftVS dst shift));
+  format %{ "psrlw   $dst,$shift\t! logical right shift packed4S" %}
+  ins_encode %{
+    __ psrlw($dst$$XMMRegister, (int)$shift$$constant);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl4S_reg(vecD dst, vecD src, vecS shift) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
+  match(Set dst (URShiftVS src shift));
+  format %{ "vpsrlw  $dst,$src,$shift\t! logical right shift packed4S" %}
+  ins_encode %{
+    bool vector256 = false;
+    __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl4S_reg_imm(vecD dst, vecD src, immI8 shift) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
+  match(Set dst (URShiftVS src shift));
+  format %{ "vpsrlw  $dst,$src,$shift\t! logical right shift packed4S" %}
+  ins_encode %{
+    bool vector256 = false;
+    __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl8S(vecX dst, vecS shift) %{
+  predicate(n->as_Vector()->length() == 8);
+  match(Set dst (URShiftVS dst shift));
+  format %{ "psrlw   $dst,$shift\t! logical right shift packed8S" %}
+  ins_encode %{
+    __ psrlw($dst$$XMMRegister, $shift$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl8S_imm(vecX dst, immI8 shift) %{
+  predicate(n->as_Vector()->length() == 8);
+  match(Set dst (URShiftVS dst shift));
+  format %{ "psrlw   $dst,$shift\t! logical right shift packed8S" %}
+  ins_encode %{
+    __ psrlw($dst$$XMMRegister, (int)$shift$$constant);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl8S_reg(vecX dst, vecX src, vecS shift) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
+  match(Set dst (URShiftVS src shift));
+  format %{ "vpsrlw  $dst,$src,$shift\t! logical right shift packed8S" %}
+  ins_encode %{
+    bool vector256 = false;
+    __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl8S_reg_imm(vecX dst, vecX src, immI8 shift) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
+  match(Set dst (URShiftVS src shift));
+  format %{ "vpsrlw  $dst,$src,$shift\t! logical right shift packed8S" %}
+  ins_encode %{
+    bool vector256 = false;
+    __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl16S_reg(vecY dst, vecY src, vecS shift) %{
+  predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
+  match(Set dst (URShiftVS src shift));
+  format %{ "vpsrlw  $dst,$src,$shift\t! logical right shift packed16S" %}
+  ins_encode %{
+    bool vector256 = true;
+    __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl16S_reg_imm(vecY dst, vecY src, immI8 shift) %{
+  predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
+  match(Set dst (URShiftVS src shift));
+  format %{ "vpsrlw  $dst,$src,$shift\t! logical right shift packed16S" %}
+  ins_encode %{
+    bool vector256 = true;
+    __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
+  %}
+  ins_pipe( pipe_slow );
+%}
 
 // Integers vector logical right shift
 instruct vsrl2I(vecD dst, vecS shift) %{
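The revised comment above turns on Java's promotion rules: a short is sign-extended to int before >>>, so a packed 16-bit logical shift (psrlw) sees different high bits, while a char is zero-extended and agrees. A scalar C++ model of the mismatch:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int16_t  s = -4;       // Java short
      uint16_t c = 0xfffc;   // Java char with the same bit pattern

      // Java sign-extends the short to int before >>>, then narrows back:
      uint32_t widened    = uint32_t(int32_t(s)) >> 1;    // 0x7ffffffe
      uint16_t java_short = uint16_t(widened);            // 0xfffe
      // A packed 16-bit logical shift (psrlw) sees only the 16-bit lane:
      uint16_t packed     = uint16_t(uint16_t(s) >> 1);   // 0x7ffe -- differs
      // char is zero-extended, so the packed shift agrees with Java:
      uint16_t java_char  = uint16_t(uint32_t(c) >> 1);   // 0x7ffe -- matches

      std::printf("short: java=%04x packed=%04x  char: java=%04x\n",
                  java_short, packed, java_char);
      return 0;
    }
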
--- a/src/cpu/zero/vm/cppInterpreterGenerator_zero.hpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/zero/vm/cppInterpreterGenerator_zero.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -31,12 +31,17 @@
     return _masm;
   }
 
- protected:
-  address generate_entry(address entry_point) {
-    ZeroEntry *entry = (ZeroEntry *) assembler()->pc();
-    assembler()->advance(sizeof(ZeroEntry));
+ public:
+  static address generate_entry_impl(MacroAssembler* masm, address entry_point) {
+    ZeroEntry *entry = (ZeroEntry *) masm->pc();
+    masm->advance(sizeof(ZeroEntry));
     entry->set_entry_point(entry_point);
     return (address) entry;
   }
 
+ protected:
+  address generate_entry(address entry_point) {
+    return generate_entry_impl(assembler(), entry_point);
+  }
+
 #endif // CPU_ZERO_VM_CPPINTERPRETERGENERATOR_ZERO_HPP
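Making generate_entry_impl static is what lets methodHandles_zero.cpp (later in this changeset) mint interpreter entries without an InterpreterGenerator instance. Each "generated" entry is just a small struct written into the code area; a standalone model of the mechanism, with ZeroEntry's layout reduced to the one field this code touches:

    // Model of the Zero "generated code": an entry is a struct carrying a
    // C++ function pointer, written where JIT code would normally go.
    typedef unsigned char* address;

    struct ZeroEntryModel {
      address entry_point;                      // the C++ routine to invoke
      void set_entry_point(address ep) { entry_point = ep; }
    };

    // Analogue of generate_entry_impl: carve an entry out of the buffer,
    // point it at entry_point, and return its address as the "code" address.
    static address generate_entry_model(unsigned char*& pc, address entry_point) {
      ZeroEntryModel* entry = (ZeroEntryModel*) pc;
      pc += sizeof(ZeroEntryModel);             // masm->advance(sizeof(ZeroEntry))
      entry->set_entry_point(entry_point);
      return (address) entry;
    }
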
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -180,25 +180,6 @@
         method, istate->osr_entry(), istate->osr_buf(), THREAD);
       return;
     }
-    else if (istate->msg() == BytecodeInterpreter::call_method_handle) {
-      oop method_handle = istate->callee();
-
-      // Trim back the stack to put the parameters at the top
-      stack->set_sp(istate->stack() + 1);
-
-      // Make the call
-      process_method_handle(method_handle, THREAD);
-      fixup_after_potential_safepoint();
-
-      // Convert the result
-      istate->set_stack(stack->sp() - 1);
-
-      // Restore the stack
-      stack->set_sp(istate->stack_limit() + 1);
-
-      // Resume the interpreter
-      istate->set_msg(BytecodeInterpreter::method_resume);
-    }
     else {
       ShouldNotReachHere();
     }
@@ -535,35 +516,35 @@
   if (entry->is_volatile()) {
     switch (entry->flag_state()) {
     case ctos:
-      SET_LOCALS_INT(object->char_field_acquire(entry->f2()), 0);
+      SET_LOCALS_INT(object->char_field_acquire(entry->f2_as_index()), 0);
       break;
 
     case btos:
-      SET_LOCALS_INT(object->byte_field_acquire(entry->f2()), 0);
+      SET_LOCALS_INT(object->byte_field_acquire(entry->f2_as_index()), 0);
       break;
 
     case stos:
-      SET_LOCALS_INT(object->short_field_acquire(entry->f2()), 0);
+      SET_LOCALS_INT(object->short_field_acquire(entry->f2_as_index()), 0);
       break;
 
     case itos:
-      SET_LOCALS_INT(object->int_field_acquire(entry->f2()), 0);
+      SET_LOCALS_INT(object->int_field_acquire(entry->f2_as_index()), 0);
       break;
 
     case ltos:
-      SET_LOCALS_LONG(object->long_field_acquire(entry->f2()), 0);
+      SET_LOCALS_LONG(object->long_field_acquire(entry->f2_as_index()), 0);
       break;
 
     case ftos:
-      SET_LOCALS_FLOAT(object->float_field_acquire(entry->f2()), 0);
+      SET_LOCALS_FLOAT(object->float_field_acquire(entry->f2_as_index()), 0);
       break;
 
     case dtos:
-      SET_LOCALS_DOUBLE(object->double_field_acquire(entry->f2()), 0);
+      SET_LOCALS_DOUBLE(object->double_field_acquire(entry->f2_as_index()), 0);
       break;
 
     case atos:
-      SET_LOCALS_OBJECT(object->obj_field_acquire(entry->f2()), 0);
+      SET_LOCALS_OBJECT(object->obj_field_acquire(entry->f2_as_index()), 0);
       break;
 
     default:
@@ -573,35 +554,35 @@
   else {
     switch (entry->flag_state()) {
     case ctos:
-      SET_LOCALS_INT(object->char_field(entry->f2()), 0);
+      SET_LOCALS_INT(object->char_field(entry->f2_as_index()), 0);
       break;
 
     case btos:
-      SET_LOCALS_INT(object->byte_field(entry->f2()), 0);
+      SET_LOCALS_INT(object->byte_field(entry->f2_as_index()), 0);
       break;
 
     case stos:
-      SET_LOCALS_INT(object->short_field(entry->f2()), 0);
+      SET_LOCALS_INT(object->short_field(entry->f2_as_index()), 0);
       break;
 
     case itos:
-      SET_LOCALS_INT(object->int_field(entry->f2()), 0);
+      SET_LOCALS_INT(object->int_field(entry->f2_as_index()), 0);
       break;
 
     case ltos:
-      SET_LOCALS_LONG(object->long_field(entry->f2()), 0);
+      SET_LOCALS_LONG(object->long_field(entry->f2_as_index()), 0);
       break;
 
     case ftos:
-      SET_LOCALS_FLOAT(object->float_field(entry->f2()), 0);
+      SET_LOCALS_FLOAT(object->float_field(entry->f2_as_index()), 0);
       break;
 
     case dtos:
-      SET_LOCALS_DOUBLE(object->double_field(entry->f2()), 0);
+      SET_LOCALS_DOUBLE(object->double_field(entry->f2_as_index()), 0);
       break;
 
     case atos:
-      SET_LOCALS_OBJECT(object->obj_field(entry->f2()), 0);
+      SET_LOCALS_OBJECT(object->obj_field(entry->f2_as_index()), 0);
       break;
 
     default:
@@ -629,516 +610,6 @@
   return 0;
 }
 
-int CppInterpreter::method_handle_entry(Method* method,
-                                        intptr_t UNUSED, TRAPS) {
-  JavaThread *thread = (JavaThread *) THREAD;
-  ZeroStack *stack = thread->zero_stack();
-  int argument_slots = method->size_of_parameters();
-  int result_slots = type2size[result_type_of(method)];
-  intptr_t *vmslots = stack->sp();
-  intptr_t *unwind_sp = vmslots + argument_slots;
-
-  // Find the MethodType
-  address p = (address) method;
-  for (jint* pc = method->method_type_offsets_chain(); (*pc) != -1; pc++) {
-    p = *(address*)(p + (*pc));
-  }
-  oop method_type = (oop) p;
-
-  // The MethodHandle is in the slot after the arguments
-  int num_vmslots = argument_slots - 1;
-  oop method_handle = VMSLOTS_OBJECT(num_vmslots);
-
-  // InvokeGeneric requires some extra shuffling
-  oop mhtype = java_lang_invoke_MethodHandle::type(method_handle);
-  bool is_exact = mhtype == method_type;
-  if (!is_exact) {
-    if (true || // FIXME
-        method->intrinsic_id() == vmIntrinsics::_invokeExact) {
-      CALL_VM_NOCHECK_NOFIX(
-        SharedRuntime::throw_WrongMethodTypeException(
-          thread, method_type, mhtype));
-      // NB all oops trashed!
-      assert(HAS_PENDING_EXCEPTION, "should do");
-      stack->set_sp(unwind_sp);
-      return 0;
-    }
-    assert(method->intrinsic_id() == vmIntrinsics::_invokeGeneric, "should be");
-
-    // Load up an adapter from the calling type
-    // NB the x86 code for this (in methodHandles_x86.cpp, search for
-    // "genericInvoker") is really really odd.  I'm hoping it's trying
-    // to accomodate odd VM/class library combinations I can ignore.
-    oop adapter = NULL; //FIXME: load the adapter from the CP cache
-    IF (adapter == NULL) {
-      CALL_VM_NOCHECK_NOFIX(
-        SharedRuntime::throw_WrongMethodTypeException(
-          thread, method_type, mhtype));
-      // NB all oops trashed!
-      assert(HAS_PENDING_EXCEPTION, "should do");
-      stack->set_sp(unwind_sp);
-      return 0;
-    }
-
-    // Adapters are shared among form-families of method-type.  The
-    // type being called is passed as a trusted first argument so that
-    // the adapter knows the actual types of its arguments and return
-    // values.
-    insert_vmslots(num_vmslots + 1, 1, THREAD);
-    if (HAS_PENDING_EXCEPTION) {
-      // NB all oops trashed!
-      stack->set_sp(unwind_sp);
-      return 0;
-    }
-
-    vmslots = stack->sp();
-    num_vmslots++;
-    SET_VMSLOTS_OBJECT(method_type, num_vmslots);
-
-    method_handle = adapter;
-  }
-
-  // Start processing
-  process_method_handle(method_handle, THREAD);
-  if (HAS_PENDING_EXCEPTION)
-    result_slots = 0;
-
-  // If this is an invokeExact then the eventual callee will not
-  // have unwound the method handle argument so we have to do it.
-  // If a result is being returned the it will be above the method
-  // handle argument we're unwinding.
-  if (is_exact) {
-    intptr_t result[2];
-    for (int i = 0; i < result_slots; i++)
-      result[i] = stack->pop();
-    stack->pop();
-    for (int i = result_slots - 1; i >= 0; i--)
-      stack->push(result[i]);
-  }
-
-  // Check
-  assert(stack->sp() == unwind_sp - result_slots, "should be");
-
-  // No deoptimized frames on the stack
-  return 0;
-}
-
-void CppInterpreter::process_method_handle(oop method_handle, TRAPS) {
-  JavaThread *thread = (JavaThread *) THREAD;
-  ZeroStack *stack = thread->zero_stack();
-  intptr_t *vmslots = stack->sp();
-
-  bool direct_to_method = false;
-  BasicType src_rtype = T_ILLEGAL;
-  BasicType dst_rtype = T_ILLEGAL;
-
-  MethodHandleEntry *entry =
-    java_lang_invoke_MethodHandle::vmentry(method_handle);
-  MethodHandles::EntryKind entry_kind =
-    (MethodHandles::EntryKind) (((intptr_t) entry) & 0xffffffff);
-
-  Method* method = NULL;
-  switch (entry_kind) {
-  case MethodHandles::_invokestatic_mh:
-    direct_to_method = true;
-    break;
-
-  case MethodHandles::_invokespecial_mh:
-  case MethodHandles::_invokevirtual_mh:
-  case MethodHandles::_invokeinterface_mh:
-    {
-      oop receiver =
-        VMSLOTS_OBJECT(
-          java_lang_invoke_MethodHandle::vmslots(method_handle) - 1);
-      if (receiver == NULL) {
-          stack->set_sp(calculate_unwind_sp(stack, method_handle));
-          CALL_VM_NOCHECK_NOFIX(
-            throw_exception(
-              thread, vmSymbols::java_lang_NullPointerException()));
-          // NB all oops trashed!
-          assert(HAS_PENDING_EXCEPTION, "should do");
-          return;
-      }
-      if (entry_kind != MethodHandles::_invokespecial_mh) {
-        intptr_t index = java_lang_invoke_DirectMethodHandle::vmindex(method_handle);
-        InstanceKlass* rcvrKlass =
-          (InstanceKlass *) receiver->klass();
-        if (entry_kind == MethodHandles::_invokevirtual_mh) {
-          method = (Method*) rcvrKlass->start_of_vtable()[index];
-        }
-        else {
-          oop iclass = java_lang_invoke_MethodHandle::next_target(method_handle);
-          itableOffsetEntry* ki =
-            (itableOffsetEntry *) rcvrKlass->start_of_itable();
-          int i, length = rcvrKlass->itable_length();
-          for (i = 0; i < length; i++, ki++ ) {
-            if (ki->interface_klass() == iclass)
-              break;
-          }
-          if (i == length) {
-            stack->set_sp(calculate_unwind_sp(stack, method_handle));
-            CALL_VM_NOCHECK_NOFIX(
-              throw_exception(
-                thread, vmSymbols::java_lang_IncompatibleClassChangeError()));
-            // NB all oops trashed!
-            assert(HAS_PENDING_EXCEPTION, "should do");
-            return;
-          }
-          itableMethodEntry* im = ki->first_method_entry(receiver->klass());
-          method = im[index].method();
-          if (method == NULL) {
-            stack->set_sp(calculate_unwind_sp(stack, method_handle));
-            CALL_VM_NOCHECK_NOFIX(
-              throw_exception(
-                thread, vmSymbols::java_lang_AbstractMethodError()));
-            // NB all oops trashed!
-            assert(HAS_PENDING_EXCEPTION, "should do");
-            return;
-          }
-        }
-      }
-    }
-    direct_to_method = true;
-    break;
-
-  case MethodHandles::_bound_ref_direct_mh:
-  case MethodHandles::_bound_int_direct_mh:
-  case MethodHandles::_bound_long_direct_mh:
-    direct_to_method = true;
-    // fall through
-  case MethodHandles::_bound_ref_mh:
-  case MethodHandles::_bound_int_mh:
-  case MethodHandles::_bound_long_mh:
-    {
-      BasicType arg_type  = T_ILLEGAL;
-      int       arg_mask  = -1;
-      int       arg_slots = -1;
-      MethodHandles::get_ek_bound_mh_info(
-        entry_kind, arg_type, arg_mask, arg_slots);
-      int arg_slot =
-        java_lang_invoke_BoundMethodHandle::vmargslot(method_handle);
-
-      // Create the new slot(s)
-      intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle);
-      insert_vmslots(arg_slot, arg_slots, THREAD);
-      if (HAS_PENDING_EXCEPTION) {
-        // all oops trashed
-        stack->set_sp(unwind_sp);
-        return;
-      }
-      vmslots = stack->sp();
-
-      // Store bound argument into new stack slot
-      oop arg = java_lang_invoke_BoundMethodHandle::argument(method_handle);
-      if (arg_type == T_OBJECT) {
-        assert(arg_slots == 1, "should be");
-        SET_VMSLOTS_OBJECT(arg, arg_slot);
-      }
-      else {
-        jvalue arg_value;
-        arg_type = java_lang_boxing_object::get_value(arg, &arg_value);
-        switch (arg_type) {
-        case T_BOOLEAN:
-          SET_VMSLOTS_INT(arg_value.z, arg_slot);
-          break;
-        case T_CHAR:
-          SET_VMSLOTS_INT(arg_value.c, arg_slot);
-          break;
-        case T_BYTE:
-          SET_VMSLOTS_INT(arg_value.b, arg_slot);
-          break;
-        case T_SHORT:
-          SET_VMSLOTS_INT(arg_value.s, arg_slot);
-          break;
-        case T_INT:
-          SET_VMSLOTS_INT(arg_value.i, arg_slot);
-          break;
-        case T_FLOAT:
-          SET_VMSLOTS_FLOAT(arg_value.f, arg_slot);
-          break;
-        case T_LONG:
-          SET_VMSLOTS_LONG(arg_value.j, arg_slot + 1);
-          break;
-        case T_DOUBLE:
-          SET_VMSLOTS_DOUBLE(arg_value.d, arg_slot + 1);
-          break;
-        default:
-          tty->print_cr("unhandled type %s", type2name(arg_type));
-          ShouldNotReachHere();
-        }
-      }
-    }
-    break;
-
-  case MethodHandles::_adapter_retype_only:
-  case MethodHandles::_adapter_retype_raw:
-    src_rtype = result_type_of_handle(
-      java_lang_invoke_MethodHandle::next_target(method_handle));
-    dst_rtype = result_type_of_handle(method_handle);
-    break;
-
-  case MethodHandles::_adapter_check_cast:
-    {
-      int arg_slot =
-        java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
-      oop arg = VMSLOTS_OBJECT(arg_slot);
-      if (arg != NULL) {
-        Klass* objKlassOop = arg->klass();
-        Klass* klassOf = java_lang_Class::as_Klass(
-          java_lang_invoke_AdapterMethodHandle::argument(method_handle));
-
-        if (objKlassOop != klassOf &&
-            !objKlassOop->is_subtype_of(klassOf)) {
-          ResourceMark rm(THREAD);
-          const char* objName = Klass::cast(objKlassOop)->external_name();
-          const char* klassName = Klass::cast(klassOf)->external_name();
-          char* message = SharedRuntime::generate_class_cast_message(
-            objName, klassName);
-
-          stack->set_sp(calculate_unwind_sp(stack, method_handle));
-          CALL_VM_NOCHECK_NOFIX(
-            throw_exception(
-              thread, vmSymbols::java_lang_ClassCastException(), message));
-          // NB all oops trashed!
-          assert(HAS_PENDING_EXCEPTION, "should do");
-          return;
-        }
-      }
-    }
-    break;
-
-  case MethodHandles::_adapter_dup_args:
-    {
-      int arg_slot =
-        java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
-      int conv =
-        java_lang_invoke_AdapterMethodHandle::conversion(method_handle);
-      int num_slots = -MethodHandles::adapter_conversion_stack_move(conv);
-      assert(num_slots > 0, "should be");
-
-      // Create the new slot(s)
-      intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle);
-      stack->overflow_check(num_slots, THREAD);
-      if (HAS_PENDING_EXCEPTION) {
-        // all oops trashed
-        stack->set_sp(unwind_sp);
-        return;
-      }
-
-      // Duplicate the arguments
-      for (int i = num_slots - 1; i >= 0; i--)
-        stack->push(*VMSLOTS_SLOT(arg_slot + i));
-
-      vmslots = stack->sp(); // unused, but let the compiler figure that out
-    }
-    break;
-
-  case MethodHandles::_adapter_drop_args:
-    {
-      int arg_slot =
-        java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
-      int conv =
-        java_lang_invoke_AdapterMethodHandle::conversion(method_handle);
-      int num_slots = MethodHandles::adapter_conversion_stack_move(conv);
-      assert(num_slots > 0, "should be");
-
-      remove_vmslots(arg_slot, num_slots, THREAD); // doesn't trap
-      vmslots = stack->sp(); // unused, but let the compiler figure that out
-    }
-    break;
-
-  case MethodHandles::_adapter_opt_swap_1:
-  case MethodHandles::_adapter_opt_swap_2:
-  case MethodHandles::_adapter_opt_rot_1_up:
-  case MethodHandles::_adapter_opt_rot_1_down:
-  case MethodHandles::_adapter_opt_rot_2_up:
-  case MethodHandles::_adapter_opt_rot_2_down:
-    {
-      int arg1 =
-        java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
-      int conv =
-        java_lang_invoke_AdapterMethodHandle::conversion(method_handle);
-      int arg2 = MethodHandles::adapter_conversion_vminfo(conv);
-
-      int swap_bytes = 0, rotate = 0;
-      MethodHandles::get_ek_adapter_opt_swap_rot_info(
-        entry_kind, swap_bytes, rotate);
-      int swap_slots = swap_bytes >> LogBytesPerWord;
-
-      intptr_t tmp;
-      switch (rotate) {
-      case 0: // swap
-        for (int i = 0; i < swap_slots; i++) {
-          tmp = *VMSLOTS_SLOT(arg1 + i);
-          SET_VMSLOTS_SLOT(VMSLOTS_SLOT(arg2 + i), arg1 + i);
-          SET_VMSLOTS_SLOT(&tmp, arg2 + i);
-        }
-        break;
-
-      case 1: // up
-        assert(arg1 - swap_slots > arg2, "should be");
-
-        tmp = *VMSLOTS_SLOT(arg1);
-        for (int i = arg1 - swap_slots; i >= arg2; i--)
-          SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i), i + swap_slots);
-        SET_VMSLOTS_SLOT(&tmp, arg2);
-
-        break;
-
-      case -1: // down
-        assert(arg2 - swap_slots > arg1, "should be");
-
-        tmp = *VMSLOTS_SLOT(arg1);
-        for (int i = arg1 + swap_slots; i <= arg2; i++)
-          SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i), i - swap_slots);
-        SET_VMSLOTS_SLOT(&tmp, arg2);
-        break;
-
-      default:
-        ShouldNotReachHere();
-      }
-    }
-    break;
-
-  case MethodHandles::_adapter_opt_i2l:
-    {
-      int arg_slot =
-        java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
-      int arg = VMSLOTS_INT(arg_slot);
-      intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle);
-      insert_vmslots(arg_slot, 1, THREAD);
-      if (HAS_PENDING_EXCEPTION) {
-        // all oops trashed
-        stack->set_sp(unwind_sp);
-        return;
-      }
-      vmslots = stack->sp();
-      arg_slot++;
-      SET_VMSLOTS_LONG(arg, arg_slot);
-    }
-    break;
-
-  case MethodHandles::_adapter_opt_unboxi:
-  case MethodHandles::_adapter_opt_unboxl:
-    {
-      int arg_slot =
-        java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
-      oop arg = VMSLOTS_OBJECT(arg_slot);
-      jvalue arg_value;
-      if (arg == NULL) {
-        // queue a nullpointer exception for the caller
-        stack->set_sp(calculate_unwind_sp(stack, method_handle));
-        CALL_VM_NOCHECK_NOFIX(
-          throw_exception(
-            thread, vmSymbols::java_lang_NullPointerException()));
-        // NB all oops trashed!
-        assert(HAS_PENDING_EXCEPTION, "should do");
-        return;
-      }
-      BasicType arg_type = java_lang_boxing_object::get_value(arg, &arg_value);
-      if (arg_type == T_LONG || arg_type == T_DOUBLE) {
-        intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle);
-        insert_vmslots(arg_slot, 1, THREAD);
-        if (HAS_PENDING_EXCEPTION) {
-          // all oops trashed
-          stack->set_sp(unwind_sp);
-          return;
-        }
-        vmslots = stack->sp();
-        arg_slot++;
-      }
-      switch (arg_type) {
-      case T_BOOLEAN:
-        SET_VMSLOTS_INT(arg_value.z, arg_slot);
-        break;
-      case T_CHAR:
-        SET_VMSLOTS_INT(arg_value.c, arg_slot);
-        break;
-      case T_BYTE:
-        SET_VMSLOTS_INT(arg_value.b, arg_slot);
-        break;
-      case T_SHORT:
-        SET_VMSLOTS_INT(arg_value.s, arg_slot);
-        break;
-      case T_INT:
-        SET_VMSLOTS_INT(arg_value.i, arg_slot);
-        break;
-      case T_FLOAT:
-        SET_VMSLOTS_FLOAT(arg_value.f, arg_slot);
-        break;
-      case T_LONG:
-        SET_VMSLOTS_LONG(arg_value.j, arg_slot);
-        break;
-      case T_DOUBLE:
-        SET_VMSLOTS_DOUBLE(arg_value.d, arg_slot);
-        break;
-      default:
-        tty->print_cr("unhandled type %s", type2name(arg_type));
-        ShouldNotReachHere();
-      }
-    }
-    break;
-
-  default:
-    tty->print_cr("unhandled entry_kind %s",
-                  MethodHandles::entry_name(entry_kind));
-    ShouldNotReachHere();
-  }
-
-  // Continue along the chain
-  if (direct_to_method) {
-    if (method == NULL) {
-      method =
-        (Method*) java_lang_invoke_MethodHandle::vmtarget(method_handle);
-    }
-    address entry_point = method->from_interpreted_entry();
-    Interpreter::invoke_method(method, entry_point, THREAD);
-  }
-  else {
-    process_method_handle(
-      java_lang_invoke_MethodHandle::next_target(method_handle), THREAD);
-  }
-  // NB all oops now trashed
-
-  // Adapt the result type, if necessary
-  if (src_rtype != dst_rtype && !HAS_PENDING_EXCEPTION) {
-    switch (dst_rtype) {
-    case T_VOID:
-      for (int i = 0; i < type2size[src_rtype]; i++)
-        stack->pop();
-      return;
-
-    case T_INT:
-      switch (src_rtype) {
-      case T_VOID:
-        stack->overflow_check(1, CHECK);
-        stack->push(0);
-        return;
-
-      case T_BOOLEAN:
-      case T_CHAR:
-      case T_BYTE:
-      case T_SHORT:
-        return;
-      }
-      // INT results sometimes need narrowing
-    case T_BOOLEAN:
-    case T_CHAR:
-    case T_BYTE:
-    case T_SHORT:
-      switch (src_rtype) {
-      case T_INT:
-        return;
-      }
-    }
-
-    tty->print_cr("unhandled conversion:");
-    tty->print_cr("src_rtype = %s", type2name(src_rtype));
-    tty->print_cr("dst_rtype = %s", type2name(dst_rtype));
-    ShouldNotReachHere();
-  }
-}
-
 // The new slots will be inserted before slot insert_before.
 // Slots < insert_before will have the same slot number after the insert.
 // Slots >= insert_before will become old_slot + num_slots.
@@ -1380,10 +851,6 @@
     entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();
     break;
 
-  case Interpreter::method_handle:
-    entry_point = ((InterpreterGenerator*) this)->generate_method_handle_entry();
-    break;
-
   case Interpreter::java_lang_math_sin:
   case Interpreter::java_lang_math_cos:
   case Interpreter::java_lang_math_tan:
@@ -1391,6 +858,8 @@
   case Interpreter::java_lang_math_log:
   case Interpreter::java_lang_math_log10:
   case Interpreter::java_lang_math_sqrt:
+  case Interpreter::java_lang_math_pow:
+  case Interpreter::java_lang_math_exp:
     entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);
     break;
 
--- a/src/cpu/zero/vm/cppInterpreter_zero.hpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/zero/vm/cppInterpreter_zero.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -36,7 +36,6 @@
   static int native_entry(Method* method, intptr_t UNUSED, TRAPS);
   static int accessor_entry(Method* method, intptr_t UNUSED, TRAPS);
   static int empty_entry(Method* method, intptr_t UNUSED, TRAPS);
-  static int method_handle_entry(Method* method, intptr_t UNUSED, TRAPS);
 
  public:
   // Main loop of normal_entry
@@ -44,7 +43,6 @@
 
  private:
   // Helpers for method_handle_entry
-  static void process_method_handle(oop method_handle, TRAPS);
   static void insert_vmslots(int insert_before, int num_slots, TRAPS);
   static void remove_vmslots(int first_slot, int num_slots, TRAPS);
   static BasicType result_type_of_handle(oop method_handle);
--- a/src/cpu/zero/vm/frame_zero.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/zero/vm/frame_zero.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -351,7 +351,7 @@
   switch (offset) {
   case pc_off:
     strncpy(fieldbuf, "pc", buflen);
-    if (method()->is_oop()) {
+    if (method()->is_method()) {
       nmethod *code = method()->code();
       if (code && code->pc_desc_at(pc())) {
         SimpleScopeDesc ssd(code, pc());
@@ -367,7 +367,7 @@
 
   case method_off:
     strncpy(fieldbuf, "method", buflen);
-    if (method()->is_oop()) {
+    if (method()->is_method()) {
       method()->name_and_sig_as_C_string(valuebuf, buflen);
     }
     return;
@@ -378,7 +378,7 @@
   }
 
   // Variable part
-  if (method()->is_oop()) {
+  if (method()->is_method()) {
     identify_vp_word(frame_index, addr_of_word(offset),
                      addr_of_word(header_words + 1),
                      unextended_sp() + method()->max_stack(),
@@ -430,4 +430,3 @@
   // unused... but returns fp() to minimize changes introduced by 7087445
   return fp();
 }
-
--- a/src/cpu/zero/vm/frame_zero.inline.hpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/zero/vm/frame_zero.inline.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -36,6 +36,8 @@
   _deopt_state = unknown;
 }
 
+inline address  frame::sender_pc()           const { ShouldNotCallThis();  }
+
 inline frame::frame(ZeroFrame* zf, intptr_t* sp) {
   _zeroframe = zf;
   _sp = sp;
--- a/src/cpu/zero/vm/icBuffer_zero.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/zero/vm/icBuffer_zero.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -40,7 +40,7 @@
 }
 
 void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin,
-                                                Metadata* cached_oop,
+                                                void* cached_oop,
                                                 address entry_point) {
   // NB ic_stub_code_size() must return the size of the code we generate
   ShouldNotCallThis();
@@ -51,7 +51,6 @@
   ShouldNotCallThis();
 }
 
-Metadata* InlineCacheBuffer::ic_buffer_cached_oop(address code_begin) {
-  // NB ic_stub_code_size() must return the size of the code we generate
+void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
   ShouldNotCallThis();
 }
--- a/src/cpu/zero/vm/methodHandles_zero.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/zero/vm/methodHandles_zero.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -24,26 +24,159 @@
  */
 
 #include "precompiled.hpp"
+#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreter.hpp"
 #include "memory/allocation.inline.hpp"
 #include "prims/methodHandles.hpp"
 
-int MethodHandles::adapter_conversion_ops_supported_mask() {
-  return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY)
-         |(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW)
-         |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST)
-         |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM)
-         |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM)
-         |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS)
-         |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS)
-         |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS)
-         |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
-         //|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
-         );
-  // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
+void MethodHandles::invoke_target(Method* method, TRAPS) {
+
+  JavaThread *thread = (JavaThread *) THREAD;
+  ZeroStack *stack = thread->zero_stack();
+  InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
+  interpreterState istate = frame->interpreter_state();
+
+  // Trim back the stack to put the parameters at the top
+  stack->set_sp(istate->stack() + 1);
+
+  Interpreter::invoke_method(method, method->from_interpreted_entry(), THREAD);
+
+  // Convert the result
+  istate->set_stack(stack->sp() - 1);
+
 }
 
-void MethodHandles::generate_method_handle_stub(MacroAssembler*          masm,
-                                                MethodHandles::EntryKind ek) {
-  init_entry(ek, (MethodHandleEntry *) ek);
+oop MethodHandles::popFromStack(TRAPS) {
+
+  JavaThread *thread = (JavaThread *) THREAD;
+  InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
+  interpreterState istate = frame->interpreter_state();
+  intptr_t* topOfStack = istate->stack();
+
+  oop top = STACK_OBJECT(-1);
+  MORE_STACK(-1);
+  istate->set_stack(topOfStack);
+
+  return top;
+
 }
+
+int MethodHandles::method_handle_entry_invokeBasic(Method* method, intptr_t UNUSED, TRAPS) {
+
+  JavaThread *thread = (JavaThread *) THREAD;
+  InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
+  interpreterState istate = frame->interpreter_state();
+  intptr_t* topOfStack = istate->stack();
+
+  // 'this' is a MethodHandle. We resolve the target method by accessing this.form.vmentry.vmtarget.
+  int numArgs = method->size_of_parameters();
+  oop lform1 = java_lang_invoke_MethodHandle::form(STACK_OBJECT(-numArgs)); // this.form
+  oop vmEntry1 = java_lang_invoke_LambdaForm::vmentry(lform1);
+  Method* vmtarget = (Method*) java_lang_invoke_MemberName::vmtarget(vmEntry1);
+
+  invoke_target(vmtarget, THREAD);
+
+  // No deoptimized frames on the stack
+  return 0;
+}
+
+int MethodHandles::method_handle_entry_linkToStaticOrSpecial(Method* method, intptr_t UNUSED, TRAPS) {
+
+  // Pop appendix argument from stack. This is a MemberName which we resolve to the
+  // target method.
+  oop vmentry = popFromStack(THREAD);
+
+  Method* vmtarget = (Method*) java_lang_invoke_MemberName::vmtarget(vmentry);
+
+  invoke_target(vmtarget, THREAD);
+
+  return 0;
+}
+
+int MethodHandles::method_handle_entry_linkToInterface(Method* method, intptr_t UNUSED, TRAPS) {
+  JavaThread *thread = (JavaThread *) THREAD;
+  InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
+  interpreterState istate = frame->interpreter_state();
+
+  // Pop appendix argument from stack. This is a MemberName which we resolve to the
+  // target method.
+  oop vmentry = popFromStack(THREAD);
+  intptr_t* topOfStack = istate->stack();
+
+  // Resolve target method by looking up in the receiver object's itable.
+  Klass* clazz = java_lang_Class::as_Klass(java_lang_invoke_MemberName::clazz(vmentry));
+  intptr_t vmindex = java_lang_invoke_MemberName::vmindex(vmentry);
+  Method* target = (Method*) java_lang_invoke_MemberName::vmtarget(vmentry);
+
+  int numArgs = target->size_of_parameters();
+  oop recv = STACK_OBJECT(-numArgs);
+
+  InstanceKlass* klass_part = InstanceKlass::cast(recv->klass());
+  itableOffsetEntry* ki = (itableOffsetEntry*) klass_part->start_of_itable();
+  int i;
+  for (i = 0; i < klass_part->itable_length(); i++, ki++) {
+    if (ki->interface_klass() == clazz) break;
+  }
+
+  itableMethodEntry* im = ki->first_method_entry(recv->klass());
+  Method* vmtarget = im[vmindex].method();
+
+  invoke_target(vmtarget, THREAD);
+
+  return 0;
+}
+
+int MethodHandles::method_handle_entry_linkToVirtual(Method* method, intptr_t UNUSED, TRAPS) {
+  JavaThread *thread = (JavaThread *) THREAD;
+
+  InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
+  interpreterState istate = frame->interpreter_state();
+
+  // Pop appendix argument from stack. This is a MemberName which we resolve to the
+  // target method.
+  oop vmentry = popFromStack(THREAD);
+  intptr_t* topOfStack = istate->stack();
+
+  // Resolve target method by looking up in the receiver object's vtable.
+  intptr_t vmindex = java_lang_invoke_MemberName::vmindex(vmentry);
+  Method* target = (Method*) java_lang_invoke_MemberName::vmtarget(vmentry);
+  int numArgs = target->size_of_parameters();
+  oop recv = STACK_OBJECT(-numArgs);
+  Klass* clazz = recv->klass();
+  Klass* klass_part = InstanceKlass::cast(clazz);
+  klassVtable* vtable = klass_part->vtable();
+  Method* vmtarget = vtable->method_at(vmindex);
+
+  invoke_target(vmtarget, THREAD);
+
+  return 0;
+}
+
+int MethodHandles::method_handle_entry_invalid(Method* method, intptr_t UNUSED, TRAPS) {
+  ShouldNotReachHere();
+  return 0;
+}
+
+address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* masm,
+                                                                vmIntrinsics::ID iid) {
+  switch (iid) {
+  case vmIntrinsics::_invokeGeneric:
+  case vmIntrinsics::_compiledLambdaForm:
+    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
+    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
+    // They all allow an appendix argument.
+    return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_invalid);
+  case vmIntrinsics::_invokeBasic:
+    return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_invokeBasic);
+  case vmIntrinsics::_linkToStatic:
+  case vmIntrinsics::_linkToSpecial:
+    return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToStaticOrSpecial);
+  case vmIntrinsics::_linkToInterface:
+    return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToInterface);
+  case vmIntrinsics::_linkToVirtual:
+    return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToVirtual);
+  default:
+    ShouldNotReachHere();
+    return NULL;
+  }
+}
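The itable scan in method_handle_entry_linkToInterface repeats the lookup the deleted interpreter path performed, minus that path's IncompatibleClassChangeError/AbstractMethodError handling; presumably prior linkage guarantees success here. For reference, a defensive variant of the same scan, using only the HotSpot-internal types already visible in this file (a sketch, not standalone code):

    // Sketch only: the bounds and NULL checks are the ones the removed
    // process_method_handle() code performed before it was deleted.
    static Method* lookup_itable(InstanceKlass* klass_part, Klass* clazz,
                                 intptr_t vmindex, Klass* recv_klass) {
      itableOffsetEntry* ki = (itableOffsetEntry*) klass_part->start_of_itable();
      int i, length = klass_part->itable_length();
      for (i = 0; i < length; i++, ki++) {
        if (ki->interface_klass() == clazz)
          break;
      }
      if (i == length)
        return NULL;                   // would raise IncompatibleClassChangeError
      itableMethodEntry* im = ki->first_method_entry(recv_klass);
      Method* m = im[vmindex].method();
      return m;                        // NULL here would raise AbstractMethodError
    }
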
--- a/src/cpu/zero/vm/methodHandles_zero.hpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/zero/vm/methodHandles_zero.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -26,6 +26,14 @@
 
 // Adapters
 enum /* platform_dependent_constants */ {
-  adapter_code_size = 0
+  adapter_code_size = sizeof(ZeroEntry) * (Interpreter::method_handle_invoke_LAST - Interpreter::method_handle_invoke_FIRST + 1)
 };
 
+private:
+  static oop popFromStack(TRAPS);
+  static void invoke_target(Method* method, TRAPS);
+  static int method_handle_entry_invokeBasic(Method* method, intptr_t UNUSED, TRAPS);
+  static int method_handle_entry_linkToStaticOrSpecial(Method* method, intptr_t UNUSED, TRAPS);
+  static int method_handle_entry_linkToVirtual(Method* method, intptr_t UNUSED, TRAPS);
+  static int method_handle_entry_linkToInterface(Method* method, intptr_t UNUSED, TRAPS);
+  static int method_handle_entry_invalid(Method* method, intptr_t UNUSED, TRAPS);
--- a/src/cpu/zero/vm/register_zero.hpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/zero/vm/register_zero.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -114,5 +114,8 @@
 };
 
 CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1));
+#ifndef DONT_USE_REGISTER_DEFINES
+#define noreg ((Register)(noreg_RegisterEnumValue))
+#endif
 
 #endif // CPU_ZERO_VM_REGISTER_ZERO_HPP
--- a/src/cpu/zero/vm/relocInfo_zero.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/zero/vm/relocInfo_zero.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -77,3 +77,7 @@
                                                        CodeBuffer*       dst) {
   ShouldNotCallThis();
 }
+
+void metadata_Relocation::pd_fix_value(address x) {
+  ShouldNotCallThis();
+}
--- a/src/cpu/zero/vm/sharedRuntime_zero.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/cpu/zero/vm/sharedRuntime_zero.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -35,6 +35,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
 #include "vmreg_zero.inline.hpp"
+
 #ifdef COMPILER1
 #include "c1/c1_Runtime1.hpp"
 #endif
@@ -47,6 +48,12 @@
 #endif
 
 
+
+static address zero_null_code_stub() {
+  address start = ShouldNotCallThisStub();
+  return start;
+}
+
 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                            VMRegPair *regs,
                                            int total_args_passed,
@@ -63,16 +70,14 @@
                         AdapterFingerPrint *fingerprint) {
   return AdapterHandlerLibrary::new_entry(
     fingerprint,
-    ShouldNotCallThisStub(),
-    ShouldNotCallThisStub(),
-    ShouldNotCallThisStub());
+    CAST_FROM_FN_PTR(address, zero_null_code_stub),
+    CAST_FROM_FN_PTR(address, zero_null_code_stub),
+    CAST_FROM_FN_PTR(address, zero_null_code_stub));
 }
 
 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
                                                 methodHandle method,
                                                 int compile_id,
-                                                int total_args_passed,
-                                                int max_arg,
                                                 BasicType *sig_bt,
                                                 VMRegPair *regs,
                                                 BasicType ret_type) {
@@ -96,19 +101,20 @@
   ShouldNotCallThis();
 }
 
+JRT_LEAF(void, zero_stub())
+  ShouldNotCallThis();
+JRT_END
+
 static RuntimeStub* generate_empty_runtime_stub(const char* name) {
-  CodeBuffer buffer(name, 0, 0);
-  return RuntimeStub::new_runtime_stub(name, &buffer, 0, 0, NULL, false);
+  return CAST_FROM_FN_PTR(RuntimeStub*, zero_stub);
 }
 
 static SafepointBlob* generate_empty_safepoint_blob() {
-  CodeBuffer buffer("handler_blob", 0, 0);
-  return SafepointBlob::create(&buffer, NULL, 0);
+  return CAST_FROM_FN_PTR(SafepointBlob*, zero_stub);
 }
 
 static DeoptimizationBlob* generate_empty_deopt_blob() {
-  CodeBuffer buffer("handler_blob", 0, 0);
-  return DeoptimizationBlob::create(&buffer, NULL, 0, 0, 0, 0);
+  return CAST_FROM_FN_PTR(DeoptimizationBlob*, zero_stub);
 }
 
 
@@ -116,7 +122,7 @@
   _deopt_blob = generate_empty_deopt_blob();
 }
 
-SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
+SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
   return generate_empty_safepoint_blob();
 }
 
@@ -124,6 +130,7 @@
   return generate_empty_runtime_stub("resolve_blob");
 }
 
+
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                          VMRegPair *regs,
                                          int total_args_passed) {
--- a/src/share/tools/hsdis/hsdis-demo.c	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/tools/hsdis/hsdis-demo.c	Thu Nov 08 16:48:01 2012 -0800
@@ -85,9 +85,11 @@
 
 #include "dlfcn.h"
 
-#define DECODE_INSTRUCTIONS_NAME "decode_instructions_virtual"
+#define DECODE_INSTRUCTIONS_VIRTUAL_NAME "decode_instructions_virtual"
+#define DECODE_INSTRUCTIONS_NAME "decode_instructions"
 #define HSDIS_NAME               "hsdis"
 static void* decode_instructions_pv = 0;
+static void* decode_instructions_sv = 0;
 static const char* hsdis_path[] = {
   HSDIS_NAME"-"LIBARCH LIB_EXT,
   "./" HSDIS_NAME"-"LIBARCH LIB_EXT,
@@ -101,11 +103,12 @@
   void* dllib = NULL;
   const char* *next_in_path = hsdis_path;
   while (1) {
-    decode_instructions_pv = dlsym(dllib, DECODE_INSTRUCTIONS_NAME);
-    if (decode_instructions_pv != NULL)
+    decode_instructions_pv = dlsym(dllib, DECODE_INSTRUCTIONS_VIRTUAL_NAME);
+    decode_instructions_sv = dlsym(dllib, DECODE_INSTRUCTIONS_NAME);
+    if (decode_instructions_pv != NULL || decode_instructions_sv != NULL)
       return NULL;
     if (dllib != NULL)
-      return "plugin does not defined "DECODE_INSTRUCTIONS_NAME;
+      return "plugin does not define "DECODE_INSTRUCTIONS_VIRTUAL_NAME" or "DECODE_INSTRUCTIONS_NAME;
     for (dllib = NULL; dllib == NULL; ) {
       const char* next_lib = (*next_in_path++);
       if (next_lib == NULL)
@@ -213,20 +216,44 @@
     printf("%s: %s\n", err, dlerror());
     exit(1);
   }
-  printf("Decoding from %p to %p...\n", from, to);
-  decode_instructions_ftype decode_instructions
-    = (decode_instructions_ftype) decode_instructions_pv;
+  decode_func_vtype decode_instructions_v
+    = (decode_func_vtype) decode_instructions_pv;
+  decode_func_stype decode_instructions_s
+    = (decode_func_stype) decode_instructions_sv;
   void* res;
-  if (raw && xml) {
-    res = (*decode_instructions)(from, to, (unsigned char*)from, to - from, simple_handle_event, stdout, NULL, stdout, options);
-  } else if (raw) {
-    res = (*decode_instructions)(from, to, (unsigned char*)from, to - from, simple_handle_event, stdout, NULL, stdout, options);
-  } else {
-    res = (*decode_instructions)(from, to, (unsigned char*)from, to - from,
-                                 handle_event, (void*) event_cookie,
-                                 fprintf_callback, stdout,
-                                 options);
+  if (decode_instructions_pv != NULL) {
+    printf("\nDecoding from %p to %p...with %s\n", from, to, DECODE_INSTRUCTIONS_VIRTUAL_NAME);
+    if (raw) {
+      res = (*decode_instructions_v)(from, to,
+                                     (unsigned char*)from, to - from,
+                                     simple_handle_event, stdout,
+                                     NULL, stdout,
+                                     options, 0);
+    } else {
+      res = (*decode_instructions_v)(from, to,
+                                     (unsigned char*)from, to - from,
+                                     handle_event, (void*) event_cookie,
+                                     fprintf_callback, stdout,
+                                     options, 0);
+    }
+    if (res != (void*)to)
+      printf("*** Result was %p!\n", res);
   }
-  if (res != (void*)to)
-    printf("*** Result was %p!\n", res);
+  void* sres;
+  if (decode_instructions_sv != NULL) {
+    printf("\nDecoding from %p to %p...with %s\n", from, to, DECODE_INSTRUCTIONS_NAME);
+    if (raw) {
+      sres = (*decode_instructions_s)(from, to,
+                                      simple_handle_event, stdout,
+                                      NULL, stdout,
+                                      options);
+    } else {
+      sres = (*decode_instructions_s)(from, to,
+                                      handle_event, (void*) event_cookie,
+                                      fprintf_callback, stdout,
+                                      options);
+    }
+    if (sres != (void *)to)
+      printf("*** Result of decode_instructions was %p!\n", sres);
+  }
 }
--- a/src/share/tools/hsdis/hsdis.c	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/tools/hsdis/hsdis.c	Thu Nov 08 16:48:01 2012 -0800
@@ -99,7 +99,7 @@
                             unsigned char* buffer, uintptr_t length,
                             event_callback_t  event_callback_arg,  void* event_stream_arg,
                             printf_callback_t printf_callback_arg, void* printf_stream_arg,
-                            const char* options) {
+                            const char* options, int newline) {
   struct hsdis_app_data app_data;
   memset(&app_data, 0, sizeof(app_data));
   app_data.start_va    = start_va;
@@ -110,7 +110,7 @@
   app_data.event_stream    = event_stream_arg;
   app_data.printf_callback = printf_callback_arg;
   app_data.printf_stream   = printf_stream_arg;
-  app_data.do_newline = false;
+  app_data.do_newline = (newline != 0);
 
   return decode(&app_data, options);
 }
@@ -132,7 +132,7 @@
                              event_stream_arg,
                              printf_callback_arg,
                              printf_stream_arg,
-                             options);
+                             options, false);
 }
 
 static void* decode(struct hsdis_app_data* app_data, const char* options) {
@@ -173,7 +173,7 @@
       if (!app_data->losing) {
         const char* insn_close = format_insn_close("/insn", &app_data->dinfo,
                                                    buf, sizeof(buf));
-        (*event_callback)(event_stream, insn_close, (void*) p) != NULL;
+        (*event_callback)(event_stream, insn_close, (void*) p);
 
         if (app_data->do_newline) {
           /* follow each complete insn by a nice newline */
@@ -182,13 +182,14 @@
       }
     }
 
-    (*event_callback)(event_stream, "/insns", (void*) p);
+    if (app_data->losing) (*event_callback)(event_stream, "/insns", (void*) p);
     return (void*) p;
   }
 }
 
 /* take the address of the function, for luck, and also test the typedef: */
-const decode_instructions_ftype decode_instructions_address = &decode_instructions_virtual;
+const decode_func_vtype decode_func_virtual_address = &decode_instructions_virtual;
+const decode_func_stype decode_func_address = &decode_instructions;
 
 static const char* format_insn_close(const char* close,
                                      disassemble_info* dinfo,
--- a/src/share/tools/hsdis/hsdis.h	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/tools/hsdis/hsdis.h	Thu Nov 08 16:48:01 2012 -0800
@@ -47,6 +47,9 @@
    where tag is a simple identifier, signifying (as in XML) a element start,
    element end, and standalone element.  (To render as XML, add angle brackets.)
 */
+#ifndef SHARED_TOOLS_HSDIS_H
+#define SHARED_TOOLS_HSDIS_H
+
 extern
 #ifdef DLL_EXPORT
   DLL_EXPORT
@@ -57,16 +60,37 @@
                                   void* event_stream,
                                   int (*printf_callback)(void*, const char*, ...),
                                   void* printf_stream,
-                                  const char* options);
+                                  const char* options,
+                                  int newline /* boolean: emit a newline after each complete instruction */);
+
+/* This is the compatibility interface for older versions of hotspot */
+extern
+#ifdef DLL_ENTRY
+  DLL_ENTRY
+#endif
+void* decode_instructions(void* start_pv, void* end_pv,
+                    void* (*event_callback)(void*, const char*, void*),
+                    void* event_stream,
+                    int   (*printf_callback)(void*, const char*, ...),
+                    void* printf_stream,
+                    const char* options);
 
 /* convenience typedefs */
 
 typedef void* (*decode_instructions_event_callback_ftype)  (void*, const char*, void*);
 typedef int   (*decode_instructions_printf_callback_ftype) (void*, const char*, ...);
-typedef void* (*decode_instructions_ftype) (uintptr_t start_va, uintptr_t end_va,
-                                            unsigned char* buffer, uintptr_t length,
-                                            decode_instructions_event_callback_ftype event_callback,
-                                            void* event_stream,
-                                            decode_instructions_printf_callback_ftype printf_callback,
-                                            void* printf_stream,
-                                            const char* options);
+typedef void* (*decode_func_vtype) (uintptr_t start_va, uintptr_t end_va,
+                                    unsigned char* buffer, uintptr_t length,
+                                    decode_instructions_event_callback_ftype event_callback,
+                                    void* event_stream,
+                                    decode_instructions_printf_callback_ftype printf_callback,
+                                    void* printf_stream,
+                                    const char* options,
+                                    int newline);
+typedef void* (*decode_func_stype) (void* start_pv, void* end_pv,
+                                    decode_instructions_event_callback_ftype event_callback,
+                                    void* event_stream,
+                                    decode_instructions_printf_callback_ftype printf_callback,
+                                    void* printf_stream,
+                                    const char* options);
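+
+/* Illustrative lookup (a sketch mirroring hsdis-demo.c; 'lib' is an assumed
+   dlopen handle): clients should try the virtual entry point first and fall
+   back to the compatibility one for older plugins:
+
+     void* pv = dlsym(lib, "decode_instructions_virtual");
+     void* sv = dlsym(lib, "decode_instructions");   / * older plugins * /
+*/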
+#endif /* SHARED_TOOLS_HSDIS_H */
--- a/src/share/vm/asm/codeBuffer.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/asm/codeBuffer.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -758,7 +758,7 @@
     }
   }
 
-  if (dest->blob() == NULL) {
+  if (dest->blob() == NULL && dest_filled != NULL) {
     // Destination is a final resting place, not just another buffer.
     // Normalize uninitialized bytes in the final padding.
     Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -1844,17 +1844,12 @@
         code == Bytecodes::_invokevirtual && target->is_final_method() ||
         code == Bytecodes::_invokedynamic) {
       ciMethod* inline_target = (cha_monomorphic_target != NULL) ? cha_monomorphic_target : target;
-      bool success = false;
-      if (target->is_method_handle_intrinsic()) {
-        // method handle invokes
-        success = try_method_handle_inline(target);
-      } else {
-        // static binding => check if callee is ok
-        success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), code, better_receiver);
-      }
+      // static binding => check if callee is ok
+      bool success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), code, better_receiver);
+
       CHECK_BAILOUT();
-
       clear_inline_bailout();
+
       if (success) {
         // Register dependence if JVMTI has either breakpoint
         // setting or hotswapping of methods capabilities since they may
@@ -3201,6 +3196,11 @@
     return false;
   }
 
+  // method handle invokes
+  if (callee->is_method_handle_intrinsic()) {
+    return try_method_handle_inline(callee);
+  }
+
   // handle intrinsics
   if (callee->intrinsic_id() != vmIntrinsics::_none) {
     if (try_inline_intrinsics(callee)) {
@@ -3885,10 +3885,14 @@
       ValueType* type = state()->stack_at(args_base)->type();
       if (type->is_constant()) {
         ciMethod* target = type->as_ObjectType()->constant_value()->as_method_handle()->get_vmtarget();
-        guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
-        Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
-        if (try_inline(target, /*holder_known*/ true, bc)) {
-          return true;
+        // We don't do CHA here so only inline static and statically bindable methods.
+        if (target->is_static() || target->can_be_statically_bound()) {
+          Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
+          if (try_inline(target, /*holder_known*/ true, bc)) {
+            return true;
+          }
+        } else {
+          print_inlining(target, "not static or statically bindable", /*success*/ false);
         }
       } else {
         print_inlining(callee, "receiver not constant", /*success*/ false);
@@ -3941,9 +3945,14 @@
             }
             j += t->size();  // long and double take two slots
           }
-          Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
-          if (try_inline(target, /*holder_known*/ true, bc)) {
-            return true;
+          // We don't do CHA here so only inline static and statically bindable methods.
+          if (target->is_static() || target->can_be_statically_bound()) {
+            Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
+            if (try_inline(target, /*holder_known*/ true, bc)) {
+              return true;
+            }
+          } else {
+            print_inlining(target, "not static or statically bindable", /*success*/ false);
           }
         }
       } else {
--- a/src/share/vm/ci/ciEnv.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/ci/ciEnv.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -768,8 +768,8 @@
       Method* m = lookup_method(accessor->get_instanceKlass(), lookup, name_sym, sig_sym, bc);
       if (m != NULL &&
           (bc == Bytecodes::_invokestatic
-           ?  InstanceKlass::cast(m->method_holder())->is_not_initialized()
-           : !InstanceKlass::cast(m->method_holder())->is_loaded())) {
+           ?  m->method_holder()->is_not_initialized()
+           : !m->method_holder()->is_loaded())) {
         m = NULL;
       }
       if (m != NULL) {
@@ -1056,7 +1056,7 @@
                         method_name,
                         entry_bci);
         }
-        InstanceKlass::cast(method->method_holder())->add_osr_nmethod(nm);
+        method->method_holder()->add_osr_nmethod(nm);
 
       }
     }
--- a/src/share/vm/ci/ciMethod.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/ci/ciMethod.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -105,7 +105,7 @@
     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
   }
 
-  if (InstanceKlass::cast(h_m()->method_holder())->is_linked()) {
+  if (h_m()->method_holder()->is_linked()) {
     _can_be_statically_bound = h_m()->can_be_statically_bound();
   } else {
     // Have to use a conservative value in this case.
@@ -188,7 +188,7 @@
 
   // Revert any breakpoint bytecodes in ci's copy
   if (me->number_of_breakpoints() > 0) {
-    BreakpointInfo* bp = InstanceKlass::cast(me->method_holder())->breakpoints();
+    BreakpointInfo* bp = me->method_holder()->breakpoints();
     for (; bp != NULL; bp = bp->next()) {
       if (bp->match(me)) {
         code_at_put(bp->bci(), bp->orig_bytecode());
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/bytecodeAssembler.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "classfile/bytecodeAssembler.hpp"
+#include "interpreter/bytecodes.hpp"
+#include "memory/oopFactory.hpp"
+#include "oops/constantPool.hpp"
+
+#ifdef TARGET_ARCH_x86
+# include "bytes_x86.hpp"
+#endif
+#ifdef TARGET_ARCH_sparc
+# include "bytes_sparc.hpp"
+#endif
+#ifdef TARGET_ARCH_zero
+# include "bytes_zero.hpp"
+#endif
+#ifdef TARGET_ARCH_arm
+# include "bytes_arm.hpp"
+#endif
+#ifdef TARGET_ARCH_ppc
+# include "bytes_ppc.hpp"
+#endif
+
+u2 BytecodeConstantPool::find_or_add(BytecodeCPEntry const& bcpe) {
+  u2 index;
+  u2* probe = _indices.get(bcpe);
+  if (probe == NULL) {
+    index = _entries.length();
+    _entries.append(bcpe);
+    _indices.put(bcpe, index);
+  } else {
+    index = *probe;
+  }
+  return index + _orig->length();
+}
+
+ConstantPool* BytecodeConstantPool::create_constant_pool(TRAPS) const {
+  if (_entries.length() == 0) {
+    return _orig;
+  }
+
+  ConstantPool* cp = ConstantPool::allocate(
+      _orig->pool_holder()->class_loader_data(),
+      _orig->length() + _entries.length(), CHECK_NULL);
+
+  cp->set_pool_holder(_orig->pool_holder());
+  _orig->copy_cp_to(1, _orig->length() - 1, cp, 1, CHECK_NULL);
+
+  for (int i = 0; i < _entries.length(); ++i) {
+    BytecodeCPEntry entry = _entries.at(i);
+    int idx = i + _orig->length();
+    switch (entry._tag) {
+      case BytecodeCPEntry::UTF8:
+        cp->symbol_at_put(idx, entry._u.utf8);
+        entry._u.utf8->increment_refcount();
+        break;
+      case BytecodeCPEntry::KLASS:
+        cp->unresolved_klass_at_put(
+            idx, cp->symbol_at(entry._u.klass));
+        break;
+      case BytecodeCPEntry::STRING:
+        cp->unresolved_string_at_put(
+            idx, cp->symbol_at(entry._u.string));
+        break;
+      case BytecodeCPEntry::NAME_AND_TYPE:
+        cp->name_and_type_at_put(idx,
+            entry._u.name_and_type.name_index,
+            entry._u.name_and_type.type_index);
+        break;
+      case BytecodeCPEntry::METHODREF:
+        cp->method_at_put(idx,
+            entry._u.methodref.class_index,
+            entry._u.methodref.name_and_type_index);
+        break;
+      default:
+        ShouldNotReachHere();
+    }
+  }
+  return cp;
+}
+
+void BytecodeAssembler::append(u1 imm_u1) {
+  _code->append(imm_u1);
+}
+
+void BytecodeAssembler::append(u2 imm_u2) {
+  _code->append(0);
+  _code->append(0);
+  Bytes::put_Java_u2(_code->adr_at(_code->length() - 2), imm_u2);
+}
+
+void BytecodeAssembler::append(u4 imm_u4) {
+  _code->append(0);
+  _code->append(0);
+  _code->append(0);
+  _code->append(0);
+  Bytes::put_Java_u4(_code->adr_at(_code->length() - 4), imm_u4);
+}
+
+void BytecodeAssembler::xload(u4 index, u1 onebyteop, u1 twobyteop) {
+  if (index < 4) {
+    _code->append(onebyteop + index);
+  } else {
+    _code->append(twobyteop);
+    _code->append((u2)index);
+  }
+}
+
+void BytecodeAssembler::dup() {
+  _code->append(Bytecodes::_dup);
+}
+
+void BytecodeAssembler::_new(Symbol* sym) {
+  u2 cpool_index = _cp->klass(sym);
+  _code->append(Bytecodes::_new);
+  append(cpool_index);
+}
+
+void BytecodeAssembler::load_string(Symbol* sym) {
+  u2 cpool_index = _cp->string(sym);
+  if (cpool_index < 0x100) {
+    ldc(cpool_index);
+  } else {
+    ldc_w(cpool_index);
+  }
+}
+
+void BytecodeAssembler::ldc(u1 index) {
+  _code->append(Bytecodes::_ldc);
+  append(index);
+}
+
+void BytecodeAssembler::ldc_w(u2 index) {
+  _code->append(Bytecodes::_ldc_w);
+  append(index);
+}
+
+void BytecodeAssembler::athrow() {
+  _code->append(Bytecodes::_athrow);
+}
+
+void BytecodeAssembler::iload(u4 index) {
+  xload(index, Bytecodes::_iload_0, Bytecodes::_iload);
+}
+
+void BytecodeAssembler::lload(u4 index) {
+  xload(index, Bytecodes::_lload_0, Bytecodes::_lload);
+}
+
+void BytecodeAssembler::fload(u4 index) {
+  xload(index, Bytecodes::_fload_0, Bytecodes::_fload);
+}
+
+void BytecodeAssembler::dload(u4 index) {
+  xload(index, Bytecodes::_dload_0, Bytecodes::_dload);
+}
+
+void BytecodeAssembler::aload(u4 index) {
+  xload(index, Bytecodes::_aload_0, Bytecodes::_aload);
+}
+
+void BytecodeAssembler::load(BasicType bt, u4 index) {
+  switch (bt) {
+    case T_BOOLEAN:
+    case T_CHAR:
+    case T_BYTE:
+    case T_SHORT:
+    case T_INT:     iload(index); break;
+    case T_FLOAT:   fload(index); break;
+    case T_DOUBLE:  dload(index); break;
+    case T_LONG:    lload(index); break;
+    case T_OBJECT:
+    case T_ARRAY:   aload(index); break;
+    default:
+      ShouldNotReachHere();
+  }
+}
+
+void BytecodeAssembler::checkcast(Symbol* sym) {
+  u2 cpool_index = _cp->klass(sym);
+  _code->append(Bytecodes::_checkcast);
+  append(cpool_index);
+}
+
+void BytecodeAssembler::invokespecial(Method* method) {
+  invokespecial(method->klass_name(), method->name(), method->signature());
+}
+
+void BytecodeAssembler::invokespecial(Symbol* klss, Symbol* name, Symbol* sig) {
+  u2 methodref_index = _cp->methodref(klss, name, sig);
+  _code->append(Bytecodes::_invokespecial);
+  append(methodref_index);
+}
+
+void BytecodeAssembler::invokevirtual(Method* method) {
+  invokevirtual(method->klass_name(), method->name(), method->signature());
+}
+
+void BytecodeAssembler::invokevirtual(Symbol* klss, Symbol* name, Symbol* sig) {
+  u2 methodref_index = _cp->methodref(klss, name, sig);
+  _code->append(Bytecodes::_invokevirtual);
+  append(methodref_index);
+}
+
+void BytecodeAssembler::ireturn() {
+  _code->append(Bytecodes::_ireturn);
+}
+
+void BytecodeAssembler::lreturn() {
+  _code->append(Bytecodes::_lreturn);
+}
+
+void BytecodeAssembler::freturn() {
+  _code->append(Bytecodes::_freturn);
+}
+
+void BytecodeAssembler::dreturn() {
+  _code->append(Bytecodes::_dreturn);
+}
+
+void BytecodeAssembler::areturn() {
+  _code->append(Bytecodes::_areturn);
+}
+
+void BytecodeAssembler::_return() {
+  _code->append(Bytecodes::_return);
+}
+
+void BytecodeAssembler::_return(BasicType bt) {
+  switch (bt) {
+    case T_BOOLEAN:
+    case T_CHAR:
+    case T_BYTE:
+    case T_SHORT:
+    case T_INT:     ireturn(); break;
+    case T_FLOAT:   freturn(); break;
+    case T_DOUBLE:  dreturn(); break;
+    case T_LONG:    lreturn(); break;
+    case T_OBJECT:
+    case T_ARRAY:   areturn(); break;
+    case T_VOID:    _return(); break;
+    default:
+      ShouldNotReachHere();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/bytecodeAssembler.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_BYTECODEASSEMBLER_HPP
+#define SHARE_VM_CLASSFILE_BYTECODEASSEMBLER_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/method.hpp"
+#include "oops/symbol.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/growableArray.hpp"
+#include "utilities/resourceHash.hpp"
+
+
+/**
+ * Bytecode Assembler
+ *
+ * These classes are used to synthesize code for creating new methods from
+ * within the VM.  This is only a partial implementation of an assembler;
+ * only the bytecodes that are needed by clients are implemented at this time.
+ * This is used during default method analysis to create overpass methods
+ * and add them to a class during parsing.  Other uses (such as creating
+ * bridges) may come later.  Any missing bytecodes can be implemented on an
+ * as-needed basis.
+ */
+
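+// A minimal usage sketch (illustration only; the symbols super_name,
+// method_name and signature are assumed to be available to the caller):
+// emit "aload_0; invokespecial Super.m()V; return", the shape of a simple
+// overpass body.
+//
+//   BytecodeBuffer buffer;
+//   BytecodeConstantPool bcp(klass->constants());
+//   BytecodeAssembler assem(&buffer, &bcp);
+//   assem.aload(0);                                  // push 'this'
+//   assem.invokespecial(super_name, method_name, signature);
+//   assem._return();                                 // void return
+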
+class BytecodeBuffer : public GrowableArray<u1> {
+ public:
+  BytecodeBuffer() : GrowableArray<u1>(20) {}
+};
+
+// Entries in a yet-to-be-created constant pool.  Limited types for now.
+class BytecodeCPEntry VALUE_OBJ_CLASS_SPEC {
+ public:
+  enum tag {
+    ERROR_TAG,
+    UTF8,
+    KLASS,
+    STRING,
+    NAME_AND_TYPE,
+    METHODREF
+  };
+
+  u1 _tag;
+  union {
+    Symbol* utf8;
+    u2 klass;
+    u2 string;
+    struct {
+      u2 name_index;
+      u2 type_index;
+    } name_and_type;
+    struct {
+      u2 class_index;
+      u2 name_and_type_index;
+    } methodref;
+    uintptr_t hash;
+  } _u;
+
+  BytecodeCPEntry() : _tag(ERROR_TAG) { _u.hash = 0; }
+  BytecodeCPEntry(u1 tag) : _tag(tag) { _u.hash = 0; }
+
+  static BytecodeCPEntry utf8(Symbol* symbol) {
+    BytecodeCPEntry bcpe(UTF8);
+    bcpe._u.utf8 = symbol;
+    return bcpe;
+  }
+
+  static BytecodeCPEntry klass(u2 index) {
+    BytecodeCPEntry bcpe(KLASS);
+    bcpe._u.klass = index;
+    return bcpe;
+  }
+
+  static BytecodeCPEntry string(u2 index) {
+    BytecodeCPEntry bcpe(STRING);
+    bcpe._u.string = index;
+    return bcpe;
+  }
+
+  static BytecodeCPEntry name_and_type(u2 name, u2 type) {
+    BytecodeCPEntry bcpe(NAME_AND_TYPE);
+    bcpe._u.name_and_type.name_index = name;
+    bcpe._u.name_and_type.type_index = type;
+    return bcpe;
+  }
+
+  static BytecodeCPEntry methodref(u2 class_index, u2 nat) {
+    BytecodeCPEntry bcpe(METHODREF);
+    bcpe._u.methodref.class_index = class_index;
+    bcpe._u.methodref.name_and_type_index = nat;
+    return bcpe;
+  }
+
+  static bool equals(BytecodeCPEntry const& e0, BytecodeCPEntry const& e1) {
+    return e0._tag == e1._tag && e0._u.hash == e1._u.hash;
+  }
+
+  static unsigned hash(BytecodeCPEntry const& e0) {
+    return (unsigned)(e0._tag ^ e0._u.hash);
+  }
+};
+
+class BytecodeConstantPool : ResourceObj {
+ private:
+  typedef ResourceHashtable<BytecodeCPEntry, u2,
+      &BytecodeCPEntry::hash, &BytecodeCPEntry::equals> IndexHash;
+
+  ConstantPool* _orig;
+  GrowableArray<BytecodeCPEntry> _entries;
+  IndexHash _indices;
+
+  u2 find_or_add(BytecodeCPEntry const& bcpe);
+
+ public:
+
+  BytecodeConstantPool(ConstantPool* orig) : _orig(orig) {}
+
+  BytecodeCPEntry const& at(u2 index) const { return _entries.at(index); }
+
+  InstanceKlass* pool_holder() const {
+    return InstanceKlass::cast(_orig->pool_holder());
+  }
+
+  u2 utf8(Symbol* sym) {
+    return find_or_add(BytecodeCPEntry::utf8(sym));
+  }
+
+  u2 klass(Symbol* class_name) {
+    return find_or_add(BytecodeCPEntry::klass(utf8(class_name)));
+  }
+
+  u2 string(Symbol* str) {
+    return find_or_add(BytecodeCPEntry::string(utf8(str)));
+  }
+
+  u2 name_and_type(Symbol* name, Symbol* sig) {
+    return find_or_add(BytecodeCPEntry::name_and_type(utf8(name), utf8(sig)));
+  }
+
+  u2 methodref(Symbol* class_name, Symbol* name, Symbol* sig) {
+    return find_or_add(BytecodeCPEntry::methodref(
+        klass(class_name), name_and_type(name, sig)));
+  }
+
+  ConstantPool* create_constant_pool(TRAPS) const;
+};
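+
+// Illustrative sketch (symbol names assumed): entries are deduplicated, and
+// indices are handed out past the end of the original pool, so the merged
+// pool can be created without renumbering existing entries.
+//
+//   BytecodeConstantPool bcp(klass->constants());
+//   u2 a = bcp.methodref(class_name, method_name, signature);
+//   u2 b = bcp.methodref(class_name, method_name, signature);  // b == a
+//   ConstantPool* merged = bcp.create_constant_pool(CHECK_NULL);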
+
+// Partial bytecode assembler - only what we need for creating
+// overpass methods for default methods is implemented
+class BytecodeAssembler : StackObj {
+ private:
+  BytecodeBuffer* _code;
+  BytecodeConstantPool* _cp;
+
+  void append(u1 imm_u1);
+  void append(u2 imm_u2);
+  void append(u4 imm_u4);
+
+  void xload(u4 index, u1 quick, u1 twobyte);
+
+ public:
+  BytecodeAssembler(BytecodeBuffer* buffer, BytecodeConstantPool* cp)
+    : _code(buffer), _cp(cp) {}
+
+  void aload(u4 index);
+  void areturn();
+  void athrow();
+  void checkcast(Symbol* sym);
+  void dload(u4 index);
+  void dreturn();
+  void dup();
+  void fload(u4 index);
+  void freturn();
+  void iload(u4 index);
+  void invokespecial(Method* method);
+  void invokespecial(Symbol* cls, Symbol* name, Symbol* sig);
+  void invokevirtual(Method* method);
+  void invokevirtual(Symbol* cls, Symbol* name, Symbol* sig);
+  void ireturn();
+  void ldc(u1 index);
+  void ldc_w(u2 index);
+  void lload(u4 index);
+  void lreturn();
+  void _new(Symbol* sym);
+  void _return();
+
+  void load_string(Symbol* sym);
+  void load(BasicType bt, u4 index);
+  void _return(BasicType bt);
+};
+
+#endif // SHARE_VM_CLASSFILE_BYTECODEASSEMBLER_HPP
--- a/src/share/vm/classfile/classFileParser.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/classfile/classFileParser.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -27,6 +27,8 @@
 #include "classfile/classLoader.hpp"
 #include "classfile/classLoaderData.hpp"
 #include "classfile/classLoaderData.inline.hpp"
+#include "classfile/defaultMethods.hpp"
+#include "classfile/genericSignatures.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
@@ -84,6 +86,9 @@
 // - to check NameAndType_info signatures more aggressively
 #define JAVA_7_VERSION                    51
 
+// Extension method support.
+#define JAVA_8_VERSION                    52
+
 
 void ClassFileParser::parse_constant_pool_entries(ClassLoaderData* loader_data, constantPoolHandle cp, int length, TRAPS) {
   // Use a local copy of ClassFileStream. It helps the C++ compiler to optimize
@@ -785,6 +790,7 @@
                                                  ClassLoaderData* loader_data,
                                                  Handle protection_domain,
                                                  Symbol* class_name,
+                                                 bool* has_default_methods,
                                                  TRAPS) {
   ClassFileStream* cfs = stream();
   assert(length > 0, "only called for length>0");
@@ -821,6 +827,9 @@
     if (!Klass::cast(interf())->is_interface()) {
       THROW_MSG_(vmSymbols::java_lang_IncompatibleClassChangeError(), "Implementing class", NULL);
     }
+    if (InstanceKlass::cast(interf())->has_default_methods()) {
+      *has_default_methods = true;
+    }
     interfaces->at_put(index, interf());
   }
 
@@ -1928,7 +1937,8 @@
     if (method_attribute_name == vmSymbols::tag_code()) {
       // Parse Code attribute
       if (_need_verify) {
-        guarantee_property(!access_flags.is_native() && !access_flags.is_abstract(),
+        guarantee_property(
+            !access_flags.is_native() && !access_flags.is_abstract(),
                         "Code attribute in native or abstract methods in class file %s",
                          CHECK_(nullHandle));
       }
@@ -2125,7 +2135,9 @@
         runtime_visible_annotations_length = method_attribute_length;
         runtime_visible_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_annotations != NULL, "null visible annotations");
-        parse_annotations(runtime_visible_annotations, runtime_visible_annotations_length, cp, &parsed_annotations, CHECK_(nullHandle));
+        parse_annotations(runtime_visible_annotations,
+            runtime_visible_annotations_length, cp, &parsed_annotations,
+            CHECK_(nullHandle));
         cfs->skip_u1(runtime_visible_annotations_length, CHECK_(nullHandle));
       } else if (PreserveAllAnnotations && method_attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
         runtime_invisible_annotations_length = method_attribute_length;
@@ -2169,12 +2181,10 @@
   }
 
   // All sizing information for a Method* is finally available, now create it
-  Method* m = Method::allocate(loader_data, code_length, access_flags,
-                               linenumber_table_length,
-                               total_lvt_length,
-                               exception_table_length,
-                               checked_exceptions_length,
-                               CHECK_(nullHandle));
+  Method* m = Method::allocate(
+      loader_data, code_length, access_flags, linenumber_table_length,
+      total_lvt_length, exception_table_length, checked_exceptions_length,
+      ConstMethod::NORMAL, CHECK_(nullHandle));
 
   ClassLoadingService::add_class_method_size(m->size()*HeapWordSize);
 
@@ -2204,7 +2214,6 @@
   // Fill in code attribute information
   m->set_max_stack(max_stack);
   m->set_max_locals(max_locals);
-
   m->constMethod()->set_stackmap_data(stackmap_data);
 
   // Copy byte codes
@@ -2356,6 +2365,7 @@
                                                Array<AnnotationArray*>** methods_annotations,
                                                Array<AnnotationArray*>** methods_parameter_annotations,
                                                Array<AnnotationArray*>** methods_default_annotations,
+                                               bool* has_default_methods,
                                                TRAPS) {
   ClassFileStream* cfs = stream();
   AnnotationArray* method_annotations = NULL;
@@ -2382,6 +2392,10 @@
       if (method->is_final()) {
         *has_final_method = true;
       }
+      if (is_interface && !method->is_abstract() && !method->is_static()) {
+        // default method
+        *has_default_methods = true;
+      }
       methods->at_put(index, method());
       if (*methods_annotations == NULL) {
         *methods_annotations =
@@ -2907,6 +2921,34 @@
 }
 
 
+#ifndef PRODUCT
+static void parseAndPrintGenericSignatures(
+    instanceKlassHandle this_klass, TRAPS) {
+  assert(ParseAllGenericSignatures == true, "Shouldn't call otherwise");
+  ResourceMark rm;
+
+  if (this_klass->generic_signature() != NULL) {
+    using namespace generic;
+    ClassDescriptor* spec = ClassDescriptor::parse_generic_signature(this_klass(), CHECK);
+
+    tty->print_cr("Parsing %s", this_klass->generic_signature()->as_C_string());
+    spec->print_on(tty);
+
+    for (int i = 0; i < this_klass->methods()->length(); ++i) {
+      Method* m = this_klass->methods()->at(i);
+      MethodDescriptor* method_spec = MethodDescriptor::parse_generic_signature(m, spec);
+      Symbol* sig = m->generic_signature();
+      if (sig == NULL) {
+        sig = m->signature();
+      }
+      tty->print_cr("Parsing %s", sig->as_C_string());
+      method_spec->print_on(tty);
+    }
+  }
+}
+#endif // ndef PRODUCT
+
+
 instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
                                                     Handle class_loader,
                                                     Handle protection_domain,
@@ -2923,6 +2965,8 @@
   unsigned char *cached_class_file_bytes = NULL;
   jint cached_class_file_length;
   ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
+  bool has_default_methods = false;
+  ResourceMark rm(THREAD);
 
   ClassFileStream* cfs = stream();
   // Timing
@@ -3138,7 +3182,9 @@
     if (itfs_len == 0) {
       local_interfaces = Universe::the_empty_klass_array();
     } else {
-      local_interfaces = parse_interfaces(cp, itfs_len, loader_data, protection_domain, _class_name, CHECK_(nullHandle));
+      local_interfaces = parse_interfaces(
+          cp, itfs_len, loader_data, protection_domain, _class_name,
+          &has_default_methods, CHECK_(nullHandle));
     }
 
     u2 java_fields_count = 0;
@@ -3164,6 +3210,7 @@
                                             &methods_annotations,
                                             &methods_parameter_annotations,
                                             &methods_default_annotations,
+                                            &has_default_methods,
                                             CHECK_(nullHandle));
 
     // Additional attributes
@@ -3193,6 +3240,11 @@
       super_klass = instanceKlassHandle(THREAD, kh());
     }
     if (super_klass.not_null()) {
+
+      if (super_klass->has_default_methods()) {
+        has_default_methods = true;
+      }
+
       if (super_klass->is_interface()) {
         ResourceMark rm(THREAD);
         Exceptions::fthrow(
@@ -3229,14 +3281,11 @@
     int itable_size = 0;
     int num_miranda_methods = 0;
 
-    klassVtable::compute_vtable_size_and_num_mirandas(vtable_size,
-                                                      num_miranda_methods,
-                                                      super_klass(),
-                                                      methods,
-                                                      access_flags,
-                                                      class_loader,
-                                                      class_name,
-                                                      local_interfaces,
+    GrowableArray<Method*> all_mirandas(20);
+
+    klassVtable::compute_vtable_size_and_num_mirandas(
+        &vtable_size, &num_miranda_methods, &all_mirandas, super_klass(), methods,
+        access_flags, class_loader, class_name, local_interfaces,
                                                       CHECK_(nullHandle));
 
     // Size of Java itable (in words)
@@ -3656,6 +3705,7 @@
 
     this_klass->set_minor_version(minor_version);
     this_klass->set_major_version(major_version);
+    this_klass->set_has_default_methods(has_default_methods);
 
     // Set up Method*::intrinsic_id as soon as we know the names of methods.
     // (We used to do this lazily, but now we query it in Rewriter,
@@ -3673,6 +3723,16 @@
                                         cached_class_file_length);
     }
 
+    // Fill in field values obtained by parse_classfile_attributes
+    if (parsed_annotations.has_any_annotations())
+      parsed_annotations.apply_to(this_klass);
+    // Create annotations
+    if (_annotations != NULL && this_klass->annotations() == NULL) {
+      Annotations* anno = Annotations::allocate(loader_data, CHECK_NULL);
+      this_klass->set_annotations(anno);
+    }
+    apply_parsed_class_attributes(this_klass);
+
     // Miranda methods
     if ((num_miranda_methods > 0) ||
         // if this class introduced new miranda methods or
@@ -3682,18 +3742,6 @@
       this_klass->set_has_miranda_methods(); // then set a flag
     }
 
-    // Fill in field values obtained by parse_classfile_attributes
-    if (parsed_annotations.has_any_annotations()) {
-      parsed_annotations.apply_to(this_klass);
-    }
-    // Create annotations
-    if (_annotations != NULL && this_klass->annotations() == NULL) {
-      Annotations* anno = Annotations::allocate(loader_data, CHECK_NULL);
-      this_klass->set_annotations(anno);
-    }
-    apply_parsed_class_attributes(this_klass);
-
-    // Compute transitive closure of interfaces this class implements
     this_klass->set_transitive_interfaces(transitive_interfaces);
 
     // Fill in information needed to compute superclasses.
@@ -3702,6 +3750,7 @@
     // Initialize itable offset tables
     klassItable::setup_itable_offset_table(this_klass);
 
+    // Compute transitive closure of interfaces this class implements
     // Do final class setup
     fill_oop_maps(this_klass, nonstatic_oop_map_count, nonstatic_oop_offsets, nonstatic_oop_counts);
 
@@ -3726,6 +3775,21 @@
       check_illegal_static_method(this_klass, CHECK_(nullHandle));
     }
 
+
+#ifdef ASSERT
+    if (ParseAllGenericSignatures) {
+      parseAndPrintGenericSignatures(this_klass, CHECK_(nullHandle));
+    }
+#endif
+
+    // Generate any default methods - default methods are interface methods
+    // that have a default implementation.  This is new with the Lambda project.
+    if (has_default_methods && !access_flags.is_interface() &&
+        local_interfaces->length() > 0) {
+      DefaultMethods::generate_default_methods(
+          this_klass(), &all_mirandas, CHECK_(nullHandle));
+    }
+
     // Allocate mirror and initialize static fields
     java_lang_Class::create_mirror(this_klass, CHECK_(nullHandle));
 
@@ -3744,6 +3808,7 @@
                                              false /* not shared class */);
 
     if (TraceClassLoading) {
+      ResourceMark rm;
       // print in a single call to reduce interleaving of output
       if (cfs->source() != NULL) {
         tty->print("[Loaded %s from %s]\n", this_klass->external_name(),
@@ -3758,13 +3823,13 @@
           tty->print("[Loaded %s]\n", this_klass->external_name());
         }
       } else {
-        ResourceMark rm;
         tty->print("[Loaded %s from %s]\n", this_klass->external_name(),
                    InstanceKlass::cast(class_loader->klass())->external_name());
       }
     }
 
     if (TraceClassResolution) {
+      ResourceMark rm;
       // print out the superclass.
       const char * from = Klass::cast(this_klass())->external_name();
       if (this_klass->java_super() != NULL) {
@@ -3785,6 +3850,7 @@
 
 #ifndef PRODUCT
     if( PrintCompactFieldsSavings ) {
+      ResourceMark rm;
       if( nonstatic_field_size < orig_nonstatic_field_size ) {
         tty->print("[Saved %d of %d bytes in %s]\n",
                  (orig_nonstatic_field_size - nonstatic_field_size)*heapOopSize,
@@ -3811,7 +3877,6 @@
   return this_klass;
 }
 
-
 unsigned int
 ClassFileParser::compute_oop_map_count(instanceKlassHandle super,
                                        unsigned int nonstatic_oop_map_count,
@@ -4128,7 +4193,7 @@
           }
 
           // continue to look from super_m's holder's super.
-          k = InstanceKlass::cast(super_m->method_holder())->super();
+          k = super_m->method_holder()->super();
           continue;
         }
 
@@ -4263,13 +4328,16 @@
   const bool is_strict       = (flags & JVM_ACC_STRICT)       != 0;
   const bool is_synchronized = (flags & JVM_ACC_SYNCHRONIZED) != 0;
   const bool major_gte_15    = _major_version >= JAVA_1_5_VERSION;
+  const bool major_gte_8     = _major_version >= JAVA_8_VERSION;
   const bool is_initializer  = (name == vmSymbols::object_initializer_name());
 
   bool is_illegal = false;
 
   if (is_interface) {
-    if (!is_abstract || !is_public || is_static || is_final ||
-        is_native || (major_gte_15 && (is_synchronized || is_strict))) {
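+    // Class file version 52 (Java 8) is the first to permit non-abstract
+    // (default) interface methods; in older class files every interface
+    // method must be public and abstract.  On abstract interface methods,
+    // synchronized and strictfp remain illegal.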
+    if (!is_public || is_static || is_final || is_native ||
+        ((is_synchronized || is_strict) && major_gte_15 &&
+            (!major_gte_8 || is_abstract)) ||
+        (!major_gte_8 && !is_abstract)) {
       is_illegal = true;
     }
   } else { // not interface
--- a/src/share/vm/classfile/classFileParser.hpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/classfile/classFileParser.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -151,6 +151,7 @@
                                   ClassLoaderData* loader_data,
                                   Handle protection_domain,
                                   Symbol* class_name,
+                                  bool* has_default_methods,
                                   TRAPS);
   void record_defined_class_dependencies(instanceKlassHandle defined_klass, TRAPS);
 
@@ -188,6 +189,7 @@
                                 Array<AnnotationArray*>** methods_annotations,
                                 Array<AnnotationArray*>** methods_parameter_annotations,
                                 Array<AnnotationArray*>** methods_default_annotations,
+                                bool* has_default_methods,
                                 TRAPS);
   Array<int>* sort_methods(ClassLoaderData* loader_data,
                            Array<Method*>* methods,
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/defaultMethods.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -0,0 +1,1387 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/bytecodeAssembler.hpp"
+#include "classfile/defaultMethods.hpp"
+#include "classfile/genericSignatures.hpp"
+#include "classfile/symbolTable.hpp"
+#include "memory/allocation.hpp"
+#include "memory/metadataFactory.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/signature.hpp"
+#include "runtime/thread.hpp"
+#include "oops/instanceKlass.hpp"
+#include "oops/klass.hpp"
+#include "oops/method.hpp"
+#include "utilities/accessFlags.hpp"
+#include "utilities/exceptions.hpp"
+#include "utilities/ostream.hpp"
+#include "utilities/pair.hpp"
+#include "utilities/resourceHash.hpp"
+
+typedef enum { QUALIFIED, DISQUALIFIED } QualifiedState;
+
+// Because we use an iterative algorithm when iterating over the type
+// hierarchy, we can't use traditional scoped objects which automatically do
+// cleanup in the destructor when the scope is exited.  PseudoScope (and
+// PseudoScopeMark) provide similar functionality, but for when you want a
+// scoped object in non-stack memory (such as in resource memory, as we do
+// here).  You've just got to remember to call 'destroy()' on the scope when
+// leaving it (and marks have to be explicitly added).
+class PseudoScopeMark : public ResourceObj {
+ public:
+  virtual void destroy() = 0;
+};
+
+class PseudoScope : public ResourceObj {
+ private:
+  GrowableArray<PseudoScopeMark*> _marks;
+ public:
+
+  static PseudoScope* cast(void* data) {
+    return static_cast<PseudoScope*>(data);
+  }
+
+  void add_mark(PseudoScopeMark* psm) {
+   _marks.append(psm);
+  }
+
+  void destroy() {
+    for (int i = 0; i < _marks.length(); ++i) {
+      _marks.at(i)->destroy();
+    }
+  }
+};
+
+class ContextMark : public PseudoScopeMark {
+ private:
+  generic::Context::Mark _mark;
+ public:
+  ContextMark(const generic::Context::Mark& cm) : _mark(cm) {}
+  virtual void destroy() { _mark.destroy(); }
+};
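+
+// Illustrative sketch (variable names assumed): the scope lives in resource
+// memory, marks are added as they are created, and destroy() tears them all
+// down when the iteration leaves the corresponding point in the hierarchy.
+//
+//   PseudoScope* scope = new PseudoScope();
+//   scope->add_mark(new ContextMark(ctx->mark()));
+//   /* ... descend into the superclass and superinterfaces ... */
+//   scope->destroy();   // runs destroy() on every registered mark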
+
+#ifndef PRODUCT
+static void print_slot(outputStream* str, Symbol* name, Symbol* signature) {
+  ResourceMark rm;
+  str->print("%s%s", name->as_C_string(), signature->as_C_string());
+}
+
+static void print_method(outputStream* str, Method* mo, bool with_class=true) {
+  ResourceMark rm;
+  if (with_class) {
+    str->print("%s.", mo->klass_name()->as_C_string());
+  }
+  print_slot(str, mo->name(), mo->signature());
+}
+#endif // ndef PRODUCT
+
+/**
+ * Perform a depth-first iteration over the class hierarchy, applying
+ * algorithmic logic as it goes.
+ *
+ * This class is one half of the inheritance hierarchy analysis mechanism.
+ * It is meant to be used in conjunction with another class, the algorithm,
+ * which is indicated by the ALGO template parameter.  This class can be
+ * paired with any algorithm class that provides the required methods.
+ *
+ * This class contains all the mechanics for iterating over the class hierarchy
+ * starting at a particular root, without recursing (thus limiting stack growth
+ * from this point).  It visits each superclass (if present) and superinterface
+ * in a depth-first manner, with callbacks to the ALGO class as each class is
+ * encountered (visit()).  The algorithm can cut off further exploration of a
+ * particular branch by returning 'false' from a visit() call.
+ *
+ * The ALGO class must provide a visit() method, which will be called once
+ * for each node in the inheritance tree during the iteration.  In
+ * addition, it can provide a memory block via new_node_data(InstanceKlass*),
+ * which it can use for node-specific storage (and access via the
+ * current_data() and data_at_depth(int) methods).
+ *
+ * Bare minimum needed to be an ALGO class:
+ * class Algo : public HierarchyVisitor<Algo> {
+ *   void* new_node_data(InstanceKlass* cls) { return NULL; }
+ *   void free_node_data(void* data) { return; }
+ *   bool visit() { return true; }
+ * };
+ */
+template <class ALGO>
+class HierarchyVisitor : StackObj {
+ private:
+
+  class Node : public ResourceObj {
+   public:
+    InstanceKlass* _class;
+    bool _super_was_visited;
+    int _interface_index;
+    void* _algorithm_data;
+
+    Node(InstanceKlass* cls, void* data, bool visit_super)
+        : _class(cls), _super_was_visited(!visit_super),
+          _interface_index(0), _algorithm_data(data) {}
+
+    int number_of_interfaces() { return _class->local_interfaces()->length(); }
+    int interface_index() { return _interface_index; }
+    void set_super_visited() { _super_was_visited = true; }
+    void increment_visited_interface() { ++_interface_index; }
+    void set_all_interfaces_visited() {
+      _interface_index = number_of_interfaces();
+    }
+    bool has_visited_super() { return _super_was_visited; }
+    bool has_visited_all_interfaces() {
+      return interface_index() >= number_of_interfaces();
+    }
+    InstanceKlass* interface_at(int index) {
+      return InstanceKlass::cast(_class->local_interfaces()->at(index));
+    }
+    InstanceKlass* next_super() { return _class->java_super(); }
+    InstanceKlass* next_interface() {
+      return interface_at(interface_index());
+    }
+  };
+
+  bool _cancelled;
+  GrowableArray<Node*> _path;
+
+  Node* current_top() const { return _path.top(); }
+  bool has_more_nodes() const { return !_path.is_empty(); }
+  void push(InstanceKlass* cls, void* data) {
+    assert(cls != NULL, "Requires a valid instance class");
+    Node* node = new Node(cls, data, has_super(cls));
+    _path.push(node);
+  }
+  void pop() { _path.pop(); }
+
+  void reset_iteration() {
+    _cancelled = false;
+    _path.clear();
+  }
+  bool is_cancelled() const { return _cancelled; }
+
+  static bool has_super(InstanceKlass* cls) {
+    return cls->super() != NULL && !cls->is_interface();
+  }
+
+  Node* node_at_depth(int i) const {
+    return (i >= _path.length()) ? NULL : _path.at(_path.length() - i - 1);
+  }
+
+ protected:
+
+  // Accessors available to the algorithm
+  int current_depth() const { return _path.length() - 1; }
+
+  InstanceKlass* class_at_depth(int i) {
+    Node* n = node_at_depth(i);
+    return n == NULL ? NULL : n->_class;
+  }
+  InstanceKlass* current_class() { return class_at_depth(0); }
+
+  void* data_at_depth(int i) {
+    Node* n = node_at_depth(i);
+    return n == NULL ? NULL : n->_algorithm_data;
+  }
+  void* current_data() { return data_at_depth(0); }
+
+  void cancel_iteration() { _cancelled = true; }
+
+ public:
+
+  void run(InstanceKlass* root) {
+    ALGO* algo = static_cast<ALGO*>(this);
+
+    reset_iteration();
+
+    void* algo_data = algo->new_node_data(root);
+    push(root, algo_data);
+    bool top_needs_visit = true;
+
+    do {
+      Node* top = current_top();
+      if (top_needs_visit) {
+        if (algo->visit() == false) {
+          // algorithm does not want to continue along this path.  Arrange
+          // it so that this state is immediately popped off the stack
+          top->set_super_visited();
+          top->set_all_interfaces_visited();
+        }
+        top_needs_visit = false;
+      }
+
+      if (top->has_visited_super() && top->has_visited_all_interfaces()) {
+        algo->free_node_data(top->_algorithm_data);
+        pop();
+      } else {
+        InstanceKlass* next = NULL;
+        if (top->has_visited_super() == false) {
+          next = top->next_super();
+          top->set_super_visited();
+        } else {
+          next = top->next_interface();
+          top->increment_visited_interface();
+        }
+        assert(next != NULL, "Otherwise we shouldn't be here");
+        algo_data = algo->new_node_data(next);
+        push(next, algo_data);
+        top_needs_visit = true;
+      }
+    } while (!is_cancelled() && has_more_nodes());
+  }
+};
+
+#ifndef PRODUCT
+class PrintHierarchy : public HierarchyVisitor<PrintHierarchy> {
+ public:
+
+  bool visit() {
+    InstanceKlass* cls = current_class();
+    streamIndentor si(tty, current_depth() * 2);
+    tty->indent().print_cr("%s", cls->name()->as_C_string());
+    return true;
+  }
+
+  void* new_node_data(InstanceKlass* cls) { return NULL; }
+  void free_node_data(void* data) { return; }
+};
+#endif // ndef PRODUCT
+
+// Used to register InstanceKlass objects and all related metadata structures
+// (Methods, ConstantPools) as "in-use" by the current thread so that they can't
+// be deallocated by class redefinition while we're using them.  The classes are
+// de-registered when this goes out of scope.
+//
+// Once a class is registered, we need not bother with methodHandles or
+// constantPoolHandles for its associated metadata.
+class KeepAliveRegistrar : public StackObj {
+ private:
+  Thread* _thread;
+  GrowableArray<ConstantPool*> _keep_alive;
+
+ public:
+  KeepAliveRegistrar(Thread* thread) : _thread(thread), _keep_alive(20) {
+    assert(thread == Thread::current(), "Must be current thread");
+  }
+
+  ~KeepAliveRegistrar() {
+    for (int i = _keep_alive.length() - 1; i >= 0; --i) {
+      ConstantPool* cp = _keep_alive.at(i);
+      int idx = _thread->metadata_handles()->find_from_end(cp);
+      assert(idx > 0, "Must be in the list");
+      _thread->metadata_handles()->remove_at(idx);
+    }
+  }
+
+  // Register a class as 'in-use' by the thread.  It's fine to register a class
+  // multiple times (though perhaps inefficient)
+  void register_class(InstanceKlass* ik) {
+    ConstantPool* cp = ik->constants();
+    _keep_alive.push(cp);
+    _thread->metadata_handles()->push(cp);
+  }
+};
+
+class KeepAliveVisitor : public HierarchyVisitor<KeepAliveVisitor> {
+ private:
+  KeepAliveRegistrar* _registrar;
+
+ public:
+  KeepAliveVisitor(KeepAliveRegistrar* registrar) : _registrar(registrar) {}
+
+  void* new_node_data(InstanceKlass* cls) { return NULL; }
+  void free_node_data(void* data) { return; }
+
+  bool visit() {
+    _registrar->register_class(current_class());
+    return true;
+  }
+};
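+
+// Typical use (sketch): pin a class and all of its supertypes before
+// analyzing the hierarchy, so redefinition cannot free their metadata.
+//
+//   KeepAliveRegistrar keepAlive(THREAD);
+//   KeepAliveVisitor loadKeepAlive(&keepAlive);
+//   loadKeepAlive.run(klass);  // registers every class in the hierarchy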
+
+// A method family contains a set of all methods that implement a single
+// language-level method.  Because of erasure, these methods may have different
+// signatures.  As members of the set are collected while walking over the
+// hierarchy, they are tagged with a qualification state.  The qualification
+// state for an erased method is set to disqualified if there exists a path
+// from the root of the hierarchy to the method that contains an interleaving
+// language-equivalent method defined in an interface.
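+//
+// For example: if unrelated interfaces I and J each declare a default method
+// m()V and a class implements both, the family for m()V collects I.m and J.m
+// as qualified members, and determine_target() below reports a conflict.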
+class MethodFamily : public ResourceObj {
+ private:
+
+  generic::MethodDescriptor* _descriptor; // language-level description
+  GrowableArray<Pair<Method*,QualifiedState> > _members;
+  ResourceHashtable<Method*, int> _member_index;
+
+  Method* _selected_target;  // Filled in later, if a unique target exists
+  Symbol* _exception_message; // If no unique target is found
+
+  bool contains_method(Method* method) {
+    int* lookup = _member_index.get(method);
+    return lookup != NULL;
+  }
+
+  void add_method(Method* method, QualifiedState state) {
+    Pair<Method*,QualifiedState> entry(method, state);
+    _member_index.put(method, _members.length());
+    _members.append(entry);
+  }
+
+  void disqualify_method(Method* method) {
+    int* index = _member_index.get(method);
+    assert(index != NULL && *index >= 0 && *index < _members.length(), "bad index");
+    _members.at(*index).second = DISQUALIFIED;
+  }
+
+  Symbol* generate_no_defaults_message(TRAPS) const;
+  Symbol* generate_abstract_method_message(Method* method, TRAPS) const;
+  Symbol* generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const;
+
+ public:
+
+  MethodFamily(generic::MethodDescriptor* canonical_desc)
+      : _descriptor(canonical_desc), _selected_target(NULL),
+        _exception_message(NULL) {}
+
+  generic::MethodDescriptor* descriptor() const { return _descriptor; }
+
+  bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) {
+    return descriptor()->covariant_match(md, ctx);
+  }
+
+  void set_target_if_empty(Method* m) {
+    if (_selected_target == NULL && !m->is_overpass()) {
+      _selected_target = m;
+    }
+  }
+
+  void record_qualified_method(Method* m) {
+    // If the method already exists in the set as qualified, this operation is
+    // redundant.  If it already exists as disqualified, then we leave it as
+    // disqualified.  Thus we only add to the set if it's not already in the
+    // set.
+    if (!contains_method(m)) {
+      add_method(m, QUALIFIED);
+    }
+  }
+
+  void record_disqualified_method(Method* m) {
+    // If not in the set, add it as disqualified.  If it's already in the set,
+    // then set the state to disqualified no matter what the previous state was.
+    if (!contains_method(m)) {
+      add_method(m, DISQUALIFIED);
+    } else {
+      disqualify_method(m);
+    }
+  }
+
+  bool has_target() const { return _selected_target != NULL; }
+  bool throws_exception() { return _exception_message != NULL; }
+
+  Method* get_selected_target() { return _selected_target; }
+  Symbol* get_exception_message() { return _exception_message; }
+
+  // Either sets the target or the exception error message
+  void determine_target(InstanceKlass* root, TRAPS) {
+    if (has_target() || throws_exception()) {
+      return;
+    }
+
+    GrowableArray<Method*> qualified_methods;
+    for (int i = 0; i < _members.length(); ++i) {
+      Pair<Method*,QualifiedState> entry = _members.at(i);
+      if (entry.second == QUALIFIED) {
+        qualified_methods.append(entry.first);
+      }
+    }
+
+    if (qualified_methods.length() == 0) {
+      _exception_message = generate_no_defaults_message(CHECK);
+    } else if (qualified_methods.length() == 1) {
+      Method* method = qualified_methods.at(0);
+      if (method->is_abstract()) {
+        _exception_message = generate_abstract_method_message(method, CHECK);
+      } else {
+        _selected_target = qualified_methods.at(0);
+      }
+    } else {
+      _exception_message = generate_conflicts_message(&qualified_methods,CHECK);
+    }
+
+    assert((has_target() ^ throws_exception()) == 1,
+           "One and only one must be true");
+  }
+
+  bool contains_signature(Symbol* query) {
+    for (int i = 0; i < _members.length(); ++i) {
+      if (query == _members.at(i).first->signature()) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+#ifndef PRODUCT
+  void print_on(outputStream* str) const {
+    print_on(str, 0);
+  }
+
+  void print_on(outputStream* str, int indent) const {
+    streamIndentor si(str, indent * 2);
+
+    generic::Context ctx(NULL); // empty, as _descriptor already canonicalized
+    TempNewSymbol family = descriptor()->reify_signature(&ctx, Thread::current());
+    str->indent().print_cr("Logical Method %s:", family->as_C_string());
+
+    streamIndentor si2(str);
+    for (int i = 0; i < _members.length(); ++i) {
+      str->indent();
+      print_method(str, _members.at(i).first);
+      if (_members.at(i).second == DISQUALIFIED) {
+        str->print(" (disqualified)");
+      }
+      str->print_cr("");
+    }
+
+    if (_selected_target != NULL) {
+      print_selected(str, 1);
+    }
+  }
+
+  void print_selected(outputStream* str, int indent) const {
+    assert(has_target(), "Should be called otherwise");
+    streamIndentor si(str, indent * 2);
+    str->indent().print("Selected method: ");
+    print_method(str, _selected_target);
+    str->print_cr("");
+  }
+
+  void print_exception(outputStream* str, int indent) {
+    assert(throws_exception(), "Should be called otherwise");
+    streamIndentor si(str, indent * 2);
+    str->indent().print_cr("%s", _exception_message->as_C_string());
+  }
+#endif // ndef PRODUCT
+};
+
+Symbol* MethodFamily::generate_no_defaults_message(TRAPS) const {
+  return SymbolTable::new_symbol("No qualifying defaults found", CHECK_NULL);
+}
+
+Symbol* MethodFamily::generate_abstract_method_message(Method* method, TRAPS) const {
+  Symbol* klass = method->klass_name();
+  Symbol* name = method->name();
+  Symbol* sig = method->signature();
+  stringStream ss;
+  ss.print("Method ");
+  ss.write((const char*)klass->bytes(), klass->utf8_length());
+  ss.print(".");
+  ss.write((const char*)name->bytes(), name->utf8_length());
+  ss.write((const char*)sig->bytes(), sig->utf8_length());
+  ss.print(" is abstract");
+  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
+}
+
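+// Example outputs (hypothetical class names): the abstract case above yields
+// "Method A.m()V is abstract"; the conflicts case below yields
+// "Conflicting default methods: A.m B.m".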
+Symbol* MethodFamily::generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const {
+  stringStream ss;
+  ss.print("Conflicting default methods:");
+  for (int i = 0; i < methods->length(); ++i) {
+    Method* method = methods->at(i);
+    Symbol* klass = method->klass_name();
+    Symbol* name = method->name();
+    ss.print(" ");
+    ss.write((const char*)klass->bytes(), klass->utf8_length());
+    ss.print(".");
+    ss.write((const char*)name->bytes(), name->utf8_length());
+  }
+  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
+}
+
+class StateRestorer;
+
+// StatefulMethodFamily is a wrapper around MethodFamily that maintains the
+// qualification state during hierarchy visitation, and applies that state
+// when adding members to the MethodFamily.
+class StatefulMethodFamily : public ResourceObj {
+  friend class StateRestorer;
+ private:
+  MethodFamily* _method;
+  QualifiedState _qualification_state;
+
+  void set_qualification_state(QualifiedState state) {
+    _qualification_state = state;
+  }
+
+ public:
+  StatefulMethodFamily(generic::MethodDescriptor* md, generic::Context* ctx) {
+    _method = new MethodFamily(md->canonicalize(ctx));
+    _qualification_state = QUALIFIED;
+  }
+
+  void set_target_if_empty(Method* m) { _method->set_target_if_empty(m); }
+
+  MethodFamily* get_method_family() { return _method; }
+
+  bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) {
+    return _method->descriptor_matches(md, ctx);
+  }
+
+  StateRestorer* record_method_and_dq_further(Method* mo);
+};
+
+class StateRestorer : public PseudoScopeMark {
+ private:
+  StatefulMethodFamily* _method;
+  QualifiedState _state_to_restore;
+ public:
+  StateRestorer(StatefulMethodFamily* dm, QualifiedState state)
+      : _method(dm), _state_to_restore(state) {}
+  ~StateRestorer() { destroy(); }
+  void restore_state() { _method->set_qualification_state(_state_to_restore); }
+  virtual void destroy() { restore_state(); }
+};
+
+StateRestorer* StatefulMethodFamily::record_method_and_dq_further(Method* mo) {
+  StateRestorer* mark = new StateRestorer(this, _qualification_state);
+  if (_qualification_state == QUALIFIED) {
+    _method->record_qualified_method(mo);
+  } else {
+    _method->record_disqualified_method(mo);
+  }
+  // Everything found "above"??? this method in the hierarchy walk is set to
+  // disqualified
+  set_qualification_state(DISQUALIFIED);
+  return mark;
+}
+
+class StatefulMethodFamilies : public ResourceObj {
+ private:
+  GrowableArray<StatefulMethodFamily*> _methods;
+
+ public:
+  StatefulMethodFamily* find_matching(
+      generic::MethodDescriptor* md, generic::Context* ctx) {
+    for (int i = 0; i < _methods.length(); ++i) {
+      StatefulMethodFamily* existing = _methods.at(i);
+      if (existing->descriptor_matches(md, ctx)) {
+        return existing;
+      }
+    }
+    return NULL;
+  }
+
+  StatefulMethodFamily* find_matching_or_create(
+      generic::MethodDescriptor* md, generic::Context* ctx) {
+    StatefulMethodFamily* method = find_matching(md, ctx);
+    if (method == NULL) {
+      method = new StatefulMethodFamily(md, ctx);
+      _methods.append(method);
+    }
+    return method;
+  }
+
+  void extract_families_into(GrowableArray<MethodFamily*>* array) {
+    for (int i = 0; i < _methods.length(); ++i) {
+      array->append(_methods.at(i)->get_method_family());
+    }
+  }
+};
+
+// Represents a location corresponding to a vtable slot for methods that
+// neither the class nor any of its ancestors provides an implementation for.
+// Default methods may be present to fill this slot.
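+//
+// Example (a Java sketch, not part of this change):
+//
+//   interface I { default void m() {} }
+//   class C implements I {}            // no declaration of m()
+//
+// C has an empty vtable slot for m()V that default method processing may
+// fill with an overpass redirecting to I.m.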
+class EmptyVtableSlot : public ResourceObj {
+ private:
+  Symbol* _name;
+  Symbol* _signature;
+  int _size_of_parameters;
+  MethodFamily* _binding;
+
+ public:
+  EmptyVtableSlot(Method* method)
+      : _name(method->name()), _signature(method->signature()),
+        _size_of_parameters(method->size_of_parameters()), _binding(NULL) {}
+
+  Symbol* name() const { return _name; }
+  Symbol* signature() const { return _signature; }
+  int size_of_parameters() const { return _size_of_parameters; }
+
+  void bind_family(MethodFamily* lm) { _binding = lm; }
+  bool is_bound() { return _binding != NULL; }
+  MethodFamily* get_binding() { return _binding; }
+
+#ifndef PRODUCT
+  void print_on(outputStream* str) const {
+    print_slot(str, name(), signature());
+  }
+#endif // ndef PRODUCT
+};
+
+static GrowableArray<EmptyVtableSlot*>* find_empty_vtable_slots(
+    InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS) {
+
+  assert(klass != NULL, "Must be valid class");
+
+  GrowableArray<EmptyVtableSlot*>* slots = new GrowableArray<EmptyVtableSlot*>();
+
+  // All miranda methods are obvious candidates
+  for (int i = 0; i < mirandas->length(); ++i) {
+    EmptyVtableSlot* slot = new EmptyVtableSlot(mirandas->at(i));
+    slots->append(slot);
+  }
+
+  // Also any overpasses in our superclasses, that we haven't implemented.
+  // (can't use the vtable because it is not guaranteed to be initialized yet)
+  InstanceKlass* super = klass->java_super();
+  while (super != NULL) {
+    for (int i = 0; i < super->methods()->length(); ++i) {
+      Method* m = super->methods()->at(i);
+      if (m->is_overpass()) {
+        // m is a method that would have been a miranda if not for the
+        // default method processing that occurred on behalf of our superclass,
+        // so it's a method we want to re-examine in this new context.  That is,
+        // unless we have a real implementation of it in the current class.
+        Method* impl = klass->lookup_method(m->name(), m->signature());
+        if (impl == NULL || impl->is_overpass()) {
+          slots->append(new EmptyVtableSlot(m));
+        }
+      }
+    }
+    super = super->java_super();
+  }
+
+#ifndef PRODUCT
+  if (TraceDefaultMethods) {
+    tty->print_cr("Slots that need filling:");
+    streamIndentor si(tty);
+    for (int i = 0; i < slots->length(); ++i) {
+      tty->indent();
+      slots->at(i)->print_on(tty);
+      tty->print_cr("");
+    }
+  }
+#endif // ndef PRODUCT
+  return slots;
+}
+
+// Iterates over the type hierarchy looking for all methods with a specific
+// method name.  The result of this is a set of method families, each of
+// which is populated with a set of methods that implement the same
+// language-level signature.
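+//
+// For example (a sketch): a search for name "m" over a hierarchy that
+// declares both <T> void m(T) and void m(int) yields two families; erased
+// redeclarations of the generic m() all collect into the first family.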
+class FindMethodsByName : public HierarchyVisitor<FindMethodsByName> {
+ private:
+  // Context data
+  Thread* THREAD;
+  generic::DescriptorCache* _cache;
+  Symbol* _method_name;
+  generic::Context* _ctx;
+  StatefulMethodFamilies _families;
+
+ public:
+
+  FindMethodsByName(generic::DescriptorCache* cache, Symbol* name,
+      generic::Context* ctx, Thread* thread) :
+    THREAD(thread), _cache(cache), _method_name(name), _ctx(ctx) {}
+
+  void get_discovered_families(GrowableArray<MethodFamily*>* methods) {
+    _families.extract_families_into(methods);
+  }
+
+  void* new_node_data(InstanceKlass* cls) { return new PseudoScope(); }
+  void free_node_data(void* node_data) {
+    PseudoScope::cast(node_data)->destroy();
+  }
+
+  bool visit() {
+    PseudoScope* scope = PseudoScope::cast(current_data());
+    InstanceKlass* klass = current_class();
+    InstanceKlass* sub = current_depth() > 0 ? class_at_depth(1) : NULL;
+
+    ContextMark* cm = new ContextMark(_ctx->mark());
+    scope->add_mark(cm); // will restore context when scope is freed
+
+    _ctx->apply_type_arguments(sub, klass, THREAD);
+
+    int end = 0;
+    int start = klass->find_method_by_name(_method_name, &end);
+    if (start != -1) {
+      for (int i = start; i < end; ++i) {
+        Method* m = klass->methods()->at(i);
+        // This gets the method's parameter list with its generic type
+        // parameters resolved
+        generic::MethodDescriptor* md = _cache->descriptor_for(m, THREAD);
+
+        // Find all methods on this hierarchy that match this method
+        // (name, signature).   This class collects other families of this
+        // method name.
+        StatefulMethodFamily* family =
+            _families.find_matching_or_create(md, _ctx);
+
+        if (klass->is_interface()) {
+          // Record this method as a candidate; any matching method found
+          // further up this path of the walk will be disqualified.
+          StateRestorer* restorer = family->record_method_and_dq_further(m);
+          scope->add_mark(restorer);
+        } else {
+          // This enforces the rule that methods in classes take precedence
+          // over methods in interfaces.  This works because of single
+          // (class) inheritance.
+          family->set_target_if_empty(m);
+        }
+      }
+    }
+    return true;
+  }
+};
+
+#ifndef PRODUCT
+static void print_families(
+    GrowableArray<MethodFamily*>* methods, Symbol* match) {
+  streamIndentor si(tty, 4);
+  if (methods->length() == 0) {
+    tty->indent();
+    tty->print_cr("No Logical Method found");
+  }
+  for (int i = 0; i < methods->length(); ++i) {
+    tty->indent();
+    MethodFamily* lm = methods->at(i);
+    if (lm->contains_signature(match)) {
+      tty->print_cr("<Matching>");
+    } else {
+      tty->print_cr("<Non-Matching>");
+    }
+    lm->print_on(tty, 1);
+  }
+}
+#endif // ndef PRODUCT
+
+static void merge_in_new_methods(InstanceKlass* klass,
+    GrowableArray<Method*>* new_methods, TRAPS);
+static void create_overpasses(
+    GrowableArray<EmptyVtableSlot*>* slots, InstanceKlass* klass, TRAPS);
+
+// This is the guts of the default methods implementation.  This is called just
+// after the classfile has been parsed if some ancestor has default methods.
+//
+// First it finds any name/signature slots that need an implementation (either
+// because they are miranda or a superclass's implementation is an overpass
+// itself).  For each slot, iterate over the hierarchy, using generic signature
+// information to partition any methods that match the name into method families
+// where each family contains methods whose signatures are equivalent at the
+// language level (i.e., their reified parameters match and return values are
+// covariant). Check those sets to see if they contain a signature that matches
+// the slot we're looking at (if we're lucky, there might be other empty slots
+// that we can fill using the same analysis).
+//
+// For each slot filled, we generate an overpass method that either calls the
+// unique default method candidate using invokespecial, or throws an exception
+// (in the case of no default method candidates, or more than one valid
+// candidate).  These methods are then added to the class's method list.  If
+// the method set we're using contains methods (qualified or not) with a
+// different runtime signature than the method we're creating, then we have to
+// create bridges with those signatures too.
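+//
+// For instance (a sketch): if the family chosen for slot m(Ljava/lang/Object;)V
+// also contains a member whose erased signature is m(Ljava/lang/String;)V,
+// an overpass with that second signature is generated as well, so both
+// runtime signatures reach the same selected target.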
+void DefaultMethods::generate_default_methods(
+    InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS) {
+
+  // This resource mark is the bound for all memory allocation that takes
+  // place during default method processing.  After this goes out of scope,
+  // all (Resource) objects' memory will be reclaimed.  Be careful if adding an
+  // embedded resource mark under here as that memory can't be used outside
+  // whatever scope it's in.
+  ResourceMark rm(THREAD);
+
+  generic::DescriptorCache cache;
+
+  // Keep entire hierarchy alive for the duration of the computation
+  KeepAliveRegistrar keepAlive(THREAD);
+  KeepAliveVisitor loadKeepAlive(&keepAlive);
+  loadKeepAlive.run(klass);
+
+#ifndef PRODUCT
+  if (TraceDefaultMethods) {
+    ResourceMark rm;  // be careful with these!
+    tty->print_cr("Class %s requires default method processing",
+        klass->name()->as_klass_external_name());
+    PrintHierarchy printer;
+    printer.run(klass);
+  }
+#endif // ndef PRODUCT
+
+  GrowableArray<EmptyVtableSlot*>* empty_slots =
+      find_empty_vtable_slots(klass, mirandas, CHECK);
+
+  for (int i = 0; i < empty_slots->length(); ++i) {
+    EmptyVtableSlot* slot = empty_slots->at(i);
+#ifndef PRODUCT
+    if (TraceDefaultMethods) {
+      streamIndentor si(tty, 2);
+      tty->indent().print("Looking for default methods for slot ");
+      slot->print_on(tty);
+      tty->print_cr("");
+    }
+#endif // ndef PRODUCT
+    if (slot->is_bound()) {
+#ifndef PRODUCT
+      if (TraceDefaultMethods) {
+        streamIndentor si(tty, 4);
+        tty->indent().print_cr("Already bound to logical method:");
+        slot->get_binding()->print_on(tty, 1);
+      }
+#endif // ndef PRODUCT
+      continue; // covered by previous processing
+    }
+
+    generic::Context ctx(&cache);
+    FindMethodsByName visitor(&cache, slot->name(), &ctx, CHECK);
+    visitor.run(klass);
+
+    GrowableArray<MethodFamily*> discovered_families;
+    visitor.get_discovered_families(&discovered_families);
+
+#ifndef PRODUCT
+    if (TraceDefaultMethods) {
+      print_families(&discovered_families, slot->signature());
+    }
+#endif // ndef PRODUCT
+
+    // Find and populate any other slots that match the discovered families
+    for (int j = i; j < empty_slots->length(); ++j) {
+      EmptyVtableSlot* open_slot = empty_slots->at(j);
+
+      if (slot->name() == open_slot->name()) {
+        for (int k = 0; k < discovered_families.length(); ++k) {
+          MethodFamily* lm = discovered_families.at(k);
+
+          if (lm->contains_signature(open_slot->signature())) {
+            lm->determine_target(klass, CHECK);
+            open_slot->bind_family(lm);
+          }
+        }
+      }
+    }
+  }
+
+#ifndef PRODUCT
+  if (TraceDefaultMethods) {
+    tty->print_cr("Creating overpasses...");
+  }
+#endif // ndef PRODUCT
+
+  create_overpasses(empty_slots, klass, CHECK);
+
+#ifndef PRODUCT
+  if (TraceDefaultMethods) {
+    tty->print_cr("Default method processing complete");
+  }
+#endif // ndef PRODUCT
+}
+
+
+/**
+ * Generic analysis was used upon interface '_target' and found a unique
+ * default method candidate with generic signature '_method_desc'.  This
+ * method is only viable if it would also be in the set of default method
+ * candidates if we ran a full analysis on the current class.
+ *
+ * The only reason that the method would not be in the set of candidates for
+ * the current class is that there's another covariantly matching method
+ * which is "more specific" than the found method -- i.e., one could find a
+ * path in the interface hierarchy in which the matching method appears
+ * before we get to '_target'.
+ *
+ * In order to determine this, we examine all of the implemented
+ * interfaces.  If we find a path that leads to the '_target' interface, then
+ * we examine that path to see if there are any methods that would shadow
+ * the selected method along that path.
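+ *
+ * Illustration (a Java sketch, not part of this change):
+ *
+ *   interface A { default void m() {} }
+ *   interface B extends A { default void m() {} }
+ *   class C implements A, B { ... A.super.m() ... }
+ *
+ * Walking up from C we find the path C -> B -> A; B.m covariantly matches
+ * and appears on that path before '_target' (A), so it shadows A.m and the
+ * found candidate is not viable.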
+ */
+class ShadowChecker : public HierarchyVisitor<ShadowChecker> {
+ private:
+  generic::DescriptorCache* _cache;
+  Thread* THREAD;
+
+  InstanceKlass* _target;
+
+  Symbol* _method_name;
+  InstanceKlass* _method_holder;
+  generic::MethodDescriptor* _method_desc;
+  bool _found_shadow;
+
+  bool path_has_shadow() {
+    generic::Context ctx(_cache);
+
+    for (int i = current_depth() - 1; i > 0; --i) {
+      InstanceKlass* ik = class_at_depth(i);
+      InstanceKlass* sub = class_at_depth(i + 1);
+      ctx.apply_type_arguments(sub, ik, THREAD);
+
+      if (ik->is_interface()) {
+        int end;
+        int start = ik->find_method_by_name(_method_name, &end);
+        if (start != -1) {
+          for (int j = start; j < end; ++j) {
+            Method* mo = ik->methods()->at(j);
+            generic::MethodDescriptor* md = _cache->descriptor_for(mo, THREAD);
+            if (_method_desc->covariant_match(md, &ctx)) {
+              return true;
+            }
+          }
+        }
+      }
+    }
+    return false;
+  }
+
+ public:
+
+  ShadowChecker(generic::DescriptorCache* cache, Thread* thread,
+      Symbol* name, InstanceKlass* holder, generic::MethodDescriptor* desc,
+      InstanceKlass* target)
+    : _cache(cache), THREAD(thread), _target(target), _method_name(name),
+      _method_holder(holder), _method_desc(desc), _found_shadow(false) {}
+
+  void* new_node_data(InstanceKlass* cls) { return NULL; }
+  void free_node_data(void* data) { return; }
+
+  bool visit() {
+    InstanceKlass* ik = current_class();
+    if (ik == _target && current_depth() == 1) {
+      return false; // This was the specified super -- no need to search it
+    }
+    if (ik == _method_holder || ik == _target) {
+      // We found a path that should be examined to see if it shadows _method
+      if (path_has_shadow()) {
+        _found_shadow = true;
+        cancel_iteration();
+      }
+      return false; // no need to continue up hierarchy
+    }
+    return true;
+  }
+
+  bool found_shadow() { return _found_shadow; }
+};
+
+// This is called at link time when we find an invokespecial call that
+// refers to a direct superinterface.  It indicates that we should find the
+// default method in the hierarchy of that superinterface, and if that method
+// would have been a candidate from the point of view of 'this' class, then we
+// return that method.
+Method* DefaultMethods::find_super_default(
+    Klass* cls, Klass* super, Symbol* method_name, Symbol* sig, TRAPS) {
+
+  ResourceMark rm(THREAD);
+
+  assert(cls != NULL && super != NULL, "Need real classes");
+
+  InstanceKlass* current_class = InstanceKlass::cast(cls);
+  InstanceKlass* direction = InstanceKlass::cast(super);
+
+  // Keep entire hierarchy alive for the duration of the computation
+  KeepAliveRegistrar keepAlive(THREAD);
+  KeepAliveVisitor loadKeepAlive(&keepAlive);
+  loadKeepAlive.run(current_class);
+
+#ifndef PRODUCT
+  if (TraceDefaultMethods) {
+    tty->print_cr("Finding super default method %s.%s%s from %s",
+      direction->name()->as_C_string(),
+      method_name->as_C_string(), sig->as_C_string(),
+      current_class->name()->as_C_string());
+  }
+#endif // ndef PRODUCT
+
+  if (!direction->is_interface()) {
+    // We should not be here
+    return NULL;
+  }
+
+  generic::DescriptorCache cache;
+  generic::Context ctx(&cache);
+
+  // Prime the initial generic context for current -> direction
+  ctx.apply_type_arguments(current_class, direction, CHECK_NULL);
+
+  FindMethodsByName visitor(&cache, method_name, &ctx, CHECK_NULL);
+  visitor.run(direction);
+
+  GrowableArray<MethodFamily*> families;
+  visitor.get_discovered_families(&families);
+
+#ifndef PRODUCT
+  if (TraceDefaultMethods) {
+    print_families(&families, sig);
+  }
+#endif // ndef PRODUCT
+
+  MethodFamily* selected_family = NULL;
+
+  for (int i = 0; i < families.length(); ++i) {
+    MethodFamily* lm = families.at(i);
+    if (lm->contains_signature(sig)) {
+      lm->determine_target(current_class, CHECK_NULL);
+      selected_family = lm;
+    }
+  }
+
+  if (selected_family == NULL) {
+    // No discovered family matched the signature, so no default exists
+    THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
+               "No qualifying defaults found", NULL);
+  }
+
+  if (selected_family->has_target()) {
+    Method* target = selected_family->get_selected_target();
+    InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
+
+    // Verify that the identified method is valid from the context of
+    // the current class
+    ShadowChecker checker(&cache, THREAD, target->name(),
+        holder, selected_family->descriptor(), direction);
+    checker.run(current_class);
+
+    if (checker.found_shadow()) {
+#ifndef PRODUCT
+      if (TraceDefaultMethods) {
+        tty->print_cr("    Only candidate found was shadowed.");
+      }
+#endif // ndef PRODUCT
+      THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
+                 "Accessible default method not found", NULL);
+    } else {
+#ifndef PRODUCT
+      if (TraceDefaultMethods) {
+        tty->print("    Returning ");
+        print_method(tty, target, true);
+        tty->print_cr("");
+      }
+#endif // ndef PRODUCT
+      return target;
+    }
+  } else {
+    assert(selected_family->throws_exception(), "must have target or throw");
+    THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
+               selected_family->get_exception_message()->as_C_string(), NULL);
+  }
+}
+
+
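+// Assembles the body of an overpass that forwards to 'target'.  For an
+// incoming signature (ILjava/lang/String;)V the generated code is roughly
+// (a sketch):
+//
+//   aload_0                 // 'this'
+//   iload_1                 // int parameter
+//   aload_2                 // String parameter
+//   checkcast <out type>    // only if erased parameter types differ
+//   invokespecial target    // invokevirtual if the holder is a class
+//   return                  // typed return matching the return type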
+static int assemble_redirect(
+    BytecodeConstantPool* cp, BytecodeBuffer* buffer,
+    Symbol* incoming, Method* target, TRAPS) {
+
+  BytecodeAssembler assem(buffer, cp);
+
+  SignatureStream in(incoming, true);
+  SignatureStream out(target->signature(), true);
+  u2 parameter_count = 0;
+
+  assem.aload(parameter_count++); // load 'this'
+
+  while (!in.at_return_type()) {
+    assert(!out.at_return_type(), "Parameter counts do not match");
+    BasicType bt = in.type();
+    assert(out.type() == bt, "Parameter types are not compatible");
+    assem.load(bt, parameter_count);
+    if (in.is_object() && in.as_symbol(THREAD) != out.as_symbol(THREAD)) {
+      assem.checkcast(out.as_symbol(THREAD));
+    } else if (bt == T_LONG || bt == T_DOUBLE) {
+      ++parameter_count; // longs and doubles use two slots
+    }
+    ++parameter_count;
+    in.next();
+    out.next();
+  }
+  assert(out.at_return_type(), "Parameter counts do not match");
+  assert(in.type() == out.type(), "Return types are not compatible");
+
+  if (parameter_count == 1 && (in.type() == T_LONG || in.type() == T_DOUBLE)) {
+    ++parameter_count; // need room for return value
+  }
+  if (target->method_holder()->is_interface()) {
+    assem.invokespecial(target);
+  } else {
+    assem.invokevirtual(target);
+  }
+
+  if (in.is_object() && in.as_symbol(THREAD) != out.as_symbol(THREAD)) {
+    assem.checkcast(in.as_symbol(THREAD));
+  }
+  assem._return(in.type());
+  return parameter_count;
+}
+
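+// Assembles a body that unconditionally throws AbstractMethodError with the
+// given message, i.e. (sketch): new / dup / ldc message /
+// invokespecial AbstractMethodError.<init>(String) / athrow.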
+static int assemble_abstract_method_error(
+    BytecodeConstantPool* cp, BytecodeBuffer* buffer, Symbol* message, TRAPS) {
+
+  Symbol* errorName = vmSymbols::java_lang_AbstractMethodError();
+  Symbol* init = vmSymbols::object_initializer_name();
+  Symbol* sig = vmSymbols::string_void_signature();
+
+  BytecodeAssembler assem(buffer, cp);
+
+  assem._new(errorName);
+  assem.dup();
+  assem.load_string(message);
+  assem.invokespecial(errorName, init, sig);
+  assem.athrow();
+
+  return 3; // max stack size: [ exception, exception, string ]
+}
+
+static Method* new_method(
+    BytecodeConstantPool* cp, BytecodeBuffer* bytecodes, Symbol* name,
+    Symbol* sig, AccessFlags flags, int max_stack, int params,
+    ConstMethod::MethodType mt, TRAPS) {
+
+  address code_start = static_cast<address>(bytecodes->adr_at(0));
+  int code_length = bytecodes->length();
+
+  Method* m = Method::allocate(cp->pool_holder()->class_loader_data(),
+      code_length, flags, 0, 0, 0, 0, mt, CHECK_NULL);
+
+  m->set_constants(NULL); // This will get filled in later
+  m->set_name_index(cp->utf8(name));
+  m->set_signature_index(cp->utf8(sig));
+  m->set_generic_signature_index(0);
+#ifdef CC_INTERP
+  ResultTypeFinder rtf(sig);
+  m->set_result_index(rtf.type());
+#endif
+  m->set_size_of_parameters(params);
+  m->set_max_stack(max_stack);
+  m->set_max_locals(params);
+  m->constMethod()->set_stackmap_data(NULL);
+  m->set_code(code_start);
+  m->set_force_inline(true);
+
+  return m;
+}
+
+static void switchover_constant_pool(BytecodeConstantPool* bpool,
+    InstanceKlass* klass, GrowableArray<Method*>* new_methods, TRAPS) {
+
+  if (new_methods->length() > 0) {
+    ConstantPool* cp = bpool->create_constant_pool(CHECK);
+    if (cp != klass->constants()) {
+      klass->class_loader_data()->add_to_deallocate_list(klass->constants());
+      klass->set_constants(cp);
+      cp->set_pool_holder(klass);
+
+      for (int i = 0; i < new_methods->length(); ++i) {
+        new_methods->at(i)->set_constants(cp);
+      }
+      for (int i = 0; i < klass->methods()->length(); ++i) {
+        Method* mo = klass->methods()->at(i);
+        mo->set_constants(cp);
+      }
+    }
+  }
+}
+
+// A "bridge" is a method created by javac to bridge the gap between
+// an implementation and a generically-compatible, but different, signature.
+// Bridges have an actual bytecode implementation in classfiles.
+// An "overpass", on the other hand, performs the same function as a bridge
+// but does not occur in a classfile; the VM creates the overpass itself
+// when it needs a path from a call site to a default method and no bridge
+// exists.
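+//
+// A javac-generated bridge, for reference (Java sketch, hypothetical names):
+//
+//   interface I<T> { void m(T t); }
+//   class C implements I<String> {
+//     public void m(String s) {}
+//     // javac also emits: public void m(Object o) { m((String)o); }
+//   }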
+static void create_overpasses(
+    GrowableArray<EmptyVtableSlot*>* slots,
+    InstanceKlass* klass, TRAPS) {
+
+  GrowableArray<Method*> overpasses;
+  BytecodeConstantPool bpool(klass->constants());
+
+  for (int i = 0; i < slots->length(); ++i) {
+    EmptyVtableSlot* slot = slots->at(i);
+
+    if (slot->is_bound()) {
+      MethodFamily* method = slot->get_binding();
+      int max_stack = 0;
+      BytecodeBuffer buffer;
+
+#ifndef PRODUCT
+      if (TraceDefaultMethods) {
+        tty->print("for slot: ");
+        slot->print_on(tty);
+        tty->print_cr("");
+        if (method->has_target()) {
+          method->print_selected(tty, 1);
+        } else {
+          method->print_exception(tty, 1);
+        }
+      }
+#endif // ndef PRODUCT
+      if (method->has_target()) {
+        Method* selected = method->get_selected_target();
+        max_stack = assemble_redirect(
+            &bpool, &buffer, slot->signature(), selected, CHECK);
+      } else if (method->throws_exception()) {
+        max_stack = assemble_abstract_method_error(
+            &bpool, &buffer, method->get_exception_message(), CHECK);
+      }
+      AccessFlags flags = accessFlags_from(
+          JVM_ACC_PUBLIC | JVM_ACC_SYNTHETIC | JVM_ACC_BRIDGE);
+      Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
+          flags, max_stack, slot->size_of_parameters(),
+          ConstMethod::OVERPASS, CHECK);
+      if (m != NULL) {
+        overpasses.push(m);
+      }
+    }
+  }
+
+#ifndef PRODUCT
+  if (TraceDefaultMethods) {
+    tty->print_cr("Created %d overpass methods", overpasses.length());
+  }
+#endif // ndef PRODUCT
+
+  switchover_constant_pool(&bpool, klass, &overpasses, CHECK);
+  merge_in_new_methods(klass, &overpasses, CHECK);
+}
+
+static void sort_methods(GrowableArray<Method*>* methods) {
+  // Note that this must sort using the same key as is used for sorting
+  // methods in InstanceKlass.
+  bool sorted = true;
+  for (int i = methods->length() - 1; i > 0; --i) {
+    for (int j = 0; j < i; ++j) {
+      Method* m1 = methods->at(j);
+      Method* m2 = methods->at(j + 1);
+      if ((uintptr_t)m1->name() > (uintptr_t)m2->name()) {
+        methods->at_put(j, m2);
+        methods->at_put(j + 1, m1);
+        sorted = false;
+      }
+    }
+    if (sorted) break;
+    sorted = true;
+  }
+#ifdef ASSERT
+  uintptr_t prev = 0;
+  for (int i = 0; i < methods->length(); ++i) {
+    Method* mh = methods->at(i);
+    uintptr_t nv = (uintptr_t)mh->name();
+    assert(nv >= prev, "Incorrect overpass method ordering");
+    prev = nv;
+  }
+#endif
+}
+
+static void merge_in_new_methods(InstanceKlass* klass,
+    GrowableArray<Method*>* new_methods, TRAPS) {
+
+  enum { ANNOTATIONS, PARAMETERS, DEFAULTS, NUM_ARRAYS };
+
+  Array<AnnotationArray*>* original_annots[NUM_ARRAYS];
+
+  Array<Method*>* original_methods = klass->methods();
+  Annotations* annots = klass->annotations();
+  original_annots[ANNOTATIONS] = annots->methods_annotations();
+  original_annots[PARAMETERS]  = annots->methods_parameter_annotations();
+  original_annots[DEFAULTS]    = annots->methods_default_annotations();
+
+  Array<int>* original_ordering = klass->method_ordering();
+  Array<int>* merged_ordering = Universe::the_empty_int_array();
+
+  int new_size = klass->methods()->length() + new_methods->length();
+
+  Array<AnnotationArray*>* merged_annots[NUM_ARRAYS];
+
+  Array<Method*>* merged_methods = MetadataFactory::new_array<Method*>(
+      klass->class_loader_data(), new_size, NULL, CHECK);
+  for (int i = 0; i < NUM_ARRAYS; ++i) {
+    if (original_annots[i] != NULL) {
+      merged_annots[i] = MetadataFactory::new_array<AnnotationArray*>(
+          klass->class_loader_data(), new_size, CHECK);
+    } else {
+      merged_annots[i] = NULL;
+    }
+  }
+  if (original_ordering != NULL && original_ordering->length() > 0) {
+    merged_ordering = MetadataFactory::new_array<int>(
+        klass->class_loader_data(), new_size, CHECK);
+  }
+  int method_order_index = klass->methods()->length();
+
+  sort_methods(new_methods);
+
+  // Perform grand merge of existing methods and new methods
+  int orig_idx = 0;
+  int new_idx = 0;
+
+  for (int i = 0; i < new_size; ++i) {
+    Method* orig_method = NULL;
+    Method* new_method = NULL;
+    if (orig_idx < original_methods->length()) {
+      orig_method = original_methods->at(orig_idx);
+    }
+    if (new_idx < new_methods->length()) {
+      new_method = new_methods->at(new_idx);
+    }
+
+    if (orig_method != NULL &&
+        (new_method == NULL || orig_method->name() < new_method->name())) {
+      merged_methods->at_put(i, orig_method);
+      original_methods->at_put(orig_idx, NULL);
+      for (int j = 0; j < NUM_ARRAYS; ++j) {
+        if (merged_annots[j] != NULL) {
+          merged_annots[j]->at_put(i, original_annots[j]->at(orig_idx));
+          original_annots[j]->at_put(orig_idx, NULL);
+        }
+      }
+      if (merged_ordering->length() > 0) {
+        merged_ordering->at_put(i, original_ordering->at(orig_idx));
+      }
+      ++orig_idx;
+    } else {
+      merged_methods->at_put(i, new_method);
+      if (merged_ordering->length() > 0) {
+        merged_ordering->at_put(i, method_order_index++);
+      }
+      ++new_idx;
+    }
+    // update idnum for new location
+    merged_methods->at(i)->set_method_idnum(i);
+  }
+
+  // Verify correct order
+#ifdef ASSERT
+  uintptr_t prev = 0;
+  for (int i = 0; i < merged_methods->length(); ++i) {
+    Method* mo = merged_methods->at(i);
+    uintptr_t nv = (uintptr_t)mo->name();
+    assert(nv >= prev, "Incorrect method ordering");
+    prev = nv;
+  }
+#endif
+
+  // Replace klass methods with new merged lists
+  klass->set_methods(merged_methods);
+  annots->set_methods_annotations(merged_annots[ANNOTATIONS]);
+  annots->set_methods_parameter_annotations(merged_annots[PARAMETERS]);
+  annots->set_methods_default_annotations(merged_annots[DEFAULTS]);
+
+  ClassLoaderData* cld = klass->class_loader_data();
+  MetadataFactory::free_array(cld, original_methods);
+  for (int i = 0; i < NUM_ARRAYS; ++i) {
+    MetadataFactory::free_array(cld, original_annots[i]);
+  }
+  if (original_ordering != NULL && original_ordering->length() > 0) {
+    klass->set_method_ordering(merged_ordering);
+    MetadataFactory::free_array(cld, original_ordering);
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/defaultMethods.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_DEFAULTMETHODS_HPP
+#define SHARE_VM_CLASSFILE_DEFAULTMETHODS_HPP
+
+#include "runtime/handles.hpp"
+#include "utilities/growableArray.hpp"
+#include "utilities/exceptions.hpp"
+
+class InstanceKlass;
+class Symbol;
+class Method;
+
+class DefaultMethods : AllStatic {
+ public:
+
+  // Analyzes class and determines which default methods are inherited
+  // from interfaces (and have no other implementation).  For each method
+  // (and each different signature the method could have), create an
+  // "overpass" method that is an instance method that redirects to the
+  // default method.  Overpass methods are added to the methods lists for
+  // the class.
+  static void generate_default_methods(
+      InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS);
+
+
+  // Called during linking when an invokespecial to a direct interface
+  // method is found.  Selects and returns a method if there is a unique
+  // default method in the 'super_iface' part of the hierarchy which is
+  // also a candidate default for 'this_klass'.  Otherwise throws an AME.
+  static Method* find_super_default(
+      Klass* this_klass, Klass* super_iface,
+      Symbol* method_name, Symbol* method_sig, TRAPS);
+};
+
+#endif // SHARE_VM_CLASSFILE_DEFAULTMETHODS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/genericSignatures.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -0,0 +1,1272 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "classfile/genericSignatures.hpp"
+#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "memory/resourceArea.hpp"
+
+namespace generic {
+
+// Helper class for parsing the generic signature Symbol in klass and methods
+class DescriptorStream : public ResourceObj {
+ private:
+  Symbol* _symbol;
+  int _offset;
+  int _mark;
+  const char* _parse_error;
+
+  void set_parse_error(const char* error) {
+    assert(error != NULL, "Can't set NULL error string");
+    _parse_error = error;
+  }
+
+ public:
+  DescriptorStream(Symbol* sym)
+      : _symbol(sym), _offset(0), _mark(-1), _parse_error(NULL) {}
+
+  const char* parse_error() const {
+    return _parse_error;
+  }
+
+  bool at_end() { return _offset >= _symbol->utf8_length(); }
+
+  char peek() {
+    if (at_end()) {
+      set_parse_error("Peeking past end of signature");
+      return '\0';
+    } else {
+      return _symbol->byte_at(_offset);
+    }
+  }
+
+  char read() {
+    if (at_end()) {
+      set_parse_error("Reading past end of signature");
+      return '\0';
+    } else {
+      return _symbol->byte_at(_offset++);
+    }
+  }
+
+  void read(char expected) {
+    char c = read();
+    assert_char(c, expected, 0);
+  }
+
+  void assert_char(char c, char expected, int pos = -1) {
+    if (c != expected) {
+      const char* fmt = "Parse error at %d: expected %c but got %c";
+      size_t len = strlen(fmt) + 5;
+      char* buffer = NEW_RESOURCE_ARRAY(char, len);
+      jio_snprintf(buffer, len, fmt, _offset + pos, expected, c);
+      set_parse_error(buffer);
+    }
+  }
+
+  void push(char c) {
+    assert(c == _symbol->byte_at(_offset - 1), "Pushing back wrong value");
+    --_offset;
+  }
+
+  void expect_end() {
+    if (!at_end()) {
+      set_parse_error("Unexpected data trailing signature");
+    }
+  }
+
+  bool has_mark() { return _mark != -1; }
+
+  void set_mark() {
+    _mark = _offset;
+  }
+
+  Identifier* identifier_from_mark() {
+    assert(has_mark(), "Mark should be set");
+    if (!has_mark()) {
+      set_parse_error("Expected mark to be set");
+      return NULL;
+    } else {
+      Identifier* id = new Identifier(_symbol, _mark, _offset - 1);
+      _mark = -1;
+      return id;
+    }
+  }
+};
+
+
+#define CHECK_FOR_PARSE_ERROR()         \
+  if (STREAM->parse_error() != NULL) {   \
+    if (VerifyGenericSignatures) {      \
+      fatal(STREAM->parse_error());      \
+    }                                   \
+    return NULL;                        \
+  } 0
+
+#define READ() STREAM->read(); CHECK_FOR_PARSE_ERROR()
+#define PEEK() STREAM->peek(); CHECK_FOR_PARSE_ERROR()
+#define PUSH(c) STREAM->push(c)
+#define EXPECT(c) STREAM->read(c); CHECK_FOR_PARSE_ERROR()
+#define EXPECTED(c, ch) STREAM->assert_char(c, ch); CHECK_FOR_PARSE_ERROR()
+#define EXPECT_END() STREAM->expect_end(); CHECK_FOR_PARSE_ERROR()
+
+#define CHECK_STREAM STREAM); CHECK_FOR_PARSE_ERROR(); (0
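+
+// These macros mirror the TRAPS/CHECK idiom and assume a local
+// 'DescriptorStream* STREAM' is in scope; e.g. 'f(CHECK_STREAM)' expands to
+// 'f(STREAM); CHECK_FOR_PARSE_ERROR(); (0);', so parsing bails out at the
+// first recorded error.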
+
+#ifndef PRODUCT
+void Identifier::print_on(outputStream* str) const {
+  for (int i = _begin; i < _end; ++i) {
+    str->print("%c", (char)_sym->byte_at(i));
+  }
+}
+#endif // ndef PRODUCT
+
+bool Identifier::equals(Identifier* other) {
+  if (_sym == other->_sym && _begin == other->_begin && _end == other->_end) {
+    return true;
+  } else if (_end - _begin != other->_end - other->_begin) {
+    return false;
+  } else {
+    size_t len = _end - _begin;
+    char* addr = ((char*)_sym->bytes()) + _begin;
+    char* oaddr = ((char*)other->_sym->bytes()) + other->_begin;
+    return strncmp(addr, oaddr, len) == 0;
+  }
+}
+
+bool Identifier::equals(Symbol* sym) {
+  Identifier id(sym, 0, sym->utf8_length());
+  return equals(&id);
+}
+
+/**
+ * A formal type parameter may be found in the enclosing class, but it could
+ * also come from an enclosing method or outer class, in the case of inner-outer
+ * classes or anonymous classes.  For example:
+ *
+ * class Outer<T,V> {
+ *   class Inner<W> {
+ *     void m(T t, V v, W w);
+ *   }
+ * }
+ *
+ * In this case, the type variables in m()'s signature are not all found in the
+ * immediate enclosing class (Inner).  class Inner has only type parameter W,
+ * but its outer_class field will reference Outer's descriptor, which contains
+ * T & V (no outer_method in this case).
+ *
+ * If you have an anonymous class, it has both an enclosing method *and* an
+ * enclosing class where type parameters can be declared:
+ *
+ * class MOuter<T> {
+ *   <V> void bar(V v) {
+ *     Runnable r = new Runnable() {
+ *       public void run() {}
+ *       public void foo(T t, V v) { ... }
+ *     };
+ *   }
+ * }
+ *
+ * In this case, foo will be a member of some class, Runnable$1, which has no
+ * formal parameters itself, but has an outer_method (bar()) which provides
+ * type parameter V, and an outer class MOuter with type parameter T.
+ *
+ * It is also possible that the outer class is itself an inner class to some
+ * other class (or an anonymous class with an enclosing method), so we need to
+ * follow the outer_class/outer_method chain to its end when looking for a
+ * type parameter.
+ */
+TypeParameter* Descriptor::find_type_parameter(Identifier* id, int* depth) {
+
+  int current_depth = 0;
+
+  MethodDescriptor* outer_method = as_method_signature();
+  ClassDescriptor* outer_class = as_class_signature();
+
+  if (outer_class == NULL) { // 'this' is a method signature; use the holder
+    outer_class = outer_method->outer_class();
+  }
+
+  while (outer_method != NULL || outer_class != NULL) {
+    if (outer_method != NULL) {
+      for (int i = 0; i < outer_method->type_parameters().length(); ++i) {
+        TypeParameter* p = outer_method->type_parameters().at(i);
+        if (p->identifier()->equals(id)) {
+          *depth = -1; // indicates that this is a method type parameter
+          return p;
+        }
+      }
+    }
+    if (outer_class != NULL) {
+      for (int i = 0; i < outer_class->type_parameters().length(); ++i) {
+        TypeParameter* p = outer_class->type_parameters().at(i);
+        if (p->identifier()->equals(id)) {
+          *depth = current_depth;
+          return p;
+        }
+      }
+      outer_method = outer_class->outer_method();
+      outer_class = outer_class->outer_class();
+      ++current_depth;
+    }
+  }
+
+  if (VerifyGenericSignatures) {
+    fatal("Could not resolve identifier");
+  }
+
+  return NULL;
+}
+
+ClassDescriptor* ClassDescriptor::parse_generic_signature(Klass* klass, TRAPS) {
+  return parse_generic_signature(klass, NULL, CHECK_NULL);
+}
+
+ClassDescriptor* ClassDescriptor::parse_generic_signature(
+      Klass* klass, Symbol* original_name, TRAPS) {
+
+  InstanceKlass* ik = InstanceKlass::cast(klass);
+  Symbol* sym = ik->generic_signature();
+
+  ClassDescriptor* spec;
+
+  if (sym == NULL || (spec = ClassDescriptor::parse_generic_signature(sym)) == NULL) {
+    spec = ClassDescriptor::placeholder(ik);
+  }
+
+  u2 outer_index = get_outer_class_index(ik, CHECK_NULL);
+  if (outer_index != 0) {
+    if (original_name == NULL) {
+      original_name = ik->name();
+    }
+    Handle class_loader = Handle(THREAD, ik->class_loader());
+    Handle protection_domain = Handle(THREAD, ik->protection_domain());
+
+    Symbol* outer_name = ik->constants()->klass_name_at(outer_index);
+    Klass* outer = SystemDictionary::find(
+        outer_name, class_loader, protection_domain, CHECK_NULL);
+    if (outer == NULL && !THREAD->is_Compiler_thread()) {
+      outer = SystemDictionary::resolve_super_or_fail(original_name,
+          outer_name, class_loader, protection_domain, false, CHECK_NULL);
+    }
+
+    InstanceKlass* outer_ik = NULL;  // stays NULL if the outer class isn't loadable
+    ClassDescriptor* outer_spec = NULL;
+    if (outer == NULL) {
+      outer_spec = ClassDescriptor::placeholder(ik);
+      assert(false, "Outer class not loaded and not loadable from here");
+    } else {
+      outer_ik = InstanceKlass::cast(outer);
+      outer_spec = parse_generic_signature(outer, original_name, CHECK_NULL);
+    }
+    spec->set_outer_class(outer_spec);
+
+    u2 encl_method_idx = ik->enclosing_method_method_index();
+    if (encl_method_idx != 0 && outer_ik != NULL) {
+      ConstantPool* cp = ik->constants();
+      u2 name_index = cp->name_ref_index_at(encl_method_idx);
+      u2 sig_index = cp->signature_ref_index_at(encl_method_idx);
+      Symbol* name = cp->symbol_at(name_index);
+      Symbol* sig = cp->symbol_at(sig_index);
+      Method* m = outer_ik->find_method(name, sig);
+      if (m != NULL) {
+        Symbol* gsig = m->generic_signature();
+        if (gsig != NULL) {
+          MethodDescriptor* gms = MethodDescriptor::parse_generic_signature(gsig, outer_spec);
+          spec->set_outer_method(gms);
+        }
+      } else if (VerifyGenericSignatures) {
+        ResourceMark rm;
+        stringStream ss;
+        ss.print("Could not find method %s %s in class %s",
+          name->as_C_string(), sig->as_C_string(), outer_name->as_C_string());
+        fatal(ss.as_string());
+      }
+    }
+  }
+
+  spec->bind_variables_to_parameters();
+  return spec;
+}
+
+ClassDescriptor* ClassDescriptor::placeholder(InstanceKlass* klass) {
+  GrowableArray<TypeParameter*> formals;
+  GrowableArray<ClassType*> interfaces;
+  ClassType* super_type = NULL;
+
+  Klass* super_klass = klass->super();
+  if (super_klass != NULL) {
+    InstanceKlass* super = InstanceKlass::cast(super_klass);
+    super_type = ClassType::from_symbol(super->name());
+  }
+
+  for (int i = 0; i < klass->local_interfaces()->length(); ++i) {
+    InstanceKlass* iface = InstanceKlass::cast(klass->local_interfaces()->at(i));
+    interfaces.append(ClassType::from_symbol(iface->name()));
+  }
+  return new ClassDescriptor(formals, super_type, interfaces);
+}
+
+ClassDescriptor* ClassDescriptor::parse_generic_signature(Symbol* sym) {
+
+  DescriptorStream ds(sym);
+  DescriptorStream* STREAM = &ds;
+
+  GrowableArray<TypeParameter*> parameters(8);
+  char c = READ();
+  if (c == '<') {
+    c = READ();
+    while (c != '>') {
+      PUSH(c);
+      TypeParameter* ftp = TypeParameter::parse_generic_signature(CHECK_STREAM);
+      parameters.append(ftp);
+      c = READ();
+    }
+  } else {
+    PUSH(c);
+  }
+
+  EXPECT('L');
+  ClassType* super = ClassType::parse_generic_signature(CHECK_STREAM);
+
+  GrowableArray<ClassType*> signatures(2);
+  while (!STREAM->at_end()) {
+    EXPECT('L');
+    ClassType* iface = ClassType::parse_generic_signature(CHECK_STREAM);
+    signatures.append(iface);
+  }
+
+  EXPECT_END();
+
+  return new ClassDescriptor(parameters, super, signatures);
+}
+
+#ifndef PRODUCT
+void ClassDescriptor::print_on(outputStream* str) const {
+  str->indent().print_cr("ClassDescriptor {");
+  {
+    streamIndentor si(str);
+    if (_type_parameters.length() > 0) {
+      str->indent().print_cr("Formals {");
+      {
+        streamIndentor si(str);
+        for (int i = 0; i < _type_parameters.length(); ++i) {
+          _type_parameters.at(i)->print_on(str);
+        }
+      }
+      str->indent().print_cr("}");
+    }
+    if (_super != NULL) {
+      str->indent().print_cr("Superclass: ");
+      {
+        streamIndentor si(str);
+        _super->print_on(str);
+      }
+    }
+    if (_interfaces.length() > 0) {
+      str->indent().print_cr("SuperInterfaces: {");
+      {
+        streamIndentor si(str);
+        for (int i = 0; i < _interfaces.length(); ++i) {
+          _interfaces.at(i)->print_on(str);
+        }
+      }
+      str->indent().print_cr("}");
+    }
+    if (_outer_method != NULL) {
+      str->indent().print_cr("Outer Method: {");
+      {
+        streamIndentor si(str);
+        _outer_method->print_on(str);
+      }
+      str->indent().print_cr("}");
+    }
+    if (_outer_class != NULL) {
+      str->indent().print_cr("Outer Class: {");
+      {
+        streamIndentor si(str);
+        _outer_class->print_on(str);
+      }
+      str->indent().print_cr("}");
+    }
+  }
+  str->indent().print_cr("}");
+}
+#endif // ndef PRODUCT
+
+ClassType* ClassDescriptor::interface_desc(Symbol* sym) {
+  for (int i = 0; i < _interfaces.length(); ++i) {
+    if (_interfaces.at(i)->identifier()->equals(sym)) {
+      return _interfaces.at(i);
+    }
+  }
+  if (VerifyGenericSignatures) {
+    fatal("Did not find expected interface");
+  }
+  return NULL;
+}
+
+void ClassDescriptor::bind_variables_to_parameters() {
+  if (_outer_class != NULL) {
+    _outer_class->bind_variables_to_parameters();
+  }
+  if (_outer_method != NULL) {
+    _outer_method->bind_variables_to_parameters();
+  }
+  for (int i = 0; i < _type_parameters.length(); ++i) {
+    _type_parameters.at(i)->bind_variables_to_parameters(this, i);
+  }
+  if (_super != NULL) {
+    _super->bind_variables_to_parameters(this);
+  }
+  for (int i = 0; i < _interfaces.length(); ++i) {
+    _interfaces.at(i)->bind_variables_to_parameters(this);
+  }
+}
+
+ClassDescriptor* ClassDescriptor::canonicalize(Context* ctx) {
+
+  GrowableArray<TypeParameter*> type_params(_type_parameters.length());
+  for (int i = 0; i < _type_parameters.length(); ++i) {
+    type_params.append(_type_parameters.at(i)->canonicalize(ctx, 0));
+  }
+
+  ClassDescriptor* outer = _outer_class == NULL ? NULL :
+      _outer_class->canonicalize(ctx);
+
+  ClassType* super = _super == NULL ? NULL : _super->canonicalize(ctx, 0);
+
+  GrowableArray<ClassType*> interfaces(_interfaces.length());
+  for (int i = 0; i < _interfaces.length(); ++i) {
+    interfaces.append(_interfaces.at(i)->canonicalize(ctx, 0));
+  }
+
+  MethodDescriptor* md = _outer_method == NULL ? NULL :
+      _outer_method->canonicalize(ctx);
+
+  return new ClassDescriptor(type_params, super, interfaces, outer, md);
+}
+
+u2 ClassDescriptor::get_outer_class_index(InstanceKlass* klass, TRAPS) {
+  int inner_index = InstanceKlass::inner_class_inner_class_info_offset;
+  int outer_index = InstanceKlass::inner_class_outer_class_info_offset;
+  int name_offset = InstanceKlass::inner_class_inner_name_offset;
+  int next_offset = InstanceKlass::inner_class_next_offset;
+
+  if (klass->inner_classes() == NULL || klass->inner_classes()->length() == 0) {
+    // No inner class info => no declaring class
+    return 0;
+  }
+
+  Array<u2>* i_icls = klass->inner_classes();
+  ConstantPool* i_cp = klass->constants();
+  int i_length = i_icls->length();
+
+  // Find inner_klass attribute
+  for (int i = 0; i + next_offset < i_length; i += next_offset) {
+    u2 ioff = i_icls->at(i + inner_index);
+    u2 ooff = i_icls->at(i + outer_index);
+    u2 noff = i_icls->at(i + name_offset);
+    if (ioff != 0) {
+      // Check to see if the name matches the class we're looking for
+      // before attempting to find the class.
+      if (i_cp->klass_name_at_matches(klass, ioff) && ooff != 0) {
+        return ooff;
+      }
+    }
+  }
+
+  // It may be anonymous; try for that.
+  u2 encl_method_class_idx = klass->enclosing_method_class_index();
+  if (encl_method_class_idx != 0) {
+    return encl_method_class_idx;
+  }
+
+  return 0;
+}
+
+MethodDescriptor* MethodDescriptor::parse_generic_signature(Method* m, ClassDescriptor* outer) {
+  Symbol* generic_sig = m->generic_signature();
+  MethodDescriptor* md = NULL;
+  if (generic_sig == NULL || (md = parse_generic_signature(generic_sig, outer)) == NULL) {
+    md = parse_generic_signature(m->signature(), outer);
+  }
+  assert(md != NULL, "Could not parse method signature");
+  md->bind_variables_to_parameters();
+  return md;
+}
+
+MethodDescriptor* MethodDescriptor::parse_generic_signature(Symbol* sym, ClassDescriptor* outer) {
+
+  DescriptorStream ds(sym);
+  DescriptorStream* STREAM = &ds;
+
+  GrowableArray<TypeParameter*> params(8);
+  char c = READ();
+  if (c == '<') {
+    c = READ();
+    while (c != '>') {
+      PUSH(c);
+      TypeParameter* ftp = TypeParameter::parse_generic_signature(CHECK_STREAM);
+      params.append(ftp);
+      c = READ();
+    }
+  } else {
+    PUSH(c);
+  }
+
+  EXPECT('(');
+
+  GrowableArray<Type*> parameters(8);
+  c = READ();
+  while (c != ')') {
+    PUSH(c);
+    Type* arg = Type::parse_generic_signature(CHECK_STREAM);
+    parameters.append(arg);
+    c = READ();
+  }
+
+  Type* rt = Type::parse_generic_signature(CHECK_STREAM);
+
+  GrowableArray<Type*> throws;
+  while (!STREAM->at_end()) {
+    EXPECT('^');
+    Type* spec = Type::parse_generic_signature(CHECK_STREAM);
+    throws.append(spec);
+  }
+
+  return new MethodDescriptor(params, outer, parameters, rt, throws);
+}
+
+void MethodDescriptor::bind_variables_to_parameters() {
+  for (int i = 0; i < _type_parameters.length(); ++i) {
+    _type_parameters.at(i)->bind_variables_to_parameters(this, i);
+  }
+  for (int i = 0; i < _parameters.length(); ++i) {
+    _parameters.at(i)->bind_variables_to_parameters(this);
+  }
+  _return_type->bind_variables_to_parameters(this);
+  for (int i = 0; i < _throws.length(); ++i) {
+    _throws.at(i)->bind_variables_to_parameters(this);
+  }
+}
+
+bool MethodDescriptor::covariant_match(MethodDescriptor* other, Context* ctx) {
+
+  if (_parameters.length() == other->_parameters.length()) {
+    for (int i = 0; i < _parameters.length(); ++i) {
+      if (!_parameters.at(i)->covariant_match(other->_parameters.at(i), ctx)) {
+        return false;
+      }
+    }
+
+    if (_return_type->as_primitive() != NULL) {
+      return _return_type->covariant_match(other->_return_type, ctx);
+    } else {
+      // return type is a reference
+      return other->_return_type->as_class() != NULL ||
+             other->_return_type->as_variable() != NULL ||
+             other->_return_type->as_array() != NULL;
+    }
+  } else {
+    return false;
+  }
+}
+
+MethodDescriptor* MethodDescriptor::canonicalize(Context* ctx) {
+
+  GrowableArray<TypeParameter*> type_params(_type_parameters.length());
+  for (int i = 0; i < _type_parameters.length(); ++i) {
+    type_params.append(_type_parameters.at(i)->canonicalize(ctx, 0));
+  }
+
+  ClassDescriptor* outer = _outer_class == NULL ? NULL :
+      _outer_class->canonicalize(ctx);
+
+  GrowableArray<Type*> params(_parameters.length());
+  for (int i = 0; i < _parameters.length(); ++i) {
+    params.append(_parameters.at(i)->canonicalize(ctx, 0));
+  }
+
+  Type* rt = _return_type->canonicalize(ctx, 0);
+
+  GrowableArray<Type*> throws(_throws.length());
+  for (int i = 0; i < _throws.length(); ++i) {
+    throws.append(_throws.at(i)->canonicalize(ctx, 0));
+  }
+
+  return new MethodDescriptor(type_params, outer, params, rt, throws);
+}
+
+#ifndef PRODUCT
+TempNewSymbol MethodDescriptor::reify_signature(Context* ctx, TRAPS) {
+  stringStream ss(256);
+
+  ss.print("(");
+  for (int i = 0; i < _parameters.length(); ++i) {
+    _parameters.at(i)->reify_signature(&ss, ctx);
+  }
+  ss.print(")");
+  _return_type->reify_signature(&ss, ctx);
+  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), THREAD);
+}
+
+void MethodDescriptor::print_on(outputStream* str) const {
+  str->indent().print_cr("MethodDescriptor {");
+  {
+    streamIndentor si(str);
+    if (_type_parameters.length() > 0) {
+      str->indent().print_cr("Formals: {");
+      {
+        streamIndentor si(str);
+        for (int i = 0; i < _type_parameters.length(); ++i) {
+          _type_parameters.at(i)->print_on(str);
+        }
+      }
+      str->indent().print_cr("}");
+    }
+    str->indent().print_cr("Parameters: {");
+    {
+      streamIndentor si(str);
+      for (int i = 0; i < _parameters.length(); ++i) {
+        _parameters.at(i)->print_on(str);
+      }
+    }
+    str->indent().print_cr("}");
+    str->indent().print_cr("Return Type: ");
+    {
+      streamIndentor si(str);
+      _return_type->print_on(str);
+    }
+
+    if (_throws.length() > 0) {
+      str->indent().print_cr("Throws: {");
+      {
+        streamIndentor si(str);
+        for (int i = 0; i < _throws.length(); ++i) {
+          _throws.at(i)->print_on(str);
+        }
+      }
+      str->indent().print_cr("}");
+    }
+  }
+  str->indent().print_cr("}");
+}
+#endif // ndef PRODUCT
+
+TypeParameter* TypeParameter::parse_generic_signature(DescriptorStream* STREAM) {
+  STREAM->set_mark();
+  char c = READ();
+  while (c != ':') {
+    c = READ();
+  }
+
+  Identifier* id = STREAM->identifier_from_mark();
+
+  ClassType* class_bound = NULL;
+  GrowableArray<ClassType*> interface_bounds(8);
+
+  c = READ();
+  if (c != '>') {
+    if (c != ':') {
+      EXPECTED(c, 'L');
+      class_bound = ClassType::parse_generic_signature(CHECK_STREAM);
+      c = READ();
+    }
+
+    while (c == ':') {
+      EXPECT('L');
+      ClassType* fts = ClassType::parse_generic_signature(CHECK_STREAM);
+      interface_bounds.append(fts);
+      c = READ();
+    }
+  }
+  PUSH(c);
+
+  return new TypeParameter(id, class_bound, interface_bounds);
+}
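+
+// Illustrative (editor's note): in "<T:Ljava/lang/Object;>" the loop above
+// reads the identifier T and then parses a class bound of java/lang/Object;
+// in "<K::Ljava/lang/Comparable;>" the class bound is empty (hence "::"),
+// so only the interface bound java/lang/Comparable is recorded.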
+
+void TypeParameter::bind_variables_to_parameters(Descriptor* sig, int position) {
+  if (_class_bound != NULL) {
+    _class_bound->bind_variables_to_parameters(sig);
+  }
+  for (int i = 0; i < _interface_bounds.length(); ++i) {
+    _interface_bounds.at(i)->bind_variables_to_parameters(sig);
+  }
+  _position = position;
+}
+
+Type* TypeParameter::resolve(
+    Context* ctx, int inner_depth, int ctx_depth) {
+
+  if (inner_depth == -1) {
+    // This indicates that the parameter is a method type parameter, which
+    // isn't resolvable using the class hierarchy context
+    return bound();
+  }
+
+  ClassType* provider = ctx->at_depth(ctx_depth);
+  if (provider != NULL) {
+    for (int i = 0; i < inner_depth && provider != NULL; ++i) {
+      provider = provider->outer_class();
+    }
+    if (provider != NULL) {
+      TypeArgument* arg = provider->type_argument_at(_position);
+      if (arg != NULL) {
+        Type* value = arg->lower_bound();
+        return value->canonicalize(ctx, ctx_depth + 1);
+      }
+    }
+  }
+
+  return bound();
+}
+
+TypeParameter* TypeParameter::canonicalize(Context* ctx, int ctx_depth) {
+  ClassType* bound = _class_bound == NULL ? NULL :
+     _class_bound->canonicalize(ctx, ctx_depth);
+
+  GrowableArray<ClassType*> ifaces(_interface_bounds.length());
+  for (int i = 0; i < _interface_bounds.length(); ++i) {
+    ifaces.append(_interface_bounds.at(i)->canonicalize(ctx, ctx_depth));
+  }
+
+  TypeParameter* ret = new TypeParameter(_identifier, bound, ifaces);
+  ret->_position = _position;
+  return ret;
+}
+
+ClassType* TypeParameter::bound() {
+  if (_class_bound != NULL) {
+    return _class_bound;
+  }
+
+  if (_interface_bounds.length() == 1) {
+    return _interface_bounds.at(0);
+  }
+
+  return ClassType::java_lang_Object(); // TODO: investigate this case
+}
+
+#ifndef PRODUCT
+void TypeParameter::print_on(outputStream* str) const {
+  str->indent().print_cr("Formal: {");
+  {
+    streamIndentor si(str);
+
+    str->indent().print("Identifier: ");
+    _identifier->print_on(str);
+    str->print_cr("");
+    if (_class_bound != NULL) {
+      str->indent().print_cr("Class Bound: ");
+      streamIndentor si(str);
+      _class_bound->print_on(str);
+    }
+    if (_interface_bounds.length() > 0) {
+      str->indent().print_cr("Interface Bounds: {");
+      {
+        streamIndentor si(str);
+        for (int i = 0; i < _interface_bounds.length(); ++i) {
+          _interface_bounds.at(i)->print_on(str);
+        }
+      }
+      str->indent().print_cr("}");
+    }
+    str->indent().print_cr("Ordinal Position: %d", _position);
+  }
+  str->indent().print_cr("}");
+}
+#endif // ndef PRODUCT
+
+Type* Type::parse_generic_signature(DescriptorStream* STREAM) {
+  char c = READ();
+  switch (c) {
+    case 'L':
+      return ClassType::parse_generic_signature(CHECK_STREAM);
+    case 'T':
+      return TypeVariable::parse_generic_signature(CHECK_STREAM);
+    case '[':
+      return ArrayType::parse_generic_signature(CHECK_STREAM);
+    default:
+      return new PrimitiveType(c);
+  }
+}
+
+Identifier* ClassType::parse_generic_signature_simple(GrowableArray<TypeArgument*>* args,
+    bool* has_inner, DescriptorStream* STREAM) {
+  STREAM->set_mark();
+
+  char c = READ();
+  while (c != ';' && c != '.' && c != '<') { c = READ(); }
+  Identifier* id = STREAM->identifier_from_mark();
+
+  if (c == '<') {
+    c = READ();
+    while (c != '>') {
+      PUSH(c);
+      TypeArgument* arg = TypeArgument::parse_generic_signature(CHECK_STREAM);
+      args->append(arg);
+      c = READ();
+    }
+    c = READ();
+  }
+
+  *has_inner = (c == '.');
+  if (!(*has_inner)) {
+    EXPECTED(c, ';');
+  }
+
+  return id;
+}
+
+ClassType* ClassType::parse_generic_signature(DescriptorStream* STREAM) {
+  return parse_generic_signature(NULL, CHECK_STREAM);
+}
+
+ClassType* ClassType::parse_generic_signature(ClassType* outer, DescriptorStream* STREAM) {
+  GrowableArray<TypeArgument*> args;
+  ClassType* gct = NULL;
+  bool has_inner = false;
+
+  Identifier* id = parse_generic_signature_simple(&args, &has_inner, STREAM);
+  if (id != NULL) {
+    gct = new ClassType(id, args, outer);
+
+    if (has_inner) {
+      gct = parse_generic_signature(gct, CHECK_STREAM);
+    }
+  }
+  return gct;
+}
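+
+// Editor's sketch with hypothetical names: parsing "LOuter<TT;>.Inner;"
+// first builds a ClassType for Outer with one type argument (the type
+// variable T); because the simple parse stopped at '.', the recursion
+// above then builds the inner ClassType for Inner with Outer attached as
+// its outer class.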
+
+ClassType* ClassType::from_symbol(Symbol* sym) {
+  assert(sym != NULL, "Must not be null");
+  GrowableArray<TypeArgument*> args;
+  Identifier* id = new Identifier(sym, 0, sym->utf8_length());
+  return new ClassType(id, args, NULL);
+}
+
+ClassType* ClassType::java_lang_Object() {
+  return from_symbol(vmSymbols::java_lang_Object());
+}
+
+void ClassType::bind_variables_to_parameters(Descriptor* sig) {
+  for (int i = 0; i < _type_arguments.length(); ++i) {
+    _type_arguments.at(i)->bind_variables_to_parameters(sig);
+  }
+  if (_outer_class != NULL) {
+    _outer_class->bind_variables_to_parameters(sig);
+  }
+}
+
+TypeArgument* ClassType::type_argument_at(int i) {
+  if (i >= 0 && i < _type_arguments.length()) {
+    return _type_arguments.at(i);
+  } else {
+    return NULL;
+  }
+}
+
+#ifndef PRODUCT
+void ClassType::reify_signature(stringStream* ss, Context* ctx) {
+  ss->print("L");
+  _identifier->print_on(ss);
+  ss->print(";");
+}
+
+void ClassType::print_on(outputStream* str) const {
+  str->indent().print_cr("Class {");
+  {
+    streamIndentor si(str);
+    str->indent().print("Name: ");
+    _identifier->print_on(str);
+    str->print_cr("");
+    if (_type_arguments.length() != 0) {
+      str->indent().print_cr("Type Arguments: {");
+      {
+        streamIndentor si(str);
+        for (int j = 0; j < _type_arguments.length(); ++j) {
+          _type_arguments.at(j)->print_on(str);
+        }
+      }
+      str->indent().print_cr("}");
+    }
+    if (_outer_class != NULL) {
+      str->indent().print_cr("Outer Class: ");
+      streamIndentor sir(str);
+      _outer_class->print_on(str);
+    }
+  }
+  str->indent().print_cr("}");
+}
+#endif // ndef PRODUCT
+
+bool ClassType::covariant_match(Type* other, Context* ctx) {
+
+  if (other == this) {
+    return true;
+  }
+
+  TypeVariable* variable = other->as_variable();
+  if (variable != NULL) {
+    other = variable->resolve(ctx, 0);
+  }
+
+  ClassType* outer = outer_class();
+  ClassType* other_class = other->as_class();
+
+  if (other_class == NULL ||
+      (outer == NULL) != (other_class->outer_class() == NULL)) {
+    return false;
+  }
+
+  if (!_identifier->equals(other_class->_identifier)) {
+    return false;
+  }
+
+  if (outer != NULL && !outer->covariant_match(other_class->outer_class(), ctx)) {
+    return false;
+  }
+
+  return true;
+}
+
+ClassType* ClassType::canonicalize(Context* ctx, int ctx_depth) {
+
+  GrowableArray<TypeArgument*> args(_type_arguments.length());
+  for (int i = 0; i < _type_arguments.length(); ++i) {
+    args.append(_type_arguments.at(i)->canonicalize(ctx, ctx_depth));
+  }
+
+  ClassType* outer = _outer_class == NULL ? NULL :
+      _outer_class->canonicalize(ctx, ctx_depth);
+
+  return new ClassType(_identifier, args, outer);
+}
+
+TypeVariable* TypeVariable::parse_generic_signature(DescriptorStream* STREAM) {
+  STREAM->set_mark();
+  char c = READ();
+  while (c != ';') {
+    c = READ();
+  }
+  Identifier* id = STREAM->identifier_from_mark();
+
+  return new TypeVariable(id);
+}
+
+void TypeVariable::bind_variables_to_parameters(Descriptor* sig) {
+  _parameter = sig->find_type_parameter(_id, &_inner_depth);
+  if (VerifyGenericSignatures && _parameter == NULL) {
+    fatal("Could not find formal parameter");
+  }
+}
+
+Type* TypeVariable::resolve(Context* ctx, int ctx_depth) {
+  if (parameter() != NULL) {
+    return parameter()->resolve(ctx, inner_depth(), ctx_depth);
+  } else {
+    if (VerifyGenericSignatures) {
+      fatal("Type variable matches no parameter");
+    }
+    return NULL;
+  }
+}
+
+bool TypeVariable::covariant_match(Type* other, Context* ctx) {
+
+  if (other == this) {
+    return true;
+  }
+
+  Context my_context(NULL); // empty, results in erasure
+  Type* my_type = resolve(&my_context, 0);
+  if (my_type == NULL) {
+    return false;
+  }
+
+  return my_type->covariant_match(other, ctx);
+}
+
+Type* TypeVariable::canonicalize(Context* ctx, int ctx_depth) {
+  return resolve(ctx, ctx_depth);
+}
+
+#ifndef PRODUCT
+void TypeVariable::reify_signature(stringStream* ss, Context* ctx) {
+  Type* type = resolve(ctx, 0);
+  if (type != NULL) {
+    type->reify_signature(ss, ctx);
+  }
+}
+
+void TypeVariable::print_on(outputStream* str) const {
+  str->indent().print_cr("Type Variable {");
+  {
+    streamIndentor si(str);
+    str->indent().print("Name: ");
+    _id->print_on(str);
+    str->print_cr("");
+    str->indent().print_cr("Inner depth: %d", _inner_depth);
+  }
+  str->indent().print_cr("}");
+}
+#endif // ndef PRODUCT
+
+ArrayType* ArrayType::parse_generic_signature(DescriptorStream* STREAM) {
+  Type* base = Type::parse_generic_signature(CHECK_STREAM);
+  return new ArrayType(base);
+}
+
+void ArrayType::bind_variables_to_parameters(Descriptor* sig) {
+  assert(_base != NULL, "Invalid base");
+  _base->bind_variables_to_parameters(sig);
+}
+
+bool ArrayType::covariant_match(Type* other, Context* ctx) {
+  assert(_base != NULL, "Invalid base");
+
+  if (other == this) {
+    return true;
+  }
+
+  ArrayType* other_array = other->as_array();
+  return (other_array != NULL && _base->covariant_match(other_array->_base, ctx));
+}
+
+ArrayType* ArrayType::canonicalize(Context* ctx, int ctx_depth) {
+  assert(_base != NULL, "Invalid base");
+  return new ArrayType(_base->canonicalize(ctx, ctx_depth));
+}
+
+#ifndef PRODUCT
+void ArrayType::reify_signature(stringStream* ss, Context* ctx) {
+  assert(_base != NULL, "Invalid base");
+  ss->print("[");
+  _base->reify_signature(ss, ctx);
+}
+
+void ArrayType::print_on(outputStream* str) const {
+  str->indent().print_cr("Array {");
+  {
+    streamIndentor si(str);
+    _base->print_on(str);
+  }
+  str->indent().print_cr("}");
+}
+#endif // ndef PRODUCT
+
+bool PrimitiveType::covariant_match(Type* other, Context* ctx) {
+
+  PrimitiveType* other_prim = other->as_primitive();
+  return (other_prim != NULL && _type == other_prim->_type);
+}
+
+PrimitiveType* PrimitiveType::canonicalize(Context* ctx, int ctxd) {
+  return this;
+}
+
+#ifndef PRODUCT
+void PrimitiveType::reify_signature(stringStream* ss, Context* ctx) {
+  ss->print("%c", _type);
+}
+
+void PrimitiveType::print_on(outputStream* str) const {
+  str->indent().print_cr("Primitive: '%c'", _type);
+}
+#endif // ndef PRODUCT
+
+void PrimitiveType::bind_variables_to_parameters(Descriptor* sig) {
+}
+
+TypeArgument* TypeArgument::parse_generic_signature(DescriptorStream* STREAM) {
+  char c = READ();
+  Type* type = NULL;
+
+  switch (c) {
+    case '*':
+      return new TypeArgument(ClassType::java_lang_Object(), NULL);
+      break;
+    default:
+      PUSH(c);
+      // fall-through
+    case '+':
+    case '-':
+      type = Type::parse_generic_signature(CHECK_STREAM);
+      if (c == '+') {
+        return new TypeArgument(type, NULL);
+      } else if (c == '-') {
+        return new TypeArgument(ClassType::java_lang_Object(), type);
+      } else {
+        return new TypeArgument(type, type);
+      }
+  }
+}
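+
+// For reference (editor's summary of the cases above, foo/Bar being a
+// hypothetical name): "*" yields (lower = java/lang/Object, upper = NULL);
+// "+Lfoo/Bar;" (? extends Bar) yields (Bar, NULL); "-Lfoo/Bar;" (? super
+// Bar) yields (java/lang/Object, Bar); and a plain "Lfoo/Bar;" yields
+// (Bar, Bar).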
+
+void TypeArgument::bind_variables_to_parameters(Descriptor* sig) {
+  assert(_lower_bound != NULL, "Invalid lower bound");
+  _lower_bound->bind_variables_to_parameters(sig);
+  if (_upper_bound != NULL && _upper_bound != _lower_bound) {
+    _upper_bound->bind_variables_to_parameters(sig);
+  }
+}
+
+bool TypeArgument::covariant_match(TypeArgument* other, Context* ctx) {
+  assert(_lower_bound != NULL, "Invalid lower bound");
+
+  if (other == this) {
+    return true;
+  }
+
+  if (!_lower_bound->covariant_match(other->lower_bound(), ctx)) {
+    return false;
+  }
+  return true;
+}
+
+TypeArgument* TypeArgument::canonicalize(Context* ctx, int ctx_depth) {
+  assert(_lower_bound != NULL, "Invalid lower bound");
+  Type* lower = _lower_bound->canonicalize(ctx, ctx_depth);
+  Type* upper = NULL;
+
+  if (_upper_bound == _lower_bound) {
+    upper = lower;
+  } else if (_upper_bound != NULL) {
+    upper = _upper_bound->canonicalize(ctx, ctx_depth);
+  }
+
+  return new TypeArgument(lower, upper);
+}
+
+#ifndef PRODUCT
+void TypeArgument::print_on(outputStream* str) const {
+  str->indent().print_cr("TypeArgument {");
+  {
+    streamIndentor si(str);
+    if (_lower_bound != NULL) {
+      str->indent().print("Lower bound: ");
+      _lower_bound->print_on(str);
+    }
+    if (_upper_bound != NULL) {
+      str->indent().print("Upper bound: ");
+      _upper_bound->print_on(str);
+    }
+  }
+  str->indent().print_cr("}");
+}
+#endif // ndef PRODUCT
+
+void Context::Mark::destroy() {
+  if (is_active()) {
+    _context->reset_to_mark(_marked_size);
+  }
+  deactivate();
+}
+
+void Context::apply_type_arguments(
+    InstanceKlass* current, InstanceKlass* super, TRAPS) {
+  assert(_cache != NULL, "Cannot use an empty context");
+  ClassType* spec = NULL;
+  if (current != NULL) {
+    ClassDescriptor* descriptor = _cache->descriptor_for(current, CHECK);
+    if (super == current->super()) {
+      spec = descriptor->super();
+    } else {
+      spec = descriptor->interface_desc(super->name());
+    }
+    if (spec != NULL) {
+      _type_arguments.push(spec);
+    }
+  }
+}
+
+void Context::reset_to_mark(int size) {
+  _type_arguments.trunc_to(size);
+}
+
+ClassType* Context::at_depth(int i) const {
+  if (i < _type_arguments.length()) {
+    return _type_arguments.at(_type_arguments.length() - 1 - i);
+  }
+  return NULL;
+}
+
+#ifndef PRODUCT
+void Context::print_on(outputStream* str) const {
+  str->indent().print_cr("Context {");
+  for (int i = 0; i < _type_arguments.length(); ++i) {
+    streamIndentor si(str);
+    str->indent().print("leval %d: ", i);
+    ClassType* ct = at_depth(i);
+    if (ct == NULL) {
+      str->print_cr("<empty>");
+      continue;
+    } else {
+      str->print_cr("{");
+    }
+
+    for (int j = 0; j < ct->type_arguments_length(); ++j) {
+      streamIndentor si(str);
+      TypeArgument* ta = ct->type_argument_at(j);
+      Type* bound = ta->lower_bound();
+      bound->print_on(str);
+    }
+    str->indent().print_cr("}");
+  }
+  str->indent().print_cr("}");
+}
+#endif // ndef PRODUCT
+
+ClassDescriptor* DescriptorCache::descriptor_for(InstanceKlass* ik, TRAPS) {
+
+  ClassDescriptor** existing = _class_descriptors.get(ik);
+  if (existing == NULL) {
+    ClassDescriptor* cd = ClassDescriptor::parse_generic_signature(ik, CHECK_NULL);
+    _class_descriptors.put(ik, cd);
+    return cd;
+  } else {
+    return *existing;
+  }
+}
+
+MethodDescriptor* DescriptorCache::descriptor_for(
+    Method* mh, ClassDescriptor* cd, TRAPS) {
+  assert(mh != NULL && cd != NULL, "Should not be NULL");
+  MethodDescriptor** existing = _method_descriptors.get(mh);
+  if (existing == NULL) {
+    MethodDescriptor* md = MethodDescriptor::parse_generic_signature(mh, cd);
+    _method_descriptors.put(mh, md);
+    return md;
+  } else {
+    return *existing;
+  }
+}
+
+MethodDescriptor* DescriptorCache::descriptor_for(Method* mh, TRAPS) {
+  ClassDescriptor* cd = descriptor_for(
+      InstanceKlass::cast(mh->method_holder()), CHECK_NULL);
+  return descriptor_for(mh, cd, THREAD);
+}
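+
+// Minimal usage sketch (editor's illustration; assumes a valid Method* m
+// and an active ResourceMark, since the descriptors are resource
+// allocated):
+//
+//   DescriptorCache cache;
+//   MethodDescriptor* md = cache.descriptor_for(m, CHECK_NULL);
+//   Context ctx(&cache);
+//   MethodDescriptor* canonical = md->canonicalize(&ctx);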
+
+} // namespace generic
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/genericSignatures.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -0,0 +1,467 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
+#define SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
+
+#include "classfile/symbolTable.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/signature.hpp"
+#include "utilities/growableArray.hpp"
+#include "utilities/resourceHash.hpp"
+
+class stringStream;
+
+namespace generic {
+
+class Identifier;
+class ClassDescriptor;
+class MethodDescriptor;
+
+class TypeParameter; // a formal type parameter declared in generic signatures
+class TypeArgument;  // The "type value" passed to fill parameters in supertypes
+class TypeVariable;  // A usage of a type parameter as a value
+/**
+ * Example:
+ *
+ * <T, V> class Foo extends Bar<String> { int m(V v) {} }
+ * ^^^^^^                       ^^^^^^          ^^
+ * type parameters            type argument    type variable
+ *
+ * Note that a type variable could be passed as an argument too:
+ * <T, V> class Foo extends Bar<T> { int m(V v) {} }
+ *                             ^^^
+ *                             type argument's value is a type variable
+ */
+
+
+class Type;
+class ClassType;
+class ArrayType;
+class PrimitiveType;
+class Context;
+class DescriptorCache;
+
+class DescriptorStream;
+
+class Identifier : public ResourceObj {
+ private:
+  Symbol* _sym;
+  int _begin;
+  int _end;
+
+ public:
+  Identifier(Symbol* sym, int begin, int end) :
+    _sym(sym), _begin(begin), _end(end) {}
+
+  bool equals(Identifier* other);
+  bool equals(Symbol* sym);
+
+#ifndef PRODUCT
+  void print_on(outputStream* str) const;
+#endif // ndef PRODUCT
+};
+
+class Descriptor : public ResourceObj {
+ protected:
+  GrowableArray<TypeParameter*> _type_parameters;
+  ClassDescriptor* _outer_class;
+
+  Descriptor(GrowableArray<TypeParameter*>& params,
+    ClassDescriptor* outer)
+    : _type_parameters(params), _outer_class(outer) {}
+
+ public:
+
+  ClassDescriptor* outer_class() { return _outer_class; }
+  void set_outer_class(ClassDescriptor* sig) { _outer_class = sig; }
+
+  virtual ClassDescriptor* as_class_signature() { return NULL; }
+  virtual MethodDescriptor* as_method_signature() { return NULL; }
+
+  bool is_class_signature() { return as_class_signature() != NULL; }
+  bool is_method_signature() { return as_method_signature() != NULL; }
+
+  GrowableArray<TypeParameter*>& type_parameters() {
+    return _type_parameters;
+  }
+
+  TypeParameter* find_type_parameter(Identifier* id, int* param_depth);
+
+  virtual void bind_variables_to_parameters() = 0;
+
+#ifndef PRODUCT
+  virtual void print_on(outputStream* str) const = 0;
+#endif
+};
+
+class ClassDescriptor : public Descriptor {
+ private:
+  ClassType* _super;
+  GrowableArray<ClassType*> _interfaces;
+  MethodDescriptor* _outer_method;
+
+  ClassDescriptor(GrowableArray<TypeParameter*>& ftp, ClassType* scs,
+      GrowableArray<ClassType*>& sis, ClassDescriptor* outer_class = NULL,
+      MethodDescriptor* outer_method = NULL)
+        : Descriptor(ftp, outer_class), _super(scs), _interfaces(sis),
+          _outer_method(outer_method) {}
+
+  static u2 get_outer_class_index(InstanceKlass* k, TRAPS);
+  static ClassDescriptor* parse_generic_signature(Klass* k, Symbol* original_name, TRAPS);
+
+ public:
+
+  virtual ClassDescriptor* as_class_signature() { return this; }
+
+  MethodDescriptor* outer_method() { return _outer_method; }
+  void set_outer_method(MethodDescriptor* m) { _outer_method = m; }
+
+  ClassType* super() { return _super; }
+  ClassType* interface_desc(Symbol* sym);
+
+  static ClassDescriptor* parse_generic_signature(Klass* k, TRAPS);
+  static ClassDescriptor* parse_generic_signature(Symbol* sym);
+
+  // For use in superclass chains in positions where there is no generic info
+  static ClassDescriptor* placeholder(InstanceKlass* klass);
+
+#ifndef PRODUCT
+  void print_on(outputStream* str) const;
+#endif
+
+  ClassDescriptor* canonicalize(Context* ctx);
+
+  // Linking sets the position index in any contained TypeVariable type
+  // to correspond to the location of that identifier in the formal type
+  // parameters.
+  void bind_variables_to_parameters();
+};
+
+class MethodDescriptor : public Descriptor {
+ private:
+  GrowableArray<Type*> _parameters;
+  Type* _return_type;
+  GrowableArray<Type*> _throws;
+
+  MethodDescriptor(GrowableArray<TypeParameter*>& ftp, ClassDescriptor* outer,
+      GrowableArray<Type*>& sigs, Type* rt, GrowableArray<Type*>& throws)
+      : Descriptor(ftp, outer), _parameters(sigs), _return_type(rt),
+        _throws(throws) {}
+
+ public:
+
+  static MethodDescriptor* parse_generic_signature(Method* m, ClassDescriptor* outer);
+  static MethodDescriptor* parse_generic_signature(Symbol* sym, ClassDescriptor* outer);
+
+  MethodDescriptor* as_method_signature() { return this; }
+
+  // Performs generic analysis on the method parameters to determine
+  // if both methods refer to the same argument types.
+  bool covariant_match(MethodDescriptor* other, Context* ctx);
+
+  // Returns a new method descriptor with all generic variables
+  // removed and replaced with whatever is indicated using the Context.
+  MethodDescriptor* canonicalize(Context* ctx);
+
+  void bind_variables_to_parameters();
+
+#ifndef PRODUCT
+  TempNewSymbol reify_signature(Context* ctx, TRAPS);
+  void print_on(outputStream* str) const;
+#endif
+};
+
+class TypeParameter : public ResourceObj {
+ private:
+  Identifier* _identifier;
+  ClassType* _class_bound;
+  GrowableArray<ClassType*> _interface_bounds;
+
+  // The position is the ordinal location of the parameter within the
+  // formal parameter list (excluding outer classes).  It is only set for
+  // formal type parameters that are associated with a class -- method
+  // type parameters are left as -1.  When resolving a generic variable to
+  // find the actual type, this index is used to access the generic type
+  // argument in the provided context object.
+  int _position; // Assigned during variable linking
+
+  TypeParameter(Identifier* id, ClassType* class_bound,
+    GrowableArray<ClassType*>& interface_bounds) :
+      _identifier(id), _class_bound(class_bound),
+      _interface_bounds(interface_bounds), _position(-1) {}
+
+ public:
+  static TypeParameter* parse_generic_signature(DescriptorStream* str);
+
+  ClassType* bound();
+  int position() { return _position; }
+
+  void bind_variables_to_parameters(Descriptor* sig, int position);
+  Identifier* identifier() { return _identifier; }
+
+  Type* resolve(Context* ctx, int inner_depth, int ctx_depth);
+  TypeParameter* canonicalize(Context* ctx, int ctx_depth);
+
+#ifndef PRODUCT
+  void print_on(outputStream* str) const;
+#endif
+};
+
+class Type : public ResourceObj {
+ public:
+  static Type* parse_generic_signature(DescriptorStream* str);
+
+  virtual ClassType* as_class() { return NULL; }
+  virtual TypeVariable* as_variable() { return NULL; }
+  virtual ArrayType* as_array() { return NULL; }
+  virtual PrimitiveType* as_primitive() { return NULL; }
+
+  virtual bool covariant_match(Type* gt, Context* ctx) = 0;
+  virtual Type* canonicalize(Context* ctx, int ctx_depth) = 0;
+
+  virtual void bind_variables_to_parameters(Descriptor* sig) = 0;
+
+#ifndef PRODUCT
+  virtual void reify_signature(stringStream* ss, Context* ctx) = 0;
+  virtual void print_on(outputStream* str) const = 0;
+#endif
+};
+
+class ClassType : public Type {
+  friend class ClassDescriptor;
+ protected:
+  Identifier* _identifier;
+  GrowableArray<TypeArgument*> _type_arguments;
+  ClassType* _outer_class;
+
+  ClassType(Identifier* identifier,
+      GrowableArray<TypeArgument*>& args,
+      ClassType* outer)
+      : _identifier(identifier), _type_arguments(args), _outer_class(outer) {}
+
+  // Sets *has_inner to true if there are inner classes to read
+  static Identifier* parse_generic_signature_simple(
+      GrowableArray<TypeArgument*>* args,
+      bool* has_inner, DescriptorStream* str);
+
+  static ClassType* parse_generic_signature(ClassType* outer,
+      DescriptorStream* str);
+  static ClassType* from_symbol(Symbol* sym);
+
+ public:
+  ClassType* as_class() { return this; }
+
+  static ClassType* parse_generic_signature(DescriptorStream* str);
+  static ClassType* java_lang_Object();
+
+  Identifier* identifier() { return _identifier; }
+  int type_arguments_length() { return _type_arguments.length(); }
+  TypeArgument* type_argument_at(int i);
+
+  virtual ClassType* outer_class() { return _outer_class; }
+
+  bool covariant_match(Type* gt, Context* ctx);
+  ClassType* canonicalize(Context* ctx, int context_depth);
+
+  void bind_variables_to_parameters(Descriptor* sig);
+
+#ifndef PRODUCT
+  void reify_signature(stringStream* ss, Context* ctx);
+  void print_on(outputStream* str) const;
+#endif
+};
+
+class TypeVariable : public Type {
+ private:
+  Identifier* _id;
+  TypeParameter* _parameter; // assigned during linking
+
+  // how many steps "out" from inner classes, -1 if method
+  int _inner_depth;
+
+  TypeVariable(Identifier* id)
+      : _id(id), _parameter(NULL), _inner_depth(0) {}
+
+ public:
+  TypeVariable* as_variable() { return this; }
+
+  static TypeVariable* parse_generic_signature(DescriptorStream* str);
+
+  Identifier* identifier() { return _id; }
+  TypeParameter* parameter() { return _parameter; }
+  int inner_depth() { return _inner_depth; }
+
+  void bind_variables_to_parameters(Descriptor* sig);
+
+  Type* resolve(Context* ctx, int ctx_depth);
+  bool covariant_match(Type* gt, Context* ctx);
+  Type* canonicalize(Context* ctx, int ctx_depth);
+
+#ifndef PRODUCT
+  void reify_signature(stringStream* ss, Context* ctx);
+  void print_on(outputStream* str) const;
+#endif
+};
+
+class ArrayType : public Type {
+ private:
+  Type* _base;
+
+  ArrayType(Type* base) : _base(base) {}
+
+ public:
+  ArrayType* as_array() { return this; }
+
+  static ArrayType* parse_generic_signature(DescriptorStream* str);
+
+  bool covariant_match(Type* gt, Context* ctx);
+  ArrayType* canonicalize(Context* ctx, int ctx_depth);
+
+  void bind_variables_to_parameters(Descriptor* sig);
+
+#ifndef PRODUCT
+  void reify_signature(stringStream* ss, Context* ctx);
+  void print_on(outputStream* str) const;
+#endif
+};
+
+class PrimitiveType : public Type {
+  friend class Type;
+ private:
+  char _type; // includes V for void
+
+  PrimitiveType(char& type) : _type(type) {}
+
+ public:
+  PrimitiveType* as_primitive() { return this; }
+
+  bool covariant_match(Type* gt, Context* ctx);
+  PrimitiveType* canonicalize(Context* ctx, int ctx_depth);
+
+  void bind_variables_to_parameters(Descriptor* sig);
+
+#ifndef PRODUCT
+  void reify_signature(stringStream* ss, Context* ctx);
+  void print_on(outputStream* str) const;
+#endif
+};
+
+class TypeArgument : public ResourceObj {
+ private:
+  Type* _lower_bound;
+  Type* _upper_bound; // may be null or == _lower_bound
+
+  TypeArgument(Type* lower_bound, Type* upper_bound)
+      : _lower_bound(lower_bound), _upper_bound(upper_bound) {}
+
+ public:
+
+  static TypeArgument* parse_generic_signature(DescriptorStream* str);
+
+  Type* lower_bound() { return _lower_bound; }
+  Type* upper_bound() { return _upper_bound; }
+
+  void bind_variables_to_parameters(Descriptor* sig);
+  TypeArgument* canonicalize(Context* ctx, int ctx_depth);
+
+  bool covariant_match(TypeArgument* a, Context* ctx);
+
+#ifndef PRODUCT
+  void print_on(outputStream* str) const;
+#endif
+};
+
+
+class Context : public ResourceObj {
+ private:
+  DescriptorCache* _cache;
+  GrowableArray<ClassType*> _type_arguments;
+
+  void reset_to_mark(int size);
+
+ public:
+  // When this object goes out of scope or 'destroy' is
+  // called, then the application of the type to the
+  // context is wound-back (unless it's been deactivated).
+  class Mark : public StackObj {
+   private:
+    mutable Context* _context;
+    int _marked_size;
+
+    bool is_active() const { return _context != NULL; }
+    void deactivate() const { _context = NULL; }
+
+   public:
+    Mark() : _context(NULL), _marked_size(0) {}
+    Mark(Context* ctx, int sz) : _context(ctx), _marked_size(sz) {}
+    Mark(const Mark& m) : _context(m._context), _marked_size(m._marked_size) {
+      m.deactivate(); // Ownership is transferred
+    }
+
+    Mark& operator=(const Mark& cm) {
+      destroy();
+      _context = cm._context;
+      _marked_size = cm._marked_size;
+      cm.deactivate();
+      return *this;
+    }
+
+    void destroy();
+    ~Mark() { destroy(); }
+  };
+
+  Context(DescriptorCache* cache) : _cache(cache) {}
+
+  Mark mark() { return Mark(this, _type_arguments.length()); }
+  void apply_type_arguments(InstanceKlass* current, InstanceKlass* super, TRAPS);
+
+  ClassType* at_depth(int i) const;
+
+#ifndef PRODUCT
+  void print_on(outputStream* str) const;
+#endif
+};
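+
+// Editor's usage sketch: a Mark winds the context back when it goes out of
+// scope, so type arguments applied while walking a superclass chain are
+// popped automatically:
+//
+//   Context::Mark mark = ctx->mark();
+//   ctx->apply_type_arguments(ik, super, CHECK);
+//   // ... use ctx at the deeper level ...
+//   // leaving scope truncates ctx back to its marked depth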
+
+/**
+ * Contains a cache of descriptors for classes and methods so they can be
+ * looked up instead of being reparsed each time they are needed.
+ */
+class DescriptorCache : public ResourceObj {
+ private:
+  ResourceHashtable<InstanceKlass*, ClassDescriptor*> _class_descriptors;
+  ResourceHashtable<Method*, MethodDescriptor*> _method_descriptors;
+
+ public:
+  ClassDescriptor* descriptor_for(InstanceKlass* ikh, TRAPS);
+
+  MethodDescriptor* descriptor_for(Method* mh, ClassDescriptor* cd, TRAPS);
+  // Class descriptor derived from method holder
+  MethodDescriptor* descriptor_for(Method* mh, TRAPS);
+};
+
+} // namespace generic
+
+#endif // SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
+
--- a/src/share/vm/classfile/javaClasses.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/classfile/javaClasses.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -1156,7 +1156,7 @@
 // Print stack trace element to resource allocated buffer
 char* java_lang_Throwable::print_stack_element_to_buffer(Method* method, int bci) {
   // Get strings and string lengths
-  InstanceKlass* klass = InstanceKlass::cast(method->method_holder());
+  InstanceKlass* klass = method->method_holder();
   const char* klass_name  = klass->external_name();
   int buf_len = (int)strlen(klass_name);
   char* source_file_name;
@@ -1747,14 +1747,14 @@
   Handle element = ik->allocate_instance_handle(CHECK_0);
   // Fill in class name
   ResourceMark rm(THREAD);
-  const char* str = InstanceKlass::cast(method->method_holder())->external_name();
+  const char* str = method->method_holder()->external_name();
   oop classname = StringTable::intern((char*) str, CHECK_0);
   java_lang_StackTraceElement::set_declaringClass(element(), classname);
   // Fill in method name
   oop methodname = StringTable::intern(method->name(), CHECK_0);
   java_lang_StackTraceElement::set_methodName(element(), methodname);
   // Fill in source file name
-  Symbol* source = InstanceKlass::cast(method->method_holder())->source_file_name();
+  Symbol* source = method->method_holder()->source_file_name();
   if (ShowHiddenFrames && source == NULL)
     source = vmSymbols::unknown_class_name();
   oop filename = StringTable::intern(source, CHECK_0);
--- a/src/share/vm/classfile/systemDictionary.hpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/classfile/systemDictionary.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -137,6 +137,7 @@
   /* NOTE: needed too early in bootstrapping process to have checks based on JDK version */                              \
   /* Universe::is_gte_jdk14x_version() is not set up by this point. */                                                   \
   /* It's okay if this turns out to be NULL in non-1.4 JDKs. */                                                          \
+  do_klass(lambda_MagicLambdaImpl_klass,                java_lang_invoke_MagicLambdaImpl, Opt ) \
   do_klass(reflect_MagicAccessorImpl_klass,             sun_reflect_MagicAccessorImpl,             Opt                 ) \
   do_klass(reflect_MethodAccessorImpl_klass,            sun_reflect_MethodAccessorImpl,            Opt_Only_JDK14NewRef) \
   do_klass(reflect_ConstructorAccessorImpl_klass,       sun_reflect_ConstructorAccessorImpl,       Opt_Only_JDK14NewRef) \
--- a/src/share/vm/classfile/verifier.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/classfile/verifier.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -446,7 +446,7 @@
           bytecode_name = "<illegal>";
       }
     }
-    InstanceKlass* ik = InstanceKlass::cast(method->method_holder());
+    InstanceKlass* ik = method->method_holder();
     ss->indent().print_cr("Location:");
     streamIndentor si2(ss);
     ss->indent().print_cr("%s.%s%s @%d: %s",
@@ -555,9 +555,10 @@
     if (was_recursively_verified())  return;
 
     Method* m = methods->at(index);
-    if (m->is_native() || m->is_abstract()) {
+    if (m->is_native() || m->is_abstract() || m->is_overpass()) {
       // If m is native or abstract, skip it.  It is checked in class file
-      // parser that methods do not override a final method.
+      // parser that methods do not override a final method.  Overpass methods
+      // are trusted since the VM generates them.
       continue;
     }
     verify_method(methodHandle(THREAD, m), CHECK_VERIFY(this));
@@ -1849,7 +1850,7 @@
   if ((index <= 0) || (index >= nconstants)) {
     verify_error(ErrorContext::bad_cp_index(bci, index),
         "Illegal constant pool index %d in class %s",
-        index, InstanceKlass::cast(cp->pool_holder())->external_name());
+        index, cp->pool_holder()->external_name());
     return;
   }
 }
@@ -1868,7 +1869,7 @@
   if ((types & (1 << tag)) == 0) {
     verify_error(ErrorContext::bad_cp_index(bci, index),
       "Illegal type at constant pool entry %d in class %s",
-      index, InstanceKlass::cast(cp->pool_holder())->external_name());
+      index, cp->pool_holder()->external_name());
     return;
   }
 }
@@ -1880,7 +1881,7 @@
   if (!tag.is_klass() && !tag.is_unresolved_klass()) {
     verify_error(ErrorContext::bad_cp_index(bci, index),
         "Illegal type at constant pool entry %d in class %s",
-        index, InstanceKlass::cast(cp->pool_holder())->external_name());
+        index, cp->pool_holder()->external_name());
     return;
   }
 }
@@ -2304,11 +2305,21 @@
   // Make sure the constant pool item is the right type
   u2 index = bcs->get_index_u2();
   Bytecodes::Code opcode = bcs->raw_code();
-  unsigned int types = (opcode == Bytecodes::_invokeinterface
-                                ? 1 << JVM_CONSTANT_InterfaceMethodref
-                      : opcode == Bytecodes::_invokedynamic
-                                ? 1 << JVM_CONSTANT_InvokeDynamic
-                                : 1 << JVM_CONSTANT_Methodref);
+  unsigned int types;
+  switch (opcode) {
+    case Bytecodes::_invokeinterface:
+      types = 1 << JVM_CONSTANT_InterfaceMethodref;
+      break;
+    case Bytecodes::_invokedynamic:
+      types = 1 << JVM_CONSTANT_InvokeDynamic;
+      break;
+    case Bytecodes::_invokespecial:
+      types = (1 << JVM_CONSTANT_InterfaceMethodref) |
+              (1 << JVM_CONSTANT_Methodref);
+      break;
+    default:
+      types = 1 << JVM_CONSTANT_Methodref;
+  }
   verify_cp_type(bcs->bci(), index, cp, types, CHECK_VERIFY(this));
 
   // Get method name and signature
--- a/src/share/vm/classfile/vmSymbols.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/classfile/vmSymbols.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -507,7 +507,7 @@
 }
 
 void vmIntrinsics::verify_method(ID actual_id, Method* m) {
-  Symbol* mk = Klass::cast(m->method_holder())->name();
+  Symbol* mk = m->method_holder()->name();
   ID declared_id = match_method_with_klass(m, mk);
 
   if (declared_id == actual_id)  return; // success
--- a/src/share/vm/classfile/vmSymbols.hpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/classfile/vmSymbols.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -110,10 +110,12 @@
   template(sun_jkernel_DownloadManager,               "sun/jkernel/DownloadManager")              \
   template(getBootClassPathEntryForClass_name,        "getBootClassPathEntryForClass")            \
   template(sun_misc_PostVMInitHook,                   "sun/misc/PostVMInitHook")                  \
+  template(sun_misc_Launcher_ExtClassLoader,          "sun/misc/Launcher$ExtClassLoader")         \
                                                                                                   \
   /* Java runtime version access */                                                               \
   template(sun_misc_Version,                          "sun/misc/Version")                         \
   template(java_runtime_name_name,                    "java_runtime_name")                        \
+  template(java_runtime_version_name,                 "java_runtime_version")                     \
                                                                                                   \
   /* class file format tags */                                                                    \
   template(tag_source_file,                           "SourceFile")                               \
@@ -257,6 +259,7 @@
   template(java_lang_invoke_DontInline_signature,     "Ljava/lang/invoke/DontInline;")            \
   template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \
   template(java_lang_invoke_LambdaForm_Hidden_signature, "Ljava/lang/invoke/LambdaForm$Hidden;")  \
+  template(java_lang_invoke_MagicLambdaImpl,          "java/lang/invoke/MagicLambdaImpl")         \
   /* internal up-calls made only by the JVM, via class sun.invoke.MethodHandleNatives: */         \
   template(findMethodHandleType_name,                 "findMethodHandleType")                     \
   template(findMethodHandleType_signature,       "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/lang/invoke/MethodType;") \
@@ -723,6 +726,21 @@
   /* java/lang/ref/Reference */                                                                                         \
   do_intrinsic(_Reference_get,            java_lang_ref_Reference, get_name,    void_object_signature, F_R)             \
                                                                                                                         \
+  /* support for com.sun.crypto.provider.AESCrypt and some of its callers */                                            \
+  do_class(com_sun_crypto_provider_aescrypt,      "com/sun/crypto/provider/AESCrypt")                                   \
+  do_intrinsic(_aescrypt_encryptBlock, com_sun_crypto_provider_aescrypt, encryptBlock_name, byteArray_int_byteArray_int_signature, F_R)   \
+  do_intrinsic(_aescrypt_decryptBlock, com_sun_crypto_provider_aescrypt, decryptBlock_name, byteArray_int_byteArray_int_signature, F_R)   \
+   do_name(     encryptBlock_name,                                 "encryptBlock")                                      \
+   do_name(     decryptBlock_name,                                 "decryptBlock")                                      \
+   do_signature(byteArray_int_byteArray_int_signature,             "([BI[BI)V")                                         \
+                                                                                                                        \
+  do_class(com_sun_crypto_provider_cipherBlockChaining,            "com/sun/crypto/provider/CipherBlockChaining")       \
+   do_intrinsic(_cipherBlockChaining_encryptAESCrypt, com_sun_crypto_provider_cipherBlockChaining, encrypt_name, byteArray_int_int_byteArray_int_signature, F_R)   \
+   do_intrinsic(_cipherBlockChaining_decryptAESCrypt, com_sun_crypto_provider_cipherBlockChaining, decrypt_name, byteArray_int_int_byteArray_int_signature, F_R)   \
+   do_name(     encrypt_name,                                      "encrypt")                                           \
+   do_name(     decrypt_name,                                      "decrypt")                                           \
+   do_signature(byteArray_int_int_byteArray_int_signature,         "([BII[BI)V")                                        \
+                                                                                                                        \
   /* support for sun.misc.Unsafe */                                                                                     \
   do_class(sun_misc_Unsafe,               "sun/misc/Unsafe")                                                            \
                                                                                                                         \
--- a/src/share/vm/code/compiledIC.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/code/compiledIC.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -191,8 +191,8 @@
     int index = klassItable::compute_itable_index(call_info->resolved_method()());
     entry = VtableStubs::create_stub(false, index, method());
     assert(entry != NULL, "entry not computed");
-    Klass* k = call_info->resolved_method()->method_holder();
-    assert(Klass::cast(k)->is_interface(), "sanity check");
+    InstanceKlass* k = call_info->resolved_method()->method_holder();
+    assert(k->is_interface(), "sanity check");
     InlineCacheBuffer::create_transition_stub(this, k, entry);
   } else {
     // Can be different than method->vtable_index(), due to package-private etc.
--- a/src/share/vm/code/dependencies.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/code/dependencies.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -829,7 +829,7 @@
       }
       if (   !Dependencies::is_concrete_method(lm)
           && !Dependencies::is_concrete_method(m)
-          && Klass::cast(lm->method_holder())->is_subtype_of(m->method_holder()))
+          && lm->method_holder()->is_subtype_of(m->method_holder()))
         // Method m is overridden by lm, but both are non-concrete.
         return true;
     }
@@ -1160,7 +1160,11 @@
 
   // We could also return false if m does not yet appear to be
   // executed, if the VM version supports this distinction also.
-  return !m->is_abstract();
+  return !m->is_abstract() &&
+         !InstanceKlass::cast(m->method_holder())->is_interface();
+         // TODO: investigate whether default methods should be
+         // considered as "concrete" in this situation.  For now they
+         // are not.
 }
 
 
--- a/src/share/vm/code/nmethod.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/code/nmethod.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -1263,7 +1263,7 @@
   assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
   // Remove from list of active nmethods
   if (method() != NULL)
-    InstanceKlass::cast(method()->method_holder())->remove_osr_nmethod(this);
+    method()->method_holder()->remove_osr_nmethod(this);
   // Set entry as invalid
   _entry_bci = InvalidOSREntryBci;
 }
--- a/src/share/vm/compiler/compileBroker.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/compiler/compileBroker.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -1051,7 +1051,7 @@
   guarantee(!method->is_abstract(), "cannot compile abstract methods");
   assert(method->method_holder()->oop_is_instance(),
          "sanity check");
-  assert(!InstanceKlass::cast(method->method_holder())->is_not_initialized(),
+  assert(!method->method_holder()->is_not_initialized(),
          "method holder must be initialized");
   assert(!method->is_method_handle_intrinsic(), "do not enqueue these guys");
 
@@ -1206,7 +1206,7 @@
   assert(method->method_holder()->oop_is_instance(), "not an instance method");
   assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range");
   assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods");
-  assert(!InstanceKlass::cast(method->method_holder())->is_not_initialized(), "method holder must be initialized");
+  assert(!method->method_holder()->is_not_initialized(), "method holder must be initialized");
 
   if (!TieredCompilation) {
     comp_level = CompLevel_highest_tier;
--- a/src/share/vm/compiler/compilerOracle.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/compiler/compilerOracle.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -67,7 +67,7 @@
 
   // utility method
   MethodMatcher* find(methodHandle method) {
-    Symbol* class_name  = Klass::cast(method->method_holder())->name();
+    Symbol* class_name  = method->method_holder()->name();
     Symbol* method_name = method->name();
     for (MethodMatcher* current = this; current != NULL; current = current->_next) {
       if (match(class_name, current->class_name(), current->_class_mode) &&
@@ -574,7 +574,7 @@
   char token[1024];
   int  pos = 0;
   int  c = getc(stream);
-  while(c != EOF) {
+  while(c != EOF && pos < (int)(sizeof(token)-1)) {
     if (c == '\n') {
       token[pos++] = '\0';
       parse_from_line(token);
@@ -595,7 +595,7 @@
   int  pos = 0;
   const char* sp = str;
   int  c = *sp++;
-  while (c != '\0') {
+  while (c != '\0' && pos < (int)(sizeof(token)-1)) {
     if (c == '\n') {
       token[pos++] = '\0';
       parse_line(token);
@@ -624,7 +624,7 @@
   assert(has_command_file(), "command file must be specified");
   fileStream stream(fopen(cc_file(), "at"));
   stream.print("exclude ");
-  Klass::cast(method->method_holder())->name()->print_symbol_on(&stream);
+  method->method_holder()->name()->print_symbol_on(&stream);
   stream.print(".");
   method->name()->print_symbol_on(&stream);
   method->signature()->print_symbol_on(&stream);
--- a/src/share/vm/compiler/disassembler.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/compiler/disassembler.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -55,16 +55,18 @@
 bool        Disassembler::_tried_to_load_library = false;
 
 // This routine is in the shared library:
+Disassembler::decode_func_virtual Disassembler::_decode_instructions_virtual = NULL;
 Disassembler::decode_func Disassembler::_decode_instructions = NULL;
 
 static const char hsdis_library_name[] = "hsdis-"HOTSPOT_LIB_ARCH;
-static const char decode_instructions_name[] = "decode_instructions_virtual";
-
+static const char decode_instructions_virtual_name[] = "decode_instructions_virtual";
+static const char decode_instructions_name[] = "decode_instructions";
+static bool use_new_version = true;
 #define COMMENT_COLUMN  40 LP64_ONLY(+8) /*could be an option*/
 #define BYTES_COMMENT   ";..."  /* funky byte display comment */
 
 bool Disassembler::load_library() {
-  if (_decode_instructions != NULL) {
+  if (_decode_instructions_virtual != NULL || _decode_instructions != NULL) {
     // Already succeeded.
     return true;
   }
@@ -123,11 +125,19 @@
     _library = os::dll_load(buf, ebuf, sizeof ebuf);
   }
   if (_library != NULL) {
+    _decode_instructions_virtual = CAST_TO_FN_PTR(Disassembler::decode_func_virtual,
+                                          os::dll_lookup(_library, decode_instructions_virtual_name));
+  }
+  if (_decode_instructions_virtual == NULL) {
+    // entry point not found in new version, try old version
     _decode_instructions = CAST_TO_FN_PTR(Disassembler::decode_func,
                                           os::dll_lookup(_library, decode_instructions_name));
+    use_new_version = false;
+  } else {
+    use_new_version = true;
   }
   _tried_to_load_library = true;
-  if (_decode_instructions == NULL) {
+  if (_decode_instructions_virtual == NULL && _decode_instructions == NULL) {
     tty->print_cr("Could not load %s; %s; %s", buf,
                   ((_library != NULL)
                    ? "entry point is missing"
@@ -450,17 +460,31 @@
     // This is mainly for debugging the library itself.
     FILE* out = stdout;
     FILE* xmlout = (_print_raw > 1 ? out : NULL);
-    return (address)
-      (*Disassembler::_decode_instructions)((uintptr_t)start, (uintptr_t)end,
-                                            start, end - start,
+    return use_new_version ?
+      (address)
+      (*Disassembler::_decode_instructions_virtual)((uintptr_t)start, (uintptr_t)end,
+                                                    start, end - start,
+                                                    NULL, (void*) xmlout,
+                                                    NULL, (void*) out,
+                                                    options(), 0/*nice new line*/)
+      :
+      (address)
+      (*Disassembler::_decode_instructions)(start, end,
                                             NULL, (void*) xmlout,
                                             NULL, (void*) out,
                                             options());
   }
 
-  return (address)
-    (*Disassembler::_decode_instructions)((uintptr_t)start, (uintptr_t)end,
-                                          start, end - start,
+  return use_new_version ?
+    (address)
+    (*Disassembler::_decode_instructions_virtual)((uintptr_t)start, (uintptr_t)end,
+                                                  start, end - start,
+                                                  &event_to_env,  (void*) this,
+                                                  &printf_to_env, (void*) this,
+                                                  options(), 0/*nice new line*/)
+    :
+    (address)
+    (*Disassembler::_decode_instructions)(start, end,
                                           &event_to_env,  (void*) this,
                                           &printf_to_env, (void*) this,
                                           options());
--- a/src/share/vm/compiler/disassembler.hpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/compiler/disassembler.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -49,18 +49,27 @@
   friend class decode_env;
  private:
   // this is the type of the dll entry point:
-  typedef void* (*decode_func)(uintptr_t start_va, uintptr_t end_va,
+  typedef void* (*decode_func_virtual)(uintptr_t start_va, uintptr_t end_va,
                                unsigned char* buffer, uintptr_t length,
                                void* (*event_callback)(void*, const char*, void*),
                                void* event_stream,
                                int (*printf_callback)(void*, const char*, ...),
                                void* printf_stream,
+                               const char* options,
+                               int newline);
+  // this is the type of the dll entry point for old version:
+  typedef void* (*decode_func)(void* start_va, void* end_va,
+                               void* (*event_callback)(void*, const char*, void*),
+                               void* event_stream,
+                               int (*printf_callback)(void*, const char*, ...),
+                               void* printf_stream,
                                const char* options);
   // points to the library.
   static void*    _library;
   // bailout
   static bool     _tried_to_load_library;
   // points to the decode function.
+  static decode_func_virtual _decode_instructions_virtual;
   static decode_func _decode_instructions;
   // tries to load library and return whether it succeeded.
   static bool load_library();
@@ -85,7 +94,9 @@
 
  public:
   static bool can_decode() {
-    return (_decode_instructions != NULL) || load_library();
+    return (_decode_instructions_virtual != NULL) ||
+           (_decode_instructions != NULL) ||
+           load_library();
   }
   static void decode(CodeBlob *cb,               outputStream* st = NULL);
   static void decode(nmethod* nm,                outputStream* st = NULL);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
+#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
+#include "memory/freeBlockDictionary.hpp"
+#include "memory/sharedHeap.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/vmThread.hpp"
+
+template <>
+void AdaptiveFreeList<FreeChunk>::print_on(outputStream* st, const char* c) const {
+  if (c != NULL) {
+    st->print("%16s", c);
+  } else {
+    st->print(SIZE_FORMAT_W(16), size());
+  }
+  st->print("\t"
+           SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
+           SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n",
+           bfr_surp(),             surplus(),             desired(),             prev_sweep(),           before_sweep(),
+           count(),               coal_births(),          coal_deaths(),          split_births(),         split_deaths());
+}
+
+template <class Chunk>
+AdaptiveFreeList<Chunk>::AdaptiveFreeList() : FreeList<Chunk>(), _hint(0) {
+  init_statistics();
+}
+
+template <class Chunk>
+AdaptiveFreeList<Chunk>::AdaptiveFreeList(Chunk* fc) : FreeList<Chunk>(fc), _hint(0) {
+  init_statistics();
+#ifndef PRODUCT
+  _allocation_stats.set_returned_bytes(size() * HeapWordSize);
+#endif
+}
+
+template <class Chunk>
+void AdaptiveFreeList<Chunk>::initialize() {
+  FreeList<Chunk>::initialize();
+  set_hint(0);
+  init_statistics(true /* split_birth */);
+}
+
+template <class Chunk>
+void AdaptiveFreeList<Chunk>::reset(size_t hint) {
+  FreeList<Chunk>::reset();
+  set_hint(hint);
+}
+
+#ifndef PRODUCT
+template <class Chunk>
+void AdaptiveFreeList<Chunk>::assert_proper_lock_protection_work() const {
+  assert(protecting_lock() != NULL, "Don't call this directly");
+  assert(ParallelGCThreads > 0, "Don't call this directly");
+  Thread* thr = Thread::current();
+  if (thr->is_VM_thread() || thr->is_ConcurrentGC_thread()) {
+    // assert that we are holding the freelist lock
+  } else if (thr->is_GC_task_thread()) {
+    assert(protecting_lock()->owned_by_self(), "FreeList RACE DETECTED");
+  } else if (thr->is_Java_thread()) {
+    assert(!SafepointSynchronize::is_at_safepoint(), "Should not be executing");
+  } else {
+    ShouldNotReachHere();  // unaccounted thread type?
+  }
+}
+#endif
+template <class Chunk>
+void AdaptiveFreeList<Chunk>::init_statistics(bool split_birth) {
+  _allocation_stats.initialize(split_birth);
+}
+
+template <class Chunk>
+size_t AdaptiveFreeList<Chunk>::get_better_size() {
+
+  // A candidate chunk has been found.  If it is already under-
+  // populated and there is a hint, return the hint().  Else
+  // return the size of this chunk.
+  if (surplus() <= 0) {
+    if (hint() != 0) {
+      return hint();
+    } else {
+      return size();
+    }
+  } else {
+    // This list has a surplus so use it.
+    return size();
+  }
+}
+
+
+template <class Chunk>
+void AdaptiveFreeList<Chunk>::return_chunk_at_head(Chunk* chunk) {
+  assert_proper_lock_protection();
+  return_chunk_at_head(chunk, true);
+}
+
+template <class Chunk>
+void AdaptiveFreeList<Chunk>::return_chunk_at_head(Chunk* chunk, bool record_return) {
+  FreeList<Chunk>::return_chunk_at_head(chunk, record_return);
+#ifdef ASSERT
+  if (record_return) {
+    increment_returned_bytes_by(size()*HeapWordSize);
+  }
+#endif
+}
+
+template <class Chunk>
+void AdaptiveFreeList<Chunk>::return_chunk_at_tail(Chunk* chunk) {
+  return_chunk_at_tail(chunk, true);
+}
+
+template <class Chunk>
+void AdaptiveFreeList<Chunk>::return_chunk_at_tail(Chunk* chunk, bool record_return) {
+  FreeList<Chunk>::return_chunk_at_tail(chunk, record_return);
+#ifdef ASSERT
+  if (record_return) {
+    increment_returned_bytes_by(size()*HeapWordSize);
+  }
+#endif
+}
+
+#ifndef PRODUCT
+template <class Chunk>
+void AdaptiveFreeList<Chunk>::verify_stats() const {
+  // The +1 of the left-hand comparand is to allow some "looseness" in
+  // checking: we usually call this interface when adding a block
+  // and we'll subsequently update the stats; we cannot update the
+  // stats beforehand because in the case of the large-block BT
+  // dictionary for example, this might be the first block and
+  // in that case there would be no place that we could record
+  // the stats (which are kept in the block itself).
+  assert((_allocation_stats.prev_sweep() + _allocation_stats.split_births()
+          + _allocation_stats.coal_births() + 1)   // Total Production Stock + 1
+         >= (_allocation_stats.split_deaths() + _allocation_stats.coal_deaths()
+             + (ssize_t)count()),                // Total Current Stock + depletion
+         err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
+                 " violates Conservation Principle: "
+                 "prev_sweep(" SIZE_FORMAT ")"
+                 " + split_births(" SIZE_FORMAT ")"
+                 " + coal_births(" SIZE_FORMAT ") + 1 >= "
+                 " split_deaths(" SIZE_FORMAT ")"
+                 " coal_deaths(" SIZE_FORMAT ")"
+                 " + count(" SSIZE_FORMAT ")",
+                 this, size(), _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
+                 _allocation_stats.coal_births(), _allocation_stats.split_deaths(),
+                 _allocation_stats.coal_deaths(), count()));
+}
+#endif
+
+// Needs to be after the definitions have been seen.
+template class AdaptiveFreeList<FreeChunk>;
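
The closing `template class AdaptiveFreeList<FreeChunk>;` line is an explicit instantiation: the template's member definitions live in this .cpp file, so the compiler is told here, after all definitions are visible, exactly which specialization to emit for other translation units to link against. The idiom in isolation (hypothetical Widget type, a sketch only):

    // widget.hpp -- declaration only; members defined out of line.
    template <class T>
    class Widget {
     public:
      T poke(T v);
    };

    // widget.cpp -- definitions, then explicit instantiation.
    template <class T>
    T Widget<T>::poke(T v) { return v; }

    // Emit code for this specialization into widget.o; must come after
    // the definitions have been seen, as the comment above notes.
    template class Widget<int>;
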
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
+#define SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
+
+#include "memory/freeList.hpp"
+#include "gc_implementation/shared/allocationStats.hpp"
+
+class CompactibleFreeListSpace;
+
+// A class for maintaining a free list of Chunks.  The FreeList
+// maintains the structure of the list (head, tail, etc.) plus
+// statistics for allocations from the list.  The links between items
+// are not part of FreeList.  The statistics are
+// used to make decisions about coalescing Chunks when they
+// are swept during collection.
+//
+// See the corresponding .cpp file for a description of the specifics
+// for that implementation.
+
+class Mutex;
+
+template <class Chunk>
+class AdaptiveFreeList : public FreeList<Chunk> {
+  friend class CompactibleFreeListSpace;
+  friend class VMStructs;
+  // friend class PrintTreeCensusClosure<Chunk, FreeList_t>;
+
+  size_t        _hint;          // next larger size list with a positive surplus
+
+  AllocationStats _allocation_stats; // allocation-related statistics
+
+ public:
+
+  AdaptiveFreeList();
+  AdaptiveFreeList(Chunk* fc);
+
+  using FreeList<Chunk>::assert_proper_lock_protection;
+#ifdef ASSERT
+  using FreeList<Chunk>::protecting_lock;
+#endif
+  using FreeList<Chunk>::count;
+  using FreeList<Chunk>::size;
+  using FreeList<Chunk>::verify_chunk_in_free_list;
+  using FreeList<Chunk>::getFirstNChunksFromList;
+  using FreeList<Chunk>::print_on;
+  void return_chunk_at_head(Chunk* fc, bool record_return);
+  void return_chunk_at_head(Chunk* fc);
+  void return_chunk_at_tail(Chunk* fc, bool record_return);
+  void return_chunk_at_tail(Chunk* fc);
+  using FreeList<Chunk>::return_chunk_at_tail;
+  using FreeList<Chunk>::remove_chunk;
+  using FreeList<Chunk>::prepend;
+  using FreeList<Chunk>::print_labels_on;
+  using FreeList<Chunk>::get_chunk_at_head;
+
+  // Initialize.
+  void initialize();
+
+  // Reset the head, tail, hint, and count of a free list.
+  void reset(size_t hint);
+
+  void assert_proper_lock_protection_work() const PRODUCT_RETURN;
+
+  void print_on(outputStream* st, const char* c = NULL) const;
+
+  size_t hint() const {
+    return _hint;
+  }
+  void set_hint(size_t v) {
+    assert_proper_lock_protection();
+    assert(v == 0 || size() < v, "Bad hint");
+    _hint = v;
+  }
+
+  size_t get_better_size();
+
+  // Accessors for statistics
+  void init_statistics(bool split_birth = false);
+
+  AllocationStats* allocation_stats() {
+    assert_proper_lock_protection();
+    return &_allocation_stats;
+  }
+
+  ssize_t desired() const {
+    return _allocation_stats.desired();
+  }
+  void set_desired(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_desired(v);
+  }
+  void compute_desired(float inter_sweep_current,
+                       float inter_sweep_estimate,
+                       float intra_sweep_estimate) {
+    assert_proper_lock_protection();
+    _allocation_stats.compute_desired(count(),
+                                      inter_sweep_current,
+                                      inter_sweep_estimate,
+                                      intra_sweep_estimate);
+  }
+  ssize_t coal_desired() const {
+    return _allocation_stats.coal_desired();
+  }
+  void set_coal_desired(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_coal_desired(v);
+  }
+
+  ssize_t surplus() const {
+    return _allocation_stats.surplus();
+  }
+  void set_surplus(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_surplus(v);
+  }
+  void increment_surplus() {
+    assert_proper_lock_protection();
+    _allocation_stats.increment_surplus();
+  }
+  void decrement_surplus() {
+    assert_proper_lock_protection();
+    _allocation_stats.decrement_surplus();
+  }
+
+  ssize_t bfr_surp() const {
+    return _allocation_stats.bfr_surp();
+  }
+  void set_bfr_surp(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_bfr_surp(v);
+  }
+  ssize_t prev_sweep() const {
+    return _allocation_stats.prev_sweep();
+  }
+  void set_prev_sweep(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_prev_sweep(v);
+  }
+  ssize_t before_sweep() const {
+    return _allocation_stats.before_sweep();
+  }
+  void set_before_sweep(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_before_sweep(v);
+  }
+
+  ssize_t coal_births() const {
+    return _allocation_stats.coal_births();
+  }
+  void set_coal_births(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_coal_births(v);
+  }
+  void increment_coal_births() {
+    assert_proper_lock_protection();
+    _allocation_stats.increment_coal_births();
+  }
+
+  ssize_t coal_deaths() const {
+    return _allocation_stats.coal_deaths();
+  }
+  void set_coal_deaths(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_coal_deaths(v);
+  }
+  void increment_coal_deaths() {
+    assert_proper_lock_protection();
+    _allocation_stats.increment_coal_deaths();
+  }
+
+  ssize_t split_births() const {
+    return _allocation_stats.split_births();
+  }
+  void set_split_births(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_split_births(v);
+  }
+  void increment_split_births() {
+    assert_proper_lock_protection();
+    _allocation_stats.increment_split_births();
+  }
+
+  ssize_t split_deaths() const {
+    return _allocation_stats.split_deaths();
+  }
+  void set_split_deaths(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_split_deaths(v);
+  }
+  void increment_split_deaths() {
+    assert_proper_lock_protection();
+    _allocation_stats.increment_split_deaths();
+  }
+
+#ifndef PRODUCT
+  // For debugging.  The "_returned_bytes" in all the lists are summed
+  // and compared with the total number of bytes swept during a
+  // collection.
+  size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
+  void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
+  void increment_returned_bytes_by(size_t v) {
+    _allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
+  }
+  // Stats verification
+  void verify_stats() const;
+#endif  // NOT PRODUCT
+};
+
+#endif // SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
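
Most of this header is lock-checked delegation to AllocationStats; the interesting state is _hint together with surplus(). Each indexed list remembers the next larger size that last had a surplus, and get_better_size() sends an under-populated list's requests there instead of splitting its own chunks. A standalone sketch of that decision, using a hypothetical SizeClass stand-in:

    #include <cstddef>

    // Hypothetical stand-in for one indexed free list.
    struct SizeClass {
      long   surplus; // chunks above the desired count; <= 0 when under-populated
      size_t hint;    // next larger size class with a surplus, 0 if none
      size_t size;    // chunk size this list holds
    };

    // Mirrors AdaptiveFreeList::get_better_size(): an under-populated list
    // defers to its hint; a list with a surplus serves requests itself.
    size_t better_size(const SizeClass& sc) {
      if (sc.surplus <= 0 && sc.hint != 0) {
        return sc.hint;
      }
      return sc.size;
    }
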
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -91,7 +91,7 @@
   _collector(NULL)
 {
   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
-    "FreeChunk is larger than expected");
+         "FreeChunk is larger than expected");
   _bt.set_space(this);
   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
   // We have all of "mr", all of which we place in the dictionary
@@ -101,14 +101,14 @@
   // implementation, namely, the simple binary tree (splaying
   // temporarily disabled).
   switch (dictionaryChoice) {
+    case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
+      _dictionary = new BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>(mr);
+      break;
     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
     default:
       warning("dictionaryChoice: selected option not understood; using"
               " default BinaryTreeDictionary implementation instead.");
-    case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
-      _dictionary = new BinaryTreeDictionary<FreeChunk>(mr, use_adaptive_freelists);
-      break;
   }
   assert(_dictionary != NULL, "CMS dictionary initialization");
   // The indexed free lists are initially all empty and are lazily
@@ -453,7 +453,7 @@
   reportIndexedFreeListStatistics();
   gclog_or_tty->print_cr("Layout of Indexed Freelists");
   gclog_or_tty->print_cr("---------------------------");
-  FreeList<FreeChunk>::print_labels_on(st, "size");
+  AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
     _indexedFreeList[i].print_on(gclog_or_tty);
     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
@@ -1319,7 +1319,7 @@
   size_t currSize = numWords + MinChunkSize;
   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
-    FreeList<FreeChunk>* fl = &_indexedFreeList[i];
+    AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
     if (fl->head()) {
       ret = getFromListGreater(fl, numWords);
       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
@@ -1702,7 +1702,9 @@
   _dictionary->return_chunk(chunk);
 #ifndef PRODUCT
   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
-    TreeChunk<FreeChunk>::as_TreeChunk(chunk)->list()->verify_stats();
+    TreeChunk<FreeChunk, AdaptiveFreeList>* tc = TreeChunk<FreeChunk, AdaptiveFreeList>::as_TreeChunk(chunk);
+    TreeList<FreeChunk, AdaptiveFreeList>* tl = tc->list();
+    tl->verify_stats();
   }
 #endif // PRODUCT
 }
@@ -1745,7 +1747,7 @@
   {
     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
     ec = dictionary()->find_largest_dict();  // get largest block
-    if (ec != NULL && ec->end() == chunk) {
+    if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
       // It's a coterminal block - we can coalesce.
       size_t old_size = ec->size();
       coalDeath(old_size);
@@ -1850,11 +1852,11 @@
      the excess is >= MIN_CHUNK. */
   size_t start = align_object_size(numWords + MinChunkSize);
   if (start < IndexSetSize) {
-    FreeList<FreeChunk>* it   = _indexedFreeList;
+    AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
     size_t    hint = _indexedFreeList[start].hint();
     while (hint < IndexSetSize) {
       assert(hint % MinObjAlignment == 0, "hint should be aligned");
-      FreeList<FreeChunk> *fl = &_indexedFreeList[hint];
+      AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
       if (fl->surplus() > 0 && fl->head() != NULL) {
         // Found a list with surplus, reset original hint
         // and split out a free chunk which is returned.
@@ -1873,7 +1875,7 @@
 }
 
 /* Requires fl->size >= numWords + MinChunkSize */
-FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList<FreeChunk>* fl,
+FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
   size_t numWords) {
   FreeChunk *curr = fl->head();
   size_t oldNumWords = curr->size();
@@ -2155,7 +2157,7 @@
   assert_locked();
   size_t i;
   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    FreeList<FreeChunk>* fl = &_indexedFreeList[i];
+    AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
     if (PrintFLSStatistics > 1) {
       gclog_or_tty->print("size[%d] : ", i);
     }
@@ -2174,7 +2176,7 @@
   assert_locked();
   size_t i;
   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    FreeList<FreeChunk> *fl = &_indexedFreeList[i];
+    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
     fl->set_surplus(fl->count() -
                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
   }
@@ -2185,7 +2187,7 @@
   size_t i;
   size_t h = IndexSetSize;
   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
-    FreeList<FreeChunk> *fl = &_indexedFreeList[i];
+    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
     fl->set_hint(h);
     if (fl->surplus() > 0) {
       h = i;
@@ -2197,7 +2199,7 @@
   assert_locked();
   size_t i;
   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    FreeList<FreeChunk> *fl = &_indexedFreeList[i];
+    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
     fl->set_prev_sweep(fl->count());
     fl->set_coal_births(0);
     fl->set_coal_deaths(0);
@@ -2224,7 +2226,7 @@
 
 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
   if (size < SmallForDictionary) {
-    FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
     return (fl->coal_desired() < 0) ||
            ((int)fl->count() > fl->coal_desired());
   } else {
@@ -2234,14 +2236,14 @@
 
 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
   assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
   fl->increment_coal_births();
   fl->increment_surplus();
 }
 
 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
   assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
   fl->increment_coal_deaths();
   fl->decrement_surplus();
 }
@@ -2250,7 +2252,7 @@
   if (size  < SmallForDictionary) {
     smallCoalBirth(size);
   } else {
-    dictionary()->dict_census_udpate(size,
+    dictionary()->dict_census_update(size,
                                    false /* split */,
                                    true /* birth */);
   }
@@ -2260,7 +2262,7 @@
   if(size  < SmallForDictionary) {
     smallCoalDeath(size);
   } else {
-    dictionary()->dict_census_udpate(size,
+    dictionary()->dict_census_update(size,
                                    false /* split */,
                                    false /* birth */);
   }
@@ -2268,14 +2270,14 @@
 
 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
   assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
   fl->increment_split_births();
   fl->increment_surplus();
 }
 
 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
   assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
   fl->increment_split_deaths();
   fl->decrement_surplus();
 }
@@ -2284,7 +2286,7 @@
   if (size  < SmallForDictionary) {
     smallSplitBirth(size);
   } else {
-    dictionary()->dict_census_udpate(size,
+    dictionary()->dict_census_update(size,
                                    true /* split */,
                                    true /* birth */);
   }
@@ -2294,7 +2296,7 @@
   if (size  < SmallForDictionary) {
     smallSplitDeath(size);
   } else {
-    dictionary()->dict_census_udpate(size,
+    dictionary()->dict_census_update(size,
                                    true /* split */,
                                    false /* birth */);
   }
@@ -2517,10 +2519,10 @@
 
 #ifndef PRODUCT
 void CompactibleFreeListSpace::check_free_list_consistency() const {
-  assert(_dictionary->min_size() <= IndexSetSize,
+  assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size() <= IndexSetSize),
     "Some sizes can't be allocated without recourse to"
     " linear allocation buffers");
-  assert(BinaryTreeDictionary<FreeChunk>::min_tree_chunk_size*HeapWordSize == sizeof(TreeChunk<FreeChunk>),
+  assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList>)),
     "else MIN_TREE_CHUNK_SIZE is wrong");
   assert(IndexSetStart != 0, "IndexSetStart not initialized");
   assert(IndexSetStride != 0, "IndexSetStride not initialized");
@@ -2529,15 +2531,15 @@
 
 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
   assert_lock_strong(&_freelistLock);
-  FreeList<FreeChunk> total;
+  AdaptiveFreeList<FreeChunk> total;
   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
-  FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
+  AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
   size_t total_free = 0;
   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    const FreeList<FreeChunk> *fl = &_indexedFreeList[i];
+    const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
     total_free += fl->count() * fl->size();
     if (i % (40*IndexSetStride) == 0) {
-      FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
+      AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
     }
     fl->print_on(gclog_or_tty);
     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
@@ -2620,7 +2622,7 @@
     res = _cfls->getChunkFromDictionaryExact(word_sz);
     if (res == NULL) return NULL;
   } else {
-    FreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
+    AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
     if (fl->count() == 0) {
       // Attempt to refill this local free list.
       get_from_global_pool(word_sz, fl);
@@ -2640,7 +2642,7 @@
 
 // Get a chunk of blocks of the right size and update related
 // book-keeping stats
-void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl) {
+void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
   // Get the #blocks we want to claim
   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
   assert(n_blks > 0, "Error");
@@ -2722,7 +2724,7 @@
         if (num_retire > 0) {
           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
           // Reset this list.
-          _indexedFreeList[i] = FreeList<FreeChunk>();
+          _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
           _indexedFreeList[i].set_size(i);
         }
       }
@@ -2736,7 +2738,7 @@
   }
 }
 
-void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl) {
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
   assert(fl->count() == 0, "Precondition.");
   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
          "Precondition");
@@ -2752,12 +2754,12 @@
          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
          (CMSSplitIndexedFreeListBlocks || k <= 1);
          k++, cur_sz = k * word_sz) {
-      FreeList<FreeChunk> fl_for_cur_sz;  // Empty.
+      AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
       fl_for_cur_sz.set_size(cur_sz);
       {
         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
                         Mutex::_no_safepoint_check_flag);
-        FreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
+        AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
         if (gfl->count() != 0) {
           // nn is the number of chunks of size cur_sz that
           // we'd need to split k-ways each, in order to create
@@ -2832,12 +2834,11 @@
     MutexLockerEx x(parDictionaryAllocLock(),
                     Mutex::_no_safepoint_check_flag);
     while (n > 0) {
-      fc = dictionary()->get_chunk(MAX2(n * word_sz,
-                                  _dictionary->min_size()),
+      fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
                                   FreeBlockDictionary<FreeChunk>::atLeast);
       if (fc != NULL) {
         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
-        dictionary()->dict_census_udpate(fc->size(),
+        dictionary()->dict_census_update(fc->size(),
                                        true /*split*/,
                                        false /*birth*/);
         break;
@@ -2890,7 +2891,7 @@
       fc->set_size(prefix_size);
       if (rem >= IndexSetSize) {
         returnChunkToDictionary(rem_fc);
-        dictionary()->dict_census_udpate(rem, true /*split*/, true /*birth*/);
+        dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
         rem_fc = NULL;
       }
       // Otherwise, return it to the small list below.
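
The smallCoalBirth/smallCoalDeath/smallSplitBirth/smallSplitDeath helpers above (now routed through dict_census_update for large sizes, fixing the long-standing "udpate" typo) maintain a per-size census: every coalesce or split creates chunks of one size and consumes chunks of another, and the surplus moves in step. A toy model of that bookkeeping, with a hypothetical Census struct:

    // Toy per-size census; hypothetical types, not the HotSpot ones.
    struct Census {
      long coal_births, coal_deaths;   // chunks gained/lost through coalescing
      long split_births, split_deaths; // chunks gained/lost through splitting
      long surplus;                    // count above the desired population
    };

    void birth(Census& c, bool split) {
      (split ? c.split_births : c.coal_births)++;
      c.surplus++; // one more chunk of this size exists now
    }

    void death(Census& c, bool split) {
      (split ? c.split_deaths : c.coal_deaths)++;
      c.surplus--; // one fewer chunk of this size exists now
    }
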
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP
 
+#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
 #include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
 #include "memory/binaryTreeDictionary.hpp"
 #include "memory/blockOffsetTable.inline.hpp"
@@ -38,6 +39,7 @@
 class CompactibleFreeListSpace;
 class BlkClosure;
 class BlkClosureCareful;
+class FreeChunk;
 class UpwardsObjectClosure;
 class ObjectClosureCareful;
 class Klass;
@@ -131,7 +133,7 @@
   FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
   FreeBlockDictionary<FreeChunk>* _dictionary;    // ptr to dictionary for large size blocks
 
-  FreeList<FreeChunk> _indexedFreeList[IndexSetSize];
+  AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];
                                        // indexed array for small size blocks
  // allocation strategy
   bool       _fitStrategy;      // Use best fit strategy.
@@ -168,7 +170,7 @@
   // If the count of "fl" is negative, it's absolute value indicates a
   // number of free chunks that had been previously "borrowed" from global
   // list of size "word_sz", and must now be decremented.
-  void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl);
+  void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
 
   // Allocation helper functions
   // Allocate using a strategy that takes from the indexed free lists
@@ -214,7 +216,7 @@
   // and return it.  The split off remainder is returned to
   // the free lists.  The old name for getFromListGreater
   // was lookInListGreater.
-  FreeChunk* getFromListGreater(FreeList<FreeChunk>* fl, size_t numWords);
+  FreeChunk* getFromListGreater(AdaptiveFreeList<FreeChunk>* fl, size_t numWords);
   // Get a chunk in the indexed free list or dictionary,
   // by considering a larger chunk and splitting it.
   FreeChunk* getChunkFromGreater(size_t numWords);
@@ -621,7 +623,7 @@
   CompactibleFreeListSpace* _cfls;
 
   // Our local free lists.
-  FreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
+  AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
 
   // Initialized from a command-line arg.
 
@@ -634,7 +636,7 @@
   size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];
 
   // Internal work method
-  void get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl);
+  void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);
 
 public:
   CFLS_LAB(CompactibleFreeListSpace* cfls);
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -9143,7 +9143,7 @@
     size_t shrinkable_size_in_bytes = chunk_at_end->size();
     size_t aligned_shrinkable_size_in_bytes =
       align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
-    assert(unallocated_start <= chunk_at_end->end(),
+    assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
       "Inconsistent chunk at end of space");
     size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
     size_t word_size_before = heap_word_size(_virtual_space.committed_size());
@@ -9210,7 +9210,7 @@
 
     assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
       "Inconsistency at end of space");
-    assert(chunk_at_end->end() == _cmsSpace->end(),
+    assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
       "Shrinking is inconsistent");
     return;
   }
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -133,7 +133,7 @@
   }
 
   // Return the address past the end of this chunk
-  HeapWord* end() const { return ((HeapWord*) this) + size(); }
+  uintptr_t* end() const { return ((uintptr_t*) this) + size(); }
 
   // debugging
   void verify()             const PRODUCT_RETURN;
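
FreeChunk::end() switches its return type from HeapWord* to uintptr_t*; both point at machine words, so `((uintptr_t*) this) + size()` still advances by size() words, and only the comparison sites above needed casts. A self-contained sketch of the scaling, with a hypothetical Chunk:

    #include <cassert>
    #include <cstdint>

    struct Chunk {
      size_t _words;                         // chunk size in machine words
      size_t size() const { return _words; }
      // One past the chunk: uintptr_t* arithmetic advances one word
      // (sizeof(uintptr_t) bytes) per element, just as HeapWord* did.
      uintptr_t* end() { return ((uintptr_t*) this) + size(); }
    };

    int main() {
      Chunk c = { 4 };
      assert((char*) c.end() == (char*) &c + 4 * sizeof(uintptr_t));
      return 0;
    }
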
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_VMSTRUCTS_CMS_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_VMSTRUCTS_CMS_HPP
 
+typedef BinaryTreeDictionary<FreeChunk, AdaptiveFreeList> AFLBinaryTreeDictionary;
+
 #define VM_STRUCTS_CMS(nonstatic_field, \
                    volatile_nonstatic_field, \
                    static_field) \
@@ -38,14 +40,8 @@
   nonstatic_field(CMSCollector,                _markBitMap,                                   CMSBitMap)                             \
   nonstatic_field(ConcurrentMarkSweepGeneration, _cmsSpace,                                   CompactibleFreeListSpace*)             \
      static_field(ConcurrentMarkSweepThread,   _collector,                                    CMSCollector*)                         \
-  volatile_nonstatic_field(FreeChunk,          _size,                                         size_t)                                \
-  nonstatic_field(FreeChunk,                   _next,                                         FreeChunk*)                            \
-  nonstatic_field(FreeChunk,                   _prev,                                         FreeChunk*)                            \
   nonstatic_field(LinearAllocBlock,            _word_size,                                    size_t)                                \
-  nonstatic_field(FreeList<FreeChunk>,         _size,                                         size_t)                                \
-  nonstatic_field(FreeList<FreeChunk>,         _count,                                        ssize_t)                               \
-  nonstatic_field(BinaryTreeDictionary<FreeChunk>,_total_size,                                 size_t)                                \
-  nonstatic_field(CompactibleFreeListSpace,    _dictionary,                                   FreeBlockDictionary<FreeChunk>*)       \
+  nonstatic_field(AFLBinaryTreeDictionary,     _total_size,                                   size_t)                                \
   nonstatic_field(CompactibleFreeListSpace,    _indexedFreeList[0],                           FreeList<FreeChunk>)                   \
   nonstatic_field(CompactibleFreeListSpace,    _smallLinearAllocBlock,                        LinearAllocBlock)
 
@@ -60,19 +56,17 @@
   declare_toplevel_type(CMSCollector)                                     \
   declare_toplevel_type(CMSBitMap)                                        \
   declare_toplevel_type(FreeChunk)                                        \
+  declare_toplevel_type(Metablock)                                        \
   declare_toplevel_type(ConcurrentMarkSweepThread*)                       \
   declare_toplevel_type(ConcurrentMarkSweepGeneration*)                   \
   declare_toplevel_type(SurrogateLockerThread*)                           \
   declare_toplevel_type(CompactibleFreeListSpace*)                        \
   declare_toplevel_type(CMSCollector*)                                    \
-  declare_toplevel_type(FreeChunk*)                                       \
-  declare_toplevel_type(BinaryTreeDictionary<FreeChunk>*)                 \
-  declare_toplevel_type(FreeBlockDictionary<FreeChunk>*)                  \
-  declare_toplevel_type(FreeList<FreeChunk>*)                             \
-  declare_toplevel_type(FreeList<FreeChunk>)                              \
+  declare_toplevel_type(AFLBinaryTreeDictionary*)                         \
   declare_toplevel_type(LinearAllocBlock)                                 \
   declare_toplevel_type(FreeBlockDictionary<FreeChunk>)                   \
-            declare_type(BinaryTreeDictionary<FreeChunk>, FreeBlockDictionary<FreeChunk>)
+            declare_type(AFLBinaryTreeDictionary, FreeBlockDictionary<FreeChunk>)
 
 #define VM_INT_CONSTANTS_CMS(declare_constant)                            \
   declare_constant(Generation::ConcurrentMarkSweep)                       \
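
The new AFLBinaryTreeDictionary typedef is not only for brevity: the comma inside BinaryTreeDictionary<FreeChunk, AdaptiveFreeList> would be taken as a macro-argument separator by the VM_STRUCTS/VM_TYPES declaration macros, so the template name has to be collapsed into a single token first. A minimal reproduction with a hypothetical macro:

    #include <map>

    #define DECLARE_FIELD(type, name) type name;

    // DECLARE_FIELD(std::map<int, int>, m)  // error: macro sees 3 arguments
    typedef std::map<int, int> IntMap;       // hide the comma in one token
    DECLARE_FIELD(IntMap, m)                 // expands cleanly

    int main() { return (int) m.size(); }
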
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -191,7 +191,7 @@
 class VM_CollectForMetadataAllocation: public VM_GC_Operation {
  private:
   MetaWord*                _result;
-  size_t      _size;                       // size of object to be allocated
+  size_t                   _size;     // size of object to be allocated
   Metaspace::MetadataType  _mdtype;
   ClassLoaderData*         _loader_data;
  public:
--- a/src/share/vm/interpreter/abstractInterpreter.hpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/interpreter/abstractInterpreter.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -320,6 +320,7 @@
   void bang_stack_shadow_pages(bool native_call);
 
   void generate_all();
+  void initialize_method_handle_entries();
 
  public:
   AbstractInterpreterGenerator(StubQueue* _code);
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -235,10 +235,6 @@
 #endif
 #endif
 
-// JavaStack Implementation
-#define MORE_STACK(count)  \
-    (topOfStack -= ((count) * Interpreter::stackElementWords))
-
 
 #define UPDATE_PC(opsize) {pc += opsize; }
 /*
@@ -575,7 +571,7 @@
 
 /* 0xE0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
 /* 0xE4 */ &&opc_default,     &&opc_fast_aldc,      &&opc_fast_aldc_w,  &&opc_return_register_finalizer,
-/* 0xE8 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
+/* 0xE8 */ &&opc_invokehandle,&&opc_default,        &&opc_default,      &&opc_default,
 /* 0xEC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
 
 /* 0xF0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
@@ -1773,7 +1769,7 @@
 
           oop obj;
           if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
-            Klass* k = (Klass*) cache->f1();
+            Klass* k = cache->f1_as_klass();
             obj = k->java_mirror();
             MORE_STACK(1);  // Assume single slot push
           } else {
@@ -1885,7 +1881,7 @@
             --count;
           }
           if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
-            Klass* k = (Klass*) cache->f1();
+            Klass* k = cache->f1_as_klass();
             obj = k->java_mirror();
           } else {
             --count;
@@ -2190,6 +2186,7 @@
       }
 
       CASE(_invokedynamic): {
+
         if (!EnableInvokeDynamic) {
           // We should not encounter this bytecode if !EnableInvokeDynamic.
           // The verifier will stop it.  However, if we get past the verifier,
@@ -2199,30 +2196,68 @@
           ShouldNotReachHere();
         }
 
-        int index = Bytes::get_native_u4(pc+1);
+        u4 index = Bytes::get_native_u4(pc+1);
+        ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
 
         // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.)
         // This kind of CP cache entry does not need to match the flags byte, because
         // there is a 1-1 relation between bytecode type and CP entry type.
-        ConstantPool* constants = METHOD->constants();
-        oop result = constants->resolved_references()->obj_at(index);
-        if (result == NULL) {
+        if (! cache->is_resolved((Bytecodes::Code) opcode)) {
           CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD),
                   handle_exception);
-          result = THREAD->vm_result();
+          cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
         }
 
-        VERIFY_OOP(result);
-        oop method_handle = java_lang_invoke_CallSite::target(result);
-        CHECK_NULL(method_handle);
-
-        istate->set_msg(call_method_handle);
-        istate->set_callee((Method*) method_handle);
+        Method* method = cache->f1_as_method();
+        VERIFY_OOP(method);
+
+        if (cache->has_appendix()) {
+          ConstantPool* constants = METHOD->constants();
+          SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
+          MORE_STACK(1);
+        }
+
+        istate->set_msg(call_method);
+        istate->set_callee(method);
+        istate->set_callee_entry_point(method->from_interpreted_entry());
         istate->set_bcp_advance(5);
 
         UPDATE_PC_AND_RETURN(0); // I'll be back...
       }
 
+      CASE(_invokehandle): {
+
+        if (!EnableInvokeDynamic) {
+          ShouldNotReachHere();
+        }
+
+        u2 index = Bytes::get_native_u2(pc+1);
+        ConstantPoolCacheEntry* cache = cp->entry_at(index);
+
+        if (! cache->is_resolved((Bytecodes::Code) opcode)) {
+          CALL_VM(InterpreterRuntime::resolve_invokehandle(THREAD),
+                  handle_exception);
+          cache = cp->entry_at(index);
+        }
+
+        Method* method = cache->f1_as_method();
+
+        VERIFY_OOP(method);
+
+        if (cache->has_appendix()) {
+          ConstantPool* constants = METHOD->constants();
+          SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
+          MORE_STACK(1);
+        }
+
+        istate->set_msg(call_method);
+        istate->set_callee(method);
+        istate->set_callee_entry_point(method->from_interpreted_entry());
+        istate->set_bcp_advance(3);
+
+        UPDATE_PC_AND_RETURN(0); // I'll be back...
+      }
+
       CASE(_invokeinterface): {
         u2 index = Bytes::get_native_u2(pc+1);
 
--- a/src/share/vm/interpreter/bytecodeInterpreter.hpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/interpreter/bytecodeInterpreter.hpp	Thu Nov 08 16:48:01 2012 -0800
@@ -50,6 +50,10 @@
 
 #ifdef CC_INTERP
 
+// JavaStack Implementation
+#define MORE_STACK(count)  \
+    (topOfStack -= ((count) * Interpreter::stackElementWords))
+
 // CVM definitions find hotspot equivalents...
 
 union VMJavaVal64 {
@@ -107,7 +111,6 @@
          rethrow_exception,         // unwinding and throwing exception
          // requests to frame manager from C++ interpreter
          call_method,               // request for new frame from interpreter, manager responds with method_entry
-         call_method_handle,        // like the above, except the callee is a method handle
          return_from_method,        // request from interpreter to unwind, manager responds with method_continue
          more_monitors,             // need a new monitor
          throwing_exception,        // unwind stack and rethrow
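
MORE_STACK moves from bytecodeInterpreter.cpp into this header so the new invokedynamic/invokehandle code and the rest of the C++ interpreter share one definition. The subtraction is the point: the operand stack grows toward lower addresses, so claiming count slots means decrementing topOfStack by count words. A tiny sketch of the store-then-claim pairing used with SET_STACK_OBJECT above, under a hypothetical stack layout:

    #include <cassert>
    #include <cstdint>

    #define MORE_STACK(count) (topOfStack -= (count))

    int main() {
      intptr_t stack[16];
      intptr_t* topOfStack = stack + 15; // next free slot; stack grows down

      topOfStack[0] = 42; // write the value into the free slot...
      MORE_STACK(1);      // ...then claim it, as SET_STACK_OBJECT + MORE_STACK do

      assert(topOfStack[1] == 42); // the pushed value is now on top
      return 0;
    }
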
--- a/src/share/vm/interpreter/cppInterpreter.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/interpreter/cppInterpreter.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -117,7 +117,6 @@
     method_entry(empty);
     method_entry(accessor);
     method_entry(abstract);
-    method_entry(method_handle);
     method_entry(java_lang_math_sin   );
     method_entry(java_lang_math_cos   );
     method_entry(java_lang_math_tan   );
@@ -125,7 +124,12 @@
     method_entry(java_lang_math_sqrt  );
     method_entry(java_lang_math_log   );
     method_entry(java_lang_math_log10 );
+    method_entry(java_lang_math_pow );
+    method_entry(java_lang_math_exp );
     method_entry(java_lang_ref_reference_get);
+
+    initialize_method_handle_entries();
+
     Interpreter::_native_entry_begin = Interpreter::code()->code_end();
     method_entry(native);
     method_entry(native_synchronized);
--- a/src/share/vm/interpreter/interpreter.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/interpreter/interpreter.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -464,3 +464,11 @@
     }
   }
 }
+
+void AbstractInterpreterGenerator::initialize_method_handle_entries() {
+  // method handle entry kinds are generated later in MethodHandlesAdapterGenerator::generate:
+  for (int i = Interpreter::method_handle_invoke_FIRST; i <= Interpreter::method_handle_invoke_LAST; i++) {
+    Interpreter::MethodKind kind = (Interpreter::MethodKind) i;
+    Interpreter::_entry_table[kind] = Interpreter::_entry_table[Interpreter::abstract];
+  }
+}
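
initialize_method_handle_entries() pre-fills every method-handle slot of the interpreter's entry table with the abstract-method entry; MethodHandlesAdapterGenerator overwrites the real adapters later, so any kind it misses fails safely instead of jumping through an uninitialized entry. The pattern in miniature, with a hypothetical table and kinds:

    #include <cassert>

    enum Kind { k_normal, k_mh_first, k_mh_middle, k_mh_last, k_abstract, k_count };
    typedef void (*entry_t)();

    static void abstract_entry() { /* would raise AbstractMethodError */ }

    static entry_t entry_table[k_count];

    // Same shape as the loop above: alias every method-handle kind to the
    // abstract entry until the real adapters are generated.
    static void initialize_method_handle_entries() {
      for (int i = k_mh_first; i <= k_mh_last; i++) {
        entry_table[i] = entry_table[k_abstract];
      }
    }

    int main() {
      entry_table[k_abstract] = abstract_entry;
      initialize_method_handle_entries();
      assert(entry_table[k_mh_middle] == abstract_entry);
      return 0;
    }
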
--- a/src/share/vm/interpreter/linkResolver.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/interpreter/linkResolver.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "classfile/defaultMethods.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
@@ -132,7 +133,7 @@
       // don't force compilation, resolve was on behalf of compiler
       return;
     }
-    if (InstanceKlass::cast(selected_method->method_holder())->is_not_initialized()) {
+    if (selected_method->method_holder()->is_not_initialized()) {
       // 'is_not_initialized' means not only '!is_initialized', but also that
       // initialization has not been started yet ('!being_initialized')
       // Do not force compilation of methods in uninitialized classes.
@@ -404,21 +405,13 @@
                                   Symbol* method_name, Symbol* method_signature,
                                   KlassHandle current_klass, bool check_access, TRAPS) {
 
-  // 1. check if klass is not interface
-  if (resolved_klass->is_interface()) {
-    ResourceMark rm(THREAD);
-    char buf[200];
-    jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected", Klass::cast(resolved_klass())->external_name());
-    THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
-  }
-
   Handle nested_exception;
 
-  // 2. lookup method in resolved klass and its super klasses
+  // 1. lookup method in resolved klass and its super klasses
   lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK);
 
   if (resolved_method.is_null()) { // not found in the class hierarchy
-    // 3. lookup method in all the interfaces implemented by the resolved klass
+    // 2. lookup method in all the interfaces implemented by the resolved klass
     lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);
 
     if (resolved_method.is_null()) {
@@ -432,7 +425,7 @@
     }
 
     if (resolved_method.is_null()) {
-      // 4. method lookup failed
+      // 3. method lookup failed
       ResourceMark rm(THREAD);
       THROW_MSG_CAUSE(vmSymbols::java_lang_NoSuchMethodError(),
                       Method::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
@@ -442,6 +435,15 @@
     }
   }
 
+  // 4. check if klass is not interface
+  if (resolved_klass->is_interface() && resolved_method->is_abstract()) {
+    ResourceMark rm(THREAD);
+    char buf[200];
+    jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected",
+        resolved_klass()->external_name());
+    THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+  }
+
   // 5. check if method is concrete
   if (resolved_method->is_abstract() && !resolved_klass->is_abstract()) {
     ResourceMark rm(THREAD);
@@ -464,7 +466,7 @@
 
     // check loader constraints
     Handle loader (THREAD, InstanceKlass::cast(current_klass())->class_loader());
-    Handle class_loader (THREAD, InstanceKlass::cast(resolved_method->method_holder())->class_loader());
+    Handle class_loader (THREAD, resolved_method->method_holder()->class_loader());
     {
       ResourceMark rm(THREAD);
       char* failed_type_name =
@@ -526,7 +528,7 @@
   if (check_access) {
     HandleMark hm(THREAD);
     Handle loader (THREAD, InstanceKlass::cast(current_klass())->class_loader());
-    Handle class_loader (THREAD, InstanceKlass::cast(resolved_method->method_holder())->class_loader());
+    Handle class_loader (THREAD, resolved_method->method_holder()->class_loader());
     {
       ResourceMark rm(THREAD);
       char* failed_type_name =
@@ -743,6 +745,27 @@
                                                    Symbol* method_name, Symbol* method_signature,
                                                    KlassHandle current_klass, bool check_access, TRAPS) {
 
+  if (resolved_klass->is_interface() && current_klass() != NULL) {
+    // If the target class is a direct interface, treat this as a "super"
+    // default call.
+    //
+    // If the current method is an overpass that happens to call a direct
+    // super-interface's method, then we'll end up rerunning the default method
+    // analysis even though we don't need to, but that's ok since it will end
+    // up with the same answer.
+    InstanceKlass* ik = InstanceKlass::cast(current_klass());
+    Array<Klass*>* interfaces = ik->local_interfaces();
+    int num_interfaces = interfaces->length();
+    for (int index = 0; index < num_interfaces; index++) {
+      if (interfaces->at(index) == resolved_klass()) {
+        Method* method = DefaultMethods::find_super_default(current_klass(),
+            resolved_klass(), method_name, method_signature, CHECK);
+        resolved_method = methodHandle(THREAD, method);
+        return;
+      }
+    }
+  }
+
   resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
 
   // check if method name is <init>, that it is found in same klass as static type
@@ -784,11 +807,17 @@
   { KlassHandle method_klass  = KlassHandle(THREAD,
                                             resolved_method->method_holder());
 
-    if (check_access &&
+    const bool direct_calling_default_method =
+      resolved_klass() != NULL && resolved_method() != NULL &&
+      resolved_klass->is_interface() && !resolved_method->is_abstract();
+
+    if (!direct_calling_default_method &&
+        check_access &&
         // a) check if ACC_SUPER flag is set for the current class
         current_klass->is_super() &&
         // b) check if the method class is a superclass of the current class (superclass relation is not reflexive!)
-        current_klass->is_subtype_of(method_klass()) && current_klass() != method_klass() &&
+        current_klass->is_subtype_of(method_klass()) &&
+        current_klass() != method_klass() &&
         // c) check if the method is not <init>
         resolved_method->name() != vmSymbols::object_initializer_name()) {
       // Lookup super method
@@ -881,12 +910,12 @@
 
   // Virtual methods cannot be resolved before its klass has been linked, for otherwise the Method*'s
   // has not been rewritten, and the vtable initialized.
-  assert(InstanceKlass::cast(resolved_method->method_holder())->is_linked(), "must be linked");
+  assert(resolved_method->method_holder()->is_linked(), "must be linked");
 
   // Virtual methods cannot be resolved before its klass has been linked, for otherwise the Method*'s
   // has not been rewritten, and the vtable initialized. Make sure to do this after the nullcheck, since
   // a missing receiver might result in a bogus lookup.
-  assert(InstanceKlass::cast(resolved_method->method_holder())->is_linked(), "must be linked");
+  assert(resolved_method->method_holder()->is_linked(), "must be linked");
 
   // do lookup based on receiver klass using the vtable index
   if (resolved_method->method_holder()->is_interface()) { // miranda method
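
The new block in the special-call resolution path only reroutes to DefaultMethods::find_super_default when the resolved interface appears in the current class's own local_interfaces, i.e. is a direct superinterface; inherited interfaces do not qualify, hence the plain linear scan. A reduced sketch of that membership test, with hypothetical types:

    #include <cassert>
    #include <vector>

    // Hypothetical stand-in: only the "implements" list matters here.
    struct Klass {
      std::vector<const Klass*> local_interfaces; // direct superinterfaces only
    };

    // Mirrors the scan in resolve_special_call: true only when the class
    // itself names the interface, not when it merely inherits it.
    static bool directly_implements(const Klass& k, const Klass* iface) {
      for (size_t i = 0; i < k.local_interfaces.size(); i++) {
        if (k.local_interfaces[i] == iface) return true;
      }
      return false;
    }

    int main() {
      Klass iface, sub;
      sub.local_interfaces.push_back(&iface);
      assert(directly_implements(sub, &iface));
      return 0;
    }
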
--- a/src/share/vm/interpreter/templateInterpreter.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/interpreter/templateInterpreter.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -373,11 +373,7 @@
   method_entry(java_lang_math_pow  )
   method_entry(java_lang_ref_reference_get)
 
-  // method handle entry kinds are generated later in MethodHandlesAdapterGenerator::generate:
-  for (int i = Interpreter::method_handle_invoke_FIRST; i <= Interpreter::method_handle_invoke_LAST; i++) {
-    Interpreter::MethodKind kind = (Interpreter::MethodKind) i;
-    Interpreter::_entry_table[kind] = Interpreter::_entry_table[Interpreter::abstract];
-  }
+  initialize_method_handle_entries();
 
   // all native method kinds (must be one contiguous block)
   Interpreter::_native_entry_begin = Interpreter::code()->code_end();
--- a/src/share/vm/memory/binaryTreeDictionary.cpp	Wed Oct 31 16:20:03 2012 -0700
+++ b/src/share/vm/memory/binaryTreeDictionary.cpp	Thu Nov 08 16:48:01 2012 -0800
@@ -25,9 +25,15 @@
 #include "precompiled.hpp"
 #include "gc_implementation/shared/allocationStats.hpp"
 #include "memory/binaryTreeDictionary.hpp"
+#include "memory/freeList.hpp"
+#include "memory/freeBlockDictionary.hpp"
+#include "memory/metablock.hpp"
+#include "memory/metachunk.hpp"
 #include "runtime/globals.hpp"
 #include "utilities/ostream.hpp"
 #ifndef SERIALGC
+#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
+#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
 #endif // SERIALGC
@@ -37,15 +43,18 @@
 // This is currently used in the Concurrent Mark&Sweep implementation.
 ////////////////////////////////////////////////////////////////////////////////
 
-template <class Chunk>
-TreeChunk<Chunk>* TreeChunk<Chunk>::as_TreeChunk(Chunk* fc) {
+template <class Chunk_t, template <class> class FreeList_t>
+size_t TreeChunk<Chunk_t, FreeList_t>::_min_tree_chunk_size = sizeof(TreeChunk<Chunk_t,  FreeList_t>)/HeapWordSize;
+
+template <class Chunk_t, template <class> class FreeList_t>
+TreeChunk<Chunk_t, FreeList_t>* TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(Chunk_t* fc) {
   // Do some assertion checking here.
-  return (TreeChunk<Chunk>*) fc;
+  return (TreeChunk<Chunk_t, FreeList_t>*) fc;
 }
 
-template <class Chunk>
-void TreeChunk<Chunk>::verify_tree_chunk_list() const {
-  TreeChunk<Chunk>* nextTC = (TreeChunk<Chunk>*)next();
+template <class Chunk_t, template <class> class FreeList_t>
+void TreeChunk<Chunk_t, FreeList_t>::verify_tree_chunk_list() const {
+  TreeChunk<Chunk_t, FreeList_t>* nextTC = (TreeChunk<Chunk_t, FreeList_t>*)next();
   if (prev() != NULL) { // interior list node shouldn'r have tree fields
     guarantee(embedded_list()->parent() == NULL && embedded_list()->left() == NULL &&
               embedded_list()->right()  == NULL, "should be clear");
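
These hunks rewrite `template <class Chunk>` into `template <class Chunk_t, template <class> class FreeList_t>`: the dictionary is now parameterized over the free-list template itself, which is what lets CMS instantiate it with AdaptiveFreeList while other clients keep the plain FreeList. The mechanics in miniature (hypothetical names, a sketch only):

    template <class Chunk> struct PlainList    { enum { kind = 1 }; };
    template <class Chunk> struct AdaptiveList { enum { kind = 2 }; };

    // FreeList_t is a template, not a type: the dictionary applies it to
    // its own chunk type, exactly as BinaryTreeDictionary now does.
    template <class Chunk_t, template <class> class FreeList_t>
    struct Dictionary {
      FreeList_t<Chunk_t> _list;
      int list_kind() const { return FreeList_t<Chunk_t>::kind; }
    };

    int main() {
      Dictionary<int, PlainList>    d1;
      Dictionary<int, AdaptiveList> d2;
      return (d1.list_kind() == 1 && d2.list_kind() == 2) ? 0 : 1;
    }
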
@@ -57,53 +66,113 @@
   }
 }
 
+template <class Chunk_t, template <class> class FreeList_t>
+TreeList<Chunk_t, FreeList_t>::TreeList() {}
 
-template <class Chunk>
-TreeList<Chunk>* TreeList<Chunk>::as_TreeList(TreeChunk<Chunk>* tc) {
+template <class Chunk_t, template <class> class FreeList_t>
+TreeList<Chunk_t, FreeList_t>*
+TreeList<Chunk_t, FreeList_t>::as_TreeList(TreeChunk<Chunk_t,FreeList_t>* tc) {
   // This first free chunk in the list will be the tree list.
-  assert(tc->size() >= BinaryTreeDictionary<Chunk>::min_tree_chunk_size, "Chunk is too small for a TreeChunk");
-  TreeList<Chunk>* tl = tc->embedded_list();
+  assert((tc->size() >= (TreeChunk<Chunk_t, FreeList_t>::min_size())),
+    "Chunk is too small for a TreeChunk");
+  TreeList<Chunk_t, FreeList_t>* tl = tc->embedded_list();
+  tl->initialize();
   tc->set_list(tl);
-#ifdef ASSERT
-  tl->set_protecting_lock(NULL);
-#endif
-  tl->set_hint(0);
   tl->set_size(tc->size());
   tl->link_head(tc);
   tl->link_tail(tc);
   tl->set_count(1);
-  tl->init_statistics(true /* split_birth */);
-  tl->set_parent(NULL);
-  tl->set_left(NULL);
-  tl->set_right(NULL);
+
   return tl;
 }
 
-template <class Chunk>
-TreeList<Chunk>* TreeList<Chunk>::as_TreeList(HeapWord* addr, size_t size) {
-  TreeChunk<Chunk>* tc = (TreeChunk<Chunk>*) addr;
-  assert(size >= BinaryTreeDictionary<Chunk>::min_tree_chunk_size, "Chunk is too small for a TreeChunk");
-  // The space in the heap will have been mangled initially but
-  // is not remangled when a free chunk is returned to the free list
+
+template <class Chunk_t, template <class> class FreeList_t>
+Chunk_t*
+BinaryTreeDictionary<Chunk_t, FreeList_t>::get_chunk(size_t size, enum FreeBlockDictionary<Chunk_t>::Dither dither) {
+  FreeBlockDictionary<Chunk_t>::verify_par_locked();
+  Chunk_t* res = get_chunk_from_tree(size, dither);
+  assert(res == NULL || res->is_free(),
+         "Should be returning a free chunk");
+  assert(dither != FreeBlockDictionary<Chunk_t>::exactly ||
+         res->size() == size, "Not correct size");
+  return res;
+}
+
+template <class Chunk_t, template <class> class FreeList_t>
+TreeList<Chunk_t, FreeList_t>*
+TreeList<Chunk_t, FreeList_t>::as_TreeList(HeapWord* addr, size_t size) {
+  TreeChunk<Chunk_t, FreeList_t>* tc = (TreeChunk<Chunk_t, FreeList_t>*) addr;
+  assert((size >= TreeChunk<Chunk_t, FreeList_t>::min_size()),
+    "Chunk is too small for a TreeChunk");
+  // The space will have been mangled initially but
+  // is not remangled when a Chunk_t is returned to the free list
   // (since it is used to maintain the chunk on the free list).
-  assert((ZapUnusedHeapArea &&
-          SpaceMangler::is_mangled((HeapWord*) tc->size_addr()) &&
-          SpaceMangler::is_mangled((HeapWord*) tc->prev_addr()) &&
-          SpaceMangler::is_mangled((HeapWord*) tc->next_addr())) ||
-          (tc->size() == 0 && tc->prev() == NULL && tc->next() == NULL),
-    "Space should be clear or mangled");
+  tc->assert_is_mangled();
   tc->set_size(size);
   tc->link_prev(NULL);
   tc->link_next(NULL);
-  TreeList<Chunk>* tl = TreeList<Chunk>::as_TreeList(tc);
+  TreeList<Chunk_t, FreeList_t>* tl = TreeList<Chunk_t, FreeList_t>::as_TreeList(tc);
   return tl;
 }
 
-template <class Chunk>
-TreeList<Chunk>* TreeList<Chunk>::remove_chunk_replace_if_needed(TreeChunk<Chunk>* tc) {
 
-  TreeList<Chunk>* retTL = this;
-  Chunk* list = head();
+#ifndef SERIALGC
+// Specialize for AdaptiveFreeList, which tries to avoid
+// splitting a chunk of a size that is under-populated in favor of
+// an over-populated size.  The general get_better_list() just returns
+// the current list.
+template <>
+TreeList<FreeChunk, AdaptiveFreeList>*
+TreeList<FreeChunk, AdaptiveFreeList>::get_better_list(
+  BinaryTreeDictionary<FreeChunk, ::AdaptiveFreeList>* dictionary) {
+  // A candidate chunk has been found.  If it is already under-
+  // populated, get a chunk associated with the hint for this
+  // chunk.
+
+  TreeList<FreeChunk, ::AdaptiveFreeList>* curTL = this;
+  if (surplus() <= 0) {
+    /* Use the hint to find a size with a surplus, and reset the hint. */
+    TreeList<FreeChunk, ::AdaptiveFreeList>* hintTL = this;
+    while (hintTL->hint() != 0) {
+      assert(hintTL->hint() > hintTL->size(),
+        "hint points in the wrong direction");
+      hintTL = dictionary->find_list(hintTL->hint());
+      assert(curTL != hintTL, "Infinite loop");
+      if (hintTL == NULL ||
+          hintTL == curTL /* Should not happen but protect against it */ ) {
+        // No useful hint.  Set the hint to 0 and go on.
+        curTL->set_hint(0);
+        break;
+      }
+      assert(hintTL->size() > curTL->size(), "hint is inconsistent");
+      if (hintTL->surplus() > 0) {
+        // The hint led to a list that has a surplus.  Use it.
+        // Set the hint for the candidate to an overpopulated
+        // size.
+        curTL->set_hint(hintTL->size());
+        // Change the candidate.
+        curTL = hintTL;
+        break;
+      }
+    }
+  }
+  return curTL;
+}
+#endif // SERIALGC
+
+template <class Chunk_t, template <class> class FreeList_t>
+TreeList<Chunk_t, FreeList_t>*
+TreeList<Chunk_t, FreeList_t>::get_better_list(
+  BinaryTreeDictionary<Chunk_t, FreeList_t>* dictionary) {
+  return this;
+}
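
The AdaptiveFreeList specialization above is easier to see in isolation. Below is a hedged, standalone sketch of the hint chase with hypothetical names; it uses direct pointers where the real code stores a size and resolves it through dictionary->find_list():

```cpp
#include <cassert>
#include <cstddef>

// Hypothetical size-class record; hint points at a strictly larger class.
struct SizeClass {
  size_t     size;
  long       surplus;   // > 0 means over-populated
  SizeClass* hint;      // next class to try, or nullptr
};

// get_better_list(): prefer splitting an over-populated class over an
// under-populated one.  Loosely mirrors the loop in the specialization.
SizeClass* get_better_class(SizeClass* cur) {
  if (cur->surplus > 0) return cur;          // already a good donor
  for (SizeClass* h = cur->hint; h != nullptr; h = h->hint) {
    assert(h->size > cur->size && "hint points in the wrong direction");
    if (h->surplus > 0) {
      cur->hint = h;                         // remember the useful hint
      return h;                              // switch candidates
    }
  }
  cur->hint = nullptr;                       // no useful hint found
  return cur;
}

int main() {
  SizeClass big   = {256, +4, nullptr};
  SizeClass mid   = {64,  -1, &big};
  SizeClass small = {16,  -2, &mid};
  assert(get_better_class(&small) == &big);  // chased two hints
  assert(small.hint == &big);                // hint was rewired
  return 0;
}
```
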
+
+template <class Chunk_t, template <class> class FreeList_t>
+TreeList<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::remove_chunk_replace_if_needed(TreeChunk<Chunk_t, FreeList_t>* tc) {
+
+  TreeList<Chunk_t, FreeList_t>* retTL = this;
+  Chunk_t* list = head();
   assert(!list || list != list->next(), "Chunk on list twice");
   assert(tc != NULL, "Chunk being removed is NULL");
   assert(parent() == NULL || this == parent()->left() ||
@@ -112,13 +181,13 @@
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
 
-  Chunk* prevFC = tc->prev();
-  TreeChunk<Chunk>* nextTC = TreeChunk<Chunk>::as_TreeChunk(tc->next());
+  Chunk_t* prevFC = tc->prev();
+  TreeChunk<Chunk_t, FreeList_t>* nextTC = TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(tc->next());
   assert(list != NULL, "should have at least the target chunk");
 
   // Is this the first item on the list?
   if (tc == list) {
-    // The "getChunk..." functions for a TreeList<Chunk> will not return the
+    // The "getChunk..." functions for a TreeList<Chunk_t, FreeList_t> will not return the
     // first chunk in the list unless it is the last chunk in the list
     // because the first chunk is also acting as the tree node.
     // When coalescing happens, however, the first chunk in a tree
@@ -127,8 +196,8 @@
     // allocated when the sweeper yields (giving up the free list lock)
     // to allow mutator activity.  If this chunk is the first in the
     // list and is not the last in the list, do the work to copy the
-    // TreeList<Chunk> from the first chunk to the next chunk and update all
-    // the TreeList<Chunk> pointers in the chunks in the list.
+    // TreeList<Chunk_t, FreeList_t> from the first chunk to the next chunk and update all
+    // the TreeList<Chunk_t, FreeList_t> pointers in the chunks in the list.
     if (nextTC == NULL) {
       assert(prevFC == NULL, "Not last chunk in the list");
       set_tail(NULL);
@@ -141,11 +210,11 @@
       // This can be slow for a long list.  Consider having
       // an option that does not allow the first chunk on the
       // list to be coalesced.
-      for (TreeChunk<Chunk>* curTC = nextTC; curTC != NULL;
-          curTC = TreeChunk<Chunk>::as_TreeChunk(curTC->next())) {
+      for (TreeChunk<Chunk_t, FreeList_t>* curTC = nextTC; curTC != NULL;
+          curTC = TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(curTC->next())) {
         curTC->set_list(retTL);
       }
-      // Fix the parent to point to the new TreeList<Chunk>.
+      // Fix the parent to point to the new TreeList<Chunk_t, FreeList_t>.
       if (retTL->parent() != NULL) {
         if (this == retTL->parent()->left()) {
           retTL->parent()->set_left(retTL);
@@ -176,9 +245,9 @@
     prevFC->link_after(nextTC);
   }
 
-  // Below this point the embeded TreeList<Chunk> being used for the
+  // Below this point the embedded TreeList<Chunk_t, FreeList_t> being used for the
   // tree node may have changed. Don't use "this"
-  // TreeList<Chunk>*.
+  // TreeList<Chunk_t, FreeList_t>*.
   // chunk should still be a free chunk (bit set in _prev)
   assert(!retTL->head() || retTL->size() == retTL->head()->size(),
     "Wrong sized chunk in list");
@@ -188,7 +257,7 @@
     tc->set_list(NULL);
     bool prev_found = false;
     bool next_found = false;
-    for (Chunk* curFC = retTL->head();
+    for (Chunk_t* curFC = retTL->head();
          curFC != NULL; curFC = curFC->next()) {
       assert(curFC != tc, "Chunk is still in list");
       if (curFC == prevFC) {
@@ -215,8 +284,8 @@
   return retTL;
 }
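
The subtlest branch above is the one where the chunk being removed is the head, because the head also carries the tree node. A compressed sketch of just that migration, under hypothetical toy types: the header moves into the next chunk and every survivor's back-pointer moves with it.

```cpp
#include <cassert>
#include <cstddef>

struct ToyChunk;
struct ToyList { ToyChunk* head; ToyChunk* tail; };

// Hypothetical chunk that carries its list header inline, as TreeChunk does.
struct ToyChunk {
  ToyChunk* prev;
  ToyChunk* next;
  ToyList*  list;      // back-pointer to whichever header is live
  ToyList   embedded;  // header storage inside the chunk
};

// Head-removal case of remove_chunk_replace_if_needed().
ToyList* remove_head(ToyChunk* old_head) {
  ToyChunk* new_head = old_head->next;
  assert(new_head && "only chunk: caller empties the list instead");
  ToyList* retTL = &new_head->embedded;
  retTL->head = new_head;
  retTL->tail = old_head->list->tail;
  new_head->prev = nullptr;
  for (ToyChunk* c = new_head; c != nullptr; c = c->next) {
    c->list = retTL;               // the O(list length) fix-up loop above
  }
  old_head->list = nullptr;        // removed chunk is no longer on any list
  return retTL;                    // caller re-links parent/left/right
}

int main() {
  ToyChunk a{}, b{}, c{};
  a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;
  a.embedded = {&a, &c};
  a.list = b.list = c.list = &a.embedded;
  ToyList* tl = remove_head(&a);
  assert(tl == &b.embedded && tl->head == &b && tl->tail == &c);
  assert(b.list == tl && c.list == tl);
  return 0;
}
```
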
 
-template <class Chunk>
-void TreeList<Chunk>::return_chunk_at_tail(TreeChunk<Chunk>* chunk) {
+template <class Chunk_t, template <class> class FreeList_t>
+void TreeList<Chunk_t, FreeList_t>::return_chunk_at_tail(TreeChunk<Chunk_t, FreeList_t>* chunk) {
   assert(chunk != NULL, "returning NULL chunk");
   assert(chunk->list() == this, "list should be set for chunk");
   assert(tail() != NULL, "The tree list is embedded in the first chunk");
@@ -225,12 +294,12 @@
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
 
-  Chunk* fc = tail();
+  Chunk_t* fc = tail();
   fc->link_after(chunk);
   link_tail(chunk);
 
   assert(!tail() || size() == tail()->size(), "Wrong sized chunk in list");
-  increment_count();
+  FreeList_t<Chunk_t>::increment_count();
   debug_only(increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
@@ -238,10 +307,10 @@
 
 // Add this chunk at the head of the list.  "At the head of the list"
 // is defined to be after the chunk pointed to by head().  This is
-// because the TreeList<Chunk> is embedded in the first TreeChunk<Chunk> in the
-// list.  See the definition of TreeChunk<Chunk>.
-template <class Chunk>
-void TreeList<Chunk>::return_chunk_at_head(TreeChunk<Chunk>* chunk) {
+// because the TreeList<Chunk_t, FreeList_t> is embedded in the first TreeChunk<Chunk_t, FreeList_t> in the
+// list.  See the definition of TreeChunk<Chunk_t, FreeList_t>.
+template <class Chunk_t, template <class> class FreeList_t>
+void TreeList<Chunk_t, FreeList_t>::return_chunk_at_head(TreeChunk<Chunk_t, FreeList_t>* chunk) {
   assert(chunk->list() == this, "list should be set for chunk");
   assert(head() != NULL, "The tree list is embedded in the first chunk");
   assert(chunk != NULL, "returning NULL chunk");
@@ -249,7 +318,7 @@
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
 
-  Chunk* fc = head()->next();
+  Chunk_t* fc = head()->next();
   if (fc != NULL) {
     chunk->link_after(fc);
   } else {
@@ -258,28 +327,38 @@
   }
   head()->link_after(chunk);
   assert(!head() || size() == head()->size(), "Wrong sized chunk in list");
-  increment_count();
+  FreeList_t<Chunk_t>::increment_count();
   debug_only(increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
 }
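
A short sketch of the invariant both return paths preserve: head() is pinned because it is the tree node, so "insert at head" really means "splice in right behind head()". Toy names are hypothetical:

```cpp
#include <cassert>
#include <cstddef>

struct ToyChunk { ToyChunk* prev; ToyChunk* next; };
struct ToyList  { ToyChunk* head; ToyChunk* tail; size_t count; };

// return_chunk_at_head(): the first chunk must not move, so the
// returned chunk goes immediately after it.
void return_at_head(ToyList* tl, ToyChunk* chunk) {
  ToyChunk* fc = tl->head->next;
  chunk->prev = tl->head;
  chunk->next = fc;
  tl->head->next = chunk;
  if (fc != nullptr) {
    fc->prev = chunk;            // spliced into the middle
  } else {
    tl->tail = chunk;            // list was a singleton: fix the tail
  }
  tl->count++;                   // FreeList_t<Chunk_t>::increment_count()
}

int main() {
  ToyChunk head{nullptr, nullptr}, ret{nullptr, nullptr};
  ToyList tl{&head, &head, 1};
  return_at_head(&tl, &ret);
  assert(tl.head == &head && head.next == &ret && tl.tail == &ret);
  assert(tl.count == 2);
  return 0;
}
```
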
 
-template <class Chunk>
-TreeChunk<Chunk>* TreeList<Chunk>::head_as_TreeChunk() {
-  assert(head() == NULL || TreeChunk<Chunk>::as_TreeChunk(head())->list() == this,
-    "Wrong type of chunk?");
-  return TreeChunk<Chunk>::as_TreeChunk(head());
+template <class Chunk_t, template <class> class FreeList_t>
+void TreeChunk<Chunk_t, FreeList_t>::assert_is_mangled() const {
+  assert((ZapUnusedHeapArea &&
+          SpaceMangler::is_mangled((HeapWord*) Chunk_t::size_addr()) &&
+          SpaceMangler::is_mangled((HeapWord*) Chunk_t::prev_addr()) &&
+          SpaceMangler::is_mangled((HeapWord*) Chunk_t::next_addr())) ||
+          (size() == 0 && prev() == NULL && next() == NULL),
+    "Space should be clear or mangled");
 }
 
-template <class Chunk>
-TreeChunk<Chunk>* TreeList<Chunk>::first_available() {
+template <class Chunk_t, template <class> class FreeList_t>
+TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::head_as_TreeChunk() {
+  assert(head() == NULL || (TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(head())->list() == this),
+    "Wrong type of chunk?");
+  return TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(head());
+}
+
+template <class Chunk_t, template <class> class FreeList_t>
+TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::first_available() {
   assert(head() != NULL, "The head of the list cannot be NULL");
-  Chunk* fc = head()->next();
-  TreeChunk<Chunk>* retTC;
+  Chunk_t* fc = head()->next();
+  TreeChunk<Chunk_t, FreeList_t>* retTC;
   if (fc == NULL) {
     retTC = head_as_TreeChunk();
   } else {
-    retTC = TreeChunk<Chunk>::as_TreeChunk(fc);
+    retTC = TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(fc);
   }
   assert(retTC->list() == this, "Wrong type of chunk.");
   return retTC;
@@ -288,41 +367,32 @@
 // Returns the block with the largest heap address amongst
 // those in the list for this size; potentially slow and expensive,
 // use with caution!
-template <class Chunk>
-TreeChunk<Chunk>* TreeList<Chunk>::largest_address() {
+template <class Chunk_t, template <class> class FreeList_t>
+TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::largest_address() {
   assert(head() != NULL, "The head of the list cannot be NULL");
-  Chunk* fc = head()->next();
-  TreeChunk<Chunk>* retTC;
+  Chunk_t* fc = head()->next();
+  TreeChunk<Chunk_t, FreeList_t>* retTC;
   if (fc == NULL) {
     retTC = head_as_TreeChunk();
   } else {
     // walk down the list and return the one with the highest
     // heap address among chunks of this size.
-    Chunk* last = fc;
+    Chunk_t* last = fc;
     while (fc != NULL) {
       if ((HeapWord*)last < (HeapWord*)fc) {
         last = fc;
       }
       fc = fc->next();
     }
-    retTC = TreeChunk<Chunk>::as_TreeChunk(last);
+    retTC = TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(last);
   }
   assert(retTC->list() == this, "Wrong type of chunk.");
   return retTC;
 }
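
Stripped of the TreeChunk casts, the scan above is a max-by-address walk that deliberately skips the pinned head chunk but must still consider the tail. A sketch with hypothetical names:

```cpp
#include <cassert>
#include <cstddef>

struct ToyChunk { ToyChunk* next; };

// largest_address(): highest-addressed chunk among the non-head chunks.
ToyChunk* largest_address(ToyChunk* head) {
  ToyChunk* fc = head->next;
  if (fc == nullptr) return head;          // singleton list
  ToyChunk* last = fc;
  for (; fc != nullptr; fc = fc->next) {
    if (last < fc) last = fc;              // compare heap addresses
  }
  return last;
}

int main() {
  ToyChunk chunks[3];                      // addresses ascend in the array
  ToyChunk* head = &chunks[1];
  head->next = &chunks[2];                 // highest address sits at the tail
  chunks[2].next = &chunks[0];
  chunks[0].next = nullptr;
  assert(largest_address(head) == &chunks[2]);
  return 0;
}
```
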
 
-template <class Chunk>
-BinaryTreeDictionary<Chunk>::BinaryTreeDictionary(bool adaptive_freelists, bool splay) :
-  _splay(splay), _adaptive_freelists(adaptive_freelists),
-  _total_size(0), _total_free_blocks(0), _root(0) {}
-
-template <class Chunk>
-BinaryTreeDictionary<Chunk>::BinaryTreeDictionary(MemRegion mr,
-                                           bool adaptive_freelists,
-                                           bool splay):
-  _adaptive_freelists(adaptive_freelists), _splay(splay)
-{
-  assert(mr.word_size() >= BinaryTreeDictionary<Chunk>::min_tree_chunk_size, "minimum chunk size");
+template <class Chunk_t, template <class> class FreeList_t>
+BinaryTreeDictionary<Chunk_t, FreeList_t>::BinaryTreeDictionary(MemRegion mr) {
+  assert((mr.word_size() >= min_size()), "minimum chunk size");
 
   reset(mr);
   assert(root()->left() == NULL, "reset check failed");
@@ -333,52 +403,48 @@
   assert(total_free_blocks() == 1, "reset check failed");
 }
 
-template <class Chunk>
-void BinaryTreeDictionary<Chunk>::inc_total_size(size_t inc) {
+template <class Chunk_t, template <class> class FreeList_t>
+void BinaryTreeDictionary<Chunk_t, FreeList_t>::inc_total_size(size_t inc) {
   _total_size = _total_size + inc;
 }
 
-template <class Chunk>
-void BinaryTreeDictionary<Chunk>::dec_total_size(size_t dec) {
+template <class Chunk_t, template <class> class FreeList_t>
+void BinaryTreeDictionary<Chunk_t, FreeList_t>::dec_total_size(size_t dec) {
   _total_size = _total_size - dec;
 }
 
-template <class Chunk>
-void BinaryTreeDictionary<Chunk>::reset(MemRegion mr) {
-  assert(mr.word_size() >= BinaryTreeDictionary<Chunk>::min_tree_chunk_size, "minimum chunk size");
-  set_root(TreeList<Chunk>::as_TreeList(mr.start(), mr.word_size()));
+template <class Chunk_t, template <class> class FreeList_t>
+void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset(MemRegion mr) {
+  assert((mr.word_size() >= min_size()), "minimum chunk size");
+  set_root(TreeList<Chunk_t, FreeList_t>::as_TreeList(mr.start(), mr.word_size()));
   set_total_size(mr.word_size());
   set_total_free_blocks(1);
 }
 
-template <class Chunk>
-void BinaryTreeDictionary<Chunk>::reset(HeapWord* addr, size_t byte_size) {
+template <class Chunk_t, template <class> class FreeList_t>
+void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset(HeapWord* addr, size_t byte_size) {
   MemRegion mr(addr, heap_word_size(byte_size));
   reset(mr);
 }
 
-template <class Chunk>
-void BinaryTreeDictionary<Chunk>::reset() {
+template <class Chunk_t, template <class> class FreeList_t>
+void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset() {
   set_root(NULL);
   set_total_size(0);
   set_total_free_blocks(0);
 }
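
Taken together, the constructor and the reset() overloads establish a simple initial state: the whole region is one free chunk, and that chunk is also the sole tree node. A sketch of those invariants with hypothetical names:

```cpp
#include <cassert>
#include <cstddef>

struct ToyNode { size_t size; ToyNode* left; ToyNode* right; };

struct ToyDictionary {
  ToyNode* root;
  size_t   total_size;           // in words
  size_t   total_free_blocks;

  void reset(ToyNode* storage, size_t word_size) {
    storage->size = word_size;   // as_TreeList(mr.start(), mr.word_size())
    storage->left = storage->right = nullptr;
    root = storage;
    total_size = word_size;      // set_total_size(mr.word_size())
    total_free_blocks = 1;       // set_total_free_blocks(1)
  }
  void reset() {                 // the empty-dictionary overload
    root = nullptr;
    total_size = 0;
    total_free_blocks = 0;
  }
};

int main() {
  ToyNode node;
  ToyDictionary dict;
  dict.reset(&node, 1024);
  assert(dict.root == &node && dict.root->left == nullptr);
  assert(dict.total_size == 1024 && dict.total_free_blocks == 1);
  dict.reset();
  assert(dict.root == nullptr);
  return 0;
}
```
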
 
 // Get a free block of size at least size from tree, or NULL.
-// If a splay step is requested, the removal algorithm (only) incorporates
-// a splay step as follows:
-// . the search proceeds down the tree looking for a possible
-//   match. At the (closest) matching location, an appropriate splay step is applied
-//   (zig, zig-zig or zig-zag). A chunk of the appropriate size is then returned
-//   if available, and if it's the last chunk, the node is deleted. A deteleted
-//   node is replaced in place by its tree successor.
-template <class Chunk>
-TreeChunk<Chunk>*
-BinaryTreeDictionary<Chunk>::get_chunk_from_tree(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither, bool splay)
+template <class Chunk_t, template <class> class FreeList_t>
+TreeChunk<Chunk_t, FreeList_t>*
+BinaryTreeDictionary<Chunk_t, FreeList_t>::get_chunk_from_tree(
+                              size_t size,
+                              enum FreeBlockDictionary<Chunk_t>::Dither dither)
 {
-  TreeList<Chunk> *curTL, *prevTL;
-  TreeChunk<Chunk>* retTC = NULL;
-  assert(size >= BinaryTreeDictionary<Chunk>::min_tree_chunk_size, "minimum chunk size");
+  TreeList<Chunk_t, FreeList_t> *curTL, *prevTL;
+  TreeChunk<Chunk_t, FreeList_t>* retTC = NULL;
+
+  assert((size >= min_size()), "minimum chunk size");
   if (FLSVerifyDictionary) {
     verify_tree();
   }
@@ -398,7 +464,7 @@
   }
   if (curTL == NULL) { // couldn't find exact match
 
-    if (dither == FreeBlockDictionary<Chunk>::exactly) return NULL;
+    if (dither == FreeBlockDictionary<Chunk_t>::exactly) return NULL;
 
     // try and find the next larger size by walking back up the search path
     for (curTL = prevTL; curTL != NULL;) {
@@ -410,46 +476,9 @@
   }
   if (curTL != NULL) {
     assert(curTL->size() >= size, "size inconsistency");
-    if (adaptive_freelists()) {
 
-      // A candidate chunk has been found.  If it is already under
-      // populated, get a chunk associated with the hint for this
-      // chunk.
-      if (curTL->surplus() <= 0) {
-        /* Use the hint to find a size with a surplus, and reset the hint. */
-        TreeList<Chunk>* hintTL = curTL;
-        while (hintTL->hint() != 0) {
-          assert(hintTL->hint() == 0 || hintTL->hint() > hintTL->size(),
-            "hint points in the wrong direction");
-          hintTL = find_list(hintTL->hint());
-          assert(curTL != hintTL, "Infinite loop");
-          if (hintTL == NULL ||
-              hintTL == curTL /* Should not happen but protect against it */ ) {
-            // No useful hint.  Set the hint to NULL and go on.
-            curTL->set_hint(0);
-            break;
-          }
-          assert(hintTL->size() > size, "hint is inconsistent");
-          if (hintTL->surplus() > 0) {
-            // The hint led to a list that has a surplus.  Use it.
-            // Set the hint for the candidate to an overpopulated
-            // size.
-            curTL->set_hint(hintTL->size());
-            // Change the candidate.
-            curTL = hintTL;
-            break;
-          }
-          // The evm code reset the hint of the candidate as
-          // at an interim point.  Why?  Seems like this leaves
-          // the hint pointing to a list that didn't work.
-          // curTL->set_hint(hintTL->size());
-        }
-      }
-    }
-    // don't waste time splaying if chunk's singleton
-    if (splay && curTL->head()->next() != NULL) {
-      semi_splay_step(curTL);
-    }
+    curTL = curTL->get_better_list(this);
+
     retTC = curTL->first_available();
     assert((retTC != NULL) && (curTL->count() > 0),
       "A list in the binary tree should not be NULL");
@@ -465,9 +494,9 @@
   return retTC;
 }
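
The search in get_chunk_from_tree() descends looking for an exact size class and, failing that, walks back up the search path for the next larger one. An equivalent standalone sketch (hypothetical names) that tracks the best candidate during the descent instead of walking back up:

```cpp
#include <cassert>
#include <cstddef>

struct ToyNode { size_t size; ToyNode* left; ToyNode* right; };

// Find the exact size class, or the smallest class still >= the request.
ToyNode* find_at_least(ToyNode* root, size_t size) {
  ToyNode* best = nullptr;           // smallest node seen with size >= request
  for (ToyNode* cur = root; cur != nullptr;) {
    if (cur->size == size) return cur;       // exact match wins outright
    if (cur->size < size) {
      cur = cur->right;              // too small: anything useful is right
    } else {
      best = cur;                    // candidate; keep looking for smaller
      cur = cur->left;
    }
  }
  return best;                       // NULL means the dictionary is exhausted
}

int main() {
  ToyNode n16{16, nullptr, nullptr}, n64{64, nullptr, nullptr};
  ToyNode root{32, &n16, &n64};
  assert(find_at_least(&root, 32)->size == 32);   // exact
  assert(find_at_least(&root, 33)->size == 64);   // dithered up
  assert(find_at_least(&root, 65) == nullptr);    // nothing big enough
  return 0;
}
```
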
 
-template <class Chunk>
-TreeList<Chunk>* BinaryTreeDictionary<Chunk>::find_list(size_t size) const {
-  TreeList<Chunk>* curTL;
+template <class Chunk_t, template <class> class FreeList_t>
+TreeList<Chunk_t, FreeList_t>* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_list(size_t size) const {
+  TreeList<Chunk_t, FreeList_t>* curTL;
   for (curTL = root(); curTL != NULL;) {
     if (curTL->size() == size) {        // exact match
       break;
@@ -484,10 +513,10 @@
 }
 
 
-template <class Chunk>
-bool BinaryTreeDictionary<Chunk>::verify_chunk_in_free_list(Chunk* tc) const {
+template <class Chunk_t, template <class> class FreeList_t>
+bool BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_chunk_in_free_list(Chunk_t* tc) const {
   size_t size = tc->size();
-  TreeList<Chunk>* tl = find_list(size);
+  TreeList<Chunk_t, FreeList_t>* tl = find_list(size);
   if (tl == NULL) {
     return false;
   } else {
@@ -495,9 +524,9 @@
   }
 }
 
-template <class Chunk>
-Chunk* BinaryTreeDictionary<Chunk>::find_largest_dict() const {
-  TreeList<Chunk> *curTL = root();
+template <class Chunk_t, template <class> class FreeList_t>
+Chunk_t* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_largest_dict() const {
+  TreeList<Chunk_t, FreeList_t> *curTL = root();
   if (curTL != NULL) {
     while(curTL->right() != NULL) curTL = curTL->right();
     return curTL->largest_address();
@@ -510,15 +539,15 @@
 // chunk in a list on a tree node, just unlink it.
 // If it is the last chunk in the list (the next link is NULL),
 // remove the node and repair the tree.
-template <class Chunk>
-TreeChunk<Chunk>*
-BinaryTreeDictionary<Chunk>::remove_chunk_from_tree(TreeChunk<Chunk>* tc) {
+template <class Chunk_t, template <class> class FreeList_t>
+TreeChunk<Chunk_t, FreeList_t>*
+BinaryTreeDictionary<Chunk_t, FreeList_t>::remove_chunk_from_tree(TreeChunk<Chunk_t, FreeList_t>* tc) {
   assert(tc != NULL, "Should not call with a NULL chunk");
   assert(tc->is_free(), "Header is not marked correctly");
 
-  TreeList<Chunk> *newTL, *parentTL;
-  TreeChunk<Chunk>* retTC;
-  TreeList<Chunk>* tl = tc->list();
+  TreeList<Chunk_t, FreeList_t> *newTL, *parentTL;
+  TreeChunk<Chunk_t, FreeList_t>* retTC;
+  TreeList<Chunk_t, FreeList_t>* tl = tc->list();
   debug_only(
     bool removing_only_chunk = false;
     if (tl == _root) {
@@ -538,8 +567,8 @@
 
   retTC = tc;
   // Removing this chunk can have the side effect of changing the node
-  // (TreeList<Chunk>*) in the tree.  If the node is the root, update it.
-  TreeList<Chunk>* replacementTL = tl->remove_chunk_replace_if_needed(tc);
+  // (TreeList<Chunk_t, FreeList_t>*) in the tree.  If the node is the root, update it.
+  TreeList<Chunk_t, FreeList_t>* replacementTL = tl->remove_chunk_replace_if_needed(tc);
   assert(tc->is_free(), "Chunk should still be free");
   assert(replacementTL->parent() == NULL ||
          replacementTL == replacementTL->parent()->left() ||
@@ -549,17 +578,18 @@
     assert(replacementTL->parent() == NULL, "Incorrectly replacing root");
     set_root(replacementTL);
   }
-  debug_only(
+#ifdef ASSERT
     if (tl != replacementTL) {
       assert(replacementTL->head() != NULL,
         "If the tree list was replaced, it should not be a NULL list");
-      TreeList<Chunk>* rhl = replacementTL->head_as_TreeChunk()->list();
-      TreeList<Chunk>* rtl = TreeChunk<Chunk>::as_TreeChunk(replacementTL->tail())->list();
+      TreeList<Chunk_t, FreeList_t>* rhl = replacementTL->head_as_TreeChunk()->list();
+      TreeList<Chunk_t, FreeList_t>* rtl =
+        TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(replacementTL->tail())->list();
       assert(rhl == replacementTL, "Broken head");
       assert(rtl == replacementTL, "Broken tail");
       assert(replacementTL->size() == tc->size(),  "Broken size");
     }
-  )
+#endif
 
   // Does the tree need to be repaired?
   if (replacementTL->count() == 0) {
@@ -574,7 +604,7 @@
     } else if (replacementTL->right() == NULL) {
       // right is NULL
       newTL = replacementTL->left();
-      debug_only(replacementTL->clearLeft();)
+      debug_only(replacementTL->clear_left();)
     } else {  // we have both children, so, by patriarchal convention,
               // my replacement is least node in right sub-tree
       complicated_splice = true;
@@ -623,7 +653,7 @@
       newTL->set_right(replacementTL->right());
       debug_only(
         replacementTL->clear_right();
-        replacementTL->clearLeft();
+        replacementTL->clear_left();
       )
     }
     assert(replacementTL->right() == NULL &&
@@ -644,21 +674,21 @@
     verify_tree();
   }
   assert(!removing_only_chunk || _root == NULL, "root should be NULL");
-  return TreeChunk<Chunk>::as_TreeChunk(retTC);
+  return TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(retTC);
 }
 
 // Remove the leftmost node (lm) in the tree and return it.
 // If lm has a right child, link it to the left node of
 // the parent of lm.
-template <class Chunk>
-TreeList<Chunk>* BinaryTreeDictionary<Chunk>::remove_tree_minimum(TreeList<Chunk>* tl) {
+template <class Chunk_t, template <class> class FreeList_t>
+TreeList<Chunk_t, FreeList_t>* BinaryTreeDictionary<Chunk_t, FreeList_t>::remove_tree_minimum(TreeList<Chunk_t, FreeList_t>* tl) {
   assert(tl != NULL && tl->parent() != NULL, "really need a proper sub-tree");
   // locate the subtree minimum by walking down left branches
-  TreeList<Chunk>* curTL = tl;
+  TreeList<Chunk_t, FreeList_t>* curTL = tl;
   for (; curTL->left() != NULL; curTL = curTL->left());
   // obviously curTL now has at most one child, a right child
   if (curTL != root()) {  // Should this test just be removed?
-    TreeList<Chunk>* parentTL = curTL->parent();
+    TreeList<Chunk_t, FreeList_t>* parentTL = curTL->parent();
     if (parentTL->left() == curTL) { // curTL is a left child
       parentTL->set_left(curTL->right());
     } else {
@@ -685,31 +715,14 @@
   return curTL;
 }
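
remove_tree_minimum() exploits the fact that the leftmost node has no left child, so its right subtree can be spliced directly into its place. A minimal sketch with hypothetical names:

```cpp
#include <cassert>
#include <cstddef>

struct ToyNode { size_t size; ToyNode *left, *right, *parent; };

ToyNode* remove_tree_minimum(ToyNode* tl) {
  ToyNode* cur = tl;
  while (cur->left != nullptr) cur = cur->left;   // walk down left branches
  ToyNode* parent = cur->parent;
  if (parent != nullptr) {
    if (parent->left == cur) parent->left = cur->right;
    else                     parent->right = cur->right;
    if (cur->right != nullptr) cur->right->parent = parent;
  }
  cur->left = cur->right = cur->parent = nullptr; // detach the minimum
  return cur;
}

int main() {
  ToyNode a{10, nullptr, nullptr, nullptr};   // minimum, with a right child
  ToyNode b{12, nullptr, nullptr, nullptr};
  ToyNode c{20, nullptr, nullptr, nullptr};   // subtree root
  c.left = &a; a.parent = &c;
  a.right = &b; b.parent = &a;
  assert(remove_tree_minimum(&c) == &a);
  assert(c.left == &b && b.parent == &c);     // b spliced into a's place
  return 0;
}
```
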
 
-// Based on a simplification of the algorithm by Sleator and Tarjan (JACM 1985).
-// The simplifications are the following:
-// . we splay only when we delete (not when we insert)
-// . we apply a single spay step per deletion/access
-// By doing such partial splaying, we reduce the amount of restructuring,
-// while getting a reasonably efficient search tree (we think).
-// [Measurements will be needed to (in)validate this expectation.]
-
-template <class Chunk>
-void BinaryTreeDictionary<Chunk>::semi_splay_step(TreeList<Chunk>* tc) {
-  // apply a semi-splay step at the given node:
-  // . if root, norting needs to be done
-  // . if child of root, splay once
-  // . else zig-zig or sig-zag depending on path from grandparent
-  if (root() == tc) return;
-  warning("*** Splaying not yet implemented; "
-          "tree operations may be inefficient ***");
-}
-
-template <class Chunk>
-void BinaryTreeDictionary<Chunk>::insert_chunk_in_tree(Chunk* fc) {
-  TreeList<Chunk> *curTL, *prevTL;
+template <class Chunk_t, template <class> class FreeList_t>
+void BinaryTreeDictionary<Chunk_t, FreeList_t>::insert_chunk_in_tree(Chunk_t* fc) {
+  TreeList<Chunk_t, FreeList_t> *curTL, *prevTL;
   size_t size = fc->size();
 
-  assert(size >= BinaryTreeDictionary<Chunk>::min_tree_chunk_size, "too small to be a TreeList<Chunk>");
+  assert((size >= min_size()),
+    err_msg(SIZE_FORMAT " is too small to be a TreeChunk<Chunk_t, FreeList_t> " SIZE_FORMAT,
+      size, min_size()));
   if (FLSVerifyDictionary) {
     verify_tree();
   }
@@ -729,9 +742,9 @@
       curTL = curTL->right();
     }
   }
-  TreeChunk<Chunk>* tc = TreeChunk<Chunk>::as_TreeChunk(fc);
+  TreeChunk<Chunk_t, FreeList_t>* tc = TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(fc);
   // This chunk is being returned to the binary tree.  Its embedded
-  // TreeList<Chunk> should be unused at this point.
+  // TreeList<Chunk_t, FreeList_t> should be unused at this point.
   tc->initialize();
   if (curTL != NULL) {          // exact match
     tc->set_list(curTL);
@@ -739,8 +752,8 @@
   } else {                     // need a new node in tree
     tc->clear_next();
     tc->link_prev(NULL);
-    TreeList<Chunk>* newTL = TreeList<Chunk>::as_TreeList(tc);
-    assert(((TreeChunk<Chunk>*)tc)->list() == newTL,
+    TreeList<Chunk_t, FreeList_t>* newTL = TreeList<Chunk_t, FreeList_t>::as_TreeList(tc);
+    assert(((TreeChunk<Chunk_t, FreeList_t>*)tc)->list() == newTL,
       "List was not initialized correctly");
     if (prevTL == NULL) {      // we are the only tree node
       assert(root() == NULL, "control point invariant");
@@ -768,30 +781,30 @@
   }
 }
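
insert_chunk_in_tree() has two outcomes: an exact size match joins the existing list for that class, and anything else becomes a brand-new node hanging off the last node visited. A compact sketch of that skeleton with hypothetical names; the count bump stands in for appending to the existing list:

```cpp
#include <cassert>
#include <cstddef>

struct ToyNode {
  size_t   size;
  ToyNode *left, *right, *parent;
  size_t   count;                     // chunks held in this size class
};

void insert(ToyNode*& root, ToyNode* tc) {
  ToyNode *prev = nullptr, *cur = root;
  while (cur != nullptr && cur->size != tc->size) {
    prev = cur;
    cur = (tc->size < cur->size) ? cur->left : cur->right;
  }
  if (cur != nullptr) {               // exact match: join the list
    cur->count++;
    return;
  }
  tc->left = tc->right = nullptr;     // need a new node in the tree
  tc->count = 1;
  tc->parent = prev;
  if (prev == nullptr)            root = tc;        // only tree node
  else if (tc->size < prev->size) prev->left = tc;
  else                            prev->right = tc;
}

int main() {
  ToyNode* root = nullptr;
  ToyNode a{32, nullptr, nullptr, nullptr, 0};
  ToyNode b{16, nullptr, nullptr, nullptr, 0};
  ToyNode c{32, nullptr, nullptr, nullptr, 0};
  insert(root, &a);                   // becomes the root
  insert(root, &b);                   // new left child
  insert(root, &c);                   // joins the 32-word size class
  assert(root == &a && a.left == &b && a.count == 2);
  return 0;
}
```
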
 
-template <class Chunk>
-size_t BinaryTreeDictionary<Chunk>::max_chunk_size() const {
-  FreeBlockDictionary<Chunk>::verify_par_locked();
-  TreeList<Chunk>* tc = root();
+template <class Chunk_t, template <class> class FreeList_t>
+size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::max_chunk_size() const {
+  FreeBlockDictionary<Chunk_t>::verify_par_locked();
+  TreeList<Chunk_t, FreeList_t>* tc = root();
   if (tc == NULL) return 0;
   for (; tc->right() != NULL; tc = tc->right());
   return tc->size();
 }
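
Since the tree is keyed by size, max_chunk_size() is just a right-spine walk; a tiny sketch with hypothetical names:

```cpp
#include <cassert>
#include <cstddef>

struct ToyNode { size_t size; ToyNode* right; };

size_t max_chunk_size(const ToyNode* root) {
  if (root == nullptr) return 0;
  while (root->right != nullptr) root = root->right;  // rightmost node
  return root->size;
}

int main() {
  ToyNode big{256, nullptr};
  ToyNode root{64, &big};
  assert(max_chunk_size(&root) == 256);
  assert(max_chunk_size(nullptr) == 0);
  return 0;
}
```
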
 
-template <class Chunk>
-size_t BinaryTreeDictionary<Chunk>::total_list_length(TreeList<Chunk>* tl) const {
+template <class Chunk_t, template <class> class FreeList_t>
+size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_list_length(TreeList<Chunk_t, FreeList_t>* tl) const {
   size_t res;