changeset 3814:8940ddc1036f

Merge
author zgu
date Mon, 05 Nov 2012 13:55:31 -0800
parents 69ad7823b1ca 9cc901118f6b
children c284cf4781f0 fb3190e77d3c
files test/runtime/7158800/BadUtf8.java test/runtime/7158800/InternTest.java test/runtime/7158800/Test7158800.sh test/runtime/7158800/badstrings.txt
diffstat 136 files changed, 11147 insertions(+), 35286 deletions(-)
--- a/.hgtags	Mon Nov 05 15:30:22 2012 -0500
+++ b/.hgtags	Mon Nov 05 13:55:31 2012 -0800
@@ -287,3 +287,7 @@
 b261523fe66c40a02968f0aa7e73602491bb3386 hs25-b05
 4547dc71db765276e027b0c2780b724bae0a07d3 jdk8-b61
 d0337c31c8be7716369b4e7c3bd5f352983c6a06 hs25-b06
+dccd40de8db1fa96f186e6179907818d75320440 jdk8-b62
+dc16fe422c535ecd4e9f80fb814a1bb9704da6f5 hs25-b07
+acabb5c282f59be7e3238920b2ea06b684ab68f7 jdk8-b63
+8cb93eadfb6dcab88d91b8e2cd3e0e07d0ac4048 hs25-b08
--- a/make/Makefile	Mon Nov 05 15:30:22 2012 -0500
+++ b/make/Makefile	Mon Nov 05 13:55:31 2012 -0800
@@ -453,14 +453,30 @@
     ifeq ($(JVM_VARIANT_ZEROSHARK), true)
         $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX)
 		$(install-file)
+        $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo:	$(SHARK_DIR)/%.debuginfo
+		$(install-file)
+        $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz:		$(SHARK_DIR)/%.diz
+		$(install-file)
         $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX):       $(SHARK_DIR)/%.$(LIBRARY_SUFFIX)
 		$(install-file)
+        $(EXPORT_SERVER_DIR)/%.debuginfo:		$(SHARK_DIR)/%.debuginfo
+		$(install-file)
+        $(EXPORT_SERVER_DIR)/%.diz:			$(SHARK_DIR)/%.diz
+		$(install-file)
     endif
     ifeq ($(JVM_VARIANT_ZERO), true)
         $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
 		$(install-file)
+        $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo:		$(ZERO_DIR)/%.debuginfo
+		$(install-file)
+        $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz:		$(ZERO_DIR)/%.diz
+		$(install-file)
         $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX):       $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
 		$(install-file)
+        $(EXPORT_SERVER_DIR)/%.debuginfo:		$(ZERO_DIR)/%.debuginfo
+		$(install-file)
+        $(EXPORT_SERVER_DIR)/%.diz:			$(ZERO_DIR)/%.diz
+		$(install-file)
     endif
     ifeq ($(JVM_VARIANT_MINIMAL1), true)
         $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX):	$(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX)
--- a/make/excludeSrc.make	Mon Nov 05 15:30:22 2012 -0500
+++ b/make/excludeSrc.make	Mon Nov 05 13:55:31 2012 -0800
@@ -79,10 +79,10 @@
       CXXFLAGS += -DSERIALGC
       CFLAGS += -DSERIALGC
       Src_Files_EXCLUDE += \
-	binaryTreeDictionary.cpp cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
+	cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
 	cmsGCAdaptivePolicyCounters.cpp cmsLockVerifier.cpp cmsPermGen.cpp compactibleFreeListSpace.cpp \
-	concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp freeBlockDictionary.cpp \
-	freeChunk.cpp freeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \
+	concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp \
+	freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \
 	concurrentG1Refine.cpp concurrentG1RefineThread.cpp concurrentMark.cpp concurrentMarkThread.cpp \
 	dirtyCardQueue.cpp g1AllocRegion.cpp g1BlockOffsetTable.cpp g1CollectedHeap.cpp g1GCPhaseTimes.cpp \
 	g1CollectorPolicy.cpp g1ErgoVerbose.cpp g1_globals.cpp g1HRPrinter.cpp g1MarkSweep.cpp \
--- a/make/hotspot_version	Mon Nov 05 15:30:22 2012 -0500
+++ b/make/hotspot_version	Mon Nov 05 13:55:31 2012 -0800
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=07
+HS_BUILD_NUMBER=09
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/src/cpu/x86/vm/assembler_x86.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -1007,6 +1007,67 @@
   emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
 }
 
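+// AES-NI round instructions. These are three-byte-opcode SSE instructions
+// (66 0F 38 DC..DF); simd_prefix()/simd_prefix_and_encode() emit the
+// REX/VEX prefixes as appropriate, and (0xC0 | encode) forms the ModRM byte
+// for the register-register forms.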
+void Assembler::aesdec(XMMRegister dst, Address src) {
+  assert(VM_Version::supports_aes(), "");
+  InstructionMark im(this);
+  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xde);
+  emit_operand(dst, src);
+}
+
+void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_aes(), "");
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xde);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::aesdeclast(XMMRegister dst, Address src) {
+  assert(VM_Version::supports_aes(), "");
+  InstructionMark im(this);
+  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xdf);
+  emit_operand(dst, src);
+}
+
+void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_aes(), "");
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xdf);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::aesenc(XMMRegister dst, Address src) {
+  assert(VM_Version::supports_aes(), "");
+  InstructionMark im(this);
+  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xdc);
+  emit_operand(dst, src);
+}
+
+void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_aes(), "");
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xdc);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::aesenclast(XMMRegister dst, Address src) {
+  assert(VM_Version::supports_aes(), "");
+  InstructionMark im(this);
+  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xdd);
+  emit_operand(dst, src);
+}
+
+void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_aes(), "");
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xdd);
+  emit_byte(0xC0 | encode);
+}
+
+
 void Assembler::andl(Address dst, int32_t imm32) {
   InstructionMark im(this);
   prefix(dst);
@@ -2307,6 +2368,22 @@
   a_byte(p);
 }
 
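+// PSHUFB (SSSE3, 66 0F 38 00): each byte of dst is replaced by the dst byte
+// selected by the corresponding byte of src (or zeroed if the mask byte's
+// high bit is set), which lets the key shuffle mask reorder all 16 key bytes
+// in one instruction.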
+void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_ssse3(), "");
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0x00);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::pshufb(XMMRegister dst, Address src) {
+  assert(VM_Version::supports_ssse3(), "");
+  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
+  InstructionMark im(this);
+  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0x00);
+  emit_operand(dst, src);
+}
+
 void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
   assert(isByte(mode), "invalid value");
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
@@ -8067,6 +8144,15 @@
   LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
 }
 
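+// AddressLiteral variants: a RIP-relative operand only reaches +/-2GB on
+// x86_64, so when the literal is not reachable() its address is materialized
+// in rscratch1 first (on 32-bit, reachable() is always true).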
+void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src) {
+  if (reachable(src)) {
+    Assembler::movdqu(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    Assembler::movdqu(dst, Address(rscratch1, 0));
+  }
+}
+
 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
   if (reachable(src)) {
     Assembler::movsd(dst, as_Address(src));
@@ -8357,6 +8443,17 @@
   }
 }
 
+void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
+  // Used with a 16-byte-aligned shuffle mask (e.g. the AES key shuffle mask).
+  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
+  if (reachable(src)) {
+    Assembler::pshufb(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    Assembler::pshufb(dst, Address(rscratch1, 0));
+  }
+}
+
 // AVX 3-operands instructions
 
 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
--- a/src/cpu/x86/vm/assembler_x86.hpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/x86/vm/assembler_x86.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -875,6 +875,17 @@
   void addss(XMMRegister dst, Address src);
   void addss(XMMRegister dst, XMMRegister src);
 
+  // AES instructions
+  void aesdec(XMMRegister dst, Address src);
+  void aesdec(XMMRegister dst, XMMRegister src);
+  void aesdeclast(XMMRegister dst, Address src);
+  void aesdeclast(XMMRegister dst, XMMRegister src);
+  void aesenc(XMMRegister dst, Address src);
+  void aesenc(XMMRegister dst, XMMRegister src);
+  void aesenclast(XMMRegister dst, Address src);
+  void aesenclast(XMMRegister dst, XMMRegister src);
+
+
   void andl(Address  dst, int32_t imm32);
   void andl(Register dst, int32_t imm32);
   void andl(Register dst, Address src);
@@ -1424,6 +1435,10 @@
   void prefetcht2(Address src);
   void prefetchw(Address src);
 
+  // Shuffle Bytes
+  void pshufb(XMMRegister dst, XMMRegister src);
+  void pshufb(XMMRegister dst, Address src);
+
   // Shuffle Packed Doublewords
   void pshufd(XMMRegister dst, XMMRegister src, int mode);
   void pshufd(XMMRegister dst, Address src,     int mode);
@@ -2611,6 +2626,12 @@
   void divss(XMMRegister dst, Address src)        { Assembler::divss(dst, src); }
   void divss(XMMRegister dst, AddressLiteral src);
 
+  // Move Unaligned Double Quadword
+  void movdqu(Address     dst, XMMRegister src)   { Assembler::movdqu(dst, src); }
+  void movdqu(XMMRegister dst, Address src)       { Assembler::movdqu(dst, src); }
+  void movdqu(XMMRegister dst, XMMRegister src)   { Assembler::movdqu(dst, src); }
+  void movdqu(XMMRegister dst, AddressLiteral src);
+
   void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
   void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
   void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
@@ -2658,6 +2679,10 @@
   void xorps(XMMRegister dst, Address src)     { Assembler::xorps(dst, src); }
   void xorps(XMMRegister dst, AddressLiteral src);
 
+  // Shuffle Bytes
+  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
+  void pshufb(XMMRegister dst, Address src)     { Assembler::pshufb(dst, src); }
+  void pshufb(XMMRegister dst, AddressLiteral src);
   // AVX 3-operands instructions
 
   void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -2137,6 +2137,529 @@
     }
   }
 
+  // AES intrinsic stubs
+  enum {AESBlockSize = 16};
+
+  address generate_key_shuffle_mask() {
+    __ align(16);
+    StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask");
+    address start = __ pc();
+    __ emit_data(0x00010203, relocInfo::none, 0 );
+    __ emit_data(0x04050607, relocInfo::none, 0 );
+    __ emit_data(0x08090a0b, relocInfo::none, 0 );
+    __ emit_data(0x0c0d0e0f, relocInfo::none, 0 );
+    return start;
+  }
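+  // Read bytewise, the mask above is 03 02 01 00 07 06 05 04 0b 0a 09 08 0f 0e 0d 0c,
+  // so pshufb with it byte-swaps each 32-bit lane, putting the words of the
+  // Java int[] key schedule into the byte order the AES instructions expect.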
+
+  // Utility routine for loading a 128-bit key word in little-endian format;
+  // can optionally specify that the shuffle mask is already in an XMM register
+  void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
+    __ movdqu(xmmdst, Address(key, offset));
+    if (xmm_shuf_mask != NULL) {
+      __ pshufb(xmmdst, xmm_shuf_mask);
+    } else {
+      __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    }
+  }
+
+  // aesenc using the specified key+offset;
+  // can optionally specify that the shuffle mask is already in an XMM register
+  void aes_enc_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
+    load_key(xmmtmp, key, offset, xmm_shuf_mask);
+    __ aesenc(xmmdst, xmmtmp);
+  }
+
+  // aesdec using the specified key+offset;
+  // can optionally specify that the shuffle mask is already in an XMM register
+  void aes_dec_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
+    load_key(xmmtmp, key, offset, xmm_shuf_mask);
+    __ aesdec(xmmdst, xmmtmp);
+  }
+
+
+  // Arguments:
+  //
+  // Inputs:
+  //   c_rarg0   - source byte array address
+  //   c_rarg1   - destination byte array address
+  //   c_rarg2   - K (key) in little endian int array
+  //
+  address generate_aescrypt_encryptBlock() {
+    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
+    Label L_doLast;
+    address start = __ pc();
+
+    const Register from        = rsi;      // source array address
+    const Register to          = rdx;      // destination array address
+    const Register key         = rcx;      // key array address
+    const Register keylen      = rax;
+    const Address  from_param(rbp, 8+0);
+    const Address  to_param  (rbp, 8+4);
+    const Address  key_param (rbp, 8+8);
+
+    const XMMRegister xmm_result = xmm0;
+    const XMMRegister xmm_temp   = xmm1;
+    const XMMRegister xmm_key_shuf_mask = xmm2;
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+    __ push(rsi);
+    __ movptr(from , from_param);
+    __ movptr(to   , to_param);
+    __ movptr(key  , key_param);
+
+    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+    // keylen = # of 32-bit words, convert to 128-bit words
+    __ shrl(keylen, 2);
+    __ subl(keylen, 11);   // every key has at least 11 128-bit words, some have more
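+    // e.g. AES-128: 44 ints >> 2 = 11 words, keylen = 0;
+    //      AES-192: 52 ints >> 2 = 13 words, keylen = 2;
+    //      AES-256: 60 ints >> 2 = 15 words, keylen = 4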
+
+    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    __ movdqu(xmm_result, Address(from, 0));  // get 16 bytes of input
+
+    // For encryption, the java expanded key ordering is just what we need
+
+    load_key(xmm_temp, key, 0x00, xmm_key_shuf_mask);
+    __ pxor(xmm_result, xmm_temp);
+    for (int offset = 0x10; offset <= 0x90; offset += 0x10) {
+      aes_enc_key(xmm_result, xmm_temp, key, offset, xmm_key_shuf_mask);
+    }
+    load_key  (xmm_temp, key, 0xa0, xmm_key_shuf_mask);
+    __ cmpl(keylen, 0);
+    __ jcc(Assembler::equal, L_doLast);
+    __ aesenc(xmm_result, xmm_temp);                   // only in 192 and 256 bit keys
+    aes_enc_key(xmm_result, xmm_temp, key, 0xb0, xmm_key_shuf_mask);
+    load_key(xmm_temp, key, 0xc0, xmm_key_shuf_mask);
+    __ subl(keylen, 2);
+    __ jcc(Assembler::equal, L_doLast);
+    __ aesenc(xmm_result, xmm_temp);                   // only in 256 bit keys
+    aes_enc_key(xmm_result, xmm_temp, key, 0xd0, xmm_key_shuf_mask);
+    load_key(xmm_temp, key, 0xe0, xmm_key_shuf_mask);
+
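+    // Fall-through structure: a 128-bit key runs 10 rounds (the 9 aesenc above
+    // plus the aesenclast below), a 192-bit key 12 and a 256-bit key 14, with
+    // xmm_temp holding the last round key on entry to L_doLast.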
+    __ BIND(L_doLast);
+    __ aesenclast(xmm_result, xmm_temp);
+    __ movdqu(Address(to, 0), xmm_result);        // store the result
+    __ xorptr(rax, rax); // return 0
+    __ pop(rsi);
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+    return start;
+  }
+
+
+  // Arguments:
+  //
+  // Inputs:
+  //   c_rarg0   - source byte array address
+  //   c_rarg1   - destination byte array address
+  //   c_rarg2   - K (key) in little endian int array
+  //
+  address generate_aescrypt_decryptBlock() {
+    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
+    Label L_doLast;
+    address start = __ pc();
+
+    const Register from        = rsi;      // source array address
+    const Register to          = rdx;      // destination array address
+    const Register key         = rcx;      // key array address
+    const Register keylen      = rax;
+    const Address  from_param(rbp, 8+0);
+    const Address  to_param  (rbp, 8+4);
+    const Address  key_param (rbp, 8+8);
+
+    const XMMRegister xmm_result = xmm0;
+    const XMMRegister xmm_temp   = xmm1;
+    const XMMRegister xmm_key_shuf_mask = xmm2;
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+    __ push(rsi);
+    __ movptr(from , from_param);
+    __ movptr(to   , to_param);
+    __ movptr(key  , key_param);
+
+    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+    // keylen = # of 32-bit words, convert to 128-bit words
+    __ shrl(keylen, 2);
+    __ subl(keylen, 11);   // every key has at least 11 128-bit words, some have more
+
+    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    __ movdqu(xmm_result, Address(from, 0));
+
+    // for decryption java expanded key ordering is rotated one position from what we want
+    // so we start from 0x10 here and hit 0x00 last
+    // we don't know if the key is aligned, hence not using load-execute form
+    load_key(xmm_temp, key, 0x10, xmm_key_shuf_mask);
+    __ pxor  (xmm_result, xmm_temp);
+    for (int offset = 0x20; offset <= 0xa0; offset += 0x10) {
+      aes_dec_key(xmm_result, xmm_temp, key, offset, xmm_key_shuf_mask);
+    }
+    __ cmpl(keylen, 0);
+    __ jcc(Assembler::equal, L_doLast);
+    // only in 192 and 256 bit keys
+    aes_dec_key(xmm_result, xmm_temp, key, 0xb0, xmm_key_shuf_mask);
+    aes_dec_key(xmm_result, xmm_temp, key, 0xc0, xmm_key_shuf_mask);
+    __ subl(keylen, 2);
+    __ jcc(Assembler::equal, L_doLast);
+    // only in 256 bit keys
+    aes_dec_key(xmm_result, xmm_temp, key, 0xd0, xmm_key_shuf_mask);
+    aes_dec_key(xmm_result, xmm_temp, key, 0xe0, xmm_key_shuf_mask);
+
+    __ BIND(L_doLast);
+    // for decryption the aesdeclast operation is always on key+0x00
+    load_key(xmm_temp, key, 0x00, xmm_key_shuf_mask);
+    __ aesdeclast(xmm_result, xmm_temp);
+
+    __ movdqu(Address(to, 0), xmm_result);  // store the result
+
+    __ xorptr(rax, rax); // return 0
+    __ pop(rsi);
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+    return start;
+  }
+
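+  // SOE = save-on-entry: rsi, rdi and rbx are callee-saved in the 32-bit ABI,
+  // so the CBC stubs below, which use all three, must preserve them; this
+  // helper saves or restores them relative to rbp.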
+  void handleSOERegisters(bool saving) {
+    const int saveFrameSizeInBytes = 4 * wordSize;
+    const Address saved_rbx     (rbp, -3 * wordSize);
+    const Address saved_rsi     (rbp, -2 * wordSize);
+    const Address saved_rdi     (rbp, -1 * wordSize);
+
+    if (saving) {
+      __ subptr(rsp, saveFrameSizeInBytes);
+      __ movptr(saved_rsi, rsi);
+      __ movptr(saved_rdi, rdi);
+      __ movptr(saved_rbx, rbx);
+    } else {
+      // restoring
+      __ movptr(rsi, saved_rsi);
+      __ movptr(rdi, saved_rdi);
+      __ movptr(rbx, saved_rbx);
+    }
+  }
+
+  // Arguments:
+  //
+  // Inputs:
+  //   c_rarg0   - source byte array address
+  //   c_rarg1   - destination byte array address
+  //   c_rarg2   - K (key) in little endian int array
+  //   c_rarg3   - r vector byte array address
+  //   c_rarg4   - input length
+  //
+  address generate_cipherBlockChaining_encryptAESCrypt() {
+    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
+    address start = __ pc();
+
+    Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256;
+    const Register from        = rsi;      // source array address
+    const Register to          = rdx;      // destination array address
+    const Register key         = rcx;      // key array address
+    const Register rvec        = rdi;      // r byte array initialized from initvector array address
+                                           // and left with the results of the last encryption block
+    const Register len_reg     = rbx;      // src len (must be multiple of blocksize 16)
+    const Register pos         = rax;
+
+    // xmm register assignments for the loops below
+    const XMMRegister xmm_result = xmm0;
+    const XMMRegister xmm_temp   = xmm1;
+    // first 6 keys preloaded into xmm2-xmm7
+    const int XMM_REG_NUM_KEY_FIRST = 2;
+    const int XMM_REG_NUM_KEY_LAST  = 7;
+    const XMMRegister xmm_key0   = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+    handleSOERegisters(true /*saving*/);
+
+    // load registers from incoming parameters
+    const Address  from_param(rbp, 8+0);
+    const Address  to_param  (rbp, 8+4);
+    const Address  key_param (rbp, 8+8);
+    const Address  rvec_param (rbp, 8+12);
+    const Address  len_param  (rbp, 8+16);
+    __ movptr(from , from_param);
+    __ movptr(to   , to_param);
+    __ movptr(key  , key_param);
+    __ movptr(rvec , rvec_param);
+    __ movptr(len_reg , len_param);
+
+    const XMMRegister xmm_key_shuf_mask = xmm_temp;  // used temporarily to swap key bytes up front
+    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    // load up xmm regs 2 thru 7 with keys 0-5
+    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum  <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
+      offset += 0x10;
+    }
+
+    __ movdqu(xmm_result, Address(rvec, 0x00));   // initialize xmm_result with r vec
+
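+    // CBC encryption computes C[i] = E(K, P[i] ^ C[i-1]) with C[-1] = IV;
+    // xmm_result carries C[i-1] from one loop iteration to the next, so the
+    // r vector only needs to be written back to memory at L_exit.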
+    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array: 44=128, 52=192, or 60=256)
+    __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+    __ cmpl(rax, 44);
+    __ jcc(Assembler::notEqual, L_key_192_256);
+
+    // 128 bit code follows here
+    __ movptr(pos, 0);
+    __ align(OptoLoopAlignment);
+    __ BIND(L_loopTop_128);
+    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
+    __ pxor  (xmm_result, xmm_temp);                                // xor with the current r vector
+
+    __ pxor  (xmm_result, xmm_key0);                                // do the aes rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      __ aesenc(xmm_result, as_XMMRegister(rnum));
+    }
+    for (int key_offset = 0x60; key_offset <= 0x90; key_offset += 0x10) {
+      aes_enc_key(xmm_result, xmm_temp, key, key_offset);
+    }
+    load_key(xmm_temp, key, 0xa0);
+    __ aesenclast(xmm_result, xmm_temp);
+
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual, L_loopTop_128);
+
+    __ BIND(L_exit);
+    __ movdqu(Address(rvec, 0), xmm_result);     // final value of r stored in rvec of CipherBlockChaining object
+
+    handleSOERegisters(false /*restoring*/);
+    __ movl(rax, 0);                             // return 0 (why?)
+    __ leave();                                  // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+  __ BIND(L_key_192_256);
+  // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
+    __ cmpl(rax, 52);
+    __ jcc(Assembler::notEqual, L_key_256);
+
+    // 192-bit code follows here (could be changed to use more xmm registers)
+    __ movptr(pos, 0);
+  __ align(OptoLoopAlignment);
+  __ BIND(L_loopTop_192);
+    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
+    __ pxor  (xmm_result, xmm_temp);                                // xor with the current r vector
+
+    __ pxor  (xmm_result, xmm_key0);                                // do the aes rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      __ aesenc(xmm_result, as_XMMRegister(rnum));
+    }
+    for (int key_offset = 0x60; key_offset <= 0xb0; key_offset += 0x10) {
+      aes_enc_key(xmm_result, xmm_temp, key, key_offset);
+    }
+    load_key(xmm_temp, key, 0xc0);
+    __ aesenclast(xmm_result, xmm_temp);
+
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);   // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual, L_loopTop_192);
+    __ jmp(L_exit);
+
+  __ BIND(L_key_256);
+    // 256-bit code follows here (could be changed to use more xmm registers)
+    __ movptr(pos, 0);
+  __ align(OptoLoopAlignment);
+  __ BIND(L_loopTop_256);
+    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
+    __ pxor  (xmm_result, xmm_temp);                                // xor with the current r vector
+
+    __ pxor  (xmm_result, xmm_key0);                                // do the aes rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      __ aesenc(xmm_result, as_XMMRegister(rnum));
+    }
+    for (int key_offset = 0x60; key_offset <= 0xd0; key_offset += 0x10) {
+      aes_enc_key(xmm_result, xmm_temp, key, key_offset);
+    }
+    load_key(xmm_temp, key, 0xe0);
+    __ aesenclast(xmm_result, xmm_temp);
+
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);   // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual, L_loopTop_256);
+    __ jmp(L_exit);
+
+    return start;
+  }
+
+
+  // CBC AES Decryption.
+  // In the 32-bit stub we do not try to parallelize 4 blocks at a time, for lack of registers.
+  //
+  // Arguments:
+  //
+  // Inputs:
+  //   c_rarg0   - source byte array address
+  //   c_rarg1   - destination byte array address
+  //   c_rarg2   - K (key) in little endian int array
+  //   c_rarg3   - r vector byte array address
+  //   c_rarg4   - input length
+  //
+
+  address generate_cipherBlockChaining_decryptAESCrypt() {
+    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
+    address start = __ pc();
+
+    Label L_exit, L_key_192_256, L_key_256;
+    Label L_singleBlock_loopTop_128;
+    Label L_singleBlock_loopTop_192, L_singleBlock_loopTop_256;
+    const Register from        = rsi;      // source array address
+    const Register to          = rdx;      // destination array address
+    const Register key         = rcx;      // key array address
+    const Register rvec        = rdi;      // r byte array initialized from initvector array address
+                                           // and left with the results of the last encryption block
+    const Register len_reg     = rbx;      // src len (must be multiple of blocksize 16)
+    const Register pos         = rax;
+
+    // xmm register assignments for the loops below
+    const XMMRegister xmm_result = xmm0;
+    const XMMRegister xmm_temp   = xmm1;
+    // first 6 keys preloaded into xmm2-xmm7
+    const int XMM_REG_NUM_KEY_FIRST = 2;
+    const int XMM_REG_NUM_KEY_LAST  = 7;
+    const int FIRST_NON_REG_KEY_offset = 0x70;
+    const XMMRegister xmm_key_first   = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+    handleSOERegisters(true /*saving*/);
+
+    // load registers from incoming parameters
+    const Address  from_param(rbp, 8+0);
+    const Address  to_param  (rbp, 8+4);
+    const Address  key_param (rbp, 8+8);
+    const Address  rvec_param (rbp, 8+12);
+    const Address  len_param  (rbp, 8+16);
+    __ movptr(from , from_param);
+    __ movptr(to   , to_param);
+    __ movptr(key  , key_param);
+    __ movptr(rvec , rvec_param);
+    __ movptr(len_reg , len_param);
+
+    // the java expanded key ordering is rotated one position from what we want
+    // so we start from 0x10 here and hit 0x00 last
+    const XMMRegister xmm_key_shuf_mask = xmm1;  // used temporarily to swap key bytes up front
+    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    // load up xmm regs 2 thru 7 with the first 6 keys (offsets 0x10 thru 0x60)
+    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum  <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
+      offset += 0x10;
+    }
+
+    // inside here, use the rvec register to point to previous block cipher
+    // with which we xor at the end of each newly decrypted block
+    const Register  prev_block_cipher_ptr = rvec;
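+    // CBC decryption computes P[i] = D(K, C[i]) ^ C[i-1];
+    // prev_block_cipher_ptr tracks C[i-1] in memory, starting at the IV.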
+
+    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array: 44=128, 52=192, or 60=256)
+    __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+    __ cmpl(rax, 44);
+    __ jcc(Assembler::notEqual, L_key_192_256);
+
+
+    // 128-bit code follows here
+    __ movptr(pos, 0);
+  __ align(OptoLoopAlignment);
+  __ BIND(L_singleBlock_loopTop_128);
+    __ cmpptr(len_reg, 0);           // any blocks left?
+    __ jcc(Assembler::equal, L_exit);
+    __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
+    __ pxor  (xmm_result, xmm_key_first);                             // do the aes dec rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      __ aesdec(xmm_result, as_XMMRegister(rnum));
+    }
+    for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xa0; key_offset += 0x10) {   // 128-bit runs up to key offset a0
+      aes_dec_key(xmm_result, xmm_temp, key, key_offset);
+    }
+    load_key(xmm_temp, key, 0x00);                                     // final key is stored in java expanded array at offset 0
+    __ aesdeclast(xmm_result, xmm_temp);
+    __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
+    __ pxor  (xmm_result, xmm_temp);                                  // xor with the current r vector
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0));     // set up new ptr
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jmp(L_singleBlock_loopTop_128);
+
+
+    __ BIND(L_exit);
+    __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
+    __ movptr(rvec , rvec_param);                                     // restore this since used in loop
+    __ movdqu(Address(rvec, 0), xmm_temp);                            // final value of r stored in rvec of CipherBlockChaining object
+    handleSOERegisters(false /*restoring*/);
+    __ movl(rax, 0);                                                  // return 0 (why?)
+    __ leave();                                                       // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+
+    __ BIND(L_key_192_256);
+    // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
+    __ cmpl(rax, 52);
+    __ jcc(Assembler::notEqual, L_key_256);
+
+    // 192-bit code follows here (could be optimized to use parallelism)
+    __ movptr(pos, 0);
+    __ align(OptoLoopAlignment);
+    __ BIND(L_singleBlock_loopTop_192);
+    __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
+    __ pxor  (xmm_result, xmm_key_first);                             // do the aes dec rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      __ aesdec(xmm_result, as_XMMRegister(rnum));
+    }
+    for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xc0; key_offset += 0x10) {   // 192-bit runs up to key offset c0
+      aes_dec_key(xmm_result, xmm_temp, key, key_offset);
+    }
+    load_key(xmm_temp, key, 0x00);                                     // final key is stored in java expanded array at offset 0
+    __ aesdeclast(xmm_result, xmm_temp);
+    __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
+    __ pxor  (xmm_result, xmm_temp);                                  // xor with the current r vector
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0));     // set up new ptr
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual,L_singleBlock_loopTop_192);
+    __ jmp(L_exit);
+
+    __ BIND(L_key_256);
+    // 256-bit code follows here (could be optimized to use parallelism)
+    __ movptr(pos, 0);
+    __ align(OptoLoopAlignment);
+    __ BIND(L_singleBlock_loopTop_256);
+    __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
+    __ pxor  (xmm_result, xmm_key_first);                             // do the aes dec rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      __ aesdec(xmm_result, as_XMMRegister(rnum));
+    }
+    for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xe0; key_offset += 0x10) {   // 256-bit runs up to key offset e0
+      aes_dec_key(xmm_result, xmm_temp, key, key_offset);
+    }
+    load_key(xmm_temp, key, 0x00);                                     // final key is stored in java expanded array at offset 0
+    __ aesdeclast(xmm_result, xmm_temp);
+    __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
+    __ pxor  (xmm_result, xmm_temp);                                  // xor with the current r vector
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0));     // set up new ptr
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual,L_singleBlock_loopTop_256);
+    __ jmp(L_exit);
+
+    return start;
+  }
+
+
  public:
   // Information about frame layout at time of blocking runtime call.
   // Note that we only have to preserve callee-saved registers since
@@ -2332,6 +2855,16 @@
     generate_arraycopy_stubs();
 
     generate_math_stubs();
+
+    // don't bother generating these AES intrinsic stubs unless global flag is set
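+    // (the stub addresses recorded here are the entry points used by the
+    // compiler intrinsics for the com.sun.crypto.provider AES classes)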
+    if (UseAESIntrinsics) {
+      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask();  // might be needed by the others
+
+      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
+      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
+      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
+      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
+    }
   }
 
 
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -2941,6 +2941,548 @@
     }
   }
 
+  // AES intrinsic stubs
+  enum {AESBlockSize = 16};
+
+  address generate_key_shuffle_mask() {
+    __ align(16);
+    StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask");
+    address start = __ pc();
+    __ emit_data64( 0x0405060700010203, relocInfo::none );
+    __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none );
+    return start;
+  }
+
+  // Utility routine for loading a 128-bit key word in little-endian format;
+  // can optionally specify that the shuffle mask is already in an XMM register
+  void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
+    __ movdqu(xmmdst, Address(key, offset));
+    if (xmm_shuf_mask != NULL) {
+      __ pshufb(xmmdst, xmm_shuf_mask);
+    } else {
+      __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    }
+  }
+
+  // aesenc using the specified key+offset;
+  // can optionally specify that the shuffle mask is already in an XMM register
+  void aes_enc_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
+    load_key(xmmtmp, key, offset, xmm_shuf_mask);
+    __ aesenc(xmmdst, xmmtmp);
+  }
+
+  // aesdec using the specified key+offset;
+  // can optionally specify that the shuffle mask is already in an XMM register
+  void aes_dec_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
+    load_key(xmmtmp, key, offset, xmm_shuf_mask);
+    __ aesdec(xmmdst, xmmtmp);
+  }
+
+
+  // Arguments:
+  //
+  // Inputs:
+  //   c_rarg0   - source byte array address
+  //   c_rarg1   - destination byte array address
+  //   c_rarg2   - K (key) in little endian int array
+  //
+  address generate_aescrypt_encryptBlock() {
+    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
+    Label L_doLast;
+    address start = __ pc();
+
+    const Register from        = c_rarg0;  // source array address
+    const Register to          = c_rarg1;  // destination array address
+    const Register key         = c_rarg2;  // key array address
+    const Register keylen      = rax;
+
+    const XMMRegister xmm_result = xmm0;
+    const XMMRegister xmm_temp   = xmm1;
+    const XMMRegister xmm_key_shuf_mask = xmm2;
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+
+    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+    // keylen = # of 32-bit words, convert to 128-bit words
+    __ shrl(keylen, 2);
+    __ subl(keylen, 11);   // every key has at least 11 128-bit words, some have more
+
+    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    __ movdqu(xmm_result, Address(from, 0));  // get 16 bytes of input
+
+    // For encryption, the java expanded key ordering is just what we need
+    // we don't know if the key is aligned, hence not using load-execute form
+
+    load_key(xmm_temp, key, 0x00, xmm_key_shuf_mask);
+    __ pxor(xmm_result, xmm_temp);
+    for (int offset = 0x10; offset <= 0x90; offset += 0x10) {
+      aes_enc_key(xmm_result, xmm_temp, key, offset, xmm_key_shuf_mask);
+    }
+    load_key  (xmm_temp, key, 0xa0, xmm_key_shuf_mask);
+    __ cmpl(keylen, 0);
+    __ jcc(Assembler::equal, L_doLast);
+    __ aesenc(xmm_result, xmm_temp);                   // only in 192 and 256 bit keys
+    aes_enc_key(xmm_result, xmm_temp, key, 0xb0, xmm_key_shuf_mask);
+    load_key(xmm_temp, key, 0xc0, xmm_key_shuf_mask);
+    __ subl(keylen, 2);
+    __ jcc(Assembler::equal, L_doLast);
+    __ aesenc(xmm_result, xmm_temp);                   // only in 256 bit keys
+    aes_enc_key(xmm_result, xmm_temp, key, 0xd0, xmm_key_shuf_mask);
+    load_key(xmm_temp, key, 0xe0, xmm_key_shuf_mask);
+
+    __ BIND(L_doLast);
+    __ aesenclast(xmm_result, xmm_temp);
+    __ movdqu(Address(to, 0), xmm_result);        // store the result
+    __ xorptr(rax, rax); // return 0
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+    return start;
+  }
+
+
+  // Arguments:
+  //
+  // Inputs:
+  //   c_rarg0   - source byte array address
+  //   c_rarg1   - destination byte array address
+  //   c_rarg2   - K (key) in little endian int array
+  //
+  address generate_aescrypt_decryptBlock() {
+    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
+    Label L_doLast;
+    address start = __ pc();
+
+    const Register from        = c_rarg0;  // source array address
+    const Register to          = c_rarg1;  // destination array address
+    const Register key         = c_rarg2;  // key array address
+    const Register keylen      = rax;
+
+    const XMMRegister xmm_result = xmm0;
+    const XMMRegister xmm_temp   = xmm1;
+    const XMMRegister xmm_key_shuf_mask = xmm2;
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+
+    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+    // keylen = # of 32-bit words, convert to 128-bit words
+    __ shrl(keylen, 2);
+    __ subl(keylen, 11);   // every key has at least 11 128-bit words, some have more
+
+    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    __ movdqu(xmm_result, Address(from, 0));
+
+    // for decryption java expanded key ordering is rotated one position from what we want
+    // so we start from 0x10 here and hit 0x00 last
+    // we don't know if the key is aligned, hence not using load-execute form
+    load_key(xmm_temp, key, 0x10, xmm_key_shuf_mask);
+    __ pxor  (xmm_result, xmm_temp);
+    for (int offset = 0x20; offset <= 0xa0; offset += 0x10) {
+      aes_dec_key(xmm_result, xmm_temp, key, offset, xmm_key_shuf_mask);
+    }
+    __ cmpl(keylen, 0);
+    __ jcc(Assembler::equal, L_doLast);
+    // only in 192 and 256 bit keys
+    aes_dec_key(xmm_result, xmm_temp, key, 0xb0, xmm_key_shuf_mask);
+    aes_dec_key(xmm_result, xmm_temp, key, 0xc0, xmm_key_shuf_mask);
+    __ subl(keylen, 2);
+    __ jcc(Assembler::equal, L_doLast);
+    // only in 256 bit keys
+    aes_dec_key(xmm_result, xmm_temp, key, 0xd0, xmm_key_shuf_mask);
+    aes_dec_key(xmm_result, xmm_temp, key, 0xe0, xmm_key_shuf_mask);
+
+    __ BIND(L_doLast);
+    // for decryption the aesdeclast operation is always on key+0x00
+    load_key(xmm_temp, key, 0x00, xmm_key_shuf_mask);
+    __ aesdeclast(xmm_result, xmm_temp);
+
+    __ movdqu(Address(to, 0), xmm_result);  // store the result
+
+    __ xorptr(rax, rax); // return 0
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+    return start;
+  }
+
+
+  // Arguments:
+  //
+  // Inputs:
+  //   c_rarg0   - source byte array address
+  //   c_rarg1   - destination byte array address
+  //   c_rarg2   - K (key) in little endian int array
+  //   c_rarg3   - r vector byte array address
+  //   c_rarg4   - input length
+  //
+  address generate_cipherBlockChaining_encryptAESCrypt() {
+    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
+    address start = __ pc();
+
+    Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256;
+    const Register from        = c_rarg0;  // source array address
+    const Register to          = c_rarg1;  // destination array address
+    const Register key         = c_rarg2;  // key array address
+    const Register rvec        = c_rarg3;  // r byte array initialized from initvector array address
+                                           // and left with the results of the last encryption block
+#ifndef _WIN64
+    const Register len_reg     = c_rarg4;  // src len (must be multiple of blocksize 16)
+#else
+    const Address  len_mem(rsp, 6 * wordSize);  // length is on stack on Win64
+    const Register len_reg     = r10;      // pick the first volatile windows register
+#endif
+    const Register pos         = rax;
+
+    // xmm register assignments for the loops below
+    const XMMRegister xmm_result = xmm0;
+    const XMMRegister xmm_temp   = xmm1;
+    // keys 0-10 preloaded into xmm2-xmm12
+    const int XMM_REG_NUM_KEY_FIRST = 2;
+    const int XMM_REG_NUM_KEY_LAST  = 12;
+    const XMMRegister xmm_key0   = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
+    const XMMRegister xmm_key10  = as_XMMRegister(XMM_REG_NUM_KEY_LAST);
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+
+#ifdef _WIN64
+    // on win64, fill len_reg from stack position
+    __ movl(len_reg, len_mem);
+    // save the xmm registers which must be preserved 6-12
+    __ subptr(rsp, -rsp_after_call_off * wordSize);
+    for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
+      __ movdqu(xmm_save(i), as_XMMRegister(i));
+    }
+#endif
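+    // The Win64 ABI passes the fifth argument on the stack and makes
+    // xmm6-xmm15 callee-saved, hence the len_mem load and the xmm_save
+    // spills above (restored before returning).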
+
+    const XMMRegister xmm_key_shuf_mask = xmm_temp;  // used temporarily to swap key bytes up front
+    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    // load up xmm regs 2 thru 12 with keys 0x00 thru 0xa0
+    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
+      offset += 0x10;
+    }
+
+    __ movdqu(xmm_result, Address(rvec, 0x00));   // initialize xmm_result with r vec
+
+    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array: 44=128, 52=192, or 60=256)
+    __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+    __ cmpl(rax, 44);
+    __ jcc(Assembler::notEqual, L_key_192_256);
+
+    // 128 bit code follows here
+    __ movptr(pos, 0);
+    __ align(OptoLoopAlignment);
+    __ BIND(L_loopTop_128);
+    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
+    __ pxor  (xmm_result, xmm_temp);               // xor with the current r vector
+
+    __ pxor  (xmm_result, xmm_key0);               // do the aes rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
+      __ aesenc(xmm_result, as_XMMRegister(rnum));
+    }
+    __ aesenclast(xmm_result, xmm_key10);
+
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual, L_loopTop_128);
+
+    __ BIND(L_exit);
+    __ movdqu(Address(rvec, 0), xmm_result);     // final value of r stored in rvec of CipherBlockChaining object
+
+#ifdef _WIN64
+    // restore xmm regs belonging to calling function
+    for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
+      __ movdqu(as_XMMRegister(i), xmm_save(i));
+    }
+#endif
+    __ movl(rax, 0); // return 0 (why?)
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+    __ BIND(L_key_192_256);
+    // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
+    __ cmpl(rax, 52);
+    __ jcc(Assembler::notEqual, L_key_256);
+
+    // 192-bit code follows here (could be changed to use more xmm registers)
+    __ movptr(pos, 0);
+    __ align(OptoLoopAlignment);
+    __ BIND(L_loopTop_192);
+    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
+    __ pxor  (xmm_result, xmm_temp);               // xor with the current r vector
+
+    __ pxor  (xmm_result, xmm_key0);               // do the aes rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      __ aesenc(xmm_result, as_XMMRegister(rnum));
+    }
+    aes_enc_key(xmm_result, xmm_temp, key, 0xb0);
+    load_key(xmm_temp, key, 0xc0);
+    __ aesenclast(xmm_result, xmm_temp);
+
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual, L_loopTop_192);
+    __ jmp(L_exit);
+
+    __ BIND(L_key_256);
+    // 256-bit code follows here (could be changed to use more xmm registers)
+    __ movptr(pos, 0);
+    __ align(OptoLoopAlignment);
+    __ BIND(L_loopTop_256);
+    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
+    __ pxor  (xmm_result, xmm_temp);               // xor with the current r vector
+
+    __ pxor  (xmm_result, xmm_key0);               // do the aes rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      __ aesenc(xmm_result, as_XMMRegister(rnum));
+    }
+    aes_enc_key(xmm_result, xmm_temp, key, 0xb0);
+    aes_enc_key(xmm_result, xmm_temp, key, 0xc0);
+    aes_enc_key(xmm_result, xmm_temp, key, 0xd0);
+    load_key(xmm_temp, key, 0xe0);
+    __ aesenclast(xmm_result, xmm_temp);
+
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual, L_loopTop_256);
+    __ jmp(L_exit);
+
+    return start;
+  }
+
+
+
+  // This is a version of CBC/AES decrypt which processes 4 blocks per loop
+  // iteration to hide instruction latency
+  //
+  // Arguments:
+  //
+  // Inputs:
+  //   c_rarg0   - source byte array address
+  //   c_rarg1   - destination byte array address
+  //   c_rarg2   - K (key) in little endian int array
+  //   c_rarg3   - r vector byte array address
+  //   c_rarg4   - input length
+  //
+
+  address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
+    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
+    address start = __ pc();
+
+    Label L_exit, L_key_192_256, L_key_256;
+    Label L_singleBlock_loopTop_128, L_multiBlock_loopTop_128;
+    Label L_singleBlock_loopTop_192, L_singleBlock_loopTop_256;
+    const Register from        = c_rarg0;  // source array address
+    const Register to          = c_rarg1;  // destination array address
+    const Register key         = c_rarg2;  // key array address
+    const Register rvec        = c_rarg3;  // r byte array initialized from initvector array address
+                                           // and left with the results of the last encryption block
+#ifndef _WIN64
+    const Register len_reg     = c_rarg4;  // src len (must be multiple of blocksize 16)
+#else
+    const Address  len_mem(rsp, 6 * wordSize);  // length is on stack on Win64
+    const Register len_reg     = r10;      // pick the first volatile windows register
+#endif
+    const Register pos         = rax;
+
+    // xmm register assignments for the loops below
+    const XMMRegister xmm_result = xmm0;
+    // keys 0-10 preloaded into xmm5-xmm15
+    const int XMM_REG_NUM_KEY_FIRST = 5;
+    const int XMM_REG_NUM_KEY_LAST  = 15;
+    const XMMRegister xmm_key_first   = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
+    const XMMRegister xmm_key_last  = as_XMMRegister(XMM_REG_NUM_KEY_LAST);
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+
+#ifdef _WIN64
+    // on win64, fill len_reg from stack position
+    __ movl(len_reg, len_mem);
+    // save the xmm registers which must be preserved 6-15
+    __ subptr(rsp, -rsp_after_call_off * wordSize);
+    for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
+      __ movdqu(xmm_save(i), as_XMMRegister(i));
+    }
+#endif
+    // the java expanded key ordering is rotated one position from what we want
+    // so we start from 0x10 here and hit 0x00 last
+    const XMMRegister xmm_key_shuf_mask = xmm1;  // used temporarily to swap key bytes up front
+    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    // load up xmm regs 5 thru 15 with keys 0x10 thru 0xa0, then 0x00 last
+    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
+      if (rnum == XMM_REG_NUM_KEY_LAST) offset = 0x00;
+      load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
+      offset += 0x10;
+    }
+
+    const XMMRegister xmm_prev_block_cipher = xmm1;  // holds cipher of previous block
+    // registers holding the four results in the parallelized loop
+    const XMMRegister xmm_result0 = xmm0;
+    const XMMRegister xmm_result1 = xmm2;
+    const XMMRegister xmm_result2 = xmm3;
+    const XMMRegister xmm_result3 = xmm4;
+
+    __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00));   // initialize with initial rvec
+
+    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array: 44=128, 52=192, or 60=256)
+    __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+    __ cmpl(rax, 44);
+    __ jcc(Assembler::notEqual, L_key_192_256);
+
+
+    // 128-bit code follows here, parallelized
+    __ movptr(pos, 0);
+    __ align(OptoLoopAlignment);
+    __ BIND(L_multiBlock_loopTop_128);
+    __ cmpptr(len_reg, 4*AESBlockSize);           // see if at least 4 blocks left
+    __ jcc(Assembler::less, L_singleBlock_loopTop_128);
+
+    __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0*AESBlockSize));   // get next 4 blocks into xmmresult registers
+    __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1*AESBlockSize));
+    __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2*AESBlockSize));
+    __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3*AESBlockSize));
+
+#define DoFour(opc, src_reg)                    \
+    __ opc(xmm_result0, src_reg);               \
+    __ opc(xmm_result1, src_reg);               \
+    __ opc(xmm_result2, src_reg);               \
+    __ opc(xmm_result3, src_reg);
+
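+    // Issuing each round on four blocks back-to-back interleaves four
+    // independent dependency chains, hiding the multi-cycle aesdec latency.
+    // CBC decryption can be parallelized this way because P[i] needs only
+    // C[i] and C[i-1], both already in hand; CBC encryption cannot.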
+    DoFour(pxor, xmm_key_first);
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
+      DoFour(aesdec, as_XMMRegister(rnum));
+    }
+    DoFour(aesdeclast, xmm_key_last);
+    // for each result, xor with the r vector of previous cipher block
+    __ pxor(xmm_result0, xmm_prev_block_cipher);
+    __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0*AESBlockSize));
+    __ pxor(xmm_result1, xmm_prev_block_cipher);
+    __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1*AESBlockSize));
+    __ pxor(xmm_result2, xmm_prev_block_cipher);
+    __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2*AESBlockSize));
+    __ pxor(xmm_result3, xmm_prev_block_cipher);
+    __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3*AESBlockSize));   // this will carry over to next set of blocks
+
+    __ movdqu(Address(to, pos, Address::times_1, 0*AESBlockSize), xmm_result0);     // store 4 results into the next 64 bytes of output
+    __ movdqu(Address(to, pos, Address::times_1, 1*AESBlockSize), xmm_result1);
+    __ movdqu(Address(to, pos, Address::times_1, 2*AESBlockSize), xmm_result2);
+    __ movdqu(Address(to, pos, Address::times_1, 3*AESBlockSize), xmm_result3);
+
+    __ addptr(pos, 4*AESBlockSize);
+    __ subptr(len_reg, 4*AESBlockSize);
+    __ jmp(L_multiBlock_loopTop_128);
+
+    // registers used in the non-parallelized loops
+    const XMMRegister xmm_prev_block_cipher_save = xmm2;
+    const XMMRegister xmm_temp   = xmm3;
+
+    __ align(OptoLoopAlignment);
+    __ BIND(L_singleBlock_loopTop_128);
+    __ cmpptr(len_reg, 0);           // any blocks left?
+    __ jcc(Assembler::equal, L_exit);
+    __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
+    __ movdqa(xmm_prev_block_cipher_save, xmm_result);              // save for next r vector
+    __ pxor  (xmm_result, xmm_key_first);               // do the aes dec rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
+      __ aesdec(xmm_result, as_XMMRegister(rnum));
+    }
+    __ aesdeclast(xmm_result, xmm_key_last);
+    __ pxor  (xmm_result, xmm_prev_block_cipher);               // xor with the current r vector
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save);              // set up next r vector with cipher input from this block
+
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jmp(L_singleBlock_loopTop_128);
+
+
+    __ BIND(L_exit);
+    __ movdqu(Address(rvec, 0), xmm_prev_block_cipher);     // final value of r stored in rvec of CipherBlockChaining object
+#ifdef _WIN64
+    // restore regs belonging to calling function
+    for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
+      __ movdqu(as_XMMRegister(i), xmm_save(i));
+    }
+#endif
+    __ movl(rax, 0); // return 0 (why?)
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+
+    __ BIND(L_key_192_256);
+    // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
+    __ cmpl(rax, 52);
+    __ jcc(Assembler::notEqual, L_key_256);
+
+    // 192-bit code follows here (could be optimized to use parallelism)
+    __ movptr(pos, 0);
+    __ align(OptoLoopAlignment);
+    __ BIND(L_singleBlock_loopTop_192);
+    __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
+    __ movdqa(xmm_prev_block_cipher_save, xmm_result);              // save for next r vector
+    __ pxor  (xmm_result, xmm_key_first);               // do the aes dec rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
+      __ aesdec(xmm_result, as_XMMRegister(rnum));
+    }
+    aes_dec_key(xmm_result, xmm_temp, key, 0xb0);     // 192-bit key goes up to c0
+    aes_dec_key(xmm_result, xmm_temp, key, 0xc0);
+    __ aesdeclast(xmm_result, xmm_key_last);                    // xmm15 always came from key+0
+    __ pxor  (xmm_result, xmm_prev_block_cipher);               // xor with the current r vector
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save);              // set up next r vector with cipher input from this block
+
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual, L_singleBlock_loopTop_192);
+    __ jmp(L_exit);
+
+    __ BIND(L_key_256);
+    // 256-bit code follows here (could be optimized to use parallelism)
+    __ movptr(pos, 0);
+    __ align(OptoLoopAlignment);
+    __ BIND(L_singleBlock_loopTop_256);
+    __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
+    __ movdqa(xmm_prev_block_cipher_save, xmm_result);              // save for next r vector
+    __ pxor  (xmm_result, xmm_key_first);               // do the aes dec rounds
+    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
+      __ aesdec(xmm_result, as_XMMRegister(rnum));
+    }
+    aes_dec_key(xmm_result, xmm_temp, key, 0xb0);     // 256-bit key goes up to e0
+    aes_dec_key(xmm_result, xmm_temp, key, 0xc0);
+    aes_dec_key(xmm_result, xmm_temp, key, 0xd0);
+    aes_dec_key(xmm_result, xmm_temp, key, 0xe0);
+    __ aesdeclast(xmm_result, xmm_key_last);             // xmm15 came from key+0
+    __ pxor  (xmm_result, xmm_prev_block_cipher);               // xor with the current r vector
+    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
+    // no need to store r to memory until we exit
+    __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save);              // set up next r vector with cipher input from this block
+
+    __ addptr(pos, AESBlockSize);
+    __ subptr(len_reg, AESBlockSize);
+    __ jcc(Assembler::notEqual, L_singleBlock_loopTop_256);
+    __ jmp(L_exit);
+
+    return start;
+  }
+
+
+
 #undef __
 #define __ masm->
 
@@ -3135,6 +3677,16 @@
     generate_arraycopy_stubs();
 
     generate_math_stubs();
+
+    // don't bother generating these AES intrinsic stubs unless global flag is set
+    if (UseAESIntrinsics) {
+      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask();  // needed by the others
+
+      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
+      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
+      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
+      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
+    }
   }
 
  public:
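Editor's note on why only the decrypt stub above is parallelized four blocks at a time: in CBC, encryption is a serial recurrence (C[i] = E(P[i] ^ C[i-1])), but decryption computes P[i] = D(C[i]) ^ C[i-1], so the expensive D() calls carry no cross-block dependency. The sketch below restates that in plain C++; AES128_decrypt_block is a hypothetical stand-in for the aesdec round sequence, not a HotSpot or library function.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Hypothetical one-block primitive standing in for the aesdec rounds.
    void AES128_decrypt_block(const uint8_t in[16], uint8_t out[16],
                              const uint8_t* key_schedule);

    void cbc_decrypt(const uint8_t* in, uint8_t* out, size_t nblocks,
                     const uint8_t* key_schedule, uint8_t rvec[16]) {
      uint8_t prev[16];                                    // the "r vector"
      std::memcpy(prev, rvec, 16);
      for (size_t b = 0; b < nblocks; b++) {
        uint8_t tmp[16];
        AES128_decrypt_block(in + 16 * b, tmp, key_schedule); // independent per block
        for (int i = 0; i < 16; i++)
          out[16 * b + i] = tmp[i] ^ prev[i];              // P[b] = D(C[b]) ^ C[b-1]
        std::memcpy(prev, in + 16 * b, 16);                // carry to the next block
      }
      std::memcpy(rvec, prev, 16);                         // final r back to the caller
    }

The stub simply unrolls the D() step four wide (the DoFour macro) and keeps the running r vector in xmm_prev_block_cipher instead of memory until L_exit.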
--- a/src/cpu/x86/vm/stubRoutines_x86_32.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/x86/vm/stubRoutines_x86_32.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -44,3 +44,4 @@
 
 address StubRoutines::x86::_verify_mxcsr_entry         = NULL;
 address StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = NULL;
+address StubRoutines::x86::_key_shuffle_mask_addr = NULL;
--- a/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -41,10 +41,14 @@
  private:
   static address _verify_mxcsr_entry;
   static address _verify_fpu_cntrl_wrd_entry;
+  // shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers
+  static address _key_shuffle_mask_addr;
 
  public:
   static address verify_mxcsr_entry()                        { return _verify_mxcsr_entry; }
   static address verify_fpu_cntrl_wrd_entry()                { return _verify_fpu_cntrl_wrd_entry; }
+  static address key_shuffle_mask_addr()                     { return _key_shuffle_mask_addr; }
+
 };
 
   static bool    returns_to_call_stub(address return_pc)     { return return_pc == _call_stub_return_address; }
--- a/src/cpu/x86/vm/stubRoutines_x86_64.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/x86/vm/stubRoutines_x86_64.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -56,3 +56,4 @@
 address StubRoutines::x86::_double_sign_mask = NULL;
 address StubRoutines::x86::_double_sign_flip = NULL;
 address StubRoutines::x86::_mxcsr_std = NULL;
+address StubRoutines::x86::_key_shuffle_mask_addr = NULL;
--- a/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -54,6 +54,8 @@
   static address _double_sign_mask;
   static address _double_sign_flip;
   static address _mxcsr_std;
+  // shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers
+  static address _key_shuffle_mask_addr;
 
  public:
 
@@ -116,6 +118,9 @@
   {
     return _mxcsr_std;
   }
+
+  static address key_shuffle_mask_addr()                     { return _key_shuffle_mask_addr; }
+
 };
 
 #endif // CPU_X86_VM_STUBROUTINES_X86_64_HPP
--- a/src/cpu/x86/vm/vm_version_x86.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/x86/vm/vm_version_x86.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -419,13 +419,16 @@
   if (UseAVX < 1)
     _cpuFeatures &= ~CPU_AVX;
 
+  if (!UseAES && !FLAG_IS_DEFAULT(UseAES))
+    _cpuFeatures &= ~CPU_AES;
+
   if (logical_processors_per_package() == 1) {
     // HT processor could be installed on a system which doesn't support HT.
     _cpuFeatures &= ~CPU_HT;
   }
 
   char buf[256];
-  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
                cores_per_cpu(), threads_per_core(),
                cpu_family(), _model, _stepping,
                (supports_cmov() ? ", cmov" : ""),
@@ -441,6 +444,7 @@
                (supports_popcnt() ? ", popcnt" : ""),
                (supports_avx()    ? ", avx" : ""),
                (supports_avx2()   ? ", avx2" : ""),
+               (supports_aes()    ? ", aes" : ""),
                (supports_mmx_ext() ? ", mmxext" : ""),
                (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
                (supports_lzcnt()   ? ", lzcnt": ""),
@@ -472,6 +476,29 @@
   if (!supports_avx ()) // Drop to 0 if no AVX  support
     UseAVX = 0;
 
+  // Use AES instructions if available.
+  if (supports_aes()) {
+    if (FLAG_IS_DEFAULT(UseAES)) {
+      UseAES = true;
+    }
+  } else if (UseAES) {
+    if (!FLAG_IS_DEFAULT(UseAES))
+      warning("AES instructions not available on this CPU");
+    FLAG_SET_DEFAULT(UseAES, false);
+  }
+
+  // The AES intrinsic stubs require AES instruction support (of course)
+  // but also require AVX mode for misaligned SSE access
+  if (UseAES && (UseAVX > 0)) {
+    if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+      UseAESIntrinsics = true;
+    }
+  } else if (UseAESIntrinsics) {
+    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
+      warning("AES intrinsics not available on this CPU");
+    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+  }
+
 #ifdef COMPILER2
   if (UseFPUForSpilling) {
     if (UseSSE < 2) {
@@ -714,6 +741,9 @@
     if (UseAVX > 0) {
       tty->print("  UseAVX=%d",UseAVX);
     }
+    if (UseAES) {
+      tty->print("  UseAES=1");
+    }
     tty->cr();
     tty->print("Allocation");
     if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow_prefetch()) {
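For readers tracing the flag logic above: UseAES turns on by default when the CPU advertises AES, and UseAESIntrinsics defaults on only when both UseAES and AVX are in effect; an explicit request the CPU cannot honor is warned about and reset. A standalone restatement (hypothetical names, not VM code):

    struct AesConfig { bool use_aes; bool use_intrinsics; };

    // cpu_aes / avx describe the hardware; *_set mean the user passed the
    // corresponding flag explicitly on the command line.
    AesConfig resolve_aes(bool cpu_aes, bool avx, AesConfig req,
                          bool aes_set, bool intrinsics_set) {
      AesConfig out = req;
      if (cpu_aes) {
        if (!aes_set) out.use_aes = true;       // default on when supported
      } else if (out.use_aes) {
        out.use_aes = false;                    // VM warns, then resets
      }
      if (out.use_aes && avx) {
        if (!intrinsics_set) out.use_intrinsics = true;
      } else if (out.use_intrinsics) {
        out.use_intrinsics = false;             // stubs need AES and AVX both
      }
      return out;
    }

One consequence worth noting: requesting the intrinsics on an AES-capable machine while forcing UseAVX=0 still ends with the intrinsics disabled, because the stubs rely on AVX-style misaligned SSE access.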
--- a/src/cpu/x86/vm/vm_version_x86.hpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/x86/vm/vm_version_x86.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -78,7 +78,9 @@
                sse4_2   : 1,
                         : 2,
                popcnt   : 1,
-                        : 3,
+                        : 1,
+               aes      : 1,
+                        : 1,
                osxsave  : 1,
                avx      : 1,
                         : 3;
@@ -244,7 +246,8 @@
     CPU_TSC    = (1 << 15),
     CPU_TSCINV = (1 << 16),
     CPU_AVX    = (1 << 17),
-    CPU_AVX2   = (1 << 18)
+    CPU_AVX2   = (1 << 18),
+    CPU_AES    = (1 << 19)
   } cpuFeatureFlags;
 
   enum {
@@ -420,6 +423,8 @@
       result |= CPU_TSC;
     if (_cpuid_info.ext_cpuid7_edx.bits.tsc_invariance != 0)
       result |= CPU_TSCINV;
+    if (_cpuid_info.std_cpuid1_ecx.bits.aes != 0)
+      result |= CPU_AES;
 
     // AMD features.
     if (is_amd()) {
@@ -544,6 +549,7 @@
   static bool supports_avx()      { return (_cpuFeatures & CPU_AVX) != 0; }
   static bool supports_avx2()     { return (_cpuFeatures & CPU_AVX2) != 0; }
   static bool supports_tsc()      { return (_cpuFeatures & CPU_TSC)    != 0; }
+  static bool supports_aes()      { return (_cpuFeatures & CPU_AES) != 0; }
 
   // Intel features
   static bool is_intel_family_core() { return is_intel() &&
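The new bit-field slot and CPU_AES flag above correspond to the AES-NI feature bit: CPUID leaf 1, ECX bit 25. Outside HotSpot's generated get_cpu_info stub, the same test looks like this (GCC/Clang builtin; a sketch, not VM code):

    #include <cpuid.h>

    bool cpu_supports_aes() {
      unsigned eax, ebx, ecx, edx;
      if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
        return false;                  // CPUID leaf 1 unavailable
      return (ecx & (1u << 25)) != 0;  // the aes bit mirrored by CPU_AES
    }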
--- a/src/cpu/x86/vm/x86.ad	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/x86/vm/x86.ad	Mon Nov 05 13:55:31 2012 -0800
@@ -4102,9 +4102,158 @@
 
 // ----------------------- LogicalRightShift -----------------------------------
 
-// Shorts/Chars vector logical right shift produces incorrect Java result
+// Shorts vector logical right shift produces incorrect Java result
 // for negative data because Java code converts short values into ints with
-// sign extension before a shift.
+// sign extension before a shift. But char vectors are fine since chars are
+// unsigned values.
+
+instruct vsrl2S(vecS dst, vecS shift) %{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (URShiftVS dst shift));
+  format %{ "psrlw   $dst,$shift\t! logical right shift packed2S" %}
+  ins_encode %{
+    __ psrlw($dst$$XMMRegister, $shift$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl2S_imm(vecS dst, immI8 shift) %{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (URShiftVS dst shift));
+  format %{ "psrlw   $dst,$shift\t! logical right shift packed2S" %}
+  ins_encode %{
+    __ psrlw($dst$$XMMRegister, (int)$shift$$constant);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl2S_reg(vecS dst, vecS src, vecS shift) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
+  match(Set dst (URShiftVS src shift));
+  format %{ "vpsrlw  $dst,$src,$shift\t! logical right shift packed2S" %}
+  ins_encode %{
+    bool vector256 = false;
+    __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl2S_reg_imm(vecS dst, vecS src, immI8 shift) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
+  match(Set dst (URShiftVS src shift));
+  format %{ "vpsrlw  $dst,$src,$shift\t! logical right shift packed2S" %}
+  ins_encode %{
+    bool vector256 = false;
+    __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl4S(vecD dst, vecS shift) %{
+  predicate(n->as_Vector()->length() == 4);
+  match(Set dst (URShiftVS dst shift));
+  format %{ "psrlw   $dst,$shift\t! logical right shift packed4S" %}
+  ins_encode %{
+    __ psrlw($dst$$XMMRegister, $shift$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl4S_imm(vecD dst, immI8 shift) %{
+  predicate(n->as_Vector()->length() == 4);
+  match(Set dst (URShiftVS dst shift));
+  format %{ "psrlw   $dst,$shift\t! logical right shift packed4S" %}
+  ins_encode %{
+    __ psrlw($dst$$XMMRegister, (int)$shift$$constant);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl4S_reg(vecD dst, vecD src, vecS shift) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
+  match(Set dst (URShiftVS src shift));
+  format %{ "vpsrlw  $dst,$src,$shift\t! logical right shift packed4S" %}
+  ins_encode %{
+    bool vector256 = false;
+    __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl4S_reg_imm(vecD dst, vecD src, immI8 shift) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
+  match(Set dst (URShiftVS src shift));
+  format %{ "vpsrlw  $dst,$src,$shift\t! logical right shift packed4S" %}
+  ins_encode %{
+    bool vector256 = false;
+    __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl8S(vecX dst, vecS shift) %{
+  predicate(n->as_Vector()->length() == 8);
+  match(Set dst (URShiftVS dst shift));
+  format %{ "psrlw   $dst,$shift\t! logical right shift packed8S" %}
+  ins_encode %{
+    __ psrlw($dst$$XMMRegister, $shift$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl8S_imm(vecX dst, immI8 shift) %{
+  predicate(n->as_Vector()->length() == 8);
+  match(Set dst (URShiftVS dst shift));
+  format %{ "psrlw   $dst,$shift\t! logical right shift packed8S" %}
+  ins_encode %{
+    __ psrlw($dst$$XMMRegister, (int)$shift$$constant);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl8S_reg(vecX dst, vecX src, vecS shift) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
+  match(Set dst (URShiftVS src shift));
+  format %{ "vpsrlw  $dst,$src,$shift\t! logical right shift packed8S" %}
+  ins_encode %{
+    bool vector256 = false;
+    __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl8S_reg_imm(vecX dst, vecX src, immI8 shift) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
+  match(Set dst (URShiftVS src shift));
+  format %{ "vpsrlw  $dst,$src,$shift\t! logical right shift packed8S" %}
+  ins_encode %{
+    bool vector256 = false;
+    __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl16S_reg(vecY dst, vecY src, vecS shift) %{
+  predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
+  match(Set dst (URShiftVS src shift));
+  format %{ "vpsrlw  $dst,$src,$shift\t! logical right shift packed16S" %}
+  ins_encode %{
+    bool vector256 = true;
+    __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vsrl16S_reg_imm(vecY dst, vecY src, immI8 shift) %{
+  predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
+  match(Set dst (URShiftVS src shift));
+  format %{ "vpsrlw  $dst,$src,$shift\t! logical right shift packed16S" %}
+  ins_encode %{
+    bool vector256 = true;
+    __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
+  %}
+  ins_pipe( pipe_slow );
+%}
 
 // Integers vector logical right shift
 instruct vsrl2I(vecD dst, vecS shift) %{
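The rationale in the comment at the top of this hunk is easiest to see with concrete values: Java sign-extends a short to int before >>> and then keeps the low 16 bits on the store back, which a 16-bit lane shift (psrlw) cannot reproduce, while a char is zero-extended, so the lane shift agrees. A C++ mock-up of both promotions (illustrative only, not VM code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int16_t  s = -4;        // Java short
      uint16_t c = 0xFFFC;    // Java char (unsigned 16-bit)

      // Java: promote to int with sign extension, shift, truncate to short.
      uint16_t java_short = (uint16_t)(((uint32_t)(int32_t)s) >> 1);  // 0xFFFE
      // What a 16-bit lane shift (psrlw) would produce instead:
      uint16_t lane_short = (uint16_t)((uint16_t)s >> 1);             // 0x7FFE

      // Chars are zero-extended, so both routes agree:
      uint16_t java_char  = (uint16_t)(((uint32_t)c) >> 1);           // 0x7FFE
      uint16_t lane_char  = (uint16_t)(c >> 1);                       // 0x7FFE

      printf("%04x %04x %04x %04x\n",
             java_short, lane_short, java_char, lane_char);
    }

Hence the vsrl*S rules above are legal for Java char vectors even though they would be wrong for shorts with negative data.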
--- a/src/cpu/zero/vm/cppInterpreterGenerator_zero.hpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/zero/vm/cppInterpreterGenerator_zero.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -31,12 +31,17 @@
     return _masm;
   }
 
- protected:
-  address generate_entry(address entry_point) {
-    ZeroEntry *entry = (ZeroEntry *) assembler()->pc();
-    assembler()->advance(sizeof(ZeroEntry));
+ public:
+  static address generate_entry_impl(MacroAssembler* masm, address entry_point) {
+    ZeroEntry *entry = (ZeroEntry *) masm->pc();
+    masm->advance(sizeof(ZeroEntry));
     entry->set_entry_point(entry_point);
     return (address) entry;
   }
 
+ protected:
+  address generate_entry(address entry_point) {
+    return generate_entry_impl(assembler(), entry_point);
+  }
+
 #endif // CPU_ZERO_VM_CPPINTERPRETERGENERATOR_ZERO_HPP
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -180,25 +180,6 @@
         method, istate->osr_entry(), istate->osr_buf(), THREAD);
       return;
     }
-    else if (istate->msg() == BytecodeInterpreter::call_method_handle) {
-      oop method_handle = istate->callee();
-
-      // Trim back the stack to put the parameters at the top
-      stack->set_sp(istate->stack() + 1);
-
-      // Make the call
-      process_method_handle(method_handle, THREAD);
-      fixup_after_potential_safepoint();
-
-      // Convert the result
-      istate->set_stack(stack->sp() - 1);
-
-      // Restore the stack
-      stack->set_sp(istate->stack_limit() + 1);
-
-      // Resume the interpreter
-      istate->set_msg(BytecodeInterpreter::method_resume);
-    }
     else {
       ShouldNotReachHere();
     }
@@ -535,35 +516,35 @@
   if (entry->is_volatile()) {
     switch (entry->flag_state()) {
     case ctos:
-      SET_LOCALS_INT(object->char_field_acquire(entry->f2()), 0);
+      SET_LOCALS_INT(object->char_field_acquire(entry->f2_as_index()), 0);
       break;
 
     case btos:
-      SET_LOCALS_INT(object->byte_field_acquire(entry->f2()), 0);
+      SET_LOCALS_INT(object->byte_field_acquire(entry->f2_as_index()), 0);
       break;
 
     case stos:
-      SET_LOCALS_INT(object->short_field_acquire(entry->f2()), 0);
+      SET_LOCALS_INT(object->short_field_acquire(entry->f2_as_index()), 0);
       break;
 
     case itos:
-      SET_LOCALS_INT(object->int_field_acquire(entry->f2()), 0);
+      SET_LOCALS_INT(object->int_field_acquire(entry->f2_as_index()), 0);
       break;
 
     case ltos:
-      SET_LOCALS_LONG(object->long_field_acquire(entry->f2()), 0);
+      SET_LOCALS_LONG(object->long_field_acquire(entry->f2_as_index()), 0);
       break;
 
     case ftos:
-      SET_LOCALS_FLOAT(object->float_field_acquire(entry->f2()), 0);
+      SET_LOCALS_FLOAT(object->float_field_acquire(entry->f2_as_index()), 0);
       break;
 
     case dtos:
-      SET_LOCALS_DOUBLE(object->double_field_acquire(entry->f2()), 0);
+      SET_LOCALS_DOUBLE(object->double_field_acquire(entry->f2_as_index()), 0);
       break;
 
     case atos:
-      SET_LOCALS_OBJECT(object->obj_field_acquire(entry->f2()), 0);
+      SET_LOCALS_OBJECT(object->obj_field_acquire(entry->f2_as_index()), 0);
       break;
 
     default:
@@ -573,35 +554,35 @@
   else {
     switch (entry->flag_state()) {
     case ctos:
-      SET_LOCALS_INT(object->char_field(entry->f2()), 0);
+      SET_LOCALS_INT(object->char_field(entry->f2_as_index()), 0);
       break;
 
     case btos:
-      SET_LOCALS_INT(object->byte_field(entry->f2()), 0);
+      SET_LOCALS_INT(object->byte_field(entry->f2_as_index()), 0);
       break;
 
     case stos:
-      SET_LOCALS_INT(object->short_field(entry->f2()), 0);
+      SET_LOCALS_INT(object->short_field(entry->f2_as_index()), 0);
       break;
 
     case itos:
-      SET_LOCALS_INT(object->int_field(entry->f2()), 0);
+      SET_LOCALS_INT(object->int_field(entry->f2_as_index()), 0);
       break;
 
     case ltos:
-      SET_LOCALS_LONG(object->long_field(entry->f2()), 0);
+      SET_LOCALS_LONG(object->long_field(entry->f2_as_index()), 0);
       break;
 
     case ftos:
-      SET_LOCALS_FLOAT(object->float_field(entry->f2()), 0);
+      SET_LOCALS_FLOAT(object->float_field(entry->f2_as_index()), 0);
       break;
 
     case dtos:
-      SET_LOCALS_DOUBLE(object->double_field(entry->f2()), 0);
+      SET_LOCALS_DOUBLE(object->double_field(entry->f2_as_index()), 0);
       break;
 
     case atos:
-      SET_LOCALS_OBJECT(object->obj_field(entry->f2()), 0);
+      SET_LOCALS_OBJECT(object->obj_field(entry->f2_as_index()), 0);
       break;
 
     default:
@@ -629,516 +610,6 @@
   return 0;
 }
 
-int CppInterpreter::method_handle_entry(Method* method,
-                                        intptr_t UNUSED, TRAPS) {
-  JavaThread *thread = (JavaThread *) THREAD;
-  ZeroStack *stack = thread->zero_stack();
-  int argument_slots = method->size_of_parameters();
-  int result_slots = type2size[result_type_of(method)];
-  intptr_t *vmslots = stack->sp();
-  intptr_t *unwind_sp = vmslots + argument_slots;
-
-  // Find the MethodType
-  address p = (address) method;
-  for (jint* pc = method->method_type_offsets_chain(); (*pc) != -1; pc++) {
-    p = *(address*)(p + (*pc));
-  }
-  oop method_type = (oop) p;
-
-  // The MethodHandle is in the slot after the arguments
-  int num_vmslots = argument_slots - 1;
-  oop method_handle = VMSLOTS_OBJECT(num_vmslots);
-
-  // InvokeGeneric requires some extra shuffling
-  oop mhtype = java_lang_invoke_MethodHandle::type(method_handle);
-  bool is_exact = mhtype == method_type;
-  if (!is_exact) {
-    if (true || // FIXME
-        method->intrinsic_id() == vmIntrinsics::_invokeExact) {
-      CALL_VM_NOCHECK_NOFIX(
-        SharedRuntime::throw_WrongMethodTypeException(
-          thread, method_type, mhtype));
-      // NB all oops trashed!
-      assert(HAS_PENDING_EXCEPTION, "should do");
-      stack->set_sp(unwind_sp);
-      return 0;
-    }
-    assert(method->intrinsic_id() == vmIntrinsics::_invokeGeneric, "should be");
-
-    // Load up an adapter from the calling type
-    // NB the x86 code for this (in methodHandles_x86.cpp, search for
-    // "genericInvoker") is really really odd.  I'm hoping it's trying
-    // to accommodate odd VM/class library combinations I can ignore.
-    oop adapter = NULL; //FIXME: load the adapter from the CP cache
-    if (adapter == NULL) {
-      CALL_VM_NOCHECK_NOFIX(
-        SharedRuntime::throw_WrongMethodTypeException(
-          thread, method_type, mhtype));
-      // NB all oops trashed!
-      assert(HAS_PENDING_EXCEPTION, "should do");
-      stack->set_sp(unwind_sp);
-      return 0;
-    }
-
-    // Adapters are shared among form-families of method-type.  The
-    // type being called is passed as a trusted first argument so that
-    // the adapter knows the actual types of its arguments and return
-    // values.
-    insert_vmslots(num_vmslots + 1, 1, THREAD);
-    if (HAS_PENDING_EXCEPTION) {
-      // NB all oops trashed!
-      stack->set_sp(unwind_sp);
-      return 0;
-    }
-
-    vmslots = stack->sp();
-    num_vmslots++;
-    SET_VMSLOTS_OBJECT(method_type, num_vmslots);
-
-    method_handle = adapter;
-  }
-
-  // Start processing
-  process_method_handle(method_handle, THREAD);
-  if (HAS_PENDING_EXCEPTION)
-    result_slots = 0;
-
-  // If this is an invokeExact then the eventual callee will not
-  // have unwound the method handle argument so we have to do it.
-  // If a result is being returned then it will be above the method
-  // handle argument we're unwinding.
-  if (is_exact) {
-    intptr_t result[2];
-    for (int i = 0; i < result_slots; i++)
-      result[i] = stack->pop();
-    stack->pop();
-    for (int i = result_slots - 1; i >= 0; i--)
-      stack->push(result[i]);
-  }
-
-  // Check
-  assert(stack->sp() == unwind_sp - result_slots, "should be");
-
-  // No deoptimized frames on the stack
-  return 0;
-}
-
-void CppInterpreter::process_method_handle(oop method_handle, TRAPS) {
-  JavaThread *thread = (JavaThread *) THREAD;
-  ZeroStack *stack = thread->zero_stack();
-  intptr_t *vmslots = stack->sp();
-
-  bool direct_to_method = false;
-  BasicType src_rtype = T_ILLEGAL;
-  BasicType dst_rtype = T_ILLEGAL;
-
-  MethodHandleEntry *entry =
-    java_lang_invoke_MethodHandle::vmentry(method_handle);
-  MethodHandles::EntryKind entry_kind =
-    (MethodHandles::EntryKind) (((intptr_t) entry) & 0xffffffff);
-
-  Method* method = NULL;
-  switch (entry_kind) {
-  case MethodHandles::_invokestatic_mh:
-    direct_to_method = true;
-    break;
-
-  case MethodHandles::_invokespecial_mh:
-  case MethodHandles::_invokevirtual_mh:
-  case MethodHandles::_invokeinterface_mh:
-    {
-      oop receiver =
-        VMSLOTS_OBJECT(
-          java_lang_invoke_MethodHandle::vmslots(method_handle) - 1);
-      if (receiver == NULL) {
-          stack->set_sp(calculate_unwind_sp(stack, method_handle));
-          CALL_VM_NOCHECK_NOFIX(
-            throw_exception(
-              thread, vmSymbols::java_lang_NullPointerException()));
-          // NB all oops trashed!
-          assert(HAS_PENDING_EXCEPTION, "should do");
-          return;
-      }
-      if (entry_kind != MethodHandles::_invokespecial_mh) {
-        intptr_t index = java_lang_invoke_DirectMethodHandle::vmindex(method_handle);
-        InstanceKlass* rcvrKlass =
-          (InstanceKlass *) receiver->klass();
-        if (entry_kind == MethodHandles::_invokevirtual_mh) {
-          method = (Method*) rcvrKlass->start_of_vtable()[index];
-        }
-        else {
-          oop iclass = java_lang_invoke_MethodHandle::next_target(method_handle);
-          itableOffsetEntry* ki =
-            (itableOffsetEntry *) rcvrKlass->start_of_itable();
-          int i, length = rcvrKlass->itable_length();
-          for (i = 0; i < length; i++, ki++ ) {
-            if (ki->interface_klass() == iclass)
-              break;
-          }
-          if (i == length) {
-            stack->set_sp(calculate_unwind_sp(stack, method_handle));
-            CALL_VM_NOCHECK_NOFIX(
-              throw_exception(
-                thread, vmSymbols::java_lang_IncompatibleClassChangeError()));
-            // NB all oops trashed!
-            assert(HAS_PENDING_EXCEPTION, "should do");
-            return;
-          }
-          itableMethodEntry* im = ki->first_method_entry(receiver->klass());
-          method = im[index].method();
-          if (method == NULL) {
-            stack->set_sp(calculate_unwind_sp(stack, method_handle));
-            CALL_VM_NOCHECK_NOFIX(
-              throw_exception(
-                thread, vmSymbols::java_lang_AbstractMethodError()));
-            // NB all oops trashed!
-            assert(HAS_PENDING_EXCEPTION, "should do");
-            return;
-          }
-        }
-      }
-    }
-    direct_to_method = true;
-    break;
-
-  case MethodHandles::_bound_ref_direct_mh:
-  case MethodHandles::_bound_int_direct_mh:
-  case MethodHandles::_bound_long_direct_mh:
-    direct_to_method = true;
-    // fall through
-  case MethodHandles::_bound_ref_mh:
-  case MethodHandles::_bound_int_mh:
-  case MethodHandles::_bound_long_mh:
-    {
-      BasicType arg_type  = T_ILLEGAL;
-      int       arg_mask  = -1;
-      int       arg_slots = -1;
-      MethodHandles::get_ek_bound_mh_info(
-        entry_kind, arg_type, arg_mask, arg_slots);
-      int arg_slot =
-        java_lang_invoke_BoundMethodHandle::vmargslot(method_handle);
-
-      // Create the new slot(s)
-      intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle);
-      insert_vmslots(arg_slot, arg_slots, THREAD);
-      if (HAS_PENDING_EXCEPTION) {
-        // all oops trashed
-        stack->set_sp(unwind_sp);
-        return;
-      }
-      vmslots = stack->sp();
-
-      // Store bound argument into new stack slot
-      oop arg = java_lang_invoke_BoundMethodHandle::argument(method_handle);
-      if (arg_type == T_OBJECT) {
-        assert(arg_slots == 1, "should be");
-        SET_VMSLOTS_OBJECT(arg, arg_slot);
-      }
-      else {
-        jvalue arg_value;
-        arg_type = java_lang_boxing_object::get_value(arg, &arg_value);
-        switch (arg_type) {
-        case T_BOOLEAN:
-          SET_VMSLOTS_INT(arg_value.z, arg_slot);
-          break;
-        case T_CHAR:
-          SET_VMSLOTS_INT(arg_value.c, arg_slot);
-          break;
-        case T_BYTE:
-          SET_VMSLOTS_INT(arg_value.b, arg_slot);
-          break;
-        case T_SHORT:
-          SET_VMSLOTS_INT(arg_value.s, arg_slot);
-          break;
-        case T_INT:
-          SET_VMSLOTS_INT(arg_value.i, arg_slot);
-          break;
-        case T_FLOAT:
-          SET_VMSLOTS_FLOAT(arg_value.f, arg_slot);
-          break;
-        case T_LONG:
-          SET_VMSLOTS_LONG(arg_value.j, arg_slot + 1);
-          break;
-        case T_DOUBLE:
-          SET_VMSLOTS_DOUBLE(arg_value.d, arg_slot + 1);
-          break;
-        default:
-          tty->print_cr("unhandled type %s", type2name(arg_type));
-          ShouldNotReachHere();
-        }
-      }
-    }
-    break;
-
-  case MethodHandles::_adapter_retype_only:
-  case MethodHandles::_adapter_retype_raw:
-    src_rtype = result_type_of_handle(
-      java_lang_invoke_MethodHandle::next_target(method_handle));
-    dst_rtype = result_type_of_handle(method_handle);
-    break;
-
-  case MethodHandles::_adapter_check_cast:
-    {
-      int arg_slot =
-        java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
-      oop arg = VMSLOTS_OBJECT(arg_slot);
-      if (arg != NULL) {
-        Klass* objKlassOop = arg->klass();
-        Klass* klassOf = java_lang_Class::as_Klass(
-          java_lang_invoke_AdapterMethodHandle::argument(method_handle));
-
-        if (objKlassOop != klassOf &&
-            !objKlassOop->is_subtype_of(klassOf)) {
-          ResourceMark rm(THREAD);
-          const char* objName = Klass::cast(objKlassOop)->external_name();
-          const char* klassName = Klass::cast(klassOf)->external_name();
-          char* message = SharedRuntime::generate_class_cast_message(
-            objName, klassName);
-
-          stack->set_sp(calculate_unwind_sp(stack, method_handle));
-          CALL_VM_NOCHECK_NOFIX(
-            throw_exception(
-              thread, vmSymbols::java_lang_ClassCastException(), message));
-          // NB all oops trashed!
-          assert(HAS_PENDING_EXCEPTION, "should do");
-          return;
-        }
-      }
-    }
-    break;
-
-  case MethodHandles::_adapter_dup_args:
-    {
-      int arg_slot =
-        java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
-      int conv =
-        java_lang_invoke_AdapterMethodHandle::conversion(method_handle);
-      int num_slots = -MethodHandles::adapter_conversion_stack_move(conv);
-      assert(num_slots > 0, "should be");
-
-      // Create the new slot(s)
-      intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle);
-      stack->overflow_check(num_slots, THREAD);
-      if (HAS_PENDING_EXCEPTION) {
-        // all oops trashed
-        stack->set_sp(unwind_sp);
-        return;
-      }
-
-      // Duplicate the arguments
-      for (int i = num_slots - 1; i >= 0; i--)
-        stack->push(*VMSLOTS_SLOT(arg_slot + i));
-
-      vmslots = stack->sp(); // unused, but let the compiler figure that out
-    }
-    break;
-
-  case MethodHandles::_adapter_drop_args:
-    {
-      int arg_slot =
-        java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
-      int conv =
-        java_lang_invoke_AdapterMethodHandle::conversion(method_handle);
-      int num_slots = MethodHandles::adapter_conversion_stack_move(conv);
-      assert(num_slots > 0, "should be");
-
-      remove_vmslots(arg_slot, num_slots, THREAD); // doesn't trap
-      vmslots = stack->sp(); // unused, but let the compiler figure that out
-    }
-    break;
-
-  case MethodHandles::_adapter_opt_swap_1:
-  case MethodHandles::_adapter_opt_swap_2:
-  case MethodHandles::_adapter_opt_rot_1_up:
-  case MethodHandles::_adapter_opt_rot_1_down:
-  case MethodHandles::_adapter_opt_rot_2_up:
-  case MethodHandles::_adapter_opt_rot_2_down:
-    {
-      int arg1 =
-        java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
-      int conv =
-        java_lang_invoke_AdapterMethodHandle::conversion(method_handle);
-      int arg2 = MethodHandles::adapter_conversion_vminfo(conv);
-
-      int swap_bytes = 0, rotate = 0;
-      MethodHandles::get_ek_adapter_opt_swap_rot_info(
-        entry_kind, swap_bytes, rotate);
-      int swap_slots = swap_bytes >> LogBytesPerWord;
-
-      intptr_t tmp;
-      switch (rotate) {
-      case 0: // swap
-        for (int i = 0; i < swap_slots; i++) {
-          tmp = *VMSLOTS_SLOT(arg1 + i);
-          SET_VMSLOTS_SLOT(VMSLOTS_SLOT(arg2 + i), arg1 + i);
-          SET_VMSLOTS_SLOT(&tmp, arg2 + i);
-        }
-        break;
-
-      case 1: // up
-        assert(arg1 - swap_slots > arg2, "should be");
-
-        tmp = *VMSLOTS_SLOT(arg1);
-        for (int i = arg1 - swap_slots; i >= arg2; i--)
-          SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i), i + swap_slots);
-        SET_VMSLOTS_SLOT(&tmp, arg2);
-
-        break;
-
-      case -1: // down
-        assert(arg2 - swap_slots > arg1, "should be");
-
-        tmp = *VMSLOTS_SLOT(arg1);
-        for (int i = arg1 + swap_slots; i <= arg2; i++)
-          SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i), i - swap_slots);
-        SET_VMSLOTS_SLOT(&tmp, arg2);
-        break;
-
-      default:
-        ShouldNotReachHere();
-      }
-    }
-    break;
-
-  case MethodHandles::_adapter_opt_i2l:
-    {
-      int arg_slot =
-        java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
-      int arg = VMSLOTS_INT(arg_slot);
-      intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle);
-      insert_vmslots(arg_slot, 1, THREAD);
-      if (HAS_PENDING_EXCEPTION) {
-        // all oops trashed
-        stack->set_sp(unwind_sp);
-        return;
-      }
-      vmslots = stack->sp();
-      arg_slot++;
-      SET_VMSLOTS_LONG(arg, arg_slot);
-    }
-    break;
-
-  case MethodHandles::_adapter_opt_unboxi:
-  case MethodHandles::_adapter_opt_unboxl:
-    {
-      int arg_slot =
-        java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
-      oop arg = VMSLOTS_OBJECT(arg_slot);
-      jvalue arg_value;
-      if (arg == NULL) {
-        // queue a nullpointer exception for the caller
-        stack->set_sp(calculate_unwind_sp(stack, method_handle));
-        CALL_VM_NOCHECK_NOFIX(
-          throw_exception(
-            thread, vmSymbols::java_lang_NullPointerException()));
-        // NB all oops trashed!
-        assert(HAS_PENDING_EXCEPTION, "should do");
-        return;
-      }
-      BasicType arg_type = java_lang_boxing_object::get_value(arg, &arg_value);
-      if (arg_type == T_LONG || arg_type == T_DOUBLE) {
-        intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle);
-        insert_vmslots(arg_slot, 1, THREAD);
-        if (HAS_PENDING_EXCEPTION) {
-          // all oops trashed
-          stack->set_sp(unwind_sp);
-          return;
-        }
-        vmslots = stack->sp();
-        arg_slot++;
-      }
-      switch (arg_type) {
-      case T_BOOLEAN:
-        SET_VMSLOTS_INT(arg_value.z, arg_slot);
-        break;
-      case T_CHAR:
-        SET_VMSLOTS_INT(arg_value.c, arg_slot);
-        break;
-      case T_BYTE:
-        SET_VMSLOTS_INT(arg_value.b, arg_slot);
-        break;
-      case T_SHORT:
-        SET_VMSLOTS_INT(arg_value.s, arg_slot);
-        break;
-      case T_INT:
-        SET_VMSLOTS_INT(arg_value.i, arg_slot);
-        break;
-      case T_FLOAT:
-        SET_VMSLOTS_FLOAT(arg_value.f, arg_slot);
-        break;
-      case T_LONG:
-        SET_VMSLOTS_LONG(arg_value.j, arg_slot);
-        break;
-      case T_DOUBLE:
-        SET_VMSLOTS_DOUBLE(arg_value.d, arg_slot);
-        break;
-      default:
-        tty->print_cr("unhandled type %s", type2name(arg_type));
-        ShouldNotReachHere();
-      }
-    }
-    break;
-
-  default:
-    tty->print_cr("unhandled entry_kind %s",
-                  MethodHandles::entry_name(entry_kind));
-    ShouldNotReachHere();
-  }
-
-  // Continue along the chain
-  if (direct_to_method) {
-    if (method == NULL) {
-      method =
-        (Method*) java_lang_invoke_MethodHandle::vmtarget(method_handle);
-    }
-    address entry_point = method->from_interpreted_entry();
-    Interpreter::invoke_method(method, entry_point, THREAD);
-  }
-  else {
-    process_method_handle(
-      java_lang_invoke_MethodHandle::next_target(method_handle), THREAD);
-  }
-  // NB all oops now trashed
-
-  // Adapt the result type, if necessary
-  if (src_rtype != dst_rtype && !HAS_PENDING_EXCEPTION) {
-    switch (dst_rtype) {
-    case T_VOID:
-      for (int i = 0; i < type2size[src_rtype]; i++)
-        stack->pop();
-      return;
-
-    case T_INT:
-      switch (src_rtype) {
-      case T_VOID:
-        stack->overflow_check(1, CHECK);
-        stack->push(0);
-        return;
-
-      case T_BOOLEAN:
-      case T_CHAR:
-      case T_BYTE:
-      case T_SHORT:
-        return;
-      }
-      // INT results sometimes need narrowing
-    case T_BOOLEAN:
-    case T_CHAR:
-    case T_BYTE:
-    case T_SHORT:
-      switch (src_rtype) {
-      case T_INT:
-        return;
-      }
-    }
-
-    tty->print_cr("unhandled conversion:");
-    tty->print_cr("src_rtype = %s", type2name(src_rtype));
-    tty->print_cr("dst_rtype = %s", type2name(dst_rtype));
-    ShouldNotReachHere();
-  }
-}
-
 // The new slots will be inserted before slot insert_before.
 // Slots < insert_before will have the same slot number after the insert.
 // Slots >= insert_before will become old_slot + num_slots.
@@ -1380,10 +851,6 @@
     entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();
     break;
 
-  case Interpreter::method_handle:
-    entry_point = ((InterpreterGenerator*) this)->generate_method_handle_entry();
-    break;
-
   case Interpreter::java_lang_math_sin:
   case Interpreter::java_lang_math_cos:
   case Interpreter::java_lang_math_tan:
@@ -1391,6 +858,8 @@
   case Interpreter::java_lang_math_log:
   case Interpreter::java_lang_math_log10:
   case Interpreter::java_lang_math_sqrt:
+  case Interpreter::java_lang_math_pow:
+  case Interpreter::java_lang_math_exp:
     entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);
     break;
 
--- a/src/cpu/zero/vm/cppInterpreter_zero.hpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/zero/vm/cppInterpreter_zero.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -36,7 +36,6 @@
   static int native_entry(Method* method, intptr_t UNUSED, TRAPS);
   static int accessor_entry(Method* method, intptr_t UNUSED, TRAPS);
   static int empty_entry(Method* method, intptr_t UNUSED, TRAPS);
-  static int method_handle_entry(Method* method, intptr_t UNUSED, TRAPS);
 
  public:
   // Main loop of normal_entry
@@ -44,7 +43,6 @@
 
  private:
   // Helpers for method_handle_entry
-  static void process_method_handle(oop method_handle, TRAPS);
   static void insert_vmslots(int insert_before, int num_slots, TRAPS);
   static void remove_vmslots(int first_slot, int num_slots, TRAPS);
   static BasicType result_type_of_handle(oop method_handle);
--- a/src/cpu/zero/vm/frame_zero.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/zero/vm/frame_zero.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -351,7 +351,7 @@
   switch (offset) {
   case pc_off:
     strncpy(fieldbuf, "pc", buflen);
-    if (method()->is_oop()) {
+    if (method()->is_method()) {
       nmethod *code = method()->code();
       if (code && code->pc_desc_at(pc())) {
         SimpleScopeDesc ssd(code, pc());
@@ -367,7 +367,7 @@
 
   case method_off:
     strncpy(fieldbuf, "method", buflen);
-    if (method()->is_oop()) {
+    if (method()->is_method()) {
       method()->name_and_sig_as_C_string(valuebuf, buflen);
     }
     return;
@@ -378,7 +378,7 @@
   }
 
   // Variable part
-  if (method()->is_oop()) {
+  if (method()->is_method()) {
     identify_vp_word(frame_index, addr_of_word(offset),
                      addr_of_word(header_words + 1),
                      unextended_sp() + method()->max_stack(),
@@ -430,4 +430,3 @@
   // unused... but returns fp() to minimize changes introduced by 7087445
   return fp();
 }
-
--- a/src/cpu/zero/vm/frame_zero.inline.hpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/zero/vm/frame_zero.inline.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -36,6 +36,8 @@
   _deopt_state = unknown;
 }
 
+inline address  frame::sender_pc()           const { ShouldNotCallThis();  }
+
 inline frame::frame(ZeroFrame* zf, intptr_t* sp) {
   _zeroframe = zf;
   _sp = sp;
--- a/src/cpu/zero/vm/icBuffer_zero.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/zero/vm/icBuffer_zero.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -40,7 +40,7 @@
 }
 
 void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin,
-                                                Metadata* cached_oop,
+                                                void* cached_oop,
                                                 address entry_point) {
   // NB ic_stub_code_size() must return the size of the code we generate
   ShouldNotCallThis();
@@ -51,7 +51,6 @@
   ShouldNotCallThis();
 }
 
-Metadata* InlineCacheBuffer::ic_buffer_cached_oop(address code_begin) {
-  // NB ic_stub_code_size() must return the size of the code we generate
+void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
   ShouldNotCallThis();
 }
--- a/src/cpu/zero/vm/methodHandles_zero.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/zero/vm/methodHandles_zero.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -24,26 +24,159 @@
  */
 
 #include "precompiled.hpp"
+#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreter.hpp"
 #include "memory/allocation.inline.hpp"
 #include "prims/methodHandles.hpp"
 
-int MethodHandles::adapter_conversion_ops_supported_mask() {
-  return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY)
-         |(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW)
-         |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST)
-         |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM)
-         |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM)
-         |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS)
-         |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS)
-         |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS)
-         |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
-         //|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
-         );
-  // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
+void MethodHandles::invoke_target(Method* method, TRAPS) {
+
+  JavaThread *thread = (JavaThread *) THREAD;
+  ZeroStack *stack = thread->zero_stack();
+  InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
+  interpreterState istate = frame->interpreter_state();
+
+  // Trim back the stack to put the parameters at the top
+  stack->set_sp(istate->stack() + 1);
+
+  Interpreter::invoke_method(method, method->from_interpreted_entry(), THREAD);
+
+  // Convert the result
+  istate->set_stack(stack->sp() - 1);
+
 }
 
-void MethodHandles::generate_method_handle_stub(MacroAssembler*          masm,
-                                                MethodHandles::EntryKind ek) {
-  init_entry(ek, (MethodHandleEntry *) ek);
+oop MethodHandles::popFromStack(TRAPS) {
+
+  JavaThread *thread = (JavaThread *) THREAD;
+  InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
+  interpreterState istate = frame->interpreter_state();
+  intptr_t* topOfStack = istate->stack();
+
+  oop top = STACK_OBJECT(-1);
+  MORE_STACK(-1);
+  istate->set_stack(topOfStack);
+
+  return top;
+
 }
+
+int MethodHandles::method_handle_entry_invokeBasic(Method* method, intptr_t UNUSED, TRAPS) {
+
+  JavaThread *thread = (JavaThread *) THREAD;
+  InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
+  interpreterState istate = frame->interpreter_state();
+  intptr_t* topOfStack = istate->stack();
+
+  // 'this' is a MethodHandle. We resolve the target method by accessing this.form.vmentry.vmtarget.
+  int numArgs = method->size_of_parameters();
+  oop lform1 = java_lang_invoke_MethodHandle::form(STACK_OBJECT(-numArgs)); // this.form
+  oop vmEntry1 = java_lang_invoke_LambdaForm::vmentry(lform1);
+  Method* vmtarget = (Method*) java_lang_invoke_MemberName::vmtarget(vmEntry1);
+
+  invoke_target(vmtarget, THREAD);
+
+  // No deoptimized frames on the stack
+  return 0;
+}
+
+int MethodHandles::method_handle_entry_linkToStaticOrSpecial(Method* method, intptr_t UNUSED, TRAPS) {
+
+  // Pop appendix argument from stack. This is a MemberName which we resolve to the
+  // target method.
+  oop vmentry = popFromStack(THREAD);
+
+  Method* vmtarget = (Method*) java_lang_invoke_MemberName::vmtarget(vmentry);
+
+  invoke_target(vmtarget, THREAD);
+
+  return 0;
+}
+
+int MethodHandles::method_handle_entry_linkToInterface(Method* method, intptr_t UNUSED, TRAPS) {
+  JavaThread *thread = (JavaThread *) THREAD;
+  InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
+  interpreterState istate = frame->interpreter_state();
+
+  // Pop appendix argument from stack. This is a MemberName which we resolve to the
+  // target method.
+  oop vmentry = popFromStack(THREAD);
+  intptr_t* topOfStack = istate->stack();
+
+  // Resolve target method by looking up in the receiver object's itable.
+  Klass* clazz = java_lang_Class::as_Klass(java_lang_invoke_MemberName::clazz(vmentry));
+  intptr_t vmindex = java_lang_invoke_MemberName::vmindex(vmentry);
+  Method* target = (Method*) java_lang_invoke_MemberName::vmtarget(vmentry);
+
+  int numArgs = target->size_of_parameters();
+  oop recv = STACK_OBJECT(-numArgs);
+
+  InstanceKlass* klass_part = InstanceKlass::cast(recv->klass());
+  itableOffsetEntry* ki = (itableOffsetEntry*) klass_part->start_of_itable();
+  int i;
+  for ( i = 0 ; i < klass_part->itable_length() ; i++, ki++ ) {
+    if (ki->interface_klass() == clazz) break;
+  }
+
+  itableMethodEntry* im = ki->first_method_entry(recv->klass());
+  Method* vmtarget = im[vmindex].method();
+
+  invoke_target(vmtarget, THREAD);
+
+  return 0;
+}
+
+int MethodHandles::method_handle_entry_linkToVirtual(Method* method, intptr_t UNUSED, TRAPS) {
+  JavaThread *thread = (JavaThread *) THREAD;
+
+  InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
+  interpreterState istate = frame->interpreter_state();
+
+  // Pop appendix argument from stack. This is a MemberName which we resolve to the
+  // target method.
+  oop vmentry = popFromStack(THREAD);
+  intptr_t* topOfStack = istate->stack();
+
+  // Resolve target method by looking up in the receiver object's vtable.
+  intptr_t vmindex = java_lang_invoke_MemberName::vmindex(vmentry);
+  Method* target = (Method*) java_lang_invoke_MemberName::vmtarget(vmentry);
+  int numArgs = target->size_of_parameters();
+  oop recv = STACK_OBJECT(-numArgs);
+  Klass* clazz = recv->klass();
+  Klass* klass_part = InstanceKlass::cast(clazz);
+  klassVtable* vtable = klass_part->vtable();
+  Method* vmtarget = vtable->method_at(vmindex);
+
+  invoke_target(vmtarget, THREAD);
+
+  return 0;
+}
+
+int MethodHandles::method_handle_entry_invalid(Method* method, intptr_t UNUSED, TRAPS) {
+  ShouldNotReachHere();
+  return 0;
+}
+
+address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* masm,
+                                                                vmIntrinsics::ID iid) {
+  switch (iid) {
+  case vmIntrinsics::_invokeGeneric:
+  case vmIntrinsics::_compiledLambdaForm:
+    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
+    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
+    // They all allow an appendix argument.
+    return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_invalid);
+  case vmIntrinsics::_invokeBasic:
+    return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_invokeBasic);
+  case vmIntrinsics::_linkToStatic:
+  case vmIntrinsics::_linkToSpecial:
+    return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToStaticOrSpecial);
+  case vmIntrinsics::_linkToInterface:
+    return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToInterface);
+  case vmIntrinsics::_linkToVirtual:
+    return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToVirtual);
+  default:
+    ShouldNotReachHere();
+    return NULL;
+  }
+}
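To summarize the four entries just added: the appendix MemberName either names the target Method* directly (linkToStatic/linkToSpecial), or carries an index into the receiver's vtable (linkToVirtual) or into the itable block of its declaring interface (linkToInterface). A toy model of that dispatch, with all types hypothetical:

    #include <cstddef>
    #include <utility>
    #include <vector>

    struct Method { const char* name; };

    struct Klass {
      std::vector<Method*> vtable;   // virtual dispatch slots
      // one (interface, methods) block per implemented interface
      std::vector<std::pair<const Klass*, std::vector<Method*>>> itables;
    };

    struct MemberName {
      Method*      vmtarget;  // direct target, or resolution seed
      std::size_t  vmindex;   // vtable or itable slot
      const Klass* clazz;     // declaring interface, for linkToInterface
    };

    Method* link_to_static_or_special(const MemberName& mn) {
      return mn.vmtarget;                          // no receiver lookup at all
    }

    Method* link_to_virtual(const MemberName& mn, const Klass* receiver) {
      return receiver->vtable[mn.vmindex];         // vtable->method_at(vmindex)
    }

    Method* link_to_interface(const MemberName& mn, const Klass* receiver) {
      for (const auto& entry : receiver->itables)  // find the interface block
        if (entry.first == mn.clazz)
          return entry.second[mn.vmindex];         // im[vmindex].method()
      return nullptr;                              // ICCE territory in the real VM
    }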
--- a/src/cpu/zero/vm/methodHandles_zero.hpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/zero/vm/methodHandles_zero.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -26,6 +26,14 @@
 
 // Adapters
 enum /* platform_dependent_constants */ {
-  adapter_code_size = 0
+  adapter_code_size = sizeof(ZeroEntry) * (Interpreter::method_handle_invoke_LAST - Interpreter::method_handle_invoke_FIRST + 1)
 };
 
+private:
+  static oop popFromStack(TRAPS);
+  static void invoke_target(Method* method, TRAPS);
+  static int method_handle_entry_invokeBasic(Method* method, intptr_t UNUSED, TRAPS);
+  static int method_handle_entry_linkToStaticOrSpecial(Method* method, intptr_t UNUSED, TRAPS);
+  static int method_handle_entry_linkToVirtual(Method* method, intptr_t UNUSED, TRAPS);
+  static int method_handle_entry_linkToInterface(Method* method, intptr_t UNUSED, TRAPS);
+  static int method_handle_entry_invalid(Method* method, intptr_t UNUSED, TRAPS);
--- a/src/cpu/zero/vm/register_zero.hpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/zero/vm/register_zero.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -114,5 +114,8 @@
 };
 
 CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1));
+#ifndef DONT_USE_REGISTER_DEFINES
+#define noreg ((Register)(noreg_RegisterEnumValue))
+#endif
 
 #endif // CPU_ZERO_VM_REGISTER_ZERO_HPP
--- a/src/cpu/zero/vm/relocInfo_zero.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/zero/vm/relocInfo_zero.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -77,3 +77,7 @@
                                                        CodeBuffer*       dst) {
   ShouldNotCallThis();
 }
+
+void metadata_Relocation::pd_fix_value(address x) {
+  ShouldNotCallThis();
+}
--- a/src/cpu/zero/vm/sharedRuntime_zero.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/cpu/zero/vm/sharedRuntime_zero.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -35,6 +35,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
 #include "vmreg_zero.inline.hpp"
+
 #ifdef COMPILER1
 #include "c1/c1_Runtime1.hpp"
 #endif
@@ -47,6 +48,12 @@
 #endif
 
 
+
+static address zero_null_code_stub() {
+  address start = ShouldNotCallThisStub();
+  return start;
+}
+
 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                            VMRegPair *regs,
                                            int total_args_passed,
@@ -63,16 +70,14 @@
                         AdapterFingerPrint *fingerprint) {
   return AdapterHandlerLibrary::new_entry(
     fingerprint,
-    ShouldNotCallThisStub(),
-    ShouldNotCallThisStub(),
-    ShouldNotCallThisStub());
+    CAST_FROM_FN_PTR(address,zero_null_code_stub),
+    CAST_FROM_FN_PTR(address,zero_null_code_stub),
+    CAST_FROM_FN_PTR(address,zero_null_code_stub));
 }
 
 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
                                                 methodHandle method,
                                                 int compile_id,
-                                                int total_args_passed,
-                                                int max_arg,
                                                 BasicType *sig_bt,
                                                 VMRegPair *regs,
                                                 BasicType ret_type) {
@@ -96,19 +101,20 @@
   ShouldNotCallThis();
 }
 
+JRT_LEAF(void, zero_stub())
+  ShouldNotCallThis();
+JRT_END
+
 static RuntimeStub* generate_empty_runtime_stub(const char* name) {
-  CodeBuffer buffer(name, 0, 0);
-  return RuntimeStub::new_runtime_stub(name, &buffer, 0, 0, NULL, false);
+  return CAST_FROM_FN_PTR(RuntimeStub*,zero_stub);
 }
 
 static SafepointBlob* generate_empty_safepoint_blob() {
-  CodeBuffer buffer("handler_blob", 0, 0);
-  return SafepointBlob::create(&buffer, NULL, 0);
+  return CAST_FROM_FN_PTR(SafepointBlob*,zero_stub);
 }
 
 static DeoptimizationBlob* generate_empty_deopt_blob() {
-  CodeBuffer buffer("handler_blob", 0, 0);
-  return DeoptimizationBlob::create(&buffer, NULL, 0, 0, 0, 0);
+  return CAST_FROM_FN_PTR(DeoptimizationBlob*,zero_stub);
 }
 
 
@@ -116,7 +122,7 @@
   _deopt_blob = generate_empty_deopt_blob();
 }
 
-SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
+SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
   return generate_empty_safepoint_blob();
 }
 
@@ -124,6 +130,7 @@
   return generate_empty_runtime_stub("resolve_blob");
 }
 
+
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                          VMRegPair *regs,
                                          int total_args_passed) {
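The replacement of real (empty) code blobs with zero_stub above leans on CAST_FROM_FN_PTR, HotSpot's reinterpreting cast from a function pointer; from memory, its definition in utilities/globalDefinitions.hpp is equivalent to this sketch:

    #include <cstdint>

    typedef uintptr_t address_word;  // stand-in for HotSpot's address_word
    #define CAST_FROM_FN_PTR(new_type, func_ptr) \
            ((new_type)((address_word)(func_ptr)))

So the three blob "generators" now just hand back the address of zero_stub, which asserts via ShouldNotCallThis() if anything ever reaches it; since Zero is interpreter-only, these blobs exist purely to satisfy shared-runtime bookkeeping.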
--- a/src/os/bsd/vm/attachListener_bsd.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/os/bsd/vm/attachListener_bsd.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -342,7 +342,6 @@
 
     // get the credentials of the peer and check the effective uid/guid
     // - check with jeff on this.
-#ifdef _ALLBSD_SOURCE
     uid_t puid;
     gid_t pgid;
     if (::getpeereid(s, &puid, &pgid) != 0) {
@@ -350,17 +349,6 @@
       RESTARTABLE(::close(s), res);
       continue;
     }
-#else
-    struct ucred cred_info;
-    socklen_t optlen = sizeof(cred_info);
-    if (::getsockopt(s, SOL_SOCKET, SO_PEERCRED, (void*)&cred_info, &optlen) == -1) {
-      int res;
-      RESTARTABLE(::close(s), res);
-      continue;
-    }
-    uid_t puid = cred_info.uid;
-    gid_t pgid = cred_info.gid;
-#endif
     uid_t euid = geteuid();
     gid_t egid = getegid();
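With the Linux SO_PEERCRED branch gone, the check relies solely on getpeereid(2), which reports the effective uid/gid of the peer of a connected Unix-domain socket on the BSDs and OS X. Reduced to its essentials (error handling trimmed; s is assumed to be such a socket):

    #include <sys/types.h>
    #include <unistd.h>

    bool peer_is_same_user(int s) {
      uid_t puid; gid_t pgid;
      if (::getpeereid(s, &puid, &pgid) != 0)
        return false;                        // treat failure as a mismatch
      return puid == ::geteuid() && pgid == ::getegid();
    }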
 
--- a/src/os/bsd/vm/osThread_bsd.hpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/os/bsd/vm/osThread_bsd.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -39,18 +39,12 @@
 
  private:
 
-#ifdef _ALLBSD_SOURCE
-
 #ifdef __APPLE__
   typedef thread_t thread_id_t;
 #else
   typedef pthread_t thread_id_t;
 #endif
 
-#else
-  typedef pid_t thread_id_t;
-#endif
-
   // _pthread_id is the pthread id, which is used by library calls
   // (e.g. pthread_kill).
   pthread_t _pthread_id;
--- a/src/os/bsd/vm/os_bsd.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/os/bsd/vm/os_bsd.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -108,14 +108,8 @@
 # include <semaphore.h>
 # include <fcntl.h>
 # include <string.h>
-#ifdef _ALLBSD_SOURCE
 # include <sys/param.h>
 # include <sys/sysctl.h>
-#else
-# include <syscall.h>
-# include <sys/sysinfo.h>
-# include <gnu/libc-version.h>
-#endif
 # include <sys/ipc.h>
 # include <sys/shm.h>
 #ifndef __APPLE__
@@ -150,25 +144,10 @@
 // global variables
 julong os::Bsd::_physical_memory = 0;
 
-#ifndef _ALLBSD_SOURCE
-address   os::Bsd::_initial_thread_stack_bottom = NULL;
-uintptr_t os::Bsd::_initial_thread_stack_size   = 0;
-#endif
 
 int (*os::Bsd::_clock_gettime)(clockid_t, struct timespec *) = NULL;
-#ifndef _ALLBSD_SOURCE
-int (*os::Bsd::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
-Mutex* os::Bsd::_createThread_lock = NULL;
-#endif
 pthread_t os::Bsd::_main_thread;
 int os::Bsd::_page_size = -1;
-#ifndef _ALLBSD_SOURCE
-bool os::Bsd::_is_floating_stack = false;
-bool os::Bsd::_is_NPTL = false;
-bool os::Bsd::_supports_fast_thread_cpu_time = false;
-const char * os::Bsd::_glibc_version = NULL;
-const char * os::Bsd::_libpthread_version = NULL;
-#endif
 
 static jlong initial_time_count=0;
 
@@ -176,7 +155,7 @@
 
 // For diagnostics to print a message once. see run_periodic_checks
 static sigset_t check_signal_done;
-static bool check_signals = true;;
+static bool check_signals = true;
 
 static pid_t _initial_pid = 0;
 
@@ -198,16 +177,8 @@
 }
 
 julong os::Bsd::available_memory() {
-#ifdef _ALLBSD_SOURCE
   // XXXBSD: this is just a stopgap implementation
   return physical_memory() >> 2;
-#else
-  // values in struct sysinfo are "unsigned long"
-  struct sysinfo si;
-  sysinfo(&si);
-
-  return (julong)si.freeram * si.mem_unit;
-#endif
 }
 
 julong os::physical_memory() {
@@ -255,22 +226,6 @@
 }
 
 
-#ifndef _ALLBSD_SOURCE
-#ifndef SYS_gettid
-// i386: 224, ia64: 1105, amd64: 186, sparc 143
-#ifdef __ia64__
-#define SYS_gettid 1105
-#elif __i386__
-#define SYS_gettid 224
-#elif __amd64__
-#define SYS_gettid 186
-#elif __sparc__
-#define SYS_gettid 143
-#else
-#error define gettid for the arch
-#endif
-#endif
-#endif
 
 // Cpu architecture string
 #if   defined(ZERO)
@@ -302,36 +257,7 @@
 #define COMPILER_VARIANT "client"
 #endif
 
-#ifndef _ALLBSD_SOURCE
-// pid_t gettid()
-//
-// Returns the kernel thread id of the currently running thread. Kernel
-// thread id is used to access /proc.
-//
-// (Note that getpid() on BsdThreads returns kernel thread id too; but
-// on NPTL, it returns the same pid for all threads, as required by POSIX.)
-//
-pid_t os::Bsd::gettid() {
-  int rslt = syscall(SYS_gettid);
-  if (rslt == -1) {
-     // old kernel, no NPTL support
-     return getpid();
-  } else {
-     return (pid_t)rslt;
-  }
-}
-
-// Most versions of bsd have a bug where the number of processors are
-// determined by looking at the /proc file system.  In a chroot environment,
-// the system call returns 1.  This causes the VM to act as if it is
-// a single processor and elide locking (see is_MP() call).
-static bool unsafe_chroot_detected = false;
-static const char *unstable_chroot_error = "/proc file system not found.\n"
-                     "Java may be unstable running multithreaded in a chroot "
-                     "environment on Bsd when /proc filesystem is not mounted.";
-#endif
-
-#ifdef _ALLBSD_SOURCE
+
 void os::Bsd::initialize_system_info() {
   int mib[2];
   size_t len;
@@ -370,24 +296,6 @@
   }
 #endif
 }
-#else
-void os::Bsd::initialize_system_info() {
-  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
-  if (processor_count() == 1) {
-    pid_t pid = os::Bsd::gettid();
-    char fname[32];
-    jio_snprintf(fname, sizeof(fname), "/proc/%d", pid);
-    FILE *fp = fopen(fname, "r");
-    if (fp == NULL) {
-      unsafe_chroot_detected = true;
-    } else {
-      fclose(fp);
-    }
-  }
-  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
-  assert(processor_count() > 0, "bsd error");
-}
-#endif
 
 #ifdef __APPLE__
 static const char *get_home() {
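The surviving initialize_system_info() uses the classic sysctl(3) MIB interface. A self-contained sketch of the same pattern for cpu count and physical memory (HW_MEMSIZE is the 64-bit key on macOS; falling back to HW_PHYSMEM elsewhere is an assumption, and that key may be narrower than 64 bits on some BSDs):

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <stdint.h>

    static void query_system_info(int* ncpu, uint64_t* mem_bytes) {
      int mib[2] = { CTL_HW, HW_NCPU };
      size_t len = sizeof(*ncpu);
      if (sysctl(mib, 2, ncpu, &len, NULL, 0) != 0) {
        *ncpu = 1;  // conservative fallback
      }
    #ifdef HW_MEMSIZE
      mib[1] = HW_MEMSIZE;   // macOS: 64-bit byte count
    #else
      mib[1] = HW_PHYSMEM;   // other BSDs; may be a narrower type
    #endif
      *mem_bytes = 0;
      len = sizeof(*mem_bytes);
      sysctl(mib, 2, mem_bytes, &len, NULL, 0);
    }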
@@ -744,171 +652,6 @@
   }
 }
 
-#ifndef _ALLBSD_SOURCE
-//////////////////////////////////////////////////////////////////////////////
-// detecting pthread library
-
-void os::Bsd::libpthread_init() {
-  // Save glibc and pthread version strings. Note that _CS_GNU_LIBC_VERSION
-  // and _CS_GNU_LIBPTHREAD_VERSION are supported in glibc >= 2.3.2. Use a
-  // generic name for earlier versions.
-  // Define macros here so we can build HotSpot on old systems.
-# ifndef _CS_GNU_LIBC_VERSION
-# define _CS_GNU_LIBC_VERSION 2
-# endif
-# ifndef _CS_GNU_LIBPTHREAD_VERSION
-# define _CS_GNU_LIBPTHREAD_VERSION 3
-# endif
-
-  size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
-  if (n > 0) {
-     char *str = (char *)malloc(n);
-     confstr(_CS_GNU_LIBC_VERSION, str, n);
-     os::Bsd::set_glibc_version(str);
-  } else {
-     // _CS_GNU_LIBC_VERSION is not supported, try gnu_get_libc_version()
-     static char _gnu_libc_version[32];
-     jio_snprintf(_gnu_libc_version, sizeof(_gnu_libc_version),
-              "glibc %s %s", gnu_get_libc_version(), gnu_get_libc_release());
-     os::Bsd::set_glibc_version(_gnu_libc_version);
-  }
-
-  n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
-  if (n > 0) {
-     char *str = (char *)malloc(n);
-     confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
-     // Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells
-     // us "NPTL-0.29" even we are running with BsdThreads. Check if this
-     // is the case. BsdThreads has a hard limit on max number of threads.
-     // So sysconf(_SC_THREAD_THREADS_MAX) will return a positive value.
-     // On the other hand, NPTL does not have such a limit, sysconf()
-     // will return -1 and errno is not changed. Check if it is really NPTL.
-     if (strcmp(os::Bsd::glibc_version(), "glibc 2.3.2") == 0 &&
-         strstr(str, "NPTL") &&
-         sysconf(_SC_THREAD_THREADS_MAX) > 0) {
-       free(str);
-       os::Bsd::set_libpthread_version("bsdthreads");
-     } else {
-       os::Bsd::set_libpthread_version(str);
-     }
-  } else {
-    // glibc before 2.3.2 only has BsdThreads.
-    os::Bsd::set_libpthread_version("bsdthreads");
-  }
-
-  if (strstr(libpthread_version(), "NPTL")) {
-     os::Bsd::set_is_NPTL();
-  } else {
-     os::Bsd::set_is_BsdThreads();
-  }
-
-  // BsdThreads have two flavors: floating-stack mode, which allows variable
-  // stack size; and fixed-stack mode. NPTL is always floating-stack.
-  if (os::Bsd::is_NPTL() || os::Bsd::supports_variable_stack_size()) {
-     os::Bsd::set_is_floating_stack();
-  }
-}
-
-/////////////////////////////////////////////////////////////////////////////
-// thread stack
-
-// Force Bsd kernel to expand current thread stack. If "bottom" is close
-// to the stack guard, caller should block all signals.
-//
-// MAP_GROWSDOWN:
-//   A special mmap() flag that is used to implement thread stacks. It tells
-//   kernel that the memory region should extend downwards when needed. This
-//   allows early versions of BsdThreads to only mmap the first few pages
-//   when creating a new thread. Bsd kernel will automatically expand thread
-//   stack as needed (on page faults).
-//
-//   However, because the memory region of a MAP_GROWSDOWN stack can grow on
-//   demand, if a page fault happens outside an already mapped MAP_GROWSDOWN
-//   region, it's hard to tell if the fault is due to a legitimate stack
-//   access or because of reading/writing non-exist memory (e.g. buffer
-//   overrun). As a rule, if the fault happens below current stack pointer,
-//   Bsd kernel does not expand stack, instead a SIGSEGV is sent to the
-//   application (see Bsd kernel fault.c).
-//
-//   This Bsd feature can cause SIGSEGV when VM bangs thread stack for
-//   stack overflow detection.
-//
-//   Newer version of BsdThreads (since glibc-2.2, or, RH-7.x) and NPTL do
-//   not use this flag. However, the stack of initial thread is not created
-//   by pthread, it is still MAP_GROWSDOWN. Also it's possible (though
-//   unlikely) that user code can create a thread with MAP_GROWSDOWN stack
-//   and then attach the thread to JVM.
-//
-// To get around the problem and allow stack banging on Bsd, we need to
-// manually expand thread stack after receiving the SIGSEGV.
-//
-// There are two ways to expand thread stack to address "bottom", we used
-// both of them in JVM before 1.5:
-//   1. adjust stack pointer first so that it is below "bottom", and then
-//      touch "bottom"
-//   2. mmap() the page in question
-//
-// Now alternate signal stack is gone, it's harder to use 2. For instance,
-// if current sp is already near the lower end of page 101, and we need to
-// call mmap() to map page 100, it is possible that part of the mmap() frame
-// will be placed in page 100. When page 100 is mapped, it is zero-filled.
-// That will destroy the mmap() frame and cause VM to crash.
-//
-// The following code works by adjusting sp first, then accessing the "bottom"
-// page to force a page fault. Bsd kernel will then automatically expand the
-// stack mapping.
-//
-// _expand_stack_to() assumes its frame size is less than page size, which
-// should always be true if the function is not inlined.
-
-#if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
-#define NOINLINE
-#else
-#define NOINLINE __attribute__ ((noinline))
-#endif
-
-static void _expand_stack_to(address bottom) NOINLINE;
-
-static void _expand_stack_to(address bottom) {
-  address sp;
-  size_t size;
-  volatile char *p;
-
-  // Adjust bottom to point to the largest address within the same page, it
-  // gives us a one-page buffer if alloca() allocates slightly more memory.
-  bottom = (address)align_size_down((uintptr_t)bottom, os::Bsd::page_size());
-  bottom += os::Bsd::page_size() - 1;
-
-  // sp might be slightly above current stack pointer; if that's the case, we
-  // will alloca() a little more space than necessary, which is OK. Don't use
-  // os::current_stack_pointer(), as its result can be slightly below current
-  // stack pointer, causing us to not alloca enough to reach "bottom".
-  sp = (address)&sp;
-
-  if (sp > bottom) {
-    size = sp - bottom;
-    p = (volatile char *)alloca(size);
-    assert(p != NULL && p <= (volatile char *)bottom, "alloca problem?");
-    p[0] = '\0';
-  }
-}
-
-bool os::Bsd::manually_expand_stack(JavaThread * t, address addr) {
-  assert(t!=NULL, "just checking");
-  assert(t->osthread()->expanding_stack(), "expand should be set");
-  assert(t->stack_base() != NULL, "stack_base was not initialized");
-
-  if (addr <  t->stack_base() && addr >= t->stack_yellow_zone_base()) {
-    sigset_t mask_all, old_sigset;
-    sigfillset(&mask_all);
-    pthread_sigmask(SIG_SETMASK, &mask_all, &old_sigset);
-    _expand_stack_to(addr);
-    pthread_sigmask(SIG_SETMASK, &old_sigset, NULL);
-    return true;
-  }
-  return false;
-}
-#endif
 
 //////////////////////////////////////////////////////////////////////////////
 // create new thread
@@ -917,43 +660,7 @@
 
 // check if it's safe to start a new thread
 static bool _thread_safety_check(Thread* thread) {
-#ifdef _ALLBSD_SOURCE
-    return true;
-#else
-  if (os::Bsd::is_BsdThreads() && !os::Bsd::is_floating_stack()) {
-    // Fixed stack BsdThreads (SuSE Bsd/x86, and some versions of Redhat)
-    //   Heap is mmap'ed at lower end of memory space. Thread stacks are
-    //   allocated (MAP_FIXED) from high address space. Every thread stack
-    //   occupies a fixed size slot (usually 2Mbytes, but user can change
-    //   it to other values if they rebuild BsdThreads).
-    //
-    // Problem with MAP_FIXED is that mmap() can still succeed even part of
-    // the memory region has already been mmap'ed. That means if we have too
-    // many threads and/or very large heap, eventually thread stack will
-    // collide with heap.
-    //
-    // Here we try to prevent heap/stack collision by comparing current
-    // stack bottom with the highest address that has been mmap'ed by JVM
-    // plus a safety margin for memory maps created by native code.
-    //
-    // This feature can be disabled by setting ThreadSafetyMargin to 0
-    //
-    if (ThreadSafetyMargin > 0) {
-      address stack_bottom = os::current_stack_base() - os::current_stack_size();
-
-      // not safe if our stack extends below the safety margin
-      return stack_bottom - ThreadSafetyMargin >= highest_vm_reserved_address();
-    } else {
-      return true;
-    }
-  } else {
-    // Floating stack BsdThreads or NPTL:
-    //   Unlike fixed stack BsdThreads, thread stacks are not MAP_FIXED. When
-    //   there's not enough space left, pthread_create() will fail. If we come
-    //   here, that means enough space has been reserved for stack.
-    return true;
-  }
-#endif
+  return true;
 }
 
 #ifdef __APPLE__
@@ -991,7 +698,6 @@
     return NULL;
   }
 
-#ifdef _ALLBSD_SOURCE
 #ifdef __APPLE__
   // thread_id is mach thread on macos
   osthread->set_thread_id(::mach_thread_self());
@@ -999,17 +705,6 @@
   // thread_id is pthread_id on BSD
   osthread->set_thread_id(::pthread_self());
 #endif
-#else
-  // thread_id is kernel thread id (similar to Solaris LWP id)
-  osthread->set_thread_id(os::Bsd::gettid());
-
-  if (UseNUMA) {
-    int lgrp_id = os::numa_get_group_id();
-    if (lgrp_id != -1) {
-      thread->set_lgrp_id(lgrp_id);
-    }
-  }
-#endif
   // initialize signal mask for this thread
   os::Bsd::hotspot_sigmask(thread);
 
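The retained branch shows the remaining BSD split: the stored thread id is the Mach thread port on macOS and the pthread handle elsewhere. A sketch of the same dispatch, with the caveat that mach_thread_self() allocates a port right that long-running callers would normally release with mach_port_deallocate():

    #include <stdint.h>
    #include <pthread.h>
    #ifdef __APPLE__
    #include <mach/mach.h>
    #endif

    static uintptr_t native_thread_id() {
    #ifdef __APPLE__
      return (uintptr_t)::mach_thread_self();  // Mach thread port
    #else
      return (uintptr_t)::pthread_self();      // pthread handle
    #endif
    }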
@@ -1099,23 +794,9 @@
     // let pthread_create() pick the default value.
   }
 
-#ifndef _ALLBSD_SOURCE
-  // glibc guard page
-  pthread_attr_setguardsize(&attr, os::Bsd::default_guard_size(thr_type));
-#endif
-
   ThreadState state;
 
   {
-
-#ifndef _ALLBSD_SOURCE
-    // Serialize thread creation if we are running with fixed stack BsdThreads
-    bool lock = os::Bsd::is_BsdThreads() && !os::Bsd::is_floating_stack();
-    if (lock) {
-      os::Bsd::createThread_lock()->lock_without_safepoint_check();
-    }
-#endif
-
     pthread_t tid;
     int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
 
@@ -1128,9 +809,6 @@
       // Need to clean up stuff we've allocated so far
       thread->set_osthread(NULL);
       delete osthread;
-#ifndef _ALLBSD_SOURCE
-      if (lock) os::Bsd::createThread_lock()->unlock();
-#endif
       return false;
     }
 
@@ -1146,11 +824,6 @@
       }
     }
 
-#ifndef _ALLBSD_SOURCE
-    if (lock) {
-      os::Bsd::createThread_lock()->unlock();
-    }
-#endif
   }
 
   // Aborted due to thread limit being reached
@@ -1188,15 +861,11 @@
   }
 
   // Store pthread info into the OSThread
-#ifdef _ALLBSD_SOURCE
 #ifdef __APPLE__
   osthread->set_thread_id(::mach_thread_self());
 #else
   osthread->set_thread_id(::pthread_self());
 #endif
-#else
-  osthread->set_thread_id(os::Bsd::gettid());
-#endif
   osthread->set_pthread_id(::pthread_self());
 
   // initialize floating point control register
@@ -1207,35 +876,6 @@
 
   thread->set_osthread(osthread);
 
-#ifndef _ALLBSD_SOURCE
-  if (UseNUMA) {
-    int lgrp_id = os::numa_get_group_id();
-    if (lgrp_id != -1) {
-      thread->set_lgrp_id(lgrp_id);
-    }
-  }
-
-  if (os::Bsd::is_initial_thread()) {
-    // If current thread is initial thread, its stack is mapped on demand,
-    // see notes about MAP_GROWSDOWN. Here we try to force kernel to map
-    // the entire stack region to avoid SEGV in stack banging.
-    // It is also useful to get around the heap-stack-gap problem on SuSE
-    // kernel (see 4821821 for details). We first expand stack to the top
-    // of yellow zone, then enable stack yellow zone (order is significant,
-    // enabling yellow zone first will crash JVM on SuSE Bsd), so there
-    // is no gap between the last two virtual memory regions.
-
-    JavaThread *jt = (JavaThread *)thread;
-    address addr = jt->stack_yellow_zone_base();
-    assert(addr != NULL, "initialization problem?");
-    assert(jt->stack_available(addr) > 0, "stack guard should not be enabled");
-
-    osthread->set_expanding_stack();
-    os::Bsd::manually_expand_stack(jt, addr);
-    osthread->clear_expanding_stack();
-  }
-#endif
-
   // initialize signal mask for this thread
   // and save the caller's signal mask
   os::Bsd::hotspot_sigmask(thread);
@@ -1290,247 +930,6 @@
   return ThreadLocalStorage::thread();
 }
 
-//////////////////////////////////////////////////////////////////////////////
-// initial thread
-
-#ifndef _ALLBSD_SOURCE
-// Check if current thread is the initial thread, similar to Solaris thr_main.
-bool os::Bsd::is_initial_thread(void) {
-  char dummy;
-  // If called before init complete, thread stack bottom will be null.
-  // Can be called if fatal error occurs before initialization.
-  if (initial_thread_stack_bottom() == NULL) return false;
-  assert(initial_thread_stack_bottom() != NULL &&
-         initial_thread_stack_size()   != 0,
-         "os::init did not locate initial thread's stack region");
-  if ((address)&dummy >= initial_thread_stack_bottom() &&
-      (address)&dummy < initial_thread_stack_bottom() + initial_thread_stack_size())
-       return true;
-  else return false;
-}
-
-// Find the virtual memory area that contains addr
-static bool find_vma(address addr, address* vma_low, address* vma_high) {
-  FILE *fp = fopen("/proc/self/maps", "r");
-  if (fp) {
-    address low, high;
-    while (!feof(fp)) {
-      if (fscanf(fp, "%p-%p", &low, &high) == 2) {
-        if (low <= addr && addr < high) {
-           if (vma_low)  *vma_low  = low;
-           if (vma_high) *vma_high = high;
-           fclose (fp);
-           return true;
-        }
-      }
-      for (;;) {
-        int ch = fgetc(fp);
-        if (ch == EOF || ch == (int)'\n') break;
-      }
-    }
-    fclose(fp);
-  }
-  return false;
-}
-
-// Locate initial thread stack. This special handling of initial thread stack
-// is needed because pthread_getattr_np() on most (all?) Bsd distros returns
-// bogus value for initial thread.
-void os::Bsd::capture_initial_stack(size_t max_size) {
-  // stack size is the easy part, get it from RLIMIT_STACK
-  size_t stack_size;
-  struct rlimit rlim;
-  getrlimit(RLIMIT_STACK, &rlim);
-  stack_size = rlim.rlim_cur;
-
-  // 6308388: a bug in ld.so will relocate its own .data section to the
-  //   lower end of primordial stack; reduce ulimit -s value a little bit
-  //   so we won't install guard page on ld.so's data section.
-  stack_size -= 2 * page_size();
-
-  // 4441425: avoid crash with "unlimited" stack size on SuSE 7.1 or Redhat
-  //   7.1, in both cases we will get 2G in return value.
-  // 4466587: glibc 2.2.x compiled w/o "--enable-kernel=2.4.0" (RH 7.0,
-  //   SuSE 7.2, Debian) can not handle alternate signal stack correctly
-  //   for initial thread if its stack size exceeds 6M. Cap it at 2M,
-  //   in case other parts in glibc still assumes 2M max stack size.
-  // FIXME: alt signal stack is gone, maybe we can relax this constraint?
-#ifndef IA64
-  if (stack_size > 2 * K * K) stack_size = 2 * K * K;
-#else
-  // Problem still exists RH7.2 (IA64 anyway) but 2MB is a little small
-  if (stack_size > 4 * K * K) stack_size = 4 * K * K;
-#endif
-
-  // Try to figure out where the stack base (top) is. This is harder.
-  //
-  // When an application is started, glibc saves the initial stack pointer in
-  // a global variable "__libc_stack_end", which is then used by system
-  // libraries. __libc_stack_end should be pretty close to stack top. The
-  // variable is available since the very early days. However, because it is
-  // a private interface, it could disappear in the future.
-  //
-  // Bsd kernel saves start_stack information in /proc/<pid>/stat. Similar
-  // to __libc_stack_end, it is very close to stack top, but isn't the real
-  // stack top. Note that /proc may not exist if VM is running as a chroot
-  // program, so reading /proc/<pid>/stat could fail. Also the contents of
-  // /proc/<pid>/stat could change in the future (though unlikely).
-  //
-  // We try __libc_stack_end first. If that doesn't work, look for
-  // /proc/<pid>/stat. If neither of them works, we use current stack pointer
-  // as a hint, which should work well in most cases.
-
-  uintptr_t stack_start;
-
-  // try __libc_stack_end first
-  uintptr_t *p = (uintptr_t *)dlsym(RTLD_DEFAULT, "__libc_stack_end");
-  if (p && *p) {
-    stack_start = *p;
-  } else {
-    // see if we can get the start_stack field from /proc/self/stat
-    FILE *fp;
-    int pid;
-    char state;
-    int ppid;
-    int pgrp;
-    int session;
-    int nr;
-    int tpgrp;
-    unsigned long flags;
-    unsigned long minflt;
-    unsigned long cminflt;
-    unsigned long majflt;
-    unsigned long cmajflt;
-    unsigned long utime;
-    unsigned long stime;
-    long cutime;
-    long cstime;
-    long prio;
-    long nice;
-    long junk;
-    long it_real;
-    uintptr_t start;
-    uintptr_t vsize;
-    intptr_t rss;
-    uintptr_t rsslim;
-    uintptr_t scodes;
-    uintptr_t ecode;
-    int i;
-
-    // Figure what the primordial thread stack base is. Code is inspired
-    // by email from Hans Boehm. /proc/self/stat begins with current pid,
-    // followed by command name surrounded by parentheses, state, etc.
-    char stat[2048];
-    int statlen;
-
-    fp = fopen("/proc/self/stat", "r");
-    if (fp) {
-      statlen = fread(stat, 1, 2047, fp);
-      stat[statlen] = '\0';
-      fclose(fp);
-
-      // Skip pid and the command string. Note that we could be dealing with
-      // weird command names, e.g. user could decide to rename java launcher
-      // to "java 1.4.2 :)", then the stat file would look like
-      //                1234 (java 1.4.2 :)) R ... ...
-      // We don't really need to know the command string, just find the last
-      // occurrence of ")" and then start parsing from there. See bug 4726580.
-      char * s = strrchr(stat, ')');
-
-      i = 0;
-      if (s) {
-        // Skip blank chars
-        do s++; while (isspace(*s));
-
-#define _UFM UINTX_FORMAT
-#define _DFM INTX_FORMAT
-
-        /*                                     1   1   1   1   1   1   1   1   1   1   2   2    2    2    2    2    2    2    2 */
-        /*              3  4  5  6  7  8   9   0   1   2   3   4   5   6   7   8   9   0   1    2    3    4    5    6    7    8 */
-        i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld " _UFM _UFM _DFM _UFM _UFM _UFM _UFM,
-             &state,          /* 3  %c  */
-             &ppid,           /* 4  %d  */
-             &pgrp,           /* 5  %d  */
-             &session,        /* 6  %d  */
-             &nr,             /* 7  %d  */
-             &tpgrp,          /* 8  %d  */
-             &flags,          /* 9  %lu  */
-             &minflt,         /* 10 %lu  */
-             &cminflt,        /* 11 %lu  */
-             &majflt,         /* 12 %lu  */
-             &cmajflt,        /* 13 %lu  */
-             &utime,          /* 14 %lu  */
-             &stime,          /* 15 %lu  */
-             &cutime,         /* 16 %ld  */
-             &cstime,         /* 17 %ld  */
-             &prio,           /* 18 %ld  */
-             &nice,           /* 19 %ld  */
-             &junk,           /* 20 %ld  */
-             &it_real,        /* 21 %ld  */
-             &start,          /* 22 UINTX_FORMAT */
-             &vsize,          /* 23 UINTX_FORMAT */
-             &rss,            /* 24 INTX_FORMAT  */
-             &rsslim,         /* 25 UINTX_FORMAT */
-             &scodes,         /* 26 UINTX_FORMAT */
-             &ecode,          /* 27 UINTX_FORMAT */
-             &stack_start);   /* 28 UINTX_FORMAT */
-      }
-
-#undef _UFM
-#undef _DFM
-
-      if (i != 28 - 2) {
-         assert(false, "Bad conversion from /proc/self/stat");
-         // product mode - assume we are the initial thread, good luck in the
-         // embedded case.
-         warning("Can't detect initial thread stack location - bad conversion");
-         stack_start = (uintptr_t) &rlim;
-      }
-    } else {
-      // For some reason we can't open /proc/self/stat (for example, running on
-      // FreeBSD with a Bsd emulator, or inside chroot), this should work for
-      // most cases, so don't abort:
-      warning("Can't detect initial thread stack location - no /proc/self/stat");
-      stack_start = (uintptr_t) &rlim;
-    }
-  }
-
-  // Now we have a pointer (stack_start) very close to the stack top, the
-  // next thing to do is to figure out the exact location of stack top. We
-  // can find out the virtual memory area that contains stack_start by
-  // reading /proc/self/maps, it should be the last vma in /proc/self/maps,
-  // and its upper limit is the real stack top. (again, this would fail if
-  // running inside chroot, because /proc may not exist.)
-
-  uintptr_t stack_top;
-  address low, high;
-  if (find_vma((address)stack_start, &low, &high)) {
-    // success, "high" is the true stack top. (ignore "low", because initial
-    // thread stack grows on demand, its real bottom is high - RLIMIT_STACK.)
-    stack_top = (uintptr_t)high;
-  } else {
-    // failed, likely because /proc/self/maps does not exist
-    warning("Can't detect initial thread stack location - find_vma failed");
-    // best effort: stack_start is normally within a few pages below the real
-    // stack top, use it as stack top, and reduce stack size so we won't put
-    // guard page outside stack.
-    stack_top = stack_start;
-    stack_size -= 16 * page_size();
-  }
-
-  // stack_top could be partially down the page so align it
-  stack_top = align_size_up(stack_top, page_size());
-
-  if (max_size && stack_size > max_size) {
-     _initial_thread_stack_size = max_size;
-  } else {
-     _initial_thread_stack_size = stack_size;
-  }
-
-  _initial_thread_stack_size = align_size_down(_initial_thread_stack_size, page_size());
-  _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;
-}
-#endif
 
 ////////////////////////////////////////////////////////////////////////////////
 // time support
@@ -1576,7 +975,7 @@
 void os::Bsd::clock_init() {
         // XXXDARWIN: Investigate replacement monotonic clock
 }
-#elif defined(_ALLBSD_SOURCE)
+#else
 void os::Bsd::clock_init() {
   struct timespec res;
   struct timespec tp;
@@ -1586,86 +985,8 @@
     _clock_gettime = ::clock_gettime;
   }
 }
-#else
-void os::Bsd::clock_init() {
-  // we do dlopen's in this particular order due to bug in bsd
-  // dynamical loader (see 6348968) leading to crash on exit
-  void* handle = dlopen("librt.so.1", RTLD_LAZY);
-  if (handle == NULL) {
-    handle = dlopen("librt.so", RTLD_LAZY);
-  }
-
-  if (handle) {
-    int (*clock_getres_func)(clockid_t, struct timespec*) =
-           (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_getres");
-    int (*clock_gettime_func)(clockid_t, struct timespec*) =
-           (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_gettime");
-    if (clock_getres_func && clock_gettime_func) {
-      // See if monotonic clock is supported by the kernel. Note that some
-      // early implementations simply return kernel jiffies (updated every
-      // 1/100 or 1/1000 second). It would be bad to use such a low res clock
-      // for nano time (though the monotonic property is still nice to have).
-      // It's fixed in newer kernels, however clock_getres() still returns
-      // 1/HZ. We check if clock_getres() works, but will ignore its reported
-      // resolution for now. Hopefully as people move to new kernels, this
-      // won't be a problem.
-      struct timespec res;
-      struct timespec tp;
-      if (clock_getres_func (CLOCK_MONOTONIC, &res) == 0 &&
-          clock_gettime_func(CLOCK_MONOTONIC, &tp)  == 0) {
-        // yes, monotonic clock is supported
-        _clock_gettime = clock_gettime_func;
-      } else {
-        // close librt if there is no monotonic clock
-        dlclose(handle);
-      }
-    }
-  }
-}
 #endif
 
-#ifndef _ALLBSD_SOURCE
-#ifndef SYS_clock_getres
-
-#if defined(IA32) || defined(AMD64)
-#define SYS_clock_getres IA32_ONLY(266)  AMD64_ONLY(229)
-#define sys_clock_getres(x,y)  ::syscall(SYS_clock_getres, x, y)
-#else
-#warning "SYS_clock_getres not defined for this platform, disabling fast_thread_cpu_time"
-#define sys_clock_getres(x,y)  -1
-#endif
-
-#else
-#define sys_clock_getres(x,y)  ::syscall(SYS_clock_getres, x, y)
-#endif
-
-void os::Bsd::fast_thread_clock_init() {
-  if (!UseBsdPosixThreadCPUClocks) {
-    return;
-  }
-  clockid_t clockid;
-  struct timespec tp;
-  int (*pthread_getcpuclockid_func)(pthread_t, clockid_t *) =
-      (int(*)(pthread_t, clockid_t *)) dlsym(RTLD_DEFAULT, "pthread_getcpuclockid");
-
-  // Switch to using fast clocks for thread cpu time if
-  // the sys_clock_getres() returns 0 error code.
-  // Note, that some kernels may support the current thread
-  // clock (CLOCK_THREAD_CPUTIME_ID) but not the clocks
-  // returned by the pthread_getcpuclockid().
-  // If the fast Posix clocks are supported then the sys_clock_getres()
-  // must return at least tp.tv_sec == 0 which means a resolution
-  // better than 1 sec. This is extra check for reliability.
-
-  if(pthread_getcpuclockid_func &&
-     pthread_getcpuclockid_func(_main_thread, &clockid) == 0 &&
-     sys_clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {
-
-    _supports_fast_thread_cpu_time = true;
-    _pthread_getcpuclockid = pthread_getcpuclockid_func;
-  }
-}
-#endif
 
 jlong os::javaTimeNanos() {
   if (Bsd::supports_monotonic_clock()) {
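The surviving BSD clock_init() only installs clock_gettime after probing CLOCK_MONOTONIC with both clock_getres() and clock_gettime(). The probe in isolation (a sketch of the pattern, not the VM code):

    #include <time.h>

    static bool have_monotonic_clock() {
      struct timespec res;
      struct timespec tp;
      // Require both calls to succeed before trusting the clock.
      return ::clock_getres(CLOCK_MONOTONIC, &res) == 0 &&
             ::clock_gettime(CLOCK_MONOTONIC, &tp) == 0;
    }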
@@ -1978,7 +1299,6 @@
   return false;
 }
 
-#ifdef _ALLBSD_SOURCE
 // ported from solaris version
 bool os::dll_address_to_library_name(address addr, char* buf,
                                      int buflen, int* offset) {
@@ -1994,86 +1314,10 @@
      return false;
   }
 }
-#else
-struct _address_to_library_name {
-  address addr;          // input : memory address
-  size_t  buflen;        //         size of fname
-  char*   fname;         // output: library name
-  address base;          //         library base addr
-};
-
-static int address_to_library_name_callback(struct dl_phdr_info *info,
-                                            size_t size, void *data) {
-  int i;
-  bool found = false;
-  address libbase = NULL;
-  struct _address_to_library_name * d = (struct _address_to_library_name *)data;
-
-  // iterate through all loadable segments
-  for (i = 0; i < info->dlpi_phnum; i++) {
-    address segbase = (address)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
-    if (info->dlpi_phdr[i].p_type == PT_LOAD) {
-      // base address of a library is the lowest address of its loaded
-      // segments.
-      if (libbase == NULL || libbase > segbase) {
-        libbase = segbase;
-      }
-      // see if 'addr' is within current segment
-      if (segbase <= d->addr &&
-          d->addr < segbase + info->dlpi_phdr[i].p_memsz) {
-        found = true;
-      }
-    }
-  }
-
-  // dlpi_name is NULL or empty if the ELF file is executable, return 0
-  // so dll_address_to_library_name() can fall through to use dladdr() which
-  // can figure out executable name from argv[0].
-  if (found && info->dlpi_name && info->dlpi_name[0]) {
-    d->base = libbase;
-    if (d->fname) {
-      jio_snprintf(d->fname, d->buflen, "%s", info->dlpi_name);
-    }
-    return 1;
-  }
-  return 0;
-}
-
-bool os::dll_address_to_library_name(address addr, char* buf,
-                                     int buflen, int* offset) {
-  Dl_info dlinfo;
-  struct _address_to_library_name data;
-
-  // There is a bug in old glibc dladdr() implementation that it could resolve
-  // to wrong library name if the .so file has a base address != NULL. Here
-  // we iterate through the program headers of all loaded libraries to find
-  // out which library 'addr' really belongs to. This workaround can be
-  // removed once the minimum requirement for glibc is moved to 2.3.x.
-  data.addr = addr;
-  data.fname = buf;
-  data.buflen = buflen;
-  data.base = NULL;
-  int rslt = dl_iterate_phdr(address_to_library_name_callback, (void *)&data);
-
-  if (rslt) {
-     // buf already contains library name
-     if (offset) *offset = addr - data.base;
-     return true;
-  } else if (dladdr((void*)addr, &dlinfo)){
-     if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
-     if (offset) *offset = addr - (address)dlinfo.dli_fbase;
-     return true;
-  } else {
-     if (buf) buf[0] = '\0';
-     if (offset) *offset = -1;
-     return false;
-  }
-}
-#endif
-
-  // Loads .dll/.so and
-  // in case of error it checks if .dll/.so was built for the
-  // same architecture as Hotspot is running on
+
+// Loads .dll/.so and, in case of error, checks whether the
+// .dll/.so was built for the same architecture as HotSpot
+// is running on.
 
 #ifdef __APPLE__
 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
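A note on the dladdr()-based dll_address_to_library_name() retained above: dladdr(3) resolves an address to its containing image and load base in one call, which is why the glibc dl_iterate_phdr workaround could be dropped. A minimal sketch of the call (illustrative only):

    #include <dlfcn.h>
    #include <stdio.h>

    static void print_library_of(void* addr) {
      Dl_info dlinfo;
      if (dladdr(addr, &dlinfo) != 0 && dlinfo.dli_fname != NULL) {
        // Offset within the image = addr - load base (dli_fbase).
        printf("%s+0x%lx\n", dlinfo.dli_fname,
               (unsigned long)((char*)addr - (char*)dlinfo.dli_fbase));
      } else {
        printf("<unknown>\n");
      }
    }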
@@ -2292,7 +1536,6 @@
 
 void os::print_dll_info(outputStream *st) {
    st->print_cr("Dynamic libraries:");
-#ifdef _ALLBSD_SOURCE
 #ifdef RTLD_DI_LINKMAP
     Dl_info dli;
     void *handle;
@@ -2336,16 +1579,6 @@
 #else
    st->print_cr("Error: Cannot print dynamic libraries.");
 #endif
-#else
-   char fname[32];
-   pid_t pid = os::Bsd::gettid();
-
-   jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid);
-
-   if (!_print_ascii_file(fname, st)) {
-     st->print("Can not get library information for pid = %d\n", pid);
-   }
-#endif
 }
 
 void os::print_os_info_brief(outputStream* st) {
@@ -2374,22 +1607,10 @@
   st->print("Memory:");
   st->print(" %dk page", os::vm_page_size()>>10);
 
-#ifndef _ALLBSD_SOURCE
-  // values in struct sysinfo are "unsigned long"
-  struct sysinfo si;
-  sysinfo(&si);
-#endif
-
   st->print(", physical " UINT64_FORMAT "k",
             os::physical_memory() >> 10);
   st->print("(" UINT64_FORMAT "k free)",
             os::available_memory() >> 10);
-#ifndef _ALLBSD_SOURCE
-  st->print(", swap " UINT64_FORMAT "k",
-            ((jlong)si.totalswap * si.mem_unit) >> 10);
-  st->print("(" UINT64_FORMAT "k free)",
-            ((jlong)si.freeswap * si.mem_unit) >> 10);
-#endif
   st->cr();
 
   // meminfo
@@ -2786,42 +2007,13 @@
 #endif
 }
 
-#ifndef _ALLBSD_SOURCE
-// Define MAP_HUGETLB here so we can build HotSpot on old systems.
-#ifndef MAP_HUGETLB
-#define MAP_HUGETLB 0x40000
-#endif
-
-// Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
-#ifndef MADV_HUGEPAGE
-#define MADV_HUGEPAGE 14
-#endif
-#endif
 
 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                        bool exec) {
-#ifndef _ALLBSD_SOURCE
-  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
-    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
-    uintptr_t res =
-      (uintptr_t) ::mmap(addr, size, prot,
-                         MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
-                         -1, 0);
-    return res != (uintptr_t) MAP_FAILED;
-  }
-#endif
-
   return commit_memory(addr, size, exec);
 }
 
 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
-#ifndef _ALLBSD_SOURCE
-  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
-    // We don't check the return value: madvise(MADV_HUGEPAGE) may not
-    // be supported or the memory may already be backed by huge pages.
-    ::madvise(addr, bytes, MADV_HUGEPAGE);
-  }
-#endif
 }
 
 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
@@ -2860,111 +2052,6 @@
   return end;
 }
 
-#ifndef _ALLBSD_SOURCE
-// Something to do with the numa-aware allocator needs these symbols
-extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }
-extern "C" JNIEXPORT void numa_error(char *where) { }
-extern "C" JNIEXPORT int fork1() { return fork(); }
-
-
-// If we are running with libnuma version > 2, then we should
-// be trying to use symbols with versions 1.1
-// If we are running with earlier version, which did not have symbol versions,
-// we should use the base version.
-void* os::Bsd::libnuma_dlsym(void* handle, const char *name) {
-  void *f = dlvsym(handle, name, "libnuma_1.1");
-  if (f == NULL) {
-    f = dlsym(handle, name);
-  }
-  return f;
-}
-
-bool os::Bsd::libnuma_init() {
-  // sched_getcpu() should be in libc.
-  set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
-                                  dlsym(RTLD_DEFAULT, "sched_getcpu")));
-
-  if (sched_getcpu() != -1) { // Does it work?
-    void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
-    if (handle != NULL) {
-      set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
-                                           libnuma_dlsym(handle, "numa_node_to_cpus")));
-      set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
-                                       libnuma_dlsym(handle, "numa_max_node")));
-      set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
-                                        libnuma_dlsym(handle, "numa_available")));
-      set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
-                                            libnuma_dlsym(handle, "numa_tonode_memory")));
-      set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
-                                            libnuma_dlsym(handle, "numa_interleave_memory")));
-
-
-      if (numa_available() != -1) {
-        set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
-        // Create a cpu -> node mapping
-        _cpu_to_node = new (ResourceObj::C_HEAP) GrowableArray<int>(0, true);
-        rebuild_cpu_to_node_map();
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-// rebuild_cpu_to_node_map() constructs a table mapping cpud id to node id.
-// The table is later used in get_node_by_cpu().
-void os::Bsd::rebuild_cpu_to_node_map() {
-  const size_t NCPUS = 32768; // Since the buffer size computation is very obscure
-                              // in libnuma (possible values are starting from 16,
-                              // and continuing up with every other power of 2, but less
-                              // than the maximum number of CPUs supported by kernel), and
-                              // is a subject to change (in libnuma version 2 the requirements
-                              // are more reasonable) we'll just hardcode the number they use
-                              // in the library.
-  const size_t BitsPerCLong = sizeof(long) * CHAR_BIT;
-
-  size_t cpu_num = os::active_processor_count();
-  size_t cpu_map_size = NCPUS / BitsPerCLong;
-  size_t cpu_map_valid_size =
-    MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size);
-
-  cpu_to_node()->clear();
-  cpu_to_node()->at_grow(cpu_num - 1);
-  size_t node_num = numa_get_groups_num();
-
-  unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size);
-  for (size_t i = 0; i < node_num; i++) {
-    if (numa_node_to_cpus(i, cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {
-      for (size_t j = 0; j < cpu_map_valid_size; j++) {
-        if (cpu_map[j] != 0) {
-          for (size_t k = 0; k < BitsPerCLong; k++) {
-            if (cpu_map[j] & (1UL << k)) {
-              cpu_to_node()->at_put(j * BitsPerCLong + k, i);
-            }
-          }
-        }
-      }
-    }
-  }
-  FREE_C_HEAP_ARRAY(unsigned long, cpu_map);
-}
-
-int os::Bsd::get_node_by_cpu(int cpu_id) {
-  if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
-    return cpu_to_node()->at(cpu_id);
-  }
-  return -1;
-}
-
-GrowableArray<int>* os::Bsd::_cpu_to_node;
-os::Bsd::sched_getcpu_func_t os::Bsd::_sched_getcpu;
-os::Bsd::numa_node_to_cpus_func_t os::Bsd::_numa_node_to_cpus;
-os::Bsd::numa_max_node_func_t os::Bsd::_numa_max_node;
-os::Bsd::numa_available_func_t os::Bsd::_numa_available;
-os::Bsd::numa_tonode_memory_func_t os::Bsd::_numa_tonode_memory;
-os::Bsd::numa_interleave_memory_func_t os::Bsd::_numa_interleave_memory;
-unsigned long* os::Bsd::_numa_all_nodes;
-#endif
 
 bool os::pd_uncommit_memory(char* addr, size_t size) {
 #ifdef __OpenBSD__
@@ -3084,42 +2171,7 @@
 }
 
 bool os::Bsd::hugetlbfs_sanity_check(bool warn, size_t page_size) {
-  bool result = false;
-#ifndef _ALLBSD_SOURCE
-  void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE,
-                  MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
-                  -1, 0);
-
-  if (p != (void *) -1) {
-    // We don't know if this really is a huge page or not.
-    FILE *fp = fopen("/proc/self/maps", "r");
-    if (fp) {
-      while (!feof(fp)) {
-        char chars[257];
-        long x = 0;
-        if (fgets(chars, sizeof(chars), fp)) {
-          if (sscanf(chars, "%lx-%*x", &x) == 1
-              && x == (long)p) {
-            if (strstr (chars, "hugepage")) {
-              result = true;
-              break;
-            }
-          }
-        }
-      }
-      fclose(fp);
-    }
-    munmap (p, page_size);
-    if (result)
-      return true;
-  }
-
-  if (warn) {
-    warning("HugeTLBFS is not supported by the operating system.");
-  }
-#endif
-
-  return result;
+  return false;
 }
 
 /*
@@ -3164,92 +2216,8 @@
 static size_t _large_page_size = 0;
 
 void os::large_page_init() {
-#ifndef _ALLBSD_SOURCE
-  if (!UseLargePages) {
-    UseHugeTLBFS = false;
-    UseSHM = false;
-    return;
-  }
-
-  if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
-    // If UseLargePages is specified on the command line try both methods,
-    // if it's default, then try only HugeTLBFS.
-    if (FLAG_IS_DEFAULT(UseLargePages)) {
-      UseHugeTLBFS = true;
-    } else {
-      UseHugeTLBFS = UseSHM = true;
-    }
-  }
-
-  if (LargePageSizeInBytes) {
-    _large_page_size = LargePageSizeInBytes;
-  } else {
-    // large_page_size on Bsd is used to round up heap size. x86 uses either
-    // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
-    // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
-    // page as large as 256M.
-    //
-    // Here we try to figure out page size by parsing /proc/meminfo and looking
-    // for a line with the following format:
-    //    Hugepagesize:     2048 kB
-    //
-    // If we can't determine the value (e.g. /proc is not mounted, or the text
-    // format has been changed), we'll use the largest page size supported by
-    // the processor.
-
-#ifndef ZERO
-    _large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
-                       ARM_ONLY(2 * M) PPC_ONLY(4 * M);
-#endif // ZERO
-
-    FILE *fp = fopen("/proc/meminfo", "r");
-    if (fp) {
-      while (!feof(fp)) {
-        int x = 0;
-        char buf[16];
-        if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
-          if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
-            _large_page_size = x * K;
-            break;
-          }
-        } else {
-          // skip to next line
-          for (;;) {
-            int ch = fgetc(fp);
-            if (ch == EOF || ch == (int)'\n') break;
-          }
-        }
-      }
-      fclose(fp);
-    }
-  }
-
-  // print a warning if any large page related flag is specified on command line
-  bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
-
-  const size_t default_page_size = (size_t)Bsd::page_size();
-  if (_large_page_size > default_page_size) {
-    _page_sizes[0] = _large_page_size;
-    _page_sizes[1] = default_page_size;
-    _page_sizes[2] = 0;
-  }
-  UseHugeTLBFS = UseHugeTLBFS &&
-                 Bsd::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
-
-  if (UseHugeTLBFS)
-    UseSHM = false;
-
-  UseLargePages = UseHugeTLBFS || UseSHM;
-
-  set_coredump_filter();
-#endif
 }
 
-#ifndef _ALLBSD_SOURCE
-#ifndef SHM_HUGETLB
-#define SHM_HUGETLB 04000
-#endif
-#endif
 
 char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
   // "exec" is passed in but not used.  Creating the shared image for
@@ -3267,11 +2235,7 @@
 
   // Create a large shared memory region to attach to based on size.
   // Currently, size is the total size of the heap
-#ifndef _ALLBSD_SOURCE
-  int shmid = shmget(key, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
-#else
   int shmid = shmget(key, bytes, IPC_CREAT|SHM_R|SHM_W);
-#endif
   if (shmid == -1) {
      // Possible reasons for shmget failure:
      // 1. shmmax is too small for Java heap.
@@ -3558,7 +2522,7 @@
 // this reason, the code should not be used as default (ThreadPriorityPolicy=0).
 // It is only used when ThreadPriorityPolicy=1 and requires root privilege.
 
-#if defined(_ALLBSD_SOURCE) && !defined(__APPLE__)
+#if !defined(__APPLE__)
 int os::java_to_os_priority[CriticalPriority + 1] = {
   19,              // 0 Entry should never be used
 
@@ -3578,7 +2542,7 @@
 
   31               // 11 CriticalPriority
 };
-#elif defined(__APPLE__)
+#else
 /* Using Mach high-level priority assignments */
 int os::java_to_os_priority[CriticalPriority + 1] = {
    0,              // 0 Entry should never be used (MINPRI_USER)
@@ -3599,26 +2563,6 @@
 
   36               // 11 CriticalPriority
 };
-#else
-int os::java_to_os_priority[CriticalPriority + 1] = {
-  19,              // 0 Entry should never be used
-
-   4,              // 1 MinPriority
-   3,              // 2
-   2,              // 3
-
-   1,              // 4
-   0,              // 5 NormPriority
-  -1,              // 6
-
-  -2,              // 7
-  -3,              // 8
-  -4,              // 9 NearMaxPriority
-
-  -5,              // 10 MaxPriority
-
-  -5               // 11 CriticalPriority
-};
 #endif
 
 static int prio_init() {
@@ -4179,22 +3123,6 @@
   }
 }
 
-#ifndef _ALLBSD_SOURCE
-// This is the fastest way to get thread cpu time on Bsd.
-// Returns cpu time (user+sys) for any thread, not only for current.
-// POSIX compliant clocks are implemented in the kernels 2.6.16+.
-// It might work on 2.6.10+ with a special kernel/glibc patch.
-// For reference, please, see IEEE Std 1003.1-2004:
-//   http://www.unix.org/single_unix_specification
-
-jlong os::Bsd::fast_thread_cpu_time(clockid_t clockid) {
-  struct timespec tp;
-  int rc = os::Bsd::clock_gettime(clockid, &tp);
-  assert(rc == 0, "clock_gettime is expected to return 0 code");
-
-  return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec;
-}
-#endif
 
 /////
 // glibc on Bsd platform uses non-documented flag
@@ -4458,10 +3386,6 @@
 // this is called _after_ the global arguments have been parsed
 jint os::init_2(void)
 {
-#ifndef _ALLBSD_SOURCE
-  Bsd::fast_thread_clock_init();
-#endif
-
   // Allocate a single page and mark it as readable for safepoint polling
   address polling_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
   guarantee( polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page" );
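init_2() starts by mapping a single read-only page for safepoint polling. Roughly, the trick works as follows (a sketch of the mechanism, not the VM's actual helpers):

    #include <sys/mman.h>

    // Map one read-only page. Compiled code periodically loads from it;
    // flipping the protection to PROT_NONE makes every such poll fault,
    // and the SEGV handler interprets the fault as a safepoint request.
    static void* map_polling_page(size_t page_size) {
      return ::mmap(NULL, page_size, PROT_READ,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }

    static void arm_safepoint(void* page, size_t page_size) {
      ::mprotect(page, page_size, PROT_NONE);   // polls now trap
    }

    static void disarm_safepoint(void* page, size_t page_size) {
      ::mprotect(page, page_size, PROT_READ);   // polls succeed again
    }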
@@ -4518,48 +3442,6 @@
   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
         vm_page_size()));
 
-#ifndef _ALLBSD_SOURCE
-  Bsd::capture_initial_stack(JavaThread::stack_size_at_create());
-
-  Bsd::libpthread_init();
-  if (PrintMiscellaneous && (Verbose || WizardMode)) {
-     tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
-          Bsd::glibc_version(), Bsd::libpthread_version(),
-          Bsd::is_floating_stack() ? "floating stack" : "fixed stack");
-  }
-
-  if (UseNUMA) {
-    if (!Bsd::libnuma_init()) {
-      UseNUMA = false;
-    } else {
-      if ((Bsd::numa_max_node() < 1)) {
-        // There's only one node(they start from 0), disable NUMA.
-        UseNUMA = false;
-      }
-    }
-    // With SHM large pages we cannot uncommit a page, so there's not way
-    // we can make the adaptive lgrp chunk resizing work. If the user specified
-    // both UseNUMA and UseLargePages (or UseSHM) on the command line - warn and
-    // disable adaptive resizing.
-    if (UseNUMA && UseLargePages && UseSHM) {
-      if (!FLAG_IS_DEFAULT(UseNUMA)) {
-        if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseSHM)) {
-          UseLargePages = false;
-        } else {
-          warning("UseNUMA is not fully compatible with SHM large pages, disabling adaptive resizing");
-          UseAdaptiveSizePolicy = false;
-          UseAdaptiveNUMAChunkSizing = false;
-        }
-      } else {
-        UseNUMA = false;
-      }
-    }
-    if (!UseNUMA && ForceNUMA) {
-      UseNUMA = true;
-    }
-  }
-#endif
-
   if (MaxFDLimit) {
     // set the number of file descriptors to max. print out error
     // if getrlimit/setrlimit fails but continue regardless.
@@ -4586,11 +3468,6 @@
     }
   }
 
-#ifndef _ALLBSD_SOURCE
-  // Initialize lock used to serialize thread creation (see os::create_thread)
-  Bsd::set_createThread_lock(new Mutex(Mutex::leaf, "createThread_lock", false));
-#endif
-
   // at-exit methods are called in the reverse order of their registration.
   // atexit functions are called on return from main or as a result of a
   // call to exit(3C). There can be only 32 of these functions registered
@@ -4641,15 +3518,7 @@
 };
 
 int os::active_processor_count() {
-#ifdef _ALLBSD_SOURCE
   return _processor_count;
-#else
-  // Bsd doesn't yet have a (official) notion of processor sets,
-  // so just return the number of online processors.
-  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
-  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
-  return online_cpus;
-#endif
 }
 
 void os::set_native_thread_name(const char *name) {
@@ -4703,25 +3572,7 @@
 
 int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)
 {
-#ifdef _ALLBSD_SOURCE
   return pthread_cond_timedwait(_cond, _mutex, _abstime);
-#else
-   if (is_NPTL()) {
-      return pthread_cond_timedwait(_cond, _mutex, _abstime);
-   } else {
-#ifndef IA64
-      // 6292965: BsdThreads pthread_cond_timedwait() resets FPU control
-      // word back to default 64bit precision if condvar is signaled. Java
-      // wants 53bit precision.  Save and restore current value.
-      int fpu = get_fpu_control_word();
-#endif // IA64
-      int status = pthread_cond_timedwait(_cond, _mutex, _abstime);
-#ifndef IA64
-      set_fpu_control_word(fpu);
-#endif // IA64
-      return status;
-   }
-#endif
 }
 
 ////////////////////////////////////////////////////////////////////////////////
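safe_cond_timedwait() now forwards straight to pthread_cond_timedwait(), whose deadline is absolute rather than relative. A sketch of building such a deadline from a millisecond timeout (gettimeofday is used because the default condvar clock is CLOCK_REALTIME):

    #include <pthread.h>
    #include <sys/time.h>

    static int timedwait_ms(pthread_cond_t* cond,
                            pthread_mutex_t* mutex, long ms) {
      struct timeval now;
      gettimeofday(&now, NULL);
      struct timespec abstime;
      abstime.tv_sec  = now.tv_sec + ms / 1000;
      abstime.tv_nsec = now.tv_usec * 1000 + (ms % 1000) * 1000000;
      if (abstime.tv_nsec >= 1000000000) {  // carry into seconds
        abstime.tv_sec  += 1;
        abstime.tv_nsec -= 1000000000;
      }
      // Must hold 'mutex'; returns 0, ETIMEDOUT, or another errno.
      return pthread_cond_timedwait(cond, mutex, &abstime);
    }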
@@ -5041,20 +3892,6 @@
   return munmap(addr, bytes) == 0;
 }
 
-#ifndef _ALLBSD_SOURCE
-static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time);
-
-static clockid_t thread_cpu_clockid(Thread* thread) {
-  pthread_t tid = thread->osthread()->pthread_id();
-  clockid_t clockid;
-
-  // Get thread clockid
-  int rc = os::Bsd::pthread_getcpuclockid(tid, &clockid);
-  assert(rc == 0, "pthread_getcpuclockid is expected to return 0 code");
-  return clockid;
-}
-#endif
-
 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
 // of a thread.
@@ -5065,36 +3902,15 @@
 jlong os::current_thread_cpu_time() {
 #ifdef __APPLE__
   return os::thread_cpu_time(Thread::current(), true /* user + sys */);
-#elif !defined(_ALLBSD_SOURCE)
-  if (os::Bsd::supports_fast_thread_cpu_time()) {
-    return os::Bsd::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
-  } else {
-    // return user + sys since the cost is the same
-    return slow_thread_cpu_time(Thread::current(), true /* user + sys */);
-  }
 #endif
 }
 
 jlong os::thread_cpu_time(Thread* thread) {
-#ifndef _ALLBSD_SOURCE
-  // consistent with what current_thread_cpu_time() returns
-  if (os::Bsd::supports_fast_thread_cpu_time()) {
-    return os::Bsd::fast_thread_cpu_time(thread_cpu_clockid(thread));
-  } else {
-    return slow_thread_cpu_time(thread, true /* user + sys */);
-  }
-#endif
 }
 
 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
 #ifdef __APPLE__
   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
-#elif !defined(_ALLBSD_SOURCE)
-  if (user_sys_cpu_time && os::Bsd::supports_fast_thread_cpu_time()) {
-    return os::Bsd::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
-  } else {
-    return slow_thread_cpu_time(Thread::current(), user_sys_cpu_time);
-  }
 #endif
 }
 
@@ -5118,106 +3934,9 @@
   } else {
     return ((jlong)tinfo.user_time.seconds * 1000000000) + ((jlong)tinfo.user_time.microseconds * (jlong)1000);
   }
-#elif !defined(_ALLBSD_SOURCE)
-  if (user_sys_cpu_time && os::Bsd::supports_fast_thread_cpu_time()) {
-    return os::Bsd::fast_thread_cpu_time(thread_cpu_clockid(thread));
-  } else {
-    return slow_thread_cpu_time(thread, user_sys_cpu_time);
-  }
 #endif
 }
 
-#ifndef _ALLBSD_SOURCE
-//
-//  -1 on error.
-//
-
-static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
-  static bool proc_pid_cpu_avail = true;
-  static bool proc_task_unchecked = true;
-  static const char *proc_stat_path = "/proc/%d/stat";
-  pid_t  tid = thread->osthread()->thread_id();
-  int i;
-  char *s;
-  char stat[2048];
-  int statlen;
-  char proc_name[64];
-  int count;
-  long sys_time, user_time;
-  char string[64];
-  char cdummy;
-  int idummy;
-  long ldummy;
-  FILE *fp;
-
-  // We first try accessing /proc/<pid>/cpu since this is faster to
-  // process.  If this file is not present (bsd kernels 2.5 and above)
-  // then we open /proc/<pid>/stat.
-  if ( proc_pid_cpu_avail ) {
-    sprintf(proc_name, "/proc/%d/cpu", tid);
-    fp =  fopen(proc_name, "r");
-    if ( fp != NULL ) {
-      count = fscanf( fp, "%s %lu %lu\n", string, &user_time, &sys_time);
-      fclose(fp);
-      if ( count != 3 ) return -1;
-
-      if (user_sys_cpu_time) {
-        return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
-      } else {
-        return (jlong)user_time * (1000000000 / clock_tics_per_sec);
-      }
-    }
-    else proc_pid_cpu_avail = false;
-  }
-
-  // The /proc/<tid>/stat aggregates per-process usage on
-  // new Bsd kernels 2.6+ where NPTL is supported.
-  // The /proc/self/task/<tid>/stat still has the per-thread usage.
-  // See bug 6328462.
-  // There can be no directory /proc/self/task on kernels 2.4 with NPTL
-  // and possibly in some other cases, so we check its availability.
-  if (proc_task_unchecked && os::Bsd::is_NPTL()) {
-    // This is executed only once
-    proc_task_unchecked = false;
-    fp = fopen("/proc/self/task", "r");
-    if (fp != NULL) {
-      proc_stat_path = "/proc/self/task/%d/stat";
-      fclose(fp);
-    }
-  }
-
-  sprintf(proc_name, proc_stat_path, tid);
-  fp = fopen(proc_name, "r");
-  if ( fp == NULL ) return -1;
-  statlen = fread(stat, 1, 2047, fp);
-  stat[statlen] = '\0';
-  fclose(fp);
-
-  // Skip pid and the command string. Note that we could be dealing with
-  // weird command names, e.g. user could decide to rename java launcher
-  // to "java 1.4.2 :)", then the stat file would look like
-  //                1234 (java 1.4.2 :)) R ... ...
-  // We don't really need to know the command string, just find the last
-  // occurrence of ")" and then start parsing from there. See bug 4726580.
-  s = strrchr(stat, ')');
-  i = 0;
-  if (s == NULL ) return -1;
-
-  // Skip blank chars
-  do s++; while (isspace(*s));
-
-  count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
-                 &cdummy, &idummy, &idummy, &idummy, &idummy, &idummy,
-                 &ldummy, &ldummy, &ldummy, &ldummy, &ldummy,
-                 &user_time, &sys_time);
-  if ( count != 13 ) return -1;
-  if (user_sys_cpu_time) {
-    return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
-  } else {
-    return (jlong)user_time * (1000000000 / clock_tics_per_sec);
-  }
-}
-#endif
 
 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
@@ -5236,10 +3955,8 @@
 bool os::is_thread_cpu_time_supported() {
 #ifdef __APPLE__
   return true;
-#elif defined(_ALLBSD_SOURCE)
+#else
   return false;
-#else
-  return true;
 #endif
 }
 
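The __APPLE__ branches above rest on Mach's thread_info(). For reference, computing user+sys time for the calling thread looks roughly like this (a sketch, not the VM code):

    #include <mach/mach.h>

    static long long self_thread_cpu_nanos() {
      mach_port_t t = mach_thread_self();
      thread_basic_info_data_t tinfo;
      mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
      kern_return_t kr = thread_info(t, THREAD_BASIC_INFO,
                                     (thread_info_t)&tinfo, &count);
      mach_port_deallocate(mach_task_self(), t);  // release port right
      if (kr != KERN_SUCCESS) {
        return -1;
      }
      return (long long)tinfo.user_time.seconds      * 1000000000LL +
             (long long)tinfo.user_time.microseconds * 1000LL +
             (long long)tinfo.system_time.seconds    * 1000000000LL +
             (long long)tinfo.system_time.microseconds * 1000LL;
    }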
--- a/src/os/bsd/vm/os_bsd.hpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/os/bsd/vm/os_bsd.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -56,19 +56,6 @@
   static int sigflags[MAXSIGNUM];
 
   static int (*_clock_gettime)(clockid_t, struct timespec *);
-#ifndef _ALLBSD_SOURCE
-  static int (*_pthread_getcpuclockid)(pthread_t, clockid_t *);
-
-  static address   _initial_thread_stack_bottom;
-  static uintptr_t _initial_thread_stack_size;
-
-  static const char *_glibc_version;
-  static const char *_libpthread_version;
-
-  static bool _is_floating_stack;
-  static bool _is_NPTL;
-  static bool _supports_fast_thread_cpu_time;
-#endif
 
   static GrowableArray<int>* _cpu_to_node;
 
@@ -76,28 +63,14 @@
 
   static julong _physical_memory;
   static pthread_t _main_thread;
-#ifndef _ALLBSD_SOURCE
-  static Mutex* _createThread_lock;
-#endif
   static int _page_size;
 
   static julong available_memory();
   static julong physical_memory() { return _physical_memory; }
   static void initialize_system_info();
 
-#ifndef _ALLBSD_SOURCE
-  static void set_glibc_version(const char *s)      { _glibc_version = s; }
-  static void set_libpthread_version(const char *s) { _libpthread_version = s; }
-#endif
-
   static bool supports_variable_stack_size();
 
-#ifndef _ALLBSD_SOURCE
-  static void set_is_NPTL()                   { _is_NPTL = true;  }
-  static void set_is_BsdThreads()           { _is_NPTL = false; }
-  static void set_is_floating_stack()         { _is_floating_stack = true; }
-#endif
-
   static void rebuild_cpu_to_node_map();
   static GrowableArray<int>* cpu_to_node()    { return _cpu_to_node; }
 
@@ -106,25 +79,10 @@
  public:
 
   static void init_thread_fpu_state();
-#ifndef _ALLBSD_SOURCE
-  static int  get_fpu_control_word();
-  static void set_fpu_control_word(int fpu_control);
-#endif
   static pthread_t main_thread(void)                                { return _main_thread; }
 
-#ifndef _ALLBSD_SOURCE
-  // returns kernel thread id (similar to LWP id on Solaris), which can be
-  // used to access /proc
-  static pid_t gettid();
-  static void set_createThread_lock(Mutex* lk)                      { _createThread_lock = lk; }
-  static Mutex* createThread_lock(void)                             { return _createThread_lock; }
-#endif
   static void hotspot_sigmask(Thread* thread);
 
-#ifndef _ALLBSD_SOURCE
-  static address   initial_thread_stack_bottom(void)                { return _initial_thread_stack_bottom; }
-  static uintptr_t initial_thread_stack_size(void)                  { return _initial_thread_stack_size; }
-#endif
   static bool is_initial_thread(void);
 
   static int page_size(void)                                        { return _page_size; }
@@ -161,23 +119,6 @@
   static struct sigaction *get_chained_signal_action(int sig);
   static bool chained_handler(int sig, siginfo_t* siginfo, void* context);
 
-#ifndef _ALLBSD_SOURCE
-  // GNU libc and libpthread version strings
-  static const char *glibc_version()          { return _glibc_version; }
-  static const char *libpthread_version()     { return _libpthread_version; }
-
-  // NPTL or BsdThreads?
-  static bool is_BsdThreads()               { return !_is_NPTL; }
-  static bool is_NPTL()                       { return _is_NPTL;  }
-
-  // NPTL is always floating stack. BsdThreads could be using floating
-  // stack or fixed stack.
-  static bool is_floating_stack()             { return _is_floating_stack; }
-
-  static void libpthread_init();
-  static bool libnuma_init();
-  static void* libnuma_dlsym(void* handle, const char* name);
-#endif
   // Minimum stack size a thread can be created with (allowing
   // the VM to completely create the thread and enter user code)
   static size_t min_stack_allowed;
@@ -186,22 +127,9 @@
   static size_t default_stack_size(os::ThreadType thr_type);
   static size_t default_guard_size(os::ThreadType thr_type);
 
-#ifndef _ALLBSD_SOURCE
-  static void capture_initial_stack(size_t max_size);
-
-  // Stack overflow handling
-  static bool manually_expand_stack(JavaThread * t, address addr);
-  static int max_register_window_saves_before_flushing();
-#endif
-
   // Real-time clock functions
   static void clock_init(void);
 
-#ifndef _ALLBSD_SOURCE
-  // fast POSIX clocks support
-  static void fast_thread_clock_init(void);
-#endif
-
   static inline bool supports_monotonic_clock() {
     return _clock_gettime != NULL;
   }
@@ -210,18 +138,6 @@
     return _clock_gettime ? _clock_gettime(clock_id, tp) : -1;
   }
 
-#ifndef _ALLBSD_SOURCE
-  static int pthread_getcpuclockid(pthread_t tid, clockid_t *clock_id) {
-    return _pthread_getcpuclockid ? _pthread_getcpuclockid(tid, clock_id) : -1;
-  }
-
-  static bool supports_fast_thread_cpu_time() {
-    return _supports_fast_thread_cpu_time;
-  }
-
-  static jlong fast_thread_cpu_time(clockid_t clockid);
-#endif
-
   // Stack repair handling
 
   // none present
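The monotonic-clock support kept above rests on a dlsym-resolved function pointer. A minimal standalone rendering of the same pattern, assuming the usual POSIX symbol name; availability (and RTLD_DEFAULT semantics) varies by BSD release:

    #include <dlfcn.h>
    #include <time.h>

    static int (*_clock_gettime_fn)(clockid_t, struct timespec*) = NULL;

    static void clock_init_sketch() {
      // Probe the already-loaded images; leave the pointer NULL when absent.
      _clock_gettime_fn = (int (*)(clockid_t, struct timespec*))
          dlsym(RTLD_DEFAULT, "clock_gettime");
    }

    static int monotonic_now(struct timespec* tp) {
      // Mirrors os::Bsd::clock_gettime(): -1 signals "unsupported".
      return _clock_gettime_fn ? _clock_gettime_fn(CLOCK_MONOTONIC, tp) : -1;
    }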
--- a/src/os_cpu/bsd_x86/vm/bytes_bsd_x86.inline.hpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/os_cpu/bsd_x86/vm/bytes_bsd_x86.inline.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -25,10 +25,6 @@
 #ifndef OS_CPU_BSD_X86_VM_BYTES_BSD_X86_INLINE_HPP
 #define OS_CPU_BSD_X86_VM_BYTES_BSD_X86_INLINE_HPP
 
-#ifndef _ALLBSD_SOURCE
-#include <byteswap.h>
-#endif
-
 #ifdef __APPLE__
 #include <libkern/OSByteOrder.h>
 #endif
--- a/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -76,7 +76,7 @@
 # include <ucontext.h>
 #endif
 
-#if defined(_ALLBSD_SOURCE) && !defined(__APPLE__) && !defined(__NetBSD__)
+#if !defined(__APPLE__) && !defined(__NetBSD__)
 # include <pthread_np.h>
 #endif
 
@@ -489,23 +489,6 @@
           // to handle_unexpected_exception way down below.
           thread->disable_stack_red_zone();
           tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
-#ifndef _ALLBSD_SOURCE
-        } else {
-          // Accessing stack address below sp may cause SEGV if current
-          // thread has MAP_GROWSDOWN stack. This should only happen when
-          // current thread was created by user code with MAP_GROWSDOWN flag
-          // and then attached to VM. See notes in os_bsd.cpp.
-          if (thread->osthread()->expanding_stack() == 0) {
-             thread->osthread()->set_expanding_stack();
-             if (os::Bsd::manually_expand_stack(thread, addr)) {
-               thread->osthread()->clear_expanding_stack();
-               return 1;
-             }
-             thread->osthread()->clear_expanding_stack();
-          } else {
-             fatal("recursive segv. expanding stack.");
-          }
-#endif
         }
       }
     }
@@ -744,61 +727,21 @@
   ShouldNotReachHere();
 }
 
-#ifdef _ALLBSD_SOURCE
 // From solaris_i486.s ported to bsd_i486.s
 extern "C" void fixcw();
-#endif
 
 void os::Bsd::init_thread_fpu_state(void) {
 #ifndef AMD64
-# ifdef _ALLBSD_SOURCE
   // Set fpu to 53 bit precision. This happens too early to use a stub.
   fixcw();
-# else
-  // set fpu to 53 bit precision
-  set_fpu_control_word(0x27f);
-# endif
 #endif // !AMD64
 }
 
-#ifndef _ALLBSD_SOURCE
-int os::Bsd::get_fpu_control_word(void) {
-#ifdef AMD64
-  return 0;
-#else
-  int fpu_control;
-  _FPU_GETCW(fpu_control);
-  return fpu_control & 0xffff;
-#endif // AMD64
-}
-
-void os::Bsd::set_fpu_control_word(int fpu_control) {
-#ifndef AMD64
-  _FPU_SETCW(fpu_control);
-#endif // !AMD64
-}
-#endif
 
 // Check that the bsd kernel version is 2.4 or higher since earlier
 // versions do not support SSE without patches.
 bool os::supports_sse() {
-#if defined(AMD64) || defined(_ALLBSD_SOURCE)
   return true;
-#else
-  struct utsname uts;
-  if( uname(&uts) != 0 ) return false; // uname fails?
-  char *minor_string;
-  int major = strtol(uts.release,&minor_string,10);
-  int minor = strtol(minor_string+1,NULL,10);
-  bool result = (major > 2 || (major==2 && minor >= 4));
-#ifndef PRODUCT
-  if (PrintMiscellaneous && Verbose) {
-    tty->print("OS version is %d.%d, which %s support SSE/SSE2\n",
-               major,minor, result ? "DOES" : "does NOT");
-  }
-#endif
-  return result;
-#endif // AMD64
 }
 
 bool os::is_allocatable(size_t bytes) {
@@ -836,46 +779,7 @@
 #define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;})
 #endif
 
-#ifdef _ALLBSD_SOURCE
 bool os::Bsd::supports_variable_stack_size() { return true; }
-#else
-// Test if pthread library can support variable thread stack size. BsdThreads
-// in fixed stack mode allocates 2M fixed slot for each thread. BsdThreads
-// in floating stack mode and NPTL support variable stack size.
-bool os::Bsd::supports_variable_stack_size() {
-  if (os::Bsd::is_NPTL()) {
-     // NPTL, yes
-     return true;
-
-  } else {
-    // Note: We can't control default stack size when creating a thread.
-    // If we use non-default stack size (pthread_attr_setstacksize), both
-    // floating stack and non-floating stack BsdThreads will return the
-    // same value. This makes it impossible to implement this function by
-    // detecting thread stack size directly.
-    //
-    // An alternative approach is to check %gs. Fixed-stack BsdThreads
-    // do not use %gs, so its value is 0. Floating-stack BsdThreads use
-    // %gs (either as LDT selector or GDT selector, depending on kernel)
-    // to access thread specific data.
-    //
-    // Note that %gs is a reserved glibc register since early 2001, so
-    // applications are not allowed to change its value (Ulrich Drepper from
-    // Redhat confirmed that all known offenders have been modified to use
-    // either %fs or TSD). In the worst case scenario, when VM is embedded in
-    // a native application that plays with %gs, we might see non-zero %gs
-    // even BsdThreads is running in fixed stack mode. As the result, we'll
-    // return true and skip _thread_safety_check(), so we may not be able to
-    // detect stack-heap collisions. But otherwise it's harmless.
-    //
-#ifdef __GNUC__
-    return (GET_GS() != 0);
-#else
-    return false;
-#endif
-  }
-}
-#endif
 #endif // AMD64
 
 // return default stack size for thr_type
@@ -943,7 +847,7 @@
 
   *bottom = (address)((char *)ss.ss_sp - ss.ss_size);
   *size   = ss.ss_size;
-#elif defined(_ALLBSD_SOURCE)
+#else
   pthread_attr_t attr;
 
   int rslt = pthread_attr_init(&attr);
@@ -963,33 +867,6 @@
   }
 
   pthread_attr_destroy(&attr);
-#else
-  if (os::Bsd::is_initial_thread()) {
-     // initial thread needs special handling because pthread_getattr_np()
-     // may return bogus value.
-     *bottom = os::Bsd::initial_thread_stack_bottom();
-     *size   = os::Bsd::initial_thread_stack_size();
-  } else {
-     pthread_attr_t attr;
-
-     int rslt = pthread_getattr_np(pthread_self(), &attr);
-
-     // JVM needs to know exact stack location, abort if it fails
-     if (rslt != 0) {
-       if (rslt == ENOMEM) {
-         vm_exit_out_of_memory(0, "pthread_getattr_np");
-       } else {
-         fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
-       }
-     }
-
-     if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {
-         fatal("Can not locate current stack attributes!");
-     }
-
-     pthread_attr_destroy(&attr);
-
-  }
 #endif
   assert(os::current_stack_pointer() >= *bottom &&
          os::current_stack_pointer() < *bottom + *size, "just checking");
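The non-Apple path retained above derives the current thread's stack region from its pthread attributes. A hedged sketch of that query in isolation, assuming the FreeBSD-style pthread_attr_get_np() from <pthread_np.h> (macOS instead offers pthread_get_stackaddr_np()/pthread_get_stacksize_np()):

    #include <pthread.h>
    #include <pthread_np.h>   // FreeBSD-style; guarded by the #if above in the VM

    static bool current_stack_region_sketch(void** bottom, size_t* size) {
      pthread_attr_t attr;
      if (pthread_attr_init(&attr) != 0) return false;
      if (pthread_attr_get_np(pthread_self(), &attr) != 0) {
        pthread_attr_destroy(&attr);
        return false;
      }
      // *bottom receives the low address of the stack, *size its extent.
      bool ok = pthread_attr_getstack(&attr, bottom, size) == 0;
      pthread_attr_destroy(&attr);
      return ok;
    }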
--- a/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -23,7 +23,7 @@
  *
  */
 
-#if defined(_ALLBSD_SOURCE) && !defined(__APPLE__) && !defined(__NetBSD__)
+#if !defined(__APPLE__) && !defined(__NetBSD__)
 #include <pthread.h>
 # include <pthread_np.h> /* For pthread_attr_get_np */
 #endif
@@ -178,26 +178,6 @@
           thread->disable_stack_red_zone();
           ShouldNotCallThis();
         }
-#ifndef _ALLBSD_SOURCE
-        else {
-          // Accessing stack address below sp may cause SEGV if
-          // current thread has MAP_GROWSDOWN stack. This should
-          // only happen when current thread was created by user
-          // code with MAP_GROWSDOWN flag and then attached to VM.
-          // See notes in os_bsd.cpp.
-          if (thread->osthread()->expanding_stack() == 0) {
-            thread->osthread()->set_expanding_stack();
-            if (os::Bsd::manually_expand_stack(thread, addr)) {
-              thread->osthread()->clear_expanding_stack();
-              return true;
-            }
-            thread->osthread()->clear_expanding_stack();
-          }
-          else {
-            fatal("recursive segv. expanding stack.");
-          }
-        }
-#endif
       }
     }
 
@@ -266,16 +246,6 @@
   // Nothing to do
 }
 
-#ifndef _ALLBSD_SOURCE
-int os::Bsd::get_fpu_control_word() {
-  ShouldNotCallThis();
-}
-
-void os::Bsd::set_fpu_control_word(int fpu) {
-  ShouldNotCallThis();
-}
-#endif
-
 bool os::is_allocatable(size_t bytes) {
 #ifdef _LP64
   return true;
@@ -339,7 +309,7 @@
   stack_top = (address) ss.ss_sp;
   stack_bytes  = ss.ss_size;
   stack_bottom = stack_top - stack_bytes;
-#elif defined(_ALLBSD_SOURCE)
+#else
   pthread_attr_t attr;
 
   int rslt = pthread_attr_init(&attr);
@@ -362,67 +332,6 @@
   pthread_attr_destroy(&attr);
 
   stack_top = stack_bottom + stack_bytes;
-#else /* Linux */
-  pthread_attr_t attr;
-  int res = pthread_getattr_np(pthread_self(), &attr);
-  if (res != 0) {
-    if (res == ENOMEM) {
-      vm_exit_out_of_memory(0, "pthread_getattr_np");
-    }
-    else {
-      fatal(err_msg("pthread_getattr_np failed with errno = " INT32_FORMAT,
-            res));
-    }
-  }
-
-  res = pthread_attr_getstack(&attr, (void **) &stack_bottom, &stack_bytes);
-  if (res != 0) {
-    fatal(err_msg("pthread_attr_getstack failed with errno = " INT32_FORMAT,
-          res));
-  }
-  stack_top = stack_bottom + stack_bytes;
-
-  // The block of memory returned by pthread_attr_getstack() includes
-  // guard pages where present.  We need to trim these off.
-  size_t page_bytes = os::Bsd::page_size();
-  assert(((intptr_t) stack_bottom & (page_bytes - 1)) == 0, "unaligned stack");
-
-  size_t guard_bytes;
-  res = pthread_attr_getguardsize(&attr, &guard_bytes);
-  if (res != 0) {
-    fatal(err_msg(
-        "pthread_attr_getguardsize failed with errno = " INT32_FORMAT, res));
-  }
-  int guard_pages = align_size_up(guard_bytes, page_bytes) / page_bytes;
-  assert(guard_bytes == guard_pages * page_bytes, "unaligned guard");
-
-#ifdef IA64
-  // IA64 has two stacks sharing the same area of memory, a normal
-  // stack growing downwards and a register stack growing upwards.
-  // Guard pages, if present, are in the centre.  This code splits
-  // the stack in two even without guard pages, though in theory
-  // there's nothing to stop us allocating more to the normal stack
-  // or more to the register stack if one or the other were found
-  // to grow faster.
-  int total_pages = align_size_down(stack_bytes, page_bytes) / page_bytes;
-  stack_bottom += (total_pages - guard_pages) / 2 * page_bytes;
-#endif // IA64
-
-  stack_bottom += guard_bytes;
-
-  pthread_attr_destroy(&attr);
-
-  // The initial thread has a growable stack, and the size reported
-  // by pthread_attr_getstack is the maximum size it could possibly
-  // be given what currently mapped.  This can be huge, so we cap it.
-  if (os::Bsd::is_initial_thread()) {
-    stack_bytes = stack_top - stack_bottom;
-
-    if (stack_bytes > JavaThread::stack_size_at_create())
-      stack_bytes = JavaThread::stack_size_at_create();
-
-    stack_bottom = stack_top - stack_bytes;
-  }
 #endif
 
   assert(os::current_stack_pointer() >= stack_bottom, "should do");
--- a/src/share/tools/hsdis/hsdis-demo.c	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/share/tools/hsdis/hsdis-demo.c	Mon Nov 05 13:55:31 2012 -0800
@@ -85,9 +85,11 @@
 
 #include "dlfcn.h"
 
-#define DECODE_INSTRUCTIONS_NAME "decode_instructions_virtual"
+#define DECODE_INSTRUCTIONS_VIRTUAL_NAME "decode_instructions_virtual"
+#define DECODE_INSTRUCTIONS_NAME "decode_instructions"
 #define HSDIS_NAME               "hsdis"
 static void* decode_instructions_pv = 0;
+static void* decode_instructions_sv = 0;
 static const char* hsdis_path[] = {
   HSDIS_NAME"-"LIBARCH LIB_EXT,
   "./" HSDIS_NAME"-"LIBARCH LIB_EXT,
@@ -101,11 +103,12 @@
   void* dllib = NULL;
   const char* *next_in_path = hsdis_path;
   while (1) {
-    decode_instructions_pv = dlsym(dllib, DECODE_INSTRUCTIONS_NAME);
-    if (decode_instructions_pv != NULL)
+    decode_instructions_pv = dlsym(dllib, DECODE_INSTRUCTIONS_VIRTUAL_NAME);
+    decode_instructions_sv = dlsym(dllib, DECODE_INSTRUCTIONS_NAME);
+    if (decode_instructions_pv != NULL || decode_instructions_sv != NULL)
       return NULL;
     if (dllib != NULL)
-      return "plugin does not defined "DECODE_INSTRUCTIONS_NAME;
+      return "plugin does not defined "DECODE_INSTRUCTIONS_VIRTUAL_NAME" and "DECODE_INSTRUCTIONS_NAME;
     for (dllib = NULL; dllib == NULL; ) {
       const char* next_lib = (*next_in_path++);
       if (next_lib == NULL)
@@ -213,20 +216,44 @@
     printf("%s: %s\n", err, dlerror());
     exit(1);
   }
-  printf("Decoding from %p to %p...\n", from, to);
-  decode_instructions_ftype decode_instructions
-    = (decode_instructions_ftype) decode_instructions_pv;
+  decode_func_vtype decode_instructions_v
+    = (decode_func_vtype) decode_instructions_pv;
+  decode_func_stype decode_instructions_s
+    = (decode_func_stype) decode_instructions_sv;
   void* res;
-  if (raw && xml) {
-    res = (*decode_instructions)(from, to, (unsigned char*)from, to - from, simple_handle_event, stdout, NULL, stdout, options);
-  } else if (raw) {
-    res = (*decode_instructions)(from, to, (unsigned char*)from, to - from, simple_handle_event, stdout, NULL, stdout, options);
-  } else {
-    res = (*decode_instructions)(from, to, (unsigned char*)from, to - from,
-                                 handle_event, (void*) event_cookie,
-                                 fprintf_callback, stdout,
-                                 options);
+  if (decode_instructions_pv != NULL) {
+    printf("\nDecoding from %p to %p...with %s\n", from, to, DECODE_INSTRUCTIONS_VIRTUAL_NAME);
+    if (raw) {
+      res = (*decode_instructions_v)(from, to,
+                                     (unsigned char*)from, to - from,
+                                     simple_handle_event, stdout,
+                                     NULL, stdout,
+                                     options, 0);
+    } else {
+      res = (*decode_instructions_v)(from, to,
+                                     (unsigned char*)from, to - from,
+                                     handle_event, (void*) event_cookie,
+                                     fprintf_callback, stdout,
+                                     options, 0);
+    }
+    if (res != (void*)to)
+      printf("*** Result was %p!\n", res);
   }
-  if (res != (void*)to)
-    printf("*** Result was %p!\n", res);
+  void* sres;
+  if (decode_instructions_sv != NULL) {
+    printf("\nDecoding from %p to %p...with old decode_instructions\n", from, to, DECODE_INSTRUCTIONS_NAME);
+    if (raw) {
+      sres = (*decode_instructions_s)(from, to,
+                                      simple_handle_event, stdout,
+                                      NULL, stdout,
+                                      options);
+    } else {
+      sres = (*decode_instructions_s)(from, to,
+                                      handle_event, (void*) event_cookie,
+                                      fprintf_callback, stdout,
+                                      options);
+    }
+    if (sres != (void *)to)
+      printf("*** Result of decode_instructions %p!\n", sres);
+  }
 }
--- a/src/share/tools/hsdis/hsdis.c	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/share/tools/hsdis/hsdis.c	Mon Nov 05 13:55:31 2012 -0800
@@ -99,7 +99,7 @@
                             unsigned char* buffer, uintptr_t length,
                             event_callback_t  event_callback_arg,  void* event_stream_arg,
                             printf_callback_t printf_callback_arg, void* printf_stream_arg,
-                            const char* options) {
+                            const char* options, int newline) {
   struct hsdis_app_data app_data;
   memset(&app_data, 0, sizeof(app_data));
   app_data.start_va    = start_va;
@@ -110,7 +110,7 @@
   app_data.event_stream    = event_stream_arg;
   app_data.printf_callback = printf_callback_arg;
   app_data.printf_stream   = printf_stream_arg;
-  app_data.do_newline = false;
+  app_data.do_newline = (newline != 0);
 
   return decode(&app_data, options);
 }
@@ -132,7 +132,7 @@
                              event_stream_arg,
                              printf_callback_arg,
                              printf_stream_arg,
-                             options);
+                             options, false);
 }
 
 static void* decode(struct hsdis_app_data* app_data, const char* options) {
@@ -173,7 +173,7 @@
       if (!app_data->losing) {
         const char* insn_close = format_insn_close("/insn", &app_data->dinfo,
                                                    buf, sizeof(buf));
-        (*event_callback)(event_stream, insn_close, (void*) p) != NULL;
+        (*event_callback)(event_stream, insn_close, (void*) p);
 
         if (app_data->do_newline) {
           /* follow each complete insn by a nice newline */
@@ -182,13 +182,14 @@
       }
     }
 
-    (*event_callback)(event_stream, "/insns", (void*) p);
+    if (app_data->losing) (*event_callback)(event_stream, "/insns", (void*) p);
     return (void*) p;
   }
 }
 
 /* take the address of the function, for luck, and also test the typedef: */
-const decode_instructions_ftype decode_instructions_address = &decode_instructions_virtual;
+const decode_func_vtype decode_func_virtual_address = &decode_instructions_virtual;
+const decode_func_stype decode_func_address = &decode_instructions;
 
 static const char* format_insn_close(const char* close,
                                      disassemble_info* dinfo,
--- a/src/share/tools/hsdis/hsdis.h	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/share/tools/hsdis/hsdis.h	Mon Nov 05 13:55:31 2012 -0800
@@ -47,6 +47,9 @@
    where tag is a simple identifier, signifying (as in XML) a element start,
    element end, and standalone element.  (To render as XML, add angle brackets.)
 */
+#ifndef SHARED_TOOLS_HSDIS_H
+#define SHARED_TOOLS_HSDIS_H
+
 extern
 #ifdef DLL_EXPORT
   DLL_EXPORT
@@ -57,16 +60,37 @@
                                   void* event_stream,
                                   int (*printf_callback)(void*, const char*, ...),
                                   void* printf_stream,
-                                  const char* options);
+                                  const char* options,
+                                  int newline /* bool value for nice new line */);
+
+/* This is the compatibility interface for older versions of hotspot */
+extern
+#ifdef DLL_ENTRY
+  DLL_ENTRY
+#endif
+void* decode_instructions(void* start_pv, void* end_pv,
+                    void* (*event_callback)(void*, const char*, void*),
+                    void* event_stream,
+                    int   (*printf_callback)(void*, const char*, ...),
+                    void* printf_stream,
+                    const char* options);
 
 /* convenience typedefs */
 
 typedef void* (*decode_instructions_event_callback_ftype)  (void*, const char*, void*);
 typedef int   (*decode_instructions_printf_callback_ftype) (void*, const char*, ...);
-typedef void* (*decode_instructions_ftype) (uintptr_t start_va, uintptr_t end_va,
-                                            unsigned char* buffer, uintptr_t length,
-                                            decode_instructions_event_callback_ftype event_callback,
-                                            void* event_stream,
-                                            decode_instructions_printf_callback_ftype printf_callback,
-                                            void* printf_stream,
-                                            const char* options);
+typedef void* (*decode_func_vtype) (uintptr_t start_va, uintptr_t end_va,
+                                    unsigned char* buffer, uintptr_t length,
+                                    decode_instructions_event_callback_ftype event_callback,
+                                    void* event_stream,
+                                    decode_instructions_printf_callback_ftype printf_callback,
+                                    void* printf_stream,
+                                    const char* options,
+                                    int newline);
+typedef void* (*decode_func_stype) (void* start_pv, void* end_pv,
+                                    decode_instructions_event_callback_ftype event_callback,
+                                    void* event_stream,
+                                    decode_instructions_printf_callback_ftype printf_callback,
+                                    void* printf_stream,
+                                    const char* options);
+#endif /* SHARED_TOOLS_HSDIS_H */
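Taken together, the header now lets a client probe for the virtual-address entry point and fall back to the legacy one, as hsdis-demo.c does above. A condensed sketch; the plugin file name and the callback arguments are illustrative:

    #include <dlfcn.h>
    #include "hsdis.h"

    static void* disassemble_range(uintptr_t from, uintptr_t to,
                                   unsigned char* buf, uintptr_t len,
                                   decode_instructions_event_callback_ftype ev_cb, void* ev_st,
                                   decode_instructions_printf_callback_ftype pr_cb, void* pr_st,
                                   const char* options) {
      void* lib = dlopen("hsdis-amd64.so", RTLD_NOW);  /* name is illustrative */
      if (lib == NULL) return NULL;
      decode_func_vtype dv = (decode_func_vtype) dlsym(lib, "decode_instructions_virtual");
      if (dv != NULL)  /* final argument: 0 = no per-instruction newline */
        return (*dv)(from, to, buf, len, ev_cb, ev_st, pr_cb, pr_st, options, 0);
      decode_func_stype ds = (decode_func_stype) dlsym(lib, "decode_instructions");
      if (ds != NULL)  /* legacy plugins: pointers double as the code addresses */
        return (*ds)((void*) from, (void*) to, ev_cb, ev_st, pr_cb, pr_st, options);
      return NULL;
    }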
--- a/src/share/vm/asm/codeBuffer.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/share/vm/asm/codeBuffer.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -758,7 +758,7 @@
     }
   }
 
-  if (dest->blob() == NULL) {
+  if (dest->blob() == NULL && dest_filled != NULL) {
     // Destination is a final resting place, not just another buffer.
     // Normalize uninitialized bytes in the final padding.
     Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -1844,17 +1844,12 @@
         code == Bytecodes::_invokevirtual && target->is_final_method() ||
         code == Bytecodes::_invokedynamic) {
       ciMethod* inline_target = (cha_monomorphic_target != NULL) ? cha_monomorphic_target : target;
-      bool success = false;
-      if (target->is_method_handle_intrinsic()) {
-        // method handle invokes
-        success = try_method_handle_inline(target);
-      } else {
-        // static binding => check if callee is ok
-        success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), code, better_receiver);
-      }
+      // static binding => check if callee is ok
+      bool success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), code, better_receiver);
+
       CHECK_BAILOUT();
-
       clear_inline_bailout();
+
       if (success) {
         // Register dependence if JVMTI has either breakpoint
         // setting or hotswapping of methods capabilities since they may
@@ -3201,6 +3196,11 @@
     return false;
   }
 
+  // method handle invokes
+  if (callee->is_method_handle_intrinsic()) {
+    return try_method_handle_inline(callee);
+  }
+
   // handle intrinsics
   if (callee->intrinsic_id() != vmIntrinsics::_none) {
     if (try_inline_intrinsics(callee)) {
@@ -3885,10 +3885,14 @@
       ValueType* type = state()->stack_at(args_base)->type();
       if (type->is_constant()) {
         ciMethod* target = type->as_ObjectType()->constant_value()->as_method_handle()->get_vmtarget();
-        guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
-        Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
-        if (try_inline(target, /*holder_known*/ true, bc)) {
-          return true;
+        // We don't do CHA here so only inline static and statically bindable methods.
+        if (target->is_static() || target->can_be_statically_bound()) {
+          Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
+          if (try_inline(target, /*holder_known*/ true, bc)) {
+            return true;
+          }
+        } else {
+          print_inlining(target, "not static or statically bindable", /*success*/ false);
         }
       } else {
         print_inlining(callee, "receiver not constant", /*success*/ false);
@@ -3941,9 +3945,14 @@
             }
             j += t->size();  // long and double take two slots
           }
-          Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
-          if (try_inline(target, /*holder_known*/ true, bc)) {
-            return true;
+          // We don't do CHA here so only inline static and statically bindable methods.
+          if (target->is_static() || target->can_be_statically_bound()) {
+            Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
+            if (try_inline(target, /*holder_known*/ true, bc)) {
+              return true;
+            }
+          } else {
+            print_inlining(target, "not static or statically bindable", /*success*/ false);
           }
         }
       } else {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/bytecodeAssembler.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "classfile/bytecodeAssembler.hpp"
+#include "interpreter/bytecodes.hpp"
+#include "memory/oopFactory.hpp"
+#include "oops/constantPool.hpp"
+
+#ifdef TARGET_ARCH_x86
+# include "bytes_x86.hpp"
+#endif
+#ifdef TARGET_ARCH_sparc
+# include "bytes_sparc.hpp"
+#endif
+#ifdef TARGET_ARCH_zero
+# include "bytes_zero.hpp"
+#endif
+#ifdef TARGET_ARCH_arm
+# include "bytes_arm.hpp"
+#endif
+#ifdef TARGET_ARCH_ppc
+# include "bytes_ppc.hpp"
+#endif
+
+u2 BytecodeConstantPool::find_or_add(BytecodeCPEntry const& bcpe) {
+  u2 index;
+  u2* probe = _indices.get(bcpe);
+  if (probe == NULL) {
+    index = _entries.length();
+    _entries.append(bcpe);
+    _indices.put(bcpe, index);
+  } else {
+    index = *probe;
+  }
+  return index + _orig->length();
+}
+
+ConstantPool* BytecodeConstantPool::create_constant_pool(TRAPS) const {
+  if (_entries.length() == 0) {
+    return _orig;
+  }
+
+  ConstantPool* cp = ConstantPool::allocate(
+      _orig->pool_holder()->class_loader_data(),
+      _orig->length() + _entries.length(), CHECK_NULL);
+
+  cp->set_pool_holder(_orig->pool_holder());
+  _orig->copy_cp_to(1, _orig->length() - 1, cp, 1, CHECK_NULL);
+
+  for (int i = 0; i < _entries.length(); ++i) {
+    BytecodeCPEntry entry = _entries.at(i);
+    int idx = i + _orig->length();
+    switch (entry._tag) {
+      case BytecodeCPEntry::UTF8:
+        cp->symbol_at_put(idx, entry._u.utf8);
+        entry._u.utf8->increment_refcount();
+        break;
+      case BytecodeCPEntry::KLASS:
+        cp->unresolved_klass_at_put(
+            idx, cp->symbol_at(entry._u.klass));
+        break;
+      case BytecodeCPEntry::STRING:
+        cp->unresolved_string_at_put(
+            idx, cp->symbol_at(entry._u.string));
+        break;
+      case BytecodeCPEntry::NAME_AND_TYPE:
+        cp->name_and_type_at_put(idx,
+            entry._u.name_and_type.name_index,
+            entry._u.name_and_type.type_index);
+        break;
+      case BytecodeCPEntry::METHODREF:
+        cp->method_at_put(idx,
+            entry._u.methodref.class_index,
+            entry._u.methodref.name_and_type_index);
+        break;
+      default:
+        ShouldNotReachHere();
+    }
+  }
+  return cp;
+}
+
+void BytecodeAssembler::append(u1 imm_u1) {
+  _code->append(imm_u1);
+}
+
+void BytecodeAssembler::append(u2 imm_u2) {
+  _code->append(0);
+  _code->append(0);
+  Bytes::put_Java_u2(_code->adr_at(_code->length() - 2), imm_u2);
+}
+
+void BytecodeAssembler::append(u4 imm_u4) {
+  _code->append(0);
+  _code->append(0);
+  _code->append(0);
+  _code->append(0);
+  Bytes::put_Java_u4(_code->adr_at(_code->length() - 4), imm_u4);
+}
+
+void BytecodeAssembler::xload(u4 index, u1 onebyteop, u1 twobyteop) {
+  if (index < 4) {
+    _code->append(onebyteop + index);
+  } else {
+    _code->append(twobyteop);
+    _code->append((u2)index);
+  }
+}
+
+void BytecodeAssembler::dup() {
+  _code->append(Bytecodes::_dup);
+}
+
+void BytecodeAssembler::_new(Symbol* sym) {
+  u2 cpool_index = _cp->klass(sym);
+  _code->append(Bytecodes::_new);
+  append(cpool_index);
+}
+
+void BytecodeAssembler::load_string(Symbol* sym) {
+  u2 cpool_index = _cp->string(sym);
+  if (cpool_index < 0x100) {
+    ldc(cpool_index);
+  } else {
+    ldc_w(cpool_index);
+  }
+}
+
+void BytecodeAssembler::ldc(u1 index) {
+  _code->append(Bytecodes::_ldc);
+  append(index);
+}
+
+void BytecodeAssembler::ldc_w(u2 index) {
+  _code->append(Bytecodes::_ldc_w);
+  append(index);
+}
+
+void BytecodeAssembler::athrow() {
+  _code->append(Bytecodes::_athrow);
+}
+
+void BytecodeAssembler::iload(u4 index) {
+  xload(index, Bytecodes::_iload_0, Bytecodes::_iload);
+}
+
+void BytecodeAssembler::lload(u4 index) {
+  xload(index, Bytecodes::_lload_0, Bytecodes::_lload);
+}
+
+void BytecodeAssembler::fload(u4 index) {
+  xload(index, Bytecodes::_fload_0, Bytecodes::_fload);
+}
+
+void BytecodeAssembler::dload(u4 index) {
+  xload(index, Bytecodes::_dload_0, Bytecodes::_dload);
+}
+
+void BytecodeAssembler::aload(u4 index) {
+  xload(index, Bytecodes::_aload_0, Bytecodes::_aload);
+}
+
+void BytecodeAssembler::load(BasicType bt, u4 index) {
+  switch (bt) {
+    case T_BOOLEAN:
+    case T_CHAR:
+    case T_BYTE:
+    case T_SHORT:
+    case T_INT:     iload(index); break;
+    case T_FLOAT:   fload(index); break;
+    case T_DOUBLE:  dload(index); break;
+    case T_LONG:    lload(index); break;
+    case T_OBJECT:
+    case T_ARRAY:   aload(index); break;
+    default:
+      ShouldNotReachHere();
+  }
+}
+
+void BytecodeAssembler::checkcast(Symbol* sym) {
+  u2 cpool_index = _cp->klass(sym);
+  _code->append(Bytecodes::_checkcast);
+  append(cpool_index);
+}
+
+void BytecodeAssembler::invokespecial(Method* method) {
+  invokespecial(method->klass_name(), method->name(), method->signature());
+}
+
+void BytecodeAssembler::invokespecial(Symbol* klss, Symbol* name, Symbol* sig) {
+  u2 methodref_index = _cp->methodref(klss, name, sig);
+  _code->append(Bytecodes::_invokespecial);
+  append(methodref_index);
+}
+
+void BytecodeAssembler::invokevirtual(Method* method) {
+  invokevirtual(method->klass_name(), method->name(), method->signature());
+}
+
+void BytecodeAssembler::invokevirtual(Symbol* klss, Symbol* name, Symbol* sig) {
+  u2 methodref_index = _cp->methodref(klss, name, sig);
+  _code->append(Bytecodes::_invokevirtual);
+  append(methodref_index);
+}
+
+void BytecodeAssembler::ireturn() {
+  _code->append(Bytecodes::_ireturn);
+}
+
+void BytecodeAssembler::lreturn() {
+  _code->append(Bytecodes::_lreturn);
+}
+
+void BytecodeAssembler::freturn() {
+  _code->append(Bytecodes::_freturn);
+}
+
+void BytecodeAssembler::dreturn() {
+  _code->append(Bytecodes::_dreturn);
+}
+
+void BytecodeAssembler::areturn() {
+  _code->append(Bytecodes::_areturn);
+}
+
+void BytecodeAssembler::_return() {
+  _code->append(Bytecodes::_return);
+}
+
+void BytecodeAssembler::_return(BasicType bt) {
+  switch (bt) {
+    case T_BOOLEAN:
+    case T_CHAR:
+    case T_BYTE:
+    case T_SHORT:
+    case T_INT:     ireturn(); break;
+    case T_FLOAT:   freturn(); break;
+    case T_DOUBLE:  dreturn(); break;
+    case T_LONG:    lreturn(); break;
+    case T_OBJECT:
+    case T_ARRAY:   areturn(); break;
+    case T_VOID:    _return(); break;
+    default:
+      ShouldNotReachHere();
+  }
+}
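The append(u2)/append(u4) overloads above reserve buffer space and then patch it with Bytes::put_Java_u2/u4, because class-file operands are big-endian regardless of host byte order. A hypothetical standalone equivalent of the u2 case:

    #include <cstdint>
    #include <vector>

    // Sketch of BytecodeAssembler::append(u2): operand high byte first.
    static void append_u2(std::vector<uint8_t>& code, uint16_t v) {
      code.push_back(uint8_t(v >> 8));
      code.push_back(uint8_t(v & 0xff));
    }

    // e.g. an ldc_w of constant-pool index 0x0102 becomes the byte
    // sequence 0x13 0x01 0x02 (0x13 = ldc_w opcode).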
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/bytecodeAssembler.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_BYTECODEASSEMBLER_HPP
+#define SHARE_VM_CLASSFILE_BYTECODEASSEMBLER_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/method.hpp"
+#include "oops/symbol.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/growableArray.hpp"
+#include "utilities/resourceHash.hpp"
+
+
+/**
+ * Bytecode Assembler
+ *
+ * These classes are used to synthesize code for creating new methods from
+ * within the VM.  This is only a partial implementation of an assembler;
+ * only the bytecodes that are needed by clients are implemented at this time.
+ * This is used during default method analysis to create overpass methods
+ * and add them to a call during parsing.  Other uses (such as creating
+ * bridges) may come later.  Any missing bytecodes can be implemented on an
+ * as-need basis.
+ */
+
+class BytecodeBuffer : public GrowableArray<u1> {
+ public:
+  BytecodeBuffer() : GrowableArray<u1>(20) {}
+};
+
+// Entries in a yet-to-be-created constant pool.  Limited types for now.
+class BytecodeCPEntry VALUE_OBJ_CLASS_SPEC {
+ public:
+  enum tag {
+    ERROR_TAG,
+    UTF8,
+    KLASS,
+    STRING,
+    NAME_AND_TYPE,
+    METHODREF
+  };
+
+  u1 _tag;
+  union {
+    Symbol* utf8;
+    u2 klass;
+    u2 string;
+    struct {
+      u2 name_index;
+      u2 type_index;
+    } name_and_type;
+    struct {
+      u2 class_index;
+      u2 name_and_type_index;
+    } methodref;
+    uintptr_t hash;
+  } _u;
+
+  BytecodeCPEntry() : _tag(ERROR_TAG) { _u.hash = 0; }
+  BytecodeCPEntry(u1 tag) : _tag(tag) { _u.hash = 0; }
+
+  static BytecodeCPEntry utf8(Symbol* symbol) {
+    BytecodeCPEntry bcpe(UTF8);
+    bcpe._u.utf8 = symbol;
+    return bcpe;
+  }
+
+  static BytecodeCPEntry klass(u2 index) {
+    BytecodeCPEntry bcpe(KLASS);
+    bcpe._u.klass = index;
+    return bcpe;
+  }
+
+  static BytecodeCPEntry string(u2 index) {
+    BytecodeCPEntry bcpe(STRING);
+    bcpe._u.string = index;
+    return bcpe;
+  }
+
+  static BytecodeCPEntry name_and_type(u2 name, u2 type) {
+    BytecodeCPEntry bcpe(NAME_AND_TYPE);
+    bcpe._u.name_and_type.name_index = name;
+    bcpe._u.name_and_type.type_index = type;
+    return bcpe;
+  }
+
+  static BytecodeCPEntry methodref(u2 class_index, u2 nat) {
+    BytecodeCPEntry bcpe(METHODREF);
+    bcpe._u.methodref.class_index = class_index;
+    bcpe._u.methodref.name_and_type_index = nat;
+    return bcpe;
+  }
+
+  static bool equals(BytecodeCPEntry const& e0, BytecodeCPEntry const& e1) {
+    return e0._tag == e1._tag && e0._u.hash == e1._u.hash;
+  }
+
+  static unsigned hash(BytecodeCPEntry const& e0) {
+    return (unsigned)(e0._tag ^ e0._u.hash);
+  }
+};
+
+class BytecodeConstantPool : ResourceObj {
+ private:
+  typedef ResourceHashtable<BytecodeCPEntry, u2,
+      &BytecodeCPEntry::hash, &BytecodeCPEntry::equals> IndexHash;
+
+  ConstantPool* _orig;
+  GrowableArray<BytecodeCPEntry> _entries;
+  IndexHash _indices;
+
+  u2 find_or_add(BytecodeCPEntry const& bcpe);
+
+ public:
+
+  BytecodeConstantPool(ConstantPool* orig) : _orig(orig) {}
+
+  BytecodeCPEntry const& at(u2 index) const { return _entries.at(index); }
+
+  InstanceKlass* pool_holder() const {
+    return InstanceKlass::cast(_orig->pool_holder());
+  }
+
+  u2 utf8(Symbol* sym) {
+    return find_or_add(BytecodeCPEntry::utf8(sym));
+  }
+
+  u2 klass(Symbol* class_name) {
+    return find_or_add(BytecodeCPEntry::klass(utf8(class_name)));
+  }
+
+  u2 string(Symbol* str) {
+    return find_or_add(BytecodeCPEntry::string(utf8(str)));
+  }
+
+  u2 name_and_type(Symbol* name, Symbol* sig) {
+    return find_or_add(BytecodeCPEntry::name_and_type(utf8(name), utf8(sig)));
+  }
+
+  u2 methodref(Symbol* class_name, Symbol* name, Symbol* sig) {
+    return find_or_add(BytecodeCPEntry::methodref(
+        klass(class_name), name_and_type(name, sig)));
+  }
+
+  ConstantPool* create_constant_pool(TRAPS) const;
+};
+
+// Partial bytecode assembler - only what we need for creating
+// overpass methods for default methods is implemented
+class BytecodeAssembler : StackObj {
+ private:
+  BytecodeBuffer* _code;
+  BytecodeConstantPool* _cp;
+
+  void append(u1 imm_u1);
+  void append(u2 imm_u2);
+  void append(u4 imm_u4);
+
+  void xload(u4 index, u1 quick, u1 twobyte);
+
+ public:
+  BytecodeAssembler(BytecodeBuffer* buffer, BytecodeConstantPool* cp)
+    : _code(buffer), _cp(cp) {}
+
+  void aload(u4 index);
+  void areturn();
+  void athrow();
+  void checkcast(Symbol* sym);
+  void dload(u4 index);
+  void dreturn();
+  void dup();
+  void fload(u4 index);
+  void freturn();
+  void iload(u4 index);
+  void invokespecial(Method* method);
+  void invokespecial(Symbol* cls, Symbol* name, Symbol* sig);
+  void invokevirtual(Method* method);
+  void invokevirtual(Symbol* cls, Symbol* name, Symbol* sig);
+  void ireturn();
+  void ldc(u1 index);
+  void ldc_w(u2 index);
+  void lload(u4 index);
+  void lreturn();
+  void _new(Symbol* sym);
+  void _return();
+
+  void load_string(Symbol* sym);
+  void load(BasicType bt, u4 index);
+  void _return(BasicType bt);
+};
+
+#endif // SHARE_VM_CLASSFILE_BYTECODEASSEMBLER_HPP
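For orientation, a hedged sketch of how these classes are meant to compose when synthesizing a small method body, such as an overpass that only throws. The Symbol* values, the ConstantPool* orig, and the surrounding TRAPS context are assumed to exist; only entry points declared above are used:

    // Sketch only: orig is the class's existing ConstantPool*.
    BytecodeBuffer buffer;                  // collects the raw bytecodes
    BytecodeConstantPool bcp(orig);         // new entries appended after orig's
    BytecodeAssembler assem(&buffer, &bcp);

    assem._new(ex_klass_name);              // new <ex_klass_name>
    assem.dup();                            // dup
    assem.invokespecial(ex_klass_name,      // invokespecial <init>()V
                        init_name, void_signature);
    assem.athrow();                         // athrow

    // Once all entries are interned, materialize the merged pool;
    // duplicates were already folded by find_or_add() along the way.
    ConstantPool* merged = bcp.create_constant_pool(CHECK_NULL);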
--- a/src/share/vm/classfile/classFileParser.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/share/vm/classfile/classFileParser.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -27,6 +27,8 @@
 #include "classfile/classLoader.hpp"
 #include "classfile/classLoaderData.hpp"
 #include "classfile/classLoaderData.inline.hpp"
+#include "classfile/defaultMethods.hpp"
+#include "classfile/genericSignatures.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
@@ -84,6 +86,9 @@
 // - to check NameAndType_info signatures more aggressively
 #define JAVA_7_VERSION                    51
 
+// Extension method support.
+#define JAVA_8_VERSION                    52
+
 
 void ClassFileParser::parse_constant_pool_entries(ClassLoaderData* loader_data, constantPoolHandle cp, int length, TRAPS) {
   // Use a local copy of ClassFileStream. It helps the C++ compiler to optimize
@@ -785,6 +790,7 @@
                                                  ClassLoaderData* loader_data,
                                                  Handle protection_domain,
                                                  Symbol* class_name,
+                                                 bool* has_default_methods,
                                                  TRAPS) {
   ClassFileStream* cfs = stream();
   assert(length > 0, "only called for length>0");
@@ -821,6 +827,9 @@
     if (!Klass::cast(interf())->is_interface()) {
       THROW_MSG_(vmSymbols::java_lang_IncompatibleClassChangeError(), "Implementing class", NULL);
     }
+    if (InstanceKlass::cast(interf())->has_default_methods()) {
+      *has_default_methods = true;
+    }
     interfaces->at_put(index, interf());
   }
 
@@ -1928,7 +1937,8 @@
     if (method_attribute_name == vmSymbols::tag_code()) {
       // Parse Code attribute
       if (_need_verify) {
-        guarantee_property(!access_flags.is_native() && !access_flags.is_abstract(),
+        guarantee_property(
+            !access_flags.is_native() && !access_flags.is_abstract(),
                         "Code attribute in native or abstract methods in class file %s",
                          CHECK_(nullHandle));
       }
@@ -2125,7 +2135,9 @@
         runtime_visible_annotations_length = method_attribute_length;
         runtime_visible_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_annotations != NULL, "null visible annotations");
-        parse_annotations(runtime_visible_annotations, runtime_visible_annotations_length, cp, &parsed_annotations, CHECK_(nullHandle));
+        parse_annotations(runtime_visible_annotations,
+            runtime_visible_annotations_length, cp, &parsed_annotations,
+            CHECK_(nullHandle));
         cfs->skip_u1(runtime_visible_annotations_length, CHECK_(nullHandle));
       } else if (PreserveAllAnnotations && method_attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
         runtime_invisible_annotations_length = method_attribute_length;
@@ -2169,12 +2181,10 @@
   }
 
   // All sizing information for a Method* is finally available, now create it
-  Method* m = Method::allocate(loader_data, code_length, access_flags,
-                               linenumber_table_length,
-                               total_lvt_length,
-                               exception_table_length,
-                               checked_exceptions_length,
-                               CHECK_(nullHandle));
+  Method* m = Method::allocate(
+      loader_data, code_length, access_flags, linenumber_table_length,
+      total_lvt_length, exception_table_length, checked_exceptions_length,
+      ConstMethod::NORMAL, CHECK_(nullHandle));
 
   ClassLoadingService::add_class_method_size(m->size()*HeapWordSize);
 
@@ -2204,7 +2214,6 @@
   // Fill in code attribute information
   m->set_max_stack(max_stack);
   m->set_max_locals(max_locals);
-
   m->constMethod()->set_stackmap_data(stackmap_data);
 
   // Copy byte codes
@@ -2356,6 +2365,7 @@
                                                Array<AnnotationArray*>** methods_annotations,
                                                Array<AnnotationArray*>** methods_parameter_annotations,
                                                Array<AnnotationArray*>** methods_default_annotations,
+                                               bool* has_default_methods,
                                                TRAPS) {
   ClassFileStream* cfs = stream();
   AnnotationArray* method_annotations = NULL;
@@ -2382,6 +2392,10 @@
       if (method->is_final()) {
         *has_final_method = true;
       }
+      if (is_interface && !method->is_abstract() && !method->is_static()) {
+        // default method
+        *has_default_methods = true;
+      }
       methods->at_put(index, method());
       if (*methods_annotations == NULL) {
         *methods_annotations =
@@ -2907,6 +2921,34 @@
 }
 
 
+#ifndef PRODUCT
+static void parseAndPrintGenericSignatures(
+    instanceKlassHandle this_klass, TRAPS) {
+  assert(ParseAllGenericSignatures == true, "Shouldn't call otherwise");
+  ResourceMark rm;
+
+  if (this_klass->generic_signature() != NULL) {
+    using namespace generic;
+    ClassDescriptor* spec = ClassDescriptor::parse_generic_signature(this_klass(), CHECK);
+
+    tty->print_cr("Parsing %s", this_klass->generic_signature()->as_C_string());
+    spec->print_on(tty);
+
+    for (int i = 0; i < this_klass->methods()->length(); ++i) {
+      Method* m = this_klass->methods()->at(i);
+      MethodDescriptor* method_spec = MethodDescriptor::parse_generic_signature(m, spec);
+      Symbol* sig = m->generic_signature();
+      if (sig == NULL) {
+        sig = m->signature();
+      }
+      tty->print_cr("Parsing %s", sig->as_C_string());
+      method_spec->print_on(tty);
+    }
+  }
+}
+#endif // ndef PRODUCT
+
+
 instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
                                                     Handle class_loader,
                                                     Handle protection_domain,
@@ -2923,6 +2965,8 @@
   unsigned char *cached_class_file_bytes = NULL;
   jint cached_class_file_length;
   ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
+  bool has_default_methods = false;
+  ResourceMark rm(THREAD);
 
   ClassFileStream* cfs = stream();
   // Timing
@@ -3138,7 +3182,9 @@
     if (itfs_len == 0) {
       local_interfaces = Universe::the_empty_klass_array();
     } else {
-      local_interfaces = parse_interfaces(cp, itfs_len, loader_data, protection_domain, _class_name, CHECK_(nullHandle));
+      local_interfaces = parse_interfaces(
+          cp, itfs_len, loader_data, protection_domain, _class_name,
+          &has_default_methods, CHECK_(nullHandle));
     }
 
     u2 java_fields_count = 0;
@@ -3164,6 +3210,7 @@
                                             &methods_annotations,
                                             &methods_parameter_annotations,
                                             &methods_default_annotations,
+                                            &has_default_methods,
                                             CHECK_(nullHandle));
 
     // Additional attributes
@@ -3193,6 +3240,11 @@
       super_klass = instanceKlassHandle(THREAD, kh());
     }
     if (super_klass.not_null()) {
+
+      if (super_klass->has_default_methods()) {
+        has_default_methods = true;
+      }
+
       if (super_klass->is_interface()) {
         ResourceMark rm(THREAD);
         Exceptions::fthrow(
@@ -3229,14 +3281,11 @@
     int itable_size = 0;
     int num_miranda_methods = 0;
 
-    klassVtable::compute_vtable_size_and_num_mirandas(vtable_size,
-                                                      num_miranda_methods,
-                                                      super_klass(),
-                                                      methods,
-                                                      access_flags,
-                                                      class_loader,
-                                                      class_name,
-                                                      local_interfaces,
+    GrowableArray<Method*> all_mirandas(20);
+
+    klassVtable::compute_vtable_size_and_num_mirandas(
+        &vtable_size, &num_miranda_methods, &all_mirandas, super_klass(), methods,
+        access_flags, class_loader, class_name, local_interfaces,
                                                       CHECK_(nullHandle));
 
     // Size of Java itable (in words)
@@ -3656,6 +3705,7 @@
 
     this_klass->set_minor_version(minor_version);
     this_klass->set_major_version(major_version);
+    this_klass->set_has_default_methods(has_default_methods);
 
     // Set up Method*::intrinsic_id as soon as we know the names of methods.
     // (We used to do this lazily, but now we query it in Rewriter,
@@ -3673,6 +3723,16 @@
                                         cached_class_file_length);
     }
 
+    // Fill in field values obtained by parse_classfile_attributes
+    if (parsed_annotations.has_any_annotations())
+      parsed_annotations.apply_to(this_klass);
+    // Create annotations
+    if (_annotations != NULL && this_klass->annotations() == NULL) {
+      Annotations* anno = Annotations::allocate(loader_data, CHECK_NULL);
+      this_klass->set_annotations(anno);
+    }
+    apply_parsed_class_attributes(this_klass);
+
     // Miranda methods
     if ((num_miranda_methods > 0) ||
         // if this class introduced new miranda methods or
@@ -3682,18 +3742,6 @@
       this_klass->set_has_miranda_methods(); // then set a flag
     }
 
-    // Fill in field values obtained by parse_classfile_attributes
-    if (parsed_annotations.has_any_annotations()) {
-      parsed_annotations.apply_to(this_klass);
-    }
-    // Create annotations
-    if (_annotations != NULL && this_klass->annotations() == NULL) {
-      Annotations* anno = Annotations::allocate(loader_data, CHECK_NULL);
-      this_klass->set_annotations(anno);
-    }
-    apply_parsed_class_attributes(this_klass);
-
-    // Compute transitive closure of interfaces this class implements
     this_klass->set_transitive_interfaces(transitive_interfaces);
 
     // Fill in information needed to compute superclasses.
@@ -3702,6 +3750,7 @@
     // Initialize itable offset tables
     klassItable::setup_itable_offset_table(this_klass);
 
+    // Compute transitive closure of interfaces this class implements
     // Do final class setup
     fill_oop_maps(this_klass, nonstatic_oop_map_count, nonstatic_oop_offsets, nonstatic_oop_counts);
 
@@ -3726,6 +3775,21 @@
       check_illegal_static_method(this_klass, CHECK_(nullHandle));
     }
 
+
+#ifdef ASSERT
+    if (ParseAllGenericSignatures) {
+      parseAndPrintGenericSignatures(this_klass, CHECK_(nullHandle));
+    }
+#endif
+
+    // Generate any default methods - default methods are interface methods
+    // that have a default implementation.  This is new with the Lambda project.
+    if (has_default_methods && !access_flags.is_interface() &&
+        local_interfaces->length() > 0) {
+      DefaultMethods::generate_default_methods(
+          this_klass(), &all_mirandas, CHECK_(nullHandle));
+    }
+
     // Allocate mirror and initialize static fields
     java_lang_Class::create_mirror(this_klass, CHECK_(nullHandle));
 
@@ -3744,6 +3808,7 @@
                                              false /* not shared class */);
 
     if (TraceClassLoading) {
+      ResourceMark rm;
       // print in a single call to reduce interleaving of output
       if (cfs->source() != NULL) {
         tty->print("[Loaded %s from %s]\n", this_klass->external_name(),
@@ -3758,13 +3823,13 @@
           tty->print("[Loaded %s]\n", this_klass->external_name());
         }
       } else {
-        ResourceMark rm;
         tty->print("[Loaded %s from %s]\n", this_klass->external_name(),
                    InstanceKlass::cast(class_loader->klass())->external_name());
       }
     }
 
     if (TraceClassResolution) {
+      ResourceMark rm;
       // print out the superclass.
       const char * from = Klass::cast(this_klass())->external_name();
       if (this_klass->java_super() != NULL) {
@@ -3785,6 +3850,7 @@
 
 #ifndef PRODUCT
     if( PrintCompactFieldsSavings ) {
+      ResourceMark rm;
       if( nonstatic_field_size < orig_nonstatic_field_size ) {
         tty->print("[Saved %d of %d bytes in %s]\n",
                  (orig_nonstatic_field_size - nonstatic_field_size)*heapOopSize,
@@ -3811,7 +3877,6 @@
   return this_klass;
 }
 
-
 unsigned int
 ClassFileParser::compute_oop_map_count(instanceKlassHandle super,
                                        unsigned int nonstatic_oop_map_count,
@@ -4263,13 +4328,16 @@
   const bool is_strict       = (flags & JVM_ACC_STRICT)       != 0;
   const bool is_synchronized = (flags & JVM_ACC_SYNCHRONIZED) != 0;
   const bool major_gte_15    = _major_version >= JAVA_1_5_VERSION;
+  const bool major_gte_8     = _major_version >= JAVA_8_VERSION;
   const bool is_initializer  = (name == vmSymbols::object_initializer_name());
 
   bool is_illegal = false;
 
   if (is_interface) {
-    if (!is_abstract || !is_public || is_static || is_final ||
-        is_native || (major_gte_15 && (is_synchronized || is_strict))) {
+    if (!is_public || is_static || is_final || is_native ||
+        ((is_synchronized || is_strict) && major_gte_15 &&
+            (!major_gte_8 || is_abstract)) ||
+        (!major_gte_8 && !is_abstract)) {
       is_illegal = true;
     }
   } else { // not interface
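Unrolled for readability, the revised interface-method rule above amounts to the following paraphrase (same flags, same class-file version gates, no change in behavior):

    // Always illegal for interface methods, at any class-file version:
    bool always_illegal  = !is_public || is_static || is_final || is_native;
    // Before version 52 every interface method had to be abstract;
    // from 52 on, concrete (default) bodies are permitted.
    bool needs_v52       = !major_gte_8 && !is_abstract;
    // synchronized/strict are checked from version 49 on; from version 52
    // only non-abstract (default) methods may still carry them.
    bool bad_sync_strict = (is_synchronized || is_strict) && major_gte_15 &&
                           (!major_gte_8 || is_abstract);
    bool is_illegal = always_illegal || needs_v52 || bad_sync_strict;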
--- a/src/share/vm/classfile/classFileParser.hpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/share/vm/classfile/classFileParser.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -151,6 +151,7 @@
                                   ClassLoaderData* loader_data,
                                   Handle protection_domain,
                                   Symbol* class_name,
+                                  bool* has_default_methods,
                                   TRAPS);
   void record_defined_class_dependencies(instanceKlassHandle defined_klass, TRAPS);
 
@@ -188,6 +189,7 @@
                                 Array<AnnotationArray*>** methods_annotations,
                                 Array<AnnotationArray*>** methods_parameter_annotations,
                                 Array<AnnotationArray*>** methods_default_annotations,
+                                bool* has_default_methods,
                                 TRAPS);
   Array<int>* sort_methods(ClassLoaderData* loader_data,
                            Array<Method*>* methods,
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/defaultMethods.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -0,0 +1,1387 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/bytecodeAssembler.hpp"
+#include "classfile/defaultMethods.hpp"
+#include "classfile/genericSignatures.hpp"
+#include "classfile/symbolTable.hpp"
+#include "memory/allocation.hpp"
+#include "memory/metadataFactory.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/signature.hpp"
+#include "runtime/thread.hpp"
+#include "oops/instanceKlass.hpp"
+#include "oops/klass.hpp"
+#include "oops/method.hpp"
+#include "utilities/accessFlags.hpp"
+#include "utilities/exceptions.hpp"
+#include "utilities/ostream.hpp"
+#include "utilities/pair.hpp"
+#include "utilities/resourceHash.hpp"
+
+typedef enum { QUALIFIED, DISQUALIFIED } QualifiedState;
+
+// Because we use an iterative algorithm when iterating over the type
+// hierarchy, we can't use traditional scoped objects which automatically do
+// cleanup in the destructor when the scope is exited.  PseudoScope (and
+// PseudoScopeMark) provides a similar functionality, but for when you want a
+// scoped object in non-stack memory (such as in resource memory, as we do
+// here).  You've just got to remember to call 'destroy()' on the scope when
+// leaving it (and marks have to be explicitly added).
+class PseudoScopeMark : public ResourceObj {
+ public:
+  virtual void destroy() = 0;
+};
+
+class PseudoScope : public ResourceObj {
+ private:
+  GrowableArray<PseudoScopeMark*> _marks;
+ public:
+
+  static PseudoScope* cast(void* data) {
+    return static_cast<PseudoScope*>(data);
+  }
+
+  void add_mark(PseudoScopeMark* psm) {
+   _marks.append(psm);
+  }
+
+  void destroy() {
+    for (int i = 0; i < _marks.length(); ++i) {
+      _marks.at(i)->destroy();
+    }
+  }
+};
+
+class ContextMark : public PseudoScopeMark {
+ private:
+  generic::Context::Mark _mark;
+ public:
+  ContextMark(const generic::Context::Mark& cm) : _mark(cm) {}
+  virtual void destroy() { _mark.destroy(); }
+};
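+
+// A minimal usage sketch (illustrative; 'ctx' stands in for some
+// generic::Context*):
+//
+//   PseudoScope* scope = new PseudoScope();          // resource-area object
+//   scope->add_mark(new ContextMark(ctx->mark()));   // register cleanup work
+//   ...
+//   scope->destroy();  // invokes destroy() on every registered mark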
+
+#ifndef PRODUCT
+static void print_slot(outputStream* str, Symbol* name, Symbol* signature) {
+  ResourceMark rm;
+  str->print("%s%s", name->as_C_string(), signature->as_C_string());
+}
+
+static void print_method(outputStream* str, Method* mo, bool with_class=true) {
+  ResourceMark rm;
+  if (with_class) {
+    str->print("%s.", mo->klass_name()->as_C_string());
+  }
+  print_slot(str, mo->name(), mo->signature());
+}
+#endif // ndef PRODUCT
+
+/**
+ * Perform a depth-first iteration over the class hierarchy, applying
+ * algorithmic logic as it goes.
+ *
+ * This class is one half of the inheritance hierarchy analysis mechanism.
+ * It is meant to be used in conjunction with another class, the algorithm,
+ * which is indicated by the ALGO template parameter.  This class can be
+ * paired with any algorithm class that provides the required methods.
+ *
+ * This class contains all the mechanics for iterating over the class hierarchy
+ * starting at a particular root, without recursing (thus limiting stack growth
+ * from this point).  It visits each superclass (if present) and superinterface
+ * in a depth-first manner, with callbacks to the ALGO class as each class is
+ * encountered (visit()), The algorithm can cut-off further exploration of a
+ * particular branch by returning 'false' from a visit() call.
+ *
+ * The ALGO class, must provide a visit() method, which each of which will be
+ * called once for each node in the inheritance tree during the iteration.  In
+ * addition, it can provide a memory block via new_node_data(InstanceKlass*),
+ * which it can use for node-specific storage (and access via the
+ * current_data() and data_at_depth(int) methods).
+ *
+ * Bare minimum needed to be an ALGO class:
+ * class Algo : public HierarchyVisitor<Algo> {
+ *   void* new_node_data(InstanceKlass* cls) { return NULL; }
+ *   void free_node_data(void* data) { return; }
+ *   bool visit() { return true; }
+ * };
+ */
+template <class ALGO>
+class HierarchyVisitor : StackObj {
+ private:
+
+  class Node : public ResourceObj {
+   public:
+    InstanceKlass* _class;
+    bool _super_was_visited;
+    int _interface_index;
+    void* _algorithm_data;
+
+    Node(InstanceKlass* cls, void* data, bool visit_super)
+        : _class(cls), _super_was_visited(!visit_super),
+          _interface_index(0), _algorithm_data(data) {}
+
+    int number_of_interfaces() { return _class->local_interfaces()->length(); }
+    int interface_index() { return _interface_index; }
+    void set_super_visited() { _super_was_visited = true; }
+    void increment_visited_interface() { ++_interface_index; }
+    void set_all_interfaces_visited() {
+      _interface_index = number_of_interfaces();
+    }
+    bool has_visited_super() { return _super_was_visited; }
+    bool has_visited_all_interfaces() {
+      return interface_index() >= number_of_interfaces();
+    }
+    InstanceKlass* interface_at(int index) {
+      return InstanceKlass::cast(_class->local_interfaces()->at(index));
+    }
+    InstanceKlass* next_super() { return _class->java_super(); }
+    InstanceKlass* next_interface() {
+      return interface_at(interface_index());
+    }
+  };
+
+  bool _cancelled;
+  GrowableArray<Node*> _path;
+
+  Node* current_top() const { return _path.top(); }
+  bool has_more_nodes() const { return !_path.is_empty(); }
+  void push(InstanceKlass* cls, void* data) {
+    assert(cls != NULL, "Requires a valid instance class");
+    Node* node = new Node(cls, data, has_super(cls));
+    _path.push(node);
+  }
+  void pop() { _path.pop(); }
+
+  void reset_iteration() {
+    _cancelled = false;
+    _path.clear();
+  }
+  bool is_cancelled() const { return _cancelled; }
+
+  static bool has_super(InstanceKlass* cls) {
+    return cls->super() != NULL && !cls->is_interface();
+  }
+
+  Node* node_at_depth(int i) const {
+    return (i >= _path.length()) ? NULL : _path.at(_path.length() - i - 1);
+  }
+
+ protected:
+
+  // Accessors available to the algorithm
+  int current_depth() const { return _path.length() - 1; }
+
+  InstanceKlass* class_at_depth(int i) {
+    Node* n = node_at_depth(i);
+    return n == NULL ? NULL : n->_class;
+  }
+  InstanceKlass* current_class() { return class_at_depth(0); }
+
+  void* data_at_depth(int i) {
+    Node* n = node_at_depth(i);
+    return n == NULL ? NULL : n->_algorithm_data;
+  }
+  void* current_data() { return data_at_depth(0); }
+
+  void cancel_iteration() { _cancelled = true; }
+
+ public:
+
+  void run(InstanceKlass* root) {
+    ALGO* algo = static_cast<ALGO*>(this);
+
+    reset_iteration();
+
+    void* algo_data = algo->new_node_data(root);
+    push(root, algo_data);
+    bool top_needs_visit = true;
+
+    do {
+      Node* top = current_top();
+      if (top_needs_visit) {
+        if (algo->visit() == false) {
+          // algorithm does not want to continue along this path.  Arrange
+          // it so that this state is immediately popped off the stack
+          top->set_super_visited();
+          top->set_all_interfaces_visited();
+        }
+        top_needs_visit = false;
+      }
+
+      if (top->has_visited_super() && top->has_visited_all_interfaces()) {
+        algo->free_node_data(top->_algorithm_data);
+        pop();
+      } else {
+        InstanceKlass* next = NULL;
+        if (top->has_visited_super() == false) {
+          next = top->next_super();
+          top->set_super_visited();
+        } else {
+          next = top->next_interface();
+          top->increment_visited_interface();
+        }
+        assert(next != NULL, "Otherwise we shouldn't be here");
+        algo_data = algo->new_node_data(next);
+        push(next, algo_data);
+        top_needs_visit = true;
+      }
+    } while (!is_cancelled() && has_more_nodes());
+  }
+};
+
+#ifndef PRODUCT
+class PrintHierarchy : public HierarchyVisitor<PrintHierarchy> {
+ public:
+
+  bool visit() {
+    InstanceKlass* cls = current_class();
+    streamIndentor si(tty, current_depth() * 2);
+    tty->indent().print_cr("%s", cls->name()->as_C_string());
+    return true;
+  }
+
+  void* new_node_data(InstanceKlass* cls) { return NULL; }
+  void free_node_data(void* data) { return; }
+};
+#endif // ndef PRODUCT
+
+// Used to register InstanceKlass objects and all related metadata structures
+// (Methods, ConstantPools) as "in-use" by the current thread so that they can't
+// be deallocated by class redefinition while we're using them.  The classes are
+// de-registered when this goes out of scope.
+//
+// Once a class is registered, we need not bother with methodHandles or
+// constantPoolHandles for its associated metadata.
+class KeepAliveRegistrar : public StackObj {
+ private:
+  Thread* _thread;
+  GrowableArray<ConstantPool*> _keep_alive;
+
+ public:
+  KeepAliveRegistrar(Thread* thread) : _thread(thread), _keep_alive(20) {
+    assert(thread == Thread::current(), "Must be current thread");
+  }
+
+  ~KeepAliveRegistrar() {
+    for (int i = _keep_alive.length() - 1; i >= 0; --i) {
+      ConstantPool* cp = _keep_alive.at(i);
+      int idx = _thread->metadata_handles()->find_from_end(cp);
+      assert(idx >= 0, "Must be in the list");
+      _thread->metadata_handles()->remove_at(idx);
+    }
+  }
+
+  // Register a class as 'in-use' by the thread.  It's fine to register a class
+  // multiple times (though perhaps inefficient)
+  void register_class(InstanceKlass* ik) {
+    ConstantPool* cp = ik->constants();
+    _keep_alive.push(cp);
+    _thread->metadata_handles()->push(cp);
+  }
+};
+
+class KeepAliveVisitor : public HierarchyVisitor<KeepAliveVisitor> {
+ private:
+  KeepAliveRegistrar* _registrar;
+
+ public:
+  KeepAliveVisitor(KeepAliveRegistrar* registrar) : _registrar(registrar) {}
+
+  void* new_node_data(InstanceKlass* cls) { return NULL; }
+  void free_node_data(void* data) { return; }
+
+  bool visit() {
+    _registrar->register_class(current_class());
+    return true;
+  }
+};
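+
+// Typical pairing, as used by generate_default_methods() below:
+//
+//   KeepAliveRegistrar keepAlive(THREAD);
+//   KeepAliveVisitor loadKeepAlive(&keepAlive);
+//   loadKeepAlive.run(klass);   // registers klass and every supertype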
+
+// A method family contains a set of all methods that implement a single
+// language-level method.  Because of erasure, these methods may have different
+// signatures.  As members of the set are collected while walking over the
+// hierarchy, they are tagged with a qualification state.  The qualification
+// state for an erased method is set to disqualified if there exists a path
+// from the root of the hierarchy to the method that contains an interleaving
+// language-equivalent method defined in an interface.
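+//
+// For example (illustrative Java):
+//
+//   interface A { default Object m() { ... } }
+//   interface B extends A { default String m() { ... } }
+//
+// A.m and B.m have different erased signatures but implement the same
+// language-level method; on a path where B appears between the root and A,
+// the entry collected for A.m is marked DISQUALIFIED.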
+class MethodFamily : public ResourceObj {
+ private:
+
+  generic::MethodDescriptor* _descriptor; // language-level description
+  GrowableArray<Pair<Method*,QualifiedState> > _members;
+  ResourceHashtable<Method*, int> _member_index;
+
+  Method* _selected_target;  // Filled in later, if a unique target exists
+  Symbol* _exception_message; // If no unique target is found
+
+  bool contains_method(Method* method) {
+    int* lookup = _member_index.get(method);
+    return lookup != NULL;
+  }
+
+  void add_method(Method* method, QualifiedState state) {
+    Pair<Method*,QualifiedState> entry(method, state);
+    _member_index.put(method, _members.length());
+    _members.append(entry);
+  }
+
+  void disqualify_method(Method* method) {
+    int* index = _member_index.get(method);
+    assert(index != NULL && *index >= 0 && *index < _members.length(), "bad index");
+    _members.at(*index).second = DISQUALIFIED;
+  }
+
+  Symbol* generate_no_defaults_message(TRAPS) const;
+  Symbol* generate_abstract_method_message(Method* method, TRAPS) const;
+  Symbol* generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const;
+
+ public:
+
+  MethodFamily(generic::MethodDescriptor* canonical_desc)
+      : _descriptor(canonical_desc), _selected_target(NULL),
+        _exception_message(NULL) {}
+
+  generic::MethodDescriptor* descriptor() const { return _descriptor; }
+
+  bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) {
+    return descriptor()->covariant_match(md, ctx);
+  }
+
+  void set_target_if_empty(Method* m) {
+    if (_selected_target == NULL && !m->is_overpass()) {
+      _selected_target = m;
+    }
+  }
+
+  void record_qualified_method(Method* m) {
+    // If the method already exists in the set as qualified, this operation is
+    // redundant.  If it already exists as disqualified, then we leave it as
+    // disqualified.  Thus we only add to the set if it's not already in the
+    // set.
+    if (!contains_method(m)) {
+      add_method(m, QUALIFIED);
+    }
+  }
+
+  void record_disqualified_method(Method* m) {
+    // If not in the set, add it as disqualified.  If it's already in the set,
+    // then set the state to disqualified no matter what the previous state was.
+    if (!contains_method(m)) {
+      add_method(m, DISQUALIFIED);
+    } else {
+      disqualify_method(m);
+    }
+  }
+
+  bool has_target() const { return _selected_target != NULL; }
+  bool throws_exception() { return _exception_message != NULL; }
+
+  Method* get_selected_target() { return _selected_target; }
+  Symbol* get_exception_message() { return _exception_message; }
+
+  // Either sets the target or the exception error message
+  void determine_target(InstanceKlass* root, TRAPS) {
+    if (has_target() || throws_exception()) {
+      return;
+    }
+
+    GrowableArray<Method*> qualified_methods;
+    for (int i = 0; i < _members.length(); ++i) {
+      Pair<Method*,QualifiedState> entry = _members.at(i);
+      if (entry.second == QUALIFIED) {
+        qualified_methods.append(entry.first);
+      }
+    }
+
+    if (qualified_methods.length() == 0) {
+      _exception_message = generate_no_defaults_message(CHECK);
+    } else if (qualified_methods.length() == 1) {
+      Method* method = qualified_methods.at(0);
+      if (method->is_abstract()) {
+        _exception_message = generate_abstract_method_message(method, CHECK);
+      } else {
+        _selected_target = qualified_methods.at(0);
+      }
+    } else {
+      _exception_message = generate_conflicts_message(&qualified_methods, CHECK);
+    }
+
+    assert((has_target() ^ throws_exception()) == 1,
+           "One and only one must be true");
+  }
+
+  bool contains_signature(Symbol* query) {
+    for (int i = 0; i < _members.length(); ++i) {
+      if (query == _members.at(i).first->signature()) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+#ifndef PRODUCT
+  void print_on(outputStream* str) const {
+    print_on(str, 0);
+  }
+
+  void print_on(outputStream* str, int indent) const {
+    streamIndentor si(str, indent * 2);
+
+    generic::Context ctx(NULL); // empty, as _descriptor already canonicalized
+    TempNewSymbol family = descriptor()->reify_signature(&ctx, Thread::current());
+    str->indent().print_cr("Logical Method %s:", family->as_C_string());
+
+    streamIndentor si2(str);
+    for (int i = 0; i < _members.length(); ++i) {
+      str->indent();
+      print_method(str, _members.at(i).first);
+      if (_members.at(i).second == DISQUALIFIED) {
+        str->print(" (disqualified)");
+      }
+      str->print_cr("");
+    }
+
+    if (_selected_target != NULL) {
+      print_selected(str, 1);
+    }
+  }
+
+  void print_selected(outputStream* str, int indent) const {
+    assert(has_target(), "Should be called otherwise");
+    streamIndentor si(str, indent * 2);
+    str->indent().print("Selected method: ");
+    print_method(str, _selected_target);
+    str->print_cr("");
+  }
+
+  void print_exception(outputStream* str, int indent) {
+    assert(throws_exception(), "Should be called otherwise");
+    streamIndentor si(str, indent * 2);
+    str->indent().print_cr("%s", _exception_message->as_C_string());
+  }
+#endif // ndef PRODUCT
+};
+
+Symbol* MethodFamily::generate_no_defaults_message(TRAPS) const {
+  return SymbolTable::new_symbol("No qualifying defaults found", CHECK_NULL);
+}
+
+Symbol* MethodFamily::generate_abstract_method_message(Method* method, TRAPS) const {
+  Symbol* klass = method->klass_name();
+  Symbol* name = method->name();
+  Symbol* sig = method->signature();
+  stringStream ss;
+  ss.print("Method ");
+  ss.write((const char*)klass->bytes(), klass->utf8_length());
+  ss.print(".");
+  ss.write((const char*)name->bytes(), name->utf8_length());
+  ss.write((const char*)sig->bytes(), sig->utf8_length());
+  ss.print(" is abstract");
+  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
+}
+
+Symbol* MethodFamily::generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const {
+  stringStream ss;
+  ss.print("Conflicting default methods:");
+  for (int i = 0; i < methods->length(); ++i) {
+    Method* method = methods->at(i);
+    Symbol* klass = method->klass_name();
+    Symbol* name = method->name();
+    ss.print(" ");
+    ss.write((const char*)klass->bytes(), klass->utf8_length());
+    ss.print(".");
+    ss.write((const char*)name->bytes(), name->utf8_length());
+  }
+  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
+}
+
+class StateRestorer;
+
+// StatefulMethodFamily is a wrapper around MethodFamily that maintains the
+// qualification state during hierarchy visitation, and applies that state
+// when adding members to the MethodFamily.
+class StatefulMethodFamily : public ResourceObj {
+  friend class StateRestorer;
+ private:
+  MethodFamily* _method;
+  QualifiedState _qualification_state;
+
+  void set_qualification_state(QualifiedState state) {
+    _qualification_state = state;
+  }
+
+ public:
+  StatefulMethodFamily(generic::MethodDescriptor* md, generic::Context* ctx) {
+    _method = new MethodFamily(md->canonicalize(ctx));
+    _qualification_state = QUALIFIED;
+  }
+
+  void set_target_if_empty(Method* m) { _method->set_target_if_empty(m); }
+
+  MethodFamily* get_method_family() { return _method; }
+
+  bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) {
+    return _method->descriptor_matches(md, ctx);
+  }
+
+  StateRestorer* record_method_and_dq_further(Method* mo);
+};
+
+class StateRestorer : public PseudoScopeMark {
+ private:
+  StatefulMethodFamily* _method;
+  QualifiedState _state_to_restore;
+ public:
+  StateRestorer(StatefulMethodFamily* dm, QualifiedState state)
+      : _method(dm), _state_to_restore(state) {}
+  ~StateRestorer() { destroy(); }
+  void restore_state() { _method->set_qualification_state(_state_to_restore); }
+  virtual void destroy() { restore_state(); }
+};
+
+StateRestorer* StatefulMethodFamily::record_method_and_dq_further(Method* mo) {
+  StateRestorer* mark = new StateRestorer(this, _qualification_state);
+  if (_qualification_state == QUALIFIED) {
+    _method->record_qualified_method(mo);
+  } else {
+    _method->record_disqualified_method(mo);
+  }
+  // Everything found "above"??? this method in the hierarchy walk is set to
+  // disqualified
+  set_qualification_state(DISQUALIFIED);
+  return mark;
+}
+
+class StatefulMethodFamilies : public ResourceObj {
+ private:
+  GrowableArray<StatefulMethodFamily*> _methods;
+
+ public:
+  StatefulMethodFamily* find_matching(
+      generic::MethodDescriptor* md, generic::Context* ctx) {
+    for (int i = 0; i < _methods.length(); ++i) {
+      StatefulMethodFamily* existing = _methods.at(i);
+      if (existing->descriptor_matches(md, ctx)) {
+        return existing;
+      }
+    }
+    return NULL;
+  }
+
+  StatefulMethodFamily* find_matching_or_create(
+      generic::MethodDescriptor* md, generic::Context* ctx) {
+    StatefulMethodFamily* method = find_matching(md, ctx);
+    if (method == NULL) {
+      method = new StatefulMethodFamily(md, ctx);
+      _methods.append(method);
+    }
+    return method;
+  }
+
+  void extract_families_into(GrowableArray<MethodFamily*>* array) {
+    for (int i = 0; i < _methods.length(); ++i) {
+      array->append(_methods.at(i)->get_method_family());
+    }
+  }
+};
+
+// Represents a location corresponding to a vtable slot for which neither
+// the class nor any of its ancestors provides an implementation.
+// Default methods may be present to fill this slot.
+class EmptyVtableSlot : public ResourceObj {
+ private:
+  Symbol* _name;
+  Symbol* _signature;
+  int _size_of_parameters;
+  MethodFamily* _binding;
+
+ public:
+  EmptyVtableSlot(Method* method)
+      : _name(method->name()), _signature(method->signature()),
+        _size_of_parameters(method->size_of_parameters()), _binding(NULL) {}
+
+  Symbol* name() const { return _name; }
+  Symbol* signature() const { return _signature; }
+  int size_of_parameters() const { return _size_of_parameters; }
+
+  void bind_family(MethodFamily* lm) { _binding = lm; }
+  bool is_bound() { return _binding != NULL; }
+  MethodFamily* get_binding() { return _binding; }
+
+#ifndef PRODUCT
+  void print_on(outputStream* str) const {
+    print_slot(str, name(), signature());
+  }
+#endif // ndef PRODUCT
+};
+
+static GrowableArray<EmptyVtableSlot*>* find_empty_vtable_slots(
+    InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS) {
+
+  assert(klass != NULL, "Must be valid class");
+
+  GrowableArray<EmptyVtableSlot*>* slots = new GrowableArray<EmptyVtableSlot*>();
+
+  // All miranda methods are obvious candidates
+  for (int i = 0; i < mirandas->length(); ++i) {
+    EmptyVtableSlot* slot = new EmptyVtableSlot(mirandas->at(i));
+    slots->append(slot);
+  }
+
+  // Also add any overpasses in our superclasses that we haven't implemented.
+  // (can't use the vtable because it is not guaranteed to be initialized yet)
+  InstanceKlass* super = klass->java_super();
+  while (super != NULL) {
+    for (int i = 0; i < super->methods()->length(); ++i) {
+      Method* m = super->methods()->at(i);
+      if (m->is_overpass()) {
+        // m is a method that would have been a miranda if not for the
+        // default method processing that occurred on behalf of our superclass,
+        // so it's a method we want to re-examine in this new context.  That is,
+        // unless we have a real implementation of it in the current class.
+        Method* impl = klass->lookup_method(m->name(), m->signature());
+        if (impl == NULL || impl->is_overpass()) {
+          slots->append(new EmptyVtableSlot(m));
+        }
+      }
+    }
+    super = super->java_super();
+  }
+
+#ifndef PRODUCT
+  if (TraceDefaultMethods) {
+    tty->print_cr("Slots that need filling:");
+    streamIndentor si(tty);
+    for (int i = 0; i < slots->length(); ++i) {
+      tty->indent();
+      slots->at(i)->print_on(tty);
+      tty->print_cr("");
+    }
+  }
+#endif // ndef PRODUCT
+  return slots;
+}
+
+// Iterates over the type hierarchy looking for all methods with a specific
+// method name.  The result of this is a set of method families each of
+// which is populated with a set of methods that implement the same
+// language-level signature.
+class FindMethodsByName : public HierarchyVisitor<FindMethodsByName> {
+ private:
+  // Context data
+  Thread* THREAD;
+  generic::DescriptorCache* _cache;
+  Symbol* _method_name;
+  generic::Context* _ctx;
+  StatefulMethodFamilies _families;
+
+ public:
+
+  FindMethodsByName(generic::DescriptorCache* cache, Symbol* name,
+      generic::Context* ctx, Thread* thread) :
+    _cache(cache), _method_name(name), _ctx(ctx), THREAD(thread) {}
+
+  void get_discovered_families(GrowableArray<MethodFamily*>* methods) {
+    _families.extract_families_into(methods);
+  }
+
+  void* new_node_data(InstanceKlass* cls) { return new PseudoScope(); }
+  void free_node_data(void* node_data) {
+    PseudoScope::cast(node_data)->destroy();
+  }
+
+  bool visit() {
+    PseudoScope* scope = PseudoScope::cast(current_data());
+    InstanceKlass* klass = current_class();
+    InstanceKlass* sub = current_depth() > 0 ? class_at_depth(1) : NULL;
+
+    ContextMark* cm = new ContextMark(_ctx->mark());
+    scope->add_mark(cm); // will restore context when scope is freed
+
+    _ctx->apply_type_arguments(sub, klass, THREAD);
+
+    int start, end = 0;
+    start = klass->find_method_by_name(_method_name, &end);
+    if (start != -1) {
+      for (int i = start; i < end; ++i) {
+        Method* m = klass->methods()->at(i);
+        // This gets the method's parameter list with its generic type
+        // parameters resolved
+        generic::MethodDescriptor* md = _cache->descriptor_for(m, THREAD);
+
+        // Find all methods on this hierarchy that match this method
+        // (name, signature); _families collects the families discovered
+        // for this method name.
+        StatefulMethodFamily* family =
+            _families.find_matching_or_create(md, _ctx);
+
+        if (klass->is_interface()) {
+          // Interface methods are default-method candidates: record this one
+          // under the current qualification state, then disqualify any
+          // matches found further up this path (the state is restored when
+          // the scope is destroyed)
+          StateRestorer* restorer = family->record_method_and_dq_further(m);
+          scope->add_mark(restorer);
+        } else {
+          // This is the rule that methods in classes take precedence over
+          // methods in interfaces.  This works because of single inheritance.
+          family->set_target_if_empty(m);
+        }
+      }
+    }
+    return true;
+  }
+};
+
+#ifndef PRODUCT
+static void print_families(
+    GrowableArray<MethodFamily*>* methods, Symbol* match) {
+  streamIndentor si(tty, 4);
+  if (methods->length() == 0) {
+    tty->indent();
+    tty->print_cr("No Logical Method found");
+  }
+  for (int i = 0; i < methods->length(); ++i) {
+    tty->indent();
+    MethodFamily* lm = methods->at(i);
+    if (lm->contains_signature(match)) {
+      tty->print_cr("<Matching>");
+    } else {
+      tty->print_cr("<Non-Matching>");
+    }
+    lm->print_on(tty, 1);
+  }
+}
+#endif // ndef PRODUCT
+
+static void merge_in_new_methods(InstanceKlass* klass,
+    GrowableArray<Method*>* new_methods, TRAPS);
+static void create_overpasses(
+    GrowableArray<EmptyVtableSlot*>* slots, InstanceKlass* klass, TRAPS);
+
+// This is the guts of the default methods implementation.  This is called just
+// after the classfile has been parsed if some ancestor has default methods.
+//
+// First it finds any name/signature slots that need an implementation (either
+// because they are miranda or a superclass's implementation is an overpass
+// itself).  For each slot, iterate over the hierarchy, using generic signature
+// information to partition any methods that match the name into method families
+// where each family contains methods whose signatures are equivalent at the
+// language level (i.e., their reified parameters match and return values are
+// covariant). Check those sets to see if they contain a signature that matches
+// the slot we're looking at (if we're lucky, there might be other empty slots
+// that we can fill using the same analysis).
+//
+// For each slot filled, we generate an overpass method that either calls the
+// unique default method candidate using invokespecial, or throws an exception
+// (in the case of no default method candidates, or more than one valid
+// candidate).  These methods are then added to the class's method list.  If
+// the method set we're using contains methods (qualified or not) with a
+// different runtime signature than the method we're creating, then we have to
+// create bridges with those signatures too.
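+//
+// As a rough illustration (hypothetical Java, not emitted source), a filled
+// slot behaves as if the class had declared:
+//
+//   public Object m(Object o) {              // the slot's erased signature
+//     return SomeIface.super.m((String) o);  // call the unique default
+//   }                                        // candidate, with bridging casts
+//
+// while an unfilled slot behaves as if m() threw AbstractMethodError.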
+void DefaultMethods::generate_default_methods(
+    InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS) {
+
+  // This resource mark is the bound for all memory allocation that takes
+  // place during default method processing.  After this goes out of scope,
+  // all (Resource) objects' memory will be reclaimed.  Be careful if adding an
+  // embedded resource mark under here as that memory can't be used outside
+  // whatever scope it's in.
+  ResourceMark rm(THREAD);
+
+  generic::DescriptorCache cache;
+
+  // Keep entire hierarchy alive for the duration of the computation
+  KeepAliveRegistrar keepAlive(THREAD);
+  KeepAliveVisitor loadKeepAlive(&keepAlive);
+  loadKeepAlive.run(klass);
+
+#ifndef PRODUCT
+  if (TraceDefaultMethods) {
+    ResourceMark rm;  // be careful with these!
+    tty->print_cr("Class %s requires default method processing",
+        klass->name()->as_klass_external_name());
+    PrintHierarchy printer;
+    printer.run(klass);
+  }
+#endif // ndef PRODUCT
+
+  GrowableArray<EmptyVtableSlot*>* empty_slots =
+      find_empty_vtable_slots(klass, mirandas, CHECK);
+
+  for (int i = 0; i < empty_slots->length(); ++i) {
+    EmptyVtableSlot* slot = empty_slots->at(i);
+#ifndef PRODUCT
+    if (TraceDefaultMethods) {
+      streamIndentor si(tty, 2);
+      tty->indent().print("Looking for default methods for slot ");
+      slot->print_on(tty);
+      tty->print_cr("");
+    }
+#endif // ndef PRODUCT
+    if (slot->is_bound()) {
+#ifndef PRODUCT
+      if (TraceDefaultMethods) {
+        streamIndentor si(tty, 4);
+        tty->indent().print_cr("Already bound to logical method:");
+        slot->get_binding()->print_on(tty, 1);
+      }
+#endif // ndef PRODUCT
+      continue; // covered by previous processing
+    }
+
+    generic::Context ctx(&cache);
+    FindMethodsByName visitor(&cache, slot->name(), &ctx, CHECK);
+    visitor.run(klass);
+
+    GrowableArray<MethodFamily*> discovered_families;
+    visitor.get_discovered_families(&discovered_families);
+
+#ifndef PRODUCT
+    if (TraceDefaultMethods) {
+      print_families(&discovered_families, slot->signature());
+    }
+#endif // ndef PRODUCT
+
+    // Find and populate any other slots that match the discovered families
+    for (int j = i; j < empty_slots->length(); ++j) {
+      EmptyVtableSlot* open_slot = empty_slots->at(j);
+
+      if (slot->name() == open_slot->name()) {
+        for (int k = 0; k < discovered_families.length(); ++k) {
+          MethodFamily* lm = discovered_families.at(k);
+
+          if (lm->contains_signature(open_slot->signature())) {
+            lm->determine_target(klass, CHECK);
+            open_slot->bind_family(lm);
+          }
+        }
+      }
+    }
+  }
+
+#ifndef PRODUCT
+  if (TraceDefaultMethods) {
+    tty->print_cr("Creating overpasses...");
+  }
+#endif // ndef PRODUCT
+
+  create_overpasses(empty_slots, klass, CHECK);
+
+#ifndef PRODUCT
+  if (TraceDefaultMethods) {
+    tty->print_cr("Default method processing complete");
+  }
+#endif // ndef PRODUCT
+}
+
+
+/**
+ * Generic analysis was used upon interface '_target' and found a unique
+ * default method candidate with generic signature '_method_desc'.  This
+ * method is only viable if it would also be in the set of default method
+ * candidates if we ran a full analysis on the current class.
+ *
+ * The only reason that the method would not be in the set of candidates for
+ * the current class is that there's another covariantly matching method
+ * which is "more specific" than the found method -- i.e., one could find a
+ * path in the interface hierarchy in which the matching method appears
+ * before we get to '_target'.
+ *
+ * In order to determine this, we examine all of the implemented
+ * interfaces.  If we find a path that leads to the '_target' interface, then
+ * we examine that path to see if there are any methods that would shadow
+ * the selected method along that path.
+ */
+class ShadowChecker : public HierarchyVisitor<ShadowChecker> {
+ private:
+  generic::DescriptorCache* _cache;
+  Thread* THREAD;
+
+  InstanceKlass* _target;
+
+  Symbol* _method_name;
+  InstanceKlass* _method_holder;
+  generic::MethodDescriptor* _method_desc;
+  bool _found_shadow;
+
+  bool path_has_shadow() {
+    generic::Context ctx(_cache);
+
+    for (int i = current_depth() - 1; i > 0; --i) {
+      InstanceKlass* ik = class_at_depth(i);
+      InstanceKlass* sub = class_at_depth(i + 1);
+      ctx.apply_type_arguments(sub, ik, THREAD);
+
+      if (ik->is_interface()) {
+        int end;
+        int start = ik->find_method_by_name(_method_name, &end);
+        if (start != -1) {
+          for (int j = start; j < end; ++j) {
+            Method* mo = ik->methods()->at(j);
+            generic::MethodDescriptor* md = _cache->descriptor_for(mo, THREAD);
+            if (_method_desc->covariant_match(md, &ctx)) {
+              return true;
+            }
+          }
+        }
+      }
+    }
+    return false;
+  }
+
+ public:
+
+  ShadowChecker(generic::DescriptorCache* cache, Thread* thread,
+      Symbol* name, InstanceKlass* holder, generic::MethodDescriptor* desc,
+      InstanceKlass* target)
+    : _cache(cache), THREAD(thread), _method_name(name), _method_holder(holder),
+      _method_desc(desc), _target(target), _found_shadow(false) {}
+
+  void* new_node_data(InstanceKlass* cls) { return NULL; }
+  void free_node_data(void* data) { return; }
+
+  bool visit() {
+    InstanceKlass* ik = current_class();
+    if (ik == _target && current_depth() == 1) {
+      return false; // This was the specified super -- no need to search it
+    }
+    if (ik == _method_holder || ik == _target) {
+      // We found a path that should be examined to see if it shadows _method
+      if (path_has_shadow()) {
+        _found_shadow = true;
+        cancel_iteration();
+      }
+      return false; // no need to continue up hierarchy
+    }
+    return true;
+  }
+
+  bool found_shadow() { return _found_shadow; }
+};
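+
+// For example (illustrative): suppose the current class implements both A
+// and C, where C extends B and B extends A, and the invokespecial names A.
+// The direct edge to A is skipped, but the path through C and B also reaches
+// A; if B declares a covariant match for the method, it shadows the
+// candidate found in A and the lookup fails with AbstractMethodError.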
+
+// This is called at link time when we find an invokespecial call that
+// refers to a direct superinterface.  It indicates that we should find the
+// default method in the hierarchy of that superinterface, and if that method
+// would have been a candidate from the point of view of 'this' class, then we
+// return that method.
+Method* DefaultMethods::find_super_default(
+    Klass* cls, Klass* super, Symbol* method_name, Symbol* sig, TRAPS) {
+
+  ResourceMark rm(THREAD);
+
+  assert(cls != NULL && super != NULL, "Need real classes");
+
+  InstanceKlass* current_class = InstanceKlass::cast(cls);
+  InstanceKlass* direction = InstanceKlass::cast(super);
+
+  // Keep entire hierarchy alive for the duration of the computation
+  KeepAliveRegistrar keepAlive(THREAD);
+  KeepAliveVisitor loadKeepAlive(&keepAlive);
+  loadKeepAlive.run(current_class);
+
+#ifndef PRODUCT
+  if (TraceDefaultMethods) {
+    tty->print_cr("Finding super default method %s.%s%s from %s",
+      direction->name()->as_C_string(),
+      method_name->as_C_string(), sig->as_C_string(),
+      current_class->name()->as_C_string());
+  }
+#endif // ndef PRODUCT
+
+  if (!direction->is_interface()) {
+    // We should not be here
+    return NULL;
+  }
+
+  generic::DescriptorCache cache;
+  generic::Context ctx(&cache);
+
+  // Prime the initial generic context for current -> direction
+  ctx.apply_type_arguments(current_class, direction, CHECK_NULL);
+
+  FindMethodsByName visitor(&cache, method_name, &ctx, CHECK_NULL);
+  visitor.run(direction);
+
+  GrowableArray<MethodFamily*> families;
+  visitor.get_discovered_families(&families);
+
+#ifndef PRODUCT
+  if (TraceDefaultMethods) {
+    print_families(&families, sig);
+  }
+#endif // ndef PRODUCT
+
+  MethodFamily* selected_family = NULL;
+
+  for (int i = 0; i < families.length(); ++i) {
+    MethodFamily* lm = families.at(i);
+    if (lm->contains_signature(sig)) {
+      lm->determine_target(current_class, CHECK_NULL);
+      selected_family = lm;
+    }
+  }
+
+  if (selected_family->has_target()) {
+    Method* target = selected_family->get_selected_target();
+    InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
+
+    // Verify that the identified method is valid from the context of
+    // the current class
+    ShadowChecker checker(&cache, THREAD, target->name(),
+        holder, selected_family->descriptor(), direction);
+    checker.run(current_class);
+
+    if (checker.found_shadow()) {
+#ifndef PRODUCT
+      if (TraceDefaultMethods) {
+        tty->print_cr("    Only candidate found was shadowed.");
+      }
+#endif // ndef PRODUCT
+      THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
+                 "Accessible default method not found", NULL);
+    } else {
+#ifndef PRODUCT
+      if (TraceDefaultMethods) {
+        tty->print("    Returning ");
+        print_method(tty, target, true);
+        tty->print_cr("");
+      }
+#endif // ndef PRODUCT
+      return target;
+    }
+  } else {
+    assert(selected_family->throws_exception(), "must have target or throw");
+    THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
+               selected_family->get_exception_message()->as_C_string(), NULL);
+  }
+}
+
+
+static int assemble_redirect(
+    BytecodeConstantPool* cp, BytecodeBuffer* buffer,
+    Symbol* incoming, Method* target, TRAPS) {
+
+  BytecodeAssembler assem(buffer, cp);
+
+  SignatureStream in(incoming, true);
+  SignatureStream out(target->signature(), true);
+  u2 parameter_count = 0;
+
+  assem.aload(parameter_count++); // load 'this'
+
+  while (!in.at_return_type()) {
+    assert(!out.at_return_type(), "Parameter counts do not match");
+    BasicType bt = in.type();
+    assert(out.type() == bt, "Parameter types are not compatible");
+    assem.load(bt, parameter_count);
+    if (in.is_object() && in.as_symbol(THREAD) != out.as_symbol(THREAD)) {
+      assem.checkcast(out.as_symbol(THREAD));
+    } else if (bt == T_LONG || bt == T_DOUBLE) {
+      ++parameter_count; // longs and doubles use two slots
+    }
+    ++parameter_count;
+    in.next();
+    out.next();
+  }
+  assert(out.at_return_type(), "Parameter counts do not match");
+  assert(in.type() == out.type(), "Return types are not compatible");
+
+  if (parameter_count == 1 && (in.type() == T_LONG || in.type() == T_DOUBLE)) {
+    ++parameter_count; // need room for return value
+  }
+  if (target->method_holder()->is_interface()) {
+    assem.invokespecial(target);
+  } else {
+    assem.invokevirtual(target);
+  }
+
+  if (in.is_object() && in.as_symbol(THREAD) != out.as_symbol(THREAD)) {
+    assem.checkcast(in.as_symbol(THREAD));
+  }
+  assem._return(in.type());
+  return parameter_count;
+}
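+
+// As an illustration (hypothetical signatures): redirecting an incoming slot
+// (Ljava/lang/Object;)Ljava/lang/Object; to a target declared as
+// (Ljava/lang/String;)Ljava/lang/String; assembles roughly:
+//
+//   aload_0                      // 'this'
+//   aload_1                      // the parameter
+//   checkcast java/lang/String   // parameter symbols differ
+//   invokespecial <target>       // invokevirtual if the holder is a class
+//   checkcast java/lang/Object   // return symbols differ
+//   areturn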
+
+static int assemble_abstract_method_error(
+    BytecodeConstantPool* cp, BytecodeBuffer* buffer, Symbol* message, TRAPS) {
+
+  Symbol* errorName = vmSymbols::java_lang_AbstractMethodError();
+  Symbol* init = vmSymbols::object_initializer_name();
+  Symbol* sig = vmSymbols::string_void_signature();
+
+  BytecodeAssembler assem(buffer, cp);
+
+  assem._new(errorName);
+  assem.dup();
+  assem.load_string(message);
+  assem.invokespecial(errorName, init, sig);
+  assem.athrow();
+
+  return 3; // max stack size: [ exception, exception, string ]
+}
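+
+// The assembled sequence is: new java/lang/AbstractMethodError; dup;
+// ldc <message>; invokespecial <init>(Ljava/lang/String;)V; athrow.
+// The operand stack peaks at [ error, error, message ], hence size 3.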
+
+static Method* new_method(
+    BytecodeConstantPool* cp, BytecodeBuffer* bytecodes, Symbol* name,
+    Symbol* sig, AccessFlags flags, int max_stack, int params,
+    ConstMethod::MethodType mt, TRAPS) {
+
+  address code_start = static_cast<address>(bytecodes->adr_at(0));
+  int code_length = bytecodes->length();
+
+  Method* m = Method::allocate(cp->pool_holder()->class_loader_data(),
+      code_length, flags, 0, 0, 0, 0, mt, CHECK_NULL);
+
+  m->set_constants(NULL); // This will get filled in later
+  m->set_name_index(cp->utf8(name));
+  m->set_signature_index(cp->utf8(sig));
+  m->set_generic_signature_index(0);
+#ifdef CC_INTERP
+  ResultTypeFinder rtf(sig);
+  m->set_result_index(rtf.type());
+#endif
+  m->set_size_of_parameters(params);
+  m->set_max_stack(max_stack);
+  m->set_max_locals(params);
+  m->constMethod()->set_stackmap_data(NULL);
+  m->set_code(code_start);
+  m->set_force_inline(true);
+
+  return m;
+}
+
+static void switchover_constant_pool(BytecodeConstantPool* bpool,
+    InstanceKlass* klass, GrowableArray<Method*>* new_methods, TRAPS) {
+
+  if (new_methods->length() > 0) {
+    ConstantPool* cp = bpool->create_constant_pool(CHECK);
+    if (cp != klass->constants()) {
+      klass->class_loader_data()->add_to_deallocate_list(klass->constants());
+      klass->set_constants(cp);
+      cp->set_pool_holder(klass);
+
+      for (int i = 0; i < new_methods->length(); ++i) {
+        new_methods->at(i)->set_constants(cp);
+      }
+      for (int i = 0; i < klass->methods()->length(); ++i) {
+        Method* mo = klass->methods()->at(i);
+        mo->set_constants(cp);
+      }
+    }
+  }
+}
+
+// A "bridge" is a method created by javac to bridge the gap between
+// an implementation and a generically-compatible, but different, signature.
+// Bridges have actual bytecode implementation in classfiles.
+// An "overpass", on the other hand, performs the same function as a bridge
+// but does not occur in a classfile; the VM creates the overpass itself
+// when it needs a path to get from a call site to a default method and
+// no bridge exists.
+static void create_overpasses(
+    GrowableArray<EmptyVtableSlot*>* slots,
+    InstanceKlass* klass, TRAPS) {
+
+  GrowableArray<Method*> overpasses;
+  BytecodeConstantPool bpool(klass->constants());
+
+  for (int i = 0; i < slots->length(); ++i) {
+    EmptyVtableSlot* slot = slots->at(i);
+
+    if (slot->is_bound()) {
+      MethodFamily* method = slot->get_binding();
+      int max_stack = 0;
+      BytecodeBuffer buffer;
+
+#ifndef PRODUCT
+      if (TraceDefaultMethods) {
+        tty->print("for slot: ");
+        slot->print_on(tty);
+        tty->print_cr("");
+        if (method->has_target()) {
+          method->print_selected(tty, 1);
+        } else {
+          method->print_exception(tty, 1);
+        }
+      }
+#endif // ndef PRODUCT
+      if (method->has_target()) {
+        Method* selected = method->get_selected_target();
+        max_stack = assemble_redirect(
+            &bpool, &buffer, slot->signature(), selected, CHECK);
+      } else if (method->throws_exception()) {
+        max_stack = assemble_abstract_method_error(
+            &bpool, &buffer, method->get_exception_message(), CHECK);
+      }
+      AccessFlags flags = accessFlags_from(
+          JVM_ACC_PUBLIC | JVM_ACC_SYNTHETIC | JVM_ACC_BRIDGE);
+      Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
+          flags, max_stack, slot->size_of_parameters(),
+          ConstMethod::OVERPASS, CHECK);
+      if (m != NULL) {
+        overpasses.push(m);
+      }
+    }
+  }
+
+#ifndef PRODUCT
+  if (TraceDefaultMethods) {
+    tty->print_cr("Created %d overpass methods", overpasses.length());
+  }
+#endif // ndef PRODUCT
+
+  switchover_constant_pool(&bpool, klass, &overpasses, CHECK);
+  merge_in_new_methods(klass, &overpasses, CHECK);
+}
+
+static void sort_methods(GrowableArray<Method*>* methods) {
+  // Note that this must sort using the same key as is used for sorting
+  // methods in InstanceKlass.
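+  // Name Symbols are canonicalized, so comparing their addresses groups
+  // identical names together; the arrays involved here are small, so a
+  // simple bubble sort suffices.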
+  bool sorted = true;
+  for (int i = methods->length() - 1; i > 0; --i) {
+    for (int j = 0; j < i; ++j) {
+      Method* m1 = methods->at(j);
+      Method* m2 = methods->at(j + 1);
+      if ((uintptr_t)m1->name() > (uintptr_t)m2->name()) {
+        methods->at_put(j, m2);
+        methods->at_put(j + 1, m1);
+        sorted = false;
+      }
+    }
+    if (sorted) break;
+    sorted = true;
+  }
+#ifdef ASSERT
+  uintptr_t prev = 0;
+  for (int i = 0; i < methods->length(); ++i) {
+    Method* mh = methods->at(i);
+    uintptr_t nv = (uintptr_t)mh->name();
+    assert(nv >= prev, "Incorrect overpass method ordering");
+    prev = nv;
+  }
+#endif
+}
+
+static void merge_in_new_methods(InstanceKlass* klass,
+    GrowableArray<Method*>* new_methods, TRAPS) {
+
+  enum { ANNOTATIONS, PARAMETERS, DEFAULTS, NUM_ARRAYS };
+
+  Array<AnnotationArray*>* original_annots[NUM_ARRAYS];
+
+  Array<Method*>* original_methods = klass->methods();
+  Annotations* annots = klass->annotations();
+  original_annots[ANNOTATIONS] = annots->methods_annotations();
+  original_annots[PARAMETERS]  = annots->methods_parameter_annotations();
+  original_annots[DEFAULTS]    = annots->methods_default_annotations();
+
+  Array<int>* original_ordering = klass->method_ordering();
+  Array<int>* merged_ordering = Universe::the_empty_int_array();
+
+  int new_size = klass->methods()->length() + new_methods->length();
+
+  Array<AnnotationArray*>* merged_annots[NUM_ARRAYS];
+
+  Array<Method*>* merged_methods = MetadataFactory::new_array<Method*>(
+      klass->class_loader_data(), new_size, NULL, CHECK);
+  for (int i = 0; i < NUM_ARRAYS; ++i) {
+    if (original_annots[i] != NULL) {
+      merged_annots[i] = MetadataFactory::new_array<AnnotationArray*>(
+          klass->class_loader_data(), new_size, CHECK);
+    } else {
+      merged_annots[i] = NULL;
+    }
+  }
+  if (original_ordering != NULL && original_ordering->length() > 0) {
+    merged_ordering = MetadataFactory::new_array<int>(
+        klass->class_loader_data(), new_size, CHECK);
+  }
+  int method_order_index = klass->methods()->length();
+
+  sort_methods(new_methods);
+
+  // Perform grand merge of existing methods and new methods
+  int orig_idx = 0;
+  int new_idx = 0;
+
+  for (int i = 0; i < new_size; ++i) {
+    Method* orig_method = NULL;
+    Method* new_method = NULL;
+    if (orig_idx < original_methods->length()) {
+      orig_method = original_methods->at(orig_idx);
+    }
+    if (new_idx < new_methods->length()) {
+      new_method = new_methods->at(new_idx);
+    }
+
+    if (orig_method != NULL &&
+        (new_method == NULL || orig_method->name() < new_method->name())) {
+      merged_methods->at_put(i, orig_method);
+      original_methods->at_put(orig_idx, NULL);
+      for (int j = 0; j < NUM_ARRAYS; ++j) {
+        if (merged_annots[j] != NULL) {
+          merged_annots[j]->at_put(i, original_annots[j]->at(orig_idx));
+          original_annots[j]->at_put(orig_idx, NULL);
+        }
+      }
+      if (merged_ordering->length() > 0) {
+        merged_ordering->at_put(i, original_ordering->at(orig_idx));
+      }
+      ++orig_idx;
+    } else {
+      merged_methods->at_put(i, new_method);
+      if (merged_ordering->length() > 0) {
+        merged_ordering->at_put(i, method_order_index++);
+      }
+      ++new_idx;
+    }
+    // update idnum for new location
+    merged_methods->at(i)->set_method_idnum(i);
+  }
+
+  // Verify correct order
+#ifdef ASSERT
+  uintptr_t prev = 0;
+  for (int i = 0; i < merged_methods->length(); ++i) {
+    Method* mo = merged_methods->at(i);
+    uintptr_t nv = (uintptr_t)mo->name();
+    assert(nv >= prev, "Incorrect method ordering");
+    prev = nv;
+  }
+#endif
+
+  // Replace klass methods with new merged lists
+  klass->set_methods(merged_methods);
+  annots->set_methods_annotations(merged_annots[ANNOTATIONS]);
+  annots->set_methods_parameter_annotations(merged_annots[PARAMETERS]);
+  annots->set_methods_default_annotations(merged_annots[DEFAULTS]);
+
+  ClassLoaderData* cld = klass->class_loader_data();
+  MetadataFactory::free_array(cld, original_methods);
+  for (int i = 0; i < NUM_ARRAYS; ++i) {
+    MetadataFactory::free_array(cld, original_annots[i]);
+  }
+  if (original_ordering->length() > 0) {
+    klass->set_method_ordering(merged_ordering);
+    MetadataFactory::free_array(cld, original_ordering);
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/defaultMethods.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_DEFAULTMETHODS_HPP
+#define SHARE_VM_CLASSFILE_DEFAULTMETHODS_HPP
+
+#include "runtime/handles.hpp"
+#include "utilities/growableArray.hpp"
+#include "utilities/exceptions.hpp"
+
+class InstanceKlass;
+class Symbol;
+class Method;
+
+class DefaultMethods : AllStatic {
+ public:
+
+  // Analyzes the class and determines which default methods are inherited
+  // from interfaces (and have no other implementation).  For each such
+  // method (and each different signature the method could have), creates
+  // an "overpass" method: an instance method that redirects to the
+  // default method.  Overpass methods are added to the method list of
+  // the class.
+  static void generate_default_methods(
+      InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS);
+
+
+  // Called during linking when an invokespecial to a direct interface
+  // method is found.  Selects and returns a method if there is a unique
+  // default method in the 'super_iface' part of the hierarchy which is
+  // also a candidate default for 'this_klass'.  Otherwise throws an AME.
+  static Method* find_super_default(
+      Klass* this_klass, Klass* super_iface,
+      Symbol* method_name, Symbol* method_sig, TRAPS);
+};
+
+#endif // SHARE_VM_CLASSFILE_DEFAULTMETHODS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/genericSignatures.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -0,0 +1,1272 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "classfile/genericSignatures.hpp"
+#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "memory/resourceArea.hpp"
+
+namespace generic {
+
+// Helper class for parsing the generic signature Symbol in klass and methods
+class DescriptorStream : public ResourceObj {
+ private:
+  Symbol* _symbol;
+  int _offset;
+  int _mark;
+  const char* _parse_error;
+
+  void set_parse_error(const char* error) {
+    assert(error != NULL, "Can't set NULL error string");
+    _parse_error = error;
+  }
+
+ public:
+  DescriptorStream(Symbol* sym)
+      : _symbol(sym), _offset(0), _mark(-1), _parse_error(NULL) {}
+
+  const char* parse_error() const {
+    return _parse_error;
+  }
+
+  bool at_end() { return _offset >= _symbol->utf8_length(); }
+
+  char peek() {
+    if (at_end()) {
+      set_parse_error("Peeking past end of signature");
+      return '\0';
+    } else {
+      return _symbol->byte_at(_offset);
+    }
+  }
+
+  char read() {
+    if (at_end()) {
+      set_parse_error("Reading past end of signature");
+      return '\0';
+    } else {
+      return _symbol->byte_at(_offset++);
+    }
+  }
+
+  void read(char expected) {
+    char c = read();
+    assert_char(c, expected, 0);
+  }
+
+  void assert_char(char c, char expected, int pos = -1) {
+    if (c != expected) {
+      const char* fmt = "Parse error at %d: expected %c but got %c";
+      size_t len = strlen(fmt) + 5;
+      char* buffer = NEW_RESOURCE_ARRAY(char, len);
+      jio_snprintf(buffer, len, fmt, _offset + pos, expected, c);
+      set_parse_error(buffer);
+    }
+  }
+
+  void push(char c) {
+    assert(c == _symbol->byte_at(_offset - 1), "Pushing back wrong value");
+    --_offset;
+  }
+
+  void expect_end() {
+    if (!at_end()) {
+      set_parse_error("Unexpected data trailing signature");
+    }
+  }
+
+  bool has_mark() { return _mark != -1; }
+
+  void set_mark() {
+    _mark = _offset;
+  }
+
+  Identifier* identifier_from_mark() {
+    assert(has_mark(), "Mark should be set");
+    if (!has_mark()) {
+      set_parse_error("Expected mark to be set");
+      return NULL;
+    } else {
+      Identifier* id = new Identifier(_symbol, _mark, _offset - 1);
+      _mark = -1;
+      return id;
+    }
+  }
+};
+
+
+#define CHECK_FOR_PARSE_ERROR()         \
+  if (STREAM->parse_error() != NULL) {   \
+    if (VerifyGenericSignatures) {      \
+      fatal(STREAM->parse_error());      \
+    }                                   \
+    return NULL;                        \
+  } 0
+
+#define READ() STREAM->read(); CHECK_FOR_PARSE_ERROR()
+#define PEEK() STREAM->peek(); CHECK_FOR_PARSE_ERROR()
+#define PUSH(c) STREAM->push(c)
+#define EXPECT(c) STREAM->read(c); CHECK_FOR_PARSE_ERROR()
+#define EXPECTED(c, ch) STREAM->assert_char(c, ch); CHECK_FOR_PARSE_ERROR()
+#define EXPECT_END() STREAM->expect_end(); CHECK_FOR_PARSE_ERROR()
+
+#define CHECK_STREAM STREAM); CHECK_FOR_PARSE_ERROR(); (0
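+
+// Note: CHECK_STREAM is passed as a pseudo-argument, in the spirit of the
+// VM's TRAPS/CHECK macros.  For example,
+//   ClassType* super = ClassType::parse_generic_signature(CHECK_STREAM);
+// expands to
+//   ClassType* super = ClassType::parse_generic_signature(STREAM);
+//   CHECK_FOR_PARSE_ERROR(); (0);
+// so a parse error causes an early return before the result is consumed.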
+
+#ifndef PRODUCT
+void Identifier::print_on(outputStream* str) const {
+  for (int i = _begin; i < _end; ++i) {
+    str->print("%c", (char)_sym->byte_at(i));
+  }
+}
+#endif // ndef PRODUCT
+
+bool Identifier::equals(Identifier* other) {
+  if (_sym == other->_sym && _begin == other->_begin && _end == other->_end) {
+    return true;
+  } else if (_end - _begin != other->_end - other->_begin) {
+    return false;
+  } else {
+    size_t len = _end - _begin;
+    char* addr = ((char*)_sym->bytes()) + _begin;
+    char* oaddr = ((char*)other->_sym->bytes()) + other->_begin;
+    return strncmp(addr, oaddr, len) == 0;
+  }
+}
+
+bool Identifier::equals(Symbol* sym) {
+  Identifier id(sym, 0, sym->utf8_length());
+  return equals(&id);
+}
+
+/**
+ * A formal type parameter may be found in the enclosing class, but it could
+ * also come from an enclosing method or outer class, in the case of inner-outer
+ * classes or anonymous classes.  For example:
+ *
+ * class Outer<T,V> {
+ *   class Inner<W> {
+ *     void m(T t, V v, W w);
+ *   }
+ * }
+ *
+ * In this case, the type variables in m()'s signature are not all found in the
+ * immediate enclosing class (Inner).  class Inner has only type parameter W,
+ * but its outer_class field will reference Outer's descriptor, which contains
+ * T & V (no outer_method in this case).
+ *
+ * If you have an anonymous class, it has both an enclosing method *and* an
+ * enclosing class where type parameters can be declared:
+ *
+ * class MOuter<T> {
+ *   <V> void bar(V v) {
+ *     Runnable r = new Runnable() {
+ *       public void run() {}
+ *       public void foo(T t, V v) { ... }
+ *     };
+ *   }
+ * }
+ *
+ * In this case, foo will be a member of some class, Runnable$1, which has no
+ * formal parameters itself, but has an outer_method (bar()) which provides
+ * type parameter V, and an outer class MOuter with type parameter T.
+ *
+ * It is also possible that the outer class is itself an inner class to some
+ * other class (or an anonymous class with an enclosing method), so we need to
+ * follow the outer_class/outer_method chain to its end when looking for a
+ * type parameter.
+ */
+TypeParameter* Descriptor::find_type_parameter(Identifier* id, int* depth) {
+
+  int current_depth = 0;
+
+  MethodDescriptor* outer_method = as_method_signature();
+  ClassDescriptor* outer_class = as_class_signature();
+
+  if (outer_class == NULL) { // 'this' is a method signature; use the holder
+    outer_class = outer_method->outer_class();
+  }
+
+  while (outer_method != NULL || outer_class != NULL) {
+    if (outer_method != NULL) {
+      for (int i = 0; i < outer_method->type_parameters().length(); ++i) {
+        TypeParameter* p = outer_method->type_parameters().at(i);
+        if (p->identifier()->equals(id)) {
+          *depth = -1; // indicates that this is a method parameter
+          return p;
+        }
+      }
+    }
+    if (outer_class != NULL) {
+      for (int i = 0; i < outer_class->type_parameters().length(); ++i) {
+        TypeParameter* p = outer_class->type_parameters().at(i);
+        if (p->identifier()->equals(id)) {
+          *depth = current_depth;
+          return p;
+        }
+      }
+      outer_method = outer_class->outer_method();
+      outer_class = outer_class->outer_class();
+      ++current_depth;
+    }
+  }
+
+  if (VerifyGenericSignatures) {
+    fatal("Could not resolve identifier");
+  }
+
+  return NULL;
+}
+
+ClassDescriptor* ClassDescriptor::parse_generic_signature(Klass* klass, TRAPS) {
+  return parse_generic_signature(klass, NULL, CHECK_NULL);
+}
+
+ClassDescriptor* ClassDescriptor::parse_generic_signature(
+      Klass* klass, Symbol* original_name, TRAPS) {
+
+  InstanceKlass* ik = InstanceKlass::cast(klass);
+  Symbol* sym = ik->generic_signature();
+
+  ClassDescriptor* spec;
+
+  if (sym == NULL || (spec = ClassDescriptor::parse_generic_signature(sym)) == NULL) {
+    spec = ClassDescriptor::placeholder(ik);
+  }
+
+  u2 outer_index = get_outer_class_index(ik, CHECK_NULL);
+  if (outer_index != 0) {
+    if (original_name == NULL) {
+      original_name = ik->name();
+    }
+    Handle class_loader = Handle(THREAD, ik->class_loader());
+    Handle protection_domain = Handle(THREAD, ik->protection_domain());
+
+    Symbol* outer_name = ik->constants()->klass_name_at(outer_index);
+    Klass* outer = SystemDictionary::find(
+        outer_name, class_loader, protection_domain, CHECK_NULL);
+    if (outer == NULL && !THREAD->is_Compiler_thread()) {
+      outer = SystemDictionary::resolve_super_or_fail(original_name,
+          outer_name, class_loader, protection_domain, false, CHECK_NULL);
+    }
+
+    InstanceKlass* outer_ik = NULL; // remains NULL if the outer class cannot be loaded
+    ClassDescriptor* outer_spec = NULL;
+    if (outer == NULL) {
+      outer_spec = ClassDescriptor::placeholder(ik);
+      assert(false, "Outer class not loaded and not loadable from here");
+    } else {
+      outer_ik = InstanceKlass::cast(outer);
+      outer_spec = parse_generic_signature(outer, original_name, CHECK_NULL);
+    }
+    spec->set_outer_class(outer_spec);
+
+    u2 encl_method_idx = ik->enclosing_method_method_index();
+    if (encl_method_idx != 0 && outer_ik != NULL) {
+      ConstantPool* cp = ik->constants();
+      u2 name_index = cp->name_ref_index_at(encl_method_idx);
+      u2 sig_index = cp->signature_ref_index_at(encl_method_idx);
+      Symbol* name = cp->symbol_at(name_index);
+      Symbol* sig = cp->symbol_at(sig_index);
+      Method* m = outer_ik->find_method(name, sig);
+      if (m != NULL) {
+        Symbol* gsig = m->generic_signature();
+        if (gsig != NULL) {
+          MethodDescriptor* gms = MethodDescriptor::parse_generic_signature(gsig, outer_spec);
+          spec->set_outer_method(gms);
+        }
+      } else if (VerifyGenericSignatures) {
+        ResourceMark rm;
+        stringStream ss;
+        ss.print("Could not find method %s %s in class %s",
+          name->as_C_string(), sig->as_C_string(), outer_name->as_C_string());
+        fatal(ss.as_string());
+      }
+    }
+  }
+
+  spec->bind_variables_to_parameters();
+  return spec;
+}
+
+ClassDescriptor* ClassDescriptor::placeholder(InstanceKlass* klass) {
+  GrowableArray<TypeParameter*> formals;
+  GrowableArray<ClassType*> interfaces;
+  ClassType* super_type = NULL;
+
+  Klass* super_klass = klass->super();
+  if (super_klass != NULL) {
+    InstanceKlass* super = InstanceKlass::cast(super_klass);
+    super_type = ClassType::from_symbol(super->name());
+  }
+
+  for (int i = 0; i < klass->local_interfaces()->length(); ++i) {
+    InstanceKlass* iface = InstanceKlass::cast(klass->local_interfaces()->at(i));
+    interfaces.append(ClassType::from_symbol(iface->name()));
+  }
+  return new ClassDescriptor(formals, super_type, interfaces);
+}
+
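+// For reference, the parser below follows the JVMS ClassSignature grammar:
+//
+//   ClassSignature:
+//     FormalTypeParameters_opt SuperclassSignature SuperinterfaceSignature*
+//
+// For example (an illustrative class), "class Foo<T> extends ArrayList<String>"
+// has the generic signature
+//   "<T:Ljava/lang/Object;>Ljava/util/ArrayList<Ljava/lang/String;>;"
+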
+ClassDescriptor* ClassDescriptor::parse_generic_signature(Symbol* sym) {
+
+  DescriptorStream ds(sym);
+  DescriptorStream* STREAM = &ds;
+
+  GrowableArray<TypeParameter*> parameters(8);
+  char c = READ();
+  if (c == '<') {
+    c = READ();
+    while (c != '>') {
+      PUSH(c);
+      TypeParameter* ftp = TypeParameter::parse_generic_signature(CHECK_STREAM);
+      parameters.append(ftp);
+      c = READ();
+    }
+  } else {
+    PUSH(c);
+  }
+
+  EXPECT('L');
+  ClassType* super = ClassType::parse_generic_signature(CHECK_STREAM);
+
+  GrowableArray<ClassType*> signatures(2);
+  while (!STREAM->at_end()) {
+    EXPECT('L');
+    ClassType* iface = ClassType::parse_generic_signature(CHECK_STREAM);
+    signatures.append(iface);
+  }
+
+  EXPECT_END();
+
+  return new ClassDescriptor(parameters, super, signatures);
+}
+
+#ifndef PRODUCT
+void ClassDescriptor::print_on(outputStream* str) const {
+  str->indent().print_cr("ClassDescriptor {");
+  {
+    streamIndentor si(str);
+    if (_type_parameters.length() > 0) {
+      str->indent().print_cr("Formals {");
+      {
+        streamIndentor si(str);
+        for (int i = 0; i < _type_parameters.length(); ++i) {
+          _type_parameters.at(i)->print_on(str);
+        }
+      }
+      str->indent().print_cr("}");
+    }
+    if (_super != NULL) {
+      str->indent().print_cr("Superclass: ");
+      {
+        streamIndentor si(str);
+        _super->print_on(str);
+      }
+    }
+    if (_interfaces.length() > 0) {
+      str->indent().print_cr("SuperInterfaces: {");
+      {
+        streamIndentor si(str);
+        for (int i = 0; i < _interfaces.length(); ++i) {
+          _interfaces.at(i)->print_on(str);
+        }
+      }
+      str->indent().print_cr("}");
+    }
+    if (_outer_method != NULL) {
+      str->indent().print_cr("Outer Method: {");
+      {
+        streamIndentor si(str);
+        _outer_method->print_on(str);
+      }
+      str->indent().print_cr("}");
+    }
+    if (_outer_class != NULL) {
+      str->indent().print_cr("Outer Class: {");
+      {
+        streamIndentor si(str);
+        _outer_class->print_on(str);
+      }
+      str->indent().print_cr("}");
+    }
+  }
+  str->indent().print_cr("}");
+}
+#endif // ndef PRODUCT
+
+ClassType* ClassDescriptor::interface_desc(Symbol* sym) {
+  for (int i = 0; i < _interfaces.length(); ++i) {
+    if (_interfaces.at(i)->identifier()->equals(sym)) {
+      return _interfaces.at(i);
+    }
+  }
+  if (VerifyGenericSignatures) {
+    fatal("Did not find expected interface");
+  }
+  return NULL;
+}
+
+void ClassDescriptor::bind_variables_to_parameters() {
+  if (_outer_class != NULL) {
+    _outer_class->bind_variables_to_parameters();
+  }
+  if (_outer_method != NULL) {
+    _outer_method->bind_variables_to_parameters();
+  }
+  for (int i = 0; i < _type_parameters.length(); ++i) {
+    _type_parameters.at(i)->bind_variables_to_parameters(this, i);
+  }
+  if (_super != NULL) {
+    _super->bind_variables_to_parameters(this);
+  }
+  for (int i = 0; i < _interfaces.length(); ++i) {
+    _interfaces.at(i)->bind_variables_to_parameters(this);
+  }
+}
+
+ClassDescriptor* ClassDescriptor::canonicalize(Context* ctx) {
+
+  GrowableArray<TypeParameter*> type_params(_type_parameters.length());
+  for (int i = 0; i < _type_parameters.length(); ++i) {
+    type_params.append(_type_parameters.at(i)->canonicalize(ctx, 0));
+  }
+
+  ClassDescriptor* outer = _outer_class == NULL ? NULL :
+      _outer_class->canonicalize(ctx);
+
+  ClassType* super = _super == NULL ? NULL : _super->canonicalize(ctx, 0);
+
+  GrowableArray<ClassType*> interfaces(_interfaces.length());
+  for (int i = 0; i < _interfaces.length(); ++i) {
+    interfaces.append(_interfaces.at(i)->canonicalize(ctx, 0));
+  }
+
+  MethodDescriptor* md = _outer_method == NULL ? NULL :
+      _outer_method->canonicalize(ctx);
+
+  return new ClassDescriptor(type_params, super, interfaces, outer, md);
+}
+
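+// Scans the InnerClasses attribute for the record describing this class and
+// returns the constant-pool index of its declaring (outer) class.  Falls
+// back to the EnclosingMethod class index for anonymous and local classes,
+// and returns 0 for a top-level class.
+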
+u2 ClassDescriptor::get_outer_class_index(InstanceKlass* klass, TRAPS) {
+  int inner_index = InstanceKlass::inner_class_inner_class_info_offset;
+  int outer_index = InstanceKlass::inner_class_outer_class_info_offset;
+  int name_offset = InstanceKlass::inner_class_inner_name_offset;
+  int next_offset = InstanceKlass::inner_class_next_offset;
+
+  if (klass->inner_classes() == NULL || klass->inner_classes()->length() == 0) {
+    // No inner class info => no declaring class
+    return 0;
+  }
+
+  Array<u2>* i_icls = klass->inner_classes();
+  ConstantPool* i_cp = klass->constants();
+  int i_length = i_icls->length();
+
+  // Find inner_klass attribute
+  // '<=' so that the last record is examined even when the attribute has no
+  // EnclosingMethod tail entries.
+  for (int i = 0; i + next_offset <= i_length; i += next_offset) {
+    u2 ioff = i_icls->at(i + inner_index);
+    u2 ooff = i_icls->at(i + outer_index);
+    u2 noff = i_icls->at(i + name_offset);
+    if (ioff != 0) {
+      // Check to see if the name matches the class we're looking for
+      // before attempting to find the class.
+      if (i_cp->klass_name_at_matches(klass, ioff) && ooff != 0) {
+        return ooff;
+      }
+    }
+  }
+
+  // The class may be anonymous or local; check the EnclosingMethod attribute.
+  u2 encl_method_class_idx = klass->enclosing_method_class_index();
+  if (encl_method_class_idx != 0) {
+    return encl_method_class_idx;
+  }
+
+  return 0;
+}
+
+MethodDescriptor* MethodDescriptor::parse_generic_signature(Method* m, ClassDescriptor* outer) {
+  Symbol* generic_sig = m->generic_signature();
+  MethodDescriptor* md = NULL;
+  if (generic_sig == NULL || (md = parse_generic_signature(generic_sig, outer)) == NULL) {
+    md = parse_generic_signature(m->signature(), outer);
+  }
+  assert(md != NULL, "Could not parse method signature");
+  md->bind_variables_to_parameters();
+  return md;
+}
+
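+// For reference, the parser below follows the JVMS MethodTypeSignature grammar:
+//
+//   MethodTypeSignature:
+//     FormalTypeParameters_opt (TypeSignature*) ReturnType ThrowsSignature*
+//
+// For example (an illustrative method), "<V> V id(V v) throws Exception"
+// has the generic signature
+//   "<V:Ljava/lang/Object;>(TV;)TV;^Ljava/lang/Exception;"
+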
+MethodDescriptor* MethodDescriptor::parse_generic_signature(Symbol* sym, ClassDescriptor* outer) {
+
+  DescriptorStream ds(sym);
+  DescriptorStream* STREAM = &ds;
+
+  GrowableArray<TypeParameter*> params(8);
+  char c = READ();
+  if (c == '<') {
+    c = READ();
+    while (c != '>') {
+      PUSH(c);
+      TypeParameter* ftp = TypeParameter::parse_generic_signature(CHECK_STREAM);
+      params.append(ftp);
+      c = READ();
+    }
+  } else {
+    PUSH(c);
+  }
+
+  EXPECT('(');
+
+  GrowableArray<Type*> parameters(8);
+  c = READ();
+  while (c != ')') {
+    PUSH(c);
+    Type* arg = Type::parse_generic_signature(CHECK_STREAM);
+    parameters.append(arg);
+    c = READ();
+  }
+
+  Type* rt = Type::parse_generic_signature(CHECK_STREAM);
+
+  GrowableArray<Type*> throws;
+  while (!STREAM->at_end()) {
+    EXPECT('^');
+    Type* spec = Type::parse_generic_signature(CHECK_STREAM);
+    throws.append(spec);
+  }
+
+  return new MethodDescriptor(params, outer, parameters, rt, throws);
+}
+
+void MethodDescriptor::bind_variables_to_parameters() {
+  for (int i = 0; i < _type_parameters.length(); ++i) {
+    _type_parameters.at(i)->bind_variables_to_parameters(this, i);
+  }
+  for (int i = 0; i < _parameters.length(); ++i) {
+    _parameters.at(i)->bind_variables_to_parameters(this);
+  }
+  _return_type->bind_variables_to_parameters(this);
+  for (int i = 0; i < _throws.length(); ++i) {
+    _throws.at(i)->bind_variables_to_parameters(this);
+  }
+}
+
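+// Two method descriptors match covariantly if (roughly) their parameter
+// types match pairwise once type variables are resolved or erased, and the
+// return types are compatible: a primitive return must match exactly, while
+// a reference return only requires that the other method also return a
+// reference (full subtype checking is not done here).
+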
+bool MethodDescriptor::covariant_match(MethodDescriptor* other, Context* ctx) {
+
+  if (_parameters.length() == other->_parameters.length()) {
+    for (int i = 0; i < _parameters.length(); ++i) {
+      if (!_parameters.at(i)->covariant_match(other->_parameters.at(i), ctx)) {
+        return false;
+      }
+    }
+
+    if (_return_type->as_primitive() != NULL) {
+      return _return_type->covariant_match(other->_return_type, ctx);
+    } else {
+      // return type is a reference
+      return other->_return_type->as_class() != NULL ||
+             other->_return_type->as_variable() != NULL ||
+             other->_return_type->as_array() != NULL;
+    }
+  } else {
+    return false;
+  }
+}
+
+MethodDescriptor* MethodDescriptor::canonicalize(Context* ctx) {
+
+  GrowableArray<TypeParameter*> type_params(_type_parameters.length());
+  for (int i = 0; i < _type_parameters.length(); ++i) {
+    type_params.append(_type_parameters.at(i)->canonicalize(ctx, 0));
+  }
+
+  ClassDescriptor* outer = _outer_class == NULL ? NULL :
+      _outer_class->canonicalize(ctx);
+
+  GrowableArray<Type*> params(_parameters.length());
+  for (int i = 0; i < _parameters.length(); ++i) {
+    params.append(_parameters.at(i)->canonicalize(ctx, 0));
+  }
+
+  Type* rt = _return_type->canonicalize(ctx, 0);
+
+  GrowableArray<Type*> throws(_throws.length());
+  for (int i = 0; i < _throws.length(); ++i) {
+    throws.append(_throws.at(i)->canonicalize(ctx, 0));
+  }
+
+  return new MethodDescriptor(type_params, outer, params, rt, throws);
+}
+
+#ifndef PRODUCT
+TempNewSymbol MethodDescriptor::reify_signature(Context* ctx, TRAPS) {
+  stringStream ss(256);
+
+  ss.print("(");
+  for (int i = 0; i < _parameters.length(); ++i) {
+    _parameters.at(i)->reify_signature(&ss, ctx);
+  }
+  ss.print(")");
+  _return_type->reify_signature(&ss, ctx);
+  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), THREAD);
+}
+
+void MethodDescriptor::print_on(outputStream* str) const {
+  str->indent().print_cr("MethodDescriptor {");
+  {
+    streamIndentor si(str);
+    if (_type_parameters.length() > 0) {
+      str->indent().print_cr("Formals: {");
+      {
+        streamIndentor si(str);
+        for (int i = 0; i < _type_parameters.length(); ++i) {
+          _type_parameters.at(i)->print_on(str);
+        }
+      }
+      str->indent().print_cr("}");
+    }
+    str->indent().print_cr("Parameters: {");
+    {
+      streamIndentor si(str);
+      for (int i = 0; i < _parameters.length(); ++i) {
+        _parameters.at(i)->print_on(str);
+      }
+    }
+    str->indent().print_cr("}");
+    str->indent().print_cr("Return Type: ");
+    {
+      streamIndentor si(str);
+      _return_type->print_on(str);
+    }
+
+    if (_throws.length() > 0) {
+      str->indent().print_cr("Throws: {");
+      {
+        streamIndentor si(str);
+        for (int i = 0; i < _throws.length(); ++i) {
+          _throws.at(i)->print_on(str);
+        }
+      }
+      str->indent().print_cr("}");
+    }
+  }
+  str->indent().print_cr("}");
+}
+#endif // ndef PRODUCT
+
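+// A FormalTypeParameter is "Identifier ClassBound InterfaceBound*", where
+// ClassBound is ':' followed by an optional FieldTypeSignature and each
+// InterfaceBound is ':' followed by a FieldTypeSignature.  For example
+// (illustrative), "T extends Number & Comparable<T>" encodes as
+//   "T:Ljava/lang/Number;:Ljava/lang/Comparable<TT;>;"
+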
+TypeParameter* TypeParameter::parse_generic_signature(DescriptorStream* STREAM) {
+  STREAM->set_mark();
+  char c = READ();
+  while (c != ':') {
+    c = READ();
+  }
+
+  Identifier* id = STREAM->identifier_from_mark();
+
+  ClassType* class_bound = NULL;
+  GrowableArray<ClassType*> interface_bounds(8);
+
+  c = READ();
+  if (c != '>') {
+    if (c != ':') {
+      EXPECTED(c, 'L');
+      class_bound = ClassType::parse_generic_signature(CHECK_STREAM);
+      c = READ();
+    }
+
+    while (c == ':') {
+      EXPECT('L');
+      ClassType* fts = ClassType::parse_generic_signature(CHECK_STREAM);
+      interface_bounds.append(fts);
+      c = READ();
+    }
+  }
+  PUSH(c);
+
+  return new TypeParameter(id, class_bound, interface_bounds);
+}
+
+void TypeParameter::bind_variables_to_parameters(Descriptor* sig, int position) {
+  if (_class_bound != NULL) {
+    _class_bound->bind_variables_to_parameters(sig);
+  }
+  for (int i = 0; i < _interface_bounds.length(); ++i) {
+    _interface_bounds.at(i)->bind_variables_to_parameters(sig);
+  }
+  _position = position;
+}
+
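+// Resolves a class type parameter to an actual type argument: walk
+// 'inner_depth' steps out through the enclosing classes of the context
+// entry at 'ctx_depth', then select the argument at this parameter's
+// ordinal position.  Falls back to the declared bound (i.e. erasure) when
+// no argument is available.
+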
+Type* TypeParameter::resolve(
+    Context* ctx, int inner_depth, int ctx_depth) {
+
+  if (inner_depth == -1) {
+    // This indicates that the parameter is a method type parameter, which
+    // isn't resolvable using the class hierarchy context
+    return bound();
+  }
+
+  ClassType* provider = ctx->at_depth(ctx_depth);
+  if (provider != NULL) {
+    for (int i = 0; i < inner_depth && provider != NULL; ++i) {
+      provider = provider->outer_class();
+    }
+    if (provider != NULL) {
+      TypeArgument* arg = provider->type_argument_at(_position);
+      if (arg != NULL) {
+        Type* value = arg->lower_bound();
+        return value->canonicalize(ctx, ctx_depth + 1);
+      }
+    }
+  }
+
+  return bound();
+}
+
+TypeParameter* TypeParameter::canonicalize(Context* ctx, int ctx_depth) {
+  ClassType* bound = _class_bound == NULL ? NULL :
+     _class_bound->canonicalize(ctx, ctx_depth);
+
+  GrowableArray<ClassType*> ifaces(_interface_bounds.length());
+  for (int i = 0; i < _interface_bounds.length(); ++i) {
+    ifaces.append(_interface_bounds.at(i)->canonicalize(ctx, ctx_depth));
+  }
+
+  TypeParameter* ret = new TypeParameter(_identifier, bound, ifaces);
+  ret->_position = _position;
+  return ret;
+}
+
+ClassType* TypeParameter::bound() {
+  if (_class_bound != NULL) {
+    return _class_bound;
+  }
+
+  if (_interface_bounds.length() == 1) {
+    return _interface_bounds.at(0);
+  }
+
+  return ClassType::java_lang_Object(); // TODO: investigate this case
+}
+
+#ifndef PRODUCT
+void TypeParameter::print_on(outputStream* str) const {
+  str->indent().print_cr("Formal: {");
+  {
+    streamIndentor si(str);
+
+    str->indent().print("Identifier: ");
+    _identifier->print_on(str);
+    str->print_cr("");
+    if (_class_bound != NULL) {
+      str->indent().print_cr("Class Bound: ");
+      streamIndentor si(str);
+      _class_bound->print_on(str);
+    }
+    if (_interface_bounds.length() > 0) {
+      str->indent().print_cr("Interface Bounds: {");
+      {
+        streamIndentor si(str);
+        for (int i = 0; i < _interface_bounds.length(); ++i) {
+          _interface_bounds.at(i)->print_on(str);
+        }
+      }
+      str->indent().print_cr("}");
+    }
+    str->indent().print_cr("Ordinal Position: %d", _position);
+  }
+  str->indent().print_cr("}");
+}
+#endif // ndef PRODUCT
+
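+// Dispatch on the leading character of a TypeSignature: 'L' starts a class
+// type (e.g. "Ljava/lang/String;"), 'T' a type variable (e.g. "TT;"), '['
+// an array (e.g. "[I"), and anything else is taken to be a primitive tag
+// such as 'I' or 'V'.
+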
+Type* Type::parse_generic_signature(DescriptorStream* STREAM) {
+  char c = READ();
+  switch (c) {
+    case 'L':
+      return ClassType::parse_generic_signature(CHECK_STREAM);
+    case 'T':
+      return TypeVariable::parse_generic_signature(CHECK_STREAM);
+    case '[':
+      return ArrayType::parse_generic_signature(CHECK_STREAM);
+    default:
+      return new PrimitiveType(c);
+  }
+}
+
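+// Reads one SimpleClassTypeSignature: the name and any type arguments, up to
+// the terminating ';' (end of type) or '.' (a nested class follows), as in
+// "LOuter<TT;>.Inner<TW;>;" (illustrative names).  The leading 'L' has
+// already been consumed by the caller.
+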
+Identifier* ClassType::parse_generic_signature_simple(GrowableArray<TypeArgument*>* args,
+    bool* has_inner, DescriptorStream* STREAM) {
+  STREAM->set_mark();
+
+  char c = READ();
+  while (c != ';' && c != '.' && c != '<') { c = READ(); }
+  Identifier* id = STREAM->identifier_from_mark();
+
+  if (c == '<') {
+    c = READ();
+    while (c != '>') {
+      PUSH(c);
+      TypeArgument* arg = TypeArgument::parse_generic_signature(CHECK_STREAM);
+      args->append(arg);
+      c = READ();
+    }
+    c = READ();
+  }
+
+  *has_inner = (c == '.');
+  if (!(*has_inner)) {
+    EXPECTED(c, ';');
+  }
+
+  return id;
+}
+
+ClassType* ClassType::parse_generic_signature(DescriptorStream* STREAM) {
+  return parse_generic_signature(NULL, CHECK_STREAM);
+}
+
+ClassType* ClassType::parse_generic_signature(ClassType* outer, DescriptorStream* STREAM) {
+  GrowableArray<TypeArgument*> args;
+  ClassType* gct = NULL;
+  bool has_inner = false;
+
+  Identifier* id = parse_generic_signature_simple(&args, &has_inner, STREAM);
+  if (id != NULL) {
+    gct = new ClassType(id, args, outer);
+
+    if (has_inner) {
+      gct = parse_generic_signature(gct, CHECK_STREAM);
+    }
+  }
+  return gct;
+}
+
+ClassType* ClassType::from_symbol(Symbol* sym) {
+  assert(sym != NULL, "Must not be null");
+  GrowableArray<TypeArgument*> args;
+  Identifier* id = new Identifier(sym, 0, sym->utf8_length());
+  return new ClassType(id, args, NULL);
+}
+
+ClassType* ClassType::java_lang_Object() {
+  return from_symbol(vmSymbols::java_lang_Object());
+}
+
+void ClassType::bind_variables_to_parameters(Descriptor* sig) {
+  for (int i = 0; i < _type_arguments.length(); ++i) {
+    _type_arguments.at(i)->bind_variables_to_parameters(sig);
+  }
+  if (_outer_class != NULL) {
+    _outer_class->bind_variables_to_parameters(sig);
+  }
+}
+
+TypeArgument* ClassType::type_argument_at(int i) {
+  if (i >= 0 && i < _type_arguments.length()) {
+    return _type_arguments.at(i);
+  } else {
+    return NULL;
+  }
+}
+
+#ifndef PRODUCT
+void ClassType::reify_signature(stringStream* ss, Context* ctx) {
+  ss->print("L");
+  _identifier->print_on(ss);
+  ss->print(";");
+}
+
+void ClassType::print_on(outputStream* str) const {
+  str->indent().print_cr("Class {");
+  {
+    streamIndentor si(str);
+    str->indent().print("Name: ");
+    _identifier->print_on(str);
+    str->print_cr("");
+    if (_type_arguments.length() != 0) {
+      str->indent().print_cr("Type Arguments: {");
+      {
+        streamIndentor si(str);
+        for (int j = 0; j < _type_arguments.length(); ++j) {
+          _type_arguments.at(j)->print_on(str);
+        }
+      }
+      str->indent().print_cr("}");
+    }
+    if (_outer_class != NULL) {
+      str->indent().print_cr("Outer Class: ");
+      streamIndentor sir(str);
+      _outer_class->print_on(str);
+    }
+  }
+  str->indent().print_cr("}");
+}
+#endif // ndef PRODUCT
+
+bool ClassType::covariant_match(Type* other, Context* ctx) {
+
+  if (other == this) {
+    return true;
+  }
+
+  TypeVariable* variable = other->as_variable();
+  if (variable != NULL) {
+    other = variable->resolve(ctx, 0);
+  }
+
+  ClassType* outer = outer_class();
+  ClassType* other_class = other->as_class();
+
+  if (other_class == NULL ||
+      (outer == NULL) != (other_class->outer_class() == NULL)) {
+    return false;
+  }
+
+  if (!_identifier->equals(other_class->_identifier)) {
+    return false;
+  }
+
+  if (outer != NULL && !outer->covariant_match(other_class->outer_class(), ctx)) {
+    return false;
+  }
+
+  return true;
+}
+
+ClassType* ClassType::canonicalize(Context* ctx, int ctx_depth) {
+
+  GrowableArray<TypeArgument*> args(_type_arguments.length());
+  for (int i = 0; i < _type_arguments.length(); ++i) {
+    args.append(_type_arguments.at(i)->canonicalize(ctx, ctx_depth));
+  }
+
+  ClassType* outer = _outer_class == NULL ? NULL :
+      _outer_class->canonicalize(ctx, ctx_depth);
+
+  return new ClassType(_identifier, args, outer);
+}
+
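+// A TypeVariableSignature is "T" Identifier ";"; the leading 'T' has already
+// been consumed by Type::parse_generic_signature, so this reads the
+// identifier up to the ';' (e.g. the remainder of "TT;").
+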
+TypeVariable* TypeVariable::parse_generic_signature(DescriptorStream* STREAM) {
+  STREAM->set_mark();
+  char c = READ();
+  while (c != ';') {
+    c = READ();
+  }
+  Identifier* id = STREAM->identifier_from_mark();
+
+  return new TypeVariable(id);
+}
+
+void TypeVariable::bind_variables_to_parameters(Descriptor* sig) {
+  _parameter = sig->find_type_parameter(_id, &_inner_depth);
+  if (VerifyGenericSignatures && _parameter == NULL) {
+    fatal("Could not find formal parameter");
+  }
+}
+
+Type* TypeVariable::resolve(Context* ctx, int ctx_depth) {
+  if (parameter() != NULL) {
+    return parameter()->resolve(ctx, inner_depth(), ctx_depth);
+  } else {
+    if (VerifyGenericSignatures) {
+      fatal("Type variable matches no parameter");
+    }
+    return NULL;
+  }
+}
+
+bool TypeVariable::covariant_match(Type* other, Context* ctx) {
+
+  if (other == this) {
+    return true;
+  }
+
+  Context my_context(NULL); // empty, results in erasure
+  Type* my_type = resolve(&my_context, 0);
+  if (my_type == NULL) {
+    return false;
+  }
+
+  return my_type->covariant_match(other, ctx);
+}
+
+Type* TypeVariable::canonicalize(Context* ctx, int ctx_depth) {
+  return resolve(ctx, ctx_depth);
+}
+
+#ifndef PRODUCT
+void TypeVariable::reify_signature(stringStream* ss, Context* ctx) {
+  Type* type = resolve(ctx, 0);
+  if (type != NULL) {
+    type->reify_signature(ss, ctx);
+  }
+}
+
+void TypeVariable::print_on(outputStream* str) const {
+  str->indent().print_cr("Type Variable {");
+  {
+    streamIndentor si(str);
+    str->indent().print("Name: ");
+    _id->print_on(str);
+    str->print_cr("");
+    str->indent().print_cr("Inner depth: %d", _inner_depth);
+  }
+  str->indent().print_cr("}");
+}
+#endif // ndef PRODUCT
+
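+// The leading '[' has already been consumed, so the element type follows;
+// e.g. "[[Ljava/lang/String;" (a String[][]) parses as an array whose base
+// is itself an array.
+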
+ArrayType* ArrayType::parse_generic_signature(DescriptorStream* STREAM) {
+  Type* base = Type::parse_generic_signature(CHECK_STREAM);
+  return new ArrayType(base);
+}
+
+void ArrayType::bind_variables_to_parameters(Descriptor* sig) {
+  assert(_base != NULL, "Invalid base");
+  _base->bind_variables_to_parameters(sig);
+}
+
+bool ArrayType::covariant_match(Type* other, Context* ctx) {
+  assert(_base != NULL, "Invalid base");
+
+  if (other == this) {
+    return true;
+  }
+
+  ArrayType* other_array = other->as_array();
+  return (other_array != NULL && _base->covariant_match(other_array->_base, ctx));
+}
+
+ArrayType* ArrayType::canonicalize(Context* ctx, int ctx_depth) {
+  assert(_base != NULL, "Invalid base");
+  return new ArrayType(_base->canonicalize(ctx, ctx_depth));
+}
+
+#ifndef PRODUCT
+void ArrayType::reify_signature(stringStream* ss, Context* ctx) {
+  assert(_base != NULL, "Invalid base");
+  ss->print("[");
+  _base->reify_signature(ss, ctx);
+}
+
+void ArrayType::print_on(outputStream* str) const {
+  str->indent().print_cr("Array {");
+  {
+    streamIndentor si(str);
+    _base->print_on(str);
+  }
+  str->indent().print_cr("}");
+}
+#endif // ndef PRODUCT
+
+bool PrimitiveType::covariant_match(Type* other, Context* ctx) {
+
+  PrimitiveType* other_prim = other->as_primitive();
+  return (other_prim != NULL && _type == other_prim->_type);
+}
+
+PrimitiveType* PrimitiveType::canonicalize(Context* ctx, int ctxd) {
+  return this;
+}
+
+#ifndef PRODUCT
+void PrimitiveType::reify_signature(stringStream* ss, Context* ctx) {
+  ss->print("%c", _type);
+}
+
+void PrimitiveType::print_on(outputStream* str) const {
+  str->indent().print_cr("Primitive: '%c'", _type);
+}
+#endif // ndef PRODUCT
+
+void PrimitiveType::bind_variables_to_parameters(Descriptor* sig) {
+}
+
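+// A TypeArgument is '*' (an unbounded wildcard), '+' FieldTypeSignature
+// ("? extends X"), '-' FieldTypeSignature ("? super X"), or a bare type.
+// For example, "Ljava/util/List<+Ljava/lang/Number;>;" is
+// List<? extends Number>.
+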
+TypeArgument* TypeArgument::parse_generic_signature(DescriptorStream* STREAM) {
+  char c = READ();
+  Type* type = NULL;
+
+  switch (c) {
+    case '*':
+      return new TypeArgument(ClassType::java_lang_Object(), NULL);
+    default:
+      PUSH(c);
+      // fall-through
+    case '+':
+    case '-':
+      type = Type::parse_generic_signature(CHECK_STREAM);
+      if (c == '+') {
+        return new TypeArgument(type, NULL);
+      } else if (c == '-') {
+        return new TypeArgument(ClassType::java_lang_Object(), type);
+      } else {
+        return new TypeArgument(type, type);
+      }
+  }
+}
+
+void TypeArgument::bind_variables_to_parameters(Descriptor* sig) {
+  assert(_lower_bound != NULL, "Invalid lower bound");
+  _lower_bound->bind_variables_to_parameters(sig);
+  if (_upper_bound != NULL && _upper_bound != _lower_bound) {
+    _upper_bound->bind_variables_to_parameters(sig);
+  }
+}
+
+bool TypeArgument::covariant_match(TypeArgument* other, Context* ctx) {
+  assert(_lower_bound != NULL, "Invalid lower bound");
+
+  if (other == this) {
+    return true;
+  }
+
+  if (!_lower_bound->covariant_match(other->lower_bound(), ctx)) {
+    return false;
+  }
+  return true;
+}
+
+TypeArgument* TypeArgument::canonicalize(Context* ctx, int ctx_depth) {
+  assert(_lower_bound != NULL, "Invalid lower bound");
+  Type* lower = _lower_bound->canonicalize(ctx, ctx_depth);
+  Type* upper = NULL;
+
+  if (_upper_bound == _lower_bound) {
+    upper = lower;
+  } else if (_upper_bound != NULL) {
+    upper = _upper_bound->canonicalize(ctx, ctx_depth);
+  }
+
+  return new TypeArgument(lower, upper);
+}
+
+#ifndef PRODUCT
+void TypeArgument::print_on(outputStream* str) const {
+  str->indent().print_cr("TypeArgument {");
+  {
+    streamIndentor si(str);
+    if (_lower_bound != NULL) {
+      str->indent().print("Lower bound: ");
+      _lower_bound->print_on(str);
+    }
+    if (_upper_bound != NULL) {
+      str->indent().print("Upper bound: ");
+      _upper_bound->print_on(str);
+    }
+  }
+  str->indent().print_cr("}");
+}
+#endif // ndef PRODUCT
+
+void Context::Mark::destroy() {
+  if (is_active()) {
+    _context->reset_to_mark(_marked_size);
+  }
+  deactivate();
+}
+
+void Context::apply_type_arguments(
+    InstanceKlass* current, InstanceKlass* super, TRAPS) {
+  assert(_cache != NULL, "Cannot use an empty context");
+  ClassType* spec = NULL;
+  if (current != NULL) {
+    ClassDescriptor* descriptor = _cache->descriptor_for(current, CHECK);
+    if (super == current->super()) {
+      spec = descriptor->super();
+    } else {
+      spec = descriptor->interface_desc(super->name());
+    }
+    if (spec != NULL) {
+      _type_arguments.push(spec);
+    }
+  }
+}
+
+void Context::reset_to_mark(int size) {
+  _type_arguments.trunc_to(size);
+}
+
+ClassType* Context::at_depth(int i) const {
+  if (i < _type_arguments.length()) {
+    return _type_arguments.at(_type_arguments.length() - 1 - i);
+  }
+  return NULL;
+}
+
+#ifndef PRODUCT
+void Context::print_on(outputStream* str) const {
+  str->indent().print_cr("Context {");
+  for (int i = 0; i < _type_arguments.length(); ++i) {
+    streamIndentor si(str);
+    str->indent().print("leval %d: ", i);
+    ClassType* ct = at_depth(i);
+    if (ct == NULL) {
+      str->print_cr("<empty>");
+      continue;
+    } else {
+      str->print_cr("{");
+    }
+
+    for (int j = 0; j < ct->type_arguments_length(); ++j) {
+      streamIndentor si(str);
+      TypeArgument* ta = ct->type_argument_at(j);
+      Type* bound = ta->lower_bound();
+      bound->print_on(str);
+    }
+    str->indent().print_cr("}");
+  }
+  str->indent().print_cr("}");
+}
+#endif // ndef PRODUCT
+
+ClassDescriptor* DescriptorCache::descriptor_for(InstanceKlass* ik, TRAPS) {
+
+  ClassDescriptor** existing = _class_descriptors.get(ik);
+  if (existing == NULL) {
+    ClassDescriptor* cd = ClassDescriptor::parse_generic_signature(ik, CHECK_NULL);
+    _class_descriptors.put(ik, cd);
+    return cd;
+  } else {
+    return *existing;
+  }
+}
+
+MethodDescriptor* DescriptorCache::descriptor_for(
+    Method* mh, ClassDescriptor* cd, TRAPS) {
+  assert(mh != NULL && cd != NULL, "Should not be NULL");
+  MethodDescriptor** existing = _method_descriptors.get(mh);
+  if (existing == NULL) {
+    MethodDescriptor* md = MethodDescriptor::parse_generic_signature(mh, cd);
+    _method_descriptors.put(mh, md);
+    return md;
+  } else {
+    return *existing;
+  }
+}
+
+MethodDescriptor* DescriptorCache::descriptor_for(Method* mh, TRAPS) {
+  ClassDescriptor* cd = descriptor_for(
+      InstanceKlass::cast(mh->method_holder()), CHECK_NULL);
+  return descriptor_for(mh, cd, THREAD);
+}
+
+} // namespace generic
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/genericSignatures.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -0,0 +1,467 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
+#define SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
+
+#include "classfile/symbolTable.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/signature.hpp"
+#include "utilities/growableArray.hpp"
+#include "utilities/resourceHash.hpp"
+
+class stringStream;
+
+namespace generic {
+
+class Identifier;
+class ClassDescriptor;
+class MethodDescriptor;
+
+class TypeParameter; // a formal type parameter declared in generic signatures
+class TypeArgument;  // The "type value" passed to fill type parameters in supertypes
+class TypeVariable;  // A usage of a type parameter as a value
+/**
+ * Example:
+ *
+ * class Foo<T, V> extends Bar<String> { int m(V v) {} }
+ *          ^^^^^^              ^^^^^^          ^^
+ *     type parameters     type argument   type variable
+ *
+ * Note that a type variable could be passed as an argument too:
+ * class Foo<T, V> extends Bar<T> { int m(V v) {} }
+ *                             ^^^
+ *                             type argument's value is a type variable
+ */
+
+
+class Type;
+class ClassType;
+class ArrayType;
+class PrimitiveType;
+class Context;
+class DescriptorCache;
+
+class DescriptorStream;
+
+class Identifier : public ResourceObj {
+ private:
+  Symbol* _sym;
+  int _begin;
+  int _end;
+
+ public:
+  Identifier(Symbol* sym, int begin, int end) :
+    _sym(sym), _begin(begin), _end(end) {}
+
+  bool equals(Identifier* other);
+  bool equals(Symbol* sym);
+
+#ifndef PRODUCT
+  void print_on(outputStream* str) const;
+#endif // ndef PRODUCT
+};
+
+class Descriptor : public ResourceObj {
+ protected:
+  GrowableArray<TypeParameter*> _type_parameters;
+  ClassDescriptor* _outer_class;
+
+  Descriptor(GrowableArray<TypeParameter*>& params,
+    ClassDescriptor* outer)
+    : _type_parameters(params), _outer_class(outer) {}
+
+ public:
+
+  ClassDescriptor* outer_class() { return _outer_class; }
+  void set_outer_class(ClassDescriptor* sig) { _outer_class = sig; }
+
+  virtual ClassDescriptor* as_class_signature() { return NULL; }
+  virtual MethodDescriptor* as_method_signature() { return NULL; }
+
+  bool is_class_signature() { return as_class_signature() != NULL; }
+  bool is_method_signature() { return as_method_signature() != NULL; }
+
+  GrowableArray<TypeParameter*>& type_parameters() {
+    return _type_parameters;
+  }
+
+  TypeParameter* find_type_parameter(Identifier* id, int* param_depth);
+
+  virtual void bind_variables_to_parameters() = 0;
+
+#ifndef PRODUCT
+  virtual void print_on(outputStream* str) const = 0;
+#endif
+};
+
+class ClassDescriptor : public Descriptor {
+ private:
+  ClassType* _super;
+  GrowableArray<ClassType*> _interfaces;
+  MethodDescriptor* _outer_method;
+
+  ClassDescriptor(GrowableArray<TypeParameter*>& ftp, ClassType* scs,
+      GrowableArray<ClassType*>& sis, ClassDescriptor* outer_class = NULL,
+      MethodDescriptor* outer_method = NULL)
+        : Descriptor(ftp, outer_class), _super(scs), _interfaces(sis),
+          _outer_method(outer_method) {}
+
+  static u2 get_outer_class_index(InstanceKlass* k, TRAPS);
+  static ClassDescriptor* parse_generic_signature(Klass* k, Symbol* original_name, TRAPS);
+
+ public:
+
+  virtual ClassDescriptor* as_class_signature() { return this; }
+
+  MethodDescriptor* outer_method() { return _outer_method; }
+  void set_outer_method(MethodDescriptor* m) { _outer_method = m; }
+
+  ClassType* super() { return _super; }
+  ClassType* interface_desc(Symbol* sym);
+
+  static ClassDescriptor* parse_generic_signature(Klass* k, TRAPS);
+  static ClassDescriptor* parse_generic_signature(Symbol* sym);
+
+  // For use in superclass chains in positions where there is no generic info
+  static ClassDescriptor* placeholder(InstanceKlass* klass);
+
+#ifndef PRODUCT
+  void print_on(outputStream* str) const;
+#endif
+
+  ClassDescriptor* canonicalize(Context* ctx);
+
+  // Linking sets the position index in any contained TypeVariable type
+  // to correspond to the location of that identifier in the formal type
+  // parameters.
+  void bind_variables_to_parameters();
+};
+
+class MethodDescriptor : public Descriptor {
+ private:
+  GrowableArray<Type*> _parameters;
+  Type* _return_type;
+  GrowableArray<Type*> _throws;
+
+  MethodDescriptor(GrowableArray<TypeParameter*>& ftp, ClassDescriptor* outer,
+      GrowableArray<Type*>& sigs, Type* rt, GrowableArray<Type*>& throws)
+      : Descriptor(ftp, outer), _parameters(sigs), _return_type(rt),
+        _throws(throws) {}
+
+ public:
+
+  static MethodDescriptor* parse_generic_signature(Method* m, ClassDescriptor* outer);
+  static MethodDescriptor* parse_generic_signature(Symbol* sym, ClassDescriptor* outer);
+
+  MethodDescriptor* as_method_signature() { return this; }
+
+  // Performs generic analysis on the method parameters to determine
+  // if both methods refer to the same argument types.
+  bool covariant_match(MethodDescriptor* other, Context* ctx);
+
+  // Returns a new method descriptor with all generic variables
+  // removed and replaced with whatever is indicated using the Context.
+  MethodDescriptor* canonicalize(Context* ctx);
+
+  void bind_variables_to_parameters();
+
+#ifndef PRODUCT
+  TempNewSymbol reify_signature(Context* ctx, TRAPS);
+  void print_on(outputStream* str) const;
+#endif
+};
+
+class TypeParameter : public ResourceObj {
+ private:
+  Identifier* _identifier;
+  ClassType* _class_bound;
+  GrowableArray<ClassType*> _interface_bounds;
+
+  // The position is the ordinal location of the parameter within the
+  // formal parameter list (excluding outer classes).  It is only set for
+  // formal type parameters that are associated with a class -- method
+  // type parameters are left as -1.  When resolving a generic variable to
+  // find the actual type, this index is used to access the generic type
+  // argument in the provided context object.
+  int _position; // Assigned during variable linking
+
+  TypeParameter(Identifier* id, ClassType* class_bound,
+    GrowableArray<ClassType*>& interface_bounds) :
+      _identifier(id), _class_bound(class_bound),
+      _interface_bounds(interface_bounds), _position(-1) {}
+
+ public:
+  static TypeParameter* parse_generic_signature(DescriptorStream* str);
+
+  ClassType* bound();
+  int position() { return _position; }
+
+  void bind_variables_to_parameters(Descriptor* sig, int position);
+  Identifier* identifier() { return _identifier; }
+
+  Type* resolve(Context* ctx, int inner_depth, int ctx_depth);
+  TypeParameter* canonicalize(Context* ctx, int ctx_depth);
+
+#ifndef PRODUCT
+  void print_on(outputStream* str) const;
+#endif
+};
+
+class Type : public ResourceObj {
+ public:
+  static Type* parse_generic_signature(DescriptorStream* str);
+
+  virtual ClassType* as_class() { return NULL; }
+  virtual TypeVariable* as_variable() { return NULL; }
+  virtual ArrayType* as_array() { return NULL; }
+  virtual PrimitiveType* as_primitive() { return NULL; }
+
+  virtual bool covariant_match(Type* gt, Context* ctx) = 0;
+  virtual Type* canonicalize(Context* ctx, int ctx_depth) = 0;
+
+  virtual void bind_variables_to_parameters(Descriptor* sig) = 0;
+
+#ifndef PRODUCT
+  virtual void reify_signature(stringStream* ss, Context* ctx) = 0;
+  virtual void print_on(outputStream* str) const = 0;
+#endif
+};
+
+class ClassType : public Type {
+  friend class ClassDescriptor;
+ protected:
+  Identifier* _identifier;
+  GrowableArray<TypeArgument*> _type_arguments;
+  ClassType* _outer_class;
+
+  ClassType(Identifier* identifier,
+      GrowableArray<TypeArgument*>& args,
+      ClassType* outer)
+      : _identifier(identifier), _type_arguments(args), _outer_class(outer) {}
+
+  // Parses the simple class name and any type arguments; sets *has_inner
+  // to true if a nested-class suffix ('.') follows.
+  static Identifier* parse_generic_signature_simple(
+      GrowableArray<TypeArgument*>* args,
+      bool* has_inner, DescriptorStream* str);
+
+  static ClassType* parse_generic_signature(ClassType* outer,
+      DescriptorStream* str);
+  static ClassType* from_symbol(Symbol* sym);
+
+ public:
+  ClassType* as_class() { return this; }
+
+  static ClassType* parse_generic_signature(DescriptorStream* str);
+  static ClassType* java_lang_Object();
+
+  Identifier* identifier() { return _identifier; }
+  int type_arguments_length() { return _type_arguments.length(); }
+  TypeArgument* type_argument_at(int i);
+
+  virtual ClassType* outer_class() { return _outer_class; }
+
+  bool covariant_match(Type* gt, Context* ctx);
+  ClassType* canonicalize(Context* ctx, int context_depth);
+
+  void bind_variables_to_parameters(Descriptor* sig);
+
+#ifndef PRODUCT
+  void reify_signature(stringStream* ss, Context* ctx);
+  void print_on(outputStream* str) const;
+#endif
+};
+
+class TypeVariable : public Type {
+ private:
+  Identifier* _id;
+  TypeParameter* _parameter; // assigned during linking
+
+  // how many steps "out" from inner classes, -1 if method
+  int _inner_depth;
+
+  TypeVariable(Identifier* id)
+      : _id(id), _parameter(NULL), _inner_depth(0) {}
+
+ public:
+  TypeVariable* as_variable() { return this; }
+
+  static TypeVariable* parse_generic_signature(DescriptorStream* str);
+
+  Identifier* identifier() { return _id; }
+  TypeParameter* parameter() { return _parameter; }
+  int inner_depth() { return _inner_depth; }
+
+  void bind_variables_to_parameters(Descriptor* sig);
+
+  Type* resolve(Context* ctx, int ctx_depth);
+  bool covariant_match(Type* gt, Context* ctx);
+  Type* canonicalize(Context* ctx, int ctx_depth);
+
+#ifndef PRODUCT
+  void reify_signature(stringStream* ss, Context* ctx);
+  void print_on(outputStream* str) const;
+#endif
+};
+
+class ArrayType : public Type {
+ private:
+  Type* _base;
+
+  ArrayType(Type* base) : _base(base) {}
+
+ public:
+  ArrayType* as_array() { return this; }
+
+  static ArrayType* parse_generic_signature(DescriptorStream* str);
+
+  bool covariant_match(Type* gt, Context* ctx);
+  ArrayType* canonicalize(Context* ctx, int ctx_depth);
+
+  void bind_variables_to_parameters(Descriptor* sig);
+
+#ifndef PRODUCT
+  void reify_signature(stringStream* ss, Context* ctx);
+  void print_on(outputStream* str) const;
+#endif
+};
+
+class PrimitiveType : public Type {
+  friend class Type;
+ private:
+  char _type; // includes V for void
+
+  PrimitiveType(char& type) : _type(type) {}
+
+ public:
+  PrimitiveType* as_primitive() { return this; }
+
+  bool covariant_match(Type* gt, Context* ctx);
+  PrimitiveType* canonicalize(Context* ctx, int ctx_depth);
+
+  void bind_variables_to_parameters(Descriptor* sig);
+
+#ifndef PRODUCT
+  void reify_signature(stringStream* ss, Context* ctx);
+  void print_on(outputStream* str) const;
+#endif
+};
+
+class TypeArgument : public ResourceObj {
+ private:
+  Type* _lower_bound;
+  Type* _upper_bound; // may be null or == _lower_bound
+
+  TypeArgument(Type* lower_bound, Type* upper_bound)
+      : _lower_bound(lower_bound), _upper_bound(upper_bound) {}
+
+ public:
+
+  static TypeArgument* parse_generic_signature(DescriptorStream* str);
+
+  Type* lower_bound() { return _lower_bound; }
+  Type* upper_bound() { return _upper_bound; }
+
+  void bind_variables_to_parameters(Descriptor* sig);
+  TypeArgument* canonicalize(Context* ctx, int ctx_depth);
+
+  bool covariant_match(TypeArgument* a, Context* ctx);
+
+#ifndef PRODUCT
+  void print_on(outputStream* str) const;
+#endif
+};
+
+
+class Context : public ResourceObj {
+ private:
+  DescriptorCache* _cache;
+  GrowableArray<ClassType*> _type_arguments;
+
+  void reset_to_mark(int size);
+
+ public:
+  // When this object goes out of scope or 'destroy' is called, the type
+  // arguments applied to the context since the mark was taken are wound
+  // back (unless the mark has been deactivated).
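+  //
+  // A usage sketch (names are illustrative):
+  //   Context::Mark m = ctx.mark();
+  //   ctx.apply_type_arguments(ik, super_ik, CHECK);
+  //   ...  // when 'm' is destroyed, the context is rewound to the mark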
+  class Mark : public StackObj {
+   private:
+    mutable Context* _context;
+    int _marked_size;
+
+    bool is_active() const { return _context != NULL; }
+    void deactivate() const { _context = NULL; }
+
+   public:
+    Mark() : _context(NULL), _marked_size(0) {}
+    Mark(Context* ctx, int sz) : _context(ctx), _marked_size(sz) {}
+    Mark(const Mark& m) : _context(m._context), _marked_size(m._marked_size) {
+      m.deactivate(); // Ownership is transferred
+    }
+
+    Mark& operator=(const Mark& cm) {
+      destroy();
+      _context = cm._context;
+      _marked_size = cm._marked_size;
+      cm.deactivate();
+      return *this;
+    }
+
+    void destroy();
+    ~Mark() { destroy(); }
+  };
+
+  Context(DescriptorCache* cache) : _cache(cache) {}
+
+  Mark mark() { return Mark(this, _type_arguments.length()); }
+  void apply_type_arguments(InstanceKlass* current, InstanceKlass* super, TRAPS);
+
+  ClassType* at_depth(int i) const;
+
+#ifndef PRODUCT
+  void print_on(outputStream* str) const;
+#endif
+};
+
+/**
+ * Contains a cache of descriptors for classes and methods so they can be
+ * looked-up instead of reparsing each time they are needed.
+ */
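+// A usage sketch (names are illustrative):
+//   DescriptorCache cache;
+//   ClassDescriptor*  cd = cache.descriptor_for(ik, CHECK_NULL);
+//   MethodDescriptor* md = cache.descriptor_for(method, cd, CHECK_NULL);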
+class DescriptorCache : public ResourceObj {
+ private:
+  ResourceHashtable<InstanceKlass*, ClassDescriptor*> _class_descriptors;
+  ResourceHashtable<Method*, MethodDescriptor*> _method_descriptors;
+
+ public:
+  ClassDescriptor* descriptor_for(InstanceKlass* ikh, TRAPS);
+
+  MethodDescriptor* descriptor_for(Method* mh, ClassDescriptor* cd, TRAPS);
+  // Class descriptor derived from method holder
+  MethodDescriptor* descriptor_for(Method* mh, TRAPS);
+};
+
+} // namespace generic
+
+#endif // SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
+
--- a/src/share/vm/classfile/systemDictionary.hpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/share/vm/classfile/systemDictionary.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -137,6 +137,7 @@
   /* NOTE: needed too early in bootstrapping process to have checks based on JDK version */                              \
   /* Universe::is_gte_jdk14x_version() is not set up by this point. */                                                   \
   /* It's okay if this turns out to be NULL in non-1.4 JDKs. */                                                          \
+  do_klass(lambda_MagicLambdaImpl_klass,                java_lang_invoke_MagicLambdaImpl, Opt ) \
   do_klass(reflect_MagicAccessorImpl_klass,             sun_reflect_MagicAccessorImpl,             Opt                 ) \
   do_klass(reflect_MethodAccessorImpl_klass,            sun_reflect_MethodAccessorImpl,            Opt_Only_JDK14NewRef) \
   do_klass(reflect_ConstructorAccessorImpl_klass,       sun_reflect_ConstructorAccessorImpl,       Opt_Only_JDK14NewRef) \
--- a/src/share/vm/classfile/verifier.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/share/vm/classfile/verifier.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -555,9 +555,10 @@
     if (was_recursively_verified())  return;
 
     Method* m = methods->at(index);
-    if (m->is_native() || m->is_abstract()) {
+    if (m->is_native() || m->is_abstract() || m->is_overpass()) {
       // If m is native or abstract, skip it.  It is checked in class file
-      // parser that methods do not override a final method.
+      // parser that methods do not override a final method.  Overpass methods
+      // are trusted since the VM generates them.
       continue;
     }
     verify_method(methodHandle(THREAD, m), CHECK_VERIFY(this));
@@ -2304,11 +2305,21 @@
   // Make sure the constant pool item is the right type
   u2 index = bcs->get_index_u2();
   Bytecodes::Code opcode = bcs->raw_code();
-  unsigned int types = (opcode == Bytecodes::_invokeinterface
-                                ? 1 << JVM_CONSTANT_InterfaceMethodref
-                      : opcode == Bytecodes::_invokedynamic
-                                ? 1 << JVM_CONSTANT_InvokeDynamic
-                                : 1 << JVM_CONSTANT_Methodref);
+  unsigned int types;
+  switch (opcode) {
+    case Bytecodes::_invokeinterface:
+      types = 1 << JVM_CONSTANT_InterfaceMethodref;
+      break;
+    case Bytecodes::_invokedynamic:
+      types = 1 << JVM_CONSTANT_InvokeDynamic;
+      break;
+    case Bytecodes::_invokespecial:
+      types = (1 << JVM_CONSTANT_InterfaceMethodref) |
+              (1 << JVM_CONSTANT_Methodref);
+      break;
+    default:
+      types = 1 << JVM_CONSTANT_Methodref;
+  }
   verify_cp_type(bcs->bci(), index, cp, types, CHECK_VERIFY(this));
 
   // Get method name and signature
--- a/src/share/vm/classfile/vmSymbols.hpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/share/vm/classfile/vmSymbols.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -110,10 +110,12 @@
   template(sun_jkernel_DownloadManager,               "sun/jkernel/DownloadManager")              \
   template(getBootClassPathEntryForClass_name,        "getBootClassPathEntryForClass")            \
   template(sun_misc_PostVMInitHook,                   "sun/misc/PostVMInitHook")                  \
+  template(sun_misc_Launcher_ExtClassLoader,          "sun/misc/Launcher$ExtClassLoader")         \
                                                                                                   \
   /* Java runtime version access */                                                               \
   template(sun_misc_Version,                          "sun/misc/Version")                         \
   template(java_runtime_name_name,                    "java_runtime_name")                        \
+  template(java_runtime_version_name,                 "java_runtime_version")                     \
                                                                                                   \
   /* class file format tags */                                                                    \
   template(tag_source_file,                           "SourceFile")                               \
@@ -257,6 +259,7 @@
   template(java_lang_invoke_DontInline_signature,     "Ljava/lang/invoke/DontInline;")            \
   template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \
   template(java_lang_invoke_LambdaForm_Hidden_signature, "Ljava/lang/invoke/LambdaForm$Hidden;")  \
+  template(java_lang_invoke_MagicLambdaImpl,          "java/lang/invoke/MagicLambdaImpl")         \
   /* internal up-calls made only by the JVM, via class sun.invoke.MethodHandleNatives: */         \
   template(findMethodHandleType_name,                 "findMethodHandleType")                     \
   template(findMethodHandleType_signature,       "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/lang/invoke/MethodType;") \
@@ -723,6 +726,21 @@
   /* java/lang/ref/Reference */                                                                                         \
   do_intrinsic(_Reference_get,            java_lang_ref_Reference, get_name,    void_object_signature, F_R)             \
                                                                                                                         \
+  /* support for com.sun.crypto.provider.AESCrypt and some of its callers */                                            \
+  do_class(com_sun_crypto_provider_aescrypt,      "com/sun/crypto/provider/AESCrypt")                                   \
+  do_intrinsic(_aescrypt_encryptBlock, com_sun_crypto_provider_aescrypt, encryptBlock_name, byteArray_int_byteArray_int_signature, F_R)   \
+  do_intrinsic(_aescrypt_decryptBlock, com_sun_crypto_provider_aescrypt, decryptBlock_name, byteArray_int_byteArray_int_signature, F_R)   \
+   do_name(     encryptBlock_name,                                 "encryptBlock")                                      \
+   do_name(     decryptBlock_name,                                 "decryptBlock")                                      \
+   do_signature(byteArray_int_byteArray_int_signature,             "([BI[BI)V")                                         \
+                                                                                                                        \
+  do_class(com_sun_crypto_provider_cipherBlockChaining,            "com/sun/crypto/provider/CipherBlockChaining")       \
+   do_intrinsic(_cipherBlockChaining_encryptAESCrypt, com_sun_crypto_provider_cipherBlockChaining, encrypt_name, byteArray_int_int_byteArray_int_signature, F_R)   \
+   do_intrinsic(_cipherBlockChaining_decryptAESCrypt, com_sun_crypto_provider_cipherBlockChaining, decrypt_name, byteArray_int_int_byteArray_int_signature, F_R)   \
+   do_name(     encrypt_name,                                      "encrypt")                                           \
+   do_name(     decrypt_name,                                      "decrypt")                                           \
+   do_signature(byteArray_int_int_byteArray_int_signature,         "([BII[BI)V")                                        \
+                                                                                                                        \
   /* support for sun.misc.Unsafe */                                                                                     \
   do_class(sun_misc_Unsafe,               "sun/misc/Unsafe")                                                            \
                                                                                                                         \
--- a/src/share/vm/code/dependencies.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/share/vm/code/dependencies.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -1160,7 +1160,11 @@
 
   // We could also return false if m does not yet appear to be
   // executed, if the VM version supports this distinction also.
-  return !m->is_abstract();
+  return !m->is_abstract() &&
+         !InstanceKlass::cast(m->method_holder())->is_interface();
+         // TODO: investigate whether default methods should be
+         // considered as "concrete" in this situation.  For now they
+         // are not.
 }
 
 
--- a/src/share/vm/compiler/compilerOracle.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/share/vm/compiler/compilerOracle.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -574,7 +574,7 @@
   char token[1024];
   int  pos = 0;
   int  c = getc(stream);
-  while(c != EOF) {
+  while(c != EOF && pos < (int)(sizeof(token)-1)) {
     if (c == '\n') {
       token[pos++] = '\0';
       parse_from_line(token);
@@ -595,7 +595,7 @@
   int  pos = 0;
   const char* sp = str;
   int  c = *sp++;
-  while (c != '\0') {
+  while (c != '\0' && pos < (int)(sizeof(token)-1)) {
     if (c == '\n') {
       token[pos++] = '\0';
       parse_line(token);
--- a/src/share/vm/compiler/disassembler.cpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/share/vm/compiler/disassembler.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -55,16 +55,18 @@
 bool        Disassembler::_tried_to_load_library = false;
 
 // This routine is in the shared library:
+Disassembler::decode_func_virtual Disassembler::_decode_instructions_virtual = NULL;
 Disassembler::decode_func Disassembler::_decode_instructions = NULL;
 
 static const char hsdis_library_name[] = "hsdis-"HOTSPOT_LIB_ARCH;
-static const char decode_instructions_name[] = "decode_instructions_virtual";
-
+static const char decode_instructions_virtual_name[] = "decode_instructions_virtual";
+static const char decode_instructions_name[] = "decode_instructions";
+static bool use_new_version = true;
 #define COMMENT_COLUMN  40 LP64_ONLY(+8) /*could be an option*/
 #define BYTES_COMMENT   ";..."  /* funky byte display comment */
 
 bool Disassembler::load_library() {
-  if (_decode_instructions != NULL) {
+  if (_decode_instructions_virtual != NULL || _decode_instructions != NULL) {
     // Already succeeded.
     return true;
   }
@@ -123,11 +125,19 @@
     _library = os::dll_load(buf, ebuf, sizeof ebuf);
   }
   if (_library != NULL) {
+    _decode_instructions_virtual = CAST_TO_FN_PTR(Disassembler::decode_func_virtual,
+                                          os::dll_lookup(_library, decode_instructions_virtual_name));
+  }
+  if (_decode_instructions_virtual == NULL) {
+    // the new entry point was not found; try the old version
     _decode_instructions = CAST_TO_FN_PTR(Disassembler::decode_func,
                                           os::dll_lookup(_library, decode_instructions_name));
+    use_new_version = false;
+  } else {
+    use_new_version = true;
   }
   _tried_to_load_library = true;
-  if (_decode_instructions == NULL) {
+  if (_decode_instructions_virtual == NULL && _decode_instructions == NULL) {
     tty->print_cr("Could not load %s; %s; %s", buf,
                   ((_library != NULL)
                    ? "entry point is missing"
@@ -450,17 +460,31 @@
     // This is mainly for debugging the library itself.
     FILE* out = stdout;
     FILE* xmlout = (_print_raw > 1 ? out : NULL);
-    return (address)
-      (*Disassembler::_decode_instructions)((uintptr_t)start, (uintptr_t)end,
-                                            start, end - start,
+    return use_new_version ?
+      (address)
+      (*Disassembler::_decode_instructions_virtual)((uintptr_t)start, (uintptr_t)end,
+                                                    start, end - start,
+                                                    NULL, (void*) xmlout,
+                                                    NULL, (void*) out,
+                                                    options(), 0/*nice new line*/)
+      :
+      (address)
+      (*Disassembler::_decode_instructions)(start, end,
                                             NULL, (void*) xmlout,
                                             NULL, (void*) out,
                                             options());
   }
 
-  return (address)
-    (*Disassembler::_decode_instructions)((uintptr_t)start, (uintptr_t)end,
-                                          start, end - start,
+  return use_new_version ?
+    (address)
+    (*Disassembler::_decode_instructions_virtual)((uintptr_t)start, (uintptr_t)end,
+                                                  start, end - start,
+                                                  &event_to_env,  (void*) this,
+                                                  &printf_to_env, (void*) this,
+                                                  options(), 0/*nice new line*/)
+    :
+    (address)
+    (*Disassembler::_decode_instructions)(start, end,
                                           &event_to_env,  (void*) this,
                                           &printf_to_env, (void*) this,
                                           options());
--- a/src/share/vm/compiler/disassembler.hpp	Mon Nov 05 15:30:22 2012 -0500
+++ b/src/share/vm/compiler/disassembler.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -49,18 +49,27 @@
   friend class decode_env;
  private:
   // this is the type of the dll entry point:
-  typedef void* (*decode_func)(uintptr_t start_va, uintptr_t end_va,
+  typedef void* (*decode_func_virtual)(uintptr_t start_va, uintptr_t end_va,
                                unsigned char* buffer, uintptr_t length,
                                void* (*event_callback)(void*, const char*, void*),
                                void* event_stream,
                                int (*printf_callback)(void*, const char*, ...),
                                void* printf_stream,
+                               const char* options,
+                               int newline);
+  // this is the type of the dll entry point for the old version:
+  typedef void* (*decode_func)(void* start_va, void* end_va,
+                               void* (*event_callback)(void*, const char*, void*),
+                               void* event_stream,
+                               int (*printf_callback)(void*, const char*, ...),
+                               void* printf_stream,
                                const char* options);
   // points to the library.
   static void*    _library;
   // bailout
   static bool     _tried_to_load_library;
   // points to the decode function.
+  static decode_func_virtual _decode_instructions_virtual;
   static decode_func _decode_instructions;
   // tries to load the library and returns whether it succeeded.
   static bool load_library();
@@ -85,7 +94,9 @@
 
  public:
   static bool can_decode() {
-    return (_decode_instructions != NULL) || load_library();
+    return (_decode_instructions_virtual != NULL) ||
+           (_decode_instructions != NULL) ||
+           load_library();
   }
   static void decode(CodeBlob *cb,               outputStream* st = NULL);
   static void decode(nmethod* nm,                outputStream* st = NULL);
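
With two possible entry points, the header keeps a function pointer for each signature, and can_decode() reports success if either one (or a fresh load attempt) resolves. A compact sketch of that shape, with hypothetical names and the callback parameters trimmed away for brevity:

    // Sketch only: a holder that tracks both the new and the legacy entry point.
    #include <cstdint>
    #include <cstdio>

    typedef void* (*decode_fn_virtual)(uintptr_t start_va, uintptr_t end_va,
                                       unsigned char* buf, uintptr_t len,
                                       const char* options, int newline);
    typedef void* (*decode_fn_legacy)(void* start, void* end, const char* options);

    class DecoderHolder {
      static decode_fn_virtual _new_fn;   // preferred entry point, NULL until resolved
      static decode_fn_legacy  _old_fn;   // legacy fallback, NULL until resolved
     public:
      static bool can_decode() { return _new_fn != NULL || _old_fn != NULL; }
    };

    decode_fn_virtual DecoderHolder::_new_fn = NULL;
    decode_fn_legacy  DecoderHolder::_old_fn = NULL;

    int main() {
      printf("can_decode: %s\n", DecoderHolder::can_decode() ? "yes" : "no");
      return 0;
    }
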
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp	Mon Nov 05 13:55:31 2012 -0800
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
+#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
+#include "memory/freeBlockDictionary.hpp"
+#include "memory/sharedHeap.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/vmThread.hpp"
+
+template <>
+void AdaptiveFreeList<FreeChunk>::print_on(outputStream* st, const char* c) const {
+  if (c != NULL) {
+    st->print("%16s", c);
+  } else {
+    st->print(SIZE_FORMAT_W(16), size());
+  }
+  st->print("\t"
+           SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
+           SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n",
+           bfr_surp(),             surplus(),             desired(),             prev_sweep(),           before_sweep(),
+           count(),               coal_births(),          coal_deaths(),          split_births(),         split_deaths());
+}
+
+template <class Chunk>
+AdaptiveFreeList<Chunk>::AdaptiveFreeList() : FreeList<Chunk>(), _hint(0) {
+  init_statistics();
+}
+
+template <class Chunk>
+AdaptiveFreeList<Chunk>::AdaptiveFreeList(Chunk* fc) : FreeList<Chunk>(fc), _hint(0) {
+  init_statistics();
+#ifndef PRODUCT
+  _allocation_stats.set_returned_bytes(size() * HeapWordSize);
+#endif
+}
+
+template <class Chunk>
+void AdaptiveFreeList<Chunk>::initialize() {
+  FreeList<Chunk>::initialize();
+  set_hint(0);
+  init_statistics(true /* split_birth */);
+}
+
+template <class Chunk>
+void AdaptiveFreeList<Chunk>::reset(size_t hint) {
+  FreeList<Chunk>::reset();
+  set_hint(hint);
+}
+
+#ifndef PRODUCT
+template <class Chunk>
+void AdaptiveFreeList<Chunk>::assert_proper_lock_protection_work() const {
+  assert(protecting_lock() != NULL, "Don't call this directly");
+  assert(ParallelGCThreads > 0, "Don't call this directly");
+  Thread* thr = Thread::current();
+  if (thr->is_VM_thread() || thr->is_ConcurrentGC_thread()) {
+    // assert that we are holding the freelist lock
+  } else if (thr->is_GC_task_thread()) {
+    assert(protecting_lock()->owned_by_self(), "FreeList RACE DETECTED");
+  } else if (thr->is_Java_thread()) {
+    assert(!SafepointSynchronize::is_at_safepoint(), "Should not be executing");
+  } else {
+    ShouldNotReachHere();  // unaccounted thread type?
+  }
+}
+#endif
+
+template <class Chunk>
+void AdaptiveFreeList<Chunk>::init_statistics(bool split_birth) {
+  _allocation_stats.initialize(split_birth);
+}
+
+template <class Chunk>
+size_t AdaptiveFreeList<Chunk>::get_better_size() {
+
+  // A candidate chunk has been found.  If it is already under-
+  // populated and there is a hint, return the hint().  Else
+  // return the size of this chunk.
+  if (surplus() <= 0) {
+    if (hint() != 0) {
+      return hint();
+    } else {
+      return size();
+    }
+  } else {
+    // This list has a surplus so use it.
+    return size();
+  }
+}
+
+
+template <class Chunk>
+void AdaptiveFreeList<Chunk>::return_chunk_at_head(Chunk* chunk) {
+  assert_proper_lock_protection();
+  return_chunk_at_head(chunk, true);
+}
+
+template <class Chunk>
+void AdaptiveFreeList<Chunk>::return_chunk_at_head(Chunk* chunk, bool record_return) {
+  FreeList<Chunk>::return_chunk_at_head(chunk, record_return);
+#ifdef ASSERT
+  if (record_return) {
+    increment_returned_bytes_by(size()*HeapWordSize);
+  }
+#endif
+}
+
+template <class Chunk>
+void AdaptiveFreeList<Chunk>::return_chunk_at_tail(Chunk* chunk) {
+  return_chunk_at_tail(chunk, true);
+}
+
+template <class Chunk>
+void AdaptiveFreeList<Chunk>::return_chunk_at_tail(Chunk* chunk, bool record_return) {
+  FreeList<Chunk>::return_chunk_at_tail(chunk, record_return);
+#ifdef ASSERT
+  if (record_return) {
+    increment_returned_bytes_by(size()*HeapWordSize);
+  }
+#endif
+}
+
+#ifndef PRODUCT
+template <class Chunk>
+void AdaptiveFreeList<Chunk>::verify_stats() const {
+  // The +1 of the LH comparand is to allow some "looseness" in
+  // checking: we usually call this interface when adding a block
+  // and we'll subsequently update the stats; we cannot update the
+  // stats beforehand because in the case of the large-block BT
+  // dictionary for example, this might be the first block and
+  // in that case there would be no place that we could record
+  // the stats (which are kept in the block itself).
+  assert((_allocation_stats.prev_sweep() + _allocation_stats.split_births()
+          + _allocation_stats.coal_births() + 1)   // Total Production Stock + 1
+         >= (_allocation_stats.split_deaths() + _allocation_stats.coal_deaths()
+             + (ssize_t)count()),                // Total Current Stock + depletion
+         err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
+                 " violates Conservation Principle: "
+                 "prev_sweep(" SIZE_FORMAT ")"
+                 " + split_births(" SIZE_FORMAT ")"
+                 " + coal_births(" SIZE_FORMAT ") + 1 >= "
+                 " split_deaths(" SIZE_FORMAT ")"
+                 " coal_deaths(" SIZE_FORMAT ")"
+                 " + count(" SSIZE_FORMAT ")",
+                 this, size(), _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
+                 _allocation_stats.coal_births(), _allocation_stats.split_deaths(),
+                 _allocation_stats.coal_deaths(), count()));
+}
+#endif
+
+// Needs to be after the definitions have been seen.
+template class AdaptiveFreeList<FreeChunk>;
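
verify_stats() above asserts a conservation invariant: total production (prev_sweep + split_births + coal_births, plus one unit of slack for a block being added before its stats are recorded) must cover total depletion (split_deaths + coal_deaths) plus the blocks currently on the list. A self-contained sketch of the same arithmetic, using a made-up Stats struct and counters (not the real AllocationStats class):

    // Sketch only: standalone version of the conservation check.
    #include <cassert>

    struct Stats {
      long prev_sweep, split_births, coal_births;  // production side
      long split_deaths, coal_deaths;              // depletion side
    };

    static bool conserves(const Stats& s, long count) {
      // production + 1 (slack) >= depletion + blocks still on the list
      return (s.prev_sweep + s.split_births + s.coal_births + 1)
          >= (s.split_deaths + s.coal_deaths + count);
    }

    int main() {
      Stats s = { 10, 4, 2, 3, 1 };  // 16 produced, so 17 with slack
      assert(conserves(s, 12));      // 3 + 1 dead + 12 live = 16 <= 17: holds
      assert(!conserves(s, 14));     // 4 + 14 = 18 > 17: would trip the assert
      return 0;
    }
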
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp	Mon Nov 05 13:55:31 2012 -0800
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
+#define SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
+
+#include "memory/freeList.hpp"
+#include "gc_implementation/shared/allocationStats.hpp"
+
+class CompactibleFreeListSpace;
+
+// A class for maintaining a free list of Chunk's.  The FreeList
+// maintains the structure of the list (head, tail, etc.) plus
+// statistics for allocations from the list.  The links between items
+// are not part of FreeList.  The statistics are
+// used to make decisions about coalescing Chunk's when they
+// are swept during collection.
+//
+// See the corresponding .cpp file for a description of the specifics
+// for that implementation.
+
+class Mutex;
+
+template <class Chunk>
+class AdaptiveFreeList : public FreeList<Chunk> {
+  friend class CompactibleFreeListSpace;
+  friend class VMStructs;
+  // friend class PrintTreeCensusClosure<Chunk, FreeList_t>;
+
+  size_t        _hint;          // next larger size list with a positive surplus
+
+  AllocationStats _allocation_stats; // allocation-related statistics
+
+ public:
+
+  AdaptiveFreeList();
+  AdaptiveFreeList(Chunk* fc);
+
+  using FreeList<Chunk>::assert_proper_lock_protection;
+#ifdef ASSERT
+  using FreeList<Chunk>::protecting_lock;
+#endif
+  using FreeList<Chunk>::count;
+  using FreeList<Chunk>::size;
+  using FreeList<Chunk>::verify_chunk_in_free_list;
+  using FreeList<Chunk>::getFirstNChunksFromList;
+  using FreeList<Chunk>::print_on;
+  void return_chunk_at_head(Chunk* fc, bool record_return);
+  void return_chunk_at_head(Chunk* fc);
+  void return_chunk_at_tail(Chunk* fc, bool record_return);
+  void return_chunk_at_tail(Chunk* fc);
+  using FreeList<Chunk>::return_chunk_at_tail;
+  using FreeList<Chunk>::remove_chunk;
+  using FreeList<Chunk>::prepend;
+  using FreeList<Chunk>::print_labels_on;
+  using FreeList<Chunk>::get_chunk_at_head;
+
+  // Initialize.
+  void initialize();
+
+  // Reset the head, tail, hint, and count of a free list.
+  void reset(size_t hint);
+
+  void assert_proper_lock_protection_work() const PRODUCT_RETURN;
+
+  void print_on(outputStream* st, const char* c = NULL) const;
+
+  size_t hint() const {
+    return _hint;
+  }
+  void set_hint(size_t v) {
+    assert_proper_lock_protection();
+    assert(v == 0 || size() < v, "Bad hint");
+    _hint = v;
+  }
+
+  size_t get_better_size();
+
+  // Accessors for statistics
+  void init_statistics(bool split_birth = false);
+
+  AllocationStats* allocation_stats() {
+    assert_proper_lock_protection();
+    return &_allocation_stats;
+  }
+
+  ssize_t desired() const {
+    return _allocation_stats.desired();
+  }
+  void set_desired(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_desired(v);
+  }
+  void compute_desired(float inter_sweep_current,
+                       float inter_sweep_estimate,
+                       float intra_sweep_estimate) {
+    assert_proper_lock_protection();
+    _allocation_stats.compute_desired(count(),
+                                      inter_sweep_current,
+                                      inter_sweep_estimate,
+                                      intra_sweep_estimate);
+  }
+  ssize_t coal_desired() const {
+    return _allocation_stats.coal_desired();
+  }
+  void set_coal_desired(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_coal_desired(v);
+  }
+
+  ssize_t surplus() const {
+    return _allocation_stats.surplus();
+  }
+  void set_surplus(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_surplus(v);
+  }
+  void increment_surplus() {
+    assert_proper_lock_protection();
+    _allocation_stats.increment_surplus();
+  }
+  void decrement_surplus() {
+    assert_proper_lock_protection();
+    _allocation_stats.decrement_surplus();
+  }
+
+  ssize_t bfr_surp() const {
+    return _allocation_stats.bfr_surp();
+  }
+  void set_bfr_surp(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_bfr_surp(v);
+  }
+  ssize_t prev_sweep() const {
+    return _allocation_stats.prev_sweep();
+  }
+  void set_prev_sweep(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_prev_sweep(v);
+  }
+  ssize_t before_sweep() const {
+    return _allocation_stats.before_sweep();
+  }
+  void set_before_sweep(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_before_sweep(v);
+  }
+