changeset 37026:4b57da77f080

Merge
author bharadwaj
date Tue, 05 Apr 2016 15:39:34 -0400
parents e0533c567b27 6072af7a98be
children 997acd1b593e 0e25f6f1d8f0
files hotspot/test/compiler/compilercontrol/jcmd/StressAddSequentiallyTest.java jdk/make/src/classes/build/tools/module/Module.java jdk/make/src/classes/build/tools/module/ModuleInfoReader.java jdk/make/src/classes/build/tools/module/ModulesXmlReader.java jdk/make/src/classes/build/tools/module/ModulesXmlWriter.java jdk/make/src/native/genconstants/ch/genSocketOptionRegistry.c jdk/make/src/native/genconstants/fs/genSolarisConstants.c jdk/make/src/native/genconstants/fs/genUnixConstants.c jdk/src/java.base/share/classes/sun/misc/InvalidJarIndexException.java jdk/src/java.base/share/classes/sun/misc/JarIndex.java jdk/src/java.base/share/classes/sun/misc/Resource.java jdk/src/java.base/share/classes/sun/misc/URLClassPath.java jdk/src/java.base/share/classes/sun/misc/resources/Messages.java jdk/src/java.base/share/classes/sun/misc/resources/Messages_de.java jdk/src/java.base/share/classes/sun/misc/resources/Messages_es.java jdk/src/java.base/share/classes/sun/misc/resources/Messages_fr.java jdk/src/java.base/share/classes/sun/misc/resources/Messages_it.java jdk/src/java.base/share/classes/sun/misc/resources/Messages_ja.java jdk/src/java.base/share/classes/sun/misc/resources/Messages_ko.java jdk/src/java.base/share/classes/sun/misc/resources/Messages_pt_BR.java jdk/src/java.base/share/classes/sun/misc/resources/Messages_sv.java jdk/src/java.base/share/classes/sun/misc/resources/Messages_zh_CN.java jdk/src/java.base/share/classes/sun/misc/resources/Messages_zh_TW.java jdk/src/java.base/unix/classes/sun/misc/FileURLMapper.java jdk/src/java.base/windows/classes/sun/misc/FileURLMapper.java jdk/src/jdk.deploy.osx/macosx/classes/module-info.java langtools/test/tools/lib/ToolBox.java nashorn/src/jdk.dynalink/share/classes/jdk/dynalink/beans/messages.properties
diffstat 1159 files changed, 109867 insertions(+), 14245 deletions(-)
--- a/.hgtags	Mon Apr 04 02:10:46 2016 -0700
+++ b/.hgtags	Tue Apr 05 15:39:34 2016 -0400
@@ -353,3 +353,5 @@
 257b579d813201682931d6b42f0445ffe5b4210d jdk-9+108
 c870cb782aca71093d2584376f27f0cfbfec0e3a jdk-9+109
 4a95f4b1bd8bfce85dc02a593896749feab96c34 jdk-9+110
+a6614ff7bf09da74be1d0ef3d9755090d244697a jdk-9+111
+7359994942f8d8e723b584d66a3a92c2e9e95e5c jdk-9+112
--- a/.hgtags-top-repo	Mon Apr 04 02:10:46 2016 -0700
+++ b/.hgtags-top-repo	Tue Apr 05 15:39:34 2016 -0400
@@ -354,3 +354,4 @@
 1787bdaabb2b6f4193406e25a50cb0419ea8e8f3 jdk-9+109
 925be13b3740d07a5958ccb5ab3c0ae1baba7055 jdk-9+110
 f900d5afd9c83a0df8f36161c27c5e4c86a66f4c jdk-9+111
+03543a758cd5890f2266e4b9678378a925dde22a jdk-9+112
--- a/common/autoconf/generated-configure.sh	Mon Apr 04 02:10:46 2016 -0700
+++ b/common/autoconf/generated-configure.sh	Tue Apr 05 15:39:34 2016 -0400
@@ -4950,7 +4950,7 @@
 #CUSTOM_AUTOCONF_INCLUDE
 
 # Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1458558778
+DATE_WHEN_GENERATED=1458755892
 
 ###############################################################################
 #
@@ -61707,10 +61707,10 @@
   { $as_echo "$as_me:${as_lineno-$LINENO}: checking for which zlib to use" >&5
 $as_echo_n "checking for which zlib to use... " >&6; }
 
-  DEFAULT_ZLIB=bundled
-  if test "x$OPENJDK_TARGET_OS" = xmacosx; then
-    # On macosx default is system...on others default is bundled
-    DEFAULT_ZLIB=system
+  DEFAULT_ZLIB=system
+  if test "x$OPENJDK_TARGET_OS" = xwindows; then
+    # On windows default is bundled...on others default is system
+    DEFAULT_ZLIB=bundled
   fi
 
   if test "x${ZLIB_FOUND}" != "xyes"; then
--- a/common/autoconf/lib-bundled.m4	Mon Apr 04 02:10:46 2016 -0700
+++ b/common/autoconf/lib-bundled.m4	Tue Apr 05 15:39:34 2016 -0400
@@ -157,10 +157,10 @@
 
   AC_MSG_CHECKING([for which zlib to use])
 
-  DEFAULT_ZLIB=bundled
-  if test "x$OPENJDK_TARGET_OS" = xmacosx; then
-    # On macosx default is system...on others default is bundled
-    DEFAULT_ZLIB=system
+  DEFAULT_ZLIB=system
+  if test "x$OPENJDK_TARGET_OS" = xwindows; then
+    # On windows default is bundled...on others default is system
+    DEFAULT_ZLIB=bundled
   fi
 
   if test "x${ZLIB_FOUND}" != "xyes"; then
--- a/common/conf/jib-profiles.js	Mon Apr 04 02:10:46 2016 -0700
+++ b/common/conf/jib-profiles.js	Tue Apr 05 15:39:34 2016 -0400
@@ -241,7 +241,7 @@
             target_os: "linux",
             target_cpu: "x64",
             dependencies: concat(common.dependencies, "devkit"),
-            configure_args: common.configure_args,
+            configure_args: concat(common.configure_args, "--with-zlib=system"),
             make_args: common.make_args
         },
 
@@ -250,7 +250,8 @@
             target_cpu: "x86",
             build_cpu: "x64",
             dependencies: concat(common.dependencies, "devkit"),
-            configure_args: concat(common.configure_args, common.configure_args_32bit),
+            configure_args: concat(common.configure_args, common.configure_args_32bit,
+                "--with-zlib=system"),
             make_args: common.make_args
         },
 
@@ -258,7 +259,7 @@
             target_os: "macosx",
             target_cpu: "x64",
             dependencies: concat(common.dependencies, "devkit"),
-            configure_args: common.configure_args,
+            configure_args: concat(common.configure_args, "--with-zlib=system"),
             make_args: common.make_args
         },
 
@@ -266,7 +267,7 @@
             target_os: "solaris",
             target_cpu: "x64",
             dependencies: concat(common.dependencies, "devkit", "cups"),
-            configure_args: common.configure_args,
+            configure_args: concat(common.configure_args, "--with-zlib=system"),
             make_args: common.make_args
         },
 
@@ -274,7 +275,7 @@
             target_os: "solaris",
             target_cpu: "sparcv9",
             dependencies: concat(common.dependencies, "devkit", "cups"),
-            configure_args: common.configure_args,
+            configure_args: concat(common.configure_args, "--with-zlib=system"),
             make_args: common.make_args
         },
 
--- a/corba/.hgtags	Mon Apr 04 02:10:46 2016 -0700
+++ b/corba/.hgtags	Tue Apr 05 15:39:34 2016 -0400
@@ -353,3 +353,5 @@
 84f2862a25eb3232ff36c376b4e2bf2a83dfced3 jdk-9+108
 b75afa17aefe480c23c616a6a2497063312f7189 jdk-9+109
 9666775734fb6028ee86df9972626b3667b6a318 jdk-9+110
+2bb92dd44275679edb29fdbffc3b7cbebc9a6bf0 jdk-9+111
+780d0620add32bf545471cf65038c9ac6d9c036d jdk-9+112
--- a/hotspot/.hgtags	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/.hgtags	Tue Apr 05 15:39:34 2016 -0400
@@ -513,3 +513,5 @@
 934f6793f5f7dca44f69b4559d525fa64b31840d jdk-9+108
 7e7e50ac4faf19899fc811569e32cfa478759ebb jdk-9+109
 2f5d1578b24060ea06bd1f340a124db95d1475b2 jdk-9+110
+c558850fac5750d8ca98a45180121980f57cdd28 jdk-9+111
+76582e8dc9e6374e4f99ab797c8d364b6e9449b4 jdk-9+112
--- a/hotspot/make/lib/Lib-jdk.hotspot.agent.gmk	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/make/lib/Lib-jdk.hotspot.agent.gmk	Tue Apr 05 15:39:34 2016 -0400
@@ -91,7 +91,7 @@
   ifeq ($(OPENJDK_TARGET_CPU), x86_64)
     SA_CXXFLAGS += -DWIN64
   else
-    SA_CXXFLAGS += -RTC1 -ZI
+    SA_CXXFLAGS += -RTC1
     SA_LDFLAGS += -SAFESEH
   endif
 endif
--- a/hotspot/src/cpu/aarch64/vm/aarch64.ad	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/aarch64/vm/aarch64.ad	Tue Apr 05 15:39:34 2016 -0400
@@ -3425,9 +3425,6 @@
 // false => size gets scaled to BytesPerLong, ok.
 const bool Matcher::init_array_count_is_in_bytes = false;
 
-// Threshold size for cleararray.
-const int Matcher::init_array_short_size = 18 * BytesPerLong;
-
 // Use conditional move (CMOVL)
 const int Matcher::long_cmove_cost() {
   // long cmoves are no more expensive than int cmoves
@@ -4135,14 +4132,14 @@
     MacroAssembler _masm(&cbuf);
     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
-               &Assembler::ldxr, &MacroAssembler::cmp, &Assembler::stlxr);
+               Assembler::xword, /*acquire*/ false, /*release*/ true);
   %}
 
   enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
     MacroAssembler _masm(&cbuf);
     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
-               &Assembler::ldxrw, &MacroAssembler::cmpw, &Assembler::stlxrw);
+               Assembler::word, /*acquire*/ false, /*release*/ true);
   %}
 
 
@@ -4154,14 +4151,14 @@
     MacroAssembler _masm(&cbuf);
     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
-               &Assembler::ldaxr, &MacroAssembler::cmp, &Assembler::stlxr);
+               Assembler::xword, /*acquire*/ true, /*release*/ true);
   %}
 
   enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
     MacroAssembler _masm(&cbuf);
     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
-               &Assembler::ldaxrw, &MacroAssembler::cmpw, &Assembler::stlxrw);
+               Assembler::word, /*acquire*/ true, /*release*/ true);
   %}
 
 
@@ -4679,8 +4676,14 @@
 
     // Compare object markOop with mark and if equal exchange scratch1
     // with object markOop.
-    {
+    if (UseLSE) {
+      __ mov(tmp, disp_hdr);
+      __ casal(Assembler::xword, tmp, box, oop);
+      __ cmp(tmp, disp_hdr);
+      __ br(Assembler::EQ, cont);
+    } else {
       Label retry_load;
+      __ prfm(Address(oop), PSTL1STRM);
       __ bind(retry_load);
       __ ldaxr(tmp, oop);
       __ cmp(tmp, disp_hdr);
@@ -4729,8 +4732,13 @@
       __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
       __ mov(disp_hdr, zr);
 
-      {
+      if (UseLSE) {
+        __ mov(rscratch1, disp_hdr);
+        __ casal(Assembler::xword, rscratch1, rthread, tmp);
+        __ cmp(rscratch1, disp_hdr);
+      } else {
         Label retry_load, fail;
+        __ prfm(Address(tmp), PSTL1STRM);
         __ bind(retry_load);
         __ ldaxr(rscratch1, tmp);
         __ cmp(disp_hdr, rscratch1);
@@ -4818,8 +4826,13 @@
     // see the stack address of the basicLock in the markOop of the
     // object.
 
-      {
+      if (UseLSE) {
+        __ mov(tmp, box);
+        __ casl(Assembler::xword, tmp, disp_hdr, oop);
+        __ cmp(tmp, box);
+      } else {
         Label retry_load;
+        __ prfm(Address(oop), PSTL1STRM);
         __ bind(retry_load);
         __ ldxr(tmp, oop);
         __ cmp(box, tmp);
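
The LSE branches above replace each ldaxr/stlxr retry loop with a single casal. Because casal writes the value it observed back into its first register, the expected value is copied into a scratch register first and compared afterwards; EQ then means the swap happened. A minimal C++ model of the fast-lock sequence, using a GCC builtin as a stand-in for casal (a sketch, not HotSpot code; fast_lock_cas and mark_addr are illustrative names):

  #include <cstdint>

  static bool fast_lock_cas(uint64_t* mark_addr, uint64_t disp_hdr, uint64_t box) {
    uint64_t tmp = disp_hdr;                       // mov(tmp, disp_hdr)
    // casal: CAS with acquire+release ordering; tmp receives the observed
    // value whether or not the swap succeeded
    __atomic_compare_exchange_n(mark_addr, &tmp, box, /*weak*/ false,
                                __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
    return tmp == disp_hdr;                        // cmp(tmp, disp_hdr); br(EQ, cont)
  }
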
--- a/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -972,7 +972,7 @@
 
   // System
   void system(int op0, int op1, int CRn, int CRm, int op2,
-              Register rt = (Register)0b11111)
+              Register rt = dummy_reg)
   {
     starti;
     f(0b11010101000, 31, 21);
@@ -1082,7 +1082,7 @@
 
 #define INSN(NAME, opc)                         \
   void NAME() {                 \
-    branch_reg((Register)0b11111, opc);         \
+    branch_reg(dummy_reg, opc);         \
   }
 
   INSN(eret, 0b0100);
@@ -1094,10 +1094,22 @@
   enum operand_size { byte, halfword, word, xword };
 
   void load_store_exclusive(Register Rs, Register Rt1, Register Rt2,
-    Register Rn, enum operand_size sz, int op, int o0) {
+    Register Rn, enum operand_size sz, int op, bool ordered) {
     starti;
     f(sz, 31, 30), f(0b001000, 29, 24), f(op, 23, 21);
-    rf(Rs, 16), f(o0, 15), rf(Rt2, 10), rf(Rn, 5), rf(Rt1, 0);
+    rf(Rs, 16), f(ordered, 15), rf(Rt2, 10), rf(Rn, 5), rf(Rt1, 0);
+  }
+
+  void load_exclusive(Register dst, Register addr,
+                      enum operand_size sz, bool ordered) {
+    load_store_exclusive(dummy_reg, dst, dummy_reg, addr,
+                         sz, 0b010, ordered);
+  }
+
+  void store_exclusive(Register status, Register new_val, Register addr,
+                       enum operand_size sz, bool ordered) {
+    load_store_exclusive(status, new_val, dummy_reg, addr,
+                         sz, 0b000, ordered);
   }
 
 #define INSN4(NAME, sz, op, o0) /* Four registers */                    \
@@ -1109,19 +1121,19 @@
 #define INSN3(NAME, sz, op, o0) /* Three registers */                   \
   void NAME(Register Rs, Register Rt, Register Rn) {                    \
     guarantee(Rs != Rn && Rs != Rt, "unpredictable instruction");       \
-    load_store_exclusive(Rs, Rt, (Register)0b11111, Rn, sz, op, o0);    \
+    load_store_exclusive(Rs, Rt, dummy_reg, Rn, sz, op, o0); \
   }
 
 #define INSN2(NAME, sz, op, o0) /* Two registers */                     \
   void NAME(Register Rt, Register Rn) {                                 \
-    load_store_exclusive((Register)0b11111, Rt, (Register)0b11111,      \
+    load_store_exclusive(dummy_reg, Rt, dummy_reg, \
                          Rn, sz, op, o0);                               \
   }
 
 #define INSN_FOO(NAME, sz, op, o0) /* Three registers, encoded differently */ \
   void NAME(Register Rt1, Register Rt2, Register Rn) {                  \
     guarantee(Rt1 != Rt2, "unpredictable instruction");                 \
-    load_store_exclusive((Register)0b11111, Rt1, Rt2, Rn, sz, op, o0);  \
+    load_store_exclusive(dummy_reg, Rt1, Rt2, Rn, sz, op, o0);          \
   }
 
   // bytes
@@ -1169,6 +1181,46 @@
 #undef INSN4
 #undef INSN_FOO
 
+  // 8.1 Compare and swap extensions
+  void lse_cas(Register Rs, Register Rt, Register Rn,
+                        enum operand_size sz, bool a, bool r, bool not_pair) {
+    starti;
+    if (! not_pair) { // Pair
+      assert(sz == word || sz == xword, "invalid size");
+      /* The size bit is in bit 30, not 31 */
+      sz = (operand_size)(sz == word ? 0b00:0b01);
+    }
+    f(sz, 31, 30), f(0b001000, 29, 24), f(1, 23), f(a, 22), f(1, 21);
+    rf(Rs, 16), f(r, 15), f(0b11111, 14, 10), rf(Rn, 5), rf(Rt, 0);
+  }
+
+  // CAS
+#define INSN(NAME, a, r)                                                \
+  void NAME(operand_size sz, Register Rs, Register Rt, Register Rn) {   \
+    assert(Rs != Rn && Rs != Rt, "unpredictable instruction");          \
+    lse_cas(Rs, Rt, Rn, sz, a, r, true);                                \
+  }
+  INSN(cas,    false, false)
+  INSN(casa,   true,  false)
+  INSN(casl,   false, true)
+  INSN(casal,  true,  true)
+#undef INSN
+
+  // CASP
+#define INSN(NAME, a, r)                                                \
+  void NAME(operand_size sz, Register Rs, Register Rs1,                 \
+            Register Rt, Register Rt1, Register Rn) {                   \
+    assert((Rs->encoding() & 1) == 0 && (Rt->encoding() & 1) == 0 &&    \
+           Rs->successor() == Rs1 && Rt->successor() == Rt1 &&          \
+           Rs != Rn && Rs1 != Rn && Rs != Rt, "invalid registers");     \
+    lse_cas(Rs, Rt, Rn, sz, a, r, false);                               \
+  }
+  INSN(casp,    false, false)
+  INSN(caspa,   true,  false)
+  INSN(caspl,   false, true)
+  INSN(caspal,  true,  true)
+#undef INSN
+
   // Load register (literal)
 #define INSN(NAME, opc, V)                                              \
   void NAME(Register Rt, address dest) {                                \
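
The new lse_cas layout can be read directly off the f()/rf() calls above: size in bits 31:30, a fixed 001000/1/a/1 prefix, Rs (compare value and result), the release bit, a fixed 11111 field, Rn (address base) and Rt (new value). A sketch that assembles the same 32-bit word, mirroring those field calls (illustrative only, not the assembler itself):

  #include <cstdint>

  // sz: 0b10 = word, 0b11 = xword; a = acquire, r = release.
  static uint32_t encode_lse_cas(unsigned sz, bool a, bool r,
                                 unsigned rs, unsigned rn, unsigned rt) {
    uint32_t insn = 0;
    insn |= (sz & 3u)    << 30;  // f(sz, 31, 30)
    insn |= 0b001000u    << 24;  // f(0b001000, 29, 24)
    insn |= 1u           << 23;  // f(1, 23)
    insn |= (a ? 1u : 0u) << 22; // f(a, 22)       acquire
    insn |= 1u           << 21;  // f(1, 21)
    insn |= (rs & 31u)   << 16;  // rf(Rs, 16)     compare value / result
    insn |= (r ? 1u : 0u) << 15; // f(r, 15)       release
    insn |= 0b11111u     << 10;  // f(0b11111, 14, 10)
    insn |= (rn & 31u)   << 5;   // rf(Rn, 5)      address base
    insn |= (rt & 31u);          // rf(Rt, 0)      new value
    return insn;
  }
  // e.g. casal x0, x1, [x2]:
  //   encode_lse_cas(0b11, true, true, /*rs*/0, /*rn*/2, /*rt*/1) == 0xC8E0FC41
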
--- a/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -1556,38 +1556,54 @@
 }
 
 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
-  Label retry_load, nope;
-  // flush and load exclusive from the memory location
-  // and fail if it is not what we expect
-  __ bind(retry_load);
-  __ ldaxrw(rscratch1, addr);
-  __ cmpw(rscratch1, cmpval);
-  __ cset(rscratch1, Assembler::NE);
-  __ br(Assembler::NE, nope);
-  // if we store+flush with no intervening write rscratch1 wil be zero
-  __ stlxrw(rscratch1, newval, addr);
-  // retry so we only ever return after a load fails to compare
-  // ensures we don't return a stale value after a failed write.
-  __ cbnzw(rscratch1, retry_load);
-  __ bind(nope);
+  if (UseLSE) {
+    __ mov(rscratch1, cmpval);
+    __ casal(Assembler::word, rscratch1, newval, addr);
+    __ cmpw(rscratch1, cmpval);
+    __ cset(rscratch1, Assembler::NE);
+  } else {
+    Label retry_load, nope;
+    // flush and load exclusive from the memory location
+    // and fail if it is not what we expect
+    __ prfm(Address(addr), PSTL1STRM);
+    __ bind(retry_load);
+    __ ldaxrw(rscratch1, addr);
+    __ cmpw(rscratch1, cmpval);
+    __ cset(rscratch1, Assembler::NE);
+    __ br(Assembler::NE, nope);
+    // if we store+flush with no intervening write rscratch1 wil be zero
+    __ stlxrw(rscratch1, newval, addr);
+    // retry so we only ever return after a load fails to compare
+    // ensures we don't return a stale value after a failed write.
+    __ cbnzw(rscratch1, retry_load);
+    __ bind(nope);
+  }
   __ membar(__ AnyAny);
 }
 
 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
-  Label retry_load, nope;
-  // flush and load exclusive from the memory location
-  // and fail if it is not what we expect
-  __ bind(retry_load);
-  __ ldaxr(rscratch1, addr);
-  __ cmp(rscratch1, cmpval);
-  __ cset(rscratch1, Assembler::NE);
-  __ br(Assembler::NE, nope);
-  // if we store+flush with no intervening write rscratch1 wil be zero
-  __ stlxr(rscratch1, newval, addr);
-  // retry so we only ever return after a load fails to compare
-  // ensures we don't return a stale value after a failed write.
-  __ cbnz(rscratch1, retry_load);
-  __ bind(nope);
+  if (UseLSE) {
+    __ mov(rscratch1, cmpval);
+    __ casal(Assembler::xword, rscratch1, newval, addr);
+    __ cmp(rscratch1, cmpval);
+    __ cset(rscratch1, Assembler::NE);
+  } else {
+    Label retry_load, nope;
+    // flush and load exclusive from the memory location
+    // and fail if it is not what we expect
+    __ prfm(Address(addr), PSTL1STRM);
+    __ bind(retry_load);
+    __ ldaxr(rscratch1, addr);
+    __ cmp(rscratch1, cmpval);
+    __ cset(rscratch1, Assembler::NE);
+    __ br(Assembler::NE, nope);
+    // if we store+flush with no intervening write rscratch1 wil be zero
+    __ stlxr(rscratch1, newval, addr);
+    // retry so we only ever return after a load fails to compare
+    // ensures we don't return a stale value after a failed write.
+    __ cbnz(rscratch1, retry_load);
+    __ bind(nope);
+  }
   __ membar(__ AnyAny);
 }
 
@@ -3156,6 +3172,7 @@
       }
       Label again;
       __ lea(tmp, addr);
+      __ prfm(Address(tmp), PSTL1STRM);
       __ bind(again);
       (_masm->*lda)(dst, tmp);
       (_masm->*add)(rscratch1, dst, inc);
@@ -3175,6 +3192,7 @@
       assert_different_registers(obj, addr.base(), tmp, rscratch2, dst);
       Label again;
       __ lea(tmp, addr);
+      __ prfm(Address(tmp), PSTL1STRM);
       __ bind(again);
       (_masm->*lda)(dst, tmp);
       (_masm->*stl)(rscratch2, obj, tmp);
--- a/hotspot/src/cpu/aarch64/vm/globals_aarch64.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/aarch64/vm/globals_aarch64.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -76,6 +76,8 @@
 // avoid biased locking while we are bootstrapping the aarch64 build
 define_pd_global(bool, UseBiasedLocking, false);
 
+define_pd_global(intx, InitArrayShortSize, 18*BytesPerLong);
+
 #if defined(COMPILER1) || defined(COMPILER2)
 define_pd_global(intx, InlineSmallCode,          1000);
 #endif
@@ -101,9 +103,13 @@
                                                                         \
   product(bool, UseCRC32, false,                                        \
           "Use CRC32 instructions for CRC32 computation")               \
+                                                                        \
+  product(bool, UseLSE, false,                                          \
+          "Use LSE instructions")                                       \
 
 // Don't attempt to use Neon on builtin sim until builtin sim supports it
 #define UseCRC32 false
+#define UseSIMDForMemoryOps    false
 
 #else
 #define UseBuiltinSim           false
@@ -121,6 +127,10 @@
           "Use Neon for CRC32 computation")                             \
   product(bool, UseCRC32, false,                                        \
           "Use CRC32 instructions for CRC32 computation")               \
+  product(bool, UseSIMDForMemoryOps, false,                             \
+          "Use SIMD instructions in generated memory move code")        \
+  product(bool, UseLSE, false,                                          \
+          "Use LSE instructions")                                       \
   product(bool, TraceTraps, false, "Trace all traps the signal handler")
 
 #endif
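
Two things happen in this header: the per-platform Matcher::init_array_short_size constant removed from aarch64.ad above (and from ppc.ad and sparc.ad later in this diff) becomes a define_pd_global'd InitArrayShortSize flag, and two new product flags, UseLSE and UseSIMDForMemoryOps, gate the ARMv8.1 Large System Extensions and the SIMD copy paths. A hypothetical helper to show the intent of the flag migration (illustrative, not from this changeset):

  // Hypothetical call-site logic (sketch): short array initializations are
  // expanded inline, longer ones go to the clear-array stub. Comparisons
  // against the old Matcher::init_array_short_size constant become
  // comparisons against the InitArrayShortSize flag.
  static bool should_expand_array_init_inline(long size_in_bytes,
                                              long init_array_short_size) {
    return size_in_bytes <= init_array_short_size;
  }
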
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -1638,6 +1638,7 @@
 
 void MacroAssembler::atomic_incw(Register counter_addr, Register tmp, Register tmp2) {
   Label retry_load;
+  prfm(Address(counter_addr), PSTL1STRM);
   bind(retry_load);
   // flush and load exclusive from the memory location
   ldxrw(tmp, counter_addr);
@@ -2070,25 +2071,32 @@
   // oldv holds comparison value
   // newv holds value to write in exchange
   // addr identifies memory word to compare against/update
-  // tmp returns 0/1 for success/failure
-  Label retry_load, nope;
-
-  bind(retry_load);
-  // flush and load exclusive from the memory location
-  // and fail if it is not what we expect
-  ldaxr(tmp, addr);
-  cmp(tmp, oldv);
-  br(Assembler::NE, nope);
-  // if we store+flush with no intervening write tmp wil be zero
-  stlxr(tmp, newv, addr);
-  cbzw(tmp, succeed);
-  // retry so we only ever return after a load fails to compare
-  // ensures we don't return a stale value after a failed write.
-  b(retry_load);
-  // if the memory word differs we return it in oldv and signal a fail
-  bind(nope);
-  membar(AnyAny);
-  mov(oldv, tmp);
+  if (UseLSE) {
+    mov(tmp, oldv);
+    casal(Assembler::xword, oldv, newv, addr);
+    cmp(tmp, oldv);
+    br(Assembler::EQ, succeed);
+    membar(AnyAny);
+  } else {
+    Label retry_load, nope;
+    prfm(Address(addr), PSTL1STRM);
+    bind(retry_load);
+    // flush and load exclusive from the memory location
+    // and fail if it is not what we expect
+    ldaxr(tmp, addr);
+    cmp(tmp, oldv);
+    br(Assembler::NE, nope);
+    // if we store+flush with no intervening write tmp wil be zero
+    stlxr(tmp, newv, addr);
+    cbzw(tmp, succeed);
+    // retry so we only ever return after a load fails to compare
+    // ensures we don't return a stale value after a failed write.
+    b(retry_load);
+    // if the memory word differs we return it in oldv and signal a fail
+    bind(nope);
+    membar(AnyAny);
+    mov(oldv, tmp);
+  }
   if (fail)
     b(*fail);
 }
@@ -2099,28 +2107,64 @@
   // newv holds value to write in exchange
   // addr identifies memory word to compare against/update
   // tmp returns 0/1 for success/failure
-  Label retry_load, nope;
-
-  bind(retry_load);
-  // flush and load exclusive from the memory location
-  // and fail if it is not what we expect
-  ldaxrw(tmp, addr);
-  cmp(tmp, oldv);
-  br(Assembler::NE, nope);
-  // if we store+flush with no intervening write tmp wil be zero
-  stlxrw(tmp, newv, addr);
-  cbzw(tmp, succeed);
-  // retry so we only ever return after a load fails to compare
-  // ensures we don't return a stale value after a failed write.
-  b(retry_load);
-  // if the memory word differs we return it in oldv and signal a fail
-  bind(nope);
-  membar(AnyAny);
-  mov(oldv, tmp);
+  if (UseLSE) {
+    mov(tmp, oldv);
+    casal(Assembler::word, oldv, newv, addr);
+    cmp(tmp, oldv);
+    br(Assembler::EQ, succeed);
+    membar(AnyAny);
+  } else {
+    Label retry_load, nope;
+    prfm(Address(addr), PSTL1STRM);
+    bind(retry_load);
+    // flush and load exclusive from the memory location
+    // and fail if it is not what we expect
+    ldaxrw(tmp, addr);
+    cmp(tmp, oldv);
+    br(Assembler::NE, nope);
+    // if we store+flush with no intervening write tmp wil be zero
+    stlxrw(tmp, newv, addr);
+    cbzw(tmp, succeed);
+    // retry so we only ever return after a load fails to compare
+    // ensures we don't return a stale value after a failed write.
+    b(retry_load);
+    // if the memory word differs we return it in oldv and signal a fail
+    bind(nope);
+    membar(AnyAny);
+    mov(oldv, tmp);
+  }
   if (fail)
     b(*fail);
 }
 
+// A generic CAS; success or failure is in the EQ flag.
+void MacroAssembler::cmpxchg(Register addr, Register expected,
+                             Register new_val,
+                             enum operand_size size,
+                             bool acquire, bool release,
+                             Register tmp) {
+  if (UseLSE) {
+    mov(tmp, expected);
+    lse_cas(tmp, new_val, addr, size, acquire, release, /*not_pair*/ true);
+    cmp(tmp, expected);
+  } else {
+    BLOCK_COMMENT("cmpxchg {");
+    Label retry_load, done;
+    prfm(Address(addr), PSTL1STRM);
+    bind(retry_load);
+    load_exclusive(tmp, addr, size, acquire);
+    if (size == xword)
+      cmp(tmp, expected);
+    else
+      cmpw(tmp, expected);
+    br(Assembler::NE, done);
+    store_exclusive(tmp, new_val, addr, size, release);
+    cbnzw(tmp, retry_load);
+    bind(done);
+    BLOCK_COMMENT("} cmpxchg");
+  }
+}
+
 static bool different(Register a, RegisterOrConstant b, Register c) {
   if (b.is_constant())
     return a != c;
@@ -2135,6 +2179,7 @@
     result = different(prev, incr, addr) ? prev : rscratch2;            \
                                                                         \
   Label retry_load;                                                     \
+  prfm(Address(addr), PSTL1STRM);                                       \
   bind(retry_load);                                                     \
   LDXR(result, addr);                                                   \
   OP(rscratch1, result, incr);                                          \
@@ -2157,6 +2202,7 @@
     result = different(prev, newv, addr) ? prev : rscratch2;            \
                                                                         \
   Label retry_load;                                                     \
+  prfm(Address(addr), PSTL1STRM);                                       \
   bind(retry_load);                                                     \
   LDXR(result, addr);                                                   \
   STXR(rscratch1, newv, addr);                                          \
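
Besides the new out-of-line MacroAssembler::cmpxchg, parameterized by operand size and acquire/release bits instead of member-function pointers, a second change runs through every exclusive-access loop in this file (and in c1_LIRAssembler and the template interpreter): a prfm with PSTL1STRM is issued before the first ldxr/ldaxr, prefetching the line for store in streaming mode so the following store-exclusive is less likely to fail. A rough C analogue of that hint (a sketch; locality 0 approximates the streaming behavior of PSTL1STRM):

  // __builtin_prefetch(addr, rw, locality): rw = 1 prefetches for write,
  // locality 0 means streaming / low temporal reuse.
  __builtin_prefetch(addr, /*rw*/ 1, /*locality*/ 0);
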
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -971,21 +971,10 @@
   }
 
   // A generic CAS; success or failure is in the EQ flag.
-  template <typename T1, typename T2>
   void cmpxchg(Register addr, Register expected, Register new_val,
-               T1 load_insn,
-               void (MacroAssembler::*cmp_insn)(Register, Register),
-               T2 store_insn,
-               Register tmp = rscratch1) {
-    Label retry_load, done;
-    bind(retry_load);
-    (this->*load_insn)(tmp, addr);
-    (this->*cmp_insn)(tmp, expected);
-    br(Assembler::NE, done);
-    (this->*store_insn)(tmp, new_val, addr);
-    cbnzw(tmp, retry_load);
-    bind(done);
-  }
+               enum operand_size size,
+               bool acquire, bool release,
+               Register tmp = rscratch1);
 
   // Calls
 
--- a/hotspot/src/cpu/aarch64/vm/register_aarch64.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/aarch64/vm/register_aarch64.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -107,6 +107,9 @@
 CONSTANT_REGISTER_DECLARATION(Register, zr,  (32));
 CONSTANT_REGISTER_DECLARATION(Register, sp,  (33));
 
+// Used as a filler in instructions where a register field is unused.
+const Register dummy_reg = r31_sp;
+
 // Use FloatRegister as shortcut
 class FloatRegisterImpl;
 typedef FloatRegisterImpl* FloatRegister;
--- a/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -729,7 +729,7 @@
   //
   // count is a count of words.
   //
-  // Precondition: count >= 2
+  // Precondition: count >= 8
   //
   // Postconditions:
   //
@@ -741,6 +741,7 @@
   void generate_copy_longs(Label &start, Register s, Register d, Register count,
                            copy_direction direction) {
     int unit = wordSize * direction;
+    int bias = (UseSIMDForMemoryOps ? 4:2) * wordSize;
 
     int offset;
     const Register t0 = r3, t1 = r4, t2 = r5, t3 = r6,
@@ -750,7 +751,7 @@
     assert_different_registers(rscratch1, t0, t1, t2, t3, t4, t5, t6, t7);
     assert_different_registers(s, d, count, rscratch1);
 
-    Label again, large, small;
+    Label again, drain;
     const char *stub_name;
     if (direction == copy_forwards)
       stub_name = "foward_copy_longs";
@@ -759,57 +760,35 @@
     StubCodeMark mark(this, "StubRoutines", stub_name);
     __ align(CodeEntryAlignment);
     __ bind(start);
-    __ cmp(count, 8);
-    __ br(Assembler::LO, small);
     if (direction == copy_forwards) {
-      __ sub(s, s, 2 * wordSize);
-      __ sub(d, d, 2 * wordSize);
+      __ sub(s, s, bias);
+      __ sub(d, d, bias);
     }
+
+#ifdef ASSERT
+    // Make sure we are never given < 8 words
+    {
+      Label L;
+      __ cmp(count, 8);
+      __ br(Assembler::GE, L);
+      __ stop("genrate_copy_longs called with < 8 words");
+      __ bind(L);
+    }
+#endif
+
+    // Fill 8 registers
+    if (UseSIMDForMemoryOps) {
+      __ ldpq(v0, v1, Address(s, 4 * unit));
+      __ ldpq(v2, v3, Address(__ pre(s, 8 * unit)));
+    } else {
+      __ ldp(t0, t1, Address(s, 2 * unit));
+      __ ldp(t2, t3, Address(s, 4 * unit));
+      __ ldp(t4, t5, Address(s, 6 * unit));
+      __ ldp(t6, t7, Address(__ pre(s, 8 * unit)));
+    }
+
     __ subs(count, count, 16);
-    __ br(Assembler::GE, large);
-
-    // 8 <= count < 16 words.  Copy 8.
-    __ ldp(t0, t1, Address(s, 2 * unit));
-    __ ldp(t2, t3, Address(s, 4 * unit));
-    __ ldp(t4, t5, Address(s, 6 * unit));
-    __ ldp(t6, t7, Address(__ pre(s, 8 * unit)));
-
-    __ stp(t0, t1, Address(d, 2 * unit));
-    __ stp(t2, t3, Address(d, 4 * unit));
-    __ stp(t4, t5, Address(d, 6 * unit));
-    __ stp(t6, t7, Address(__ pre(d, 8 * unit)));
-
-    if (direction == copy_forwards) {
-      __ add(s, s, 2 * wordSize);
-      __ add(d, d, 2 * wordSize);
-    }
-
-    {
-      Label L1, L2;
-      __ bind(small);
-      __ tbz(count, exact_log2(4), L1);
-      __ ldp(t0, t1, Address(__ adjust(s, 2 * unit, direction == copy_backwards)));
-      __ ldp(t2, t3, Address(__ adjust(s, 2 * unit, direction == copy_backwards)));
-      __ stp(t0, t1, Address(__ adjust(d, 2 * unit, direction == copy_backwards)));
-      __ stp(t2, t3, Address(__ adjust(d, 2 * unit, direction == copy_backwards)));
-      __ bind(L1);
-
-      __ tbz(count, 1, L2);
-      __ ldp(t0, t1, Address(__ adjust(s, 2 * unit, direction == copy_backwards)));
-      __ stp(t0, t1, Address(__ adjust(d, 2 * unit, direction == copy_backwards)));
-      __ bind(L2);
-    }
-
-    __ ret(lr);
-
-    __ align(CodeEntryAlignment);
-    __ bind(large);
-
-    // Fill 8 registers
-    __ ldp(t0, t1, Address(s, 2 * unit));
-    __ ldp(t2, t3, Address(s, 4 * unit));
-    __ ldp(t4, t5, Address(s, 6 * unit));
-    __ ldp(t6, t7, Address(__ pre(s, 8 * unit)));
+    __ br(Assembler::LO, drain);
 
     int prefetch = PrefetchCopyIntervalInBytes;
     bool use_stride = false;
@@ -824,38 +803,56 @@
     if (PrefetchCopyIntervalInBytes > 0)
       __ prfm(use_stride ? Address(s, stride) : Address(s, prefetch), PLDL1KEEP);
 
-    __ stp(t0, t1, Address(d, 2 * unit));
-    __ ldp(t0, t1, Address(s, 2 * unit));
-    __ stp(t2, t3, Address(d, 4 * unit));
-    __ ldp(t2, t3, Address(s, 4 * unit));
-    __ stp(t4, t5, Address(d, 6 * unit));
-    __ ldp(t4, t5, Address(s, 6 * unit));
-    __ stp(t6, t7, Address(__ pre(d, 8 * unit)));
-    __ ldp(t6, t7, Address(__ pre(s, 8 * unit)));
+    if (UseSIMDForMemoryOps) {
+      __ stpq(v0, v1, Address(d, 4 * unit));
+      __ ldpq(v0, v1, Address(s, 4 * unit));
+      __ stpq(v2, v3, Address(__ pre(d, 8 * unit)));
+      __ ldpq(v2, v3, Address(__ pre(s, 8 * unit)));
+    } else {
+      __ stp(t0, t1, Address(d, 2 * unit));
+      __ ldp(t0, t1, Address(s, 2 * unit));
+      __ stp(t2, t3, Address(d, 4 * unit));
+      __ ldp(t2, t3, Address(s, 4 * unit));
+      __ stp(t4, t5, Address(d, 6 * unit));
+      __ ldp(t4, t5, Address(s, 6 * unit));
+      __ stp(t6, t7, Address(__ pre(d, 8 * unit)));
+      __ ldp(t6, t7, Address(__ pre(s, 8 * unit)));
+    }
 
     __ subs(count, count, 8);
     __ br(Assembler::HS, again);
 
     // Drain
-    __ stp(t0, t1, Address(d, 2 * unit));
-    __ stp(t2, t3, Address(d, 4 * unit));
-    __ stp(t4, t5, Address(d, 6 * unit));
-    __ stp(t6, t7, Address(__ pre(d, 8 * unit)));
-
-    if (direction == copy_forwards) {
-      __ add(s, s, 2 * wordSize);
-      __ add(d, d, 2 * wordSize);
+    __ bind(drain);
+    if (UseSIMDForMemoryOps) {
+      __ stpq(v0, v1, Address(d, 4 * unit));
+      __ stpq(v2, v3, Address(__ pre(d, 8 * unit)));
+    } else {
+      __ stp(t0, t1, Address(d, 2 * unit));
+      __ stp(t2, t3, Address(d, 4 * unit));
+      __ stp(t4, t5, Address(d, 6 * unit));
+      __ stp(t6, t7, Address(__ pre(d, 8 * unit)));
     }
 
     {
       Label L1, L2;
       __ tbz(count, exact_log2(4), L1);
-      __ ldp(t0, t1, Address(__ adjust(s, 2 * unit, direction == copy_backwards)));
-      __ ldp(t2, t3, Address(__ adjust(s, 2 * unit, direction == copy_backwards)));
-      __ stp(t0, t1, Address(__ adjust(d, 2 * unit, direction == copy_backwards)));
-      __ stp(t2, t3, Address(__ adjust(d, 2 * unit, direction == copy_backwards)));
+      if (UseSIMDForMemoryOps) {
+        __ ldpq(v0, v1, Address(__ pre(s, 4 * unit)));
+        __ stpq(v0, v1, Address(__ pre(d, 4 * unit)));
+      } else {
+        __ ldp(t0, t1, Address(s, 2 * unit));
+        __ ldp(t2, t3, Address(__ pre(s, 4 * unit)));
+        __ stp(t0, t1, Address(d, 2 * unit));
+        __ stp(t2, t3, Address(__ pre(d, 4 * unit)));
+      }
       __ bind(L1);
 
+      if (direction == copy_forwards) {
+        __ add(s, s, bias);
+        __ add(d, d, bias);
+      }
+
       __ tbz(count, 1, L2);
       __ ldp(t0, t1, Address(__ adjust(s, 2 * unit, direction == copy_backwards)));
       __ stp(t0, t1, Address(__ adjust(d, 2 * unit, direction == copy_backwards)));
@@ -931,16 +928,135 @@
     int granularity = uabs(step);
     const Register t0 = r3, t1 = r4;
 
+    // <= 96 bytes do inline. Direction doesn't matter because we always
+    // load all the data before writing anything
+    Label copy4, copy8, copy16, copy32, copy80, copy128, copy_big, finish;
+    const Register t2 = r5, t3 = r6, t4 = r7, t5 = r8;
+    const Register t6 = r9, t7 = r10, t8 = r11, t9 = r12;
+    const Register send = r17, dend = r18;
+
+    if (PrefetchCopyIntervalInBytes > 0)
+      __ prfm(Address(s, 0), PLDL1KEEP);
+    __ cmp(count, (UseSIMDForMemoryOps ? 96:80)/granularity);
+    __ br(Assembler::HI, copy_big);
+
+    __ lea(send, Address(s, count, Address::lsl(exact_log2(granularity))));
+    __ lea(dend, Address(d, count, Address::lsl(exact_log2(granularity))));
+
+    __ cmp(count, 16/granularity);
+    __ br(Assembler::LS, copy16);
+
+    __ cmp(count, 64/granularity);
+    __ br(Assembler::HI, copy80);
+
+    __ cmp(count, 32/granularity);
+    __ br(Assembler::LS, copy32);
+
+    // 33..64 bytes
+    if (UseSIMDForMemoryOps) {
+      __ ldpq(v0, v1, Address(s, 0));
+      __ ldpq(v2, v3, Address(send, -32));
+      __ stpq(v0, v1, Address(d, 0));
+      __ stpq(v2, v3, Address(dend, -32));
+    } else {
+      __ ldp(t0, t1, Address(s, 0));
+      __ ldp(t2, t3, Address(s, 16));
+      __ ldp(t4, t5, Address(send, -32));
+      __ ldp(t6, t7, Address(send, -16));
+
+      __ stp(t0, t1, Address(d, 0));
+      __ stp(t2, t3, Address(d, 16));
+      __ stp(t4, t5, Address(dend, -32));
+      __ stp(t6, t7, Address(dend, -16));
+    }
+    __ b(finish);
+
+    // 17..32 bytes
+    __ bind(copy32);
+    __ ldp(t0, t1, Address(s, 0));
+    __ ldp(t2, t3, Address(send, -16));
+    __ stp(t0, t1, Address(d, 0));
+    __ stp(t2, t3, Address(dend, -16));
+    __ b(finish);
+
+    // 65..80/96 bytes
+    // (96 bytes if SIMD because we do 32 bytes per instruction)
+    __ bind(copy80);
+    if (UseSIMDForMemoryOps) {
+      __ ldpq(v0, v1, Address(s, 0));
+      __ ldpq(v2, v3, Address(s, 32));
+      __ ldpq(v4, v5, Address(send, -32));
+      __ stpq(v0, v1, Address(d, 0));
+      __ stpq(v2, v3, Address(d, 32));
+      __ stpq(v4, v5, Address(dend, -32));
+    } else {
+      __ ldp(t0, t1, Address(s, 0));
+      __ ldp(t2, t3, Address(s, 16));
+      __ ldp(t4, t5, Address(s, 32));
+      __ ldp(t6, t7, Address(s, 48));
+      __ ldp(t8, t9, Address(send, -16));
+
+      __ stp(t0, t1, Address(d, 0));
+      __ stp(t2, t3, Address(d, 16));
+      __ stp(t4, t5, Address(d, 32));
+      __ stp(t6, t7, Address(d, 48));
+      __ stp(t8, t9, Address(dend, -16));
+    }
+    __ b(finish);
+
+    // 0..16 bytes
+    __ bind(copy16);
+    __ cmp(count, 8/granularity);
+    __ br(Assembler::LO, copy8);
+
+    // 8..16 bytes
+    __ ldr(t0, Address(s, 0));
+    __ ldr(t1, Address(send, -8));
+    __ str(t0, Address(d, 0));
+    __ str(t1, Address(dend, -8));
+    __ b(finish);
+
+    if (granularity < 8) {
+      // 4..7 bytes
+      __ bind(copy8);
+      __ tbz(count, 2 - exact_log2(granularity), copy4);
+      __ ldrw(t0, Address(s, 0));
+      __ ldrw(t1, Address(send, -4));
+      __ strw(t0, Address(d, 0));
+      __ strw(t1, Address(dend, -4));
+      __ b(finish);
+      if (granularity < 4) {
+        // 0..3 bytes
+        __ bind(copy4);
+        __ cbz(count, finish); // get rid of 0 case
+        if (granularity == 2) {
+          __ ldrh(t0, Address(s, 0));
+          __ strh(t0, Address(d, 0));
+        } else { // granularity == 1
+          // Now 1..3 bytes. Handle the 1 and 2 byte case by copying
+          // the first and last byte.
+          // Handle the 3 byte case by loading and storing base + count/2
+          // (count == 1 (s+0)->(d+0), count == 2,3 (s+1) -> (d+1))
+          // This means in the 1 byte case we load/store the same
+          // byte 3 times.
+          __ lsr(count, count, 1);
+          __ ldrb(t0, Address(s, 0));
+          __ ldrb(t1, Address(send, -1));
+          __ ldrb(t2, Address(s, count));
+          __ strb(t0, Address(d, 0));
+          __ strb(t1, Address(dend, -1));
+          __ strb(t2, Address(d, count));
+        }
+        __ b(finish);
+      }
+    }
+
+    __ bind(copy_big);
     if (is_backwards) {
       __ lea(s, Address(s, count, Address::lsl(exact_log2(-step))));
       __ lea(d, Address(d, count, Address::lsl(exact_log2(-step))));
     }
 
-    Label tail;
-
-    __ cmp(count, 16/granularity);
-    __ br(Assembler::LO, tail);
-
     // Now we've got the small case out of the way we can align the
     // source address on a 2-word boundary.
 
@@ -986,8 +1102,6 @@
 #endif
     }
 
-    __ cmp(count, 16/granularity);
-    __ br(Assembler::LT, tail);
     __ bind(aligned);
 
     // s is now 2-word-aligned.
@@ -1001,9 +1115,11 @@
       __ bl(copy_b);
 
     // And the tail.
-
-    __ bind(tail);
     copy_memory_small(s, d, count, tmp, step);
+
+    if (granularity >= 8) __ bind(copy8);
+    if (granularity >= 4) __ bind(copy4);
+    __ bind(finish);
   }
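
The rewritten small-copy path relies on one idea throughout: load both the head and the tail of the range before storing anything, letting the two windows overlap whenever the count is below their combined size. That is also why direction does not matter below the 80/96-byte threshold. Two of the cases, modeled in plain C++ (sketches, using memcpy for the unaligned accesses):

  #include <cstdint>
  #include <cstring>

  // 8..16 bytes: one 8-byte load from each end covers the whole range,
  // because the two windows overlap whenever count < 16.
  static void copy_8_to_16(uint8_t* d, const uint8_t* s, size_t count) {
    uint64_t head, tail;
    memcpy(&head, s, 8);
    memcpy(&tail, s + count - 8, 8);
    memcpy(d, &head, 8);
    memcpy(d + count - 8, &tail, 8);
  }

  // 1..3 bytes: storing s[0], s[count-1] and s[count/2] covers every count
  // (count 1: all three hit byte 0; count 2: bytes 0,1; count 3: bytes 0,1,2).
  static void copy_1_to_3(uint8_t* d, const uint8_t* s, size_t count) {
    uint8_t a = s[0], b = s[count - 1], c = s[count >> 1];
    d[0] = a;
    d[count - 1] = b;
    d[count >> 1] = c;
  }
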
 
 
--- a/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -1984,6 +1984,7 @@
   __ push(rscratch3);
   Label L;
   __ mov(rscratch2, (address) &BytecodeCounter::_counter_value);
+  __ prfm(Address(rscratch2), PSTL1STRM);
   __ bind(L);
   __ ldxr(rscratch1, rscratch2);
   __ add(rscratch1, rscratch1, 1);
--- a/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -61,6 +61,10 @@
 #define HWCAP_CRC32 (1<<7)
 #endif
 
+#ifndef HWCAP_ATOMICS
+#define HWCAP_ATOMICS (1<<8)
+#endif
+
 int VM_Version::_cpu;
 int VM_Version::_model;
 int VM_Version::_model2;
@@ -172,6 +176,7 @@
   if (auxv & HWCAP_AES)   strcat(buf, ", aes");
   if (auxv & HWCAP_SHA1)  strcat(buf, ", sha1");
   if (auxv & HWCAP_SHA2)  strcat(buf, ", sha256");
+  if (auxv & HWCAP_ATOMICS) strcat(buf, ", lse");
 
   _features_string = os::strdup(buf);
 
@@ -191,6 +196,15 @@
     FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
   }
 
+  if (auxv & HWCAP_ATOMICS) {
+    if (FLAG_IS_DEFAULT(UseLSE))
+      FLAG_SET_DEFAULT(UseLSE, true);
+  } else {
+    if (UseLSE) {
+      warning("UseLSE specified, but not supported on this CPU");
+    }
+  }
+
   if (auxv & HWCAP_AES) {
     UseAES = UseAES || FLAG_IS_DEFAULT(UseAES);
     UseAESIntrinsics =
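
HWCAP_ATOMICS is the Linux auxiliary-vector bit advertising the ARMv8.1 atomics (LSE); the #define guard covers libc headers that predate it. How such a probe typically looks on Linux/AArch64 (a sketch; the detection code above reads an already-fetched auxv value rather than calling getauxval at this point):

  #include <sys/auxv.h>

  #ifndef HWCAP_ATOMICS
  #define HWCAP_ATOMICS (1 << 8)
  #endif

  static bool cpu_has_lse() {
    // AT_HWCAP carries the kernel's CPU feature bits on AArch64
    return (getauxval(AT_HWCAP) & HWCAP_ATOMICS) != 0;
  }
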
--- a/hotspot/src/cpu/ppc/vm/globalDefinitions_ppc.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/ppc/vm/globalDefinitions_ppc.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -47,7 +47,7 @@
 // The expected size in bytes of a cache line, used to pad data structures.
 #define DEFAULT_CACHE_LINE_SIZE 128
 
-#if defined(COMPILER2) && defined(AIX)
+#if defined(COMPILER2) && (defined(AIX) || defined(linux))
 // Include Transactional Memory lock eliding optimization
 #define INCLUDE_RTM_OPT 1
 #endif
--- a/hotspot/src/cpu/ppc/vm/globals_ppc.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/ppc/vm/globals_ppc.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -76,6 +76,8 @@
 
 define_pd_global(bool, CompactStrings, true);
 
+define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
+
 // Platform dependent flag handling: flags only defined on this platform.
 #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint)  \
                                                                             \
--- a/hotspot/src/cpu/ppc/vm/ppc.ad	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/ppc/vm/ppc.ad	Tue Apr 05 15:39:34 2016 -0400
@@ -2137,8 +2137,6 @@
   return decode;
 }
 */
-// Threshold size for cleararray.
-const int Matcher::init_array_short_size = 8 * BytesPerLong;
 
 // false => size gets scaled to BytesPerLong, ok.
 const bool Matcher::init_array_count_is_in_bytes = false;
--- a/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -255,7 +255,16 @@
     }
 #endif
 #ifdef linux
-    // TODO: check kernel version (we currently have too old versions only)
+    // At least Linux kernel 4.2, as the problematic behavior of syscalls
+    // being called in the middle of a transaction has been addressed.
+    // Please, refer to commit b4b56f9ecab40f3b4ef53e130c9f6663be491894
+    // in Linux kernel source tree: https://goo.gl/Kc5i7A
+    if (os::Linux::os_version_is_known()) {
+      if (os::Linux::os_version() >= 0x040200)
+        os_too_old = false;
+    } else {
+      vm_exit_during_initialization("RTM can not be enabled: kernel version is unknown.");
+    }
 #endif
     if (os_too_old) {
       vm_exit_during_initialization("RTM is not supported on this OS version.");
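
The 0x040200 comparison assumes os::Linux::os_version() packs the kernel version one byte per component, major.minor.patch, so 4.2.0 encodes as 0x040200. A sketch of that packing (an assumption about the helper, which is not shown in this hunk):

  #include <cstdint>

  // Hypothetical packing matching the 0x040200 literal above:
  static uint32_t pack_kernel_version(uint32_t major, uint32_t minor,
                                      uint32_t patch) {
    return (major << 16) | (minor << 8) | patch;
  }
  // pack_kernel_version(4, 2, 0) == 0x040200
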
--- a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -90,6 +90,8 @@
 
 define_pd_global(bool, CompactStrings, true);
 
+define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
+
 #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \
                                                                             \
   product(intx, UseVIS, 99,                                                 \
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Tue Apr 05 15:39:34 2016 -0400
@@ -1980,9 +1980,6 @@
 // No scaling for the parameter the ClearArray node.
 const bool Matcher::init_array_count_is_in_bytes = true;
 
-// Threshold size for cleararray.
-const int Matcher::init_array_short_size = 8 * BytesPerLong;
-
 // No additional cost for CMOVL.
 const int Matcher::long_cmove_cost() { return 0; }
 
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -777,6 +777,7 @@
     case 0x6E: // movd
     case 0x7E: // movd
     case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
+    case 0xFE: // paddd
       debug_only(has_disp32 = true);
       break;
 
@@ -926,6 +927,7 @@
     ip++; // skip P2, move to opcode
     // To find the end of instruction (which == end_pc_operand).
     switch (0xFF & *ip) {
+    case 0x22: // pinsrd r, r/a, #8
     case 0x61: // pcmpestri r, r/a, #8
     case 0x70: // pshufd r, r/a, #8
     case 0x73: // psrldq r, #8
@@ -3953,6 +3955,83 @@
   emit_int8((unsigned char)(0xC0 | encode));
 }
 
+void Assembler::palignr(XMMRegister dst, XMMRegister src, int imm8) {
+  assert(VM_Version::supports_ssse3(), "");
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ false);
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+  emit_int8((unsigned char)0x0F);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8(imm8);
+}
+
+void Assembler::pblendw(XMMRegister dst, XMMRegister src, int imm8) {
+  assert(VM_Version::supports_sse4_1(), "");
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+  emit_int8((unsigned char)0x0E);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8(imm8);
+}
+
+void Assembler::sha1rnds4(XMMRegister dst, XMMRegister src, int imm8) {
+  assert(VM_Version::supports_sha(), "");
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_3A, &attributes);
+  emit_int8((unsigned char)0xCC);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8((unsigned char)imm8);
+}
+
+void Assembler::sha1nexte(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_sha(), "");
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
+  emit_int8((unsigned char)0xC8);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::sha1msg1(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_sha(), "");
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
+  emit_int8((unsigned char)0xC9);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::sha1msg2(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_sha(), "");
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
+  emit_int8((unsigned char)0xCA);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+// xmm0 is implicit additional source to this instruction.
+void Assembler::sha256rnds2(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_sha(), "");
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
+  emit_int8((unsigned char)0xCB);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::sha256msg1(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_sha(), "");
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
+  emit_int8((unsigned char)0xCC);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::sha256msg2(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_sha(), "");
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
+  emit_int8((unsigned char)0xCD);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+
 void Assembler::shll(Register dst, int imm8) {
   assert(isShiftCount(imm8), "illegal shift count");
   int encode = prefix_and_encode(dst->encoding());
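
The seven new SHA helpers differ only in their trailing opcode byte; the rest (legacy 128-bit encoding, no SIMD prefix, 0F 38 or 0F 3A opcode map) is shared, as the emit_int8 values above spell out. The map, read off the emitters (a reference sketch, not new code):

  //   sha1rnds4    NP 0F 3A CC /r ib   (the only one taking an imm8)
  //   sha1nexte    NP 0F 38 C8 /r
  //   sha1msg1     NP 0F 38 C9 /r
  //   sha1msg2     NP 0F 38 CA /r
  //   sha256rnds2  NP 0F 38 CB /r     (xmm0 is an implicit third source)
  //   sha256msg1   NP 0F 38 CC /r
  //   sha256msg2   NP 0F 38 CD /r
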
@@ -4931,6 +5010,15 @@
   emit_int8((unsigned char)(0xC0 | encode));
 }
 
+void Assembler::paddd(XMMRegister dst, Address src) {
+  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+  emit_int8((unsigned char)0xFE);
+  emit_operand(dst, src);
+}
+
 void Assembler::paddq(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
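
The new paddd(XMMRegister, Address) overload adds a memory-operand form; PADDD itself is four independent 32-bit lane additions. A semantic sketch of what the instruction computes (the architectural behavior, not the assembler):

  #include <cstdint>

  static void paddd_model(uint32_t dst[4], const uint32_t src[4]) {
    for (int i = 0; i < 4; i++)
      dst[i] += src[i];   // each 32-bit lane wraps independently on overflow
  }
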
@@ -5611,8 +5699,9 @@
 }
 
 
-void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
+void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_avx(), "");
+  assert(imm8 <= 0x01, "imm8: %u", imm8);
   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
@@ -5621,11 +5710,12 @@
   emit_int8((unsigned char)(0xC0 | encode));
   // 0x00 - insert into lower 128 bits
   // 0x01 - insert into upper 128 bits
-  emit_int8(0x01);
-}
-
-void Assembler::vinsertf64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value) {
+  emit_int8(imm8 & 0x01);
+}
+
+void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_evex(), "");
+  assert(imm8 <= 0x01, "imm8: %u", imm8);
   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@@ -5633,26 +5723,29 @@
   emit_int8((unsigned char)(0xC0 | encode));
   // 0x00 - insert into lower 256 bits
   // 0x01 - insert into upper 256 bits
-  emit_int8(value & 0x01);
-}
-
-void Assembler::vinsertf64x4h(XMMRegister dst, Address src, int value) {
+  emit_int8(imm8 & 0x01);
+}
+
+void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
   assert(VM_Version::supports_evex(), "");
   assert(dst != xnoreg, "sanity");
+  assert(imm8 <= 0x01, "imm8: %u", imm8);
   InstructionMark im(this);
   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
   // swap src<->dst for encoding
-  vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x1A);
   emit_operand(dst, src);
   // 0x00 - insert into lower 256 bits
-  // 0x01 - insert into upper 128 bits
-  emit_int8(value & 0x01);
-}
-
-void Assembler::vinsertf32x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value) {
+  // 0x01 - insert into upper 256 bits
+  emit_int8(imm8 & 0x01);
+}
+
+void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_evex(), "");
+  assert(imm8 <= 0x03, "imm8: %u", imm8);
   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
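
This block of renames (vinsertf128h to vinsertf128, and so on) turns the previously hard-coded upper-half insert into a real imm8 operand, with asserts bounding it: one bit for 128-in-256 inserts, two bits for 128-in-512. The lane selection, modeled in C++ (a sketch of what the instruction does, not JIT code):

  #include <cstdint>

  struct Ymm { uint64_t q[4]; };   // a 256-bit register as four quadwords

  // vinsertf128 dst, nds, src, imm8: copy nds, then replace the 128-bit
  // lane selected by imm8 & 1 with src.
  static Ymm vinsertf128_model(Ymm nds, const uint64_t src[2], uint8_t imm8) {
    Ymm dst = nds;
    int lane = imm8 & 1;
    dst.q[2 * lane]     = src[0];
    dst.q[2 * lane + 1] = src[1];
    return dst;
  }
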
@@ -5662,57 +5755,64 @@
   // 0x01 - insert into q1 128 bits (128..255)
   // 0x02 - insert into q2 128 bits (256..383)
   // 0x03 - insert into q3 128 bits (384..511)
-  emit_int8(value & 0x3);
-}
-
-void Assembler::vinsertf32x4h(XMMRegister dst, Address src, int value) {
+  emit_int8(imm8 & 0x03);
+}
+
+void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
   assert(VM_Version::supports_avx(), "");
   assert(dst != xnoreg, "sanity");
+  assert(imm8 <= 0x03, "imm8: %u", imm8);
   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
+  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   InstructionMark im(this);
   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
   // swap src<->dst for encoding
-  vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x18);
   emit_operand(dst, src);
   // 0x00 - insert into q0 128 bits (0..127)
   // 0x01 - insert into q1 128 bits (128..255)
   // 0x02 - insert into q2 128 bits (256..383)
   // 0x03 - insert into q3 128 bits (384..511)
-  emit_int8(value & 0x3);
-}
-
-void Assembler::vinsertf128h(XMMRegister dst, Address src) {
+  emit_int8(imm8 & 0x03);
+}
+
+void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
   assert(VM_Version::supports_avx(), "");
   assert(dst != xnoreg, "sanity");
+  assert(imm8 <= 0x01, "imm8: %u", imm8);
   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
+  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   InstructionMark im(this);
   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
   // swap src<->dst for encoding
-  vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x18);
   emit_operand(dst, src);
+  // 0x00 - insert into lower 128 bits
   // 0x01 - insert into upper 128 bits
-  emit_int8(0x01);
-}
-
-void Assembler::vextractf128h(XMMRegister dst, XMMRegister src) {
+  emit_int8(imm8 & 0x01);
+}
+
+void Assembler::vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_avx(), "");
+  assert(imm8 <= 0x01, "imm8: %u", imm8);
   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x19);
   emit_int8((unsigned char)(0xC0 | encode));
-  // 0x00 - insert into lower 128 bits
-  // 0x01 - insert into upper 128 bits
-  emit_int8(0x01);
-}
-
-void Assembler::vextractf128h(Address dst, XMMRegister src) {
+  // 0x00 - extract from lower 128 bits
+  // 0x01 - extract from upper 128 bits
+  emit_int8(imm8 & 0x01);
+}
+
+void Assembler::vextractf128(Address dst, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_avx(), "");
   assert(src != xnoreg, "sanity");
+  assert(imm8 <= 0x01, "imm8: %u", imm8);
   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
   InstructionMark im(this);
   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
@@ -5720,12 +5820,14 @@
   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x19);
   emit_operand(src, dst);
+  // 0x00 - extract from lower 128 bits
   // 0x01 - extract from upper 128 bits
-  emit_int8(0x01);
-}
-
-void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
+  emit_int8(imm8 & 0x01);
+}
+
+void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_avx2(), "");
+  assert(imm8 <= 0x01, "imm8: %u", imm8);
   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
@@ -5734,11 +5836,12 @@
   emit_int8((unsigned char)(0xC0 | encode));
   // 0x00 - insert into lower 128 bits
   // 0x01 - insert into upper 128 bits
-  emit_int8(0x01);
-}
-
-void Assembler::vinserti64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value) {
+  emit_int8(imm8 & 0x01);
+}
+
+void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_evex(), "");
+  assert(imm8 <= 0x01, "imm8: %u", imm8);
   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@@ -5746,39 +5849,44 @@
   emit_int8((unsigned char)(0xC0 | encode));
   // 0x00 - insert into lower 256 bits
   // 0x01 - insert into upper 256 bits
-  emit_int8(value & 0x01);
-}
-
-void Assembler::vinserti128h(XMMRegister dst, Address src) {
+  emit_int8(imm8 & 0x01);
+}
+
+void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
   assert(VM_Version::supports_avx2(), "");
   assert(dst != xnoreg, "sanity");
+  assert(imm8 <= 0x01, "imm8: %u", imm8);
   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
+  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   InstructionMark im(this);
   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
   // swap src<->dst for encoding
-  vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x38);
   emit_operand(dst, src);
+  // 0x00 - insert into lower 128 bits
   // 0x01 - insert into upper 128 bits
-  emit_int8(0x01);
-}
-
-void Assembler::vextracti128h(XMMRegister dst, XMMRegister src) {
+  emit_int8(imm8 & 0x01);
+}
+
+void Assembler::vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_avx(), "");
+  assert(imm8 <= 0x01, "imm8: %u", imm8);
   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x39);
   emit_int8((unsigned char)(0xC0 | encode));
-  // 0x00 - insert into lower 128 bits
-  // 0x01 - insert into upper 128 bits
-  emit_int8(0x01);
-}
-
-void Assembler::vextracti128h(Address dst, XMMRegister src) {
+  // 0x00 - extract from lower 128 bits
+  // 0x01 - extract from upper 128 bits
+  emit_int8(imm8 & 0x01);
+}
+
+void Assembler::vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_avx2(), "");
   assert(src != xnoreg, "sanity");
+  assert(imm8 <= 0x01, "imm8: %u", imm8);
   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
   InstructionMark im(this);
   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
@@ -5786,47 +5894,53 @@
   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x39);
   emit_operand(src, dst);
+  // 0x00 - extract from lower 128 bits
   // 0x01 - extract from upper 128 bits
-  emit_int8(0x01);
-}
-
-void Assembler::vextracti64x4h(XMMRegister dst, XMMRegister src, int value) {
+  emit_int8(imm8 & 0x01);
+}
+
+void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_evex(), "");
+  assert(imm8 <= 0x01, "imm8: %u", imm8);
   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x3B);
   emit_int8((unsigned char)(0xC0 | encode));
   // 0x00 - extract from lower 256 bits
   // 0x01 - extract from upper 256 bits
-  emit_int8(value & 0x01);
-}
-
-void Assembler::vextracti64x2h(XMMRegister dst, XMMRegister src, int value) {
+  emit_int8(imm8 & 0x01);
+}
+
+void Assembler::vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_evex(), "");
+  assert(imm8 <= 0x03, "imm8: %u", imm8);
   InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x39);
   emit_int8((unsigned char)(0xC0 | encode));
+  // 0x00 - extract from bits 127:0
   // 0x01 - extract from bits 255:128
   // 0x02 - extract from bits 383:256
   // 0x03 - extract from bits 511:384
-  emit_int8(value & 0x3);
-}
-
-void Assembler::vextractf64x4h(XMMRegister dst, XMMRegister src, int value) {
+  emit_int8(imm8 & 0x03);
+}
+
+void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_evex(), "");
+  assert(imm8 <= 0x01, "imm8: %u", imm8);
   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x1B);
   emit_int8((unsigned char)(0xC0 | encode));
   // 0x00 - extract from lower 256 bits
   // 0x01 - extract from upper 256 bits
-  emit_int8(value & 0x1);
-}
-
-void Assembler::vextractf64x4h(Address dst, XMMRegister src, int value) {
+  emit_int8(imm8 & 0x01);
+}
+
+void Assembler::vextractf64x4(Address dst, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_evex(), "");
   assert(src != xnoreg, "sanity");
+  assert(imm8 <= 0x01, "imm8: %u", imm8);
   InstructionMark im(this);
   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T4,/* input_size_in_bits */  EVEX_64bit);
@@ -5835,11 +5949,12 @@
   emit_operand(src, dst);
   // 0x00 - extract from lower 256 bits
   // 0x01 - extract from upper 256 bits
-  emit_int8(value & 0x01);
-}
-
-void Assembler::vextractf32x4h(XMMRegister dst, XMMRegister src, int value) {
+  emit_int8(imm8 & 0x01);
+}
+
+void Assembler::vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_avx(), "");
+  assert(imm8 <= 0x03, "imm8: %u", imm8);
   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@@ -5849,12 +5964,13 @@
   // 0x01 - extract from bits 255:128
   // 0x02 - extract from bits 383:256
   // 0x03 - extract from bits 511:384
-  emit_int8(value & 0x3);
-}
-
-void Assembler::vextractf32x4h(Address dst, XMMRegister src, int value) {
+  emit_int8(imm8 & 0x03);
+}
+
+void Assembler::vextractf32x4(Address dst, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_evex(), "");
   assert(src != xnoreg, "sanity");
+  assert(imm8 <= 0x03, "imm8: %u", imm8);
   InstructionMark im(this);
   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
@@ -5865,19 +5981,21 @@
   // 0x01 - extract from bits 255:128
   // 0x02 - extract from bits 383:256
   // 0x03 - extract from bits 511:384
-  emit_int8(value & 0x3);
-}
-
-void Assembler::vextractf64x2h(XMMRegister dst, XMMRegister src, int value) {
+  emit_int8(imm8 & 0x03);
+}
+
+void Assembler::vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_evex(), "");
+  assert(imm8 <= 0x03, "imm8: %u", imm8);
   InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x19);
   emit_int8((unsigned char)(0xC0 | encode));
+  // 0x00 - extract from bits 127:0
   // 0x01 - extract from bits 255:128
   // 0x02 - extract from bits 383:256
   // 0x03 - extract from bits 511:384
-  emit_int8(value & 0x3);
+  emit_int8(imm8 & 0x03);
 }
 
 // duplicate 4-bytes integer data from src into 8 locations in dest
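The rename above replaces the *h helpers, which hard-coded the upper lane, with methods that take the lane selector explicitly. A minimal mapping sketch (register operands are placeholders; the lane semantics are taken from the imm8 comments above):

    // old (upper half hard-coded)           // new (lane selected by imm8)
    // vinsertf128h(dst, nds, src);      ->  vinsertf128(dst, nds, src, 1);
    // vextractf128h(dst, src);          ->  vextractf128(dst, src, 1);
    // vinsertf64x4h(dst, nds, src, v);  ->  vinsertf64x4(dst, nds, src, v);
    // For the 32x4/64x2 forms imm8 picks a 128-bit lane:
    //   0 -> bits 127:0, 1 -> 255:128, 2 -> 383:256, 3 -> 511:384.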
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1672,6 +1672,18 @@
 
   void setb(Condition cc, Register dst);
 
+  void palignr(XMMRegister dst, XMMRegister src, int imm8);
+  void pblendw(XMMRegister dst, XMMRegister src, int imm8);
+
+  void sha1rnds4(XMMRegister dst, XMMRegister src, int imm8);
+  void sha1nexte(XMMRegister dst, XMMRegister src);
+  void sha1msg1(XMMRegister dst, XMMRegister src);
+  void sha1msg2(XMMRegister dst, XMMRegister src);
+  // xmm0 is implicit additional source to the following instruction.
+  void sha256rnds2(XMMRegister dst, XMMRegister src);
+  void sha256msg1(XMMRegister dst, XMMRegister src);
+  void sha256msg2(XMMRegister dst, XMMRegister src);
+
   void shldl(Register dst, Register src);
   void shldl(Register dst, Register src, int8_t imm8);
 
@@ -1868,6 +1880,7 @@
   void paddb(XMMRegister dst, XMMRegister src);
   void paddw(XMMRegister dst, XMMRegister src);
   void paddd(XMMRegister dst, XMMRegister src);
+  void paddd(XMMRegister dst, Address src);
   void paddq(XMMRegister dst, XMMRegister src);
   void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
   void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
@@ -1958,33 +1971,31 @@
   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
 
-  // Copy low 128bit into high 128bit of YMM registers.
-  void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
-  void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
-  void vextractf128h(XMMRegister dst, XMMRegister src);
-  void vextracti128h(XMMRegister dst, XMMRegister src);
-
-  // Load/store high 128bit of YMM registers which does not destroy other half.
-  void vinsertf128h(XMMRegister dst, Address src);
-  void vinserti128h(XMMRegister dst, Address src);
-  void vextractf128h(Address dst, XMMRegister src);
-  void vextracti128h(Address dst, XMMRegister src);
-
-  // Copy low 256bit into high 256bit of ZMM registers.
-  void vinserti64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value);
-  void vinsertf64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value);
-  void vextracti64x4h(XMMRegister dst, XMMRegister src, int value);
-  void vextractf64x4h(XMMRegister dst, XMMRegister src, int value);
-  void vextractf64x4h(Address dst, XMMRegister src, int value);
-  void vinsertf64x4h(XMMRegister dst, Address src, int value);
-
-  // Copy targeted 128bit segments of the ZMM registers
-  void vextracti64x2h(XMMRegister dst, XMMRegister src, int value);
-  void vextractf64x2h(XMMRegister dst, XMMRegister src, int value);
-  void vextractf32x4h(XMMRegister dst, XMMRegister src, int value);
-  void vextractf32x4h(Address dst, XMMRegister src, int value);
-  void vinsertf32x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value);
-  void vinsertf32x4h(XMMRegister dst, Address src, int value);
+  // 128bit copy from/to 256bit (YMM) vector registers
+  void vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
+  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
+  void vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8);
+  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8);
+  void vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
+  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
+  void vextractf128(Address dst, XMMRegister src, uint8_t imm8);
+  void vextracti128(Address dst, XMMRegister src, uint8_t imm8);
+
+  // 256bit copy from/to 512bit (ZMM) vector registers
+  void vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
+  void vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
+  void vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
+  void vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
+  void vextractf64x4(Address dst, XMMRegister src, uint8_t imm8);
+  void vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
+
+  // 128bit copy from/to 256bit (YMM) or 512bit (ZMM) vector registers
+  void vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8);
+  void vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8);
+  void vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
+  void vextractf32x4(Address dst, XMMRegister src, uint8_t imm8);
+  void vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
+  void vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
 
   // duplicate 4-bytes integer data from src into 8 locations in dest
   void vpbroadcastd(XMMRegister dst, XMMRegister src);
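The new SHA declarations correspond one-to-one to the x86 SHA extension instructions. As a reference sketch (not part of the changeset), the same operations expressed through the C intrinsics from immintrin.h, compiled with -msha:

    #include <immintrin.h>

    // sha1rnds4(dst, src, imm8): four SHA-1 rounds; imm8 (0..3) selects the
    // round-constant/function group for rounds 0-19/20-39/40-59/60-79.
    __m128i sha1_group(__m128i abcd, __m128i e_wk) {
      return _mm_sha1rnds4_epu32(abcd, e_wk, 0);
    }

    // sha1nexte/sha1msg1/sha1msg2 map to _mm_sha1nexte_epu32,
    // _mm_sha1msg1_epu32 and _mm_sha1msg2_epu32.
    // sha256rnds2(dst, src) maps to _mm_sha256rnds2_epu32(dst, src, wk),
    // where wk is the operand the instruction implicitly reads from xmm0;
    // this is why the comment above pins xmm0 as an additional source.
    __m128i sha256_two_rounds(__m128i state1, __m128i state0, __m128i wk) {
      return _mm_sha256rnds2_epu32(state1, state0, wk);
    }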
--- a/hotspot/src/cpu/x86/vm/globals_x86.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/globals_x86.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -97,6 +97,8 @@
 
 define_pd_global(bool, PreserveFramePointer, false);
 
+define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
+
 #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \
                                                                             \
   develop(bool, IEEEPrecision, true,                                        \
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -3445,7 +3445,7 @@
 
 void MacroAssembler::movdqu(Address dst, XMMRegister src) {
   if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (src->encoding() > 15)) {
-    Assembler::vextractf32x4h(dst, src, 0);
+    Assembler::vextractf32x4(dst, src, 0);
   } else {
     Assembler::movdqu(dst, src);
   }
@@ -3453,7 +3453,7 @@
 
 void MacroAssembler::movdqu(XMMRegister dst, Address src) {
   if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (dst->encoding() > 15)) {
-    Assembler::vinsertf32x4h(dst, src, 0);
+    Assembler::vinsertf32x4(dst, dst, src, 0);
   } else {
     Assembler::movdqu(dst, src);
   }
@@ -3478,7 +3478,7 @@
 
 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) {
   if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (src->encoding() > 15)) {
-    Assembler::vextractf64x4h(dst, src, 0);
+    vextractf64x4_low(dst, src);
   } else {
     Assembler::vmovdqu(dst, src);
   }
@@ -3486,7 +3486,7 @@
 
 void MacroAssembler::vmovdqu(XMMRegister dst, Address src) {
   if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (dst->encoding() > 15)) {
-    Assembler::vinsertf64x4h(dst, src, 0);
+    vinsertf64x4_low(dst, src);
   } else {
     Assembler::vmovdqu(dst, src);
   }
@@ -5649,14 +5649,14 @@
         // Save upper half of ZMM registers
         subptr(rsp, 32*num_xmm_regs);
         for (int n = 0; n < num_xmm_regs; n++) {
-          vextractf64x4h(Address(rsp, n*32), as_XMMRegister(n), 1);
+          vextractf64x4_high(Address(rsp, n*32), as_XMMRegister(n));
         }
       }
       assert(UseAVX > 0, "256 bit vectors are supported only with AVX");
       // Save upper half of YMM registers
       subptr(rsp, 16*num_xmm_regs);
       for (int n = 0; n < num_xmm_regs; n++) {
-        vextractf128h(Address(rsp, n*16), as_XMMRegister(n));
+        vextractf128_high(Address(rsp, n*16), as_XMMRegister(n));
       }
     }
 #endif
@@ -5665,7 +5665,7 @@
 #ifdef _LP64
     if (VM_Version::supports_evex()) {
       for (int n = 0; n < num_xmm_regs; n++) {
-        vextractf32x4h(Address(rsp, n*16), as_XMMRegister(n), 0);
+        vextractf32x4(Address(rsp, n*16), as_XMMRegister(n), 0);
       }
     } else {
       for (int n = 0; n < num_xmm_regs; n++) {
@@ -5753,7 +5753,7 @@
 #ifdef _LP64
   if (VM_Version::supports_evex()) {
     for (int n = 0; n < num_xmm_regs; n++) {
-      vinsertf32x4h(as_XMMRegister(n), Address(rsp, n*16), 0);
+      vinsertf32x4(as_XMMRegister(n), as_XMMRegister(n), Address(rsp, n*16), 0);
     }
   } else {
     for (int n = 0; n < num_xmm_regs; n++) {
@@ -5771,12 +5771,12 @@
     if (MaxVectorSize > 16) {
       // Restore upper half of YMM registers.
       for (int n = 0; n < num_xmm_regs; n++) {
-        vinsertf128h(as_XMMRegister(n), Address(rsp, n*16));
+        vinsertf128_high(as_XMMRegister(n), Address(rsp, n*16));
       }
       addptr(rsp, 16*num_xmm_regs);
       if(UseAVX > 2) {
         for (int n = 0; n < num_xmm_regs; n++) {
-          vinsertf64x4h(as_XMMRegister(n), Address(rsp, n*32), 1);
+          vinsertf64x4_high(as_XMMRegister(n), Address(rsp, n*32));
         }
         addptr(rsp, 32*num_xmm_regs);
       }
@@ -7198,21 +7198,50 @@
 
 }
 
-void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp) {
+void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, bool is_large) {
   // cnt - number of qwords (8-byte words).
   // base - start address, qword aligned.
+  // is_large - set when the optimizer already knows cnt is larger than InitArrayShortSize
   assert(base==rdi, "base register must be edi for rep stos");
   assert(tmp==rax,   "tmp register must be eax for rep stos");
   assert(cnt==rcx,   "cnt register must be ecx for rep stos");
+  assert(InitArrayShortSize % BytesPerLong == 0,
+    "InitArrayShortSize should be the multiple of BytesPerLong");
+
+  Label DONE;
 
   xorptr(tmp, tmp);
+
+  if (!is_large) {
+    Label LOOP, LONG;
+    cmpptr(cnt, InitArrayShortSize/BytesPerLong);
+    jccb(Assembler::greater, LONG);
+
+    NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
+
+    decrement(cnt);
+    jccb(Assembler::negative, DONE); // Zero length
+
+    // Use individual pointer-sized stores for small counts:
+    BIND(LOOP);
+    movptr(Address(base, cnt, Address::times_ptr), tmp);
+    decrement(cnt);
+    jccb(Assembler::greaterEqual, LOOP);
+    jmpb(DONE);
+
+    BIND(LONG);
+  }
+
+  // Use longer rep-prefixed ops for non-small counts:
   if (UseFastStosb) {
-    shlptr(cnt,3); // convert to number of bytes
+    shlptr(cnt, 3); // convert to number of bytes
     rep_stosb();
   } else {
-    NOT_LP64(shlptr(cnt,1);) // convert to number of dwords for 32-bit VM
+    NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
     rep_stos();
   }
+
+  BIND(DONE);
 }
 
 #ifdef COMPILER2
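The reworked clear_mem now dispatches on the count before choosing a strategy. A rough standalone C++ model of that dispatch (clear_mem_model and the hard-coded constants are illustrative only; the real routine counts down through rcx and stores through rdi, as the register assertions above require):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    const size_t BytesPerLong = 8;                       // as in HotSpot
    const size_t InitArrayShortSize = 8 * BytesPerLong;  // new default, 64 bytes

    void clear_mem_model(uint64_t* base, size_t cnt, bool is_large) {
      if (!is_large && cnt <= InitArrayShortSize / BytesPerLong) {
        // short path: a few discrete stores, avoiding rep-stos startup cost
        for (size_t i = 0; i < cnt; i++) {
          base[i] = 0;
        }
        return;
      }
      // long path: rep stosb / rep stos, modeled here by memset
      memset(base, 0, cnt * BytesPerLong);
    }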
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,7 +48,6 @@
   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
   // may customize this version by overriding it for its purposes (e.g., to save/restore
   // additional registers when doing a VM call).
-#define COMMA ,
 
   virtual void call_VM_leaf_base(
     address entry_point,               // the entry point
@@ -903,35 +902,66 @@
   void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
   void ldmxcsr(AddressLiteral src);
 
+  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
+                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
+                 Register buf, Register state, Register ofs, Register limit, Register rsp,
+                 bool multi_block);
+
+#ifdef _LP64
+  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
+                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
+                   Register buf, Register state, Register ofs, Register limit, Register rsp,
+                   bool multi_block, XMMRegister shuf_mask);
+#else
+  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
+                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
+                   Register buf, Register state, Register ofs, Register limit, Register rsp,
+                   bool multi_block);
+#endif
+
   void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                 Register rax, Register rcx, Register rdx, Register tmp);
 
+#ifdef _LP64
   void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
-                Register rax, Register rcx, Register rdx, Register tmp1 LP64_ONLY(COMMA Register tmp2));
+                Register rax, Register rcx, Register rdx, Register tmp1, Register tmp2);
 
   void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                 XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
-                Register rdx NOT_LP64(COMMA  Register tmp) LP64_ONLY(COMMA  Register tmp1)
-                LP64_ONLY(COMMA  Register tmp2) LP64_ONLY(COMMA  Register tmp3) LP64_ONLY(COMMA  Register tmp4));
+                Register rdx, Register tmp1, Register tmp2, Register tmp3, Register tmp4);
 
   void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
-                Register rax, Register rbx LP64_ONLY(COMMA  Register rcx), Register rdx
-                LP64_ONLY(COMMA Register tmp1) LP64_ONLY(COMMA Register tmp2)
-                LP64_ONLY(COMMA Register tmp3) LP64_ONLY(COMMA Register tmp4));
+                Register rax, Register rbx, Register rcx, Register rdx, Register tmp1, Register tmp2,
+                Register tmp3, Register tmp4);
 
   void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
-                Register rax, Register rcx, Register rdx NOT_LP64(COMMA Register tmp)
-                LP64_ONLY(COMMA Register r8) LP64_ONLY(COMMA Register r9)
-                LP64_ONLY(COMMA Register r10) LP64_ONLY(COMMA Register r11));
+                Register rax, Register rcx, Register rdx, Register tmp1,
+                Register tmp2, Register tmp3, Register tmp4);
+#else
+  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
+                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
+                Register rax, Register rcx, Register rdx, Register tmp1);
 
-#ifndef _LP64
+  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
+                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
+                Register rdx, Register tmp);
+
+  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
+                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
+                Register rax, Register rbx, Register rdx);
+
+  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
+                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
+                Register rax, Register rcx, Register rdx, Register tmp);
+
   void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                         Register edx, Register ebx, Register esi, Register edi,
                         Register ebp, Register esp);
+
   void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
                          Register esi, Register edi, Register ebp, Register esp);
 #endif
@@ -1185,14 +1215,131 @@
   void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
   void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }
 
-  // Move packed integer values from low 128 bit to hign 128 bit in 256 bit vector.
-  void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
-    if (UseAVX > 1) // vinserti128h is available only in AVX2
-      Assembler::vinserti128h(dst, nds, src);
-    else
-      Assembler::vinsertf128h(dst, nds, src);
+  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
+    if (UseAVX > 1) { // vinserti128 is available only in AVX2
+      Assembler::vinserti128(dst, nds, src, imm8);
+    } else {
+      Assembler::vinsertf128(dst, nds, src, imm8);
+    }
   }
 
+  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
+    if (UseAVX > 1) { // vinserti128 is available only in AVX2
+      Assembler::vinserti128(dst, nds, src, imm8);
+    } else {
+      Assembler::vinsertf128(dst, nds, src, imm8);
+    }
+  }
+
+  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
+    if (UseAVX > 1) { // vextracti128 is available only in AVX2
+      Assembler::vextracti128(dst, src, imm8);
+    } else {
+      Assembler::vextractf128(dst, src, imm8);
+    }
+  }
+
+  void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
+    if (UseAVX > 1) { // vextracti128 is available only in AVX2
+      Assembler::vextracti128(dst, src, imm8);
+    } else {
+      Assembler::vextractf128(dst, src, imm8);
+    }
+  }
+
+  // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
+  void vinserti128_high(XMMRegister dst, XMMRegister src) {
+    vinserti128(dst, dst, src, 1);
+  }
+  void vinserti128_high(XMMRegister dst, Address src) {
+    vinserti128(dst, dst, src, 1);
+  }
+  void vextracti128_high(XMMRegister dst, XMMRegister src) {
+    vextracti128(dst, src, 1);
+  }
+  void vextracti128_high(Address dst, XMMRegister src) {
+    vextracti128(dst, src, 1);
+  }
+  void vinsertf128_high(XMMRegister dst, XMMRegister src) {
+    vinsertf128(dst, dst, src, 1);
+  }
+  void vinsertf128_high(XMMRegister dst, Address src) {
+    vinsertf128(dst, dst, src, 1);
+  }
+  void vextractf128_high(XMMRegister dst, XMMRegister src) {
+    vextractf128(dst, src, 1);
+  }
+  void vextractf128_high(Address dst, XMMRegister src) {
+    vextractf128(dst, src, 1);
+  }
+
+  // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
+  void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
+    vinserti64x4(dst, dst, src, 1);
+  }
+  void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
+    vinsertf64x4(dst, dst, src, 1);
+  }
+  void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
+    vextracti64x4(dst, src, 1);
+  }
+  void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
+    vextractf64x4(dst, src, 1);
+  }
+  void vextractf64x4_high(Address dst, XMMRegister src) {
+    vextractf64x4(dst, src, 1);
+  }
+  void vinsertf64x4_high(XMMRegister dst, Address src) {
+    vinsertf64x4(dst, dst, src, 1);
+  }
+
+  // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
+  void vinserti128_low(XMMRegister dst, XMMRegister src) {
+    vinserti128(dst, dst, src, 0);
+  }
+  void vinserti128_low(XMMRegister dst, Address src) {
+    vinserti128(dst, dst, src, 0);
+  }
+  void vextracti128_low(XMMRegister dst, XMMRegister src) {
+    vextracti128(dst, src, 0);
+  }
+  void vextracti128_low(Address dst, XMMRegister src) {
+    vextracti128(dst, src, 0);
+  }
+  void vinsertf128_low(XMMRegister dst, XMMRegister src) {
+    vinsertf128(dst, dst, src, 0);
+  }
+  void vinsertf128_low(XMMRegister dst, Address src) {
+    vinsertf128(dst, dst, src, 0);
+  }
+  void vextractf128_low(XMMRegister dst, XMMRegister src) {
+    vextractf128(dst, src, 0);
+  }
+  void vextractf128_low(Address dst, XMMRegister src) {
+    vextractf128(dst, src, 0);
+  }
+
+  // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
+  void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
+    vinserti64x4(dst, dst, src, 0);
+  }
+  void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
+    vinsertf64x4(dst, dst, src, 0);
+  }
+  void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
+    vextracti64x4(dst, src, 0);
+  }
+  void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
+    vextractf64x4(dst, src, 0);
+  }
+  void vextractf64x4_low(Address dst, XMMRegister src) {
+    vextractf64x4(dst, src, 0);
+  }
+  void vinsertf64x4_low(XMMRegister dst, Address src) {
+    vinsertf64x4(dst, dst, src, 0);
+  }
+
+
   // Carry-Less Multiplication Quadword
   void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
     // 0x00 - multiply lower 64 bits [0:63]
@@ -1284,8 +1431,9 @@
   // C2 compiled method's prolog code.
   void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b);
 
-  // clear memory of size 'cnt' qwords, starting at 'base'.
-  void clear_mem(Register base, Register cnt, Register rtmp);
+  // clear memory of size 'cnt' qwords, starting at 'base';
+  // if 'is_large' is set, do not try to produce short loop
+  void clear_mem(Register base, Register cnt, Register rtmp, bool is_large);
 
 #ifdef COMPILER2
   void string_indexof_char(Register str1, Register cnt1, Register ch, Register result,
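The _high/_low wrappers pin the lane selector so call sites state intent instead of carrying a bare trailing 0 or 1; the sharedRuntime and register save/restore rewrites later in this changeset rely on exactly these. The usage pattern, as it appears at those call sites:

    // save and restore the upper YMM halves of registers 0..n-1:
    //   vextractf128_high(Address(rsp, n*16), as_XMMRegister(n));
    //   ...
    //   vinsertf128_high(as_XMMRegister(n), Address(rsp, n*16));
    // each wrapper forwards to the explicit form, e.g.
    //   vextractf128_high(dst, src)  ==  vextractf128(dst, src, 1)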
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86_sha.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -0,0 +1,495 @@
+/*
+* Copyright (c) 2016, Intel Corporation.
+*
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*
+*/
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "asm/assembler.inline.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "macroAssembler_x86.hpp"
+
+// ofs and limit are used for multi-block byte array.
+// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
+void MacroAssembler::fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
+  XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
+  Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block) {
+
+  Label start, done_hash, loop0;
+
+  address upper_word_mask = StubRoutines::x86::upper_word_mask_addr();
+  address shuffle_byte_flip_mask = StubRoutines::x86::shuffle_byte_flip_mask_addr();
+
+  bind(start);
+  movdqu(abcd, Address(state, 0));
+  pinsrd(e0, Address(state, 16), 3);
+  movdqu(shuf_mask, ExternalAddress(upper_word_mask)); // 0xFFFFFFFF000000000000000000000000
+  pand(e0, shuf_mask);
+  pshufd(abcd, abcd, 0x1B);
+  movdqu(shuf_mask, ExternalAddress(shuffle_byte_flip_mask)); //0x000102030405060708090a0b0c0d0e0f
+
+  bind(loop0);
+  // Save hash values for addition after rounds
+  movdqu(Address(rsp, 0), e0);
+  movdqu(Address(rsp, 16), abcd);
+
+
+  // Rounds 0 - 3
+  movdqu(msg0, Address(buf, 0));
+  pshufb(msg0, shuf_mask);
+  paddd(e0, msg0);
+  movdqa(e1, abcd);
+  sha1rnds4(abcd, e0, 0);
+
+  // Rounds 4 - 7
+  movdqu(msg1, Address(buf, 16));
+  pshufb(msg1, shuf_mask);
+  sha1nexte(e1, msg1);
+  movdqa(e0, abcd);
+  sha1rnds4(abcd, e1, 0);
+  sha1msg1(msg0, msg1);
+
+  // Rounds 8 - 11
+  movdqu(msg2, Address(buf, 32));
+  pshufb(msg2, shuf_mask);
+  sha1nexte(e0, msg2);
+  movdqa(e1, abcd);
+  sha1rnds4(abcd, e0, 0);
+  sha1msg1(msg1, msg2);
+  pxor(msg0, msg2);
+
+  // Rounds 12 - 15
+  movdqu(msg3, Address(buf, 48));
+  pshufb(msg3, shuf_mask);
+  sha1nexte(e1, msg3);
+  movdqa(e0, abcd);
+  sha1msg2(msg0, msg3);
+  sha1rnds4(abcd, e1, 0);
+  sha1msg1(msg2, msg3);
+  pxor(msg1, msg3);
+
+  // Rounds 16 - 19
+  sha1nexte(e0, msg0);
+  movdqa(e1, abcd);
+  sha1msg2(msg1, msg0);
+  sha1rnds4(abcd, e0, 0);
+  sha1msg1(msg3, msg0);
+  pxor(msg2, msg0);
+
+  // Rounds 20 - 23
+  sha1nexte(e1, msg1);
+  movdqa(e0, abcd);
+  sha1msg2(msg2, msg1);
+  sha1rnds4(abcd, e1, 1);
+  sha1msg1(msg0, msg1);
+  pxor(msg3, msg1);
+
+  // Rounds 24 - 27
+  sha1nexte(e0, msg2);
+  movdqa(e1, abcd);
+  sha1msg2(msg3, msg2);
+  sha1rnds4(abcd, e0, 1);
+  sha1msg1(msg1, msg2);
+  pxor(msg0, msg2);
+
+  // Rounds 28 - 31
+  sha1nexte(e1, msg3);
+  movdqa(e0, abcd);
+  sha1msg2(msg0, msg3);
+  sha1rnds4(abcd, e1, 1);
+  sha1msg1(msg2, msg3);
+  pxor(msg1, msg3);
+
+  // Rounds 32 - 35
+  sha1nexte(e0, msg0);
+  movdqa(e1, abcd);
+  sha1msg2(msg1, msg0);
+  sha1rnds4(abcd, e0, 1);
+  sha1msg1(msg3, msg0);
+  pxor(msg2, msg0);
+
+  // Rounds 36 - 39
+  sha1nexte(e1, msg1);
+  movdqa(e0, abcd);
+  sha1msg2(msg2, msg1);
+  sha1rnds4(abcd, e1, 1);
+  sha1msg1(msg0, msg1);
+  pxor(msg3, msg1);
+
+  // Rounds 40 - 43
+  sha1nexte(e0, msg2);
+  movdqa(e1, abcd);
+  sha1msg2(msg3, msg2);
+  sha1rnds4(abcd, e0, 2);
+  sha1msg1(msg1, msg2);
+  pxor(msg0, msg2);
+
+  // Rounds 44 - 47
+  sha1nexte(e1, msg3);
+  movdqa(e0, abcd);
+  sha1msg2(msg0, msg3);
+  sha1rnds4(abcd, e1, 2);
+  sha1msg1(msg2, msg3);
+  pxor(msg1, msg3);
+
+  // Rounds 48 - 51
+  sha1nexte(e0, msg0);
+  movdqa(e1, abcd);
+  sha1msg2(msg1, msg0);
+  sha1rnds4(abcd, e0, 2);
+  sha1msg1(msg3, msg0);
+  pxor(msg2, msg0);
+
+  // Rounds 52 - 55
+  sha1nexte(e1, msg1);
+  movdqa(e0, abcd);
+  sha1msg2(msg2, msg1);
+  sha1rnds4(abcd, e1, 2);
+  sha1msg1(msg0, msg1);
+  pxor(msg3, msg1);
+
+  // Rounds 56 - 59
+  sha1nexte(e0, msg2);
+  movdqa(e1, abcd);
+  sha1msg2(msg3, msg2);
+  sha1rnds4(abcd, e0, 2);
+  sha1msg1(msg1, msg2);
+  pxor(msg0, msg2);
+
+  // Rounds 60 - 63
+  sha1nexte(e1, msg3);
+  movdqa(e0, abcd);
+  sha1msg2(msg0, msg3);
+  sha1rnds4(abcd, e1, 3);
+  sha1msg1(msg2, msg3);
+  pxor(msg1, msg3);
+
+  // Rounds 64 - 67
+  sha1nexte(e0, msg0);
+  movdqa(e1, abcd);
+  sha1msg2(msg1, msg0);
+  sha1rnds4(abcd, e0, 3);
+  sha1msg1(msg3, msg0);
+  pxor(msg2, msg0);
+
+  // Rounds 68 - 71
+  sha1nexte(e1, msg1);
+  movdqa(e0, abcd);
+  sha1msg2(msg2, msg1);
+  sha1rnds4(abcd, e1, 3);
+  pxor(msg3, msg1);
+
+  // Rounds 72 - 75
+  sha1nexte(e0, msg2);
+  movdqa(e1, abcd);
+  sha1msg2(msg3, msg2);
+  sha1rnds4(abcd, e0, 3);
+
+  // Rounds 76 - 79
+  sha1nexte(e1, msg3);
+  movdqa(e0, abcd);
+  sha1rnds4(abcd, e1, 3);
+
+  // add current hash values with previously saved
+  movdqu(msg0, Address(rsp, 0));
+  sha1nexte(e0, msg0);
+  movdqu(msg0, Address(rsp, 16));
+  paddd(abcd, msg0);
+
+  if (multi_block) {
+    // increment data pointer and loop if more to process
+    addptr(buf, 64);
+    addptr(ofs, 64);
+    cmpptr(ofs, limit);
+    jcc(Assembler::belowEqual, loop0);
+    movptr(rax, ofs); // return ofs
+  }
+  // write hash values back in the correct order
+  pshufd(abcd, abcd, 0x1b);
+  movdqu(Address(state, 0), abcd);
+  pextrd(Address(state, 16), e0, 3);
+
+  bind(done_hash);
+
+}
+
+// xmm0 (msg) is used as an implicit argument to sha256rnds2,
+// so state0 and state1 must never be assigned the xmm0 register.
+// ofs and limit are used for multi-block byte array.
+// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
+#ifdef _LP64
+void MacroAssembler::fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
+  XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
+  Register buf, Register state, Register ofs, Register limit, Register rsp,
+  bool multi_block, XMMRegister shuf_mask) {
+#else
+void MacroAssembler::fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
+  XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
+  Register buf, Register state, Register ofs, Register limit, Register rsp,
+  bool multi_block) {
+#endif
+  Label start, done_hash, loop0;
+
+  address K256 = StubRoutines::x86::k256_addr();
+  address pshuffle_byte_flip_mask = StubRoutines::x86::pshuffle_byte_flip_mask_addr();
+
+  bind(start);
+  movdqu(state0, Address(state, 0));
+  movdqu(state1, Address(state, 16));
+
+  pshufd(state0, state0, 0xB1);
+  pshufd(state1, state1, 0x1B);
+  movdqa(msgtmp4, state0);
+  palignr(state0, state1, 8);
+  pblendw(state1, msgtmp4, 0xF0);
+
+#ifdef _LP64
+  movdqu(shuf_mask, ExternalAddress(pshuffle_byte_flip_mask));
+#endif
+  lea(rax, ExternalAddress(K256));
+
+  bind(loop0);
+  movdqu(Address(rsp, 0), state0);
+  movdqu(Address(rsp, 16), state1);
+
+  // Rounds 0-3
+  movdqu(msg, Address(buf, 0));
+#ifdef _LP64
+  pshufb(msg, shuf_mask);
+#else
+  pshufb(msg, ExternalAddress(pshuffle_byte_flip_mask));
+#endif
+  movdqa(msgtmp0, msg);
+  paddd(msg, Address(rax, 0));
+  sha256rnds2(state1, state0);
+  pshufd(msg, msg, 0x0E);
+  sha256rnds2(state0, state1);
+
+  // Rounds 4-7
+  movdqu(msg, Address(buf, 16));
+#ifdef _LP64
+  pshufb(msg, shuf_mask);
+#else
+  pshufb(msg, ExternalAddress(pshuffle_byte_flip_mask));
+#endif
+  movdqa(msgtmp1, msg);
+  paddd(msg, Address(rax, 16));
+  sha256rnds2(state1, state0);
+  pshufd(msg, msg, 0x0E);
+  sha256rnds2(state0, state1);
+  sha256msg1(msgtmp0, msgtmp1);
+
+  // Rounds 8-11
+  movdqu(msg, Address(buf, 32));
+#ifdef _LP64
+  pshufb(msg, shuf_mask);
+#else
+  pshufb(msg, ExternalAddress(pshuffle_byte_flip_mask));
+#endif
+  movdqa(msgtmp2, msg);
+  paddd(msg, Address(rax, 32));
+  sha256rnds2(state1, state0);
+  pshufd(msg, msg, 0x0E);
+  sha256rnds2(state0, state1);
+  sha256msg1(msgtmp1, msgtmp2);
+
+  // Rounds 12-15
+  movdqu(msg, Address(buf, 48));
+#ifdef _LP64
+  pshufb(msg, shuf_mask);
+#else
+  pshufb(msg, ExternalAddress(pshuffle_byte_flip_mask));
+#endif
+  movdqa(msgtmp3, msg);
+  paddd(msg, Address(rax, 48));
+  sha256rnds2(state1, state0);
+  movdqa(msgtmp4, msgtmp3);
+  palignr(msgtmp4, msgtmp2, 4);
+  paddd(msgtmp0, msgtmp4);
+  sha256msg2(msgtmp0, msgtmp3);
+  pshufd(msg, msg, 0x0E);
+  sha256rnds2(state0, state1);
+  sha256msg1(msgtmp2, msgtmp3);
+
+  // Rounds 16-19
+  movdqa(msg, msgtmp0);
+  paddd(msg, Address(rax, 64));
+  sha256rnds2(state1, state0);
+  movdqa(msgtmp4, msgtmp0);
+  palignr(msgtmp4, msgtmp3, 4);
+  paddd(msgtmp1, msgtmp4);
+  sha256msg2(msgtmp1, msgtmp0);
+  pshufd(msg, msg, 0x0E);
+  sha256rnds2(state0, state1);
+  sha256msg1(msgtmp3, msgtmp0);
+
+  // Rounds 20-23
+  movdqa(msg, msgtmp1);
+  paddd(msg, Address(rax, 80));
+  sha256rnds2(state1, state0);
+  movdqa(msgtmp4, msgtmp1);
+  palignr(msgtmp4, msgtmp0, 4);
+  paddd(msgtmp2, msgtmp4);
+  sha256msg2(msgtmp2, msgtmp1);
+  pshufd(msg, msg, 0x0E);
+  sha256rnds2(state0, state1);
+  sha256msg1(msgtmp0, msgtmp1);
+
+  // Rounds 24-27
+  movdqa(msg, msgtmp2);
+  paddd(msg, Address(rax, 96));
+  sha256rnds2(state1, state0);
+  movdqa(msgtmp4, msgtmp2);
+  palignr(msgtmp4, msgtmp1, 4);
+  paddd(msgtmp3, msgtmp4);
+  sha256msg2(msgtmp3, msgtmp2);
+  pshufd(msg, msg, 0x0E);
+  sha256rnds2(state0, state1);
+  sha256msg1(msgtmp1, msgtmp2);
+
+  // Rounds 28-31
+  movdqa(msg, msgtmp3);
+  paddd(msg, Address(rax, 112));
+  sha256rnds2(state1, state0);
+  movdqa(msgtmp4, msgtmp3);
+  palignr(msgtmp4, msgtmp2, 4);
+  paddd(msgtmp0, msgtmp4);
+  sha256msg2(msgtmp0, msgtmp3);
+  pshufd(msg, msg, 0x0E);
+  sha256rnds2(state0, state1);
+  sha256msg1(msgtmp2, msgtmp3);
+
+  // Rounds 32-35
+  movdqa(msg, msgtmp0);
+  paddd(msg, Address(rax, 128));
+  sha256rnds2(state1, state0);
+  movdqa(msgtmp4, msgtmp0);
+  palignr(msgtmp4, msgtmp3, 4);
+  paddd(msgtmp1, msgtmp4);
+  sha256msg2(msgtmp1, msgtmp0);
+  pshufd(msg, msg, 0x0E);
+  sha256rnds2(state0, state1);
+  sha256msg1(msgtmp3, msgtmp0);
+
+  // Rounds 36-39
+  movdqa(msg, msgtmp1);
+  paddd(msg, Address(rax, 144));
+  sha256rnds2(state1, state0);
+  movdqa(msgtmp4, msgtmp1);
+  palignr(msgtmp4, msgtmp0, 4);
+  paddd(msgtmp2, msgtmp4);
+  sha256msg2(msgtmp2, msgtmp1);
+  pshufd(msg, msg, 0x0E);
+  sha256rnds2(state0, state1);
+  sha256msg1(msgtmp0, msgtmp1);
+
+  // Rounds 40-43
+  movdqa(msg, msgtmp2);
+  paddd(msg, Address(rax, 160));
+  sha256rnds2(state1, state0);
+  movdqa(msgtmp4, msgtmp2);
+  palignr(msgtmp4, msgtmp1, 4);
+  paddd(msgtmp3, msgtmp4);
+  sha256msg2(msgtmp3, msgtmp2);
+  pshufd(msg, msg, 0x0E);
+  sha256rnds2(state0, state1);
+  sha256msg1(msgtmp1, msgtmp2);
+
+  // Rounds 44-47
+  movdqa(msg, msgtmp3);
+  paddd(msg, Address(rax, 176));
+  sha256rnds2(state1, state0);
+  movdqa(msgtmp4, msgtmp3);
+  palignr(msgtmp4, msgtmp2, 4);
+  paddd(msgtmp0, msgtmp4);
+  sha256msg2(msgtmp0, msgtmp3);
+  pshufd(msg, msg, 0x0E);
+  sha256rnds2(state0, state1);
+  sha256msg1(msgtmp2, msgtmp3);
+
+  // Rounds 48-51
+  movdqa(msg, msgtmp0);
+  paddd(msg, Address(rax, 192));
+  sha256rnds2(state1, state0);
+  movdqa(msgtmp4, msgtmp0);
+  palignr(msgtmp4, msgtmp3, 4);
+  paddd(msgtmp1, msgtmp4);
+  sha256msg2(msgtmp1, msgtmp0);
+  pshufd(msg, msg, 0x0E);
+  sha256rnds2(state0, state1);
+  sha256msg1(msgtmp3, msgtmp0);
+
+  // Rounds 52-55
+  movdqa(msg, msgtmp1);
+  paddd(msg, Address(rax, 208));
+  sha256rnds2(state1, state0);
+  movdqa(msgtmp4, msgtmp1);
+  palignr(msgtmp4, msgtmp0, 4);
+  paddd(msgtmp2, msgtmp4);
+  sha256msg2(msgtmp2, msgtmp1);
+  pshufd(msg, msg, 0x0E);
+  sha256rnds2(state0, state1);
+
+  // Rounds 56-59
+  movdqa(msg, msgtmp2);
+  paddd(msg, Address(rax, 224));
+  sha256rnds2(state1, state0);
+  movdqa(msgtmp4, msgtmp2);
+  palignr(msgtmp4, msgtmp1, 4);
+  paddd(msgtmp3, msgtmp4);
+  sha256msg2(msgtmp3, msgtmp2);
+  pshufd(msg, msg, 0x0E);
+  sha256rnds2(state0, state1);
+
+  // Rounds 60-63
+  movdqa(msg, msgtmp3);
+  paddd(msg, Address(rax, 240));
+  sha256rnds2(state1, state0);
+  pshufd(msg, msg, 0x0E);
+  sha256rnds2(state0, state1);
+  movdqu(msg, Address(rsp, 0));
+  paddd(state0, msg);
+  movdqu(msg, Address(rsp, 16));
+  paddd(state1, msg);
+
+  if (multi_block) {
+    // increment data pointer and loop if more to process
+    addptr(buf, 64);
+    addptr(ofs, 64);
+    cmpptr(ofs, limit);
+    jcc(Assembler::belowEqual, loop0);
+    movptr(rax, ofs); // return ofs
+  }
+
+  pshufd(state0, state0, 0x1B);
+  pshufd(state1, state1, 0xB1);
+  movdqa(msgtmp4, state0);
+  pblendw(state0, state1, 0xF0);
+  palignr(state1, msgtmp4, 8);
+
+  movdqu(Address(state, 0), state0);
+  movdqu(Address(state, 16), state1);
+
+  bind(done_hash);
+
+}
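Both fast_sha1 and fast_sha256 implement the same multi-block contract as DigestBase.implCompressMultiBlock: consume 64-byte blocks while ofs <= limit and return the final ofs in rax. A standalone C++ model of that driver loop (compress_one_block is a hypothetical stand-in for the round sequences above):

    #include <cstdint>

    void compress_one_block(uint32_t* state, const uint8_t* block);  // hypothetical

    int impl_compress_multi_block_model(const uint8_t* buf, uint32_t* state,
                                        int ofs, int limit) {
      do {
        compress_one_block(state, buf);  // the SHA rounds for one 64-byte block
        buf += 64;
        ofs += 64;
      } while (ofs <= limit);            // jcc(belowEqual, loop0) above
      return ofs;                        // movptr(rax, ofs)
    }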
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -208,13 +208,13 @@
     __ subptr(rsp, ymm_bytes);
     // Save upper half of YMM registers
     for (int n = 0; n < num_xmm_regs; n++) {
-      __ vextractf128h(Address(rsp, n*16), as_XMMRegister(n));
+      __ vextractf128_high(Address(rsp, n*16), as_XMMRegister(n));
     }
     if (UseAVX > 2) {
       __ subptr(rsp, zmm_bytes);
       // Save upper half of ZMM registers
       for (int n = 0; n < num_xmm_regs; n++) {
-        __ vextractf64x4h(Address(rsp, n*32), as_XMMRegister(n), 1);
+        __ vextractf64x4_high(Address(rsp, n*32), as_XMMRegister(n));
       }
     }
   }
@@ -304,13 +304,13 @@
     if (UseAVX > 2) {
       // Restore upper half of ZMM registers.
       for (int n = 0; n < num_xmm_regs; n++) {
-        __ vinsertf64x4h(as_XMMRegister(n), Address(rsp, n*32), 1);
+        __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, n*32));
       }
       __ addptr(rsp, zmm_bytes);
     }
     // Restore upper half of YMM registers.
     for (int n = 0; n < num_xmm_regs; n++) {
-      __ vinsertf128h(as_XMMRegister(n), Address(rsp, n*16));
+      __ vinsertf128_high(as_XMMRegister(n), Address(rsp, n*16));
     }
     __ addptr(rsp, ymm_bytes);
   }
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -179,13 +179,13 @@
     // Save upper half of YMM registers(0..15)
     int base_addr = XSAVE_AREA_YMM_BEGIN;
     for (int n = 0; n < 16; n++) {
-      __ vextractf128h(Address(rsp, base_addr+n*16), as_XMMRegister(n));
+      __ vextractf128_high(Address(rsp, base_addr+n*16), as_XMMRegister(n));
     }
     if (VM_Version::supports_evex()) {
       // Save upper half of ZMM registers(0..15)
       base_addr = XSAVE_AREA_ZMM_BEGIN;
       for (int n = 0; n < 16; n++) {
-        __ vextractf64x4h(Address(rsp, base_addr+n*32), as_XMMRegister(n), 1);
+        __ vextractf64x4_high(Address(rsp, base_addr+n*32), as_XMMRegister(n));
       }
       // Save full ZMM registers(16..num_xmm_regs)
       base_addr = XSAVE_AREA_UPPERBANK;
@@ -333,13 +333,13 @@
     // Restore upper half of YMM registers (0..15)
     int base_addr = XSAVE_AREA_YMM_BEGIN;
     for (int n = 0; n < 16; n++) {
-      __ vinsertf128h(as_XMMRegister(n), Address(rsp,  base_addr+n*16));
+      __ vinsertf128_high(as_XMMRegister(n), Address(rsp, base_addr+n*16));
     }
     if (VM_Version::supports_evex()) {
       // Restore upper half of ZMM registers (0..15)
       base_addr = XSAVE_AREA_ZMM_BEGIN;
       for (int n = 0; n < 16; n++) {
-        __ vinsertf64x4h(as_XMMRegister(n), Address(rsp, base_addr+n*32), 1);
+        __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, base_addr+n*32));
       }
       // Restore full ZMM registers(16..num_xmm_regs)
       base_addr = XSAVE_AREA_UPPERBANK;
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -3068,6 +3068,136 @@
     return start;
   }
 
+  address generate_upper_word_mask() {
+    __ align(64);
+    StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
+    address start = __ pc();
+    __ emit_data(0x00000000, relocInfo::none, 0);
+    __ emit_data(0x00000000, relocInfo::none, 0);
+    __ emit_data(0x00000000, relocInfo::none, 0);
+    __ emit_data(0xFFFFFFFF, relocInfo::none, 0);
+    return start;
+  }
+
+  address generate_shuffle_byte_flip_mask() {
+    __ align(64);
+    StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
+    address start = __ pc();
+    __ emit_data(0x0c0d0e0f, relocInfo::none, 0);
+    __ emit_data(0x08090a0b, relocInfo::none, 0);
+    __ emit_data(0x04050607, relocInfo::none, 0);
+    __ emit_data(0x00010203, relocInfo::none, 0);
+    return start;
+  }
+
+  // ofs and limit are used for multi-block byte array.
+  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
+  address generate_sha1_implCompress(bool multi_block, const char *name) {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ pc();
+
+    Register buf   = rax;
+    Register state = rdx;
+    Register ofs   = rcx;
+    Register limit = rdi;
+
+    const Address  buf_param(rbp, 8 + 0);
+    const Address  state_param(rbp, 8 + 4);
+    const Address  ofs_param(rbp, 8 + 8);
+    const Address  limit_param(rbp, 8 + 12);
+
+    const XMMRegister abcd = xmm0;
+    const XMMRegister e0 = xmm1;
+    const XMMRegister e1 = xmm2;
+    const XMMRegister msg0 = xmm3;
+
+    const XMMRegister msg1 = xmm4;
+    const XMMRegister msg2 = xmm5;
+    const XMMRegister msg3 = xmm6;
+    const XMMRegister shuf_mask = xmm7;
+
+    __ enter();
+    __ subptr(rsp, 8 * wordSize);
+    if (multi_block) {
+      __ push(limit);
+    }
+    __ movptr(buf, buf_param);
+    __ movptr(state, state_param);
+    if (multi_block) {
+      __ movptr(ofs, ofs_param);
+      __ movptr(limit, limit_param);
+    }
+
+    __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
+      buf, state, ofs, limit, rsp, multi_block);
+
+    if (multi_block) {
+      __ pop(limit);
+    }
+    __ addptr(rsp, 8 * wordSize);
+    __ leave();
+    __ ret(0);
+    return start;
+  }
+
+  address generate_pshuffle_byte_flip_mask() {
+    __ align(64);
+    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
+    address start = __ pc();
+    __ emit_data(0x00010203, relocInfo::none, 0);
+    __ emit_data(0x04050607, relocInfo::none, 0);
+    __ emit_data(0x08090a0b, relocInfo::none, 0);
+    __ emit_data(0x0c0d0e0f, relocInfo::none, 0);
+    return start;
+  }
+
+  // ofs and limit are used for multi-block byte array.
+  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
+  address generate_sha256_implCompress(bool multi_block, const char *name) {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ pc();
+
+    Register buf = rbx;
+    Register state = rsi;
+    Register ofs = rdx;
+    Register limit = rcx;
+
+    const Address  buf_param(rbp, 8 + 0);
+    const Address  state_param(rbp, 8 + 4);
+    const Address  ofs_param(rbp, 8 + 8);
+    const Address  limit_param(rbp, 8 + 12);
+
+    const XMMRegister msg = xmm0;
+    const XMMRegister state0 = xmm1;
+    const XMMRegister state1 = xmm2;
+    const XMMRegister msgtmp0 = xmm3;
+
+    const XMMRegister msgtmp1 = xmm4;
+    const XMMRegister msgtmp2 = xmm5;
+    const XMMRegister msgtmp3 = xmm6;
+    const XMMRegister msgtmp4 = xmm7;
+
+    __ enter();
+    __ subptr(rsp, 8 * wordSize);
+    handleSOERegisters(true /*saving*/);
+    __ movptr(buf, buf_param);
+    __ movptr(state, state_param);
+    if (multi_block) {
+     __ movptr(ofs, ofs_param);
+     __ movptr(limit, limit_param);
+    }
+
+    __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
+      buf, state, ofs, limit, rsp, multi_block);
+
+    handleSOERegisters(false);
+    __ addptr(rsp, 8 * wordSize);
+    __ leave();
+    __ ret(0);
+    return start;
+  }
 
   // byte swap x86 long
   address generate_ghash_long_swap_mask() {
@@ -3772,6 +3902,19 @@
       StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
     }
 
+    if (UseSHA1Intrinsics) {
+      StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
+      StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
+      StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress");
+      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB");
+    }
+    if (UseSHA256Intrinsics) {
+      StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
+      StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
+      StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
+      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
+    }
+
     // Generate GHASH intrinsics code
     if (UseGHASHIntrinsics) {
       StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
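Read as pshufb controls, the two byte-flip masks emitted above (and again in the 64-bit generator below) do different things: the SHA-1 shuffle_byte_flip_mask reverses all 16 bytes of the message load, while the SHA-256 pshuffle_byte_flip_mask byte-swaps each 32-bit word; upper_word_mask, applied with pand after the pinsrd of E, keeps only the topmost dword. A sketch of the two shuffle effects using the SSSE3 intrinsic:

    #include <immintrin.h>

    __m128i sha1_byte_flip(__m128i msg) {    // reverse all 16 bytes
      const __m128i m = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7,
                                     8, 9, 10, 11, 12, 13, 14, 15);
      return _mm_shuffle_epi8(msg, m);
    }

    __m128i sha256_byte_flip(__m128i msg) {  // bswap each 32-bit word
      const __m128i m = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11,
                                     4, 5, 6, 7, 0, 1, 2, 3);
      return _mm_shuffle_epi8(msg, m);
    }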
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -275,7 +275,7 @@
     }
     if (VM_Version::supports_evex()) {
       for (int i = xmm_save_first; i <= last_reg; i++) {
-        __ vextractf32x4h(xmm_save(i), as_XMMRegister(i), 0);
+        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
       }
     } else {
       for (int i = xmm_save_first; i <= last_reg; i++) {
@@ -393,7 +393,7 @@
     // emit the restores for xmm regs
     if (VM_Version::supports_evex()) {
       for (int i = xmm_save_first; i <= last_reg; i++) {
-        __ vinsertf32x4h(as_XMMRegister(i), xmm_save(i), 0);
+        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
       }
     } else {
       for (int i = xmm_save_first; i <= last_reg; i++) {
@@ -3695,6 +3695,133 @@
     return start;
   }
 
+  address generate_upper_word_mask() {
+    __ align(64);
+    StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
+    address start = __ pc();
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
+    return start;
+  }
+
+  address generate_shuffle_byte_flip_mask() {
+    __ align(64);
+    StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
+    address start = __ pc();
+    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
+    __ emit_data64(0x0001020304050607, relocInfo::none);
+    return start;
+  }
+
+  // ofs and limit are used for multi-block byte array.
+  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
+  address generate_sha1_implCompress(bool multi_block, const char *name) {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ pc();
+
+    Register buf = c_rarg0;
+    Register state = c_rarg1;
+    Register ofs = c_rarg2;
+    Register limit = c_rarg3;
+
+    const XMMRegister abcd = xmm0;
+    const XMMRegister e0 = xmm1;
+    const XMMRegister e1 = xmm2;
+    const XMMRegister msg0 = xmm3;
+
+    const XMMRegister msg1 = xmm4;
+    const XMMRegister msg2 = xmm5;
+    const XMMRegister msg3 = xmm6;
+    const XMMRegister shuf_mask = xmm7;
+
+    __ enter();
+
+#ifdef _WIN64
+    // save the xmm registers that must be preserved (xmm6-xmm7)
+    __ subptr(rsp, 4 * wordSize);
+    __ movdqu(Address(rsp, 0), xmm6);
+    __ movdqu(Address(rsp, 2 * wordSize), xmm7);
+#endif
+
+    __ subptr(rsp, 4 * wordSize);
+
+    __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
+      buf, state, ofs, limit, rsp, multi_block);
+
+    __ addptr(rsp, 4 * wordSize);
+#ifdef _WIN64
+    // restore xmm regs belonging to calling function
+    __ movdqu(xmm6, Address(rsp, 0));
+    __ movdqu(xmm7, Address(rsp, 2 * wordSize));
+    __ addptr(rsp, 4 * wordSize);
+#endif
+
+    __ leave();
+    __ ret(0);
+    return start;
+  }
+
+  address generate_pshuffle_byte_flip_mask() {
+    __ align(64);
+    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
+    address start = __ pc();
+    __ emit_data64(0x0405060700010203, relocInfo::none);
+    __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
+    return start;
+  }
+
+  // ofs and limit are used for multi-block byte arrays.
+  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
+  address generate_sha256_implCompress(bool multi_block, const char *name) {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ pc();
+
+    Register buf = c_rarg0;
+    Register state = c_rarg1;
+    Register ofs = c_rarg2;
+    Register limit = c_rarg3;
+
+    const XMMRegister msg = xmm0;
+    const XMMRegister state0 = xmm1;
+    const XMMRegister state1 = xmm2;
+    const XMMRegister msgtmp0 = xmm3;
+
+    const XMMRegister msgtmp1 = xmm4;
+    const XMMRegister msgtmp2 = xmm5;
+    const XMMRegister msgtmp3 = xmm6;
+    const XMMRegister msgtmp4 = xmm7;
+
+    const XMMRegister shuf_mask = xmm8;
+
+    __ enter();
+#ifdef _WIN64
+    // save the xmm registers that must be preserved (xmm6-xmm8)
+    __ subptr(rsp, 6 * wordSize);
+    __ movdqu(Address(rsp, 0), xmm6);
+    __ movdqu(Address(rsp, 2 * wordSize), xmm7);
+    __ movdqu(Address(rsp, 4 * wordSize), xmm8);
+#endif
+
+    __ subptr(rsp, 4 * wordSize);
+
+    __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
+      buf, state, ofs, limit, rsp, multi_block, shuf_mask);
+
+    __ addptr(rsp, 4 * wordSize);
+#ifdef _WIN64
+    // restore xmm regs belonging to calling function
+    __ movdqu(xmm6, Address(rsp, 0));
+    __ movdqu(xmm7, Address(rsp, 2 * wordSize));
+    __ movdqu(xmm8, Address(rsp, 4 * wordSize));
+    __ addptr(rsp, 6 * wordSize);
+#endif
+    __ leave();
+    __ ret(0);
+    return start;
+  }
+
   // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time
   // to hide instruction latency
   //
@@ -4974,6 +5101,19 @@
       StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
     }
 
+    if (UseSHA1Intrinsics) {
+      StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
+      StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
+      StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress");
+      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB");
+    }
+    if (UseSHA256Intrinsics) {
+      StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
+      StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
+      StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
+      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
+    }
+
     // Generate GHASH intrinsics code
     if (UseGHASHIntrinsics) {
       StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
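
The byte-flip masks emitted above feed PSHUFB so that little-endian loads become the big-endian words the SHA rounds consume. A scalar model of that shuffle for the SHA-256 mask (bytes 3,2,1,0, 7,6,5,4, ... once 0x0405060700010203 / 0x0c0d0e0f08090a0b are laid out in little-endian memory); an illustration only:

    #include <cstdint>
    #include <cstdio>

    // Scalar PSHUFB model: out[i] = in[mask[i]] (the mask high bits are clear
    // here, so the zeroing case of the real instruction never triggers).
    static void pshufb(uint8_t out[16], const uint8_t in[16], const uint8_t mask[16]) {
      for (int i = 0; i < 16; i++) out[i] = in[mask[i] & 0x0f];
    }

    int main() {
      const uint8_t flip32[16] = { 3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12 };
      uint8_t in[16], out[16];
      for (int i = 0; i < 16; i++) in[i] = (uint8_t)i;
      pshufb(out, in, flip32);
      for (int i = 0; i < 16; i++) printf("%02x ", out[i]);
      printf("\n");  // 03 02 01 00 07 06 05 04 ...: each 32-bit word byte-swapped
      return 0;
    }

The SHA-1 mask (0x08090a0b0c0d0e0f / 0x0001020304050607) is the full 16-byte reversal, flipping both the byte order and the word order of the 128-bit lane.
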
--- a/hotspot/src/cpu/x86/vm/stubRoutines_x86.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/stubRoutines_x86.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -29,6 +29,12 @@
 #include "runtime/thread.inline.hpp"
 #include "crc32c.h"
 
+#ifdef _MSC_VER
+#define ALIGNED_(x) __declspec(align(x))
+#else
+#define ALIGNED_(x) __attribute__ ((aligned(x)))
+#endif
+
 // Implementation of the platform-specific part of StubRoutines - for
 // a description of how to extend it, see the stubRoutines.hpp file.
 
@@ -37,6 +43,10 @@
 address StubRoutines::x86::_counter_shuffle_mask_addr = NULL;
 address StubRoutines::x86::_ghash_long_swap_mask_addr = NULL;
 address StubRoutines::x86::_ghash_byte_swap_mask_addr = NULL;
+address StubRoutines::x86::_upper_word_mask_addr = NULL;
+address StubRoutines::x86::_shuffle_byte_flip_mask_addr = NULL;
+address StubRoutines::x86::_k256_adr = NULL;
+address StubRoutines::x86::_pshuffle_byte_flip_mask_addr = NULL;
 
 uint64_t StubRoutines::x86::_crc_by128_masks[] =
 {
@@ -236,3 +246,23 @@
     _crc32c_table = (juint*)pclmulqdq_table;
   }
 }
+
+ALIGNED_(64) juint StubRoutines::x86::_k256[] =
+{
+    0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL,
+    0x3956c25bUL, 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL,
+    0xd807aa98UL, 0x12835b01UL, 0x243185beUL, 0x550c7dc3UL,
+    0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL, 0xc19bf174UL,
+    0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
+    0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL,
+    0x983e5152UL, 0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL,
+    0xc6e00bf3UL, 0xd5a79147UL, 0x06ca6351UL, 0x14292967UL,
+    0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL, 0x53380d13UL,
+    0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
+    0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL,
+    0xd192e819UL, 0xd6990624UL, 0xf40e3585UL, 0x106aa070UL,
+    0x19a4c116UL, 0x1e376c08UL, 0x2748774cUL, 0x34b0bcb5UL,
+    0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL, 0x682e6ff3UL,
+    0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
+    0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL
+};
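
These are the SHA-256 round constants from FIPS 180-4: the first 32 bits of the fractional parts of the cube roots of the first 64 primes. A self-contained cross-check of the table (an illustration, not how HotSpot produces it):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main() {
      int count = 0;
      for (int n = 2; count < 64; n++) {
        bool prime = true;
        for (int d = 2; d * d <= n; d++)
          if (n % d == 0) { prime = false; break; }
        if (!prime) continue;
        double frac = cbrt((double)n);
        frac -= floor(frac);                      // fractional part of n^(1/3)
        printf("0x%08xUL,%s", (uint32_t)(frac * 4294967296.0),
               (++count % 4 == 0) ? "\n" : " ");  // four constants per row
      }
      return 0;
    }

The first row printed should read 0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL, matching the table above.
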
--- a/hotspot/src/cpu/x86/vm/stubRoutines_x86.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/stubRoutines_x86.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -46,6 +46,17 @@
   static address _ghash_long_swap_mask_addr;
   static address _ghash_byte_swap_mask_addr;
 
+  // upper word mask for sha1
+  static address _upper_word_mask_addr;
+  // byte flip mask for sha1
+  static address _shuffle_byte_flip_mask_addr;
+
+  // k256 table for sha256
+  static juint _k256[];
+  static address _k256_adr;
+  // byte flip mask for sha256
+  static address _pshuffle_byte_flip_mask_addr;
+
  public:
   static address verify_mxcsr_entry()    { return _verify_mxcsr_entry; }
   static address key_shuffle_mask_addr() { return _key_shuffle_mask_addr; }
@@ -53,5 +64,9 @@
   static address crc_by128_masks_addr()  { return (address)_crc_by128_masks; }
   static address ghash_long_swap_mask_addr() { return _ghash_long_swap_mask_addr; }
   static address ghash_byte_swap_mask_addr() { return _ghash_byte_swap_mask_addr; }
+  static address upper_word_mask_addr() { return _upper_word_mask_addr; }
+  static address shuffle_byte_flip_mask_addr() { return _shuffle_byte_flip_mask_addr; }
+  static address k256_addr()      { return _k256_adr; }
+  static address pshuffle_byte_flip_mask_addr() { return _pshuffle_byte_flip_mask_addr; }
   static void generate_CRC32C_table(bool is_pclmulqdq_supported);
 #endif // CPU_X86_VM_STUBROUTINES_X86_32_HPP
--- a/hotspot/src/cpu/x86/vm/vmStructs_x86.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/vmStructs_x86.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -68,10 +68,11 @@
   declare_constant(VM_Version::CPU_AVX512DQ)                        \
   declare_constant(VM_Version::CPU_AVX512PF)                        \
   declare_constant(VM_Version::CPU_AVX512ER)                        \
-  declare_constant(VM_Version::CPU_AVX512CD)                        \
-  declare_constant(VM_Version::CPU_AVX512BW)
+  declare_constant(VM_Version::CPU_AVX512CD)
 
 #define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) \
-  declare_preprocessor_constant("VM_Version::CPU_AVX512VL", CPU_AVX512VL)
+  declare_preprocessor_constant("VM_Version::CPU_AVX512BW", CPU_AVX512BW) \
+  declare_preprocessor_constant("VM_Version::CPU_AVX512VL", CPU_AVX512VL) \
+  declare_preprocessor_constant("VM_Version::CPU_SHA", CPU_SHA)
 
 #endif // CPU_X86_VM_VMSTRUCTS_X86_HPP
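
The switch from declare_constant to declare_preprocessor_constant mirrors the vm_version_x86.hpp change further down: CPU_AVX512BW and friends stop being int-typed enumerators (which top out below bit 31, the sign bit) and become 64-bit macros, so the VM-structs export has to take the preprocessor path. A compressed illustration of the pattern, with names invented for the sketch:

    #include <cstdint>

    // Fine as an enumerator: fits in a signed 32-bit int.
    enum { DEMO_CPU_AVX512CD = (1 << 30) };

    // Does not fit: bits 31 and up move to 64-bit macro constants.
    #define DEMO_CPU_SHA ((uint64_t)0x400000000ULL)

    static inline bool demo_has_sha(uint64_t features) {
      return (features & DEMO_CPU_SHA) != 0;
    }
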
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -385,7 +385,7 @@
 
     __ movdl(xmm0, rcx);
     __ pshufd(xmm0, xmm0, 0x00);
-    __ vinsertf128h(xmm0, xmm0, xmm0);
+    __ vinsertf128_high(xmm0, xmm0);
     __ vmovdqu(xmm7, xmm0);
 #ifdef _LP64
     __ vmovdqu(xmm8, xmm0);
@@ -577,7 +577,7 @@
   }
 
   char buf[256];
-  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
                cores_per_cpu(), threads_per_core(),
                cpu_family(), _model, _stepping,
                (supports_cmov() ? ", cmov" : ""),
@@ -608,7 +608,8 @@
                (supports_bmi1() ? ", bmi1" : ""),
                (supports_bmi2() ? ", bmi2" : ""),
                (supports_adx() ? ", adx" : ""),
-               (supports_evex() ? ", evex" : ""));
+               (supports_evex() ? ", evex" : ""),
+               (supports_sha() ? ", sha" : ""));
   _features_string = os::strdup(buf);
 
   // UseSSE is set to the smaller of what hardware supports and what
@@ -730,17 +731,29 @@
     FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
   }
 
-  if (UseSHA) {
+  if (supports_sha()) {
+    if (FLAG_IS_DEFAULT(UseSHA)) {
+      UseSHA = true;
+    }
+  } else if (UseSHA) {
     warning("SHA instructions are not available on this CPU");
     FLAG_SET_DEFAULT(UseSHA, false);
   }
 
-  if (UseSHA1Intrinsics) {
+  if (UseSHA) {
+    if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
+      FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
+    }
+  } else if (UseSHA1Intrinsics) {
     warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
     FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
   }
 
-  if (UseSHA256Intrinsics) {
+  if (UseSHA) {
+    if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
+      FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
+    }
+  } else if (UseSHA256Intrinsics) {
     warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
     FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
   }
@@ -750,6 +763,10 @@
     FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
   }
 
+  if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
+    FLAG_SET_DEFAULT(UseSHA, false);
+  }
+
   if (UseAdler32Intrinsics) {
     warning("Adler32Intrinsics not available on this CPU.");
     FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
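
The rewritten block above turns the SHA switches into an ergonomic cascade: hardware support defaults UseSHA on, UseSHA defaults the per-algorithm intrinsics on, and UseSHA is cleared again if no intrinsic survives (SHA-512 has no stub on this port, so it always stays off). A standalone model of the resulting defaults, simplified to ignore explicit user overrides and warnings:

    struct ShaFlags {
      bool use_sha, sha1, sha256, sha512;
    };

    ShaFlags derive_sha_defaults(bool cpu_has_sha) {
      ShaFlags f = { cpu_has_sha, false, false, false };
      f.sha1   = f.use_sha;   // UseSHA1Intrinsics defaults to UseSHA
      f.sha256 = f.use_sha;   // UseSHA256Intrinsics defaults to UseSHA
      // f.sha512 stays false: no SHA-512 intrinsic on this port.
      if (!(f.sha1 || f.sha256 || f.sha512)) f.use_sha = false;
      return f;
    }
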
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -221,7 +221,7 @@
                avx512pf : 1,
                avx512er : 1,
                avx512cd : 1,
-                        : 1,
+                    sha : 1,
                avx512bw : 1,
                avx512vl : 1;
     } bits;
@@ -282,11 +282,13 @@
     CPU_AVX512DQ = (1 << 27),
     CPU_AVX512PF = (1 << 28),
     CPU_AVX512ER = (1 << 29),
-    CPU_AVX512CD = (1 << 30),
-    CPU_AVX512BW = (1 << 31)
+    CPU_AVX512CD = (1 << 30)
+    // Keeping sign bit 31 unassigned.
   };
 
-#define CPU_AVX512VL UCONST64(0x100000000) // EVEX instructions with smaller vector length : enums are limited to 32bit
+#define CPU_AVX512BW ((uint64_t)UCONST64(0x100000000)) // enums are limited to 31 bits
+#define CPU_AVX512VL ((uint64_t)UCONST64(0x200000000)) // EVEX instructions with smaller vector length
+#define CPU_SHA ((uint64_t)UCONST64(0x400000000))      // SHA instructions
 
   enum Extended_Family {
     // AMD
@@ -516,6 +518,8 @@
          result |= CPU_ADX;
       if(_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0)
         result |= CPU_BMI2;
+      if (_cpuid_info.sef_cpuid7_ebx.bits.sha != 0)
+        result |= CPU_SHA;
       if(_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0)
         result |= CPU_LZCNT;
       // for Intel, ecx.bits.misalignsse bit (bit 8) indicates support for prefetchw
@@ -721,6 +725,7 @@
   static bool supports_avx512nobw() { return (supports_evex() && !supports_avx512bw()); }
   static bool supports_avx256only() { return (supports_avx2() && !supports_evex()); }
   static bool supports_avxonly()    { return ((supports_avx2() || supports_avx()) && !supports_evex()); }
+  static bool supports_sha()        { return (_features & CPU_SHA) != 0; }
   // Intel features
   static bool is_intel_family_core() { return is_intel() &&
                                        extended_cpu_family() == CPU_FAMILY_INTEL_CORE; }
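
The new sha bit slotted into sef_cpuid7_ebx above corresponds to CPUID.(EAX=7,ECX=0):EBX bit 29, which is what supports_sha() ends up reporting. A minimal user-space probe of the same bit (GCC/Clang inline assembly, x86-64 assumed):

    #include <cstdint>
    #include <cstdio>

    static bool cpu_supports_sha() {
      uint32_t eax = 7, ebx, ecx = 0, edx;  // leaf 7, subleaf 0
      __asm__ __volatile__("cpuid"
                           : "+a"(eax), "=b"(ebx), "+c"(ecx), "=d"(edx));
      return (ebx & (1u << 29)) != 0;       // EBX bit 29: SHA extensions
    }

    int main() {
      printf("sha: %s\n", cpu_supports_sha() ? "yes" : "no");
      return 0;
    }
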
--- a/hotspot/src/cpu/x86/vm/x86.ad	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/x86.ad	Tue Apr 05 15:39:34 2016 -0400
@@ -3179,13 +3179,13 @@
             "punpcklbw $dst,$dst\n\t"
             "pshuflw $dst,$dst,0x00\n\t"
             "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate32B" %}
+            "vinserti128_high $dst,$dst\t! replicate32B" %}
   ins_encode %{
     __ movdl($dst$$XMMRegister, $src$$Register);
     __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister);
     __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
     __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe( pipe_slow );
 %}
@@ -3196,12 +3196,12 @@
   format %{ "punpcklbw $dst,$mem\n\t"
             "pshuflw $dst,$dst,0x00\n\t"
             "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate32B" %}
+            "vinserti128_high $dst,$dst\t! replicate32B" %}
   ins_encode %{
     __ punpcklbw($dst$$XMMRegister, $mem$$Address);
     __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
     __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe( pipe_slow );
 %}
@@ -3223,11 +3223,11 @@
   match(Set dst (ReplicateB con));
   format %{ "movq    $dst,[$constantaddress]\n\t"
             "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! lreplicate32B($con)" %}
+            "vinserti128_high $dst,$dst\t! lreplicate32B($con)" %}
   ins_encode %{
     __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1)));
     __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe( pipe_slow );
 %}
@@ -3298,12 +3298,12 @@
   format %{ "movd    $dst,$src\n\t"
             "pshuflw $dst,$dst,0x00\n\t"
             "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate16S" %}
+            "vinserti128_high $dst,$dst\t! replicate16S" %}
   ins_encode %{
     __ movdl($dst$$XMMRegister, $src$$Register);
     __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
     __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe( pipe_slow );
 %}
@@ -3313,11 +3313,11 @@
   match(Set dst (ReplicateS (LoadS mem)));
   format %{ "pshuflw $dst,$mem,0x00\n\t"
             "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate16S" %}
+            "vinserti128_high $dst,$dst\t! replicate16S" %}
   ins_encode %{
     __ pshuflw($dst$$XMMRegister, $mem$$Address, 0x00);
     __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe( pipe_slow );
 %}
@@ -3327,11 +3327,11 @@
   match(Set dst (ReplicateS con));
   format %{ "movq    $dst,[$constantaddress]\n\t"
             "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate16S($con)" %}
+            "vinserti128_high $dst,$dst\t! replicate16S($con)" %}
   ins_encode %{
     __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2)));
     __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe( pipe_slow );
 %}
@@ -3363,11 +3363,11 @@
   match(Set dst (ReplicateI src));
   format %{ "movd    $dst,$src\n\t"
             "pshufd  $dst,$dst,0x00\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate8I" %}
+            "vinserti128_high $dst,$dst\t! replicate8I" %}
   ins_encode %{
     __ movdl($dst$$XMMRegister, $src$$Register);
     __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe( pipe_slow );
 %}
@@ -3376,10 +3376,10 @@
   predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl());
   match(Set dst (ReplicateI (LoadI mem)));
   format %{ "pshufd  $dst,$mem,0x00\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate8I" %}
+            "vinserti128_high $dst,$dst\t! replicate8I" %}
   ins_encode %{
     __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe( pipe_slow );
 %}
@@ -3401,11 +3401,11 @@
   match(Set dst (ReplicateI con));
   format %{ "movq    $dst,[$constantaddress]\t! replicate8I($con)\n\t"
             "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst" %}
+            "vinserti128_high $dst,$dst" %}
   ins_encode %{
     __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
     __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe( pipe_slow );
 %}
@@ -3430,11 +3430,11 @@
   match(Set dst (ReplicateL src));
   format %{ "movdq   $dst,$src\n\t"
             "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate4L" %}
+            "vinserti128_high $dst,$dst\t! replicate4L" %}
   ins_encode %{
     __ movdq($dst$$XMMRegister, $src$$Register);
     __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe( pipe_slow );
 %}
@@ -3447,13 +3447,13 @@
             "movdl   $tmp,$src.hi\n\t"
             "punpckldq $dst,$tmp\n\t"
             "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate4L" %}
+            "vinserti128_high $dst,$dst\t! replicate4L" %}
   ins_encode %{
     __ movdl($dst$$XMMRegister, $src$$Register);
     __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
     __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
     __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe( pipe_slow );
 %}
@@ -3464,11 +3464,11 @@
   match(Set dst (ReplicateL con));
   format %{ "movq    $dst,[$constantaddress]\n\t"
             "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate4L($con)" %}
+            "vinserti128_high $dst,$dst\t! replicate4L($con)" %}
   ins_encode %{
     __ movq($dst$$XMMRegister, $constantaddress($con));
     __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe( pipe_slow );
 %}
@@ -3478,11 +3478,11 @@
   match(Set dst (ReplicateL (LoadL mem)));
   format %{ "movq    $dst,$mem\n\t"
             "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate4L" %}
+            "vinserti128_high $dst,$dst\t! replicate4L" %}
   ins_encode %{
     __ movq($dst$$XMMRegister, $mem$$Address);
     __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe( pipe_slow );
 %}
@@ -3511,10 +3511,10 @@
   predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl());
   match(Set dst (ReplicateF src));
   format %{ "pshufd  $dst,$src,0x00\n\t"
-            "vinsertf128h $dst,$dst,$dst\t! replicate8F" %}
+            "vinsertf128_high $dst,$dst\t! replicate8F" %}
   ins_encode %{
     __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
-    __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe( pipe_slow );
 %}
@@ -3523,10 +3523,10 @@
   predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl());
   match(Set dst (ReplicateF (LoadF mem)));
   format %{ "pshufd  $dst,$mem,0x00\n\t"
-            "vinsertf128h $dst,$dst,$dst\t! replicate8F" %}
+            "vinsertf128_high $dst,$dst\t! replicate8F" %}
   ins_encode %{
     __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00);
-    __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe( pipe_slow );
 %}
@@ -3576,10 +3576,10 @@
   predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl());
   match(Set dst (ReplicateD src));
   format %{ "pshufd  $dst,$src,0x44\n\t"
-            "vinsertf128h $dst,$dst,$dst\t! replicate4D" %}
+            "vinsertf128_high $dst,$dst\t! replicate4D" %}
   ins_encode %{
     __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44);
-    __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe( pipe_slow );
 %}
@@ -3588,10 +3588,10 @@
   predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl());
   match(Set dst (ReplicateD (LoadD mem)));
   format %{ "pshufd  $dst,$mem,0x44\n\t"
-            "vinsertf128h $dst,$dst,$dst\t! replicate4D" %}
+            "vinsertf128_high $dst,$dst\t! replicate4D" %}
   ins_encode %{
     __ pshufd($dst$$XMMRegister, $mem$$Address, 0x44);
-    __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe( pipe_slow );
 %}
@@ -4791,7 +4791,7 @@
   effect(TEMP tmp, TEMP tmp2);
   format %{ "vphaddd  $tmp,$src2,$src2\n\t"
             "vphaddd  $tmp,$tmp,$tmp2\n\t"
-            "vextracti128  $tmp2,$tmp\n\t"
+            "vextracti128_high  $tmp2,$tmp\n\t"
             "vpaddd   $tmp,$tmp,$tmp2\n\t"
             "movd     $tmp2,$src1\n\t"
             "vpaddd   $tmp2,$tmp2,$tmp\n\t"
@@ -4800,7 +4800,7 @@
     int vector_len = 1;
     __ vphaddd($tmp$$XMMRegister, $src2$$XMMRegister, $src2$$XMMRegister, vector_len);
     __ vphaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len);
-    __ vextracti128h($tmp2$$XMMRegister, $tmp$$XMMRegister);
+    __ vextracti128_high($tmp2$$XMMRegister, $tmp$$XMMRegister);
     __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, 0);
     __ movdl($tmp2$$XMMRegister, $src1$$Register);
     __ vpaddd($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
@@ -4813,7 +4813,7 @@
   predicate(UseAVX > 2);
   match(Set dst (AddReductionVI src1 src2));
   effect(TEMP tmp, TEMP tmp2);
-  format %{ "vextracti128  $tmp,$src2\n\t"
+  format %{ "vextracti128_high  $tmp,$src2\n\t"
             "vpaddd  $tmp,$tmp,$src2\n\t"
             "pshufd  $tmp2,$tmp,0xE\n\t"
             "vpaddd  $tmp,$tmp,$tmp2\n\t"
@@ -4824,7 +4824,7 @@
             "movd    $dst,$tmp2\t! add reduction8I" %}
   ins_encode %{
     int vector_len = 0;
-    __ vextracti128h($tmp$$XMMRegister, $src2$$XMMRegister);
+    __ vextracti128_high($tmp$$XMMRegister, $src2$$XMMRegister);
     __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $src2$$XMMRegister, vector_len);
     __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0xE);
     __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len);
@@ -4841,9 +4841,9 @@
   predicate(UseAVX > 2);
   match(Set dst (AddReductionVI src1 src2));
   effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
-  format %{ "vextracti64x4  $tmp3,$src2,0x1\n\t"
+  format %{ "vextracti64x4_high  $tmp3,$src2\n\t"
             "vpaddd  $tmp3,$tmp3,$src2\n\t"
-            "vextracti128   $tmp,$tmp3\n\t"
+            "vextracti128_high  $tmp,$tmp3\n\t"
             "vpaddd  $tmp,$tmp,$tmp3\n\t"
             "pshufd  $tmp2,$tmp,0xE\n\t"
             "vpaddd  $tmp,$tmp,$tmp2\n\t"
@@ -4853,9 +4853,9 @@
             "vpaddd  $tmp2,$tmp,$tmp2\n\t"
             "movd    $dst,$tmp2\t! mul reduction16I" %}
   ins_encode %{
-    __ vextracti64x4h($tmp3$$XMMRegister, $src2$$XMMRegister, 1);
+    __ vextracti64x4_high($tmp3$$XMMRegister, $src2$$XMMRegister);
     __ vpaddd($tmp3$$XMMRegister, $tmp3$$XMMRegister, $src2$$XMMRegister, 1);
-    __ vextracti128h($tmp$$XMMRegister, $tmp3$$XMMRegister);
+    __ vextracti128_high($tmp$$XMMRegister, $tmp3$$XMMRegister);
     __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp3$$XMMRegister, 0);
     __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0xE);
     __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, 0);
@@ -4892,7 +4892,7 @@
   predicate(UseAVX > 2);
   match(Set dst (AddReductionVL src1 src2));
   effect(TEMP tmp, TEMP tmp2);
-  format %{ "vextracti128  $tmp,$src2\n\t"
+  format %{ "vextracti128_high  $tmp,$src2\n\t"
             "vpaddq  $tmp2,$tmp,$src2\n\t"
             "pshufd  $tmp,$tmp2,0xE\n\t"
             "vpaddq  $tmp2,$tmp2,$tmp\n\t"
@@ -4900,7 +4900,7 @@
             "vpaddq  $tmp2,$tmp2,$tmp\n\t"
             "movdq   $dst,$tmp2\t! add reduction4L" %}
   ins_encode %{
-    __ vextracti128h($tmp$$XMMRegister, $src2$$XMMRegister);
+    __ vextracti128_high($tmp$$XMMRegister, $src2$$XMMRegister);
     __ vpaddq($tmp2$$XMMRegister, $tmp$$XMMRegister, $src2$$XMMRegister, 0);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
     __ vpaddq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
@@ -4915,9 +4915,9 @@
   predicate(UseAVX > 2);
   match(Set dst (AddReductionVL src1 src2));
   effect(TEMP tmp, TEMP tmp2);
-  format %{ "vextracti64x4  $tmp2,$src2,0x1\n\t"
+  format %{ "vextracti64x4_high  $tmp2,$src2\n\t"
             "vpaddq  $tmp2,$tmp2,$src2\n\t"
-            "vextracti128   $tmp,$tmp2\n\t"
+            "vextracti128_high  $tmp,$tmp2\n\t"
             "vpaddq  $tmp2,$tmp2,$tmp\n\t"
             "pshufd  $tmp,$tmp2,0xE\n\t"
             "vpaddq  $tmp2,$tmp2,$tmp\n\t"
@@ -4925,9 +4925,9 @@
             "vpaddq  $tmp2,$tmp2,$tmp\n\t"
             "movdq   $dst,$tmp2\t! add reduction8L" %}
   ins_encode %{
-    __ vextracti64x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 1);
+    __ vextracti64x4_high($tmp2$$XMMRegister, $src2$$XMMRegister);
     __ vpaddq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $src2$$XMMRegister, 1);
-    __ vextracti128h($tmp$$XMMRegister, $tmp2$$XMMRegister);
+    __ vextracti128_high($tmp$$XMMRegister, $tmp2$$XMMRegister);
     __ vpaddq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
     __ vpaddq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
@@ -5026,7 +5026,7 @@
             "vaddss  $dst,$dst,$tmp\n\t"
             "pshufd  $tmp,$src2,0x03\n\t"
             "vaddss  $dst,$dst,$tmp\n\t"
-            "vextractf128  $tmp2,$src2\n\t"
+            "vextractf128_high  $tmp2,$src2\n\t"
             "vaddss  $dst,$dst,$tmp2\n\t"
             "pshufd  $tmp,$tmp2,0x01\n\t"
             "vaddss  $dst,$dst,$tmp\n\t"
@@ -5042,7 +5042,7 @@
     __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
     __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
-    __ vextractf128h($tmp2$$XMMRegister, $src2$$XMMRegister);
+    __ vextractf128_high($tmp2$$XMMRegister, $src2$$XMMRegister);
     __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
     __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@@ -5065,7 +5065,7 @@
             "vaddss  $dst,$dst,$tmp\n\t"
             "pshufd  $tmp,$src2,0x03\n\t"
             "vaddss  $dst,$dst,$tmp\n\t"
-            "vextractf32x4  $tmp2,$src2, 0x1\n\t"
+            "vextractf32x4  $tmp2,$src2,0x1\n\t"
             "vaddss  $dst,$dst,$tmp2\n\t"
             "pshufd  $tmp,$tmp2,0x01\n\t"
             "vaddss  $dst,$dst,$tmp\n\t"
@@ -5073,7 +5073,7 @@
             "vaddss  $dst,$dst,$tmp\n\t"
             "pshufd  $tmp,$tmp2,0x03\n\t"
             "vaddss  $dst,$dst,$tmp\n\t"
-            "vextractf32x4  $tmp2,$src2, 0x2\n\t"
+            "vextractf32x4  $tmp2,$src2,0x2\n\t"
             "vaddss  $dst,$dst,$tmp2\n\t"
             "pshufd  $tmp,$tmp2,0x01\n\t"
             "vaddss  $dst,$dst,$tmp\n\t"
@@ -5081,7 +5081,7 @@
             "vaddss  $dst,$dst,$tmp\n\t"
             "pshufd  $tmp,$tmp2,0x03\n\t"
             "vaddss  $dst,$dst,$tmp\n\t"
-            "vextractf32x4  $tmp2,$src2, 0x3\n\t"
+            "vextractf32x4  $tmp2,$src2,0x3\n\t"
             "vaddss  $dst,$dst,$tmp2\n\t"
             "pshufd  $tmp,$tmp2,0x01\n\t"
             "vaddss  $dst,$dst,$tmp\n\t"
@@ -5097,7 +5097,7 @@
     __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
     __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
-    __ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
+    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
     __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
     __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@@ -5105,7 +5105,7 @@
     __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03);
     __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
-    __ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2);
+    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2);
     __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
     __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@@ -5113,7 +5113,7 @@
     __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03);
     __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
-    __ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3);
+    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3);
     __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
     __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@@ -5162,7 +5162,7 @@
   format %{ "vaddsd  $dst,$dst,$src2\n\t"
             "pshufd  $tmp,$src2,0xE\n\t"
             "vaddsd  $dst,$dst,$tmp\n\t"
-            "vextractf32x4h  $tmp2,$src2, 0x1\n\t"
+            "vextractf32x4  $tmp2,$src2,0x1\n\t"
             "vaddsd  $dst,$dst,$tmp2\n\t"
             "pshufd  $tmp,$tmp2,0xE\n\t"
             "vaddsd  $dst,$dst,$tmp\t! add reduction4D" %}
@@ -5170,7 +5170,7 @@
     __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
     __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
-    __ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
+    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
     __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
     __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@@ -5185,15 +5185,15 @@
   format %{ "vaddsd  $dst,$dst,$src2\n\t"
             "pshufd  $tmp,$src2,0xE\n\t"
             "vaddsd  $dst,$dst,$tmp\n\t"
-            "vextractf32x4  $tmp2,$src2, 0x1\n\t"
+            "vextractf32x4  $tmp2,$src2,0x1\n\t"
             "vaddsd  $dst,$dst,$tmp2\n\t"
             "pshufd  $tmp,$tmp2,0xE\n\t"
             "vaddsd  $dst,$dst,$tmp\n\t"
-            "vextractf32x4  $tmp2,$src2, 0x2\n\t"
+            "vextractf32x4  $tmp2,$src2,0x2\n\t"
             "vaddsd  $dst,$dst,$tmp2\n\t"
             "pshufd  $tmp,$tmp2,0xE\n\t"
             "vaddsd  $dst,$dst,$tmp\n\t"
-            "vextractf32x4  $tmp2,$src2, 0x3\n\t"
+            "vextractf32x4  $tmp2,$src2,0x3\n\t"
             "vaddsd  $dst,$dst,$tmp2\n\t"
             "pshufd  $tmp,$tmp2,0xE\n\t"
             "vaddsd  $dst,$dst,$tmp\t! add reduction8D" %}
@@ -5201,15 +5201,15 @@
     __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
     __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
-    __ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
+    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
     __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
     __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
-    __ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2);
+    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2);
     __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
     __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
-    __ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3);
+    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3);
     __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
     __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@@ -5307,7 +5307,7 @@
   predicate(UseAVX > 0);
   match(Set dst (MulReductionVI src1 src2));
   effect(TEMP tmp, TEMP tmp2);
-  format %{ "vextracti128  $tmp,$src2\n\t"
+  format %{ "vextracti128_high  $tmp,$src2\n\t"
             "vpmulld  $tmp,$tmp,$src2\n\t"
             "pshufd   $tmp2,$tmp,0xE\n\t"
             "vpmulld  $tmp,$tmp,$tmp2\n\t"
@@ -5318,7 +5318,7 @@
             "movd     $dst,$tmp2\t! mul reduction8I" %}
   ins_encode %{
     int vector_len = 0;
-    __ vextracti128h($tmp$$XMMRegister, $src2$$XMMRegister);
+    __ vextracti128_high($tmp$$XMMRegister, $src2$$XMMRegister);
     __ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $src2$$XMMRegister, vector_len);
     __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0xE);
     __ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len);
@@ -5335,9 +5335,9 @@
   predicate(UseAVX > 2);
   match(Set dst (MulReductionVI src1 src2));
   effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
-  format %{ "vextracti64x4  $tmp3,$src2,0x1\n\t"
+  format %{ "vextracti64x4_high  $tmp3,$src2\n\t"
             "vpmulld  $tmp3,$tmp3,$src2\n\t"
-            "vextracti128   $tmp,$tmp3\n\t"
+            "vextracti128_high  $tmp,$tmp3\n\t"
             "vpmulld  $tmp,$tmp,$src2\n\t"
             "pshufd   $tmp2,$tmp,0xE\n\t"
             "vpmulld  $tmp,$tmp,$tmp2\n\t"
@@ -5347,9 +5347,9 @@
             "vpmulld  $tmp2,$tmp,$tmp2\n\t"
             "movd     $dst,$tmp2\t! mul reduction16I" %}
   ins_encode %{
-    __ vextracti64x4h($tmp3$$XMMRegister, $src2$$XMMRegister, 1);
+    __ vextracti64x4_high($tmp3$$XMMRegister, $src2$$XMMRegister);
     __ vpmulld($tmp3$$XMMRegister, $tmp3$$XMMRegister, $src2$$XMMRegister, 1);
-    __ vextracti128h($tmp$$XMMRegister, $tmp3$$XMMRegister);
+    __ vextracti128_high($tmp$$XMMRegister, $tmp3$$XMMRegister);
     __ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp3$$XMMRegister, 0);
     __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0xE);
     __ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, 0);
@@ -5386,7 +5386,7 @@
   predicate(UseAVX > 2 && VM_Version::supports_avx512dq());
   match(Set dst (MulReductionVL src1 src2));
   effect(TEMP tmp, TEMP tmp2);
-  format %{ "vextracti128  $tmp,$src2\n\t"
+  format %{ "vextracti128_high  $tmp,$src2\n\t"
             "vpmullq  $tmp2,$tmp,$src2\n\t"
             "pshufd   $tmp,$tmp2,0xE\n\t"
             "vpmullq  $tmp2,$tmp2,$tmp\n\t"
@@ -5394,7 +5394,7 @@
             "vpmullq  $tmp2,$tmp2,$tmp\n\t"
             "movdq    $dst,$tmp2\t! mul reduction4L" %}
   ins_encode %{
-    __ vextracti128h($tmp$$XMMRegister, $src2$$XMMRegister);
+    __ vextracti128_high($tmp$$XMMRegister, $src2$$XMMRegister);
     __ vpmullq($tmp2$$XMMRegister, $tmp$$XMMRegister, $src2$$XMMRegister, 0);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
     __ vpmullq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
@@ -5409,9 +5409,9 @@
   predicate(UseAVX > 2 && VM_Version::supports_avx512dq());
   match(Set dst (MulReductionVL src1 src2));
   effect(TEMP tmp, TEMP tmp2);
-  format %{ "vextracti64x4  $tmp2,$src2,0x1\n\t"
+  format %{ "vextracti64x4_high  $tmp2,$src2\n\t"
             "vpmullq  $tmp2,$tmp2,$src2\n\t"
-            "vextracti128   $tmp,$tmp2\n\t"
+            "vextracti128_high  $tmp,$tmp2\n\t"
             "vpmullq  $tmp2,$tmp2,$tmp\n\t"
             "pshufd   $tmp,$tmp2,0xE\n\t"
             "vpmullq  $tmp2,$tmp2,$tmp\n\t"
@@ -5419,9 +5419,9 @@
             "vpmullq  $tmp2,$tmp2,$tmp\n\t"
             "movdq    $dst,$tmp2\t! mul reduction8L" %}
   ins_encode %{
-    __ vextracti64x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 1);
+    __ vextracti64x4_high($tmp2$$XMMRegister, $src2$$XMMRegister);
     __ vpmullq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $src2$$XMMRegister, 1);
-    __ vextracti128h($tmp$$XMMRegister, $tmp2$$XMMRegister);
+    __ vextracti128_high($tmp$$XMMRegister, $tmp2$$XMMRegister);
     __ vpmullq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
     __ vpmullq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
@@ -5520,7 +5520,7 @@
             "vmulss  $dst,$dst,$tmp\n\t"
             "pshufd  $tmp,$src2,0x03\n\t"
             "vmulss  $dst,$dst,$tmp\n\t"
-            "vextractf128  $tmp2,$src2\n\t"
+            "vextractf128_high  $tmp2,$src2\n\t"
             "vmulss  $dst,$dst,$tmp2\n\t"
             "pshufd  $tmp,$tmp2,0x01\n\t"
             "vmulss  $dst,$dst,$tmp\n\t"
@@ -5536,7 +5536,7 @@
     __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
     __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
-    __ vextractf128h($tmp2$$XMMRegister, $src2$$XMMRegister);
+    __ vextractf128_high($tmp2$$XMMRegister, $src2$$XMMRegister);
     __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
     __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@@ -5559,7 +5559,7 @@
             "vmulss  $dst,$dst,$tmp\n\t"
             "pshufd  $tmp,$src2,0x03\n\t"
             "vmulss  $dst,$dst,$tmp\n\t"
-            "vextractf32x4  $tmp2,$src2, 0x1\n\t"
+            "vextractf32x4  $tmp2,$src2,0x1\n\t"
             "vmulss  $dst,$dst,$tmp2\n\t"
             "pshufd  $tmp,$tmp2,0x01\n\t"
             "vmulss  $dst,$dst,$tmp\n\t"
@@ -5567,7 +5567,7 @@
             "vmulss  $dst,$dst,$tmp\n\t"
             "pshufd  $tmp,$tmp2,0x03\n\t"
             "vmulss  $dst,$dst,$tmp\n\t"
-            "vextractf32x4  $tmp2,$src2, 0x2\n\t"
+            "vextractf32x4  $tmp2,$src2,0x2\n\t"
             "vmulss  $dst,$dst,$tmp2\n\t"
             "pshufd  $tmp,$tmp2,0x01\n\t"
             "vmulss  $dst,$dst,$tmp\n\t"
@@ -5575,7 +5575,7 @@
             "vmulss  $dst,$dst,$tmp\n\t"
             "pshufd  $tmp,$tmp2,0x03\n\t"
             "vmulss  $dst,$dst,$tmp\n\t"
-            "vextractf32x4  $tmp2,$src2, 0x3\n\t"
+            "vextractf32x4  $tmp2,$src2,0x3\n\t"
             "vmulss  $dst,$dst,$tmp2\n\t"
             "pshufd  $tmp,$tmp2,0x01\n\t"
             "vmulss  $dst,$dst,$tmp\n\t"
@@ -5591,7 +5591,7 @@
     __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
     __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
-    __ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
+    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
     __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
     __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@@ -5599,7 +5599,7 @@
     __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03);
     __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
-    __ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2);
+    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2);
     __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
     __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@@ -5607,7 +5607,7 @@
     __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03);
     __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
-    __ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3);
+    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3);
     __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
     __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@@ -5656,7 +5656,7 @@
   format %{ "vmulsd  $dst,$dst,$src2\n\t"
             "pshufd  $tmp,$src2,0xE\n\t"
             "vmulsd  $dst,$dst,$tmp\n\t"
-            "vextractf128  $tmp2,$src2\n\t"
+            "vextractf128_high  $tmp2,$src2\n\t"
             "vmulsd  $dst,$dst,$tmp2\n\t"
             "pshufd  $tmp,$tmp2,0xE\n\t"
             "vmulsd  $dst,$dst,$tmp\t! mul reduction4D" %}
@@ -5664,7 +5664,7 @@
     __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
     __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
-    __ vextractf128h($tmp2$$XMMRegister, $src2$$XMMRegister);
+    __ vextractf128_high($tmp2$$XMMRegister, $src2$$XMMRegister);
     __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
     __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@@ -5679,15 +5679,15 @@
   format %{ "vmulsd  $dst,$dst,$src2\n\t"
             "pshufd  $tmp,$src2,0xE\n\t"
             "vmulsd  $dst,$dst,$tmp\n\t"
-            "vextractf32x4  $tmp2,$src2, 0x1\n\t"
+            "vextractf32x4  $tmp2,$src2,0x1\n\t"
             "vmulsd  $dst,$dst,$tmp2\n\t"
             "pshufd  $tmp,$src2,0xE\n\t"
             "vmulsd  $dst,$dst,$tmp\n\t"
-            "vextractf32x4  $tmp2,$src2, 0x2\n\t"
+            "vextractf32x4  $tmp2,$src2,0x2\n\t"
             "vmulsd  $dst,$dst,$tmp2\n\t"
             "pshufd  $tmp,$tmp2,0xE\n\t"
             "vmulsd  $dst,$dst,$tmp\n\t"
-            "vextractf32x4  $tmp2,$src2, 0x3\n\t"
+            "vextractf32x4  $tmp2,$src2,0x3\n\t"
             "vmulsd  $dst,$dst,$tmp2\n\t"
             "pshufd  $tmp,$tmp2,0xE\n\t"
             "vmulsd  $dst,$dst,$tmp\t! mul reduction8D" %}
@@ -5695,15 +5695,15 @@
     __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
     __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
-    __ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
+    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
     __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
     __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
-    __ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2);
+    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2);
     __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
     __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
-    __ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3);
+    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3);
     __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
     __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
     __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
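
The renames running through this file replace the bare "h" suffix with "_high" and drop the always-redundant destination operand; the helpers still target the upper 128-bit lane (imm8 = 1) of a 256-bit register. The same lane selection written with the standard AVX2 intrinsics, as a user-space illustration rather than MacroAssembler code (compile with -mavx2):

    #include <immintrin.h>
    #include <cstdio>

    int main() {
      alignas(32) int v[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
      __m256i ymm = _mm256_load_si256((const __m256i*)v);
      __m128i hi  = _mm256_extracti128_si256(ymm, 1);  // high lane: 4,5,6,7
      ymm = _mm256_inserti128_si256(ymm, hi, 0);       // copy it into the low lane
      alignas(32) int out[8];
      _mm256_store_si256((__m256i*)out, ymm);
      for (int i = 0; i < 8; i++) printf("%d ", out[i]);
      printf("\n");  // prints: 4 5 6 7 4 5 6 7
      return 0;
    }
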
--- a/hotspot/src/cpu/x86/vm/x86_32.ad	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad	Tue Apr 05 15:39:34 2016 -0400
@@ -1420,9 +1420,6 @@
 // The ecx parameter to rep stos for the ClearArray node is in dwords.
 const bool Matcher::init_array_count_is_in_bytes = false;
 
-// Threshold size for cleararray.
-const int Matcher::init_array_short_size = 8 * BytesPerLong;
-
 // Needs 2 CMOV's for longs.
 const int Matcher::long_cmove_cost() { return 1; }
 
@@ -11369,27 +11366,54 @@
 // =======================================================================
 // fast clearing of an array
 instruct rep_stos(eCXRegI cnt, eDIRegP base, eAXRegI zero, Universe dummy, eFlagsReg cr) %{
-  predicate(!UseFastStosb);
+  predicate(!((ClearArrayNode*)n)->is_large());
   match(Set dummy (ClearArray cnt base));
   effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);
-  format %{ "XOR    EAX,EAX\t# ClearArray:\n\t"
-            "SHL    ECX,1\t# Convert doublewords to words\n\t"
-            "REP STOS\t# store EAX into [EDI++] while ECX--" %}
-  ins_encode %{
-    __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct rep_fast_stosb(eCXRegI cnt, eDIRegP base, eAXRegI zero, Universe dummy, eFlagsReg cr) %{
-  predicate(UseFastStosb);
+
+  format %{ $$template
+    $$emit$$"XOR    EAX,EAX\t# ClearArray:\n\t"
+    $$emit$$"CMP    InitArrayShortSize,ECX\n\t"
+    $$emit$$"JG     LARGE\n\t"
+    $$emit$$"SHL    ECX, 1\n\t"
+    $$emit$$"DEC    ECX\n\t"
+    $$emit$$"JS     DONE\t# Zero length\n\t"
+    $$emit$$"MOV    EAX,(EDI,ECX,4)\t# LOOP\n\t"
+    $$emit$$"DEC    ECX\n\t"
+    $$emit$$"JGE    LOOP\n\t"
+    $$emit$$"JMP    DONE\n\t"
+    $$emit$$"# LARGE:\n\t"
+    if (UseFastStosb) {
+       $$emit$$"SHL    ECX,3\t# Convert doublewords to bytes\n\t"
+       $$emit$$"REP STOSB\t# store EAX into [EDI++] while ECX--\n\t"
+    } else {
+       $$emit$$"SHL    ECX,1\t# Convert doublewords to words\n\t"
+       $$emit$$"REP STOS\t# store EAX into [EDI++] while ECX--\n\t"
+    }
+    $$emit$$"# DONE"
+  %}
+  ins_encode %{
+    __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, false);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct rep_stos_large(eCXRegI cnt, eDIRegP base, eAXRegI zero, Universe dummy, eFlagsReg cr) %{
+  predicate(((ClearArrayNode*)n)->is_large());
   match(Set dummy (ClearArray cnt base));
   effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);
-  format %{ "XOR    EAX,EAX\t# ClearArray:\n\t"
-            "SHL    ECX,3\t# Convert doublewords to bytes\n\t"
-            "REP STOSB\t# store EAX into [EDI++] while ECX--" %}
-  ins_encode %{
-    __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register);
+  format %{ $$template
+    $$emit$$"XOR    EAX,EAX\t# ClearArray:\n\t"
+    if (UseFastStosb) {
+       $$emit$$"SHL    ECX,3\t# Convert doublewords to bytes\n\t"
+       $$emit$$"REP STOSB\t# store EAX into [EDI++] while ECX--\n\t"
+    } else {
+       $$emit$$"SHL    ECX,1\t# Convert doublewords to words\n\t"
+       $$emit$$"REP STOS\t# store EAX into [EDI++] while ECX--\n\t"
+    }
+    $$emit$$"# DONE"
+  %}
+  ins_encode %{
+    __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, true);
   %}
   ins_pipe( pipe_slow );
 %}
--- a/hotspot/src/cpu/x86/vm/x86_64.ad	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad	Tue Apr 05 15:39:34 2016 -0400
@@ -1637,9 +1637,6 @@
 // The ecx parameter to rep stosq for the ClearArray node is in words.
 const bool Matcher::init_array_count_is_in_bytes = false;
 
-// Threshold size for cleararray.
-const int Matcher::init_array_short_size = 8 * BytesPerLong;
-
 // No additional cost for CMOVL.
 const int Matcher::long_cmove_cost() { return 0; }
 
@@ -10460,31 +10457,55 @@
 instruct rep_stos(rcx_RegL cnt, rdi_RegP base, rax_RegI zero, Universe dummy,
                   rFlagsReg cr)
 %{
-  predicate(!UseFastStosb);
+  predicate(!((ClearArrayNode*)n)->is_large());
   match(Set dummy (ClearArray cnt base));
   effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);
 
-  format %{ "xorq    rax, rax\t# ClearArray:\n\t"
-            "rep     stosq\t# Store rax to *rdi++ while rcx--" %}
-  ins_encode %{
-    __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register);
+  format %{ $$template
+    $$emit$$"xorq    rax, rax\t# ClearArray:\n\t"
+    $$emit$$"cmp     InitArrayShortSize,rcx\n\t"
+    $$emit$$"jg      LARGE\n\t"
+    $$emit$$"dec     rcx\n\t"
+    $$emit$$"js      DONE\t# Zero length\n\t"
+    $$emit$$"mov     rax,(rdi,rcx,8)\t# LOOP\n\t"
+    $$emit$$"dec     rcx\n\t"
+    $$emit$$"jge     LOOP\n\t"
+    $$emit$$"jmp     DONE\n\t"
+    $$emit$$"# LARGE:\n\t"
+    if (UseFastStosb) {
+       $$emit$$"shlq    rcx,3\t# Convert doublewords to bytes\n\t"
+       $$emit$$"rep     stosb\t# Store rax to *rdi++ while rcx--\n\t"
+    } else {
+       $$emit$$"rep     stosq\t# Store rax to *rdi++ while rcx--\n\t"
+    }
+    $$emit$$"# DONE"
+  %}
+  ins_encode %{
+    __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, false);
   %}
   ins_pipe(pipe_slow);
 %}
 
-instruct rep_fast_stosb(rcx_RegL cnt, rdi_RegP base, rax_RegI zero, Universe dummy,
-                        rFlagsReg cr)
-%{
-  predicate(UseFastStosb);
+instruct rep_stos_large(rcx_RegL cnt, rdi_RegP base, rax_RegI zero, Universe dummy,
+                        rFlagsReg cr)
+%{
+  predicate(((ClearArrayNode*)n)->is_large());
   match(Set dummy (ClearArray cnt base));
   effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);
-  format %{ "xorq    rax, rax\t# ClearArray:\n\t"
-            "shlq    rcx,3\t# Convert doublewords to bytes\n\t"
-            "rep     stosb\t# Store rax to *rdi++ while rcx--" %}
-  ins_encode %{
-    __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register);
-  %}
-  ins_pipe( pipe_slow );
+
+  format %{ $$template
+    $$emit$$"xorq    rax, rax\t# ClearArray:\n\t"
+    if (UseFastStosb) {
+       $$emit$$"shlq    rcx,3\t# Convert doublewords to bytes\n\t"
+       $$emit$$"rep     stosb\t# Store rax to *rdi++ while rcx--"
+    } else {
+       $$emit$$"rep     stosq\t# Store rax to *rdi++ while rcx--"
+    }
+  %}
+  ins_encode %{
+    __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, true);
+  %}
+  ins_pipe(pipe_slow);
 %}
 
 instruct string_compareL(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2,
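
Both ports now split ClearArray into a small and a large instruct, and clear_mem gains an is_large argument: short clears run an inline store loop under the InitArrayShortSize threshold (8 longs by default, matching the removed init_array_short_size constant) instead of paying the rep stos startup cost. A scalar model of the policy, simplified from the assembly the real code emits:

    #include <cstddef>
    #include <cstring>

    const size_t kInitArrayShortSize = 8 * sizeof(long long);  // bytes

    // cnt is in 8-byte words, as in the rep_stos instructs above.
    void clear_mem(long long* base, size_t cnt, bool is_large) {
      if (!is_large && cnt * sizeof(long long) <= kInitArrayShortSize) {
        for (size_t i = 0; i < cnt; i++) base[i] = 0;  // short inline loop
      } else {
        memset(base, 0, cnt * sizeof(long long));      // stands in for rep stos
      }
    }
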
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.amd64/src/jdk/vm/ci/amd64/AMD64.java	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.amd64/src/jdk/vm/ci/amd64/AMD64.java	Tue Apr 05 15:39:34 2016 -0400
@@ -203,7 +203,8 @@
         AVX512ER,
         AVX512CD,
         AVX512BW,
-        AVX512VL
+        AVX512VL,
+        SHA
     }
 
     private final EnumSet<CPUFeature> features;
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot.amd64/src/jdk/vm/ci/hotspot/amd64/AMD64HotSpotJVMCIBackendFactory.java	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot.amd64/src/jdk/vm/ci/hotspot/amd64/AMD64HotSpotJVMCIBackendFactory.java	Tue Apr 05 15:39:34 2016 -0400
@@ -122,6 +122,9 @@
         if ((config.vmVersionFeatures & config.amd64AVX512VL) != 0) {
             features.add(AMD64.CPUFeature.AVX512VL);
         }
+        if ((config.vmVersionFeatures & config.amd64SHA) != 0) {
+            features.add(AMD64.CPUFeature.SHA);
+        }
         return features;
     }
 
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotConstantReflectionProvider.java	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotConstantReflectionProvider.java	Tue Apr 05 15:39:34 2016 -0400
@@ -339,7 +339,7 @@
 
     public JavaConstant readStableFieldValue(ResolvedJavaField field, JavaConstant receiver, boolean isDefaultStable) {
         JavaConstant fieldValue = readNonStableFieldValue(field, receiver);
-        if (fieldValue.isNonNull()) {
+        if (fieldValue != null && fieldValue.isNonNull()) {
             JavaType declaredType = field.getType();
             if (declaredType.getComponentType() != null) {
                 int stableDimension = getArrayDimension(declaredType);
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotJVMCICompilerConfig.java	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotJVMCICompilerConfig.java	Tue Apr 05 15:39:34 2016 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 import jdk.vm.ci.code.CompilationRequest;
 import jdk.vm.ci.code.CompilationRequestResult;
 import jdk.vm.ci.common.JVMCIError;
+import jdk.vm.ci.hotspot.HotSpotJVMCIRuntime.Option;
 import jdk.vm.ci.runtime.JVMCICompiler;
 import jdk.vm.ci.runtime.JVMCICompilerFactory;
 import jdk.vm.ci.runtime.JVMCIRuntime;
@@ -47,29 +48,33 @@
         }
     }
 
+    /**
+     * Factory of the selected system compiler.
+     */
     private static JVMCICompilerFactory compilerFactory;
 
     /**
-     * Selects the system compiler.
+     * Gets the selected system compiler factory.
      *
-     * Called from VM. This method has an object return type to allow it to be called with a VM
-     * utility function used to call other static initialization methods.
+     * @return the selected system compiler factory
      */
-    static Boolean selectCompiler(String compilerName) {
-        assert compilerFactory == null;
-        for (JVMCICompilerFactory factory : Services.load(JVMCICompilerFactory.class)) {
-            if (factory.getCompilerName().equals(compilerName)) {
-                compilerFactory = factory;
-                return Boolean.TRUE;
-            }
-        }
-
-        throw new JVMCIError("JVMCI compiler '%s' not found", compilerName);
-    }
-
     static JVMCICompilerFactory getCompilerFactory() {
         if (compilerFactory == null) {
-            compilerFactory = new DummyCompilerFactory();
+            JVMCICompilerFactory factory = null;
+            String compilerName = Option.Compiler.getString();
+            if (compilerName != null) {
+                for (JVMCICompilerFactory f : Services.load(JVMCICompilerFactory.class)) {
+                    if (f.getCompilerName().equals(compilerName)) {
+                        factory = f;
+                    }
+                }
+                if (factory == null) {
+                    throw new JVMCIError("JVMCI compiler '%s' not found", compilerName);
+                }
+            } else {
+                factory = new DummyCompilerFactory();
+            }
+            compilerFactory = factory;
         }
         return compilerFactory;
     }
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotJVMCIRuntime.java	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotJVMCIRuntime.java	Tue Apr 05 15:39:34 2016 -0400
@@ -91,6 +91,7 @@
      * A list of all supported JVMCI options.
      */
     public enum Option {
+        Compiler(String.class, null, "Selects the system compiler."),
         ImplicitStableValues(boolean.class, true, "Mark well-known stable fields as such."),
         // Note: The following one is not used (see InitTimer.ENABLED).
         InitTimer(boolean.class, false, "Specifies if initialization timing is enabled."),
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotMetaAccessProvider.java	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotMetaAccessProvider.java	Tue Apr 05 15:39:34 2016 -0400
@@ -41,7 +41,6 @@
 import jdk.vm.ci.meta.DeoptimizationReason;
 import jdk.vm.ci.meta.JavaConstant;
 import jdk.vm.ci.meta.JavaKind;
-import jdk.vm.ci.meta.JavaType;
 import jdk.vm.ci.meta.MetaAccessProvider;
 import jdk.vm.ci.meta.ResolvedJavaField;
 import jdk.vm.ci.meta.ResolvedJavaMethod;
@@ -111,23 +110,26 @@
     }
 
     public ResolvedJavaField lookupJavaField(Field reflectionField) {
-        String name = reflectionField.getName();
         Class<?> fieldHolder = reflectionField.getDeclaringClass();
-        Class<?> fieldType = reflectionField.getType();
-        // java.lang.reflect.Field's modifiers should be enough here since VM internal modifier bits
-        // are not used (yet).
-        final int modifiers = reflectionField.getModifiers();
-        final long offset = Modifier.isStatic(modifiers) ? UNSAFE.staticFieldOffset(reflectionField) : UNSAFE.objectFieldOffset(reflectionField);
 
         HotSpotResolvedObjectType holder = fromObjectClass(fieldHolder);
-        JavaType type = runtime.fromClass(fieldType);
+        if (Modifier.isStatic(reflectionField.getModifiers())) {
+            final long offset = UNSAFE.staticFieldOffset(reflectionField);
+            for (ResolvedJavaField field : holder.getStaticFields()) {
+                if (offset == ((HotSpotResolvedJavaField) field).offset()) {
+                    return field;
+                }
+            }
+        } else {
+            final long offset = UNSAFE.objectFieldOffset(reflectionField);
+            for (ResolvedJavaField field : holder.getInstanceFields(false)) {
+                if (offset == ((HotSpotResolvedJavaField) field).offset()) {
+                    return field;
+                }
+            }
+        }
 
-        if (offset != -1) {
-            HotSpotResolvedObjectType resolved = holder;
-            return resolved.createField(name, type, offset, modifiers);
-        } else {
-            throw new JVMCIError("unresolved field %s", reflectionField);
-        }
+        throw new JVMCIError("unresolved field %s", reflectionField);
     }
 
     private static int intMaskRight(int n) {
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java	Tue Apr 05 15:39:34 2016 -0400
@@ -945,6 +945,7 @@
     @HotSpotVMConstant(name = "VM_Version::CPU_AVX512CD", archs = {"amd64"}) @Stable public long amd64AVX512CD;
     @HotSpotVMConstant(name = "VM_Version::CPU_AVX512BW", archs = {"amd64"}) @Stable public long amd64AVX512BW;
     @HotSpotVMConstant(name = "VM_Version::CPU_AVX512VL", archs = {"amd64"}) @Stable public long amd64AVX512VL;
+    @HotSpotVMConstant(name = "VM_Version::CPU_SHA", archs = {"amd64"}) @Stable public long amd64SHA;
 
     // SPARC specific values
     @HotSpotVMConstant(name = "VM_Version::vis3_instructions_m", archs = {"sparc"}) @Stable public int sparcVis3Instructions;
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -144,6 +144,7 @@
 int os::Linux::_page_size = -1;
 const int os::Linux::_vm_default_page_size = (8 * K);
 bool os::Linux::_supports_fast_thread_cpu_time = false;
+uint32_t os::Linux::_os_version = 0;
 const char * os::Linux::_glibc_version = NULL;
 const char * os::Linux::_libpthread_version = NULL;
 pthread_condattr_t os::Linux::_condattr[1];
@@ -4356,6 +4357,48 @@
   return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec;
 }
 
+void os::Linux::initialize_os_info() {
+  assert(_os_version == 0, "OS info already initialized");
+
+  struct utsname _uname;
+
+  uint32_t major;
+  uint32_t minor;
+  uint32_t fix;
+
+  int rc;
+
+  // Kernel version is unknown if
+  // verification below fails.
+  _os_version = 0x01000000;
+
+  rc = uname(&_uname);
+  if (rc != -1) {
+
+    rc = sscanf(_uname.release, "%u.%u.%u", &major, &minor, &fix);
+    if (rc == 3) {
+
+      if (major < 256 && minor < 256 && fix < 256) {
+        // The kernel version has the expected format;
+        // record it, replacing the "unknown" marker.
+        _os_version = (major << 16) |
+                      (minor << 8 ) |
+                      (fix   << 0 ) ;
+      }
+    }
+  }
+}
+
+uint32_t os::Linux::os_version() {
+  assert(_os_version != 0, "not initialized");
+  return _os_version & 0x00FFFFFF;
+}
+
+bool os::Linux::os_version_is_known() {
+  assert(_os_version != 0, "not initialized");
+  return (_os_version & 0x01000000) == 0;
+}
+
 /////
 // glibc on Linux platform uses non-documented flag
 // to indicate, that some special sort of signal
@@ -4578,6 +4621,8 @@
 
   Linux::initialize_system_info();
 
+  Linux::initialize_os_info();
+
   // main_thread points to the aboriginal thread
   Linux::_main_thread = pthread_self();
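
A standalone sketch of the 0x00AABBCC encoding that initialize_os_info() builds above; pack_version() is a hypothetical helper, and the guard mirrors the major/minor/fix < 256 check:

#include <cstdint>
#include <cstdio>

// Hypothetical helper: pack major/minor/fix into 0x00AABBCC.
// Each component must fit in 8 bits, as initialize_os_info() verifies.
static uint32_t pack_version(uint32_t major, uint32_t minor, uint32_t fix) {
  return (major << 16) | (minor << 8) | (fix << 0);
}

int main() {
  uint32_t v = pack_version(4, 4, 21);  // e.g. a 4.4.21 kernel
  printf("major=%u minor=%u fix=%u\n",
         (v >> 16) & 0xFF, (v >> 8) & 0xFF, v & 0xFF);
  return 0;
}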
 
--- a/hotspot/src/os/linux/vm/os_linux.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -56,6 +56,15 @@
 
   static GrowableArray<int>* _cpu_to_node;
 
+  // 0x00000000 = uninitialized,
+  // 0x01000000 = kernel version unknown,
+  // otherwise the version is encoded as:
+  // 0x00AABBCC
+  //   AA = major version
+  //   BB = minor version
+  //   CC = fix version
+  static uint32_t _os_version;
+
  protected:
 
   static julong _physical_memory;
@@ -198,6 +207,10 @@
 
   static jlong fast_thread_cpu_time(clockid_t clockid);
 
+  static void initialize_os_info();
+  static bool os_version_is_known();
+  static uint32_t os_version();
+
   // pthread_cond clock support
  private:
   static pthread_condattr_t _condattr[1];
--- a/hotspot/src/os/posix/vm/os_posix.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/os/posix/vm/os_posix.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -336,13 +336,13 @@
   const char *start;
 
   if (lib_name != NULL) {
-    len = name_len = strlen(lib_name);
+    name_len = strlen(lib_name);
     if (is_absolute_path) {
       // Need to strip path, prefix and suffix
       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
         lib_name = ++start;
       }
-      if (len <= (prefix_len + suffix_len)) {
+      if (strlen(lib_name) <= (prefix_len + suffix_len)) {
         return NULL;
       }
       lib_name += prefix_len;
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -951,11 +951,11 @@
     FILETIME wt;
     GetSystemTimeAsFileTime(&wt);
     jlong rtc_millis = windows_to_java_time(wt);
-    jlong user_millis = windows_to_java_time(user_time);
-    jlong system_millis = windows_to_java_time(kernel_time);
     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
-    *process_user_time = ((double) user_millis) / ((double) MILLIUNITS);
-    *process_system_time = ((double) system_millis) / ((double) MILLIUNITS);
+    *process_user_time =
+      (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
+    *process_system_time =
+      (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
     return true;
   } else {
     return false;
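
The rewritten arithmetic above stops routing the two CPU-time values through windows_to_java_time(), which truncates to milliseconds: a FILETIME counts 100 ns ticks, so combining its halves and dividing by 10 * MICROUNITS (i.e. 10^7) yields seconds directly. A self-contained sketch of that conversion (the tick values are made up):

#include <cstdint>
#include <cstdio>

// jlong_from(hi, lo) in HotSpot is this shift-or of the FILETIME halves.
static int64_t ticks_from(uint32_t high, uint32_t low) {
  return ((int64_t)high << 32) | (int64_t)low;
}

int main() {
  uint32_t hi = 0, lo = 50000000;  // 5 seconds of CPU time, in 100 ns ticks
  double seconds = (double)ticks_from(hi, lo) / 10000000.0;  // 10 * MICROUNITS
  printf("%.3f s\n", seconds);
  return 0;
}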
--- a/hotspot/src/os_cpu/linux_aarch64/vm/copy_linux_aarch64.inline.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/os_cpu/linux_aarch64/vm/copy_linux_aarch64.inline.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -26,44 +26,108 @@
 #ifndef OS_CPU_LINUX_AARCH64_VM_COPY_LINUX_AARCH64_INLINE_HPP
 #define OS_CPU_LINUX_AARCH64_VM_COPY_LINUX_AARCH64_INLINE_HPP
 
+#define COPY_SMALL(from, to, count)                                     \
+{                                                                       \
+        long tmp0, tmp1, tmp2, tmp3;                                    \
+        long tmp4, tmp5, tmp6, tmp7;                                    \
+  __asm volatile(                                                       \
+"       adr     %[t0], 0f;"                                             \
+"       add     %[t0], %[t0], %[cnt], lsl #5;"                          \
+"       br      %[t0];"                                                 \
+"       .align  5;"                                                     \
+"0:"                                                                    \
+"       b       1f;"                                                    \
+"       .align  5;"                                                     \
+"       ldr     %[t0], [%[s], #0];"                                     \
+"       str     %[t0], [%[d], #0];"                                     \
+"       b       1f;"                                                    \
+"       .align  5;"                                                     \
+"       ldp     %[t0], %[t1], [%[s], #0];"                              \
+"       stp     %[t0], %[t1], [%[d], #0];"                              \
+"       b       1f;"                                                    \
+"       .align  5;"                                                     \
+"       ldp     %[t0], %[t1], [%[s], #0];"                              \
+"       ldr     %[t2], [%[s], #16];"                                    \
+"       stp     %[t0], %[t1], [%[d], #0];"                              \
+"       str     %[t2], [%[d], #16];"                                    \
+"       b       1f;"                                                    \
+"       .align  5;"                                                     \
+"       ldp     %[t0], %[t1], [%[s], #0];"                              \
+"       ldp     %[t2], %[t3], [%[s], #16];"                             \
+"       stp     %[t0], %[t1], [%[d], #0];"                              \
+"       stp     %[t2], %[t3], [%[d], #16];"                             \
+"       b       1f;"                                                    \
+"       .align  5;"                                                     \
+"       ldp     %[t0], %[t1], [%[s], #0];"                              \
+"       ldp     %[t2], %[t3], [%[s], #16];"                             \
+"       ldr     %[t4], [%[s], #32];"                                    \
+"       stp     %[t0], %[t1], [%[d], #0];"                              \
+"       stp     %[t2], %[t3], [%[d], #16];"                             \
+"       str     %[t4], [%[d], #32];"                                    \
+"       b       1f;"                                                    \
+"       .align  5;"                                                     \
+"       ldp     %[t0], %[t1], [%[s], #0];"                              \
+"       ldp     %[t2], %[t3], [%[s], #16];"                             \
+"       ldp     %[t4], %[t5], [%[s], #32];"                             \
+"2:"                                                                    \
+"       stp     %[t0], %[t1], [%[d], #0];"                              \
+"       stp     %[t2], %[t3], [%[d], #16];"                             \
+"       stp     %[t4], %[t5], [%[d], #32];"                             \
+"       b       1f;"                                                    \
+"       .align  5;"                                                     \
+"       ldr     %[t6], [%[s], #0];"                                     \
+"       ldp     %[t0], %[t1], [%[s], #8];"                              \
+"       ldp     %[t2], %[t3], [%[s], #24];"                             \
+"       ldp     %[t4], %[t5], [%[s], #40];"                             \
+"       str     %[t6], [%[d]], #8;"                                     \
+"       b       2b;"                                                    \
+"       .align  5;"                                                     \
+"       ldp     %[t0], %[t1], [%[s], #0];"                              \
+"       ldp     %[t2], %[t3], [%[s], #16];"                             \
+"       ldp     %[t4], %[t5], [%[s], #32];"                             \
+"       ldp     %[t6], %[t7], [%[s], #48];"                             \
+"       stp     %[t0], %[t1], [%[d], #0];"                              \
+"       stp     %[t2], %[t3], [%[d], #16];"                             \
+"       stp     %[t4], %[t5], [%[d], #32];"                             \
+"       stp     %[t6], %[t7], [%[d], #48];"                             \
+"1:"                                                                    \
+                                                                        \
+  : [s]"+r"(from), [d]"+r"(to), [cnt]"+r"(count),                       \
+    [t0]"=&r"(tmp0), [t1]"=&r"(tmp1), [t2]"=&r"(tmp2), [t3]"=&r"(tmp3), \
+    [t4]"=&r"(tmp4), [t5]"=&r"(tmp5), [t6]"=&r"(tmp6), [t7]"=&r"(tmp7)  \
+  :                                                                     \
+  : "memory", "cc");                                                    \
+}
+
 static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
-  (void)memmove(to, from, count * HeapWordSize);
+  __asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory");
+  if (__builtin_expect(count <= 8, 1)) {
+    COPY_SMALL(from, to, count);
+    return;
+  }
+  _Copy_conjoint_words(from, to, count);
 }
 
 static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
-  switch (count) {
-  case 8:  to[7] = from[7];
-  case 7:  to[6] = from[6];
-  case 6:  to[5] = from[5];
-  case 5:  to[4] = from[4];
-  case 4:  to[3] = from[3];
-  case 3:  to[2] = from[2];
-  case 2:  to[1] = from[1];
-  case 1:  to[0] = from[0];
-  case 0:  break;
-  default:
-    (void)memcpy(to, from, count * HeapWordSize);
-    break;
+  if (__builtin_constant_p(count)) {
+    memcpy(to, from, count * sizeof(HeapWord));
+    return;
   }
+  __asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory");
+  if (__builtin_expect(count <= 8, 1)) {
+    COPY_SMALL(from, to, count);
+    return;
+  }
+  _Copy_disjoint_words(from, to, count);
 }
 
 static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
-  switch (count) {
-  case 8:  to[7] = from[7];
-  case 7:  to[6] = from[6];
-  case 6:  to[5] = from[5];
-  case 5:  to[4] = from[4];
-  case 4:  to[3] = from[3];
-  case 3:  to[2] = from[2];
-  case 2:  to[1] = from[1];
-  case 1:  to[0] = from[0];
-  case 0:  break;
-  default:
-    while (count-- > 0) {
-      *to++ = *from++;
-    }
-    break;
+  __asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory");
+  if (__builtin_expect(count <= 8, 1)) {
+    COPY_SMALL(from, to, count);
+    return;
   }
+  _Copy_disjoint_words(from, to, count);
 }
 
 static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
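
The COPY_SMALL fast path above dispatches through a computed-branch table so copies of at most eight words never pay for a call into the out-of-line bulk routines. A portable C++ sketch of the same split, without the inline assembly, prefetch, or branch table:

#include <cstddef>
#include <cstring>
#include <cstdio>

typedef unsigned long Word;

// Fast path: at most eight words, copied inline.
static void copy_small(const Word* from, Word* to, size_t count) {
  for (size_t i = 0; i < count; i++) {
    to[i] = from[i];
  }
}

// Non-overlapping copy: short counts stay inline, long ones go to memcpy
// (standing in for _Copy_disjoint_words).
static void copy_words(const Word* from, Word* to, size_t count) {
  if (count <= 8) {
    copy_small(from, to, count);
    return;
  }
  memcpy(to, from, count * sizeof(Word));
}

int main() {
  Word src[4] = {1, 2, 3, 4}, dst[4] = {0, 0, 0, 0};
  copy_words(src, dst, 4);
  printf("%lu %lu %lu %lu\n", dst[0], dst[1], dst[2], dst[3]);
  return 0;
}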
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os_cpu/linux_aarch64/vm/copy_linux_aarch64.s	Tue Apr 05 15:39:34 2016 -0400
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2016, Linaro Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+        .global _Copy_conjoint_words
+        .global _Copy_disjoint_words
+
+s       .req    x0
+d       .req    x1
+count   .req    x2
+t0      .req    x3
+t1      .req    x4
+t2      .req    x5
+t3      .req    x6
+t4      .req    x7
+t5      .req    x8
+t6      .req    x9
+t7      .req    x10
+
+        .align  6
+_Copy_disjoint_words:
+        // Ensure 2 word aligned
+        tbz     s, #3, fwd_copy_aligned
+        ldr     t0, [s], #8
+        str     t0, [d], #8
+        sub     count, count, #1
+
+fwd_copy_aligned:
+        // Bias s & d so we only pre index on the last copy
+        sub     s, s, #16
+        sub     d, d, #16
+
+        ldp     t0, t1, [s, #16]
+        ldp     t2, t3, [s, #32]
+        ldp     t4, t5, [s, #48]
+        ldp     t6, t7, [s, #64]!
+
+        subs    count, count, #16
+        blo     fwd_copy_drain
+
+fwd_copy_again:
+        prfm    pldl1keep, [s, #256]
+        stp     t0, t1, [d, #16]
+        ldp     t0, t1, [s, #16]
+        stp     t2, t3, [d, #32]
+        ldp     t2, t3, [s, #32]
+        stp     t4, t5, [d, #48]
+        ldp     t4, t5, [s, #48]
+        stp     t6, t7, [d, #64]!
+        ldp     t6, t7, [s, #64]!
+        subs    count, count, #8
+        bhs     fwd_copy_again
+
+fwd_copy_drain:
+        stp     t0, t1, [d, #16]
+        stp     t2, t3, [d, #32]
+        stp     t4, t5, [d, #48]
+        stp     t6, t7, [d, #64]!
+
+        // count is now -8..-1 for 0..7 words to copy
+        adr     t0, 0f
+        add     t0, t0, count, lsl #5
+        br      t0
+
+        .align  5
+        ret                             // -8 == 0 words
+        .align  5
+        ldr     t0, [s, #16]            // -7 == 1 word
+        str     t0, [d, #16]
+        ret
+        .align  5
+        ldp     t0, t1, [s, #16]        // -6 = 2 words
+        stp     t0, t1, [d, #16]
+        ret
+        .align  5
+        ldp     t0, t1, [s, #16]        // -5 = 3 words
+        ldr     t2, [s, #32]
+        stp     t0, t1, [d, #16]
+        str     t2, [d, #32]
+        ret
+        .align  5
+        ldp     t0, t1, [s, #16]        // -4 = 4 words
+        ldp     t2, t3, [s, #32]
+        stp     t0, t1, [d, #16]
+        stp     t2, t3, [d, #32]
+        ret
+        .align  5
+        ldp     t0, t1, [s, #16]        // -3 = 5 words
+        ldp     t2, t3, [s, #32]
+        ldr     t4, [s, #48]
+        stp     t0, t1, [d, #16]
+        stp     t2, t3, [d, #32]
+        str     t4, [d, #48]
+        ret
+        .align  5
+        ldp     t0, t1, [s, #16]        // -2 = 6 words
+        ldp     t2, t3, [s, #32]
+        ldp     t4, t5, [s, #48]
+        stp     t0, t1, [d, #16]
+        stp     t2, t3, [d, #32]
+        stp     t4, t5, [d, #48]
+        ret
+        .align  5
+        ldp     t0, t1, [s, #16]        // -1 = 7 words
+        ldp     t2, t3, [s, #32]
+        ldp     t4, t5, [s, #48]
+        ldr     t6, [s, #64]
+        stp     t0, t1, [d, #16]
+        stp     t2, t3, [d, #32]
+        stp     t4, t5, [d, #48]
+        str     t6, [d, #64]
+        // The 7-word case is one instruction too large for its 32-byte
+        // table slot, so it falls through to the aligned ret below.
+        .align  5
+0:
+        ret
+
+        .align  6
+_Copy_conjoint_words:
+        sub     t0, d, s
+        cmp     t0, count, lsl #3
+        bhs     _Copy_disjoint_words
+
+        add     s, s, count, lsl #3
+        add     d, d, count, lsl #3
+
+        // Ensure 2 word aligned
+        tbz     s, #3, bwd_copy_aligned
+        ldr     t0, [s, #-8]!
+        str     t0, [d, #-8]!
+        sub     count, count, #1
+
+bwd_copy_aligned:
+        ldp     t0, t1, [s, #-16]
+        ldp     t2, t3, [s, #-32]
+        ldp     t4, t5, [s, #-48]
+        ldp     t6, t7, [s, #-64]!
+
+        subs    count, count, #16
+        blo     bwd_copy_drain
+
+bwd_copy_again:
+        prfm    pldl1keep, [s, #-256]
+        stp     t0, t1, [d, #-16]
+        ldp     t0, t1, [s, #-16]
+        stp     t2, t3, [d, #-32]
+        ldp     t2, t3, [s, #-32]
+        stp     t4, t5, [d, #-48]
+        ldp     t4, t5, [s, #-48]
+        stp     t6, t7, [d, #-64]!
+        ldp     t6, t7, [s, #-64]!
+        subs    count, count, #8
+        bhs     bwd_copy_again
+
+bwd_copy_drain:
+        stp     t0, t1, [d, #-16]
+        stp     t2, t3, [d, #-32]
+        stp     t4, t5, [d, #-48]
+        stp     t6, t7, [d, #-64]!
+
+        // count is now -8..-1 for 0..7 words to copy
+        adr     t0, 0f
+        add     t0, t0, count, lsl #5
+        br      t0
+
+        .align  5
+        ret                             // -8 == 0 words
+        .align  5
+        ldr     t0, [s, #-8]            // -7 == 1 word
+        str     t0, [d, #-8]
+        ret
+        .align  5
+        ldp     t0, t1, [s, #-16]       // -6 = 2 words
+        stp     t0, t1, [d, #-16]
+        ret
+        .align  5
+        ldp     t0, t1, [s, #-16]       // -5 = 3 words
+        ldr     t2, [s, #-24]
+        stp     t0, t1, [d, #-16]
+        str     t2, [d, #-24]
+        ret
+        .align  5
+        ldp     t0, t1, [s, #-16]       // -4 = 4 words
+        ldp     t2, t3, [s, #-32]
+        stp     t0, t1, [d, #-16]
+        stp     t2, t3, [d, #-32]
+        ret
+        .align  5
+        ldp     t0, t1, [s, #-16]       // -3 = 5 words
+        ldp     t2, t3, [s, #-32]
+        ldr     t4, [s, #-40]
+        stp     t0, t1, [d, #-16]
+        stp     t2, t3, [d, #-32]
+        str     t4, [d, #-40]
+        ret
+        .align  5
+        ldp     t0, t1, [s, #-16]       // -2 = 6 words
+        ldp     t2, t3, [s, #-32]
+        ldp     t4, t5, [s, #-48]
+        stp     t0, t1, [d, #-16]
+        stp     t2, t3, [d, #-32]
+        stp     t4, t5, [d, #-48]
+        ret
+        .align  5
+        ldp     t0, t1, [s, #-16]       // -1 = 7 words
+        ldp     t2, t3, [s, #-32]
+        ldp     t4, t5, [s, #-48]
+        ldr     t6, [s, #-56]
+        stp     t0, t1, [d, #-16]
+        stp     t2, t3, [d, #-32]
+        stp     t4, t5, [d, #-48]
+        str     t6, [d, #-56]
+        // The 7-word case is one instruction too large for its 32-byte
+        // table slot, so it falls through to the aligned ret below.
+        .align  5
+0:
+        ret
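
_Copy_conjoint_words above picks the copy direction with a single unsigned compare: if the destination starts at least count*8 bytes past the source, a forward copy can never overwrite a word it has yet to read, so it branches straight to _Copy_disjoint_words; otherwise it copies backwards from the end. The same test in C++ (a sketch, not HotSpot code):

#include <cstdint>
#include <cstddef>
#include <cstdio>

// Mirrors "sub t0, d, s; cmp t0, count, lsl #3; bhs ...": when d < s the
// unsigned subtraction wraps to a huge value, so the test passes and the
// forward copy is taken, which is always safe when writes trail the reads.
static bool forward_copy_is_safe(const void* s, const void* d, size_t count_words) {
  return (uintptr_t)d - (uintptr_t)s >= count_words * 8;
}

int main() {
  long buf[16];
  printf("%d\n", forward_copy_is_safe(buf, buf + 8, 8));  // 1: no overlap
  printf("%d\n", forward_copy_is_safe(buf, buf + 4, 8));  // 0: overlap, go backwards
  return 0;
}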
--- a/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -257,7 +257,38 @@
   }
 }
 
-void Canonicalizer::do_LoadIndexed    (LoadIndexed*     x) {}
+void Canonicalizer::do_LoadIndexed    (LoadIndexed*     x) {
+  StableArrayConstant* array = x->array()->type()->as_StableArrayConstant();
+  IntConstant* index = x->index()->type()->as_IntConstant();
+
+  assert(array == NULL || FoldStableValues, "not enabled");
+
+  // Constant fold loads from stable arrays.
+  if (array != NULL && index != NULL) {
+    jint idx = index->value();
+    if (idx < 0 || idx >= array->value()->length()) {
+      // Leave the load as is. The range check will handle it.
+      return;
+    }
+
+    ciConstant field_val = array->value()->element_value(idx);
+    if (!field_val.is_null_or_zero()) {
+      jint dimension = array->dimension();
+      assert(dimension <= array->value()->array_type()->dimension(), "inconsistent info");
+      ValueType* value = NULL;
+      if (dimension > 1) {
+        // Preserve information about the dimension for the element.
+        assert(field_val.as_object()->is_array(), "not an array");
+        value = new StableArrayConstant(field_val.as_object()->as_array(), dimension - 1);
+      } else {
+        assert(dimension == 1, "sanity");
+        value = as_ValueType(field_val);
+      }
+      set_canonical(new Constant(value));
+    }
+  }
+}
+
 void Canonicalizer::do_StoreIndexed   (StoreIndexed*    x) {
   // If a value is going to be stored into a field or array some of
   // the conversions emitted by javac are unneeded because the fields
@@ -471,7 +502,7 @@
     InstanceConstant* c = x->argument_at(0)->type()->as_InstanceConstant();
     if (c != NULL && !c->value()->is_null_object()) {
       // ciInstance::java_mirror_type() returns non-NULL only for Java mirrors
-      ciType* t = c->value()->as_instance()->java_mirror_type();
+      ciType* t = c->value()->java_mirror_type();
       if (t->is_klass()) {
         // substitute cls.isInstance(obj) on a constant Class with
         // an InstanceOf instruction
@@ -487,6 +518,17 @@
     }
     break;
   }
+  case vmIntrinsics::_isPrimitive        : {
+    assert(x->number_of_arguments() == 1, "wrong type");
+
+    // Class.isPrimitive is known on constant classes:
+    InstanceConstant* c = x->argument_at(0)->type()->as_InstanceConstant();
+    if (c != NULL && !c->value()->is_null_object()) {
+      ciType* t = c->value()->java_mirror_type();
+      set_constant(t->is_primitive_type());
+    }
+    break;
+  }
   }
 }
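
The new do_LoadIndexed canonicalization folds a[i] to a constant only when the array is a stable-array constant, the index is a constant within bounds, and the element is not null/zero (a default element may still be written later, so it must remain a real load). A compact sketch of that decision, with made-up types standing in for C1's:

#include <cstdio>

// Hypothetical stand-ins for C1's StableArrayConstant and IntConstant.
struct StableArray { const int* elems; int length; };

// Returns true and sets *out only when the load may be constant-folded.
static bool try_fold(const StableArray* array, const int* index, int* out) {
  if (array == nullptr || index == nullptr) return false;  // not both constant
  int idx = *index;
  if (idx < 0 || idx >= array->length) return false;  // leave it to the range check
  if (array->elems[idx] == 0) return false;           // default value: not foldable
  *out = array->elems[idx];
  return true;
}

int main() {
  int elems[3] = {0, 42, 7};
  StableArray a = {elems, 3};
  int idx = 1, v = 0;
  if (try_fold(&a, &idx, &v)) printf("folded to %d\n", v);
  return 0;
}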
 
--- a/hotspot/src/share/vm/c1/c1_Compiler.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compiler.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -148,6 +148,7 @@
   case vmIntrinsics::_longBitsToDouble:
   case vmIntrinsics::_getClass:
   case vmIntrinsics::_isInstance:
+  case vmIntrinsics::_isPrimitive:
   case vmIntrinsics::_currentThread:
   case vmIntrinsics::_dabs:
   case vmIntrinsics::_dsqrt:
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -1519,6 +1519,29 @@
   append(new Return(x));
 }
 
+Value GraphBuilder::make_constant(ciConstant field_value, ciField* field) {
+  BasicType field_type = field_value.basic_type();
+  ValueType* value = as_ValueType(field_value);
+
+  // Attach dimension info to stable arrays.
+  if (FoldStableValues &&
+      field->is_stable() && field_type == T_ARRAY && !field_value.is_null_or_zero()) {
+    ciArray* array = field_value.as_object()->as_array();
+    jint dimension = field->type()->as_array_klass()->dimension();
+    value = new StableArrayConstant(array, dimension);
+  }
+
+  switch (field_type) {
+    case T_ARRAY:
+    case T_OBJECT:
+      if (field_value.as_object()->should_be_constant()) {
+        return new Constant(value);
+      }
+      return NULL; // Not a constant.
+    default:
+      return new Constant(value);
+  }
+}
 
 void GraphBuilder::access_field(Bytecodes::Code code) {
   bool will_link;
@@ -1563,22 +1586,13 @@
   switch (code) {
     case Bytecodes::_getstatic: {
       // check for compile-time constants, i.e., initialized static final fields
-      Instruction* constant = NULL;
+      Value constant = NULL;
       if (field->is_constant() && !PatchALot) {
-        ciConstant field_val = field->constant_value();
-        BasicType field_type = field_val.basic_type();
-        switch (field_type) {
-        case T_ARRAY:
-        case T_OBJECT:
-          if (field_val.as_object()->should_be_constant()) {
-            constant = new Constant(as_ValueType(field_val));
-          }
-          break;
-
-        default:
-          constant = new Constant(as_ValueType(field_val));
-        }
+        ciConstant field_value = field->constant_value();
         // Stable static fields are checked for non-default values in ciField::initialize_from().
+        assert(!field->is_stable() || !field_value.is_null_or_zero(),
+               "stable static w/ default value shouldn't be a constant");
+        constant = make_constant(field_value, field);
       }
       if (constant != NULL) {
         push(type, append(constant));
@@ -1591,38 +1605,29 @@
       }
       break;
     }
-    case Bytecodes::_putstatic:
-      { Value val = pop(type);
-        if (state_before == NULL) {
-          state_before = copy_state_for_exception();
-        }
-        append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching));
+    case Bytecodes::_putstatic: {
+      Value val = pop(type);
+      if (state_before == NULL) {
+        state_before = copy_state_for_exception();
       }
+      append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching));
       break;
+    }
     case Bytecodes::_getfield: {
       // Check for compile-time constants, i.e., trusted final non-static fields.
-      Instruction* constant = NULL;
+      Value constant = NULL;
       obj = apop();
       ObjectType* obj_type = obj->type()->as_ObjectType();
       if (obj_type->is_constant() && !PatchALot) {
         ciObject* const_oop = obj_type->constant_value();
         if (!const_oop->is_null_object() && const_oop->is_loaded()) {
           if (field->is_constant()) {
-            ciConstant field_val = field->constant_value_of(const_oop);
-            BasicType field_type = field_val.basic_type();
-            switch (field_type) {
-            case T_ARRAY:
-            case T_OBJECT:
-              if (field_val.as_object()->should_be_constant()) {
-                constant = new Constant(as_ValueType(field_val));
-              }
-              break;
-            default:
-              constant = new Constant(as_ValueType(field_val));
-            }
-            if (FoldStableValues && field->is_stable() && field_val.is_null_or_zero()) {
+            ciConstant field_value = field->constant_value_of(const_oop);
+            if (FoldStableValues && field->is_stable() && field_value.is_null_or_zero()) {
               // Stable field with default value can't be constant.
               constant = NULL;
+            } else {
+              constant = make_constant(field_value, field);
             }
           } else {
             // For CallSite objects treat the target field as a compile time constant.
@@ -3942,7 +3947,7 @@
 
 
 bool GraphBuilder::try_method_handle_inline(ciMethod* callee) {
-  ValueStack* state_before = state()->copy_for_parsing();
+  ValueStack* state_before = copy_state_before();
   vmIntrinsics::ID iid = callee->intrinsic_id();
   switch (iid) {
   case vmIntrinsics::_invokeBasic:
@@ -4032,7 +4037,7 @@
     fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
     break;
   }
-  set_state(state_before);
+  set_state(state_before->copy_for_parsing());
   return false;
 }
 
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -276,6 +276,7 @@
   void iterate_all_blocks(bool start_in_current_block_for_inlining = false);
   Dependencies* dependency_recorder() const; // = compilation()->dependencies()
   bool direct_compare(ciKlass* k);
+  Value make_constant(ciConstant value, ciField* field);
 
   void kill_all();
 
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -1296,6 +1296,25 @@
   __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
 }
 
+// java.lang.Class::isPrimitive()
+void LIRGenerator::do_isPrimitive(Intrinsic* x) {
+  assert(x->number_of_arguments() == 1, "wrong type");
+
+  LIRItem rcvr(x->argument_at(0), this);
+  rcvr.load_item();
+  LIR_Opr temp = new_register(T_METADATA);
+  LIR_Opr result = rlock_result(x);
+
+  CodeEmitInfo* info = NULL;
+  if (x->needs_null_check()) {
+    info = state_for(x);
+  }
+
+  __ move(new LIR_Address(rcvr.result(), java_lang_Class::klass_offset_in_bytes(), T_ADDRESS), temp, info);
+  __ cmp(lir_cond_notEqual, temp, LIR_OprFact::intConst(0));
+  __ cmove(lir_cond_notEqual, LIR_OprFact::intConst(0), LIR_OprFact::intConst(1), result, T_BOOLEAN);
+}
+
 
 // Example: Thread.currentThread()
 void LIRGenerator::do_currentThread(Intrinsic* x) {
@@ -3098,6 +3117,7 @@
 
   case vmIntrinsics::_Object_init:    do_RegisterFinalizer(x); break;
   case vmIntrinsics::_isInstance:     do_isInstance(x);    break;
+  case vmIntrinsics::_isPrimitive:    do_isPrimitive(x);   break;
   case vmIntrinsics::_getClass:       do_getClass(x);      break;
   case vmIntrinsics::_currentThread:  do_currentThread(x); break;
 
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -246,6 +246,7 @@
 
   void do_RegisterFinalizer(Intrinsic* x);
   void do_isInstance(Intrinsic* x);
+  void do_isPrimitive(Intrinsic* x);
   void do_getClass(Intrinsic* x);
   void do_currentThread(Intrinsic* x);
   void do_MathIntrinsic(Intrinsic* x);
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -335,6 +335,7 @@
   NOT_PRODUCT(_new_instance_slowcase_cnt++;)
 
   assert(klass->is_klass(), "not a class");
+  Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
   instanceKlassHandle h(thread, klass);
   h->check_valid_for_instantiation(true, CHECK);
   // make sure klass is initialized
@@ -370,6 +371,7 @@
   //       anymore after new_objArray() and no GC can happen before.
   //       (This may have to change if this code changes!)
   assert(array_klass->is_klass(), "not a class");
+  Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive
   Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
   objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
   thread->set_vm_result(obj);
@@ -386,6 +388,7 @@
 
   assert(klass->is_klass(), "not a class");
   assert(rank >= 1, "rank must be nonzero");
+  Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
   oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
   thread->set_vm_result(obj);
 JRT_END
--- a/hotspot/src/share/vm/c1/c1_ValueType.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/c1/c1_ValueType.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -45,6 +45,7 @@
 class     ObjectConstant;
 class     ArrayType;
 class       ArrayConstant;
+class         StableArrayConstant;
 class     InstanceType;
 class       InstanceConstant;
 class   MetadataType;
@@ -168,6 +169,7 @@
   virtual MethodConstant*   as_MethodConstant()  { return NULL; }
   virtual MethodDataConstant* as_MethodDataConstant() { return NULL; }
   virtual ArrayConstant*    as_ArrayConstant()   { return NULL; }
+  virtual StableArrayConstant* as_StableArrayConstant()   { return NULL; }
   virtual AddressConstant*  as_AddressConstant() { return NULL; }
 
   // type operations
@@ -355,6 +357,20 @@
   virtual ciType* exact_type() const;
 };
 
+class StableArrayConstant: public ArrayConstant {
+ private:
+  jint _dimension;
+
+ public:
+  StableArrayConstant(ciArray* value, jint dimension) : ArrayConstant(value) {
+    assert(dimension > 0, "not a stable array");
+    _dimension = dimension;
+  }
+
+  jint dimension() const                              { return _dimension; }
+
+  virtual StableArrayConstant* as_StableArrayConstant() { return this; }
+};
 
 class InstanceType: public ObjectType {
  public:
--- a/hotspot/src/share/vm/ci/ciMethodData.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/ci/ciMethodData.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -81,7 +81,7 @@
 void ciMethodData::load_extra_data() {
   MethodData* mdo = get_MethodData();
 
-  MutexLocker(mdo->extra_data_lock());
+  MutexLocker ml(mdo->extra_data_lock());
 
   // speculative trap entries also hold a pointer to a Method so need to be translated
   DataLayout* dp_src  = mdo->extra_data_base();
@@ -103,16 +103,13 @@
 
     switch(tag) {
     case DataLayout::speculative_trap_data_tag: {
-      ciSpeculativeTrapData* data_dst = new ciSpeculativeTrapData(dp_dst);
-      SpeculativeTrapData* data_src = new SpeculativeTrapData(dp_src);
+      ciSpeculativeTrapData data_dst(dp_dst);
+      SpeculativeTrapData   data_src(dp_src);
 
-      data_dst->translate_from(data_src);
-
-#ifdef ASSERT
-      SpeculativeTrapData* data_src2 = new SpeculativeTrapData(dp_src);
-      assert(data_src2->method() == data_src->method() && data_src2->bci() == data_src->bci(), "entries changed while translating");
-#endif
-
+      { // During translation a safepoint can happen or a VM lock can be taken (e.g., Compile_lock).
+        MutexUnlocker ml(mdo->extra_data_lock());
+        data_dst.translate_from(&data_src);
+      }
       break;
     }
     case DataLayout::bit_data_tag:
@@ -120,9 +117,11 @@
     case DataLayout::no_tag:
     case DataLayout::arg_info_data_tag:
       // An empty slot or ArgInfoData entry marks the end of the trap data
-      return;
+      {
+        return; // Need a block to avoid a Solaris Studio compiler bug
+      }
     default:
-      fatal("bad tag = %d", dp_dst->tag());
+      fatal("bad tag = %d", tag);
     }
   }
 }
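
The fix at the top of this hunk is easy to miss but real: MutexLocker(mdo->extra_data_lock()) constructs an unnamed temporary that is destroyed at the end of the full expression, so the lock was released immediately and the rest of the method ran unlocked. Naming the guard keeps it alive to the end of the scope. A minimal reproduction with a toy lock:

#include <cstdio>

struct Lock {
  void lock()   { puts("  locked"); }
  void unlock() { puts("  unlocked"); }
};

struct MutexLocker {  // minimal RAII guard in the style of HotSpot's
  Lock& _l;
  explicit MutexLocker(Lock& l) : _l(l) { _l.lock(); }
  ~MutexLocker() { _l.unlock(); }
};

static Lock& the_lock() { static Lock l; return l; }

int main() {
  puts("bug:");
  MutexLocker(the_lock());     // temporary: unlocked again on this same line
  puts("  ...runs without the lock held");

  puts("fix:");
  MutexLocker ml(the_lock());  // named guard: held until end of scope
  puts("  ...runs with the lock held");
  return 0;
}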
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -1060,14 +1060,15 @@
    do_name(     updateByteBuffer_A_name,                          "updateByteBuffer")                                   \
                                                                                                                         \
   /* support for Unsafe */                                                                                              \
-  do_class(sun_misc_Unsafe,                        "sun/misc/Unsafe")                                                   \
   do_class(jdk_internal_misc_Unsafe,               "jdk/internal/misc/Unsafe")                                          \
                                                                                                                         \
   do_intrinsic(_allocateInstance,         jdk_internal_misc_Unsafe,     allocateInstance_name, allocateInstance_signature, F_RN) \
    do_name(     allocateInstance_name,                                  "allocateInstance")                                      \
    do_signature(allocateInstance_signature,                             "(Ljava/lang/Class;)Ljava/lang/Object;")                 \
+  do_intrinsic(_allocateUninitializedArray, jdk_internal_misc_Unsafe,   allocateUninitializedArray_name, newArray_signature,  F_R) \
+   do_name(     allocateUninitializedArray_name,                        "allocateUninitializedArray0")                           \
   do_intrinsic(_copyMemory,               jdk_internal_misc_Unsafe,     copyMemory_name, copyMemory_signature,         F_RN)     \
-   do_name(     copyMemory_name,                                        "copyMemory")                                            \
+   do_name(     copyMemory_name,                                        "copyMemory0")                                           \
    do_signature(copyMemory_signature,                                   "(Ljava/lang/Object;JLjava/lang/Object;JJ)V")            \
   do_intrinsic(_loadFence,                jdk_internal_misc_Unsafe,     loadFence_name, loadFence_signature,           F_RN)     \
    do_name(     loadFence_name,                                         "loadFence")                                             \
--- a/hotspot/src/share/vm/code/codeCache.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -637,16 +637,19 @@
 }
 
 // Walk the list of methods which might contain non-perm oops.
-void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
+void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
   assert_locked_or_safepoint(CodeCache_lock);
 
   if (UseG1GC) {
     return;
   }
 
+  const bool fix_relocations = f->fix_relocations();
   debug_only(mark_scavenge_root_nmethods());
 
-  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
+  nmethod* prev = NULL;
+  nmethod* cur = scavenge_root_nmethods();
+  while (cur != NULL) {
     debug_only(cur->clear_scavenge_root_marked());
     assert(cur->scavenge_root_not_marked(), "");
     assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
@@ -659,6 +662,18 @@
       // Perform cur->oops_do(f), maybe just once per nmethod.
       f->do_code_blob(cur);
     }
+    nmethod* const next = cur->scavenge_root_link();
+    // The scavengable nmethod list must contain all methods with scavengable
+    // oops. It is safe to include more nmethods on the list, but we do not
+    // expect any live non-scavengable nmethods on the list.
+    if (fix_relocations) {
+      if (!is_live || !cur->detect_scavenge_root_oops()) {
+        unlink_scavenge_root_nmethod(cur, prev);
+      } else {
+        prev = cur;
+      }
+    }
+    cur = next;
   }
 
   // Check for stray marks.
@@ -678,6 +693,24 @@
   print_trace("add_scavenge_root", nm);
 }
 
+void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
+  assert_locked_or_safepoint(CodeCache_lock);
+
+  assert((prev == NULL && scavenge_root_nmethods() == nm) ||
+         (prev != NULL && prev->scavenge_root_link() == nm), "precondition");
+
+  assert(!UseG1GC, "G1 does not use the scavenge_root_nmethods list");
+
+  print_trace("unlink_scavenge_root", nm);
+  if (prev == NULL) {
+    set_scavenge_root_nmethods(nm->scavenge_root_link());
+  } else {
+    prev->set_scavenge_root_link(nm->scavenge_root_link());
+  }
+  nm->set_scavenge_root_link(NULL);
+  nm->clear_on_scavenge_root_list();
+}
+
 void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
   assert_locked_or_safepoint(CodeCache_lock);
 
@@ -686,20 +719,13 @@
   }
 
   print_trace("drop_scavenge_root", nm);
-  nmethod* last = NULL;
-  nmethod* cur = scavenge_root_nmethods();
-  while (cur != NULL) {
-    nmethod* next = cur->scavenge_root_link();
+  nmethod* prev = NULL;
+  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
     if (cur == nm) {
-      if (last != NULL)
-            last->set_scavenge_root_link(next);
-      else  set_scavenge_root_nmethods(next);
-      nm->set_scavenge_root_link(NULL);
-      nm->clear_on_scavenge_root_list();
+      unlink_scavenge_root_nmethod(cur, prev);
       return;
     }
-    last = cur;
-    cur = next;
+    prev = cur;
   }
   assert(false, "should have been on list");
 }
@@ -728,11 +754,7 @@
     } else {
       // Prune it from the list, so we don't have to look at it any more.
       print_trace("prune_scavenge_root", cur);
-      cur->set_scavenge_root_link(NULL);
-      cur->clear_on_scavenge_root_list();
-      if (last != NULL)
-            last->set_scavenge_root_link(next);
-      else  set_scavenge_root_nmethods(next);
+      unlink_scavenge_root_nmethod(cur, last);
     }
     cur = next;
   }
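
unlink_scavenge_root_nmethod factors out the one fiddly step all three call sites above share: removing a node from a singly linked list given its predecessor, where prev == NULL means the node is the current head. The pattern in isolation, as a sketch with a plain node type:

#include <cstdio>

struct Node { int id; Node* next; };
static Node* head = nullptr;

// prev == nullptr means n is the current head.
static void unlink(Node* n, Node* prev) {
  if (prev == nullptr) {
    head = n->next;
  } else {
    prev->next = n->next;
  }
  n->next = nullptr;  // mirrors nm->set_scavenge_root_link(NULL)
}

int main() {
  Node c = {3, nullptr}, b = {2, &c}, a = {1, &b};
  head = &a;
  unlink(&b, &a);  // drop the middle node
  for (Node* p = head; p != nullptr; p = p->next) printf("%d ", p->id);
  printf("\n");
  return 0;
}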
--- a/hotspot/src/share/vm/code/codeCache.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/code/codeCache.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -116,6 +116,10 @@
   static int    allocated_segments();
   static size_t freelists_length();
 
+  static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
+  static void prune_scavenge_root_nmethods();
+  static void unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev);
+
  public:
   // Initialization
   static void initialize();
@@ -153,13 +157,17 @@
   // to "true" iff some code got unloaded.
   static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
   static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
-  static void scavenge_root_nmethods_do(CodeBlobClosure* f);
+
+  // Apply f to every live code blob in scavengable nmethods. Prune nmethods
+  // from the list of scavengable nmethods if f->fix_relocations() and an nmethod
+  // no longer has scavengable oops.  If f->fix_relocations(), then f must copy
+  // objects to their new location immediately to avoid fixing nmethods on the
+  // basis of the old object locations.
+  static void scavenge_root_nmethods_do(CodeBlobToOopClosure* f);
 
   static nmethod* scavenge_root_nmethods()            { return _scavenge_root_nmethods; }
-  static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
   static void add_scavenge_root_nmethod(nmethod* nm);
   static void drop_scavenge_root_nmethod(nmethod* nm);
-  static void prune_scavenge_root_nmethods();
 
   // Printing/debugging
   static void print();                           // prints summary
--- a/hotspot/src/share/vm/code/debugInfoRec.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/code/debugInfoRec.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -369,7 +369,6 @@
   assert(method == NULL ||
          (method->is_native() && bci == 0) ||
          (!method->is_native() && 0 <= bci && bci < method->code_size()) ||
-         (method->is_compiled_lambda_form() && bci == -99) ||  // this might happen in C1
          bci == -1, "illegal bci");
 
   // serialize the locals/expressions/monitors
--- a/hotspot/src/share/vm/code/nmethod.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -1381,7 +1381,6 @@
   assert(_method == NULL, "Tautology");
 
   set_osr_link(NULL);
-  //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
   NMethodSweeper::report_state_change(this);
 }
 
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -838,12 +838,8 @@
                                         const methodHandle& hot_method,
                                         int hot_count,
                                         const char* comment,
+                                        bool blocking,
                                         Thread* thread) {
-  // do nothing if compiler thread(s) is not available
-  if (!_initialized) {
-    return;
-  }
-
   guarantee(!method->is_abstract(), "cannot compile abstract methods");
   assert(method->method_holder()->is_instance_klass(),
          "sanity check");
@@ -916,7 +912,6 @@
 
   // Outputs from the following MutexLocker block:
   CompileTask* task     = NULL;
-  bool         blocking = false;
   CompileQueue* queue  = compile_queue(comp_level);
 
   // Acquire our lock.
@@ -946,9 +941,6 @@
       return;
     }
 
-    // Should this thread wait for completion of the compile?
-    blocking = is_compile_blocking();
-
 #if INCLUDE_JVMCI
     if (UseJVMCICompiler) {
       if (blocking) {
@@ -1034,11 +1026,28 @@
   }
 }
 
-
 nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
                                        int comp_level,
                                        const methodHandle& hot_method, int hot_count,
                                        const char* comment, Thread* THREAD) {
+  // do nothing if the CompileBroker is not available
+  if (!_initialized) {
+    return NULL;
+  }
+  AbstractCompiler *comp = CompileBroker::compiler(comp_level);
+  assert(comp != NULL, "Ensure we don't compile before CompileBroker init");
+  DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, comp);
+  nmethod* nm = CompileBroker::compile_method(method, osr_bci, comp_level, hot_method, hot_count, comment, directive, THREAD);
+  DirectivesStack::release(directive);
+  return nm;
+}
+
+nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
+                                         int comp_level,
+                                         const methodHandle& hot_method, int hot_count,
+                                         const char* comment, DirectiveSet* directive,
+                                         Thread* THREAD) {
+
   // make sure arguments make sense
   assert(method->method_holder()->is_instance_klass(), "not an instance method");
   assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range");
@@ -1051,8 +1060,8 @@
   // lock, make sure that the compilation
   // isn't prohibited in a straightforward way.
   AbstractCompiler *comp = CompileBroker::compiler(comp_level);
-  if (comp == NULL || !comp->can_compile_method(method) ||
-      compilation_is_prohibited(method, osr_bci, comp_level)) {
+  if (!comp->can_compile_method(method) ||
+      compilation_is_prohibited(method, osr_bci, comp_level, directive->ExcludeOption)) {
     return NULL;
   }
 
@@ -1160,7 +1169,7 @@
       CompilationPolicy::policy()->delay_compilation(method());
       return NULL;
     }
-    compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, comment, THREAD);
+    compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, comment, !directive->BackgroundCompilationOption, THREAD);
   }
 
   // return requested nmethod
@@ -1217,7 +1226,7 @@
 // CompileBroker::compilation_is_prohibited
 //
 // See if this compilation is not allowed.
-bool CompileBroker::compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level) {
+bool CompileBroker::compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded) {
   bool is_native = method->is_native();
   // Some compilers may not support the compilation of natives.
   AbstractCompiler *comp = compiler(comp_level);
@@ -1235,11 +1244,6 @@
     return true;
   }
 
-  // Breaking the abstraction - directives are only used inside a compilation otherwise.
-  DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, comp);
-  bool excluded = directive->ExcludeOption;
-  DirectivesStack::release(directive);
-
   // The method may be explicitly excluded by the user.
   double scale;
   if (excluded || (CompilerOracle::has_option_value(method, "CompileThresholdScaling", scale) && scale == 0)) {
@@ -1304,16 +1308,6 @@
   return assign_compile_id(method, osr_bci);
 }
 
-/**
- * Should the current thread block until this compilation request
- * has been fulfilled?
- */
-bool CompileBroker::is_compile_blocking() {
-  assert(!InstanceRefKlass::owns_pending_list_lock(JavaThread::current()), "possible deadlock");
-  return !BackgroundCompilation;
-}
-
-
 // ------------------------------------------------------------------
 // CompileBroker::preload_classes
 void CompileBroker::preload_classes(const methodHandle& method, TRAPS) {
--- a/hotspot/src/share/vm/compiler/compileBroker.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/compiler/compileBroker.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -222,8 +222,7 @@
   static JavaThread* make_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, bool compiler_thread, TRAPS);
   static void init_compiler_sweeper_threads(int c1_compiler_count, int c2_compiler_count);
   static bool compilation_is_complete  (const methodHandle& method, int osr_bci, int comp_level);
-  static bool compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level);
-  static bool is_compile_blocking();
+  static bool compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded);
   static void preload_classes          (const methodHandle& method, TRAPS);
 
   static CompileTask* create_compile_task(CompileQueue*       queue,
@@ -253,6 +252,7 @@
                                   const methodHandle& hot_method,
                                   int hot_count,
                                   const char* comment,
+                                  bool blocking,
                                   Thread* thread);
 
   static CompileQueue* compile_queue(int comp_level);
@@ -291,6 +291,15 @@
                                  int hot_count,
                                  const char* comment, Thread* thread);
 
+  static nmethod* compile_method(const methodHandle& method,
+                                   int osr_bci,
+                                   int comp_level,
+                                   const methodHandle& hot_method,
+                                   int hot_count,
+                                   const char* comment,
+                                   DirectiveSet* directive,
+                                   Thread* thread);
+
   // Acquire any needed locks and assign a compile id
   static uint assign_compile_id_unlocked(Thread* thread, const methodHandle& method, int osr_bci);
 
--- a/hotspot/src/share/vm/compiler/compilerDirectives.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/compiler/compilerDirectives.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -472,9 +472,12 @@
   _depth++;
 }
 
-void DirectivesStack::pop() {
+void DirectivesStack::pop(int count) {
   MutexLockerEx locker(DirectivesStack_lock, Mutex::_no_safepoint_check_flag);
-  pop_inner();
+  assert(count >= 0, "No negative values");
+  for (int i = 0; i < count; i++) {
+    pop_inner();
+  }
 }
 
 void DirectivesStack::pop_inner() {
--- a/hotspot/src/share/vm/compiler/compilerDirectives.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/compiler/compilerDirectives.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,6 +42,7 @@
     cflags(PrintAssembly,           bool, PrintAssembly, PrintAssembly) \
     cflags(PrintInlining,           bool, PrintInlining, PrintInlining) \
     cflags(PrintNMethods,           bool, PrintNMethods, PrintNMethods) \
+    cflags(BackgroundCompilation,   bool, BackgroundCompilation, BackgroundCompilation) \
     cflags(ReplayInline,            bool, false, ReplayInline) \
     cflags(DumpReplay,              bool, false, DumpReplay) \
     cflags(DumpInline,              bool, false, DumpInline) \
@@ -87,7 +88,7 @@
   static DirectiveSet* getMatchingDirective(methodHandle mh, AbstractCompiler* comp);
   static DirectiveSet* getDefaultDirective(AbstractCompiler* comp);
   static void push(CompilerDirectives* directive);
-  static void pop();
+  static void pop(int count);
   static bool check_capacity(int request_size, outputStream* st);
   static void clear();
   static void print(outputStream* st);
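Exposing BackgroundCompilation through the cflags table means it can now be set per match rule in a directives file, e.g. to force synchronous compilation of a single method while the rest of the VM keeps compiling in the background. A hypothetical directives file in the format this parser accepts (the match pattern is made up):

    [
      {
        match: "java/lang/String.indexOf",
        BackgroundCompilation: false
      }
    ]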
--- a/hotspot/src/share/vm/compiler/directivesParser.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/compiler/directivesParser.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,7 +55,7 @@
   assert(_tmp_depth == 0, "Consistency");
 }
 
-bool DirectivesParser::parse_string(const char* text, outputStream* st) {
+int DirectivesParser::parse_string(const char* text, outputStream* st) {
   DirectivesParser cd(text, st);
   if (cd.valid()) {
     return cd.install_directives();
@@ -63,7 +63,7 @@
     cd.clean_tmp();
     st->flush();
     st->print_cr("Parsing of compiler directives failed");
-    return false;
+    return -1;
   }
 }
 
@@ -97,17 +97,17 @@
       buffer[num_read] = '\0';
       // close file
       os::close(file_handle);
-      return parse_string(buffer, stream);
+      return parse_string(buffer, stream) > 0;
     }
   }
   return false;
 }
 
-bool DirectivesParser::install_directives() {
+int DirectivesParser::install_directives() {
   // Check limit
   if (!DirectivesStack::check_capacity(_tmp_depth, _st)) {
     clean_tmp();
-    return false;
+    return 0;
   }
 
   // Pop from internal temporary stack and push to compileBroker.
@@ -120,14 +120,14 @@
   }
   if (i == 0) {
     _st->print_cr("No directives in file");
-    return false;
+    return 0;
   } else {
     _st->print_cr("%i compiler directives added", i);
     if (CompilerDirectivesPrint) {
       // Print entire directives stack after new has been pushed.
       DirectivesStack::print(_st);
     }
-    return true;
+    return i;
   }
 }
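After this hunk the parser has a three-way return convention: -1 means the text failed to parse, 0 means it parsed but nothing was installed (empty input or a failed capacity check), and a positive value is the number of directives pushed. A sketch of a caller distinguishing the cases (surrounding code hypothetical):

    int n = DirectivesParser::parse_string(text, st);
    if (n < 0) {
      // Parse error: nothing changed, diagnostics already printed to 'st'.
    } else if (n == 0) {
      // Parsed, but nothing installed (no directives, or capacity exceeded).
    } else {
      // 'n' directives active; they can be unwound with DirectivesStack::pop(n).
    }

Note that the boolean parse_from_file above keeps its old contract by testing the result with `> 0`.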
 
--- a/hotspot/src/share/vm/compiler/directivesParser.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/compiler/directivesParser.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,8 +51,8 @@
   static bool has_file();
   static bool parse_from_flag();
   static bool parse_from_file(const char* filename, outputStream* st);
-  static bool parse_string(const char* string, outputStream* st);
-  bool install_directives();
+  static int  parse_string(const char* string, outputStream* st);
+  int install_directives();
 
 private:
   DirectivesParser(const char* text, outputStream* st);
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -2329,9 +2329,13 @@
     GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
     if (_cm->has_aborted()) {
       _gc_tracer_cm->report_concurrent_mode_failure();
+
+      // ConcurrentGCTimer will be ended as well.
+      _cm->register_concurrent_gc_end_and_stop_timer();
+    } else {
+      _gc_timer_cm->register_gc_end();
     }
 
-    _gc_timer_cm->register_gc_end();
     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
 
     // Clear state variables to prepare for the next concurrent cycle.
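The branch guards a subtle ordering: report_gc_end() below reads the timer's end timestamp, and on the concurrent-mode-failure path an open concurrent phase may still be registered. The new helper therefore closes the phase and ends the ConcurrentGCTimer in one step, while the normal path ends only the timer. A condensed restatement with the intent spelled out in comments:

    if (_cm->has_aborted()) {
      // Abort path: a concurrent phase may still be open; close it and the
      // ConcurrentGCTimer together so the timer cannot end mid-phase.
      _cm->register_concurrent_gc_end_and_stop_timer();
    } else {
      // Normal path: no phase is open here, so ending the timer alone is safe.
      _gc_timer_cm->register_gc_end();
    }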
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -269,6 +269,8 @@
   _reserve_regions = 0;
 
   _cset_chooser = new CollectionSetChooser();
+
+  _ihop_control = create_ihop_control();
 }
 
 G1CollectorPolicy::~G1CollectorPolicy() {
@@ -469,8 +471,6 @@
   if (max_young_size != MaxNewSize) {
     FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
   }
-
-  _ihop_control = create_ihop_control();
 }
 
 void G1CollectorPolicy::initialize_flags() {
@@ -565,6 +565,8 @@
   _reserve_regions = (uint) ceil(reserve_regions_d);
 
   _young_gen_sizer->heap_size_changed(new_number_of_regions);
+
+  _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
 }
 
 uint G1CollectorPolicy::calculate_young_list_desired_min_length(
@@ -1234,13 +1236,11 @@
 G1IHOPControl* G1CollectorPolicy::create_ihop_control() const {
   if (G1UseAdaptiveIHOP) {
     return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
-                                     G1CollectedHeap::heap()->max_capacity(),
                                      &_predictor,
                                      G1ReservePercent,
                                      G1HeapWastePercent);
   } else {
-    return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent,
-                                   G1CollectedHeap::heap()->max_capacity());
+    return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
   }
 }
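The moves here split the IHOP control's lifecycle in two: it is constructed with the policy, at which point heap capacity is unknown (hence the dropped max_capacity constructor arguments), and it learns its target occupancy whenever the committed heap size changes. A condensed sketch of the resulting protocol, with method names taken from this hunk and call sites simplified:

    // 1. In the G1CollectorPolicy constructor -- no heap exists yet:
    _ihop_control = create_ihop_control();   // no capacity argument anymore
    // 2. On every heap resize (the path shown above):
    _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);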
 
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Tue Apr 05 15:39:34 2016 -0400
@@ -441,7 +441,7 @@
   _has_aborted(false),
   _restart_for_overflow(false),
   _concurrent_marking_in_progress(false),
-  _concurrent_phase_started(false),
+  _concurrent_phase_status(ConcPhaseNotStarted),
 
   // _verbose_level set below
 
@@ -1008,16 +1008,43 @@
 }
 
 void G1ConcurrentMark::register_concurrent_phase_start(const char* title) {
-  assert(!_concurrent_phase_started, "Sanity");
-  _concurrent_phase_started = true;
+  uint old_val = 0;
+  do {
+    old_val = Atomic::cmpxchg(ConcPhaseStarted, &_concurrent_phase_status, ConcPhaseNotStarted);
+  } while (old_val != ConcPhaseNotStarted);
   _g1h->gc_timer_cm()->register_gc_concurrent_start(title);
 }
 
+void G1ConcurrentMark::register_concurrent_phase_end_common(bool end_timer) {
+  if (_concurrent_phase_status == ConcPhaseNotStarted) {
+    return;
+  }
+
+  uint old_val = Atomic::cmpxchg(ConcPhaseStopping, &_concurrent_phase_status, ConcPhaseStarted);
+  if (old_val == ConcPhaseStarted) {
+    _g1h->gc_timer_cm()->register_gc_concurrent_end();
+    // If 'end_timer' is true, we came here to end the timer, which requires the
+    // concurrent phase to be ended first. End it before publishing 'ConcPhaseNotStarted'
+    // so that 'ConcurrentMarkThread' cannot start a new concurrent phase in between.
+    if (end_timer) {
+      _g1h->gc_timer_cm()->register_gc_end();
+    }
+    old_val = Atomic::cmpxchg(ConcPhaseNotStarted, &_concurrent_phase_status, ConcPhaseStopping);
+    assert(old_val == ConcPhaseStopping, "Should not have changed since we entered this scope.");
+  } else {
+    do {
+      // Let the other thread finish changing '_concurrent_phase_status' to 'ConcPhaseNotStarted'.
+      os::naked_short_sleep(1);
+    } while (_concurrent_phase_status != ConcPhaseNotStarted);
+  }
+}
+
 void G1ConcurrentMark::register_concurrent_phase_end() {
-  if (_concurrent_phase_started) {
-    _concurrent_phase_started = false;
-    _g1h->gc_timer_cm()->register_gc_concurrent_end();
-  }
+  register_concurrent_phase_end_common(false);
+}
+
+void G1ConcurrentMark::register_concurrent_gc_end_and_stop_timer() {
+  register_concurrent_phase_end_common(true);
 }
 
 void G1ConcurrentMark::markFromRoots() {
@@ -2605,9 +2632,6 @@
 
   _g1h->trace_heap_after_concurrent_cycle();
 
-  // Close any open concurrent phase timing
-  register_concurrent_phase_end();
-
   _g1h->register_concurrent_cycle_end();
 }
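The boolean _concurrent_phase_started becomes a small CAS-driven state machine, ConcPhaseNotStarted -> ConcPhaseStarted -> ConcPhaseStopping -> ConcPhaseNotStarted, so that the ConcurrentMarkThread starting a phase and another thread stopping one (possibly the concurrent-mode-failure path) can race safely: only the thread that wins the Started -> Stopping transition ends the timer(s), and any losing stopper waits until NotStarted is republished. A condensed sketch of the two transitions, with state names from this hunk and the logic abbreviated:

    // Start: spin until we win NotStarted -> Started.
    while (Atomic::cmpxchg(ConcPhaseStarted, &_concurrent_phase_status,
                           ConcPhaseNotStarted) != ConcPhaseNotStarted) {
      // another thread is still stopping the previous phase; retry
    }

    // Stop: exactly one thread wins Started -> Stopping and does the work.
    if (Atomic::cmpxchg(ConcPhaseStopping, &_concurrent_phase_status,
                        ConcPhaseStarted) == ConcPhaseStarted) {
      // End the concurrent phase (and, on the abort path, the whole timer),
      // then publish NotStarted so starters and waiters can proceed.
      _concurrent_phase_status = ConcPhaseNotStarted;
    } else {
      while (_concurrent_phase_status != ConcPhaseNotStarted) {
        os::naked_short_sleep(1);  // wait for the winner to finish
      }
    }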
 
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp	Mon Apr 04 02:10:46 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp	Tue Apr 05 15:39:34 2016 -0400
@@ -352,8 +352,17 @@
   // time of remark