changeset 13622:71337910df60 jdk-10+22

Merge
author jwilhelm
date Tue, 29 Aug 2017 17:17:58 +0200
parents 5e3603c1495f f3413e6d6b8f
children a7454342f29c
files
diffstat 56 files changed, 851 insertions(+), 550 deletions(-)
--- a/src/cpu/aarch64/vm/aarch64.ad	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/cpu/aarch64/vm/aarch64.ad	Tue Aug 29 17:17:58 2017 +0200
@@ -12658,6 +12658,64 @@
   ins_pipe(ialu_reg_shift);
 %}
 
+// We can use ubfiz when masking by a positive number and then left shifting the result.
+// We know that the mask is positive because immI_bitmask guarantees it.
+instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
+%{
+  match(Set dst (LShiftI (AndI src mask) lshift));
+  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
+    (exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1));
+
+  ins_cost(INSN_COST);
+  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
+  ins_encode %{
+    int lshift = $lshift$$constant;
+    long mask = $mask$$constant;
+    int width = exact_log2(mask+1);
+    __ ubfizw(as_Register($dst$$reg),
+          as_Register($src$$reg), lshift, width);
+  %}
+  ins_pipe(ialu_reg_shift);
+%}
+// We can use ubfiz when masking by a positive number and then left shifting the result.
+// We know that the mask is positive because immL_bitmask guarantees it.
+instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
+%{
+  match(Set dst (LShiftL (AndL src mask) lshift));
+  predicate((unsigned int)n->in(2)->get_int() <= 63 &&
+    (exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1));
+
+  ins_cost(INSN_COST);
+  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
+  ins_encode %{
+    int lshift = $lshift$$constant;
+    long mask = $mask$$constant;
+    int width = exact_log2(mask+1);
+    __ ubfiz(as_Register($dst$$reg),
+          as_Register($src$$reg), lshift, width);
+  %}
+  ins_pipe(ialu_reg_shift);
+%}
+
+// If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
+instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
+%{
+  match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
+  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
+    (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);
+
+  ins_cost(INSN_COST);
+  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
+  ins_encode %{
+    int lshift = $lshift$$constant;
+    long mask = $mask$$constant;
+    int width = exact_log2(mask+1);
+    __ ubfiz(as_Register($dst$$reg),
+             as_Register($src$$reg), lshift, width);
+  %}
+  ins_pipe(ialu_reg_shift);
+%}
+
 // Rotations
 
 instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
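A standalone sketch (invented names, not part of the patch) of the legality check the ubfiz predicates above encode: the mask must have the form 2^width - 1, and the shifted bitfield must still fit in the register.

// Stand-in for HotSpot's exact_log2(mask + 1): width of a mask 2^w - 1.
static int field_width(long mask) {
  long v = mask + 1;
  int w = 0;
  while (v > 1) { v >>= 1; ++w; }
  return w;
}

// reg_bits is 32 for the I forms and 64 for the L form.
static bool ubfiz_matches(int lshift, long mask, int reg_bits) {
  int width = field_width(mask);
  return lshift >= 0 && lshift < reg_bits && width + lshift <= reg_bits;
}

// Example: (x & 0x3F) << 3 has width 6 and lshift 3; 6 + 3 <= 32, so the
// matcher can emit the single instruction "ubfizw dst, src, 3, 6".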
--- a/src/cpu/aarch64/vm/aarch64_ad.m4	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/cpu/aarch64/vm/aarch64_ad.m4	Tue Aug 29 17:17:58 2017 +0200
@@ -214,6 +214,48 @@
   ins_pipe(ialu_reg_shift);
 %}
 
+define(`UBFIZ_INSN',
+// We can use ubfiz when masking by a positive number and then left shifting the result.
+// We know that the mask is positive because imm$1_bitmask guarantees it.
+`instruct $2$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift, imm$1_bitmask mask)
+%{
+  match(Set dst (LShift$1 (And$1 src mask) lshift));
+  predicate((unsigned int)n->in(2)->get_int() <= $3 &&
+    (exact_log2$5(n->in(1)->in(2)->get_$4()+1) + (unsigned int)n->in(2)->get_int()) <= ($3+1));
+
+  ins_cost(INSN_COST);
+  format %{ "$2 $dst, $src, $lshift, $mask" %}
+  ins_encode %{
+    int lshift = $lshift$$constant;
+    long mask = $mask$$constant;
+    int width = exact_log2(mask+1);
+    __ $2(as_Register($dst$$reg),
+          as_Register($src$$reg), lshift, width);
+  %}
+  ins_pipe(ialu_reg_shift);
+%}')
+UBFIZ_INSN(I, ubfizw, 31, int)
+UBFIZ_INSN(L, ubfiz, 63, long, _long)
+
+// If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
+instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
+%{
+  match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
+  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
+    (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);
+
+  ins_cost(INSN_COST);
+  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
+  ins_encode %{
+    int lshift = $lshift$$constant;
+    long mask = $mask$$constant;
+    int width = exact_log2(mask+1);
+    __ ubfiz(as_Register($dst$$reg),
+             as_Register($src$$reg), lshift, width);
+  %}
+  ins_pipe(ialu_reg_shift);
+%}
+
 // Rotations
 
 define(`EXTRACT_INSN',
--- a/src/os/linux/vm/os_linux.cpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/os/linux/vm/os_linux.cpp	Tue Aug 29 17:17:58 2017 +0200
@@ -1748,8 +1748,10 @@
     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
 #if defined(VM_LITTLE_ENDIAN)
     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64 LE"},
+    {EM_SH,          EM_SH,      ELFCLASS32, ELFDATA2LSB, (char*)"SuperH"},
 #else
     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
+    {EM_SH,          EM_SH,      ELFCLASS32, ELFDATA2MSB, (char*)"SuperH BE"},
 #endif
     {EM_ARM,         EM_ARM,     ELFCLASS32,   ELFDATA2LSB, (char*)"ARM"},
     {EM_S390,        EM_S390,    ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"},
@@ -1791,9 +1793,11 @@
   static  Elf32_Half running_arch_code=EM_MIPS;
 #elif  (defined M68K)
   static  Elf32_Half running_arch_code=EM_68K;
+#elif  (defined SH)
+  static  Elf32_Half running_arch_code=EM_SH;
 #else
     #error Method os::dll_load requires that one of following is defined:\
-        AARCH64, ALPHA, ARM, AMD64, IA32, IA64, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, S390, __sparc
+        AARCH64, ALPHA, ARM, AMD64, IA32, IA64, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, S390, SH, __sparc
 #endif
 
  // Identify compatibility class for VM's architecture and library's architecture
--- a/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -95,9 +95,21 @@
 #define strasm_nobarrier                  ""
 #define strasm_nobarrier_clobber_memory   ""
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
 
-  unsigned int result;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+
+  D result;
 
   __asm__ __volatile__ (
     strasm_lwsync
@@ -110,13 +122,17 @@
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
-  return (jint) result;
+  return result;
 }
 
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
 
-  long result;
+  D result;
 
   __asm__ __volatile__ (
     strasm_lwsync
@@ -129,11 +145,7 @@
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
-  return (intptr_t) result;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+  return result;
 }
 
 
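For context, a condensed reconstruction of the two CRTP adaptors the PlatformAdd specializations derive from (the real definitions live in the shared runtime/atomic.hpp, changed elsewhere in this 56-file merge; treat the bodies below as a sketch). Atomic::add() returns the updated value, so platforms whose primitive returns the old value (x86 xadd) derive from FetchAndAdd, which adds the addend back, while platforms whose primitive already returns the new value derive from AddAndFetch.

template<typename Derived>
struct FetchAndAdd {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const {
    // Derived::fetch_and_add returns the old value; produce the new one.
    return static_cast<const Derived&>(*this).fetch_and_add(add_value, dest) + add_value;
  }
};

template<typename Derived>
struct AddAndFetch {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const {
    // Derived::add_and_fetch already returns the new value.
    return static_cast<const Derived&>(*this).add_and_fetch(add_value, dest);
  }
};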
--- a/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -40,13 +40,25 @@
 inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
 
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  jint addend = add_value;
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D fetch_and_add(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+  D old_value;
   __asm__ volatile (  "lock xaddl %0,(%2)"
-                    : "=r" (addend)
-                    : "0" (addend), "r" (dest)
+                    : "=r" (old_value)
+                    : "0" (add_value), "r" (dest)
                     : "cc", "memory");
-  return addend + add_value;
+  return old_value;
 }
 
 inline void Atomic::inc    (volatile jint*     dest) {
@@ -111,17 +123,17 @@
 inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
 inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  intptr_t addend = add_value;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+  D old_value;
   __asm__ __volatile__ (  "lock xaddq %0,(%2)"
-                        : "=r" (addend)
-                        : "0" (addend), "r" (dest)
+                        : "=r" (old_value)
+                        : "0" (add_value), "r" (dest)
                         : "cc", "memory");
-  return addend + add_value;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+  return old_value;
 }
 
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
@@ -164,15 +176,6 @@
 
 #else // !AMD64
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
   inc((volatile jint*)dest);
 }
--- a/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -74,7 +74,7 @@
 }
 
 /* Atomically add an int to memory.  */
-static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
+static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
   for (;;) {
       // Loop until success.
 
@@ -135,7 +135,7 @@
 }
 
 /* Atomically add an int to memory.  */
-static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
+static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
   for (;;) {
       // Loop until a __kernel_cmpxchg succeeds.
 
@@ -173,32 +173,38 @@
   *dest = store_value;
 }
 
-inline jint Atomic::add(jint add_value, volatile jint* dest) {
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+
 #ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
+  return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
 #else
 #ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
+  return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
 #else
   return __sync_add_and_fetch(dest, add_value);
 #endif // M68K
 #endif // ARM
 }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-#ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
-#else
-#ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
-#else
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+
   return __sync_add_and_fetch(dest, add_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
 }
 
 inline void Atomic::inc(volatile jint* dest) {
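A simplified reconstruction of add_using_helper, which the ARM and M68K branches above now route through (the real template sits in the shared atomic.hpp and converts via PrimitiveConversions; this sketch uses plain casts). It adapts a platform helper with a fixed C signature, such as int (*)(int, volatile int*), to the templated add_and_fetch<I, D> interface.

template<typename Type, typename Fn, typename I, typename D>
inline D add_using_helper(Fn fn, I add_value, D volatile* dest) {
  // Narrow the addend to the helper's operand type, reinterpret the
  // destination pointer, then widen the result back to D.
  return static_cast<D>(fn(static_cast<Type>(add_value),
                           reinterpret_cast<Type volatile*>(dest)));
}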
--- a/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -47,10 +47,15 @@
 inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
 
 
-inline jint Atomic::add(jint add_value, volatile jint* dest)
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
- return __sync_add_and_fetch(dest, add_value);
-}
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const {
+    return __sync_add_and_fetch(dest, add_value);
+  }
+};
 
 inline void Atomic::inc(volatile jint* dest)
 {
@@ -105,16 +110,6 @@
 inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
 inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
-{
- return __sync_add_and_fetch(dest, add_value);
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest)
-{
-  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
-}
-
 inline void Atomic::inc_ptr(volatile intptr_t* dest)
 {
  add_ptr(1, dest);
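Call sites are unaffected by this refactoring (illustrative snippet, not from the patch): the templated front end deduces I and D, dispatches on sizeof(D) to a PlatformAdd specialization, and still returns the updated value.

volatile intptr_t counter = 0;
intptr_t updated = Atomic::add((intptr_t)2, &counter);  // updated == 2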
--- a/src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -91,9 +91,21 @@
 //
 // For ARMv7 we add explicit barriers in the stubs.
 
-inline jint Atomic::add(jint add_value, volatile jint* dest) {
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
 #ifdef AARCH64
-  jint val;
+  D val;
   int tmp;
   __asm__ volatile(
     "1:\n\t"
@@ -106,7 +118,7 @@
     : "memory");
   return val;
 #else
-  return (*os::atomic_add_func)(add_value, dest);
+  return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
 #endif
 }
 
@@ -118,9 +130,13 @@
   Atomic::add(-1, (volatile jint *)dest);
 }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
 #ifdef AARCH64
-  intptr_t val;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+  D val;
   int tmp;
   __asm__ volatile(
     "1:\n\t"
@@ -132,14 +148,8 @@
     : [add_val] "r" (add_value), [dest] "r" (dest)
     : "memory");
   return val;
-#else
-  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
-#endif
 }
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
-}
+#endif // AARCH64
 
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
   Atomic::add_ptr(1, dest);
--- a/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -93,9 +93,21 @@
 #define strasm_nobarrier                  ""
 #define strasm_nobarrier_clobber_memory   ""
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
 
-  unsigned int result;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+
+  D result;
 
   __asm__ __volatile__ (
     strasm_lwsync
@@ -108,13 +120,17 @@
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
-  return (jint) result;
+  return result;
 }
 
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
 
-  long result;
+  D result;
 
   __asm__ __volatile__ (
     strasm_lwsync
@@ -127,11 +143,7 @@
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
-  return (intptr_t) result;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+  return result;
 }
 
 
--- a/src/os_cpu/linux_s390/vm/atomic_linux_s390.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/os_cpu/linux_s390/vm/atomic_linux_s390.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -82,8 +82,21 @@
 // The return value of the method is the value that was successfully stored. At the
 // time the caller receives back control, the value in memory may have changed already.
 
-inline jint Atomic::add(jint inc, volatile jint*dest) {
-  unsigned int old, upd;
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+
+  D old, upd;
 
   if (VM_Version::has_LoadAndALUAtomicV1()) {
     __asm__ __volatile__ (
@@ -124,12 +137,17 @@
     );
   }
 
-  return (jint)upd;
+  return upd;
 }
 
 
-inline intptr_t Atomic::add_ptr(intptr_t inc, volatile intptr_t* dest) {
-  unsigned long old, upd;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+
+  D old, upd;
 
   if (VM_Version::has_LoadAndALUAtomicV1()) {
     __asm__ __volatile__ (
@@ -170,11 +188,7 @@
     );
   }
 
-  return (intptr_t)upd;
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+  return upd;
 }
 
 
--- a/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -51,8 +51,21 @@
 
 inline jlong Atomic::load(const volatile jlong* src) { return *src; }
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  intptr_t rv;
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+
+  D rv;
   __asm__ volatile(
     "1: \n\t"
     " ld     [%2], %%o2\n\t"
@@ -68,8 +81,12 @@
   return rv;
 }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  intptr_t rv;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+
+  D rv;
   __asm__ volatile(
     "1: \n\t"
     " ldx    [%2], %%o2\n\t"
@@ -85,10 +102,6 @@
   return rv;
 }
 
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest);
-}
-
 
 inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
   intptr_t rv = exchange_value;
--- a/src/os_cpu/linux_x86/vm/atomic_linux_x86.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/os_cpu/linux_x86/vm/atomic_linux_x86.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -40,13 +40,25 @@
 inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
 
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  jint addend = add_value;
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D fetch_and_add(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+  D old_value;
   __asm__ volatile (  "lock xaddl %0,(%2)"
-                    : "=r" (addend)
-                    : "0" (addend), "r" (dest)
+                    : "=r" (old_value)
+                    : "0" (add_value), "r" (dest)
                     : "cc", "memory");
-  return addend + add_value;
+  return old_value;
 }
 
 inline void Atomic::inc    (volatile jint*     dest) {
@@ -111,17 +123,17 @@
 inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
 inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  intptr_t addend = add_value;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+  D old_value;
   __asm__ __volatile__ ("lock xaddq %0,(%2)"
-                        : "=r" (addend)
-                        : "0" (addend), "r" (dest)
+                        : "=r" (old_value)
+                        : "0" (add_value), "r" (dest)
                         : "cc", "memory");
-  return addend + add_value;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+  return old_value;
 }
 
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
@@ -164,15 +176,6 @@
 
 #else // !AMD64
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
   inc((volatile jint*)dest);
 }
--- a/src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -74,7 +74,7 @@
 }
 
 /* Atomically add an int to memory.  */
-static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
+static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
   for (;;) {
       // Loop until success.
 
@@ -135,7 +135,7 @@
 }
 
 /* Atomically add an int to memory.  */
-static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
+static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
   for (;;) {
       // Loop until a __kernel_cmpxchg succeeds.
 
@@ -167,32 +167,38 @@
   *dest = store_value;
 }
 
-inline jint Atomic::add(jint add_value, volatile jint* dest) {
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+
 #ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
+  return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
 #else
 #ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
+  return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
 #else
   return __sync_add_and_fetch(dest, add_value);
 #endif // M68K
 #endif // ARM
 }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-#ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
-#else
-#ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
-#else
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+
   return __sync_add_and_fetch(dest, add_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
 }
 
 inline void Atomic::inc(volatile jint* dest) {
--- a/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -62,22 +62,21 @@
 extern "C" jint     _Atomic_swap32(jint     exchange_value, volatile jint*     dest);
 extern "C" intptr_t _Atomic_swap64(intptr_t exchange_value, volatile intptr_t* dest);
 
-extern "C" jint     _Atomic_add32(jint     inc,       volatile jint*     dest);
-extern "C" intptr_t _Atomic_add64(intptr_t add_value, volatile intptr_t* dest);
-
-
-inline jint     Atomic::add     (jint    add_value, volatile jint*     dest) {
-  return _Atomic_add32(add_value, dest);
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return _Atomic_add64(add_value, dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest);
-}
-
+// Implement ADD using a CAS loop.
+template<size_t byte_size>
+struct Atomic::PlatformAdd VALUE_OBJ_CLASS_SPEC {
+  template<typename I, typename D>
+  inline D operator()(I add_value, D volatile* dest) const {
+    D old_value = *dest;
+    while (true) {
+      D new_value = old_value + add_value;
+      D result = cmpxchg(new_value, dest, old_value);
+      if (result == old_value) break;
+      old_value = result;
+    }
+    return old_value + add_value;
+  }
+};
 
 inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
   return _Atomic_swap32(exchange_value, dest);
--- a/src/os_cpu/solaris_sparc/vm/solaris_sparc.il	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/os_cpu/solaris_sparc/vm/solaris_sparc.il	Tue Aug 29 17:17:58 2017 +0200
@@ -90,58 +90,6 @@
         .nonvolatile
         .end
 
-  // Support for jint Atomic::add(jint add_value, volatile jint* dest).
-  //
-  // Arguments:
-  //      add_value: O0   (e.g., +1 or -1)
-  //      dest:      O1
-  //
-  // Results:
-  //     O0: the new value stored in dest
-  //
-  // Overwrites O3
-
-        .inline _Atomic_add32, 2
-        .volatile
-    2:
-        ld      [%o1], %o2
-        add     %o0, %o2, %o3
-        cas     [%o1], %o2, %o3
-        cmp     %o2, %o3
-        bne     2b
-         nop
-        add     %o0, %o2, %o0
-        .nonvolatile
-        .end
-
-
-  // Support for intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
-  //
-  // 64-bit
-  //
-  // Arguments:
-  //      add_value: O0   (e.g., +1 or -1)
-  //      dest:      O1
-  //
-  // Results:
-  //     O0: the new value stored in dest
-  //
-  // Overwrites O3
-
-        .inline _Atomic_add64, 2
-        .volatile
-    3:
-        ldx     [%o1], %o2
-        add     %o0, %o2, %o3
-        casx    [%o1], %o2, %o3
-        cmp     %o2, %o3
-        bne     %xcc, 3b
-         nop
-        add     %o0, %o2, %o0
-        .nonvolatile
-        .end
-
-
   // Support for void Prefetch::read(void *loc, intx interval)
   //
   // Prefetch for several reads.
--- a/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -51,6 +51,8 @@
 
 extern "C" {
   jint _Atomic_add(jint add_value, volatile jint* dest);
+  jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);
+
   jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
   jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest,
                              jbyte compare_value);
@@ -60,8 +62,34 @@
                              jlong compare_value);
 }
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  return _Atomic_add(add_value, dest);
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+// Not using add_using_helper; see comment for cmpxchg.
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+  return PrimitiveConversions::cast<D>(
+    _Atomic_add(PrimitiveConversions::cast<jint>(add_value),
+                reinterpret_cast<jint volatile*>(dest)));
+}
+
+// Not using add_using_helper; see comment for cmpxchg.
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+  return PrimitiveConversions::cast<D>(
+    _Atomic_add_long(PrimitiveConversions::cast<jlong>(add_value),
+                     reinterpret_cast<jlong volatile*>(dest)));
 }
 
 inline jint     Atomic::xchg       (jint     exchange_value, volatile jint*     dest) {
@@ -115,17 +143,8 @@
 
 inline void Atomic::store    (jlong    store_value, jlong*             dest) { *dest = store_value; }
 inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-extern "C" jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);
 extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
-}
-
 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
   return (intptr_t)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
 }
--- a/src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -57,20 +57,28 @@
 inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
 inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
 
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
 #ifdef AMD64
 inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
 inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  return (jint)(*os::atomic_add_func)(add_value, dest);
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
 }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)(*os::atomic_add_ptr_func)(add_value, dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)(*os::atomic_add_ptr_func)(add_value, (volatile intptr_t*)dest);
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  return add_using_helper<intptr_t>(os::atomic_add_ptr_func, add_value, dest);
 }
 
 inline void Atomic::inc    (volatile jint*     dest) {
@@ -130,7 +138,11 @@
 
 #else // !AMD64
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
   __asm {
     mov edx, dest;
     mov eax, add_value;
@@ -140,14 +152,6 @@
   }
 }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add((jint)add_value, (volatile jint*)dest);
-}
-
 inline void Atomic::inc    (volatile jint*     dest) {
   // alternative for InterlockedIncrement
   __asm {
--- a/src/share/vm/classfile/systemDictionary.cpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/classfile/systemDictionary.cpp	Tue Aug 29 17:17:58 2017 +0200
@@ -77,9 +77,8 @@
 #include "services/classLoadingService.hpp"
 #include "services/diagnosticCommand.hpp"
 #include "services/threadService.hpp"
-#include "trace/traceMacros.hpp"
+#include "trace/tracing.hpp"
 #include "utilities/macros.hpp"
-#include "utilities/ticks.hpp"
 #if INCLUDE_CDS
 #include "classfile/sharedClassUtil.hpp"
 #include "classfile/systemDictionaryShared.hpp"
@@ -87,9 +86,6 @@
 #if INCLUDE_JVMCI
 #include "jvmci/jvmciRuntime.hpp"
 #endif
-#if INCLUDE_TRACE
-#include "trace/tracing.hpp"
-#endif
 
 PlaceholderTable*      SystemDictionary::_placeholders        = NULL;
 Dictionary*            SystemDictionary::_shared_dictionary   = NULL;
@@ -615,17 +611,17 @@
   return NULL;
 }
 
-static void post_class_load_event(const Ticks& start_time,
-                                  InstanceKlass* k,
+static void post_class_load_event(EventClassLoad* event,
+                                  const InstanceKlass* k,
                                   const ClassLoaderData* init_cld) {
 #if INCLUDE_TRACE
-  EventClassLoad event(UNTIMED);
-  if (event.should_commit()) {
-    event.set_starttime(start_time);
-    event.set_loadedClass(k);
-    event.set_definingClassLoader(k->class_loader_data());
-    event.set_initiatingClassLoader(init_cld);
-    event.commit();
+  assert(event != NULL, "invariant");
+  assert(k != NULL, "invariant");
+  if (event->should_commit()) {
+    event->set_loadedClass(k);
+    event->set_definingClassLoader(k->class_loader_data());
+    event->set_initiatingClassLoader(init_cld);
+    event->commit();
   }
 #endif // INCLUDE_TRACE
 }
@@ -653,7 +649,7 @@
   assert(name != NULL && !FieldType::is_array(name) &&
          !FieldType::is_obj(name), "invalid class name");
 
-  Ticks class_load_start_time = Ticks::now();
+  EventClassLoad class_load_start_event;
 
   HandleMark hm(THREAD);
 
@@ -899,7 +895,7 @@
     return NULL;
   }
 
-  post_class_load_event(class_load_start_time, k, loader_data);
+  post_class_load_event(&class_load_start_event, k, loader_data);
 
 #ifdef ASSERT
   {
@@ -1006,7 +1002,7 @@
                                               GrowableArray<Handle>* cp_patches,
                                               TRAPS) {
 
-  Ticks class_load_start_time = Ticks::now();
+  EventClassLoad class_load_start_event;
 
   ClassLoaderData* loader_data;
   if (host_klass != NULL) {
@@ -1064,7 +1060,7 @@
         JvmtiExport::post_class_load((JavaThread *) THREAD, k);
     }
 
-    post_class_load_event(class_load_start_time, k, loader_data);
+    post_class_load_event(&class_load_start_event, k, loader_data);
   }
   assert(host_klass != NULL || NULL == cp_patches,
          "cp_patches only found with host_klass");
--- a/src/share/vm/compiler/compileBroker.cpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/compiler/compileBroker.cpp	Tue Aug 29 17:17:58 2017 +0200
@@ -720,44 +720,49 @@
     // At this point it may be possible that no osthread was created for the
     // JavaThread due to lack of memory. We would have to throw an exception
     // in that case. However, since this must work and we do not allow
-    // exceptions anyway, check and abort if this fails.
+    // exceptions anyway, check and abort if this fails. But first release the
+    // lock.
 
-    if (thread == NULL || thread->osthread() == NULL) {
-      vm_exit_during_initialization("java.lang.OutOfMemoryError",
-                                    os::native_thread_creation_failed_msg());
+    if (thread != NULL && thread->osthread() != NULL) {
+
+      java_lang_Thread::set_thread(thread_oop(), thread);
+
+      // Note that this only sets the JavaThread _priority field, which by
+      // definition is limited to Java priorities and not OS priorities.
+      // The os-priority is set in the CompilerThread startup code itself
+
+      java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
+
+      // Note that we cannot call os::set_priority because it expects Java
+      // priorities and we are *explicitly* using OS priorities so that it's
+      // possible to set the compiler thread priority higher than any Java
+      // thread.
+
+      int native_prio = CompilerThreadPriority;
+      if (native_prio == -1) {
+        if (UseCriticalCompilerThreadPriority) {
+          native_prio = os::java_to_os_priority[CriticalPriority];
+        } else {
+          native_prio = os::java_to_os_priority[NearMaxPriority];
+        }
+      }
+      os::set_native_priority(thread, native_prio);
+
+      java_lang_Thread::set_daemon(thread_oop());
+
+      thread->set_threadObj(thread_oop());
+      if (compiler_thread) {
+        thread->as_CompilerThread()->set_compiler(comp);
+      }
+      Threads::add(thread);
+      Thread::start(thread);
     }
+  }
 
-    java_lang_Thread::set_thread(thread_oop(), thread);
-
-    // Note that this only sets the JavaThread _priority field, which by
-    // definition is limited to Java priorities and not OS priorities.
-    // The os-priority is set in the CompilerThread startup code itself
-
-    java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
-
-    // Note that we cannot call os::set_priority because it expects Java
-    // priorities and we are *explicitly* using OS priorities so that it's
-    // possible to set the compiler thread priority higher than any Java
-    // thread.
-
-    int native_prio = CompilerThreadPriority;
-    if (native_prio == -1) {
-      if (UseCriticalCompilerThreadPriority) {
-        native_prio = os::java_to_os_priority[CriticalPriority];
-      } else {
-        native_prio = os::java_to_os_priority[NearMaxPriority];
-      }
-    }
-    os::set_native_priority(thread, native_prio);
-
-    java_lang_Thread::set_daemon(thread_oop());
-
-    thread->set_threadObj(thread_oop());
-    if (compiler_thread) {
-      thread->as_CompilerThread()->set_compiler(comp);
-    }
-    Threads::add(thread);
-    Thread::start(thread);
+  // First release lock before aborting VM.
+  if (thread == NULL || thread->osthread() == NULL) {
+    vm_exit_during_initialization("java.lang.OutOfMemoryError",
+                                  os::native_thread_creation_failed_msg());
   }
 
   // Let go of Threads_lock before yielding
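The shape of the restructuring above, condensed (pseudocode eliding the body of the function): the fatal-exit path is hoisted out of the MutexLocker scope so Threads_lock is released before the VM aborts.

{
  MutexLocker mu(Threads_lock, THREAD);
  // ... allocate thread_oop and the JavaThread ...
  if (thread != NULL && thread->osthread() != NULL) {
    // ... set priority and daemon status, Threads::add(), Thread::start() ...
  }
}  // Threads_lock released here.

if (thread == NULL || thread->osthread() == NULL) {
  vm_exit_during_initialization("java.lang.OutOfMemoryError",
                                os::native_thread_creation_failed_msg());
}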
--- a/src/share/vm/gc/g1/g1CardLiveData.cpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/gc/g1/g1CardLiveData.cpp	Tue Aug 29 17:17:58 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -409,7 +409,7 @@
 
   virtual void work(uint worker_id) {
     while (true) {
-      size_t to_process = Atomic::add(1, &_cur_chunk) - 1;
+      size_t to_process = Atomic::add(1u, &_cur_chunk) - 1;
       if (to_process >= _num_chunks) {
         break;
       }
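Why the literal changed from 1 to 1u here and in the G1 files below (a reading suggested by the new templated API, not stated in this hunk): the shared Atomic::add<I, D> front end appears to require the addend's signedness to match the destination's, and these counters are size_t.

volatile size_t cur_chunk = 0;
// Atomic::add(1,  &cur_chunk);  // I = int (signed), D = size_t (unsigned): ill-formed
// Atomic::add(1u, &cur_chunk);  // I = unsigned int, D = size_t: accepted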
--- a/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Tue Aug 29 17:17:58 2017 +0200
@@ -200,7 +200,7 @@
     return NULL;
   }
 
-  size_t cur_idx = Atomic::add(1, &_hwm) - 1;
+  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
   if (cur_idx >= _chunk_capacity) {
     return NULL;
   }
--- a/src/share/vm/gc/g1/g1HotCardCache.cpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/gc/g1/g1HotCardCache.cpp	Tue Aug 29 17:17:58 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,7 +64,7 @@
     return card_ptr;
   }
   // Otherwise, the card is hot.
-  size_t index = Atomic::add(1, &_hot_cache_idx) - 1;
+  size_t index = Atomic::add(1u, &_hot_cache_idx) - 1;
   size_t masked_index = index & (_hot_cache_size - 1);
   jbyte* current_ptr = _hot_cache[masked_index];
 
--- a/src/share/vm/gc/g1/g1HotCardCache.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/gc/g1/g1HotCardCache.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,7 +67,7 @@
 
   size_t            _hot_cache_size;
 
-  int               _hot_cache_par_chunk_size;
+  size_t            _hot_cache_par_chunk_size;
 
   // Avoids false sharing when concurrently updating _hot_cache_idx or
   // _hot_cache_par_claimed_idx. These are never updated at the same time
--- a/src/share/vm/gc/g1/g1RemSet.cpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/gc/g1/g1RemSet.cpp	Tue Aug 29 17:17:58 2017 +0200
@@ -243,7 +243,7 @@
 
     bool marked_as_dirty = Atomic::cmpxchg(Dirty, &_in_dirty_region_buffer[region], Clean) == Clean;
     if (marked_as_dirty) {
-      size_t allocated = Atomic::add(1, &_cur_dirty_region) - 1;
+      size_t allocated = Atomic::add(1u, &_cur_dirty_region) - 1;
       _dirty_region_buffer[allocated] = region;
     }
   }
--- a/src/share/vm/gc/shared/vmGCOperations.cpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/gc/shared/vmGCOperations.cpp	Tue Aug 29 17:17:58 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/vmGCOperations.hpp"
+#include "interpreter/oopMapCache.hpp"
 #include "logging/log.hpp"
 #include "memory/oopFactory.hpp"
 #include "runtime/handles.inline.hpp"
@@ -111,6 +112,9 @@
 
 void VM_GC_Operation::doit_epilogue() {
   assert(Thread::current()->is_Java_thread(), "just checking");
+  // Clean up old interpreter OopMap entries that were replaced
+  // during the GC thread root traversal.
+  OopMapCache::cleanup_old_entries();
   if (Universe::has_reference_pending_list()) {
     Heap_lock->notify_all();
   }
--- a/src/share/vm/interpreter/oopMapCache.cpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/interpreter/oopMapCache.cpp	Tue Aug 29 17:17:58 2017 +0200
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "interpreter/oopMapCache.hpp"
 #include "logging/log.hpp"
+#include "logging/logStream.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
@@ -37,6 +38,9 @@
   friend class OopMapCache;
   friend class VerifyClosure;
 
+ private:
+  OopMapCacheEntry* _next;
+
  protected:
   // Initialization
   void fill(const methodHandle& method, int bci);
@@ -54,8 +58,9 @@
 
  public:
   OopMapCacheEntry() : InterpreterOopMap() {
+    _next = NULL;
 #ifdef ASSERT
-     _resource_allocate_bit_mask = false;
+    _resource_allocate_bit_mask = false;
 #endif
   }
 };
@@ -263,23 +268,26 @@
 
   // Check if map is generated correctly
   // (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
-  if (TraceOopMapGeneration && Verbose) tty->print("Locals (%d): ", max_locals);
+  Log(interpreter, oopmap) logv;
+  LogStream st(logv.trace());
 
+  st.print("Locals (%d): ", max_locals);
   for(int i = 0; i < max_locals; i++) {
     bool v1 = is_oop(i)               ? true : false;
     bool v2 = vars[i].is_reference()  ? true : false;
     assert(v1 == v2, "locals oop mask generation error");
-    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
+    st.print("%d", v1 ? 1 : 0);
   }
+  st.cr();
 
-  if (TraceOopMapGeneration && Verbose) { tty->cr(); tty->print("Stack (%d): ", stack_top); }
+  st.print("Stack (%d): ", stack_top);
   for(int j = 0; j < stack_top; j++) {
     bool v1 = is_oop(max_locals + j)  ? true : false;
     bool v2 = stack[j].is_reference() ? true : false;
     assert(v1 == v2, "stack oop mask generation error");
-    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
+    st.print("%d", v1 ? 1 : 0);
   }
-  if (TraceOopMapGeneration && Verbose) tty->cr();
+  st.cr();
   return true;
 }
 
@@ -373,8 +381,6 @@
 
   // verify bit mask
   assert(verify_mask(vars, stack, max_locals, stack_top), "mask could not be verified");
-
-
 }
 
 void OopMapCacheEntry::flush() {
@@ -385,16 +391,6 @@
 
 // Implementation of OopMapCache
 
-#ifndef PRODUCT
-
-static long _total_memory_usage = 0;
-
-long OopMapCache::memory_usage() {
-  return _total_memory_usage;
-}
-
-#endif
-
 void InterpreterOopMap::resource_copy(OopMapCacheEntry* from) {
   assert(_resource_allocate_bit_mask,
     "Should not resource allocate the _bit_mask");
@@ -435,15 +431,11 @@
          ^ ((unsigned int) method->size_of_parameters() << 6);
 }
 
+OopMapCacheEntry* volatile OopMapCache::_old_entries = NULL;
 
-OopMapCache::OopMapCache() :
-  _mut(Mutex::leaf, "An OopMapCache lock", true)
-{
-  _array  = NEW_C_HEAP_ARRAY(OopMapCacheEntry, _size, mtClass);
-  // Cannot call flush for initialization, since flush
-  // will check if memory should be deallocated
-  for(int i = 0; i < _size; i++) _array[i].initialize();
-  NOT_PRODUCT(_total_memory_usage += sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
+OopMapCache::OopMapCache() {
+  _array  = NEW_C_HEAP_ARRAY(OopMapCacheEntry*, _size, mtClass);
+  for(int i = 0; i < _size; i++) _array[i] = NULL;
 }
 
 
@@ -452,112 +444,152 @@
   // Deallocate oop maps that are allocated out-of-line
   flush();
   // Deallocate array
-  NOT_PRODUCT(_total_memory_usage -= sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
-  FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array);
+  FREE_C_HEAP_ARRAY(OopMapCacheEntry*, _array);
 }
 
 OopMapCacheEntry* OopMapCache::entry_at(int i) const {
-  return &_array[i % _size];
+  return (OopMapCacheEntry*)OrderAccess::load_ptr_acquire(&(_array[i % _size]));
+}
+
+bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
+  return Atomic::cmpxchg_ptr (entry, &_array[i % _size], old) == old;
 }
 
 void OopMapCache::flush() {
-  for (int i = 0; i < _size; i++) _array[i].flush();
+  for (int i = 0; i < _size; i++) {
+    OopMapCacheEntry* entry = _array[i];
+    if (entry != NULL) {
+      _array[i] = NULL;  // no barrier, only called in OopMapCache destructor
+      entry->flush();
+      FREE_C_HEAP_OBJ(entry);
+    }
+  }
 }
 
 void OopMapCache::flush_obsolete_entries() {
-  for (int i = 0; i < _size; i++)
-    if (!_array[i].is_empty() && _array[i].method()->is_old()) {
+  assert(SafepointSynchronize::is_at_safepoint(), "called by RedefineClasses in a safepoint");
+  for (int i = 0; i < _size; i++) {
+    OopMapCacheEntry* entry = _array[i];
+    if (entry != NULL && !entry->is_empty() && entry->method()->is_old()) {
       // Cache entry is occupied by an old redefined method and we don't want
       // to pin it down so flush the entry.
       if (log_is_enabled(Debug, redefine, class, oopmap)) {
         ResourceMark rm;
-        log_debug(redefine, class, oopmap)
+        log_debug(redefine, class, interpreter, oopmap)
           ("flush: %s(%s): cached entry @%d",
-           _array[i].method()->name()->as_C_string(), _array[i].method()->signature()->as_C_string(), i);
+           entry->method()->name()->as_C_string(), entry->method()->signature()->as_C_string(), i);
       }
-      _array[i].flush();
+      _array[i] = NULL;
+      entry->flush();
+      FREE_C_HEAP_OBJ(entry);
     }
+  }
 }
 
+// Called by GC for thread root scan during a safepoint only.  The other interpreted frame oopmaps
+// are generated locally and not cached.
 void OopMapCache::lookup(const methodHandle& method,
                          int bci,
-                         InterpreterOopMap* entry_for) const {
-  MutexLocker x(&_mut);
+                         InterpreterOopMap* entry_for) {
+  assert(SafepointSynchronize::is_at_safepoint(), "called by GC in a safepoint");
+  int probe = hash_value_for(method, bci);
+  int i;
+  OopMapCacheEntry* entry = NULL;
 
-  OopMapCacheEntry* entry = NULL;
-  int probe = hash_value_for(method, bci);
+  if (log_is_enabled(Debug, interpreter, oopmap)) {
+    static int count = 0;
+    ResourceMark rm;
+    log_debug(interpreter, oopmap)
+          ("%d - Computing oopmap at bci %d for %s at hash %d", ++count, bci,
+           method()->name_and_sig_as_C_string(), probe);
+  }
 
   // Search hashtable for match
-  int i;
   for(i = 0; i < _probe_depth; i++) {
     entry = entry_at(probe + i);
-    if (entry->match(method, bci)) {
+    if (entry != NULL && !entry->is_empty() && entry->match(method, bci)) {
       entry_for->resource_copy(entry);
       assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
+      log_debug(interpreter, oopmap)("- found at hash %d", probe + i);
       return;
     }
   }
 
-  if (TraceOopMapGeneration) {
-    static int count = 0;
-    ResourceMark rm;
-    tty->print("%d - Computing oopmap at bci %d for ", ++count, bci);
-    method->print_value(); tty->cr();
-  }
+  // Entry is not in hashtable.
+  // Compute entry
 
-  // Entry is not in hashtable.
-  // Compute entry and return it
+  OopMapCacheEntry* tmp = NEW_C_HEAP_OBJ(OopMapCacheEntry, mtClass);
+  tmp->initialize();
+  tmp->fill(method, bci);
+  entry_for->resource_copy(tmp);
 
   if (method->should_not_be_cached()) {
     // It is either not safe or not a good idea to cache this Method*
     // at this time. We give the caller of lookup() a copy of the
     // interesting info via parameter entry_for, but we don't add it to
     // the cache. See the gory details in Method*.cpp.
-    compute_one_oop_map(method, bci, entry_for);
+    FREE_C_HEAP_OBJ(tmp);
     return;
   }
 
   // First search for an empty slot
   for(i = 0; i < _probe_depth; i++) {
-    entry  = entry_at(probe + i);
-    if (entry->is_empty()) {
-      entry->fill(method, bci);
-      entry_for->resource_copy(entry);
-      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
-      return;
+    entry = entry_at(probe + i);
+    if (entry == NULL) {
+      if (put_at(probe + i, tmp, NULL)) {
+        assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
+        return;
+      }
     }
   }
 
-  if (TraceOopMapGeneration) {
-    ResourceMark rm;
-    tty->print_cr("*** collision in oopmap cache - flushing item ***");
+  log_debug(interpreter, oopmap)("*** collision in oopmap cache - flushing item ***");
+
+  // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
+  // where the first entry in the collision array is replaced with the new one.
+  OopMapCacheEntry* old = entry_at(probe + 0);
+  if (put_at(probe + 0, tmp, old)) {
+    enqueue_for_cleanup(old);
+  } else {
+    enqueue_for_cleanup(tmp);
   }
 
-  // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
-  //entry_at(probe + _probe_depth - 1)->flush();
-  //for(i = _probe_depth - 1; i > 0; i--) {
-  //  // Coping entry[i] = entry[i-1];
-  //  OopMapCacheEntry *to   = entry_at(probe + i);
-  //  OopMapCacheEntry *from = entry_at(probe + i - 1);
-  //  to->copy(from);
-  // }
+  assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
+  return;
+}
 
-  assert(method->is_method(), "gaga");
+void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) {
+  bool success = false;
+  OopMapCacheEntry* head;
+  do {
+    head = _old_entries;
+    entry->_next = head;
+    success = Atomic::cmpxchg_ptr (entry, &_old_entries, head) == head;
+  } while (!success);
 
-  entry = entry_at(probe + 0);
-  entry->fill(method, bci);
+  if (log_is_enabled(Debug, interpreter, oopmap)) {
+    ResourceMark rm;
+    log_debug(interpreter, oopmap)("enqueue %s at bci %d for cleanup",
+                          entry->method()->name_and_sig_as_C_string(), entry->bci());
+  }
+}
 
-  // Copy the  newly cached entry to input parameter
-  entry_for->resource_copy(entry);
-
-  if (TraceOopMapGeneration) {
-    ResourceMark rm;
-    tty->print("Done with ");
-    method->print_value(); tty->cr();
+// This is called after GC threads are done and nothing is accessing the old_entries
+// list, so no synchronization needed.
+void OopMapCache::cleanup_old_entries() {
+  OopMapCacheEntry* entry = _old_entries;
+  _old_entries = NULL;
+  while (entry != NULL) {
+    if (log_is_enabled(Debug, interpreter, oopmap)) {
+      ResourceMark rm;
+      log_debug(interpreter, oopmap)("cleanup entry %s at bci %d",
+                          entry->method()->name_and_sig_as_C_string(), entry->bci());
+    }
+    OopMapCacheEntry* next = entry->_next;
+    entry->flush();
+    FREE_C_HEAP_OBJ(entry);
+    entry = next;
   }
-  assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
-
-  return;
 }
 
 void OopMapCache::compute_one_oop_map(const methodHandle& method, int bci, InterpreterOopMap* entry) {
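The concurrency pattern the rewritten cache relies on, rendered standalone with C++11 atomics instead of HotSpot's OrderAccess and Atomic wrappers (illustrative only): an entry is fully constructed before being published with a CAS, and readers load with acquire so they never observe a partially initialized entry.

#include <atomic>

struct Entry { int bci = -1; };       // fully filled in before publication

std::atomic<Entry*> slot{nullptr};

bool publish(Entry* e) {
  Entry* expected = nullptr;
  // Release on success pairs with the acquire load in consume(); on failure
  // another thread won the race and the caller must dispose of e, which
  // mirrors the enqueue_for_cleanup() fallback above.
  return slot.compare_exchange_strong(expected, e,
                                      std::memory_order_release,
                                      std::memory_order_relaxed);
}

Entry* consume() {
  return slot.load(std::memory_order_acquire);  // counterpart of entry_at()
}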
--- a/src/share/vm/interpreter/oopMapCache.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/interpreter/oopMapCache.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -144,17 +144,19 @@
 };
 
 class OopMapCache : public CHeapObj<mtClass> {
+ static OopMapCacheEntry* volatile _old_entries;
  private:
   enum { _size        = 32,     // Use fixed size for now
          _probe_depth = 3       // probe depth in case of collisions
   };
 
-  OopMapCacheEntry* _array;
+  OopMapCacheEntry* volatile * _array;
 
   unsigned int hash_value_for(const methodHandle& method, int bci) const;
   OopMapCacheEntry* entry_at(int i) const;
+  bool put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old);
 
-  mutable Mutex _mut;
+  static void enqueue_for_cleanup(OopMapCacheEntry* entry);
 
   void flush();
 
@@ -167,13 +169,11 @@
 
   // Returns the oopMap for (method, bci) in parameter "entry".
   // Returns false if an oop map was not found.
-  void lookup(const methodHandle& method, int bci, InterpreterOopMap* entry) const;
+  void lookup(const methodHandle& method, int bci, InterpreterOopMap* entry);
 
   // Compute an oop map without updating the cache or grabbing any locks (for debugging)
   static void compute_one_oop_map(const methodHandle& method, int bci, InterpreterOopMap* entry);
-
-  // Returns total no. of bytes allocated as part of OopMapCache's
-  static long memory_usage()                     PRODUCT_RETURN0;
+  static void cleanup_old_entries();
 };
 
 #endif // SHARE_VM_INTERPRETER_OOPMAPCACHE_HPP
--- a/src/share/vm/logging/logTag.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/logging/logTag.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -74,6 +74,7 @@
   LOG_TAG(iklass) \
   LOG_TAG(init) \
   LOG_TAG(inlining) \
+  LOG_TAG(interpreter) \
   LOG_TAG(itables) \
   LOG_TAG(jit) \
   LOG_TAG(jni) \
--- a/src/share/vm/memory/metaspaceShared.cpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/memory/metaspaceShared.cpp	Tue Aug 29 17:17:58 2017 +0200
@@ -1613,6 +1613,8 @@
     tty->print_cr("Dumping objects to open archive heap region ...");
     _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
     MetaspaceShared::dump_open_archive_heap_objects(_open_archive_heap_regions);
+
+    MetaspaceShared::destroy_archive_object_cache();
   }
 
   G1HeapVerifier::verify_archive_regions();
--- a/src/share/vm/memory/metaspaceShared.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/memory/metaspaceShared.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -29,6 +29,7 @@
 #include "memory/allocation.hpp"
 #include "memory/memRegion.hpp"
 #include "memory/virtualspace.hpp"
+#include "oops/oop.inline.hpp"
 #include "utilities/exceptions.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/resourceHash.hpp"
@@ -96,11 +97,16 @@
     return p1 == p2;
   }
   static unsigned obj_hash(oop const& p) {
-    unsigned hash = (unsigned)((uintptr_t)&p);
-    return hash ^ (hash >> LogMinObjAlignment);
+    assert(!p->mark()->has_bias_pattern(),
+           "this object should never have been locked");  // so identity_hash won't safepoin
+    unsigned hash = (unsigned)p->identity_hash();
+    return hash;
   }
   typedef ResourceHashtable<oop, oop,
-      MetaspaceShared::obj_hash, MetaspaceShared::obj_equals> ArchivedObjectCache;
+      MetaspaceShared::obj_hash,
+      MetaspaceShared::obj_equals,
+      15889, // prime number
+      ResourceObj::C_HEAP> ArchivedObjectCache;
   static ArchivedObjectCache* _archive_object_cache;
 
  public:
@@ -115,7 +121,10 @@
     NOT_CDS_JAVA_HEAP(return false;)
   }
   static void create_archive_object_cache() {
-    CDS_JAVA_HEAP_ONLY(_archive_object_cache = new ArchivedObjectCache(););
+    CDS_JAVA_HEAP_ONLY(_archive_object_cache = new (ResourceObj::C_HEAP, mtClass)ArchivedObjectCache(););
+  }
+  static void destroy_archive_object_cache() {
+    CDS_JAVA_HEAP_ONLY(delete _archive_object_cache; _archive_object_cache = NULL;);
   }
   static void fixup_mapped_heap_regions() NOT_CDS_JAVA_HEAP_RETURN;
 
--- a/src/share/vm/oops/method.cpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/oops/method.cpp	Tue Aug 29 17:17:58 2017 +0200
@@ -214,26 +214,14 @@
 }
 
 void Method::mask_for(int bci, InterpreterOopMap* mask) {
-
-  Thread* myThread    = Thread::current();
-  methodHandle h_this(myThread, this);
-#if defined(ASSERT) && !INCLUDE_JVMCI
-  bool has_capability = myThread->is_VM_thread() ||
-                        myThread->is_ConcurrentGC_thread() ||
-                        myThread->is_GC_task_thread();
-
-  if (!has_capability) {
-    if (!VerifyStack && !VerifyLastFrame) {
-      // verify stack calls this outside VM thread
-      warning("oopmap should only be accessed by the "
-              "VM, GC task or CMS threads (or during debugging)");
-      InterpreterOopMap local_mask;
-      method_holder()->mask_for(h_this, bci, &local_mask);
-      local_mask.print();
-    }
+  methodHandle h_this(Thread::current(), this);
+  // Only GC uses the OopMapCache during thread stack root scanning;
+  // any other uses generate an oopmap but do not save it in the cache.
+  if (Universe::heap()->is_gc_active()) {
+    method_holder()->mask_for(h_this, bci, mask);
+  } else {
+    OopMapCache::compute_one_oop_map(h_this, bci, mask);
   }
-#endif
-  method_holder()->mask_for(h_this, bci, mask);
   return;
 }
 
--- a/src/share/vm/oops/symbol.cpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/oops/symbol.cpp	Tue Aug 29 17:17:58 2017 +0200
@@ -219,7 +219,7 @@
 
 void Symbol::decrement_refcount() {
   if (_refcount >= 0) { // not a permanent symbol
-    jshort new_value = Atomic::add(-1, &_refcount);
+    short new_value = Atomic::add(short(-1), &_refcount);
 #ifdef ASSERT
     if (new_value == -1) { // we have transitioned from 0 -> -1
       print();
--- a/src/share/vm/opto/c2compiler.cpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/opto/c2compiler.cpp	Tue Aug 29 17:17:58 2017 +0200
@@ -283,7 +283,7 @@
   case vmIntrinsics::_weakCompareAndSetIntAcquire:
   case vmIntrinsics::_weakCompareAndSetIntRelease:
   case vmIntrinsics::_weakCompareAndSetInt:
-    if (!Matcher::match_rule_supported(Op_WeakCompareAndSwapL)) return false;
+    if (!Matcher::match_rule_supported(Op_WeakCompareAndSwapI)) return false;
     break;
 
   /* CompareAndSet, Byte: */
--- a/src/share/vm/runtime/atomic.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/runtime/atomic.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -26,11 +26,14 @@
 #define SHARE_VM_RUNTIME_ATOMIC_HPP
 
 #include "memory/allocation.hpp"
+#include "metaprogramming/conditional.hpp"
 #include "metaprogramming/enableIf.hpp"
 #include "metaprogramming/isIntegral.hpp"
+#include "metaprogramming/isPointer.hpp"
 #include "metaprogramming/isSame.hpp"
 #include "metaprogramming/primitiveConversions.hpp"
 #include "metaprogramming/removeCV.hpp"
+#include "metaprogramming/removePointer.hpp"
 #include "utilities/align.hpp"
 #include "utilities/macros.hpp"
 
@@ -82,11 +85,17 @@
 
   // Atomically add to a location. Returns updated value. add*() provide:
   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
-  inline static jshort   add    (jshort   add_value, volatile jshort*   dest);
-  inline static jint     add    (jint     add_value, volatile jint*     dest);
-  inline static size_t   add    (size_t   add_value, volatile size_t*   dest);
-  inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
-  inline static void*    add_ptr(intptr_t add_value, volatile void*     dest);
+
+  template<typename I, typename D>
+  inline static D add(I add_value, D volatile* dest);
+
+  inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+    return add(add_value, dest);
+  }
+
+  inline static void* add_ptr(intptr_t add_value, volatile void* dest) {
+    return add(add_value, reinterpret_cast<char* volatile*>(dest));
+  }
 
   // Atomically increment location. inc*() provide:
   // <fence> increment-dest <membar StoreLoad|StoreStore>
@@ -156,6 +165,74 @@
   // that is needed here.
   template<typename From, typename To> struct IsPointerConvertible;
 
+  // Dispatch handler for add.  Provides type-based validity checking
+  // and limited conversions around calls to the platform-specific
+  // implementation layer provided by PlatformAdd.
+  template<typename I, typename D, typename Enable = void>
+  struct AddImpl;
+
+  // Platform-specific implementation of add.  Support for sizes of 4
+  // bytes and (if different) pointer size bytes are required.  The
+  // class is a function object that must be default constructable,
+  // with these requirements:
+  //
+  // - dest is of type D*, where D is an integral or pointer type.
+  // - add_value is of type I, an integral type.
+  // - sizeof(I) == sizeof(D).
+  // - if D is an integral type, I == D.
+  // - platform_add is an object of type PlatformAdd<sizeof(D)>.
+  //
+  // Then
+  //   platform_add(add_value, dest)
+  // must be a valid expression, returning a result convertible to D.
+  //
+  // No definition is provided; all platforms must explicitly define
+  // this class and any needed specializations.
+  template<size_t byte_size> struct PlatformAdd;
+
+  // Helper base classes for defining PlatformAdd.  To use, define
+  // PlatformAdd or a specialization that derives from one of these,
+  // and include in the PlatformAdd definition the support function
+  // (described below) required by the base class.
+  //
+  // These classes implement the required function object protocol for
+  // PlatformAdd, using a support function template provided by the
+  // derived class.  Let add_value (of type I) and dest (of type D) be
+  // the arguments the object is called with.  If D is a pointer type
+  // P*, then let addend (of type I) be add_value * sizeof(P);
+  // otherwise, addend is add_value.
+  //
+  // FetchAndAdd requires the derived class to provide
+  //   fetch_and_add(addend, dest)
+  // atomically adding addend to the value of dest, and returning the
+  // old value.
+  //
+  // AddAndFetch requires the derived class to provide
+  //   add_and_fetch(addend, dest)
+  // atomically adding addend to the value of dest, and returning the
+  // new value.
+  //
+  // When D is a pointer type P*, both fetch_and_add and add_and_fetch
+  // treat it as if it were a uintptr_t; they do not perform any
+  // scaling of the addend, as that has already been done by the
+  // caller.
+public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
+  template<typename Derived> struct FetchAndAdd;
+  template<typename Derived> struct AddAndFetch;
+private:
+
+  // Support for platforms that implement some variants of add using a
+  // (typically out of line) non-template helper function.  The
+  // generic arguments passed to PlatformAdd need to be translated to
+  // the appropriate type for the helper function, the helper function
+  // invoked on the translated arguments, and the result translated
+  // back.  Type is the parameter / return type of the helper
+  // function.  No scaling of add_value is performed when D is a pointer
+  // type, so this function can be used to implement the support function
+  // required by AddAndFetch.
+  template<typename Type, typename Fn, typename I, typename D>
+  static D add_using_helper(Fn fn, I add_value, D volatile* dest);
+
   // Dispatch handler for cmpxchg.  Provides type-based validity
   // checking and limited conversions around calls to the
   // platform-specific implementation layer provided by
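
To make the PlatformAdd protocol above concrete: a platform port specializes the template and derives from one of the two helper bases. A sketch in the style of the gcc-based ports, assuming the usual __sync builtin; this specialization is illustrative and not part of this hunk:

    template<>
    struct Atomic::PlatformAdd<4>
      : Atomic::FetchAndAdd<Atomic::PlatformAdd<4> >
    {
      // FetchAndAdd's operator() scales pointer addends, calls this to get
      // the old value, then adds add_value once more to return the new one.
      template<typename I, typename D>
      D fetch_and_add(I add_value, D volatile* dest) const {
        return __sync_fetch_and_add(dest, add_value);
      }
    };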
@@ -219,6 +296,22 @@
   static const bool value = (sizeof(yes) == sizeof(test(test_value)));
 };
 
+// Define FetchAndAdd and AddAndFetch helper classes before including
+// platform file, which may use these as base classes, requiring they
+// be complete.
+
+template<typename Derived>
+struct Atomic::FetchAndAdd VALUE_OBJ_CLASS_SPEC {
+  template<typename I, typename D>
+  D operator()(I add_value, D volatile* dest) const;
+};
+
+template<typename Derived>
+struct Atomic::AddAndFetch VALUE_OBJ_CLASS_SPEC {
+  template<typename I, typename D>
+  D operator()(I add_value, D volatile* dest) const;
+};
+
 // Define the class before including platform file, which may specialize
 // the operator definition.  No generic definition of specializations
 // of the operator template are provided, nor are there any generic
@@ -255,8 +348,93 @@
 #error size_t is not WORD_SIZE, interesting platform, but missing implementation here
 #endif
 
-inline size_t Atomic::add(size_t add_value, volatile size_t* dest) {
-  return (size_t) add_ptr((intptr_t) add_value, (volatile intptr_t*) dest);
+template<typename I, typename D>
+inline D Atomic::add(I add_value, D volatile* dest) {
+  return AddImpl<I, D>()(add_value, dest);
+}
+
+template<typename I, typename D>
+struct Atomic::AddImpl<
+  I, D,
+  typename EnableIf<IsIntegral<I>::value &&
+                    IsIntegral<D>::value &&
+                    (sizeof(I) <= sizeof(D)) &&
+                    (IsSigned<I>::value == IsSigned<D>::value)>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  D operator()(I add_value, D volatile* dest) const {
+    D addend = add_value;
+    return PlatformAdd<sizeof(D)>()(addend, dest);
+  }
+};
+
+template<typename I, typename P>
+struct Atomic::AddImpl<
+  I, P*,
+  typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  P* operator()(I add_value, P* volatile* dest) const {
+    STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
+    STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
+    typedef typename Conditional<IsSigned<I>::value,
+                                 intptr_t,
+                                 uintptr_t>::type CI;
+    CI addend = add_value;
+    return PlatformAdd<sizeof(P*)>()(addend, dest);
+  }
+};
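
One consequence of the pointer specialization worth spelling out: the addend counts elements, not bytes, because the helper bases scale it by sizeof(P). A usage sketch with hypothetical variables:

    static int buffer[64];
    static int* volatile cursor = buffer;
    static volatile size_t hits = 0;

    // Pointer path: advances cursor by 4 ints (16 bytes) and returns the
    // updated pointer, exactly like cursor += 4 done atomically.
    int* claimed_end = Atomic::add(4, &cursor);

    // Integral path: I and D must agree in signedness, hence size_t(1).
    size_t now = Atomic::add(size_t(1), &hits);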
+
+// Most platforms do not support atomic add on a 2-byte value. However,
+// if the value occupies the most significant 16 bits of an aligned 32-bit
+// word, then we can do this with an atomic add of (add_value << 16)
+// to the 32-bit word.
+//
+// The least significant parts of this 32-bit word will never be affected, even
+// in case of overflow/underflow.
+//
+// Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
+template<>
+struct Atomic::AddImpl<jshort, jshort> VALUE_OBJ_CLASS_SPEC {
+  jshort operator()(jshort add_value, jshort volatile* dest) const {
+#ifdef VM_LITTLE_ENDIAN
+    assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
+    jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
+#else
+    assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
+    jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
+#endif
+    return (jshort)(new_value >> 16); // preserves sign
+  }
+};
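
The specialization above only works if the short really does occupy the upper half of an aligned 32-bit word. HotSpot arranges that with ATOMIC_SHORT_PAIR from macros.hpp; Symbol's refcount, touched later in this changeset, is laid out roughly like this:

    // Paraphrased from symbol.hpp: the macro orders the two shorts by
    // endianness so _refcount lands in the high half of the 32-bit word.
    ATOMIC_SHORT_PAIR(
      volatile short _refcount,  // updated via Atomic::add(short(-1), ...)
      unsigned short _length     // never updated atomically
    );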
+
+template<typename Derived>
+template<typename I, typename D>
+inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest) const {
+  I addend = add_value;
+  // If D is a pointer type P*, scale by sizeof(P).
+  if (IsPointer<D>::value) {
+    addend *= sizeof(typename RemovePointer<D>::type);
+  }
+  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest);
+  return old + add_value;
+}
+
+template<typename Derived>
+template<typename I, typename D>
+inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest) const {
+  // If D is a pointer type P*, scale by sizeof(P).
+  if (IsPointer<D>::value) {
+    add_value *= sizeof(typename RemovePointer<D>::type);
+  }
+  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest);
+}
+
+template<typename Type, typename Fn, typename I, typename D>
+inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) {
+  return PrimitiveConversions::cast<D>(
+    fn(PrimitiveConversions::cast<Type>(add_value),
+       reinterpret_cast<Type volatile*>(dest)));
 }
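
add_using_helper exists for ports whose primitive is an out-of-line function rather than a compiler intrinsic. A sketch in the style of the Solaris and Windows ports; the stub name below is hypothetical:

    // Hypothetical out-of-line stub supplied by a platform .il/.asm file.
    extern "C" jint _Atomic_add(jint add_value, volatile jint* dest);

    template<>
    struct Atomic::PlatformAdd<4>
      : Atomic::AddAndFetch<Atomic::PlatformAdd<4> >
    {
      template<typename I, typename D>
      D add_and_fetch(I add_value, D volatile* dest) const {
        // Casts the arguments to jint, calls the stub, casts the result back.
        return add_using_helper<jint>(_Atomic_add, add_value, dest);
      }
    };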
 
 inline void Atomic::inc(volatile size_t* dest) {
@@ -413,32 +591,12 @@
   return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
 }
 
-inline jshort Atomic::add(jshort add_value, volatile jshort* dest) {
-  // Most platforms do not support atomic add on a 2-byte value. However,
-  // if the value occupies the most significant 16 bits of an aligned 32-bit
-  // word, then we can do this with an atomic add of (add_value << 16)
-  // to the 32-bit word.
-  //
-  // The least significant parts of this 32-bit word will never be affected, even
-  // in case of overflow/underflow.
-  //
-  // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
-#ifdef VM_LITTLE_ENDIAN
-  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
-  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
-#else
-  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
-  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
-#endif
-  return (jshort)(new_value >> 16); // preserves sign
-}
-
 inline void Atomic::inc(volatile jshort* dest) {
-  (void)add(1, dest);
+  (void)add(jshort(1), dest);
 }
 
 inline void Atomic::dec(volatile jshort* dest) {
-  (void)add(-1, dest);
+  (void)add(jshort(-1), dest);
 }
 
 #endif // SHARE_VM_RUNTIME_ATOMIC_HPP
--- a/src/share/vm/runtime/deoptimization.cpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/runtime/deoptimization.cpp	Tue Aug 29 17:17:58 2017 +0200
@@ -1373,6 +1373,30 @@
 
 }
 
+#if INCLUDE_JVMCI
+address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
+  // there is no exception handler for this pc => deoptimize
+  cm->make_not_entrant();
+
+  // Use Deoptimization::deoptimize for all of its side-effects:
+  // revoking biases of monitors, gathering traps statistics, logging...
+  // it also patches the return pc but we do not care about that
+  // since we return a continuation to the deopt_blob below.
+  JavaThread* thread = JavaThread::current();
+  RegisterMap reg_map(thread, UseBiasedLocking);
+  frame runtime_frame = thread->last_frame();
+  frame caller_frame = runtime_frame.sender(&reg_map);
+  assert(caller_frame.cb()->as_nmethod_or_null() == cm, "expect top frame nmethod");
+  Deoptimization::deoptimize(thread, caller_frame, &reg_map, Deoptimization::Reason_not_compiled_exception_handler);
+
+  MethodData* trap_mdo = get_method_data(thread, cm->method(), true);
+  if (trap_mdo != NULL) {
+    trap_mdo->inc_trap_count(Deoptimization::Reason_not_compiled_exception_handler);
+  }
+
+  return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
+}
+#endif
 
 void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason) {
   assert(thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
--- a/src/share/vm/runtime/deoptimization.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/runtime/deoptimization.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -136,6 +136,10 @@
   static void deoptimize(JavaThread* thread, frame fr, RegisterMap *reg_map);
   static void deoptimize(JavaThread* thread, frame fr, RegisterMap *reg_map, DeoptReason reason);
 
+#if INCLUDE_JVMCI
+  static address deoptimize_for_missing_exception_handler(CompiledMethod* cm);
+#endif
+
   private:
   // Does the actual work for deoptimizing a single frame
   static void deoptimize_single_frame(JavaThread* thread, frame fr, DeoptReason reason);
--- a/src/share/vm/runtime/globals.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/runtime/globals.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -715,9 +715,6 @@
   product(bool, PrintVMQWaitTime, false,                                    \
           "Print out the waiting time in VM operation queue")               \
                                                                             \
-  develop(bool, TraceOopMapGeneration, false,                               \
-          "Show OopMapGeneration")                                          \
-                                                                            \
   product(bool, MethodFlushing, true,                                       \
           "Reclamation of zombie and not-entrant methods")                  \
                                                                             \
--- a/src/share/vm/runtime/memprofiler.cpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/runtime/memprofiler.cpp	Tue Aug 29 17:17:58 2017 +0200
@@ -129,7 +129,7 @@
   fprintf(_log_fp, UINTX_FORMAT_W(6) "," UINTX_FORMAT_W(6) ",%6ld\n",
           handles_memory_usage / K,
           resource_memory_usage / K,
-          OopMapCache::memory_usage() / K);
+          0L);
   fflush(_log_fp);
 }
 
--- a/src/share/vm/runtime/sharedRuntime.cpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Tue Aug 29 17:17:58 2017 +0200
@@ -638,20 +638,7 @@
     if (t != NULL) {
       return cm->code_begin() + t->pco();
     } else {
-      // there is no exception handler for this pc => deoptimize
-      cm->make_not_entrant();
-
-      // Use Deoptimization::deoptimize for all of its side-effects:
-      // revoking biases of monitors, gathering traps statistics, logging...
-      // it also patches the return pc but we do not care about that
-      // since we return a continuation to the deopt_blob below.
-      JavaThread* thread = JavaThread::current();
-      RegisterMap reg_map(thread, UseBiasedLocking);
-      frame runtime_frame = thread->last_frame();
-      frame caller_frame = runtime_frame.sender(&reg_map);
-      Deoptimization::deoptimize(thread, caller_frame, &reg_map, Deoptimization::Reason_not_compiled_exception_handler);
-
-      return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
+      return Deoptimization::deoptimize_for_missing_exception_handler(cm);
     }
   }
 #endif // INCLUDE_JVMCI
--- a/src/share/vm/runtime/synchronizer.cpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/runtime/synchronizer.cpp	Tue Aug 29 17:17:58 2017 +0200
@@ -1407,7 +1407,6 @@
       assert(inf->header()->is_neutral(), "invariant");
       assert(inf->object() == object, "invariant");
       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
-      event.cancel(); // let's not post an inflation event, unless we did the deed ourselves
       return inf;
     }
 
--- a/src/share/vm/runtime/vframe.cpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/runtime/vframe.cpp	Tue Aug 29 17:17:58 2017 +0200
@@ -396,14 +396,7 @@
 StackValueCollection* interpretedVFrame::stack_data(bool expressions) const {
 
   InterpreterOopMap oop_mask;
-  // oopmap for current bci
-  if ((TraceDeoptimization && Verbose) JVMCI_ONLY( || PrintDeoptimizationDetails)) {
-    methodHandle m_h(Thread::current(), method());
-    OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
-  } else {
-    method()->mask_for(bci(), &oop_mask);
-  }
-
+  method()->mask_for(bci(), &oop_mask);
   const int mask_len = oop_mask.number_of_entries();
 
   // If the method is native, method()->max_locals() is not telling the truth.
--- a/src/share/vm/services/mallocTracker.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/services/mallocTracker.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -53,7 +53,7 @@
   }
 
   inline void allocate(size_t sz) {
-    Atomic::add(1, &_count);
+    Atomic::inc(&_count);
     if (sz > 0) {
       Atomic::add(sz, &_size);
       DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size));
@@ -64,7 +64,7 @@
   inline void deallocate(size_t sz) {
     assert(_count > 0, "Nothing allocated yet");
     assert(_size >= sz, "deallocation > allocated");
-    Atomic::add(-1, &_count);
+    Atomic::dec(&_count);
     if (sz > 0) {
       // unary minus operator applied to unsigned type, result still unsigned
       #pragma warning(suppress: 4146)
@@ -74,7 +74,7 @@
 
   inline void resize(long sz) {
     if (sz != 0) {
-      Atomic::add(sz, &_size);
+      Atomic::add(size_t(sz), &_size);
       DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
     }
   }
--- a/src/share/vm/trace/traceDataTypes.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/trace/traceDataTypes.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #include <stddef.h>
 
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/ticks.hpp"
 
 enum {
   CONTENT_TYPE_NONE             = 0,
@@ -54,10 +55,11 @@
   NUM_RESERVED_EVENTS = JVM_CONTENT_TYPES_END
 };
 
-typedef enum ReservedEvent ReservedEvent;
-
 typedef u8 traceid;
 
+class ClassLoaderData;
+class Klass;
+class Method;
 class ModuleEntry;
 class PackageEntry;
 class Symbol;
--- a/src/share/vm/trace/traceEvent.hpp	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/trace/traceEvent.hpp	Tue Aug 29 17:17:58 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_TRACE_TRACEEVENT_HPP
 #define SHARE_VM_TRACE_TRACEEVENT_HPP
 
+#include "trace/traceTime.hpp"
 #include "utilities/macros.hpp"
 
 enum EventStartTime {
@@ -34,25 +35,18 @@
 
 #if INCLUDE_TRACE
 #include "trace/traceBackend.hpp"
-#include "trace/tracing.hpp"
 #include "tracefiles/traceEventIds.hpp"
-#include "tracefiles/traceTypes.hpp"
 #include "utilities/ticks.hpp"
 
 template<typename T>
-class TraceEvent : public StackObj {
+class TraceEvent {
  private:
   bool _started;
-#ifdef ASSERT
-  bool _committed;
-  bool _cancelled;
- protected:
-  bool _ignore_check;
-#endif
 
  protected:
   jlong _startTime;
   jlong _endTime;
+  DEBUG_ONLY(bool _committed;)
 
   void set_starttime(const TracingTime& time) {
     _startTime = time;
@@ -67,10 +61,7 @@
     _endTime(0),
     _started(false)
 #ifdef ASSERT
-    ,
-    _committed(false),
-    _cancelled(false),
-    _ignore_check(false)
+    , _committed(false)
 #endif
   {
     if (T::is_enabled()) {
@@ -100,10 +91,9 @@
 
   void commit() {
     if (!should_commit()) {
-      DEBUG_ONLY(cancel());
       return;
     }
-    assert(!_cancelled, "Committing an event that has already been cancelled");
+    assert(!_committed, "event already committed");
     if (_startTime == 0) {
       static_cast<T*>(this)->set_starttime(Tracing::time());
     } else if (_endTime == 0) {
@@ -111,8 +101,8 @@
     }
     if (static_cast<T*>(this)->should_write()) {
       static_cast<T*>(this)->writeEvent();
+      DEBUG_ONLY(_committed = true;)
     }
-    DEBUG_ONLY(set_commited());
   }
 
   static TraceEventId id() {
@@ -134,32 +124,6 @@
   static bool has_stacktrace() {
     return T::hasStackTrace;
   }
-
-  void cancel() {
-    assert(!_committed && !_cancelled,
-      "event was already committed/cancelled");
-    DEBUG_ONLY(_cancelled = true);
-  }
-
-  ~TraceEvent() {
-    if (_started) {
-      assert(_ignore_check || _committed || _cancelled,
-        "event was not committed/cancelled");
-    }
-  }
-
-#ifdef ASSERT
- protected:
-  void ignoreCheck() {
-    _ignore_check = true;
-  }
-
- private:
-  void set_commited() {
-    assert(!_committed, "event has already been committed");
-    _committed = true;
-  }
-#endif // ASSERT
 };
 
 #endif // INCLUDE_TRACE
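
With cancel() and the destructor checks removed, the intended usage of a trace event shrinks to construct, test, commit. A sketch with a hypothetical generated event type:

    EventExampleTrap event;  // stand-in for a generated TraceEvent subclass
    if (event.should_commit()) {
      // ... set event fields here ...
      event.commit();  // the debug-only _committed flag catches double commits
    }
    // Early-out paths no longer need a cancel() call.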
--- a/src/share/vm/trace/traceEventClasses.xsl	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/trace/traceEventClasses.xsl	Tue Aug 29 17:17:58 2017 +0200
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="utf-8"?>
 <!--
- Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
@@ -37,10 +37,10 @@
 // INCLUDE_TRACE
 
 #include "tracefiles/traceTypes.hpp"
+#include "utilities/macros.hpp"
+
+#if INCLUDE_TRACE
 #include "trace/traceEvent.hpp"
-#include "utilities/macros.hpp"
-#include "utilities/ticks.hpp"
-#if INCLUDE_TRACE
 #include "trace/traceStream.hpp"
 #include "utilities/ostream.hpp"
 
@@ -57,7 +57,6 @@
   bool should_commit() const { return false; }
   static bool is_enabled() { return false; }
   void commit() {}
-  void cancel() {}
 };
 
   <xsl:apply-templates select="trace/events/struct" mode="empty"/>
--- a/src/share/vm/trace/traceTypes.xsl	Mon Aug 28 21:46:13 2017 +0200
+++ b/src/share/vm/trace/traceTypes.xsl	Tue Aug 29 17:17:58 2017 +0200
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="utf-8"?>
 <!--
- Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
@@ -32,10 +32,7 @@
 #ifndef TRACEFILES_TRACETYPES_HPP
 #define TRACEFILES_TRACETYPES_HPP
 
-#include "oops/symbol.hpp"
 #include "trace/traceDataTypes.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/ticks.hpp"
 
 enum JVMContentType {
   _not_a_content_type = (JVM_CONTENT_TYPES_START - 1),
--- a/test/compiler/jvmci/JVM_GetJVMCIRuntimeTest.java	Mon Aug 28 21:46:13 2017 +0200
+++ b/test/compiler/jvmci/JVM_GetJVMCIRuntimeTest.java	Tue Aug 29 17:17:58 2017 +0200
@@ -30,21 +30,21 @@
  * @modules jdk.internal.vm.ci/jdk.vm.ci.runtime
  * @run main/othervm -XX:+UnlockExperimentalVMOptions
  *      -Dcompiler.jvmci.JVM_GetJVMCIRuntimeTest.positive=true
- *      -XX:+EnableJVMCI -Djvmci.Compiler=null
+ *      -XX:+EnableJVMCI
  *      compiler.jvmci.JVM_GetJVMCIRuntimeTest
  * @run main/othervm -XX:+UnlockExperimentalVMOptions
  *      -Dcompiler.jvmci.JVM_GetJVMCIRuntimeTest.positive=false
- *      -XX:-EnableJVMCI
+ *      -XX:-EnableJVMCI -XX:-UseJVMCICompiler
  *      compiler.jvmci.JVM_GetJVMCIRuntimeTest
  * @run main/othervm -XX:+UnlockExperimentalVMOptions
  *      -Dcompiler.jvmci.JVM_GetJVMCIRuntimeTest.positive=true
  *      -Dcompiler.jvmci.JVM_GetJVMCIRuntimeTest.threaded=true
- *      -XX:+EnableJVMCI -Djvmci.Compiler=null
+ *      -XX:+EnableJVMCI
  *      compiler.jvmci.JVM_GetJVMCIRuntimeTest
  * @run main/othervm -XX:+UnlockExperimentalVMOptions
  *      -Dcompiler.jvmci.JVM_GetJVMCIRuntimeTest.positive=false
  *      -Dcompiler.jvmci.JVM_GetJVMCIRuntimeTest.threaded=true
- *      -XX:-EnableJVMCI
+ *      -XX:-EnableJVMCI -XX:-UseJVMCICompiler
  *      compiler.jvmci.JVM_GetJVMCIRuntimeTest
 
  */
--- a/test/compiler/jvmci/SecurityRestrictionsTest.java	Mon Aug 28 21:46:13 2017 +0200
+++ b/test/compiler/jvmci/SecurityRestrictionsTest.java	Tue Aug 29 17:17:58 2017 +0200
@@ -43,11 +43,11 @@
  *      compiler.jvmci.SecurityRestrictionsTest
  *      ALL_PERM
  * @run main/othervm -XX:+UnlockExperimentalVMOptions
- *      -XX:+EnableJVMCI
+ *      -XX:+EnableJVMCI -XX:-UseJVMCICompiler
  *      compiler.jvmci.SecurityRestrictionsTest
  *      NO_JVMCI_ACCESS_PERM
  * @run main/othervm -XX:+UnlockExperimentalVMOptions
- *      -XX:-EnableJVMCI
+ *      -XX:-EnableJVMCI -XX:-UseJVMCICompiler
  *      compiler.jvmci.SecurityRestrictionsTest
  *      NO_JVMCI
  */
--- a/test/compiler/jvmci/compilerToVM/DisassembleCodeBlobTest.java	Mon Aug 28 21:46:13 2017 +0200
+++ b/test/compiler/jvmci/compilerToVM/DisassembleCodeBlobTest.java	Tue Aug 29 17:17:58 2017 +0200
@@ -61,7 +61,6 @@
                 = new DisassembleCodeBlobTest();
         List<CompileCodeTestCase> testCases
                 = CompileCodeTestCase.generate(/* bci = */ -1);
-        testCases.addAll(CompileCodeTestCase.generate(/* bci = */ 0));
         testCases.forEach(test::check);
         testCases.stream().findAny().ifPresent(test::checkZero);
         test.checkNull();
--- a/test/compiler/jvmci/compilerToVM/HasCompiledCodeForOSRTest.java	Mon Aug 28 21:46:13 2017 +0200
+++ b/test/compiler/jvmci/compilerToVM/HasCompiledCodeForOSRTest.java	Tue Aug 29 17:17:58 2017 +0200
@@ -39,7 +39,7 @@
  * @run main/othervm -Xbootclasspath/a:.
  *                   -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *                   -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI
- *                   -XX:-BackgroundCompilation -Djvmci.Compiler=null
+ *                   -XX:-BackgroundCompilation
  *                   compiler.jvmci.compilerToVM.HasCompiledCodeForOSRTest
  */
 
--- a/test/compiler/jvmci/compilerToVM/InvalidateInstalledCodeTest.java	Mon Aug 28 21:46:13 2017 +0200
+++ b/test/compiler/jvmci/compilerToVM/InvalidateInstalledCodeTest.java	Tue Aug 29 17:17:58 2017 +0200
@@ -42,7 +42,6 @@
  * @run main/othervm -Xbootclasspath/a:.
  *                   -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *                   -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI
- *                   -Djvmci.Compiler=null
  *                   compiler.jvmci.compilerToVM.InvalidateInstalledCodeTest
  */
 
--- a/test/compiler/jvmci/compilerToVM/IsMatureVsReprofileTest.java	Mon Aug 28 21:46:13 2017 +0200
+++ b/test/compiler/jvmci/compilerToVM/IsMatureVsReprofileTest.java	Tue Aug 29 17:17:58 2017 +0200
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires vm.jvmci
+ * @requires vm.jvmci & (vm.opt.TieredStopAtLevel == null | vm.opt.TieredStopAtLevel == 4)
  * @library / /test/lib
  *          ../common/patches
  * @modules java.base/jdk.internal.misc
@@ -38,7 +38,6 @@
  *                                sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *     -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI -Xbatch
- *     -Djvmci.Compiler=null
  *     compiler.jvmci.compilerToVM.IsMatureVsReprofileTest
  */
 
--- a/test/compiler/jvmci/compilerToVM/JVM_RegisterJVMCINatives.java	Mon Aug 28 21:46:13 2017 +0200
+++ b/test/compiler/jvmci/compilerToVM/JVM_RegisterJVMCINatives.java	Tue Aug 29 17:17:58 2017 +0200
@@ -35,9 +35,8 @@
  *      compiler.jvmci.compilerToVM.JVM_RegisterJVMCINatives
  * @run main/othervm -XX:+UnlockExperimentalVMOptions
  *      -Dcompiler.jvmci.compilerToVM.JVM_RegisterJVMCINatives.positive=false
- *      -XX:-EnableJVMCI
+ *      -XX:-EnableJVMCI -XX:-UseJVMCICompiler
  *      compiler.jvmci.compilerToVM.JVM_RegisterJVMCINatives
-
  */
 
 package compiler.jvmci.compilerToVM;
--- a/test/compiler/jvmci/compilerToVM/MaterializeVirtualObjectTest.java	Mon Aug 28 21:46:13 2017 +0200
+++ b/test/compiler/jvmci/compilerToVM/MaterializeVirtualObjectTest.java	Tue Aug 29 17:17:58 2017 +0200
@@ -49,7 +49,6 @@
  *                   -XX:+DoEscapeAnalysis -XX:-UseCounterDecay
  *                   -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.materializeFirst=true
  *                   -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.invalidate=false
- *                   -Djvmci.Compiler=null
  *                   compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest
  * @run main/othervm -Xmixed -Xbatch -Xbootclasspath/a:.
  *                   -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
@@ -61,7 +60,6 @@
  *                   -XX:+DoEscapeAnalysis -XX:-UseCounterDecay
  *                   -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.materializeFirst=false
  *                   -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.invalidate=false
- *                   -Djvmci.Compiler=null
  *                   compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest
  * @run main/othervm -Xmixed -Xbatch -Xbootclasspath/a:.
  *                   -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
@@ -73,7 +71,6 @@
  *                   -XX:+DoEscapeAnalysis -XX:-UseCounterDecay
  *                   -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.materializeFirst=true
  *                   -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.invalidate=true
- *                   -Djvmci.Compiler=null
  *                   compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest
  * @run main/othervm -Xmixed -Xbatch -Xbootclasspath/a:.
  *                   -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
@@ -85,7 +82,6 @@
  *                   -XX:+DoEscapeAnalysis -XX:-UseCounterDecay
  *                   -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.materializeFirst=false
  *                   -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.invalidate=true
- *                   -Djvmci.Compiler=null
  *                   compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest
  */
 
--- a/test/compiler/jvmci/compilerToVM/ReprofileTest.java	Mon Aug 28 21:46:13 2017 +0200
+++ b/test/compiler/jvmci/compilerToVM/ReprofileTest.java	Tue Aug 29 17:17:58 2017 +0200
@@ -24,7 +24,7 @@
 /**
  * @test
  * @bug 8136421
- * @requires vm.jvmci & (vm.opt.TieredStopAtLevel == null | vm.opt.TieredStopAtLevel == 3)
+ * @requires vm.jvmci & (vm.opt.TieredStopAtLevel == null | vm.opt.TieredStopAtLevel == 4)
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
@@ -40,7 +40,7 @@
  * @run main/othervm -Xbootclasspath/a:.
  *                   -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *                   -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI
- *                   -Xmixed -Xbatch -Djvmci.Compiler=null
+ *                   -Xmixed -Xbatch
  *                   compiler.jvmci.compilerToVM.ReprofileTest
  */
 
--- a/test/compiler/jvmci/events/JvmciShutdownEventTest.java	Mon Aug 28 21:46:13 2017 +0200
+++ b/test/compiler/jvmci/events/JvmciShutdownEventTest.java	Tue Aug 29 17:17:58 2017 +0200
@@ -74,7 +74,7 @@
                 "Unexpected exit code with -EnableJVMCI",
                 "Unexpected output with -EnableJVMCI", ExitCode.OK,
                 addTestVMOptions, "-XX:+UnlockExperimentalVMOptions",
-                "-XX:-EnableJVMCI", "-Xbootclasspath/a:.",
+                "-XX:-EnableJVMCI", "-XX:-UseJVMCICompiler", "-Xbootclasspath/a:.",
                 JvmciShutdownEventListener.class.getName()
         );
     }