changeset 11905:8dcab338ec58

Merge
author iveresov
date Fri, 26 Aug 2016 14:47:52 -0700
parents 119a2a3cc29b b94f7c960bc4
children 6ac1e2e55eaa fbb492c97959
files src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp src/os_cpu/bsd_x86/vm/atomic_bsd_x86.inline.hpp src/os_cpu/bsd_zero/vm/atomic_bsd_zero.inline.hpp src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.inline.hpp src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp src/os_cpu/windows_x86/vm/atomic_windows_x86.inline.hpp src/share/vm/c1/c1_GraphBuilder.cpp src/share/vm/runtime/atomic.inline.hpp src/share/vm/runtime/globals.hpp test/serviceability/sa/jmap-hashcode/Test8028623.java
diffstat 231 files changed, 6107 insertions(+), 4127 deletions(-)
--- a/.hgtags	Thu Aug 25 02:10:03 2016 -0700
+++ b/.hgtags	Fri Aug 26 14:47:52 2016 -0700
@@ -535,3 +535,4 @@
 7d54c7056328b6a2bf4877458b8f4d8cd870f93b jdk-9+130
 943bf73b49c33c2d7cbd796f6a4ae3c7a00ae932 jdk-9+131
 713951c08aa26813375175c2ab6cc99ff2a56903 jdk-9+132
+a25e0fb6033245ab075136e744d362ce765464cd jdk-9+133
--- a/src/cpu/aarch64/vm/vm_version_aarch64.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/cpu/aarch64/vm/vm_version_aarch64.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -28,6 +28,7 @@
 
 #include "runtime/globals_extension.hpp"
 #include "runtime/vm_version.hpp"
+#include "utilities/sizes.hpp"
 
 class VM_Version : public Abstract_VM_Version {
   friend class JVMCIVMStructs;
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -37,7 +37,7 @@
 #include "prims/jvmtiExport.hpp"
 #include "prims/jvmtiThreadState.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/G1CollectedHeap.java	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/G1CollectedHeap.java	Fri Aug 26 14:47:52 2016 -0700
@@ -24,6 +24,7 @@
 
 package sun.jvm.hotspot.gc.g1;
 
+import java.io.PrintStream;
 import java.util.Iterator;
 import java.util.Observable;
 import java.util.Observer;
@@ -125,6 +126,15 @@
         return CollectedHeapName.G1_COLLECTED_HEAP;
     }
 
+    @Override
+    public void printOn(PrintStream tty) {
+        MemRegion mr = reservedRegion();
+
+        tty.print("garbage-first heap");
+        tty.print(" [" + mr.start() + ", " + mr.end() + "]");
+        tty.println(" region size " + (HeapRegion.grainBytes() / 1024) + "K");
+    }
+
     public G1CollectedHeap(Address addr) {
         super(addr);
     }
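
The new printOn override lets the serviceability agent mirror the VM's own heap banner for G1. With a 4 GB reservation and, say, a 1 MB region size, the output would look something like this (addresses hypothetical):

    garbage-first heap [0x00000000c0000000, 0x00000001c0000000] region size 1024K
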
--- a/src/os/aix/vm/os_aix.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/os/aix/vm/os_aix.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -52,7 +52,7 @@
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/extendedPC.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -3800,10 +3800,6 @@
   return ::stat(pathbuf, sbuf);
 }
 
-bool os::check_heap(bool force) {
-  return true;
-}
-
 // Is a (classpath) directory empty?
 bool os::dir_is_empty(const char* path) {
   DIR *dir = NULL;
--- a/src/os/bsd/vm/os_bsd.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/os/bsd/vm/os_bsd.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -42,7 +42,7 @@
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/extendedPC.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -3780,11 +3780,6 @@
   return diff;
 }
 
-
-bool os::check_heap(bool force) {
-  return true;
-}
-
 // Is a (classpath) directory empty?
 bool os::dir_is_empty(const char* path) {
   DIR *dir = NULL;
--- a/src/os/linux/vm/os_linux.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -42,7 +42,7 @@
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/extendedPC.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -5174,10 +5174,6 @@
   return ::stat(pathbuf, sbuf);
 }
 
-bool os::check_heap(bool force) {
-  return true;
-}
-
 // Is a (classpath) directory empty?
 bool os::dir_is_empty(const char* path) {
   DIR *dir = NULL;
--- a/src/os/solaris/vm/os_solaris.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/os/solaris/vm/os_solaris.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -42,7 +42,7 @@
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/extendedPC.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -4589,10 +4589,6 @@
   }
 }
 
-// OS interface.
-
-bool os::check_heap(bool force) { return true; }
-
 // Is a (classpath) directory empty?
 bool os::dir_is_empty(const char* path) {
   DIR *dir = NULL;
--- a/src/os/windows/vm/os_windows.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/os/windows/vm/os_windows.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -45,7 +45,7 @@
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/extendedPC.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -5258,75 +5258,6 @@
   }
 }
 
-//--------------------------------------------------------------------------------------------------
-// Non-product code
-
-static int mallocDebugIntervalCounter = 0;
-static int mallocDebugCounter = 0;
-
-// For debugging possible bugs inside HeapWalk (a ring buffer)
-#define SAVE_COUNT 8
-static PROCESS_HEAP_ENTRY saved_heap_entries[SAVE_COUNT];
-static int saved_heap_entry_index;
-
-bool os::check_heap(bool force) {
-  if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
-  if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
-    // Note: HeapValidate executes two hardware breakpoints when it finds something
-    // wrong; at these points, eax contains the address of the offending block (I think).
-    // To get to the exlicit error message(s) below, just continue twice.
-    //
-    // Note:  we want to check the CRT heap, which is not necessarily located in the
-    // process default heap.
-    HANDLE heap = (HANDLE) _get_heap_handle();
-    if (!heap) {
-      return true;
-    }
-
-    // If we fail to lock the heap, then gflags.exe has been used
-    // or some other special heap flag has been set that prevents
-    // locking. We don't try to walk a heap we can't lock.
-    if (HeapLock(heap) != 0) {
-      PROCESS_HEAP_ENTRY phe;
-      phe.lpData = NULL;
-      memset(saved_heap_entries, 0, sizeof(saved_heap_entries));
-      saved_heap_entry_index = 0;
-      int count = 0;
-
-      while (HeapWalk(heap, &phe) != 0) {
-        count ++;
-        if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
-            !HeapValidate(heap, 0, phe.lpData)) {
-          tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
-          tty->print_cr("corrupted block near address %#x, length %d, count %d", phe.lpData, phe.cbData, count);
-          HeapUnlock(heap);
-          fatal("corrupted C heap");
-        } else {
-          // Save previous seen entries in a ring buffer. We have seen strange
-          // heap corruption fatal errors that produced mdmp files, but when we load
-          // these mdmp files in WinDBG, "!heap -triage" shows no error.
-          // We can examine the saved_heap_entries[] array in the mdmp file to
-          // diagnose such seemingly spurious errors reported by HeapWalk.
-          saved_heap_entries[saved_heap_entry_index++] = phe;
-          if (saved_heap_entry_index >= SAVE_COUNT) {
-            saved_heap_entry_index = 0;
-          }
-        }
-      }
-      DWORD err = GetLastError();
-      if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED &&
-         (err == ERROR_INVALID_FUNCTION && phe.lpData != NULL)) {
-        HeapUnlock(heap);
-        fatal("heap walk aborted with error %d", err);
-      }
-      HeapUnlock(heap);
-    }
-    mallocDebugIntervalCounter = 0;
-  }
-  return true;
-}
-
-
 bool os::find(address addr, outputStream* st) {
   int offset = -1;
   bool result = false;
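
The deleted block was the only non-trivial implementation of os::check_heap in the tree; as the other hunks in this changeset show, every other platform simply returned true, which is why the interface could be dropped wholesale. The heart of the Windows verification can be approximated with the documented Win32 heap APIs alone; a minimal sketch, not part of this changeset:

    #include <windows.h>
    #include <malloc.h>   // _get_heap_handle

    // Validate the CRT heap, which is not necessarily the process default heap.
    // A NULL block pointer asks HeapValidate to check the entire heap.
    static bool crt_heap_is_valid() {
      HANDLE heap = (HANDLE)_get_heap_handle();
      if (heap == NULL) return true;   // no CRT heap available to check
      return HeapValidate(heap, 0, NULL) != 0;
    }

The removed code instead walked entry by entry with HeapWalk so it could report the offending block's address before calling fatal().
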
--- a/src/os/windows/vm/threadCritical_windows.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/os/windows/vm/threadCritical_windows.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadCritical.hpp"
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_HPP
+#define OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_HPP
+
+#ifndef _LP64
+#error "Atomic currently only impleneted for PPC64"
+#endif
+
+// Implementation of class atomic
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+//
+//   machine barrier instructions:
+//
+//   - ppc_sync            two-way memory barrier, aka fence
+//   - ppc_lwsync          orders  Store|Store,
+//                                  Load|Store,
+//                                  Load|Load,
+//                         but not Store|Load
+//   - ppc_eieio           orders memory accesses for device memory (only)
+//   - ppc_isync           invalidates speculatively executed instructions
+//                         From the POWER ISA 2.06 documentation:
+//                          "[...] an isync instruction prevents the execution of
+//                         instructions following the isync until instructions
+//                         preceding the isync have completed, [...]"
+//                         From IBM's AIX assembler reference:
+//                          "The isync [...] instructions causes the processor to
+//                         refetch any instructions that might have been fetched
+//                         prior to the isync instruction. The instruction isync
+//                         causes the processor to wait for all previous instructions
+//                         to complete. Then any instructions already fetched are
+//                         discarded and instruction processing continues in the
+//                         environment established by the previous instructions."
+//
+//   semantic barrier instructions:
+//   (as defined in orderAccess.hpp)
+//
+//   - ppc_release         orders Store|Store,       (maps to ppc_lwsync)
+//                                 Load|Store
+//   - ppc_acquire         orders  Load|Store,       (maps to ppc_lwsync)
+//                                 Load|Load
+//   - ppc_fence           orders Store|Store,       (maps to ppc_sync)
+//                                 Load|Store,
+//                                 Load|Load,
+//                                Store|Load
+//
+
+#define strasm_sync                       "\n  sync    \n"
+#define strasm_lwsync                     "\n  lwsync  \n"
+#define strasm_isync                      "\n  isync   \n"
+#define strasm_release                    strasm_lwsync
+#define strasm_acquire                    strasm_lwsync
+#define strasm_fence                      strasm_sync
+#define strasm_nobarrier                  ""
+#define strasm_nobarrier_clobber_memory   ""
+
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+
+  unsigned int result;
+
+  __asm__ __volatile__ (
+    strasm_lwsync
+    "1: lwarx   %0,  0, %2    \n"
+    "   add     %0, %0, %1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_isync
+    : /*%0*/"=&r" (result)
+    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
+    : "cc", "memory" );
+
+  return (jint) result;
+}
+
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+
+  long result;
+
+  __asm__ __volatile__ (
+    strasm_lwsync
+    "1: ldarx   %0,  0, %2    \n"
+    "   add     %0, %0, %1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_isync
+    : /*%0*/"=&r" (result)
+    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
+    : "cc", "memory" );
+
+  return (intptr_t) result;
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+}
+
+
+inline void Atomic::inc    (volatile jint*     dest) {
+
+  unsigned int temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: lwarx   %0,  0, %2    \n"
+    "   addic   %0, %0,  1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+
+  long temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: ldarx   %0,  0, %2    \n"
+    "   addic   %0, %0,  1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::inc_ptr(volatile void*     dest) {
+  inc_ptr((volatile intptr_t*)dest);
+}
+
+
+inline void Atomic::dec    (volatile jint*     dest) {
+
+  unsigned int temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: lwarx   %0,  0, %2    \n"
+    "   addic   %0, %0, -1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+
+  long temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: ldarx   %0,  0, %2    \n"
+    "   addic   %0, %0, -1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::dec_ptr(volatile void*     dest) {
+  dec_ptr((volatile intptr_t*)dest);
+}
+
+inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
+
+  // Note that xchg_ptr doesn't necessarily do an acquire
+  // (see synchronizer.cpp).
+
+  unsigned int old_value;
+  const uint64_t zero = 0;
+
+  __asm__ __volatile__ (
+    /* lwsync */
+    strasm_lwsync
+    /* atomic loop */
+    "1:                                                 \n"
+    "   lwarx   %[old_value], %[dest], %[zero]          \n"
+    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* sync */
+    strasm_sync
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (jint) old_value;
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+
+  // Note that xchg_ptr doesn't necessarily do an acquire
+  // (see synchronizer.cpp).
+
+  long old_value;
+  const uint64_t zero = 0;
+
+  __asm__ __volatile__ (
+    /* lwsync */
+    strasm_lwsync
+    /* atomic loop */
+    "1:                                                 \n"
+    "   ldarx   %[old_value], %[dest], %[zero]          \n"
+    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* isync */
+    strasm_sync
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (intptr_t) old_value;
+}
+
+inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
+  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
+  if (order != memory_order_relaxed) {
+    __asm__ __volatile__ (
+      /* fence */
+      strasm_sync
+      );
+  }
+}
+
+inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
+  if (order != memory_order_relaxed) {
+    __asm__ __volatile__ (
+      /* fence */
+      strasm_sync
+      );
+  }
+}
+
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
+  // specified otherwise (see atomic.hpp).
+
+  // Using 32 bit internally.
+  volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3);
+
+#ifdef VM_LITTLE_ENDIAN
+  const unsigned int shift_amount        = ((uintptr_t)dest & 3) * 8;
+#else
+  const unsigned int shift_amount        = ((~(uintptr_t)dest) & 3) * 8;
+#endif
+  const unsigned int masked_compare_val  = ((unsigned int)(unsigned char)compare_value),
+                     masked_exchange_val = ((unsigned int)(unsigned char)exchange_value),
+                     xor_value           = (masked_compare_val ^ masked_exchange_val) << shift_amount;
+
+  unsigned int old_value, value32;
+
+  cmpxchg_pre_membar(order);
+
+  __asm__ __volatile__ (
+    /* simple guard */
+    "   lbz     %[old_value], 0(%[dest])                  \n"
+    "   cmpw    %[masked_compare_val], %[old_value]       \n"
+    "   bne-    2f                                        \n"
+    /* atomic loop */
+    "1:                                                   \n"
+    "   lwarx   %[value32], 0, %[dest_base]               \n"
+    /* extract byte and compare */
+    "   srd     %[old_value], %[value32], %[shift_amount] \n"
+    "   clrldi  %[old_value], %[old_value], 56            \n"
+    "   cmpw    %[masked_compare_val], %[old_value]       \n"
+    "   bne-    2f                                        \n"
+    /* replace byte and try to store */
+    "   xor     %[value32], %[xor_value], %[value32]      \n"
+    "   stwcx.  %[value32], 0, %[dest_base]               \n"
+    "   bne-    1b                                        \n"
+    /* exit */
+    "2:                                                   \n"
+    /* out */
+    : [old_value]           "=&r"   (old_value),
+      [value32]             "=&r"   (value32),
+                            "=m"    (*dest),
+                            "=m"    (*dest_base)
+    /* in */
+    : [dest]                "b"     (dest),
+      [dest_base]           "b"     (dest_base),
+      [shift_amount]        "r"     (shift_amount),
+      [masked_compare_val]  "r"     (masked_compare_val),
+      [xor_value]           "r"     (xor_value),
+                            "m"     (*dest),
+                            "m"     (*dest_base)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  cmpxchg_post_membar(order);
+
+  return (jbyte)(unsigned char)old_value;
+}
+
+inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
+  // specified otherwise (see atomic.hpp).
+
+  unsigned int old_value;
+  const uint64_t zero = 0;
+
+  cmpxchg_pre_membar(order);
+
+  __asm__ __volatile__ (
+    /* simple guard */
+    "   lwz     %[old_value], 0(%[dest])                \n"
+    "   cmpw    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    /* atomic loop */
+    "1:                                                 \n"
+    "   lwarx   %[old_value], %[dest], %[zero]          \n"
+    "   cmpw    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [compare_value]   "r"     (compare_value),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  cmpxchg_post_membar(order);
+
+  return (jint) old_value;
+}
+
+inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
+  // specified otherwise (see atomic.hpp).
+
+  long old_value;
+  const uint64_t zero = 0;
+
+  cmpxchg_pre_membar(order);
+
+  __asm__ __volatile__ (
+    /* simple guard */
+    "   ld      %[old_value], 0(%[dest])                \n"
+    "   cmpd    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    /* atomic loop */
+    "1:                                                 \n"
+    "   ldarx   %[old_value], %[dest], %[zero]          \n"
+    "   cmpd    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [compare_value]   "r"     (compare_value),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  cmpxchg_post_membar(order);
+
+  return (jlong) old_value;
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+#undef strasm_sync
+#undef strasm_lwsync
+#undef strasm_isync
+#undef strasm_release
+#undef strasm_acquire
+#undef strasm_fence
+#undef strasm_nobarrier
+#undef strasm_nobarrier_clobber_memory
+
+#endif // OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_HPP
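
Every read-modify-write primitive in the file above is built on the same load-reserve/store-conditional shape: lwarx (or ldarx) loads the word and takes a reservation, stwcx. (or stdcx.) stores only if the reservation still holds, and a bne- retries the loop when it does not. Stripped of barriers and inline assembly, the cmpxchg loop reduces to the following sketch, where load_reserve and store_conditional are hypothetical stand-ins for the instructions:

    // Illustrative pseudo-intrinsics only; the real code is the asm above.
    jint cas_word(volatile jint* dest, jint compare_value, jint exchange_value) {
      jint observed;
      do {
        observed = load_reserve(dest);            // lwarx: load and take a reservation
        if (observed != compare_value) {
          break;                                  // lost the race; report what we saw
        }
      } while (!store_conditional(dest, exchange_value)); // stwcx.: fails if reservation lost
      return observed;
    }

The "simple guard" load before each loop serves the same purpose as the early break: it skips taking a reservation at all when the compare is already bound to fail.
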
--- a/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,482 +0,0 @@
-/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP
-#define OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-#ifndef _LP64
-#error "Atomic currently only impleneted for PPC64"
-#endif
-
-// Implementation of class atomic
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
-
-//
-//   machine barrier instructions:
-//
-//   - ppc_sync            two-way memory barrier, aka fence
-//   - ppc_lwsync          orders  Store|Store,
-//                                  Load|Store,
-//                                  Load|Load,
-//                         but not Store|Load
-//   - ppc_eieio           orders memory accesses for device memory (only)
-//   - ppc_isync           invalidates speculatively executed instructions
-//                         From the POWER ISA 2.06 documentation:
-//                          "[...] an isync instruction prevents the execution of
-//                         instructions following the isync until instructions
-//                         preceding the isync have completed, [...]"
-//                         From IBM's AIX assembler reference:
-//                          "The isync [...] instructions causes the processor to
-//                         refetch any instructions that might have been fetched
-//                         prior to the isync instruction. The instruction isync
-//                         causes the processor to wait for all previous instructions
-//                         to complete. Then any instructions already fetched are
-//                         discarded and instruction processing continues in the
-//                         environment established by the previous instructions."
-//
-//   semantic barrier instructions:
-//   (as defined in orderAccess.hpp)
-//
-//   - ppc_release         orders Store|Store,       (maps to ppc_lwsync)
-//                                 Load|Store
-//   - ppc_acquire         orders  Load|Store,       (maps to ppc_lwsync)
-//                                 Load|Load
-//   - ppc_fence           orders Store|Store,       (maps to ppc_sync)
-//                                 Load|Store,
-//                                 Load|Load,
-//                                Store|Load
-//
-
-#define strasm_sync                       "\n  sync    \n"
-#define strasm_lwsync                     "\n  lwsync  \n"
-#define strasm_isync                      "\n  isync   \n"
-#define strasm_release                    strasm_lwsync
-#define strasm_acquire                    strasm_lwsync
-#define strasm_fence                      strasm_sync
-#define strasm_nobarrier                  ""
-#define strasm_nobarrier_clobber_memory   ""
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-
-  unsigned int result;
-
-  __asm__ __volatile__ (
-    strasm_lwsync
-    "1: lwarx   %0,  0, %2    \n"
-    "   add     %0, %0, %1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_isync
-    : /*%0*/"=&r" (result)
-    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
-    : "cc", "memory" );
-
-  return (jint) result;
-}
-
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-
-  long result;
-
-  __asm__ __volatile__ (
-    strasm_lwsync
-    "1: ldarx   %0,  0, %2    \n"
-    "   add     %0, %0, %1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_isync
-    : /*%0*/"=&r" (result)
-    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
-    : "cc", "memory" );
-
-  return (intptr_t) result;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
-}
-
-
-inline void Atomic::inc    (volatile jint*     dest) {
-
-  unsigned int temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: lwarx   %0,  0, %2    \n"
-    "   addic   %0, %0,  1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-
-  long temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: ldarx   %0,  0, %2    \n"
-    "   addic   %0, %0,  1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
-
-
-inline void Atomic::dec    (volatile jint*     dest) {
-
-  unsigned int temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: lwarx   %0,  0, %2    \n"
-    "   addic   %0, %0, -1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-
-  long temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: ldarx   %0,  0, %2    \n"
-    "   addic   %0, %0, -1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
-
-inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
-
-  // Note that xchg_ptr doesn't necessarily do an acquire
-  // (see synchronizer.cpp).
-
-  unsigned int old_value;
-  const uint64_t zero = 0;
-
-  __asm__ __volatile__ (
-    /* lwsync */
-    strasm_lwsync
-    /* atomic loop */
-    "1:                                                 \n"
-    "   lwarx   %[old_value], %[dest], %[zero]          \n"
-    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* isync */
-    strasm_sync
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  return (jint) old_value;
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-
-  // Note that xchg_ptr doesn't necessarily do an acquire
-  // (see synchronizer.cpp).
-
-  long old_value;
-  const uint64_t zero = 0;
-
-  __asm__ __volatile__ (
-    /* lwsync */
-    strasm_lwsync
-    /* atomic loop */
-    "1:                                                 \n"
-    "   ldarx   %[old_value], %[dest], %[zero]          \n"
-    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* isync */
-    strasm_sync
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  return (intptr_t) old_value;
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
-inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
-}
-
-inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
-
-  // Note that cmpxchg guarantees a two-way memory barrier across
-  // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
-  // specified otherwise (see atomic.hpp).
-
-  // Using 32 bit internally.
-  volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3);
-
-#ifdef VM_LITTLE_ENDIAN
-  const unsigned int shift_amount        = ((uintptr_t)dest & 3) * 8;
-#else
-  const unsigned int shift_amount        = ((~(uintptr_t)dest) & 3) * 8;
-#endif
-  const unsigned int masked_compare_val  = ((unsigned int)(unsigned char)compare_value),
-                     masked_exchange_val = ((unsigned int)(unsigned char)exchange_value),
-                     xor_value           = (masked_compare_val ^ masked_exchange_val) << shift_amount;
-
-  unsigned int old_value, value32;
-
-  cmpxchg_pre_membar(order);
-
-  __asm__ __volatile__ (
-    /* simple guard */
-    "   lbz     %[old_value], 0(%[dest])                  \n"
-    "   cmpw    %[masked_compare_val], %[old_value]       \n"
-    "   bne-    2f                                        \n"
-    /* atomic loop */
-    "1:                                                   \n"
-    "   lwarx   %[value32], 0, %[dest_base]               \n"
-    /* extract byte and compare */
-    "   srd     %[old_value], %[value32], %[shift_amount] \n"
-    "   clrldi  %[old_value], %[old_value], 56            \n"
-    "   cmpw    %[masked_compare_val], %[old_value]       \n"
-    "   bne-    2f                                        \n"
-    /* replace byte and try to store */
-    "   xor     %[value32], %[xor_value], %[value32]      \n"
-    "   stwcx.  %[value32], 0, %[dest_base]               \n"
-    "   bne-    1b                                        \n"
-    /* exit */
-    "2:                                                   \n"
-    /* out */
-    : [old_value]           "=&r"   (old_value),
-      [value32]             "=&r"   (value32),
-                            "=m"    (*dest),
-                            "=m"    (*dest_base)
-    /* in */
-    : [dest]                "b"     (dest),
-      [dest_base]           "b"     (dest_base),
-      [shift_amount]        "r"     (shift_amount),
-      [masked_compare_val]  "r"     (masked_compare_val),
-      [xor_value]           "r"     (xor_value),
-                            "m"     (*dest),
-                            "m"     (*dest_base)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  cmpxchg_post_membar(order);
-
-  return (jbyte)(unsigned char)old_value;
-}
-
-inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
-
-  // Note that cmpxchg guarantees a two-way memory barrier across
-  // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
-  // specified otherwise (see atomic.hpp).
-
-  unsigned int old_value;
-  const uint64_t zero = 0;
-
-  cmpxchg_pre_membar(order);
-
-  __asm__ __volatile__ (
-    /* simple guard */
-    "   lwz     %[old_value], 0(%[dest])                \n"
-    "   cmpw    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    /* atomic loop */
-    "1:                                                 \n"
-    "   lwarx   %[old_value], %[dest], %[zero]          \n"
-    "   cmpw    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [compare_value]   "r"     (compare_value),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  cmpxchg_post_membar(order);
-
-  return (jint) old_value;
-}
-
-inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
-
-  // Note that cmpxchg guarantees a two-way memory barrier across
-  // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
-  // specified otherwise (see atomic.hpp).
-
-  long old_value;
-  const uint64_t zero = 0;
-
-  cmpxchg_pre_membar(order);
-
-  __asm__ __volatile__ (
-    /* simple guard */
-    "   ld      %[old_value], 0(%[dest])                \n"
-    "   cmpd    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    /* atomic loop */
-    "1:                                                 \n"
-    "   ldarx   %[old_value], %[dest], %[zero]          \n"
-    "   cmpd    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [compare_value]   "r"     (compare_value),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  cmpxchg_post_membar(order);
-
-  return (jlong) old_value;
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-#undef strasm_sync
-#undef strasm_lwsync
-#undef strasm_isync
-#undef strasm_release
-#undef strasm_acquire
-#undef strasm_fence
-#undef strasm_nobarrier
-#undef strasm_nobarrier_clobber_memory
-
-#endif // OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
+#define OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
+
+#include "runtime/os.hpp"
+
+// Implementation of class atomic
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+
+// Adding a lock prefix to an instruction on MP machine
+#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
+
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+  jint addend = add_value;
+  int mp = os::is_MP();
+  __asm__ volatile (  LOCK_IF_MP(%3) "xaddl %0,(%2)"
+                    : "=r" (addend)
+                    : "0" (addend), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return addend + add_value;
+}
+
+inline void Atomic::inc    (volatile jint*     dest) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%1) "addl $1,(%0)" :
+                    : "r" (dest), "r" (mp) : "cc", "memory");
+}
+
+inline void Atomic::inc_ptr(volatile void*     dest) {
+  inc_ptr((volatile intptr_t*)dest);
+}
+
+inline void Atomic::dec    (volatile jint*     dest) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%1) "subl $1,(%0)" :
+                    : "r" (dest), "r" (mp) : "cc", "memory");
+}
+
+inline void Atomic::dec_ptr(volatile void*     dest) {
+  dec_ptr((volatile intptr_t*)dest);
+}
+
+inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
+  __asm__ volatile (  "xchgl (%2),%0"
+                    : "=r" (exchange_value)
+                    : "0" (exchange_value), "r" (dest)
+                    : "memory");
+  return exchange_value;
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return exchange_value;
+}
+
+inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return exchange_value;
+}
+
+#ifdef AMD64
+inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  intptr_t addend = add_value;
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
+                        : "=r" (addend)
+                        : "0" (addend), "r" (dest), "r" (mp)
+                        : "cc", "memory");
+  return addend + add_value;
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%1) "addq $1,(%0)"
+                        :
+                        : "r" (dest), "r" (mp)
+                        : "cc", "memory");
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%1) "subq $1,(%0)"
+                        :
+                        : "r" (dest), "r" (mp)
+                        : "cc", "memory");
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  __asm__ __volatile__ ("xchgq (%2),%0"
+                        : "=r" (exchange_value)
+                        : "0" (exchange_value), "r" (dest)
+                        : "memory");
+  return exchange_value;
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
+                        : "=a" (exchange_value)
+                        : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                        : "cc", "memory");
+  return exchange_value;
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+#else // !AMD64
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
+}
+
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  inc((volatile jint*)dest);
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  dec((volatile jint*)dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+extern "C" {
+  // defined in bsd_x86.s
+  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
+  void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, os::is_MP());
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  _Atomic_move_long(src, &dest);
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, dest);
+}
+
+#endif // AMD64
+
+#endif // OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
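
The #else branch exists because a 64-bit load or store on 32-bit x86 otherwise compiles into two 32-bit moves, which another thread can observe half-written. _Atomic_move_long, defined in bsd_x86.s, performs the transfer with a single 64-bit instruction (which one is an implementation detail of the assembly file), so jlong accesses are untorn on both paths and callers never need to care which branch they compiled against:

    static volatile jlong _published_nanos = 0;

    jlong read_published_nanos() {
      // An indivisible 64-bit read on AMD64 directly, and on 32-bit x86
      // via the _Atomic_move_long stub.
      return Atomic::load(&_published_nanos);
    }
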
--- a/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,228 +0,0 @@
-/*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_INLINE_HPP
-#define OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-// Implementation of class atomic
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-
-// Adding a lock prefix to an instruction on MP machine
-#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  jint addend = add_value;
-  int mp = os::is_MP();
-  __asm__ volatile (  LOCK_IF_MP(%3) "xaddl %0,(%2)"
-                    : "=r" (addend)
-                    : "0" (addend), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-  return addend + add_value;
-}
-
-inline void Atomic::inc    (volatile jint*     dest) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%1) "addl $1,(%0)" :
-                    : "r" (dest), "r" (mp) : "cc", "memory");
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
-
-inline void Atomic::dec    (volatile jint*     dest) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%1) "subl $1,(%0)" :
-                    : "r" (dest), "r" (mp) : "cc", "memory");
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
-  __asm__ volatile (  "xchgl (%2),%0"
-                    : "=r" (exchange_value)
-                    : "0" (exchange_value), "r" (dest)
-                    : "memory");
-  return exchange_value;
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
-                    : "=a" (exchange_value)
-                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-  return exchange_value;
-}
-
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
-                    : "=a" (exchange_value)
-                    : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-  return exchange_value;
-}
-
-#ifdef AMD64
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  intptr_t addend = add_value;
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
-                        : "=r" (addend)
-                        : "0" (addend), "r" (dest), "r" (mp)
-                        : "cc", "memory");
-  return addend + add_value;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%1) "addq $1,(%0)"
-                        :
-                        : "r" (dest), "r" (mp)
-                        : "cc", "memory");
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%1) "subq $1,(%0)"
-                        :
-                        : "r" (dest), "r" (mp)
-                        : "cc", "memory");
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  __asm__ __volatile__ ("xchgq (%2),%0"
-                        : "=r" (exchange_value)
-                        : "0" (exchange_value), "r" (dest)
-                        : "memory");
-  return exchange_value;
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
-                        : "=a" (exchange_value)
-                        : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                        : "cc", "memory");
-  return exchange_value;
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
-
-#else // !AMD64
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  inc((volatile jint*)dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  dec((volatile jint*)dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-extern "C" {
-  // defined in bsd_x86.s
-  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
-  void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, os::is_MP());
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) {
-  volatile jlong dest;
-  _Atomic_move_long(src, &dest);
-  return dest;
-}
-
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, dest);
-}
-
-#endif // AMD64
-
-#endif // OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_INLINE_HPP
--- a/src/os_cpu/bsd_x86/vm/orderAccess_bsd_x86.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/os_cpu/bsd_x86/vm/orderAccess_bsd_x86.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
 #define OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
 
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
+#define OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
+
+#include "runtime/os.hpp"
+
+// Implementation of class atomic
+
+#ifdef M68K
+
+/*
+ * __m68k_cmpxchg
+ *
+ * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
+ * Returns newval on success and oldval if no exchange happened.
+ * This implementation is processor-specific and works on the
+ * 68020, 68030, 68040, and 68060.
+ *
+ * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
+ * instruction.
+ * Using a kernel helper would be better for a complete architecture implementation.
+ *
+ */
+
+static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
+  int ret;
+  __asm __volatile ("cas%.l %0,%2,%1"
+                   : "=d" (ret), "+m" (*(ptr))
+                   : "d" (newval), "0" (oldval));
+  return ret;
+}
+
+/* Perform an atomic compare and swap: if the current value of `*PTR'
+   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
+   `*PTR' before the operation.*/
+static inline int m68k_compare_and_swap(volatile int *ptr,
+                                        int oldval,
+                                        int newval) {
+  for (;;) {
+      int prev = *ptr;
+      if (prev != oldval)
+        return prev;
+
+      if (__m68k_cmpxchg (prev, newval, ptr) == newval)
+        // Success.
+        return prev;
+
+      // We failed even though prev == oldval.  Try again.
+    }
+}
+
+/* Atomically add an int to memory.  */
+static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
+  for (;;) {
+      // Loop until success.
+
+      int prev = *ptr;
+
+      if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
+        return prev + add_value;
+    }
+}
+
+/* Atomically write VALUE into `*PTR' and returns the previous
+   contents of `*PTR'.  */
+static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
+  for (;;) {
+      // Loop until success.
+      int prev = *ptr;
+
+      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
+        return prev;
+    }
+}
+#endif // M68K
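The three m68k helpers above are instances of a single pattern: any read-modify-write primitive can be built from a bare compare-and-swap by retrying until the swap observes an unchanged value. The same loop, sketched with GCC's __atomic builtins (names here are illustrative, not from this changeset):

    // add_and_fetch built from compare-and-swap alone, mirroring
    // m68k_add_and_fetch above.
    static inline int cas_add_and_fetch(volatile int* ptr, int add_value) {
      for (;;) {
        int prev = *ptr;               // observe the current value
        int next = prev + add_value;   // compute the desired value
        // Publish 'next' only if *ptr still holds 'prev'.
        if (__atomic_compare_exchange_n(ptr, &prev, next, /*weak*/ false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
          return next;                 // swap succeeded
        }
        // Another thread raced us; retry with the fresh value.
      }
    }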
+
+#ifdef ARM
+
+/*
+ * __kernel_cmpxchg
+ *
+ * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
+ * Return zero if *ptr was changed or non-zero if no exchange happened.
+ * The C flag is also set if *ptr was changed to allow for assembly
+ * optimization in the calling code.
+ *
+ */
+
+typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
+#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
+
+
+
+/* Perform an atomic compare and swap: if the current value of `*PTR'
+   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
+   `*PTR' before the operation.*/
+static inline int arm_compare_and_swap(volatile int *ptr,
+                                       int oldval,
+                                       int newval) {
+  for (;;) {
+      int prev = *ptr;
+      if (prev != oldval)
+        return prev;
+
+      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
+        // Success.
+        return prev;
+
+      // We failed even though prev == oldval.  Try again.
+    }
+}
+
+/* Atomically add an int to memory.  */
+static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
+  for (;;) {
+      // Loop until a __kernel_cmpxchg succeeds.
+
+      int prev = *ptr;
+
+      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
+        return prev + add_value;
+    }
+}
+
+/* Atomically write VALUE into `*PTR' and returns the previous
+   contents of `*PTR'.  */
+static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
+  for (;;) {
+      // Loop until a __kernel_cmpxchg succeeds.
+      int prev = *ptr;
+
+      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
+        return prev;
+    }
+}
+#endif // ARM
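Lacking a user-mode CAS instruction on pre-ARMv6 cores, Linux exports a "kuser helper" at the fixed address 0xffff0fc0; the kernel guarantees the routine behaves as an atomic compare-and-exchange even across preemption. Its contract, restated as a small wrapper (a sketch, not part of the changeset):

    // True exactly when *ptr was atomically changed from oldval to newval;
    // __kernel_cmpxchg returns zero on a successful exchange.
    static inline bool try_cas(volatile int* ptr, int oldval, int newval) {
      return __kernel_cmpxchg(oldval, newval, ptr) == 0;
    }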
+
+inline void Atomic::store(jint store_value, volatile jint* dest) {
+#if !defined(ARM) && !defined(M68K)
+  __sync_synchronize();
+#endif
+  *dest = store_value;
+}
+
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
+#if !defined(ARM) && !defined(M68K)
+  __sync_synchronize();
+#endif
+  *dest = store_value;
+}
+
+inline jint Atomic::add(jint add_value, volatile jint* dest) {
+#ifdef ARM
+  return arm_add_and_fetch(dest, add_value);
+#else
+#ifdef M68K
+  return m68k_add_and_fetch(dest, add_value);
+#else
+  return __sync_add_and_fetch(dest, add_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+#ifdef ARM
+  return arm_add_and_fetch(dest, add_value);
+#else
+#ifdef M68K
+  return m68k_add_and_fetch(dest, add_value);
+#else
+  return __sync_add_and_fetch(dest, add_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
+  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
+}
+
+inline void Atomic::inc(volatile jint* dest) {
+  add(1, dest);
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  add_ptr(1, dest);
+}
+
+inline void Atomic::inc_ptr(volatile void* dest) {
+  add_ptr(1, dest);
+}
+
+inline void Atomic::dec(volatile jint* dest) {
+  add(-1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  add_ptr(-1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile void* dest) {
+  add_ptr(-1, dest);
+}
+
+inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
+#ifdef ARM
+  return arm_lock_test_and_set(dest, exchange_value);
+#else
+#ifdef M68K
+  return m68k_lock_test_and_set(dest, exchange_value);
+#else
+  // __sync_lock_test_and_set is a bizarrely named atomic exchange
+  // operation.  Note that some platforms only support this with the
+  // limitation that the only valid value to store is the immediate
+  // constant 1.  There is a test for this in JNI_CreateJavaVM().
+  jint result = __sync_lock_test_and_set (dest, exchange_value);
+  // All atomic operations are expected to be full memory barriers
+  // (see atomic.hpp). However, __sync_lock_test_and_set is not
+  // a full memory barrier, but an acquire barrier. Hence, this added
+  // barrier.
+  __sync_synchronize();
+  return result;
+#endif // M68K
+#endif // ARM
+}
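As the comment above says, __sync_lock_test_and_set is only an acquire operation, so the trailing __sync_synchronize upgrades it to the full two-way barrier the Atomic contract expects. On GCC 4.7 and later the same effect is available in one call through the __atomic builtins; a sketch, not part of the changeset:

    // Atomic exchange with a full barrier in a single builtin, equivalent
    // to the test-and-set plus fence pair used above.
    static inline int full_fence_xchg(volatile int* dest, int exchange_value) {
      return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
    }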
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
+                                 volatile intptr_t* dest) {
+#ifdef ARM
+  return arm_lock_test_and_set(dest, exchange_value);
+#else
+#ifdef M68K
+  return m68k_lock_test_and_set(dest, exchange_value);
+#else
+  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
+  __sync_synchronize();
+  return result;
+#endif // M68K
+#endif // ARM
+}
+
+inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
+  return (void *) xchg_ptr((intptr_t) exchange_value,
+                           (volatile intptr_t*) dest);
+}
+
+inline jint Atomic::cmpxchg(jint exchange_value,
+                            volatile jint* dest,
+                            jint compare_value,
+                            cmpxchg_memory_order order) {
+#ifdef ARM
+  return arm_compare_and_swap(dest, compare_value, exchange_value);
+#else
+#ifdef M68K
+  return m68k_compare_and_swap(dest, compare_value, exchange_value);
+#else
+  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline jlong Atomic::cmpxchg(jlong exchange_value,
+                             volatile jlong* dest,
+                             jlong compare_value,
+                             cmpxchg_memory_order order) {
+
+  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
+                                    volatile intptr_t* dest,
+                                    intptr_t compare_value,
+                                    cmpxchg_memory_order order) {
+#ifdef ARM
+  return arm_compare_and_swap(dest, compare_value, exchange_value);
+#else
+#ifdef M68K
+  return m68k_compare_and_swap(dest, compare_value, exchange_value);
+#else
+  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline void* Atomic::cmpxchg_ptr(void* exchange_value,
+                                 volatile void* dest,
+                                 void* compare_value,
+                                 cmpxchg_memory_order order) {
+
+  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
+                              (volatile intptr_t*) dest,
+                              (intptr_t) compare_value,
+                              order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  os::atomic_copy64(src, &dest);
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  os::atomic_copy64((volatile jlong*)&store_value, dest);
+}
+
+#endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
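os::atomic_copy64, defined per platform outside this hunk, is what gives Zero an indivisible 8-byte copy on 32-bit targets, so the jlong load and store above cannot tear. They are single accesses, though, not read-modify-write operations; an atomic 64-bit increment still has to loop on cmpxchg. A caller-side sketch (assuming the default conservative memory order declared in atomic.hpp):

    volatile jlong counter = 0;

    jlong snapshot = Atomic::load(&counter);   // one indivisible 64-bit read
    Atomic::store(snapshot + 1, &counter);     // NOT an atomic increment

    // A correct atomic increment retries until no other writer intervenes:
    jlong old;
    do {
      old = Atomic::load(&counter);
    } while (Atomic::cmpxchg(old + 1, &counter, old) != old);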
--- a/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,334 +0,0 @@
-/*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP
-#define OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-// Implementation of class atomic
-
-#ifdef M68K
-
-/*
- * __m68k_cmpxchg
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
- * Returns newval on success and oldval if no exchange happened.
- * This implementation is processor specific and works on
- * 68020 68030 68040 and 68060.
- *
- * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
- * instruction.
- * Using a kernelhelper would be better for arch complete implementation.
- *
- */
-
-static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
-  int ret;
-  __asm __volatile ("cas%.l %0,%2,%1"
-                   : "=d" (ret), "+m" (*(ptr))
-                   : "d" (newval), "0" (oldval));
-  return ret;
-}
-
-/* Perform an atomic compare and swap: if the current value of `*PTR'
-   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
-   `*PTR' before the operation.*/
-static inline int m68k_compare_and_swap(volatile int *ptr,
-                                        int oldval,
-                                        int newval) {
-  for (;;) {
-      int prev = *ptr;
-      if (prev != oldval)
-        return prev;
-
-      if (__m68k_cmpxchg (prev, newval, ptr) == newval)
-        // Success.
-        return prev;
-
-      // We failed even though prev == oldval.  Try again.
-    }
-}
-
-/* Atomically add an int to memory.  */
-static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
-  for (;;) {
-      // Loop until success.
-
-      int prev = *ptr;
-
-      if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
-        return prev + add_value;
-    }
-}
-
-/* Atomically write VALUE into `*PTR' and returns the previous
-   contents of `*PTR'.  */
-static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
-  for (;;) {
-      // Loop until success.
-      int prev = *ptr;
-
-      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
-        return prev;
-    }
-}
-#endif // M68K
-
-#ifdef ARM
-
-/*
- * __kernel_cmpxchg
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
- * Return zero if *ptr was changed or non-zero if no exchange happened.
- * The C flag is also set if *ptr was changed to allow for assembly
- * optimization in the calling code.
- *
- */
-
-typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
-#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
-
-
-
-/* Perform an atomic compare and swap: if the current value of `*PTR'
-   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
-   `*PTR' before the operation.*/
-static inline int arm_compare_and_swap(volatile int *ptr,
-                                       int oldval,
-                                       int newval) {
-  for (;;) {
-      int prev = *ptr;
-      if (prev != oldval)
-        return prev;
-
-      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
-        // Success.
-        return prev;
-
-      // We failed even though prev == oldval.  Try again.
-    }
-}
-
-/* Atomically add an int to memory.  */
-static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
-  for (;;) {
-      // Loop until a __kernel_cmpxchg succeeds.
-
-      int prev = *ptr;
-
-      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
-        return prev + add_value;
-    }
-}
-
-/* Atomically write VALUE into `*PTR' and returns the previous
-   contents of `*PTR'.  */
-static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
-  for (;;) {
-      // Loop until a __kernel_cmpxchg succeeds.
-      int prev = *ptr;
-
-      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
-        return prev;
-    }
-}
-#endif // ARM
-
-inline void Atomic::store(jint store_value, volatile jint* dest) {
-#if !defined(ARM) && !defined(M68K)
-  __sync_synchronize();
-#endif
-  *dest = store_value;
-}
-
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
-#if !defined(ARM) && !defined(M68K)
-  __sync_synchronize();
-#endif
-  *dest = store_value;
-}
-
-inline jint Atomic::add(jint add_value, volatile jint* dest) {
-#ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
-#else
-#ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
-#else
-  return __sync_add_and_fetch(dest, add_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-#ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
-#else
-#ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
-#else
-  return __sync_add_and_fetch(dest, add_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
-}
-
-inline void Atomic::inc(volatile jint* dest) {
-  add(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  add_ptr(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile void* dest) {
-  add_ptr(1, dest);
-}
-
-inline void Atomic::dec(volatile jint* dest) {
-  add(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  add_ptr(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile void* dest) {
-  add_ptr(-1, dest);
-}
-
-inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
-#ifdef ARM
-  return arm_lock_test_and_set(dest, exchange_value);
-#else
-#ifdef M68K
-  return m68k_lock_test_and_set(dest, exchange_value);
-#else
-  // __sync_lock_test_and_set is a bizarrely named atomic exchange
-  // operation.  Note that some platforms only support this with the
-  // limitation that the only valid value to store is the immediate
-  // constant 1.  There is a test for this in JNI_CreateJavaVM().
-  jint result = __sync_lock_test_and_set (dest, exchange_value);
-  // All atomic operations are expected to be full memory barriers
-  // (see atomic.hpp). However, __sync_lock_test_and_set is not
-  // a full memory barrier, but an acquire barrier. Hence, this added
-  // barrier.
-  __sync_synchronize();
-  return result;
-#endif // M68K
-#endif // ARM
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
-                                 volatile intptr_t* dest) {
-#ifdef ARM
-  return arm_lock_test_and_set(dest, exchange_value);
-#else
-#ifdef M68K
-  return m68k_lock_test_and_set(dest, exchange_value);
-#else
-  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
-  __sync_synchronize();
-  return result;
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-  return (void *) xchg_ptr((intptr_t) exchange_value,
-                           (volatile intptr_t*) dest);
-}
-
-inline jint Atomic::cmpxchg(jint exchange_value,
-                            volatile jint* dest,
-                            jint compare_value,
-                            cmpxchg_memory_order order) {
-#ifdef ARM
-  return arm_compare_and_swap(dest, compare_value, exchange_value);
-#else
-#ifdef M68K
-  return m68k_compare_and_swap(dest, compare_value, exchange_value);
-#else
-  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline jlong Atomic::cmpxchg(jlong exchange_value,
-                             volatile jlong* dest,
-                             jlong compare_value,
-                             cmpxchg_memory_order order) {
-
-  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
-                                    volatile intptr_t* dest,
-                                    intptr_t compare_value,
-                                    cmpxchg_memory_order order) {
-#ifdef ARM
-  return arm_compare_and_swap(dest, compare_value, exchange_value);
-#else
-#ifdef M68K
-  return m68k_compare_and_swap(dest, compare_value, exchange_value);
-#else
-  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value,
-                                 volatile void* dest,
-                                 void* compare_value,
-                                 cmpxchg_memory_order order) {
-
-  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
-                              (volatile intptr_t*) dest,
-                              (intptr_t) compare_value,
-                              order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) {
-  volatile jlong dest;
-  os::atomic_copy64(src, &dest);
-  return dest;
-}
-
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  os::atomic_copy64((volatile jlong*)&store_value, dest);
-}
-
-#endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP
+#define OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP
+
+#include "vm_version_aarch64.hpp"
+
+// Implementation of class atomic
+
+#define FULL_MEM_BARRIER  __sync_synchronize()
+#define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
+#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+
+inline jint Atomic::add(jint add_value, volatile jint* dest)
+{
+ return __sync_add_and_fetch(dest, add_value);
+}
+
+inline void Atomic::inc(volatile jint* dest)
+{
+ add(1, dest);
+}
+
+inline void Atomic::inc_ptr(volatile void* dest)
+{
+ add_ptr(1, dest);
+}
+
+inline void Atomic::dec (volatile jint* dest)
+{
+ add(-1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile void* dest)
+{
+ add_ptr(-1, dest);
+}
+
+inline jint Atomic::xchg (jint exchange_value, volatile jint* dest)
+{
+  jint res = __sync_lock_test_and_set (dest, exchange_value);
+  FULL_MEM_BARRIER;
+  return res;
+}
+
+inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest)
+{
+  return (void *) xchg_ptr((intptr_t) exchange_value,
+                           (volatile intptr_t*) dest);
+}
+
+template <typename T> T generic_cmpxchg(T exchange_value, volatile T* dest,
+                                        T compare_value, cmpxchg_memory_order order)
+{
+  if (order == memory_order_relaxed) {
+    T value = compare_value;
+    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
+                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+    return value;
+  } else {
+    return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+  }
+}
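Note how generic_cmpxchg returns the witnessed value on both paths: __atomic_compare_exchange writes the actual contents of *dest back into value when the compare fails, and on success value still holds compare_value, so callers always receive the old value, as the Atomic::cmpxchg contract requires. A caller-side sketch (the flag variable is illustrative):

    volatile jint flag = 0;

    // Claim the flag only if it is still 0. Relaxed ordering suffices when
    // no data is published through this variable.
    jint witnessed = Atomic::cmpxchg(1, &flag, 0, memory_order_relaxed);
    if (witnessed == 0) {
      // The CAS succeeded and this thread installed the 1.
    }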
+
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order)
+{
+  return generic_cmpxchg(exchange_value, dest, compare_value, order);
+}
+
+inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order)
+{
+  return generic_cmpxchg(exchange_value, dest, compare_value, order);
+}
+
+inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
+inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
+{
+ return __sync_add_and_fetch(dest, add_value);
+}
+
+inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest)
+{
+  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest)
+{
+ add_ptr(1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest)
+{
+ add_ptr(-1, dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
+{
+  intptr_t res = __sync_lock_test_and_set (dest, exchange_value);
+  FULL_MEM_BARRIER;
+  return res;
+}
+
+inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order)
+{
+  return generic_cmpxchg(exchange_value, dest, compare_value, order);
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order)
+{
+  return generic_cmpxchg(exchange_value, dest, compare_value, order);
+}
+
+inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order)
+{
+  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
+                              (volatile intptr_t*) dest,
+                              (intptr_t) compare_value,
+                              order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+#endif // OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP
--- a/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,164 +0,0 @@
-/*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_INLINE_HPP
-#define OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-#include "vm_version_aarch64.hpp"
-
-// Implementation of class atomic
-
-#define FULL_MEM_BARRIER  __sync_synchronize()
-#define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
-#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-
-inline jint Atomic::add(jint add_value, volatile jint* dest)
-{
- return __sync_add_and_fetch(dest, add_value);
-}
-
-inline void Atomic::inc(volatile jint* dest)
-{
- add(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile void* dest)
-{
- add_ptr(1, dest);
-}
-
-inline void Atomic::dec (volatile jint* dest)
-{
- add(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile void* dest)
-{
- add_ptr(-1, dest);
-}
-
-inline jint Atomic::xchg (jint exchange_value, volatile jint* dest)
-{
-  jint res = __sync_lock_test_and_set (dest, exchange_value);
-  FULL_MEM_BARRIER;
-  return res;
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest)
-{
-  return (void *) xchg_ptr((intptr_t) exchange_value,
-                           (volatile intptr_t*) dest);
-}
-
-template <typename T> T generic_cmpxchg(T exchange_value, volatile T* dest,
-                                        T compare_value, cmpxchg_memory_order order)
-{
-  if (order == memory_order_relaxed) {
-    T value = compare_value;
-    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
-                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
-    return value;
-  } else {
-    return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-  }
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order)
-{
-  return generic_cmpxchg(exchange_value, dest, compare_value, order);
-}
-
-inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order)
-{
-  return generic_cmpxchg(exchange_value, dest, compare_value, order);
-}
-
-inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
-inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
-{
- return __sync_add_and_fetch(dest, add_value);
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest)
-{
-  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest)
-{
- add_ptr(1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest)
-{
- add_ptr(-1, dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
-{
-  intptr_t res = __sync_lock_test_and_set (dest, exchange_value);
-  FULL_MEM_BARRIER;
-  return res;
-}
-
-inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order)
-{
-  return generic_cmpxchg(exchange_value, dest, compare_value, order);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order)
-{
-  return generic_cmpxchg(exchange_value, dest, compare_value, order);
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order)
-{
-  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
-                              (volatile intptr_t*) dest,
-                              (intptr_t) compare_value,
-                              order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
-
-#endif // OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_INLINE_HPP
--- a/src/os_cpu/linux_aarch64/vm/orderAccess_linux_aarch64.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/os_cpu/linux_aarch64/vm/orderAccess_linux_aarch64.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -26,7 +26,7 @@
 #ifndef OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_INLINE_HPP
 #define OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_INLINE_HPP
 
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 #include "vm_version_aarch64.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_HPP
+#define OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_HPP
+
+#ifndef PPC64
+#error "Atomic currently only implemented for PPC64"
+#endif
+
+// Implementation of class atomic
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+//
+// machine barrier instructions:
+//
+// - sync            two-way memory barrier, aka fence
+// - lwsync          orders  Store|Store,
+//                            Load|Store,
+//                            Load|Load,
+//                   but not Store|Load
+// - eieio           orders memory accesses for device memory (only)
+// - isync           invalidates speculatively executed instructions
+//                   From the POWER ISA 2.06 documentation:
+//                    "[...] an isync instruction prevents the execution of
+//                   instructions following the isync until instructions
+//                   preceding the isync have completed, [...]"
+//                   From IBM's AIX assembler reference:
+//                    "The isync [...] instructions causes the processor to
+//                   refetch any instructions that might have been fetched
+//                   prior to the isync instruction. The instruction isync
+//                   causes the processor to wait for all previous instructions
+//                   to complete. Then any instructions already fetched are
+//                   discarded and instruction processing continues in the
+//                   environment established by the previous instructions."
+//
+// semantic barrier instructions:
+// (as defined in orderAccess.hpp)
+//
+// - release         orders Store|Store,       (maps to lwsync)
+//                           Load|Store
+// - acquire         orders  Load|Store,       (maps to lwsync)
+//                           Load|Load
+// - fence           orders Store|Store,       (maps to sync)
+//                           Load|Store,
+//                           Load|Load,
+//                          Store|Load
+//
+
+#define strasm_sync                       "\n  sync    \n"
+#define strasm_lwsync                     "\n  lwsync  \n"
+#define strasm_isync                      "\n  isync   \n"
+#define strasm_release                    strasm_lwsync
+#define strasm_acquire                    strasm_lwsync
+#define strasm_fence                      strasm_sync
+#define strasm_nobarrier                  ""
+#define strasm_nobarrier_clobber_memory   ""
+
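The strasm_* macros let each primitive splice the appropriate barrier directly into its asm template: release and acquire both lower to the cheaper lwsync, and only a full fence pays for sync. Spelled out as standalone helpers (hypothetical names, not in this changeset):

    // The semantic barriers of orderAccess.hpp expressed as PPC instructions.
    inline void ppc_release() { __asm__ __volatile__ ("lwsync" : : : "memory"); }
    inline void ppc_acquire() { __asm__ __volatile__ ("lwsync" : : : "memory"); }
    inline void ppc_fence()   { __asm__ __volatile__ ("sync"   : : : "memory"); }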
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+
+  unsigned int result;
+
+  __asm__ __volatile__ (
+    strasm_lwsync
+    "1: lwarx   %0,  0, %2    \n"
+    "   add     %0, %0, %1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_isync
+    : /*%0*/"=&r" (result)
+    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
+    : "cc", "memory" );
+
+  return (jint) result;
+}
+
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+
+  long result;
+
+  __asm__ __volatile__ (
+    strasm_lwsync
+    "1: ldarx   %0,  0, %2    \n"
+    "   add     %0, %0, %1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_isync
+    : /*%0*/"=&r" (result)
+    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
+    : "cc", "memory" );
+
+  return (intptr_t) result;
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+}
+
+
+inline void Atomic::inc    (volatile jint*     dest) {
+
+  unsigned int temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: lwarx   %0,  0, %2    \n"
+    "   addic   %0, %0,  1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+
+  long temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: ldarx   %0,  0, %2    \n"
+    "   addic   %0, %0,  1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::inc_ptr(volatile void*     dest) {
+  inc_ptr((volatile intptr_t*)dest);
+}
+
+
+inline void Atomic::dec    (volatile jint*     dest) {
+
+  unsigned int temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: lwarx   %0,  0, %2    \n"
+    "   addic   %0, %0, -1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+
+  long temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: ldarx   %0,  0, %2    \n"
+    "   addic   %0, %0, -1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::dec_ptr(volatile void*     dest) {
+  dec_ptr((volatile intptr_t*)dest);
+}
+
+inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
+
+  // Note that xchg doesn't necessarily do an acquire
+  // (see synchronizer.cpp).
+
+  unsigned int old_value;
+  const uint64_t zero = 0;
+
+  __asm__ __volatile__ (
+    /* lwsync */
+    strasm_lwsync
+    /* atomic loop */
+    "1:                                                 \n"
+    "   lwarx   %[old_value], %[dest], %[zero]          \n"
+    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* sync */
+    strasm_sync
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (jint) old_value;
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+
+  // Note that xchg_ptr doesn't necessarily do an acquire
+  // (see synchronizer.cpp).
+
+  long old_value;
+  const uint64_t zero = 0;
+
+  __asm__ __volatile__ (
+    /* lwsync */
+    strasm_lwsync
+    /* atomic loop */
+    "1:                                                 \n"
+    "   ldarx   %[old_value], %[dest], %[zero]          \n"
+    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* sync */
+    strasm_sync
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (intptr_t) old_value;
+}
+
+inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
+  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
+  if (order != memory_order_relaxed) {
+    __asm__ __volatile__ (
+      /* fence */
+      strasm_sync
+      );
+  }
+}
+
+inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
+  if (order != memory_order_relaxed) {
+    __asm__ __volatile__ (
+      /* fence */
+      strasm_sync
+      );
+  }
+}
+
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
+  // specified otherwise (see atomic.hpp).
+
+  // Using 32 bit internally.
+  volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3);
+
+#ifdef VM_LITTLE_ENDIAN
+  const unsigned int shift_amount        = ((uintptr_t)dest & 3) * 8;
+#else
+  const unsigned int shift_amount        = ((~(uintptr_t)dest) & 3) * 8;
+#endif
+  const unsigned int masked_compare_val  = ((unsigned int)(unsigned char)compare_value),
+                     masked_exchange_val = ((unsigned int)(unsigned char)exchange_value),
+                     xor_value           = (masked_compare_val ^ masked_exchange_val) << shift_amount;
+
+  unsigned int old_value, value32;
+
+  cmpxchg_pre_membar(order);
+
+  __asm__ __volatile__ (
+    /* simple guard */
+    "   lbz     %[old_value], 0(%[dest])                  \n"
+    "   cmpw    %[masked_compare_val], %[old_value]       \n"
+    "   bne-    2f                                        \n"
+    /* atomic loop */
+    "1:                                                   \n"
+    "   lwarx   %[value32], 0, %[dest_base]               \n"
+    /* extract byte and compare */
+    "   srd     %[old_value], %[value32], %[shift_amount] \n"
+    "   clrldi  %[old_value], %[old_value], 56            \n"
+    "   cmpw    %[masked_compare_val], %[old_value]       \n"
+    "   bne-    2f                                        \n"
+    /* replace byte and try to store */
+    "   xor     %[value32], %[xor_value], %[value32]      \n"
+    "   stwcx.  %[value32], 0, %[dest_base]               \n"
+    "   bne-    1b                                        \n"
+    /* exit */
+    "2:                                                   \n"
+    /* out */
+    : [old_value]           "=&r"   (old_value),
+      [value32]             "=&r"   (value32),
+                            "=m"    (*dest),
+                            "=m"    (*dest_base)
+    /* in */
+    : [dest]                "b"     (dest),
+      [dest_base]           "b"     (dest_base),
+      [shift_amount]        "r"     (shift_amount),
+      [masked_compare_val]  "r"     (masked_compare_val),
+      [xor_value]           "r"     (xor_value),
+                            "m"     (*dest),
+                            "m"     (*dest_base)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  cmpxchg_post_membar(order);
+
+  return (jbyte)(unsigned char)old_value;
+}
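Since lwarx/stwcx. only operate on aligned 4-byte words, the byte cmpxchg above works on the word containing dest: shift_amount locates the byte lane, and because the fetched word is known to hold compare_value in that lane, XOR-ing in the precomputed xor_value swaps in exchange_value without disturbing the neighboring bytes. The word arithmetic restated in plain C++ (illustrative, little-endian case as in the VM_LITTLE_ENDIAN branch):

    #include <stdint.h>

    // Replace the byte of 'word32' addressed by 'dest' with exchange_value,
    // assuming that byte currently equals compare_value (the asm loop above
    // has already verified this before applying the xor).
    static inline uint32_t replace_byte(uint32_t word32, uintptr_t dest,
                                        uint8_t compare_value,
                                        uint8_t exchange_value) {
      const unsigned shift = (dest & 3) * 8;   // bit offset of the byte lane
      const uint32_t xor_value =
          ((uint32_t)compare_value ^ (uint32_t)exchange_value) << shift;
      return word32 ^ xor_value;               // only the selected lane changes
    }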
+
+inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
+  // specified otherwise (see atomic.hpp).
+
+  unsigned int old_value;
+  const uint64_t zero = 0;
+
+  cmpxchg_pre_membar(order);
+
+  __asm__ __volatile__ (
+    /* simple guard */
+    "   lwz     %[old_value], 0(%[dest])                \n"
+    "   cmpw    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    /* atomic loop */
+    "1:                                                 \n"
+    "   lwarx   %[old_value], %[dest], %[zero]          \n"
+    "   cmpw    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [compare_value]   "r"     (compare_value),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  cmpxchg_post_membar(order);
+
+  return (jint) old_value;
+}
+
+inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
+  // specified otherwise (see atomic.hpp).
+
+  long old_value;
+  const uint64_t zero = 0;
+
+  cmpxchg_pre_membar(order);
+
+  __asm__ __volatile__ (
+    /* simple guard */
+    "   ld      %[old_value], 0(%[dest])                \n"
+    "   cmpd    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    /* atomic loop */
+    "1:                                                 \n"
+    "   ldarx   %[old_value], %[dest], %[zero]          \n"
+    "   cmpd    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [compare_value]   "r"     (compare_value),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  cmpxchg_post_membar(order);
+
+  return (jlong) old_value;
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+#undef strasm_sync
+#undef strasm_lwsync
+#undef strasm_isync
+#undef strasm_release
+#undef strasm_acquire
+#undef strasm_fence
+#undef strasm_nobarrier
+#undef strasm_nobarrier_clobber_memory
+
+#endif // OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_HPP
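The jbyte cmpxchg added above has no byte-wide lwarx/stwcx. pair to lean on, so it reserves the enclosing aligned word and splices the byte in with an xor: once the loaded byte is known to equal compare_value, xor-ing the word with (compare_value ^ exchange_value) << shift_amount rewrites exactly that byte and leaves the other three untouched. A minimal standalone sketch of the splice, using hypothetical values and the little-endian lane numbering of the VM_LITTLE_ENDIAN branch:

    #include <assert.h>
    #include <stdint.h>

    int main() {
      uint32_t value32             = 0x11223344;  // word containing the target byte
      unsigned shift_amount        = 8;           // byte lane 1 (value 0x33)
      uint32_t masked_compare_val  = 0x33;        // expected old byte
      uint32_t masked_exchange_val = 0x77;        // desired new byte
      uint32_t xor_value = (masked_compare_val ^ masked_exchange_val) << shift_amount;

      // The guard the asm performs with srd/clrldi/cmpw:
      assert(((value32 >> shift_amount) & 0xff) == masked_compare_val);

      value32 ^= xor_value;  // replace just that byte
      assert(value32 == 0x11227744);
      return 0;
    }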
--- a/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,482 +0,0 @@
-/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP
-#define OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-#ifndef PPC64
-#error "Atomic currently only implemented for PPC64"
-#endif
-
-// Implementation of class atomic
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
-
-//
-// machine barrier instructions:
-//
-// - sync            two-way memory barrier, aka fence
-// - lwsync          orders  Store|Store,
-//                            Load|Store,
-//                            Load|Load,
-//                   but not Store|Load
-// - eieio           orders memory accesses for device memory (only)
-// - isync           invalidates speculatively executed instructions
-//                   From the POWER ISA 2.06 documentation:
-//                    "[...] an isync instruction prevents the execution of
-//                   instructions following the isync until instructions
-//                   preceding the isync have completed, [...]"
-//                   From IBM's AIX assembler reference:
-//                    "The isync [...] instructions causes the processor to
-//                   refetch any instructions that might have been fetched
-//                   prior to the isync instruction. The instruction isync
-//                   causes the processor to wait for all previous instructions
-//                   to complete. Then any instructions already fetched are
-//                   discarded and instruction processing continues in the
-//                   environment established by the previous instructions."
-//
-// semantic barrier instructions:
-// (as defined in orderAccess.hpp)
-//
-// - release         orders Store|Store,       (maps to lwsync)
-//                           Load|Store
-// - acquire         orders  Load|Store,       (maps to lwsync)
-//                           Load|Load
-// - fence           orders Store|Store,       (maps to sync)
-//                           Load|Store,
-//                           Load|Load,
-//                          Store|Load
-//
-
-#define strasm_sync                       "\n  sync    \n"
-#define strasm_lwsync                     "\n  lwsync  \n"
-#define strasm_isync                      "\n  isync   \n"
-#define strasm_release                    strasm_lwsync
-#define strasm_acquire                    strasm_lwsync
-#define strasm_fence                      strasm_sync
-#define strasm_nobarrier                  ""
-#define strasm_nobarrier_clobber_memory   ""
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-
-  unsigned int result;
-
-  __asm__ __volatile__ (
-    strasm_lwsync
-    "1: lwarx   %0,  0, %2    \n"
-    "   add     %0, %0, %1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_isync
-    : /*%0*/"=&r" (result)
-    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
-    : "cc", "memory" );
-
-  return (jint) result;
-}
-
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-
-  long result;
-
-  __asm__ __volatile__ (
-    strasm_lwsync
-    "1: ldarx   %0,  0, %2    \n"
-    "   add     %0, %0, %1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_isync
-    : /*%0*/"=&r" (result)
-    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
-    : "cc", "memory" );
-
-  return (intptr_t) result;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
-}
-
-
-inline void Atomic::inc    (volatile jint*     dest) {
-
-  unsigned int temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: lwarx   %0,  0, %2    \n"
-    "   addic   %0, %0,  1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-
-  long temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: ldarx   %0,  0, %2    \n"
-    "   addic   %0, %0,  1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
-
-
-inline void Atomic::dec    (volatile jint*     dest) {
-
-  unsigned int temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: lwarx   %0,  0, %2    \n"
-    "   addic   %0, %0, -1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-
-  long temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: ldarx   %0,  0, %2    \n"
-    "   addic   %0, %0, -1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
-
-inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
-
-  // Note that xchg_ptr doesn't necessarily do an acquire
-  // (see synchronizer.cpp).
-
-  unsigned int old_value;
-  const uint64_t zero = 0;
-
-  __asm__ __volatile__ (
-    /* lwsync */
-    strasm_lwsync
-    /* atomic loop */
-    "1:                                                 \n"
-    "   lwarx   %[old_value], %[dest], %[zero]          \n"
-    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* isync */
-    strasm_sync
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  return (jint) old_value;
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-
-  // Note that xchg_ptr doesn't necessarily do an acquire
-  // (see synchronizer.cpp).
-
-  long old_value;
-  const uint64_t zero = 0;
-
-  __asm__ __volatile__ (
-    /* lwsync */
-    strasm_lwsync
-    /* atomic loop */
-    "1:                                                 \n"
-    "   ldarx   %[old_value], %[dest], %[zero]          \n"
-    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* isync */
-    strasm_sync
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  return (intptr_t) old_value;
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
-inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
-}
-
-inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
-
-  // Note that cmpxchg guarantees a two-way memory barrier across
-  // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
-  // specified otherwise (see atomic.hpp).
-
-  // Using 32 bit internally.
-  volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3);
-
-#ifdef VM_LITTLE_ENDIAN
-  const unsigned int shift_amount        = ((uintptr_t)dest & 3) * 8;
-#else
-  const unsigned int shift_amount        = ((~(uintptr_t)dest) & 3) * 8;
-#endif
-  const unsigned int masked_compare_val  = ((unsigned int)(unsigned char)compare_value),
-                     masked_exchange_val = ((unsigned int)(unsigned char)exchange_value),
-                     xor_value           = (masked_compare_val ^ masked_exchange_val) << shift_amount;
-
-  unsigned int old_value, value32;
-
-  cmpxchg_pre_membar(order);
-
-  __asm__ __volatile__ (
-    /* simple guard */
-    "   lbz     %[old_value], 0(%[dest])                  \n"
-    "   cmpw    %[masked_compare_val], %[old_value]       \n"
-    "   bne-    2f                                        \n"
-    /* atomic loop */
-    "1:                                                   \n"
-    "   lwarx   %[value32], 0, %[dest_base]               \n"
-    /* extract byte and compare */
-    "   srd     %[old_value], %[value32], %[shift_amount] \n"
-    "   clrldi  %[old_value], %[old_value], 56            \n"
-    "   cmpw    %[masked_compare_val], %[old_value]       \n"
-    "   bne-    2f                                        \n"
-    /* replace byte and try to store */
-    "   xor     %[value32], %[xor_value], %[value32]      \n"
-    "   stwcx.  %[value32], 0, %[dest_base]               \n"
-    "   bne-    1b                                        \n"
-    /* exit */
-    "2:                                                   \n"
-    /* out */
-    : [old_value]           "=&r"   (old_value),
-      [value32]             "=&r"   (value32),
-                            "=m"    (*dest),
-                            "=m"    (*dest_base)
-    /* in */
-    : [dest]                "b"     (dest),
-      [dest_base]           "b"     (dest_base),
-      [shift_amount]        "r"     (shift_amount),
-      [masked_compare_val]  "r"     (masked_compare_val),
-      [xor_value]           "r"     (xor_value),
-                            "m"     (*dest),
-                            "m"     (*dest_base)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  cmpxchg_post_membar(order);
-
-  return (jbyte)(unsigned char)old_value;
-}
-
-inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
-
-  // Note that cmpxchg guarantees a two-way memory barrier across
-  // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
-  // specified otherwise (see atomic.hpp).
-
-  unsigned int old_value;
-  const uint64_t zero = 0;
-
-  cmpxchg_pre_membar(order);
-
-  __asm__ __volatile__ (
-    /* simple guard */
-    "   lwz     %[old_value], 0(%[dest])                \n"
-    "   cmpw    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    /* atomic loop */
-    "1:                                                 \n"
-    "   lwarx   %[old_value], %[dest], %[zero]          \n"
-    "   cmpw    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [compare_value]   "r"     (compare_value),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  cmpxchg_post_membar(order);
-
-  return (jint) old_value;
-}
-
-inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
-
-  // Note that cmpxchg guarantees a two-way memory barrier across
-  // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
-  // specified otherwise (see atomic.hpp).
-
-  long old_value;
-  const uint64_t zero = 0;
-
-  cmpxchg_pre_membar(order);
-
-  __asm__ __volatile__ (
-    /* simple guard */
-    "   ld      %[old_value], 0(%[dest])                \n"
-    "   cmpd    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    /* atomic loop */
-    "1:                                                 \n"
-    "   ldarx   %[old_value], %[dest], %[zero]          \n"
-    "   cmpd    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [compare_value]   "r"     (compare_value),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  cmpxchg_post_membar(order);
-
-  return (jlong) old_value;
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-#undef strasm_sync
-#undef strasm_lwsync
-#undef strasm_isync
-#undef strasm_release
-#undef strasm_acquire
-#undef strasm_fence
-#undef strasm_nobarrier
-#undef strasm_nobarrier_clobber_memory
-
-#endif // OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP
--- a/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,9 +25,6 @@
 #ifndef OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP
 #define OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP
 
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
 // Implementation of class atomic
 
 inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_x86/vm/atomic_linux_x86.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_HPP
+
+#include "runtime/os.hpp"
+
+// Implementation of class atomic
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+
+// Adding a lock prefix to an instruction on an MP machine
+#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
+
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+  jint addend = add_value;
+  int mp = os::is_MP();
+  __asm__ volatile (  LOCK_IF_MP(%3) "xaddl %0,(%2)"
+                    : "=r" (addend)
+                    : "0" (addend), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return addend + add_value;
+}
+
+inline void Atomic::inc    (volatile jint*     dest) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%1) "addl $1,(%0)" :
+                    : "r" (dest), "r" (mp) : "cc", "memory");
+}
+
+inline void Atomic::inc_ptr(volatile void*     dest) {
+  inc_ptr((volatile intptr_t*)dest);
+}
+
+inline void Atomic::dec    (volatile jint*     dest) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%1) "subl $1,(%0)" :
+                    : "r" (dest), "r" (mp) : "cc", "memory");
+}
+
+inline void Atomic::dec_ptr(volatile void*     dest) {
+  dec_ptr((volatile intptr_t*)dest);
+}
+
+inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
+  __asm__ volatile (  "xchgl (%2),%0"
+                    : "=r" (exchange_value)
+                    : "0" (exchange_value), "r" (dest)
+                    : "memory");
+  return exchange_value;
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return exchange_value;
+}
+
+inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return exchange_value;
+}
+
+#ifdef AMD64
+inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  intptr_t addend = add_value;
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
+                        : "=r" (addend)
+                        : "0" (addend), "r" (dest), "r" (mp)
+                        : "cc", "memory");
+  return addend + add_value;
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%1) "addq $1,(%0)"
+                        :
+                        : "r" (dest), "r" (mp)
+                        : "cc", "memory");
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%1) "subq $1,(%0)"
+                        :
+                        : "r" (dest), "r" (mp)
+                        : "cc", "memory");
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  __asm__ __volatile__ ("xchgq (%2),%0"
+                        : "=r" (exchange_value)
+                        : "0" (exchange_value), "r" (dest)
+                        : "memory");
+  return exchange_value;
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
+                        : "=a" (exchange_value)
+                        : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                        : "cc", "memory");
+  return exchange_value;
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+#else // !AMD64
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
+}
+
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  inc((volatile jint*)dest);
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  dec((volatile jint*)dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+extern "C" {
+  // defined in linux_x86.s
+  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
+  void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, os::is_MP());
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  _Atomic_move_long(src, &dest);
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, dest);
+}
+
+#endif // AMD64
+
+#endif // OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_HPP
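The LOCK_IF_MP macro used throughout this new file branches over the lock prefix at runtime, so uniprocessor builds never pay for bus locking. Roughly, the Atomic::add above turns into the following sequence (illustrative pseudo-assembly, not exact compiler output):

    // cmp   $0, mp        ; os::is_MP() result, passed in a register
    // je    1f            ; uniprocessor: plain xadd is already atomic
    // lock                ; multiprocessor: lock the cache line for the RMW
    // 1:
    // xaddl %eax, (dest)  ; dest += addend; old value comes back in %eax

Since xadd hands back the pre-add contents of dest, the C++ wrapper reconstructs the post-add result as addend + add_value.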
--- a/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,228 +0,0 @@
-/*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_INLINE_HPP
-#define OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-// Implementation of class atomic
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-
-// Adding a lock prefix to an instruction on MP machine
-#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  jint addend = add_value;
-  int mp = os::is_MP();
-  __asm__ volatile (  LOCK_IF_MP(%3) "xaddl %0,(%2)"
-                    : "=r" (addend)
-                    : "0" (addend), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-  return addend + add_value;
-}
-
-inline void Atomic::inc    (volatile jint*     dest) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%1) "addl $1,(%0)" :
-                    : "r" (dest), "r" (mp) : "cc", "memory");
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
-
-inline void Atomic::dec    (volatile jint*     dest) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%1) "subl $1,(%0)" :
-                    : "r" (dest), "r" (mp) : "cc", "memory");
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
-  __asm__ volatile (  "xchgl (%2),%0"
-                    : "=r" (exchange_value)
-                    : "0" (exchange_value), "r" (dest)
-                    : "memory");
-  return exchange_value;
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
-                    : "=a" (exchange_value)
-                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-  return exchange_value;
-}
-
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
-                    : "=a" (exchange_value)
-                    : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-  return exchange_value;
-}
-
-#ifdef AMD64
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  intptr_t addend = add_value;
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
-                        : "=r" (addend)
-                        : "0" (addend), "r" (dest), "r" (mp)
-                        : "cc", "memory");
-  return addend + add_value;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%1) "addq $1,(%0)"
-                        :
-                        : "r" (dest), "r" (mp)
-                        : "cc", "memory");
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%1) "subq $1,(%0)"
-                        :
-                        : "r" (dest), "r" (mp)
-                        : "cc", "memory");
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  __asm__ __volatile__ ("xchgq (%2),%0"
-                        : "=r" (exchange_value)
-                        : "0" (exchange_value), "r" (dest)
-                        : "memory");
-  return exchange_value;
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
-                        : "=a" (exchange_value)
-                        : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                        : "cc", "memory");
-  return exchange_value;
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
-
-#else // !AMD64
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  inc((volatile jint*)dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  dec((volatile jint*)dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-extern "C" {
-  // defined in linux_x86.s
-  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
-  void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, os::is_MP());
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) {
-  volatile jlong dest;
-  _Atomic_move_long(src, &dest);
-  return dest;
-}
-
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, dest);
-}
-
-#endif // AMD64
-
-#endif // OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_INLINE_HPP
--- a/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
 #define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
 
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
+#define OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
+
+#include "runtime/os.hpp"
+
+// Implementation of class atomic
+
+#ifdef M68K
+
+/*
+ * __m68k_cmpxchg
+ *
+ * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
+ * Returns newval on success and oldval if no exchange happened.
+ * This implementation is processor specific and works on
+ * 68020 68030 68040 and 68060.
+ *
+ * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
+ * instruction.
+ * Using a kernel helper would be better for a complete implementation on this arch.
+ *
+ */
+
+static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
+  int ret;
+  __asm __volatile ("cas%.l %0,%2,%1"
+                   : "=d" (ret), "+m" (*(ptr))
+                   : "d" (newval), "0" (oldval));
+  return ret;
+}
+
+/* Perform an atomic compare and swap: if the current value of `*PTR'
+   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
+   `*PTR' before the operation.*/
+static inline int m68k_compare_and_swap(volatile int *ptr,
+                                        int oldval,
+                                        int newval) {
+  for (;;) {
+      int prev = *ptr;
+      if (prev != oldval)
+        return prev;
+
+      if (__m68k_cmpxchg (prev, newval, ptr) == newval)
+        // Success.
+        return prev;
+
+      // We failed even though prev == oldval.  Try again.
+    }
+}
+
+/* Atomically add an int to memory.  */
+static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
+  for (;;) {
+      // Loop until success.
+
+      int prev = *ptr;
+
+      if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
+        return prev + add_value;
+    }
+}
+
+/* Atomically write VALUE into `*PTR' and returns the previous
+   contents of `*PTR'.  */
+static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
+  for (;;) {
+      // Loop until success.
+      int prev = *ptr;
+
+      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
+        return prev;
+    }
+}
+#endif // M68K
+
+#ifdef ARM
+
+/*
+ * __kernel_cmpxchg
+ *
+ * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
+ * Return zero if *ptr was changed or non-zero if no exchange happened.
+ * The C flag is also set if *ptr was changed to allow for assembly
+ * optimization in the calling code.
+ *
+ */
+
+typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
+#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
+
+
+
+/* Perform an atomic compare and swap: if the current value of `*PTR'
+   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
+   `*PTR' before the operation.*/
+static inline int arm_compare_and_swap(volatile int *ptr,
+                                       int oldval,
+                                       int newval) {
+  for (;;) {
+      int prev = *ptr;
+      if (prev != oldval)
+        return prev;
+
+      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
+        // Success.
+        return prev;
+
+      // We failed even though prev == oldval.  Try again.
+    }
+}
+
+/* Atomically add an int to memory.  */
+static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
+  for (;;) {
+      // Loop until a __kernel_cmpxchg succeeds.
+
+      int prev = *ptr;
+
+      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
+        return prev + add_value;
+    }
+}
+
+/* Atomically write VALUE into `*PTR' and returns the previous
+   contents of `*PTR'.  */
+static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
+  for (;;) {
+      // Loop until a __kernel_cmpxchg succeeds.
+      int prev = *ptr;
+
+      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
+        return prev;
+    }
+}
+#endif // ARM
+
+inline void Atomic::store(jint store_value, volatile jint* dest) {
+  *dest = store_value;
+}
+
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
+  *dest = store_value;
+}
+
+inline jint Atomic::add(jint add_value, volatile jint* dest) {
+#ifdef ARM
+  return arm_add_and_fetch(dest, add_value);
+#else
+#ifdef M68K
+  return m68k_add_and_fetch(dest, add_value);
+#else
+  return __sync_add_and_fetch(dest, add_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+#ifdef ARM
+  return arm_add_and_fetch(dest, add_value);
+#else
+#ifdef M68K
+  return m68k_add_and_fetch(dest, add_value);
+#else
+  return __sync_add_and_fetch(dest, add_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
+  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
+}
+
+inline void Atomic::inc(volatile jint* dest) {
+  add(1, dest);
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  add_ptr(1, dest);
+}
+
+inline void Atomic::inc_ptr(volatile void* dest) {
+  add_ptr(1, dest);
+}
+
+inline void Atomic::dec(volatile jint* dest) {
+  add(-1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  add_ptr(-1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile void* dest) {
+  add_ptr(-1, dest);
+}
+
+inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
+#ifdef ARM
+  return arm_lock_test_and_set(dest, exchange_value);
+#else
+#ifdef M68K
+  return m68k_lock_test_and_set(dest, exchange_value);
+#else
+  // __sync_lock_test_and_set is a bizarrely named atomic exchange
+  // operation.  Note that some platforms only support this with the
+  // limitation that the only valid value to store is the immediate
+  // constant 1.  There is a test for this in JNI_CreateJavaVM().
+  jint result = __sync_lock_test_and_set (dest, exchange_value);
+  // All atomic operations are expected to be full memory barriers
+  // (see atomic.hpp). However, __sync_lock_test_and_set is not
+  // a full memory barrier, but an acquire barrier. Hence, this added
+  // barrier.
+  __sync_synchronize();
+  return result;
+#endif // M68K
+#endif // ARM
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
+                                 volatile intptr_t* dest) {
+#ifdef ARM
+  return arm_lock_test_and_set(dest, exchange_value);
+#else
+#ifdef M68K
+  return m68k_lock_test_and_set(dest, exchange_value);
+#else
+  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
+  __sync_synchronize();
+  return result;
+#endif // M68K
+#endif // ARM
+}
+
+inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
+  return (void *) xchg_ptr((intptr_t) exchange_value,
+                           (volatile intptr_t*) dest);
+}
+
+inline jint Atomic::cmpxchg(jint exchange_value,
+                            volatile jint* dest,
+                            jint compare_value,
+                            cmpxchg_memory_order order) {
+#ifdef ARM
+  return arm_compare_and_swap(dest, compare_value, exchange_value);
+#else
+#ifdef M68K
+  return m68k_compare_and_swap(dest, compare_value, exchange_value);
+#else
+  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline jlong Atomic::cmpxchg(jlong exchange_value,
+                             volatile jlong* dest,
+                             jlong compare_value,
+                             cmpxchg_memory_order order) {
+
+  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
+                                    volatile intptr_t* dest,
+                                    intptr_t compare_value,
+                                    cmpxchg_memory_order order) {
+#ifdef ARM
+  return arm_compare_and_swap(dest, compare_value, exchange_value);
+#else
+#ifdef M68K
+  return m68k_compare_and_swap(dest, compare_value, exchange_value);
+#else
+  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline void* Atomic::cmpxchg_ptr(void* exchange_value,
+                                 volatile void* dest,
+                                 void* compare_value,
+                                 cmpxchg_memory_order order) {
+
+  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
+                              (volatile intptr_t*) dest,
+                              (intptr_t) compare_value,
+                              order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  os::atomic_copy64(src, &dest);
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  os::atomic_copy64((volatile jlong*)&store_value, dest);
+}
+
+#endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
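Outside the ARM and M68K special cases, this Zero port leans entirely on the GCC __sync builtins, which imply a full memory barrier, except for __sync_lock_test_and_set, which is only an acquire barrier; that is why xchg above adds an explicit __sync_synchronize(). A self-contained sketch of the CAS semantics relied on here (plain C++, no HotSpot types):

    #include <cassert>

    int main() {
      volatile int v = 5;
      // Full-barrier CAS returning the previous value:
      int old = __sync_val_compare_and_swap(&v, 5, 9);  // matches: v becomes 9
      assert(old == 5 && v == 9);
      old = __sync_val_compare_and_swap(&v, 5, 7);      // no match: v unchanged
      assert(old == 9 && v == 9);
      return 0;
    }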
--- a/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,328 +0,0 @@
-/*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP
-#define OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-// Implementation of class atomic
-
-#ifdef M68K
-
-/*
- * __m68k_cmpxchg
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
- * Returns newval on success and oldval if no exchange happened.
- * This implementation is processor specific and works on
- * 68020 68030 68040 and 68060.
- *
- * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
- * instruction.
- * Using a kernelhelper would be better for arch complete implementation.
- *
- */
-
-static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
-  int ret;
-  __asm __volatile ("cas%.l %0,%2,%1"
-                   : "=d" (ret), "+m" (*(ptr))
-                   : "d" (newval), "0" (oldval));
-  return ret;
-}
-
-/* Perform an atomic compare and swap: if the current value of `*PTR'
-   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
-   `*PTR' before the operation.*/
-static inline int m68k_compare_and_swap(volatile int *ptr,
-                                        int oldval,
-                                        int newval) {
-  for (;;) {
-      int prev = *ptr;
-      if (prev != oldval)
-        return prev;
-
-      if (__m68k_cmpxchg (prev, newval, ptr) == newval)
-        // Success.
-        return prev;
-
-      // We failed even though prev == oldval.  Try again.
-    }
-}
-
-/* Atomically add an int to memory.  */
-static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
-  for (;;) {
-      // Loop until success.
-
-      int prev = *ptr;
-
-      if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
-        return prev + add_value;
-    }
-}
-
-/* Atomically write VALUE into `*PTR' and returns the previous
-   contents of `*PTR'.  */
-static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
-  for (;;) {
-      // Loop until success.
-      int prev = *ptr;
-
-      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
-        return prev;
-    }
-}
-#endif // M68K
-
-#ifdef ARM
-
-/*
- * __kernel_cmpxchg
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
- * Return zero if *ptr was changed or non-zero if no exchange happened.
- * The C flag is also set if *ptr was changed to allow for assembly
- * optimization in the calling code.
- *
- */
-
-typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
-#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
-
-
-
-/* Perform an atomic compare and swap: if the current value of `*PTR'
-   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
-   `*PTR' before the operation.*/
-static inline int arm_compare_and_swap(volatile int *ptr,
-                                       int oldval,
-                                       int newval) {
-  for (;;) {
-      int prev = *ptr;
-      if (prev != oldval)
-        return prev;
-
-      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
-        // Success.
-        return prev;
-
-      // We failed even though prev == oldval.  Try again.
-    }
-}
-
-/* Atomically add an int to memory.  */
-static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
-  for (;;) {
-      // Loop until a __kernel_cmpxchg succeeds.
-
-      int prev = *ptr;
-
-      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
-        return prev + add_value;
-    }
-}
-
-/* Atomically write VALUE into `*PTR' and returns the previous
-   contents of `*PTR'.  */
-static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
-  for (;;) {
-      // Loop until a __kernel_cmpxchg succeeds.
-      int prev = *ptr;
-
-      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
-        return prev;
-    }
-}
-#endif // ARM
-
-inline void Atomic::store(jint store_value, volatile jint* dest) {
-  *dest = store_value;
-}
-
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
-  *dest = store_value;
-}
-
-inline jint Atomic::add(jint add_value, volatile jint* dest) {
-#ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
-#else
-#ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
-#else
-  return __sync_add_and_fetch(dest, add_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-#ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
-#else
-#ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
-#else
-  return __sync_add_and_fetch(dest, add_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
-}
-
-inline void Atomic::inc(volatile jint* dest) {
-  add(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  add_ptr(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile void* dest) {
-  add_ptr(1, dest);
-}
-
-inline void Atomic::dec(volatile jint* dest) {
-  add(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  add_ptr(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile void* dest) {
-  add_ptr(-1, dest);
-}
-
-inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
-#ifdef ARM
-  return arm_lock_test_and_set(dest, exchange_value);
-#else
-#ifdef M68K
-  return m68k_lock_test_and_set(dest, exchange_value);
-#else
-  // __sync_lock_test_and_set is a bizarrely named atomic exchange
-  // operation.  Note that some platforms only support this with the
-  // limitation that the only valid value to store is the immediate
-  // constant 1.  There is a test for this in JNI_CreateJavaVM().
-  jint result = __sync_lock_test_and_set (dest, exchange_value);
-  // All atomic operations are expected to be full memory barriers
-  // (see atomic.hpp). However, __sync_lock_test_and_set is not
-  // a full memory barrier, but an acquire barrier. Hence, this added
-  // barrier.
-  __sync_synchronize();
-  return result;
-#endif // M68K
-#endif // ARM
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
-                                 volatile intptr_t* dest) {
-#ifdef ARM
-  return arm_lock_test_and_set(dest, exchange_value);
-#else
-#ifdef M68K
-  return m68k_lock_test_and_set(dest, exchange_value);
-#else
-  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
-  __sync_synchronize();
-  return result;
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-  return (void *) xchg_ptr((intptr_t) exchange_value,
-                           (volatile intptr_t*) dest);
-}
-
-inline jint Atomic::cmpxchg(jint exchange_value,
-                            volatile jint* dest,
-                            jint compare_value,
-                            cmpxchg_memory_order order) {
-#ifdef ARM
-  return arm_compare_and_swap(dest, compare_value, exchange_value);
-#else
-#ifdef M68K
-  return m68k_compare_and_swap(dest, compare_value, exchange_value);
-#else
-  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline jlong Atomic::cmpxchg(jlong exchange_value,
-                             volatile jlong* dest,
-                             jlong compare_value,
-                             cmpxchg_memory_order order) {
-
-  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
-                                    volatile intptr_t* dest,
-                                    intptr_t compare_value,
-                                    cmpxchg_memory_order order) {
-#ifdef ARM
-  return arm_compare_and_swap(dest, compare_value, exchange_value);
-#else
-#ifdef M68K
-  return m68k_compare_and_swap(dest, compare_value, exchange_value);
-#else
-  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value,
-                                 volatile void* dest,
-                                 void* compare_value,
-                                 cmpxchg_memory_order order) {
-
-  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
-                              (volatile intptr_t*) dest,
-                              (intptr_t) compare_value,
-                              order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) {
-  volatile jlong dest;
-  os::atomic_copy64(src, &dest);
-  return dest;
-}
-
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  os::atomic_copy64((volatile jlong*)&store_value, dest);
-}
-
-#endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP
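
The Zero code above builds each primitive out of a compare-and-swap retry loop. As a minimal standalone sketch of that pattern, assuming only the GCC __sync builtins (the function name below is illustrative, not HotSpot API):

#include <stdint.h>

// Retry until our CAS wins; mirrors arm_add_and_fetch above.
static inline int32_t cas_add_and_fetch(volatile int32_t* dest, int32_t add_value) {
  for (;;) {
    int32_t prev = *dest;
    // Install prev + add_value only if *dest still holds prev.
    if (__sync_val_compare_and_swap(dest, prev, prev + add_value) == prev) {
      return prev + add_value;
    }
    // Another thread raced us; reload and retry.
  }
}
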
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_SOLARIS_SPARC_VM_ATOMIC_SOLARIS_SPARC_HPP
+#define OS_CPU_SOLARIS_SPARC_VM_ATOMIC_SOLARIS_SPARC_HPP
+
+#include "runtime/os.hpp"
+
+// Implementation of class atomic
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+inline void Atomic::inc    (volatile jint*     dest) { (void)add    (1, dest); }
+inline void Atomic::inc_ptr(volatile intptr_t* dest) { (void)add_ptr(1, dest); }
+inline void Atomic::inc_ptr(volatile void*     dest) { (void)add_ptr(1, dest); }
+
+inline void Atomic::dec    (volatile jint*     dest) { (void)add    (-1, dest); }
+inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
+inline void Atomic::dec_ptr(volatile void*     dest) { (void)add_ptr(-1, dest); }
+
+
+#ifdef _LP64
+
+inline void Atomic::store(jlong store_value, jlong* dest) { *dest = store_value; }
+inline void Atomic::store(jlong store_value, volatile jlong* dest) { *dest = store_value; }
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+#else
+
+extern "C" void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst);
+
+inline void Atomic_move_long(volatile jlong* src, volatile jlong* dst) {
+  _Atomic_move_long_v9(src, dst);
+}
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  Atomic_move_long(src, &dest);
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  Atomic_move_long((volatile jlong*)&store_value, dest);
+}
+
+#endif
+
+#ifdef _GNU_SOURCE
+
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+  intptr_t rv;
+  __asm__ volatile(
+    "1: \n\t"
+    " ld     [%2], %%o2\n\t"
+    " add    %1, %%o2, %%o3\n\t"
+    " cas    [%2], %%o2, %%o3\n\t"
+    " cmp    %%o2, %%o3\n\t"
+    " bne    1b\n\t"
+    "  nop\n\t"
+    " add    %1, %%o2, %0\n\t"
+    : "=r" (rv)
+    : "r" (add_value), "r" (dest)
+    : "memory", "o2", "o3");
+  return rv;
+}
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  intptr_t rv;
+#ifdef _LP64
+  __asm__ volatile(
+    "1: \n\t"
+    " ldx    [%2], %%o2\n\t"
+    " add    %0, %%o2, %%o3\n\t"
+    " casx   [%2], %%o2, %%o3\n\t"
+    " cmp    %%o2, %%o3\n\t"
+    " bne    %%xcc, 1b\n\t"
+    "  nop\n\t"
+    " add    %0, %%o2, %0\n\t"
+    : "=r" (rv)
+    : "r" (add_value), "r" (dest)
+    : "memory", "o2", "o3");
+#else //_LP64
+  __asm__ volatile(
+    "1: \n\t"
+    " ld     [%2], %%o2\n\t"
+    " add    %1, %%o2, %%o3\n\t"
+    " cas    [%2], %%o2, %%o3\n\t"
+    " cmp    %%o2, %%o3\n\t"
+    " bne    1b\n\t"
+    "  nop\n\t"
+    " add    %1, %%o2, %0\n\t"
+    : "=r" (rv)
+    : "r" (add_value), "r" (dest)
+    : "memory", "o2", "o3");
+#endif // _LP64
+  return rv;
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest);
+}
+
+
+inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
+  intptr_t rv = exchange_value;
+  __asm__ volatile(
+    " swap   [%2],%1\n\t"
+    : "=r" (rv)
+    : "0" (exchange_value) /* we use same register as for return value */, "r" (dest)
+    : "memory");
+  return rv;
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  intptr_t rv = exchange_value;
+#ifdef _LP64
+  __asm__ volatile(
+    "1:\n\t"
+    " mov    %1, %%o3\n\t"
+    " ldx    [%2], %%o2\n\t"
+    " casx   [%2], %%o2, %%o3\n\t"
+    " cmp    %%o2, %%o3\n\t"
+    " bne    %%xcc, 1b\n\t"
+    "  nop\n\t"
+    " mov    %%o2, %0\n\t"
+    : "=r" (rv)
+    : "r" (exchange_value), "r" (dest)
+    : "memory", "o2", "o3");
+#else  //_LP64
+  __asm__ volatile(
+    "swap    [%2],%1\n\t"
+    : "=r" (rv)
+    : "0" (exchange_value) /* we use same register as for return value */, "r" (dest)
+    : "memory");
+#endif // _LP64
+  return rv;
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+
+inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+  jint rv;
+  __asm__ volatile(
+    " cas    [%2], %3, %0"
+    : "=r" (rv)
+    : "0" (exchange_value), "r" (dest), "r" (compare_value)
+    : "memory");
+  return rv;
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+#ifdef _LP64
+  jlong rv;
+  __asm__ volatile(
+    " casx   [%2], %3, %0"
+    : "=r" (rv)
+    : "0" (exchange_value), "r" (dest), "r" (compare_value)
+    : "memory");
+  return rv;
+#else  //_LP64
+  volatile jlong_accessor evl, cvl, rv;
+  evl.long_value = exchange_value;
+  cvl.long_value = compare_value;
+
+  __asm__ volatile(
+    " sllx   %2, 32, %2\n\t"
+    " srl    %3, 0,  %3\n\t"
+    " or     %2, %3, %2\n\t"
+    " sllx   %5, 32, %5\n\t"
+    " srl    %6, 0,  %6\n\t"
+    " or     %5, %6, %5\n\t"
+    " casx   [%4], %5, %2\n\t"
+    " srl    %2, 0, %1\n\t"
+    " srlx   %2, 32, %0\n\t"
+    : "=r" (rv.words[0]), "=r" (rv.words[1])
+    : "r"  (evl.words[0]), "r" (evl.words[1]), "r" (dest), "r" (cvl.words[0]), "r" (cvl.words[1])
+    : "memory");
+
+  return rv.long_value;
+#endif  //_LP64
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  intptr_t rv;
+#ifdef _LP64
+  __asm__ volatile(
+    " casx    [%2], %3, %0"
+    : "=r" (rv)
+    : "0" (exchange_value), "r" (dest), "r" (compare_value)
+    : "memory");
+#else  //_LP64
+  __asm__ volatile(
+    " cas     [%2], %3, %0"
+    : "=r" (rv)
+    : "0" (exchange_value), "r" (dest), "r" (compare_value)
+    : "memory");
+#endif // _LP64
+  return rv;
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
+}
+
+#else // _GNU_SOURCE
+
+#if defined(COMPILER2) || defined(_LP64)
+
+// This is the interface to the atomic instructions in solaris_sparc.il.
+// It is messy because we still need to support v8, where these
+// instructions are illegal.  Once SPARC v8 support is dropped, much of
+// this code can go away.  Compiler2 does not support v8, so the
+// conditional code omits the instruction set check.
+
+extern "C" jint     _Atomic_swap32(jint     exchange_value, volatile jint*     dest);
+extern "C" intptr_t _Atomic_swap64(intptr_t exchange_value, volatile intptr_t* dest);
+
+extern "C" jint     _Atomic_cas32(jint     exchange_value, volatile jint*     dest, jint     compare_value);
+extern "C" intptr_t _Atomic_cas64(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value);
+extern "C" jlong    _Atomic_casl (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value);
+
+extern "C" jint     _Atomic_add32(jint     inc,       volatile jint*     dest);
+extern "C" intptr_t _Atomic_add64(intptr_t add_value, volatile intptr_t* dest);
+
+
+inline jint     Atomic::add     (jint    add_value, volatile jint*     dest) {
+  return _Atomic_add32(add_value, dest);
+}
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+#ifdef _LP64
+  return _Atomic_add64(add_value, dest);
+#else  //_LP64
+  return _Atomic_add32(add_value, dest);
+#endif // _LP64
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest);
+}
+
+
+inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
+  return _Atomic_swap32(exchange_value, dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+#ifdef _LP64
+  return _Atomic_swap64(exchange_value, dest);
+#else  // _LP64
+  return _Atomic_swap32(exchange_value, dest);
+#endif // _LP64
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+
+inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+  return _Atomic_cas32(exchange_value, dest, compare_value);
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+#ifdef _LP64
+  // Return the 64-bit value in %o0.
+  return _Atomic_cas64((intptr_t)exchange_value, (intptr_t *)dest, (intptr_t)compare_value);
+#else  // _LP64
+  // Return the 64-bit value in %o0,%o1 by hand.
+  return _Atomic_casl(exchange_value, dest, compare_value);
+#endif // _LP64
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+#ifdef _LP64
+  return _Atomic_cas64(exchange_value, dest, compare_value);
+#else  // _LP64
+  return _Atomic_cas32(exchange_value, dest, compare_value);
+#endif // _LP64
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
+}
+
+
+#else // _LP64 || COMPILER2
+
+
+// 32-bit compiler1 only
+
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+  return (*os::atomic_add_func)(add_value, dest);
+}
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add((jint)add_value, (volatile jint*)dest);
+}
+
+
+inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
+  return (*os::atomic_xchg_func)(exchange_value, dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+
+inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+  return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+  return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+#endif // _LP64 || COMPILER2
+
+#endif // _GNU_SOURCE
+
+#endif // OS_CPU_SOLARIS_SPARC_VM_ATOMIC_SOLARIS_SPARC_HPP
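
In the 32-bit jlong cmpxchg above, the sllx/srl/or sequence packs each pair of 32-bit words into a single 64-bit register before the casx, then splits the result back. A hedged C++ restatement of that packing, assuming a jlong_accessor-style union in which words[0] is the high half on big-endian SPARC:

#include <stdint.h>

// Illustrative stand-in for HotSpot's jlong_accessor (layout assumed).
union long_words {
  int64_t  long_value;
  uint32_t words[2];   // words[0] = high half, words[1] = low half
};

// Equivalent of the "sllx ..., 32 / srl ..., 0 / or" sequence feeding casx.
static inline int64_t pack(uint32_t hi, uint32_t lo) {
  return (int64_t)(((uint64_t)hi << 32) | (uint64_t)lo);
}
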
--- a/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,377 +0,0 @@
-/*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_SOLARIS_SPARC_VM_ATOMIC_SOLARIS_SPARC_INLINE_HPP
-#define OS_CPU_SOLARIS_SPARC_VM_ATOMIC_SOLARIS_SPARC_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-// Implementation of class atomic
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-inline void Atomic::inc    (volatile jint*     dest) { (void)add    (1, dest); }
-inline void Atomic::inc_ptr(volatile intptr_t* dest) { (void)add_ptr(1, dest); }
-inline void Atomic::inc_ptr(volatile void*     dest) { (void)add_ptr(1, dest); }
-
-inline void Atomic::dec    (volatile jint*     dest) { (void)add    (-1, dest); }
-inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
-inline void Atomic::dec_ptr(volatile void*     dest) { (void)add_ptr(-1, dest); }
-
-
-#ifdef _LP64
-
-inline void Atomic::store(jlong store_value, jlong* dest) { *dest = store_value; }
-inline void Atomic::store(jlong store_value, volatile jlong* dest) { *dest = store_value; }
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
-
-#else
-
-extern "C" void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst);
-
-inline void Atomic_move_long(volatile jlong* src, volatile jlong* dst) {
-  _Atomic_move_long_v9(src, dst);
-}
-
-inline jlong Atomic::load(volatile jlong* src) {
-  volatile jlong dest;
-  Atomic_move_long(src, &dest);
-  return dest;
-}
-
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  Atomic_move_long((volatile jlong*)&store_value, dest);
-}
-
-#endif
-
-#ifdef _GNU_SOURCE
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  intptr_t rv;
-  __asm__ volatile(
-    "1: \n\t"
-    " ld     [%2], %%o2\n\t"
-    " add    %1, %%o2, %%o3\n\t"
-    " cas    [%2], %%o2, %%o3\n\t"
-    " cmp    %%o2, %%o3\n\t"
-    " bne    1b\n\t"
-    "  nop\n\t"
-    " add    %1, %%o2, %0\n\t"
-    : "=r" (rv)
-    : "r" (add_value), "r" (dest)
-    : "memory", "o2", "o3");
-  return rv;
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  intptr_t rv;
-#ifdef _LP64
-  __asm__ volatile(
-    "1: \n\t"
-    " ldx    [%2], %%o2\n\t"
-    " add    %0, %%o2, %%o3\n\t"
-    " casx   [%2], %%o2, %%o3\n\t"
-    " cmp    %%o2, %%o3\n\t"
-    " bne    %%xcc, 1b\n\t"
-    "  nop\n\t"
-    " add    %0, %%o2, %0\n\t"
-    : "=r" (rv)
-    : "r" (add_value), "r" (dest)
-    : "memory", "o2", "o3");
-#else //_LP64
-  __asm__ volatile(
-    "1: \n\t"
-    " ld     [%2], %%o2\n\t"
-    " add    %1, %%o2, %%o3\n\t"
-    " cas    [%2], %%o2, %%o3\n\t"
-    " cmp    %%o2, %%o3\n\t"
-    " bne    1b\n\t"
-    "  nop\n\t"
-    " add    %1, %%o2, %0\n\t"
-    : "=r" (rv)
-    : "r" (add_value), "r" (dest)
-    : "memory", "o2", "o3");
-#endif // _LP64
-  return rv;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest);
-}
-
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
-  intptr_t rv = exchange_value;
-  __asm__ volatile(
-    " swap   [%2],%1\n\t"
-    : "=r" (rv)
-    : "0" (exchange_value) /* we use same register as for return value */, "r" (dest)
-    : "memory");
-  return rv;
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  intptr_t rv = exchange_value;
-#ifdef _LP64
-  __asm__ volatile(
-    "1:\n\t"
-    " mov    %1, %%o3\n\t"
-    " ldx    [%2], %%o2\n\t"
-    " casx   [%2], %%o2, %%o3\n\t"
-    " cmp    %%o2, %%o3\n\t"
-    " bne    %%xcc, 1b\n\t"
-    "  nop\n\t"
-    " mov    %%o2, %0\n\t"
-    : "=r" (rv)
-    : "r" (exchange_value), "r" (dest)
-    : "memory", "o2", "o3");
-#else  //_LP64
-  __asm__ volatile(
-    "swap    [%2],%1\n\t"
-    : "=r" (rv)
-    : "0" (exchange_value) /* we use same register as for return value */, "r" (dest)
-    : "memory");
-#endif // _LP64
-  return rv;
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
-
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  jint rv;
-  __asm__ volatile(
-    " cas    [%2], %3, %0"
-    : "=r" (rv)
-    : "0" (exchange_value), "r" (dest), "r" (compare_value)
-    : "memory");
-  return rv;
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-#ifdef _LP64
-  jlong rv;
-  __asm__ volatile(
-    " casx   [%2], %3, %0"
-    : "=r" (rv)
-    : "0" (exchange_value), "r" (dest), "r" (compare_value)
-    : "memory");
-  return rv;
-#else  //_LP64
-  volatile jlong_accessor evl, cvl, rv;
-  evl.long_value = exchange_value;
-  cvl.long_value = compare_value;
-
-  __asm__ volatile(
-    " sllx   %2, 32, %2\n\t"
-    " srl    %3, 0,  %3\n\t"
-    " or     %2, %3, %2\n\t"
-    " sllx   %5, 32, %5\n\t"
-    " srl    %6, 0,  %6\n\t"
-    " or     %5, %6, %5\n\t"
-    " casx   [%4], %5, %2\n\t"
-    " srl    %2, 0, %1\n\t"
-    " srlx   %2, 32, %0\n\t"
-    : "=r" (rv.words[0]), "=r" (rv.words[1])
-    : "r"  (evl.words[0]), "r" (evl.words[1]), "r" (dest), "r" (cvl.words[0]), "r" (cvl.words[1])
-    : "memory");
-
-  return rv.long_value;
-#endif  //_LP64
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  intptr_t rv;
-#ifdef _LP64
-  __asm__ volatile(
-    " casx    [%2], %3, %0"
-    : "=r" (rv)
-    : "0" (exchange_value), "r" (dest), "r" (compare_value)
-    : "memory");
-#else  //_LP64
-  __asm__ volatile(
-    " cas     [%2], %3, %0"
-    : "=r" (rv)
-    : "0" (exchange_value), "r" (dest), "r" (compare_value)
-    : "memory");
-#endif // _LP64
-  return rv;
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
-}
-
-#else // _GNU_SOURCE
-
-#if defined(COMPILER2) || defined(_LP64)
-
-// This is the interface to the atomic instructions in solaris_sparc.il.
-// It's very messy because we need to support v8 and these instructions
-// are illegal there.  When sparc v8 is dropped, we can drop out lots of
-// this code.  Also compiler2 does not support v8 so the conditional code
-// omits the instruction set check.
-
-extern "C" jint     _Atomic_swap32(jint     exchange_value, volatile jint*     dest);
-extern "C" intptr_t _Atomic_swap64(intptr_t exchange_value, volatile intptr_t* dest);
-
-extern "C" jint     _Atomic_cas32(jint     exchange_value, volatile jint*     dest, jint     compare_value);
-extern "C" intptr_t _Atomic_cas64(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value);
-extern "C" jlong    _Atomic_casl (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value);
-
-extern "C" jint     _Atomic_add32(jint     inc,       volatile jint*     dest);
-extern "C" intptr_t _Atomic_add64(intptr_t add_value, volatile intptr_t* dest);
-
-
-inline jint     Atomic::add     (jint    add_value, volatile jint*     dest) {
-  return _Atomic_add32(add_value, dest);
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-#ifdef _LP64
-  return _Atomic_add64(add_value, dest);
-#else  //_LP64
-  return _Atomic_add32(add_value, dest);
-#endif // _LP64
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest);
-}
-
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
-  return _Atomic_swap32(exchange_value, dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-#ifdef _LP64
-  return _Atomic_swap64(exchange_value, dest);
-#else  // _LP64
-  return _Atomic_swap32(exchange_value, dest);
-#endif // _LP64
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
-
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cas32(exchange_value, dest, compare_value);
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-#ifdef _LP64
-  // Return 64 bit value in %o0
-  return _Atomic_cas64((intptr_t)exchange_value, (intptr_t *)dest, (intptr_t)compare_value);
-#else  // _LP64
-  // Return 64 bit value in %o0,%o1 by hand
-  return _Atomic_casl(exchange_value, dest, compare_value);
-#endif // _LP64
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-#ifdef _LP64
-  return _Atomic_cas64(exchange_value, dest, compare_value);
-#else  // _LP64
-  return _Atomic_cas32(exchange_value, dest, compare_value);
-#endif // _LP64
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
-}
-
-
-#else // _LP64 || COMPILER2
-
-
-// 32-bit compiler1 only
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  return (*os::atomic_add_func)(add_value, dest);
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add((jint)add_value, (volatile jint*)dest);
-}
-
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
-  return (*os::atomic_xchg_func)(exchange_value, dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-#endif // _LP64 || COMPILER2
-
-#endif // _GNU_SOURCE
-
-#endif // OS_CPU_SOLARIS_SPARC_VM_ATOMIC_SOLARIS_SPARC_INLINE_HPP
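
Whichever backend is selected, the caller-visible contract is the same: cmpxchg returns the prior value of *dest, and the exchange took effect iff that value equals compare_value. A hedged usage sketch (bounded_increment is illustrative, not from this changeset, and assumes the order parameter keeps its conservative default from atomic.hpp):

static inline jint bounded_increment(volatile jint* counter, jint limit) {
  for (;;) {
    jint cur = *counter;
    if (cur >= limit) return cur;                        // already at the cap
    if (Atomic::cmpxchg(cur + 1, counter, cur) == cur) {
      return cur + 1;                                    // our increment won
    }
    // Another thread updated the counter first; re-read and retry.
  }
}
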
--- a/src/os_cpu/solaris_sparc/vm/orderAccess_solaris_sparc.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/os_cpu/solaris_sparc/vm/orderAccess_solaris_sparc.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP
 #define OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP
 
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 
 // Compiler version last used for testing: solaris studio 12u3
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
+#define OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
+
+#include "runtime/os.hpp"
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+
+
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+inline void Atomic::inc    (volatile jint*     dest) { (void)add    (1, dest); }
+inline void Atomic::inc_ptr(volatile intptr_t* dest) { (void)add_ptr(1, dest); }
+inline void Atomic::inc_ptr(volatile void*     dest) { (void)add_ptr(1, dest); }
+
+inline void Atomic::dec    (volatile jint*     dest) { (void)add    (-1, dest); }
+inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
+inline void Atomic::dec_ptr(volatile void*     dest) { (void)add_ptr(-1, dest); }
+
+// For Sun Studio - implementation is in solaris_x86_[32/64].il.
+// For gcc - implementation is just below.
+
+// The lock prefix can be omitted for certain instructions on uniprocessors; to
+// facilitate this, os::is_MP() is passed as an additional argument.  64-bit
+// builds always assume a multiprocessor (multi-threaded and/or multi-core),
+// so the extra argument is unnecessary there.
+#ifndef _LP64
+#define IS_MP_DECL() , int is_mp
+#define IS_MP_ARG()  , (int) os::is_MP()
+#else
+#define IS_MP_DECL()
+#define IS_MP_ARG()
+#endif // _LP64
+
+extern "C" {
+  jint _Atomic_add(jint add_value, volatile jint* dest IS_MP_DECL());
+  jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
+  jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest,
+                       jbyte compare_value IS_MP_DECL());
+  jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest,
+                       jint compare_value IS_MP_DECL());
+  jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest,
+                             jlong compare_value IS_MP_DECL());
+}
+
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+  return _Atomic_add(add_value, dest IS_MP_ARG());
+}
+
+inline jint     Atomic::xchg       (jint     exchange_value, volatile jint*     dest) {
+  return _Atomic_xchg(exchange_value, dest);
+}
+
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
+  return _Atomic_cmpxchg_byte(exchange_value, dest, compare_value IS_MP_ARG());
+}
+
+inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+  return _Atomic_cmpxchg(exchange_value, dest, compare_value IS_MP_ARG());
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value IS_MP_ARG());
+}
+
+
+#ifdef AMD64
+inline void Atomic::store    (jlong    store_value, jlong*             dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
+extern "C" jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);
+extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  return (intptr_t)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  return (intptr_t)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void*)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
+}
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+#else // !AMD64
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add((jint)add_value, (volatile jint*)dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+extern "C" void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  _Atomic_move_long(src, &dest);
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, dest);
+}
+
+#endif // AMD64
+
+#ifdef _GNU_SOURCE
+// Add a lock prefix to an instruction on an MP machine
+#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
+
+extern "C" {
+  inline jint _Atomic_add(jint add_value, volatile jint* dest, int mp) {
+    jint addend = add_value;
+    __asm__ volatile (  LOCK_IF_MP(%3) "xaddl %0,(%2)"
+                    : "=r" (addend)
+                    : "0" (addend), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+    return addend + add_value;
+  }
+
+#ifdef AMD64
+  inline jlong _Atomic_add_long(jlong add_value, volatile jlong* dest, int mp) {
+    intptr_t addend = add_value;
+    __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
+                        : "=r" (addend)
+                        : "0" (addend), "r" (dest), "r" (mp)
+                        : "cc", "memory");
+    return addend + add_value;
+  }
+
+  inline jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest) {
+    __asm__ __volatile__ ("xchgq (%2),%0"
+                        : "=r" (exchange_value)
+                        : "0" (exchange_value), "r" (dest)
+                        : "memory");
+    return exchange_value;
+  }
+
+#endif // AMD64
+
+  inline jint _Atomic_xchg(jint exchange_value, volatile jint* dest) {
+    __asm__ __volatile__ ("xchgl (%2),%0"
+                          : "=r" (exchange_value)
+                        : "0" (exchange_value), "r" (dest)
+                        : "memory");
+    return exchange_value;
+  }
+
+  inline jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, int mp) {
+    __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+    return exchange_value;
+  }
+
+
+  inline jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, int mp) {
+    __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+    return exchange_value;
+  }
+
+  // This is the interface to the atomic instruction in solaris_i486.s.
+  jlong _Atomic_cmpxchg_long_gcc(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp);
+
+  inline jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp) {
+#ifdef AMD64
+    __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
+                        : "=a" (exchange_value)
+                        : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                        : "cc", "memory");
+    return exchange_value;
+#else
+    return _Atomic_cmpxchg_long_gcc(exchange_value, dest, compare_value, os::is_MP());
+
+    #if 0
+    // The code below does not work, presumably because of a bug in gcc.
+    // The error message says:
+    //   can't find a register in class BREG while reloading asm
+    // It is kept so that _Atomic_cmpxchg_long_gcc can eventually be
+    // replaced with inline asm along these lines:
+
+    volatile jlong_accessor evl, cvl, rv;
+    evl.long_value = exchange_value;
+    cvl.long_value = compare_value;
+    int mp = os::is_MP();
+
+    __asm__ volatile ("cmp $0, %%esi\n\t"
+       "je 1f \n\t"
+       "lock\n\t"
+       "1: cmpxchg8b (%%edi)\n\t"
+       : "=a"(cvl.words[0]),   "=d"(cvl.words[1])
+       : "a"(cvl.words[0]), "d"(cvl.words[1]),
+         "b"(evl.words[0]), "c"(evl.words[1]),
+         "D"(dest), "S"(mp)
+       :  "cc", "memory");
+    return cvl.long_value;
+    #endif // if 0
+#endif // AMD64
+  }
+}
+#undef LOCK_IF_MP
+
+#endif // _GNU_SOURCE
+
+#endif // OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
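
The gcc path above threads an is_MP flag into the asm so that uniprocessors can skip the lock prefix. The same xadd idiom, restated as a self-contained sketch compilable outside HotSpot (all names are illustrative):

#include <stdint.h>

#define DEMO_LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "

// Returns the new value, matching Atomic::add semantics; 'mp' gates the lock prefix.
static inline int32_t demo_atomic_add(int32_t add_value, volatile int32_t* dest, int mp) {
  int32_t addend = add_value;
  // xaddl adds addend into *dest and leaves the old *dest in addend.
  __asm__ volatile (DEMO_LOCK_IF_MP(%3) "xaddl %0,(%2)"
                    : "=r" (addend)
                    : "0" (addend), "r" (dest), "r" (mp)
                    : "cc", "memory");
  return addend + add_value;
}
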
--- a/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,279 +0,0 @@
-/*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_INLINE_HPP
-#define OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-
-
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-inline void Atomic::inc    (volatile jint*     dest) { (void)add    (1, dest); }
-inline void Atomic::inc_ptr(volatile intptr_t* dest) { (void)add_ptr(1, dest); }
-inline void Atomic::inc_ptr(volatile void*     dest) { (void)add_ptr(1, dest); }
-
-inline void Atomic::dec    (volatile jint*     dest) { (void)add    (-1, dest); }
-inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
-inline void Atomic::dec_ptr(volatile void*     dest) { (void)add_ptr(-1, dest); }
-
-// For Sun Studio - implementation is in solaris_x86_[32/64].il.
-// For gcc - implementation is just below.
-
-// The lock prefix can be omitted for certain instructions on uniprocessors; to
-// facilitate this, os::is_MP() is passed as an additional argument.  64-bit
-// processors are assumed to be multi-threaded and/or multi-core, so the extra
-// argument is unnecessary.
-#ifndef _LP64
-#define IS_MP_DECL() , int is_mp
-#define IS_MP_ARG()  , (int) os::is_MP()
-#else
-#define IS_MP_DECL()
-#define IS_MP_ARG()
-#endif // _LP64
-
-extern "C" {
-  jint _Atomic_add(jint add_value, volatile jint* dest IS_MP_DECL());
-  jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
-  jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest,
-                       jbyte compare_value IS_MP_DECL());
-  jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest,
-                       jint compare_value IS_MP_DECL());
-  jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest,
-                             jlong compare_value IS_MP_DECL());
-}
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  return _Atomic_add(add_value, dest IS_MP_ARG());
-}
-
-inline jint     Atomic::xchg       (jint     exchange_value, volatile jint*     dest) {
-  return _Atomic_xchg(exchange_value, dest);
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg_byte(exchange_value, dest, compare_value IS_MP_ARG());
-}
-
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg(exchange_value, dest, compare_value IS_MP_ARG());
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value IS_MP_ARG());
-}
-
-
-#ifdef AMD64
-inline void Atomic::store    (jlong    store_value, jlong*             dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-extern "C" jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);
-extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
-}
-
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
-
-#else // !AMD64
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add((jint)add_value, (volatile jint*)dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-extern "C" void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
-
-inline jlong Atomic::load(volatile jlong* src) {
-  volatile jlong dest;
-  _Atomic_move_long(src, &dest);
-  return dest;
-}
-
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, dest);
-}
-
-#endif // AMD64
-
-#ifdef _GNU_SOURCE
-// Add a lock prefix to an instruction on an MP machine
-#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
-
-extern "C" {
-  inline jint _Atomic_add(jint add_value, volatile jint* dest, int mp) {
-    jint addend = add_value;
-    __asm__ volatile (  LOCK_IF_MP(%3) "xaddl %0,(%2)"
-                    : "=r" (addend)
-                    : "0" (addend), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-    return addend + add_value;
-  }
-
-#ifdef AMD64
-  inline jlong _Atomic_add_long(jlong add_value, volatile jlong* dest, int mp) {
-    intptr_t addend = add_value;
-    __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
-                        : "=r" (addend)
-                        : "0" (addend), "r" (dest), "r" (mp)
-                        : "cc", "memory");
-    return addend + add_value;
-  }
-
-  inline jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest) {
-    __asm__ __volatile__ ("xchgq (%2),%0"
-                        : "=r" (exchange_value)
-                        : "0" (exchange_value), "r" (dest)
-                        : "memory");
-    return exchange_value;
-  }
-
-#endif // AMD64
-
-  inline jint _Atomic_xchg(jint exchange_value, volatile jint* dest) {
-    __asm__ __volatile__ ("xchgl (%2),%0"
-                          : "=r" (exchange_value)
-                        : "0" (exchange_value), "r" (dest)
-                        : "memory");
-    return exchange_value;
-  }
-
-  inline jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, int mp) {
-    __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
-                    : "=a" (exchange_value)
-                    : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-    return exchange_value;
-  }
-
-
-  inline jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, int mp) {
-    __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
-                    : "=a" (exchange_value)
-                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-    return exchange_value;
-  }
-
-  // This is the interface to the atomic instruction in solaris_i486.s.
-  jlong _Atomic_cmpxchg_long_gcc(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp);
-
-  inline jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp) {
-#ifdef AMD64
-    __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
-                        : "=a" (exchange_value)
-                        : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                        : "cc", "memory");
-    return exchange_value;
-#else
-    return _Atomic_cmpxchg_long_gcc(exchange_value, dest, compare_value, os::is_MP());
-
-    #if 0
-    // The code below does not work presumably because of the bug in gcc
-    // The error message says:
-    //   can't find a register in class BREG while reloading asm
-    // However I want to save this code and later replace _Atomic_cmpxchg_long_gcc
-    // with such inline asm code:
-
-    volatile jlong_accessor evl, cvl, rv;
-    evl.long_value = exchange_value;
-    cvl.long_value = compare_value;
-    int mp = os::is_MP();
-
-    __asm__ volatile ("cmp $0, %%esi\n\t"
-       "je 1f \n\t"
-       "lock\n\t"
-       "1: cmpxchg8b (%%edi)\n\t"
-       : "=a"(cvl.words[0]),   "=d"(cvl.words[1])
-       : "a"(cvl.words[0]), "d"(cvl.words[1]),
-         "b"(evl.words[0]), "c"(evl.words[1]),
-         "D"(dest), "S"(mp)
-       :  "cc", "memory");
-    return cvl.long_value;
-    #endif // if 0
-#endif // AMD64
-  }
-}
-#undef LOCK_IF_MP
-
-#endif // _GNU_SOURCE
-
-#endif // OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_INLINE_HPP
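
A subtlety shared by both x86 variants: IS_MP_DECL()/IS_MP_ARG() make the extern "C" signatures themselves change shape between 32- and 64-bit builds. A hedged expansion sketch (the demo_* names are hypothetical):

#ifndef _LP64
  #define DEMO_MP_DECL() , int is_mp
  #define DEMO_MP_ARG()  , 1   // stand-in for (int) os::is_MP()
#else
  #define DEMO_MP_DECL()
  #define DEMO_MP_ARG()
#endif

// 32-bit: int demo_add(int add_value, volatile int* dest, int is_mp);
// 64-bit: int demo_add(int add_value, volatile int* dest);
extern "C" int demo_add(int add_value, volatile int* dest DEMO_MP_DECL());

static inline int demo_call(volatile int* dest) {
  return demo_add(1, dest DEMO_MP_ARG());
}
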
--- a/src/os_cpu/solaris_x86/vm/orderAccess_solaris_x86.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/os_cpu/solaris_x86/vm/orderAccess_solaris_x86.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
 #define OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
 
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 
--- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -38,7 +38,7 @@
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/extendedPC.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
+#define OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
+
+#include "runtime/os.hpp"
+
+// The following alternative implementations are needed because
+// Windows 95 doesn't support (some of) the corresponding Windows NT
+// calls. Furthermore, these versions allow inlining in the caller.
+// (More precisely: The documentation for InterlockedExchange says
+// it is supported for Windows 95. However, when single-stepping
+// through the assembly code we cannot step into the routine and
+// when looking at the routine address we see only garbage code.
+// Better safe than sorry!). Was bug 7/31/98 (gri).
+//
+// Performance note: On uniprocessors, the 'lock' prefixes are not
+// necessary (and expensive). We should generate separate cases if
+// this becomes a performance problem.
+
+#pragma warning(disable: 4035) // Disables warnings reporting missing return statement
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+
+
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+// Adds a lock prefix to an instruction on an MP machine.
+// VC++ doesn't allow a label between a 'lock' mnemonic and its
+// instruction, so we emit the prefix as a raw byte (0xF0), which
+// lets us define a label after it and skip it on uniprocessors.
+#define LOCK_IF_MP(mp) __asm cmp mp, 0  \
+                       __asm je L0      \
+                       __asm _emit 0xF0 \
+                       __asm L0:
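+// For example, "LOCK_IF_MP(mp) cmpxchg dword ptr [edx], ecx" expands to
+//   cmp mp, 0 / je L0 / _emit 0xF0 / L0: cmpxchg dword ptr [edx], ecx
+// so on a uniprocessor (mp == 0) the 0xF0 lock byte is jumped over and
+// the cmpxchg executes without bus locking.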
+
+#ifdef AMD64
+inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
+
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+  return (jint)(*os::atomic_add_func)(add_value, dest);
+}
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  return (intptr_t)(*os::atomic_add_ptr_func)(add_value, dest);
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)(*os::atomic_add_ptr_func)(add_value, (volatile intptr_t*)dest);
+}
+
+inline void Atomic::inc    (volatile jint*     dest) {
+  (void)add    (1, dest);
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  (void)add_ptr(1, dest);
+}
+
+inline void Atomic::inc_ptr(volatile void*     dest) {
+  (void)add_ptr(1, dest);
+}
+
+inline void Atomic::dec    (volatile jint*     dest) {
+  (void)add    (-1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  (void)add_ptr(-1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile void*     dest) {
+  (void)add_ptr(-1, dest);
+}
+
+inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
+  return (jint)(*os::atomic_xchg_func)(exchange_value, dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest);
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+  return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
+}
+
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
+    return (*os::atomic_cmpxchg_byte_func)(exchange_value, dest, compare_value);
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+  return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+#else // !AMD64
+
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+  int mp = os::is_MP();
+  __asm {
+    mov edx, dest;
+    mov eax, add_value;
+    mov ecx, eax;
+    LOCK_IF_MP(mp)
+    xadd dword ptr [edx], eax;
+    add eax, ecx;
+  }
+}
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add((jint)add_value, (volatile jint*)dest);
+}
+
+inline void Atomic::inc    (volatile jint*     dest) {
+  // alternative for InterlockedIncrement
+  int mp = os::is_MP();
+  __asm {
+    mov edx, dest;
+    LOCK_IF_MP(mp)
+    add dword ptr [edx], 1;
+  }
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  inc((volatile jint*)dest);
+}
+
+inline void Atomic::inc_ptr(volatile void*     dest) {
+  inc((volatile jint*)dest);
+}
+
+inline void Atomic::dec    (volatile jint*     dest) {
+  // alternative for InterlockedDecrement
+  int mp = os::is_MP();
+  __asm {
+    mov edx, dest;
+    LOCK_IF_MP(mp)
+    sub dword ptr [edx], 1;
+  }
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  dec((volatile jint*)dest);
+}
+
+inline void Atomic::dec_ptr(volatile void*     dest) {
+  dec((volatile jint*)dest);
+}
+
+inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
+  // alternative for InterlockedExchange
+  __asm {
+    mov eax, exchange_value;
+    mov ecx, dest;
+    xchg eax, dword ptr [ecx];
+  }
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
+  // alternative for InterlockedCompareExchange
+  int mp = os::is_MP();
+  __asm {
+    mov edx, dest
+    mov cl, exchange_value
+    mov al, compare_value
+    LOCK_IF_MP(mp)
+    cmpxchg byte ptr [edx], cl
+  }
+}
+
+inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+  // alternative for InterlockedCompareExchange
+  int mp = os::is_MP();
+  __asm {
+    mov edx, dest
+    mov ecx, exchange_value
+    mov eax, compare_value
+    LOCK_IF_MP(mp)
+    cmpxchg dword ptr [edx], ecx
+  }
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+  int mp = os::is_MP();
+  jint ex_lo  = (jint)exchange_value;
+  jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
+  jint cmp_lo = (jint)compare_value;
+  jint cmp_hi = *( ((jint*)&compare_value) + 1 );
+  __asm {
+    push ebx
+    push edi
+    mov eax, cmp_lo
+    mov edx, cmp_hi
+    mov edi, dest
+    mov ebx, ex_lo
+    mov ecx, ex_hi
+    LOCK_IF_MP(mp)
+    cmpxchg8b qword ptr [edi]
+    pop edi
+    pop ebx
+  }
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  volatile jlong* pdest = &dest;
+  __asm {
+    mov eax, src
+    fild     qword ptr [eax]
+    mov eax, pdest
+    fistp    qword ptr [eax]
+  }
+  return dest;
+}
+
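+// Note: the fild/fistp pairs in Atomic::load above and Atomic::store
+// below exist because IA-32 has no 64-bit integer move; two 32-bit
+// accesses could tear, while the x87 unit transfers all 8 bytes of an
+// aligned jlong in a single, atomic memory access.
+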
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  volatile jlong* src = &store_value;
+  __asm {
+    mov eax, src
+    fild     qword ptr [eax]
+    mov eax, dest
+    fistp    qword ptr [eax]
+  }
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  Atomic::store(store_value, (volatile jlong*)dest);
+}
+
+#endif // AMD64
+
+#pragma warning(default: 4035) // Enables warnings reporting missing return statement
+
+#endif // OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
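
Every Atomic::cmpxchg overload in the new header follows the same contract: return the previous contents of *dest, and perform the store only when those contents equal compare_value. A minimal runnable sketch of that contract using std::atomic (cmpxchg_sketch is illustrative, not HotSpot API):

  #include <atomic>
  #include <stdint.h>
  #include <cstdio>

  // Mirrors the cmpxchg convention above: returns the old value of *dest;
  // the exchange happens only if the old value equals compare_value.
  static int64_t cmpxchg_sketch(std::atomic<int64_t>* dest,
                                int64_t exchange_value,
                                int64_t compare_value) {
    int64_t observed = compare_value;
    // On failure, compare_exchange_strong writes the current value into
    // 'observed', which yields the return-the-old-value behavior.
    dest->compare_exchange_strong(observed, exchange_value);
    return observed;
  }

  int main() {
    std::atomic<int64_t> v(41);
    int64_t old = cmpxchg_sketch(&v, 42, 41);  // succeeds: old == 41
    std::printf("old=%lld now=%lld\n", (long long)old, (long long)v.load());
    return 0;
  }
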
--- a/src/os_cpu/windows_x86/vm/atomic_windows_x86.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,304 +0,0 @@
-/*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_INLINE_HPP
-#define OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-// The following alternative implementations are needed because
-// Windows 95 doesn't support (some of) the corresponding Windows NT
-// calls. Furthermore, these versions allow inlining in the caller.
-// (More precisely: The documentation for InterlockedExchange says
-// it is supported for Windows 95. However, when single-stepping
-// through the assembly code we cannot step into the routine and
-// when looking at the routine address we see only garbage code.
-// Better safe then sorry!). Was bug 7/31/98 (gri).
-//
-// Performance note: On uniprocessors, the 'lock' prefixes are not
-// necessary (and expensive). We should generate separate cases if
-// this becomes a performance problem.
-
-#pragma warning(disable: 4035) // Disables warnings reporting missing return statement
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-
-
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-// Adding a lock prefix to an instruction on MP machine
-// VC++ doesn't like the lock prefix to be on a single line
-// so we can't insert a label after the lock prefix.
-// By emitting a lock prefix, we can define a label after it.
-#define LOCK_IF_MP(mp) __asm cmp mp, 0  \
-                       __asm je L0      \
-                       __asm _emit 0xF0 \
-                       __asm L0:
-
-#ifdef AMD64
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  return (jint)(*os::atomic_add_func)(add_value, dest);
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)(*os::atomic_add_ptr_func)(add_value, dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)(*os::atomic_add_ptr_func)(add_value, (volatile intptr_t*)dest);
-}
-
-inline void Atomic::inc    (volatile jint*     dest) {
-  (void)add    (1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  (void)add_ptr(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  (void)add_ptr(1, dest);
-}
-
-inline void Atomic::dec    (volatile jint*     dest) {
-  (void)add    (-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  (void)add_ptr(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  (void)add_ptr(-1, dest);
-}
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
-  return (jint)(*os::atomic_xchg_func)(exchange_value, dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest);
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
-    return (*os::atomic_cmpxchg_byte_func)(exchange_value, dest, compare_value);
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
-
-#else // !AMD64
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  int mp = os::is_MP();
-  __asm {
-    mov edx, dest;
-    mov eax, add_value;
-    mov ecx, eax;
-    LOCK_IF_MP(mp)
-    xadd dword ptr [edx], eax;
-    add eax, ecx;
-  }
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void Atomic::inc    (volatile jint*     dest) {
-  // alternative for InterlockedIncrement
-  int mp = os::is_MP();
-  __asm {
-    mov edx, dest;
-    LOCK_IF_MP(mp)
-    add dword ptr [edx], 1;
-  }
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  inc((volatile jint*)dest);
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  inc((volatile jint*)dest);
-}
-
-inline void Atomic::dec    (volatile jint*     dest) {
-  // alternative for InterlockedDecrement
-  int mp = os::is_MP();
-  __asm {
-    mov edx, dest;
-    LOCK_IF_MP(mp)
-    sub dword ptr [edx], 1;
-  }
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  dec((volatile jint*)dest);
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  dec((volatile jint*)dest);
-}
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
-  // alternative for InterlockedExchange
-  __asm {
-    mov eax, exchange_value;
-    mov ecx, dest;
-    xchg eax, dword ptr [ecx];
-  }
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
-  // alternative for InterlockedCompareExchange
-  int mp = os::is_MP();
-  __asm {
-    mov edx, dest
-    mov cl, exchange_value
-    mov al, compare_value
-    LOCK_IF_MP(mp)
-    cmpxchg byte ptr [edx], cl
-  }
-}
-
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  // alternative for InterlockedCompareExchange
-  int mp = os::is_MP();
-  __asm {
-    mov edx, dest
-    mov ecx, exchange_value
-    mov eax, compare_value
-    LOCK_IF_MP(mp)
-    cmpxchg dword ptr [edx], ecx
-  }
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  int mp = os::is_MP();
-  jint ex_lo  = (jint)exchange_value;
-  jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
-  jint cmp_lo = (jint)compare_value;
-  jint cmp_hi = *( ((jint*)&compare_value) + 1 );
-  __asm {
-    push ebx
-    push edi
-    mov eax, cmp_lo
-    mov edx, cmp_hi
-    mov edi, dest
-    mov ebx, ex_lo
-    mov ecx, ex_hi
-    LOCK_IF_MP(mp)
-    cmpxchg8b qword ptr [edi]
-    pop edi
-    pop ebx
-  }
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) {
-  volatile jlong dest;
-  volatile jlong* pdest = &dest;
-  __asm {
-    mov eax, src
-    fild     qword ptr [eax]
-    mov eax, pdest
-    fistp    qword ptr [eax]
-  }
-  return dest;
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  volatile jlong* src = &store_value;
-  __asm {
-    mov eax, src
-    fild     qword ptr [eax]
-    mov eax, dest
-    fistp    qword ptr [eax]
-  }
-}
-
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  Atomic::store(store_value, (volatile jlong*)dest);
-}
-
-#endif // AMD64
-
-#pragma warning(default: 4035) // Enables warnings reporting missing return statement
-
-#endif // OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_INLINE_HPP
--- a/src/os_cpu/windows_x86/vm/orderAccess_windows_x86.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/os_cpu/windows_x86/vm/orderAccess_windows_x86.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #define OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
 
 #include <intrin.h>
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 
--- a/src/share/vm/asm/assembler.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/asm/assembler.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #include "asm/codeBuffer.hpp"
 #include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 #include "runtime/thread.hpp"
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -4266,7 +4266,7 @@
 #if INCLUDE_TRACE
   EventCompilerInlining event;
   if (event.should_commit()) {
-    event.set_compileID(compilation()->env()->task()->compile_id());
+    event.set_compileId(compilation()->env()->task()->compile_id());
     event.set_message(msg);
     event.set_succeeded(success);
     event.set_bci(bci());
--- a/src/share/vm/c1/c1_Runtime1.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -49,7 +49,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/interfaceSupport.hpp"
--- a/src/share/vm/ci/ciEnv.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/ci/ciEnv.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1149,10 +1149,10 @@
 
 void ciEnv::report_failure(const char* reason) {
   // Create and fire JFR event
-  EventCompilerFailure event;
+  EventCompilationFailure event;
   if (event.should_commit()) {
-    event.set_compileID(compile_id());
-    event.set_failure(reason);
+    event.set_compileId(compile_id());
+    event.set_failureMessage(reason);
     event.commit();
   }
 }
--- a/src/share/vm/ci/ciMethod.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/ci/ciMethod.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1410,11 +1410,11 @@
 }
 
 #if INCLUDE_TRACE
-TraceStructCiMethod ciMethod::to_trace_struct() const {
-  TraceStructCiMethod result;
-  result.set_class(holder()->name()->as_utf8());
+TraceStructCalleeMethod ciMethod::to_trace_struct() const {
+  TraceStructCalleeMethod result;
+  result.set_type(holder()->name()->as_utf8());
   result.set_name(name()->as_utf8());
-  result.set_signature(signature()->as_symbol()->as_utf8());
+  result.set_descriptor(signature()->as_symbol()->as_utf8());
   return result;
 }
 #endif
--- a/src/share/vm/ci/ciMethod.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/ci/ciMethod.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -342,7 +342,7 @@
   void print_short_name(outputStream* st = tty);
 
 #if INCLUDE_TRACE
-  TraceStructCiMethod to_trace_struct() const;
+  TraceStructCalleeMethod to_trace_struct() const;
 #endif
 };
 
--- a/src/share/vm/classfile/classFileParser.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/classfile/classFileParser.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -5402,6 +5402,17 @@
   debug_only(ik->verify();)
 }
 
+static bool relax_format_check_for(ClassLoaderData* loader_data) {
+  bool trusted = (loader_data->is_the_null_class_loader_data() ||
+                  SystemDictionary::is_platform_class_loader(loader_data->class_loader()));
+  bool need_verify =
+    // verifyAll
+    (BytecodeVerificationLocal && BytecodeVerificationRemote) ||
+    // verifyRemote
+    (!BytecodeVerificationLocal && BytecodeVerificationRemote && !trusted);
+  return !need_verify;
+}
+
 ClassFileParser::ClassFileParser(ClassFileStream* stream,
                                  Symbol* name,
                                  ClassLoaderData* loader_data,
@@ -5490,7 +5501,7 @@
 
   // Check if verification needs to be relaxed for this class file
   // Do not restrict it to jdk1.0 or jdk1.1 to maintain backward compatibility (4982376)
-  _relax_verify = Verifier::relax_verify_for(_loader_data->class_loader());
+  _relax_verify = relax_format_check_for(_loader_data);
 
   parse_stream(stream, CHECK);
 
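
The relax_format_check_for predicate added above reproduces the verification-mode logic locally instead of going through the renamed Verifier entry point. A standalone sketch of that predicate, handy for checking its truth table (the relaxed helper and the driver are illustrative, not HotSpot API):

  #include <cstdio>

  // Mirrors relax_format_check_for above: format checks are relaxed
  // exactly when full verification is not required for this loader.
  static bool relaxed(bool verify_local, bool verify_remote, bool trusted) {
    bool need_verify = (verify_local && verify_remote) ||              // verifyAll
                       (!verify_local && verify_remote && !trusted);   // verifyRemote
    return !need_verify;
  }

  int main() {
    // Default VM settings: BytecodeVerificationLocal off, Remote on.
    std::printf("trusted: %d, untrusted: %d\n",
                relaxed(false, true, true), relaxed(false, true, false));
    return 0;
  }
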
--- a/src/share/vm/classfile/classLoaderData.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/classfile/classLoaderData.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -63,7 +63,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutex.hpp"
--- a/src/share/vm/classfile/klassFactory.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/classfile/klassFactory.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -31,12 +31,12 @@
 #include "prims/jvmtiEnvBase.hpp"
 #include "trace/traceMacros.hpp"
 
-static ClassFileStream* prologue(ClassFileStream* stream,
-                                 Symbol* name,
-                                 ClassLoaderData* loader_data,
-                                 Handle protection_domain,
-                                 JvmtiCachedClassFileData** cached_class_file,
-                                 TRAPS) {
+static ClassFileStream* check_class_file_load_hook(ClassFileStream* stream,
+                                                   Symbol* name,
+                                                   ClassLoaderData* loader_data,
+                                                   Handle protection_domain,
+                                                   JvmtiCachedClassFileData** cached_class_file,
+                                                   TRAPS) {
 
   assert(stream != NULL, "invariant");
 
@@ -102,8 +102,6 @@
   assert(loader_data != NULL, "invariant");
   assert(THREAD->is_Java_thread(), "must be a JavaThread");
 
-  bool changed_by_loadhook = false;
-
   ResourceMark rm;
   HandleMark hm;
 
@@ -111,12 +109,15 @@
 
   ClassFileStream* old_stream = stream;
 
-  stream = prologue(stream,
-                    name,
-                    loader_data,
-                    protection_domain,
-                    &cached_class_file,
-                    CHECK_NULL);
+  // Skip this processing for VM anonymous classes
+  if (host_klass == NULL) {
+    stream = check_class_file_load_hook(stream,
+                                        name,
+                                        loader_data,
+                                        protection_domain,
+                                        &cached_class_file,
+                                        CHECK_NULL);
+  }
 
   ClassFileParser parser(stream,
                          name,
--- a/src/share/vm/classfile/stringTable.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/classfile/stringTable.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -34,7 +34,7 @@
 #include "memory/filemap.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/macros.hpp"
--- a/src/share/vm/classfile/symbolTable.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/classfile/symbolTable.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -34,7 +34,7 @@
 #include "memory/filemap.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "utilities/hashtable.inline.hpp"
 
--- a/src/share/vm/classfile/systemDictionary.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/classfile/systemDictionary.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1641,7 +1641,6 @@
       JvmtiExport::post_class_load((JavaThread *) THREAD, k());
 
   }
-  TRACE_KLASS_DEFINITION(k, THREAD);
   class_define_event(k);
 }
 
--- a/src/share/vm/classfile/verifier.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/classfile/verifier.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -88,7 +88,7 @@
     BytecodeVerificationLocal : BytecodeVerificationRemote;
 }
 
-bool Verifier::relax_verify_for(oop loader) {
+bool Verifier::relax_access_for(oop loader) {
   bool trusted = java_lang_ClassLoader::is_trusted_loader(loader);
   bool need_verify =
     // verifyAll
--- a/src/share/vm/classfile/verifier.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/classfile/verifier.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,8 +58,8 @@
   // -Xverify:all/none override this value
   static bool should_verify_for(oop class_loader, bool should_verify_class);
 
-  // Relax certain verifier checks to enable some broken 1.1 apps to run on 1.2.
-  static bool relax_verify_for(oop class_loader);
+  // Relax certain access checks to enable some broken 1.1 apps to run on 1.2.
+  static bool relax_access_for(oop class_loader);
 
   // Print output for class+resolve
   static void trace_class_resolution(Klass* resolve_class, InstanceKlass* verify_class);
--- a/src/share/vm/code/nmethod.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/code/nmethod.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -41,7 +41,7 @@
 #include "oops/methodData.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiImpl.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/sharedRuntime.hpp"
--- a/src/share/vm/compiler/compileBroker.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/compiler/compileBroker.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -42,7 +42,7 @@
 #include "prims/nativeLookup.hpp"
 #include "prims/whitebox.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -1755,7 +1755,7 @@
   assert(task->compile_id() != CICrashAt, "just as planned");
   if (event.should_commit()) {
     event.set_method(task->method());
-    event.set_compileID(task->compile_id());
+    event.set_compileId(task->compile_id());
     event.set_compileLevel(task->comp_level());
     event.set_succeded(task->is_success());
     event.set_isOsr(task->osr_bci() != CompileBroker::standard_entry_bci);
@@ -2399,4 +2399,3 @@
     }
   }
 }
-
--- a/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -61,7 +61,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/globals_extension.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
--- a/src/share/vm/gc/cms/parNewGeneration.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/cms/parNewGeneration.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -50,7 +50,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
--- a/src/share/vm/gc/g1/collectionSetChooser.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/collectionSetChooser.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -26,7 +26,7 @@
 #include "gc/g1/collectionSetChooser.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/shared/space.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 
 // Even though we don't use the GC efficiency in our heuristics as
 // much as we used to, we still order according to GC efficiency. This
--- a/src/share/vm/gc/g1/dirtyCardQueue.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/dirtyCardQueue.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -27,7 +27,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/workgroup.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.inline.hpp"
--- a/src/share/vm/gc/g1/g1Analytics.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/g1Analytics.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -316,14 +316,10 @@
   return get_new_size_prediction(_pending_cards_seq);
 }
 
-double G1Analytics::oldest_known_gc_end_time_sec() const {
+double G1Analytics::last_known_gc_end_time_sec() const {
   return _recent_prev_end_times_for_all_gcs_sec->oldest();
 }
 
-double G1Analytics::last_known_gc_end_time_sec() const {
-  return _recent_prev_end_times_for_all_gcs_sec->last();
-}
-
 void G1Analytics::update_recent_gc_times(double end_time_sec,
                                          double pause_time_ms) {
   _recent_gc_times_ms->add(pause_time_ms);
--- a/src/share/vm/gc/g1/g1Analytics.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/g1Analytics.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -155,7 +155,6 @@
   void update_recent_gc_times(double end_time_sec, double elapsed_ms);
   void compute_pause_time_ratio(double interval_ms, double pause_time_ms);
 
-  double oldest_known_gc_end_time_sec() const;
   double last_known_gc_end_time_sec() const;
 };
 
--- a/src/share/vm/gc/g1/g1CardLiveData.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/g1CardLiveData.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -30,7 +30,7 @@
 #include "gc/shared/workgroup.hpp"
 #include "logging/log.hpp"
 #include "memory/universe.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/os.hpp"
 #include "utilities/bitMap.inline.hpp"
--- a/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/g1CollectedHeap.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -28,7 +28,6 @@
 #include "classfile/symbolTable.hpp"
 #include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
-#include "gc/g1/g1Analytics.hpp"
 #include "gc/g1/bufferingOopClosure.hpp"
 #include "gc/g1/concurrentG1Refine.hpp"
 #include "gc/g1/concurrentG1RefineThread.hpp"
@@ -75,7 +74,7 @@
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/init.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
@@ -2474,19 +2473,8 @@
 }
 
 jlong G1CollectedHeap::millis_since_last_gc() {
-  jlong now = os::elapsed_counter() / NANOSECS_PER_MILLISEC;
-  const G1Analytics* analytics = _g1_policy->analytics();
-  double last = analytics->last_known_gc_end_time_sec();
-  jlong ret_val = now - (last * 1000);
-  if (ret_val < 0) {
-    // See the notes in GenCollectedHeap::millis_since_last_gc()
-    // for more information about the implementation.
-    log_warning(gc)("Detected clock going backwards. "
-      "Milliseconds since last GC would be " JLONG_FORMAT
-      ". returning zero instead.", ret_val);
-    return 0;
-  }
-  return ret_val;
+  // assert(false, "NYI");
+  return 0;
 }
 
 void G1CollectedHeap::prepare_for_verify() {
--- a/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -52,7 +52,7 @@
 #include "memory/allocation.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/prefetch.inline.hpp"
--- a/src/share/vm/gc/g1/g1DefaultPolicy.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/g1DefaultPolicy.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -604,7 +604,7 @@
     _analytics->report_alloc_rate_ms(alloc_rate_ms);
 
     double interval_ms =
-      (end_time_sec - _analytics->oldest_known_gc_end_time_sec()) * 1000.0;
+      (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0;
     _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
     _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
   }
--- a/src/share/vm/gc/g1/g1EvacStats.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/g1EvacStats.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #define SHARE_VM_GC_G1_G1EVACSTATS_INLINE_HPP
 
 #include "gc/g1/g1EvacStats.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 
 inline void G1EvacStats::add_direct_allocated(size_t value) {
   Atomic::add_ptr(value, &_direct_allocated);
--- a/src/share/vm/gc/g1/g1HotCardCache.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/g1HotCardCache.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #include "gc/g1/dirtyCardQueue.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1HotCardCache.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 
 G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
   _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}
--- a/src/share/vm/gc/g1/g1MarkSweep.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/g1MarkSweep.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/fprofiler.hpp"
 #include "runtime/synchronizer.hpp"
--- a/src/share/vm/gc/g1/g1SATBCardTableModRefBS.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/g1SATBCardTableModRefBS.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -30,7 +30,7 @@
 #include "gc/shared/memset_with_concurrent_readers.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/thread.inline.hpp"
--- a/src/share/vm/gc/g1/g1StringDedup.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/g1StringDedup.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -31,7 +31,7 @@
 #include "gc/g1/g1StringDedupStat.hpp"
 #include "gc/g1/g1StringDedupTable.hpp"
 #include "gc/g1/g1StringDedupThread.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 
 bool G1StringDedup::_enabled = false;
 
--- a/src/share/vm/gc/g1/g1StringDedup.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/g1StringDedup.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -84,7 +84,6 @@
 
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
-#include "runtime/atomic.hpp"
 
 class OopClosure;
 class BoolObjectClosure;
--- a/src/share/vm/gc/g1/g1StringDedupQueue.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/g1StringDedupQueue.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -30,7 +30,7 @@
 #include "gc/shared/gcLocker.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "utilities/stack.inline.hpp"
 
--- a/src/share/vm/gc/g1/g1StringDedupThread.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/g1StringDedupThread.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -31,7 +31,7 @@
 #include "gc/g1/suspendibleThreadSet.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 
 G1StringDedupThread* G1StringDedupThread::_thread = NULL;
 
--- a/src/share/vm/gc/g1/heapRegion.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/heapRegion.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -39,7 +39,7 @@
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.inline.hpp"
 
 int    HeapRegion::LogOfHRGrainBytes = 0;
--- a/src/share/vm/gc/g1/heapRegion.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/heapRegion.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -30,7 +30,7 @@
 #include "gc/g1/heapRegion.hpp"
 #include "gc/shared/space.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 
 inline HeapWord* G1ContiguousSpace::allocate_impl(size_t min_word_size,
                                                   size_t desired_word_size,
--- a/src/share/vm/gc/g1/heapRegionRemSet.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/heapRegionRemSet.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -33,7 +33,7 @@
 #include "memory/allocation.hpp"
 #include "memory/padded.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "utilities/bitMap.inline.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/growableArray.hpp"
--- a/src/share/vm/gc/g1/heapRegionTracer.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/heapRegionTracer.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -39,7 +39,7 @@
     e.set_to(to);
     e.set_start(start);
     e.set_used(used);
-    e.set_allocContext(allocationContext);
+    e.set_allocationContext(allocationContext);
     e.commit();
   }
 }
--- a/src/share/vm/gc/g1/sparsePRT.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/g1/sparsePRT.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -30,7 +30,7 @@
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "memory/allocation.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
 
 // Check that the size of the SparsePRTEntry is evenly divisible by the maximum
--- a/src/share/vm/gc/parallel/gcTaskThread.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/parallel/gcTaskThread.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -30,7 +30,7 @@
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/os.hpp"
--- a/src/share/vm/gc/parallel/mutableNUMASpace.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/parallel/mutableNUMASpace.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,6 +1,5 @@
-
 /*
- * Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +27,7 @@
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/spaceDecorator.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/thread.inline.hpp"
 
 MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment) {
--- a/src/share/vm/gc/parallel/mutableSpace.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/parallel/mutableSpace.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #include "gc/parallel/mutableSpace.hpp"
 #include "gc/shared/spaceDecorator.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/macros.hpp"
--- a/src/share/vm/gc/parallel/parMarkBitMap.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/parallel/parMarkBitMap.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -27,7 +27,7 @@
 #include "gc/parallel/psCompactionManager.inline.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/bitMap.inline.hpp"
--- a/src/share/vm/gc/parallel/psCompactionManager.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/parallel/psCompactionManager.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -38,7 +38,7 @@
 #include "oops/instanceMirrorKlass.inline.hpp"
 #include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 
 PSOldGen*            ParCompactionManager::_old_gen = NULL;
 ParCompactionManager**  ParCompactionManager::_manager_array = NULL;
--- a/src/share/vm/gc/parallel/psParallelCompact.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/parallel/psParallelCompact.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -57,7 +57,7 @@
 #include "oops/methodData.hpp"
 #include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/fprofiler.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/vmThread.hpp"
--- a/src/share/vm/gc/serial/defNewGeneration.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/serial/defNewGeneration.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -46,7 +46,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/java.hpp"
 #include "runtime/prefetch.inline.hpp"
 #include "runtime/thread.inline.hpp"
--- a/src/share/vm/gc/shared/allocTracer.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/shared/allocTracer.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -29,18 +29,18 @@
 #include "utilities/globalDefinitions.hpp"
 
 void AllocTracer::send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size) {
-  EventAllocObjectOutsideTLAB event;
+  EventObjectAllocationOutsideTLAB event;
   if (event.should_commit()) {
-    event.set_class(klass());
+    event.set_objectClass(klass());
     event.set_allocationSize(alloc_size);
     event.commit();
   }
 }
 
 void AllocTracer::send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size) {
-  EventAllocObjectInNewTLAB event;
+  EventObjectAllocationInNewTLAB event;
   if (event.should_commit()) {
-    event.set_class(klass());
+    event.set_objectClass(klass());
     event.set_allocationSize(alloc_size);
     event.set_tlabSize(tlab_size);
     event.commit();
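
The event renames above leave the JFR send pattern itself unchanged: test should_commit() first so field values are only computed while a recording is active, then set fields and commit. A runnable sketch with a mock event type (the real Event classes are generated from HotSpot's trace metadata; MockEvent and send_allocation_event are illustrative only):

  #include <cstddef>
  #include <cstdio>

  struct MockEvent {
    bool enabled;
    bool should_commit() const { return enabled; }
    void set_allocationSize(std::size_t n) { std::printf("allocationSize=%zu\n", n); }
    void commit() { std::printf("committed\n"); }
  };

  static void send_allocation_event(std::size_t alloc_size, bool recording) {
    MockEvent event{recording};
    if (event.should_commit()) {        // skip all field work when disabled
      event.set_allocationSize(alloc_size);
      event.commit();
    }
  }

  int main() {
    send_allocation_event(1024, true);   // fields set, event committed
    send_allocation_event(2048, false);  // no work done
    return 0;
  }
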
--- a/src/share/vm/gc/shared/cardTableRS.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/shared/cardTableRS.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -29,7 +29,7 @@
 #include "gc/shared/space.inline.hpp"
 #include "memory/allocation.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
 #include "utilities/macros.hpp"
--- a/src/share/vm/gc/shared/gcLocker.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/shared/gcLocker.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
 #include "gc/shared/gcLocker.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "logging/log.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/thread.inline.hpp"
 
 volatile jint GCLocker::_jni_lock_count = 0;
--- a/src/share/vm/gc/shared/gcTraceSend.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/shared/gcTraceSend.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -43,7 +43,7 @@
 typedef uintptr_t TraceAddress;
 
 void GCTracer::send_garbage_collection_event() const {
-  EventGCGarbageCollection event(UNTIMED);
+  EventGarbageCollection event(UNTIMED);
   if (event.should_commit()) {
     event.set_gcId(GCId::current());
     event.set_name(_shared_gc_info.name());
@@ -91,7 +91,7 @@
 }
 
 void ParallelOldTracer::send_parallel_old_event() const {
-  EventGCParallelOld e(UNTIMED);
+  EventParallelOldGarbageCollection e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(GCId::current());
     e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix());
@@ -102,7 +102,7 @@
 }
 
 void YoungGCTracer::send_young_gc_event() const {
-  EventGCYoungGarbageCollection e(UNTIMED);
+  EventYoungGarbageCollection e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(GCId::current());
     e.set_tenuringThreshold(_tenuring_threshold);
@@ -127,7 +127,7 @@
   EventPromoteObjectInNewPLAB event;
   if (event.should_commit()) {
     event.set_gcId(GCId::current());
-    event.set_class(klass);
+    event.set_objectClass(klass);
     event.set_objectSize(obj_size);
     event.set_tenured(tenured);
     event.set_tenuringAge(age);
@@ -142,7 +142,7 @@
   EventPromoteObjectOutsidePLAB event;
   if (event.should_commit()) {
     event.set_gcId(GCId::current());
-    event.set_class(klass);
+    event.set_objectClass(klass);
     event.set_objectSize(obj_size);
     event.set_tenured(tenured);
     event.set_tenuringAge(age);
@@ -151,7 +151,7 @@
 }
 
 void OldGCTracer::send_old_gc_event() const {
-  EventGCOldGarbageCollection e(UNTIMED);
+  EventOldGarbageCollection e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(GCId::current());
     e.set_starttime(_shared_gc_info.start_timestamp());
@@ -173,7 +173,7 @@
   EventPromotionFailed e;
   if (e.should_commit()) {
     e.set_gcId(GCId::current());
-    e.set_data(to_trace_struct(pf_info));
+    e.set_promotionFailed(to_trace_struct(pf_info));
     e.set_thread(pf_info.thread_trace_id());
     e.commit();
   }
@@ -190,7 +190,7 @@
 
 #if INCLUDE_ALL_GCS
 void G1NewTracer::send_g1_young_gc_event() {
-  EventGCG1GarbageCollection e(UNTIMED);
+  EventG1GarbageCollection e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(GCId::current());
     e.set_type(_g1_young_gc_info.type());
@@ -201,7 +201,7 @@
 }
 
 void G1MMUTracer::send_g1_mmu_event(double timeSlice, double gcTime, double maxTime) {
-  EventGCG1MMU e;
+  EventG1MMU e;
   if (e.should_commit()) {
     e.set_gcId(GCId::current());
     e.set_timeSlice(timeSlice);
@@ -212,15 +212,15 @@
 }
 
 void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
-  EventEvacuationInfo e;
+  EventEvacuationInformation e;
   if (e.should_commit()) {
     e.set_gcId(GCId::current());
     e.set_cSetRegions(info->collectionset_regions());
     e.set_cSetUsedBefore(info->collectionset_used_before());
     e.set_cSetUsedAfter(info->collectionset_used_after());
     e.set_allocationRegions(info->allocation_regions());
-    e.set_allocRegionsUsedBefore(info->alloc_regions_used_before());
-    e.set_allocRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied());
+    e.set_allocationRegionsUsedBefore(info->alloc_regions_used_before());
+    e.set_allocationRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied());
     e.set_bytesCopied(info->bytes_copied());
     e.set_regionsFreed(info->regions_freed());
     e.commit();
@@ -231,13 +231,14 @@
   EventEvacuationFailed e;
   if (e.should_commit()) {
     e.set_gcId(GCId::current());
-    e.set_data(to_trace_struct(ef_info));
+    e.set_evacuationFailed(to_trace_struct(ef_info));
     e.commit();
   }
 }
 
-static TraceStructG1EvacStats create_g1_evacstats(unsigned gcid, const G1EvacSummary& summary) {
-  TraceStructG1EvacStats s;
+static TraceStructG1EvacuationStatistics
+create_g1_evacstats(unsigned gcid, const G1EvacSummary& summary) {
+  TraceStructG1EvacuationStatistics s;
   s.set_gcId(gcid);
   s.set_allocated(summary.allocated() * HeapWordSize);
   s.set_wasted(summary.wasted() * HeapWordSize);
@@ -252,17 +253,17 @@
 }
 
 void G1NewTracer::send_young_evacuation_statistics(const G1EvacSummary& summary) const {
-  EventGCG1EvacuationYoungStatistics surv_evt;
+  EventG1EvacuationYoungStatistics surv_evt;
   if (surv_evt.should_commit()) {
-    surv_evt.set_stats(create_g1_evacstats(GCId::current(), summary));
+    surv_evt.set_statistics(create_g1_evacstats(GCId::current(), summary));
     surv_evt.commit();
   }
 }
 
 void G1NewTracer::send_old_evacuation_statistics(const G1EvacSummary& summary) const {
-  EventGCG1EvacuationOldStatistics old_evt;
+  EventG1EvacuationOldStatistics old_evt;
   if (old_evt.should_commit()) {
-    old_evt.set_stats(create_g1_evacstats(GCId::current(), summary));
+    old_evt.set_statistics(create_g1_evacstats(GCId::current(), summary));
     old_evt.commit();
   }
 }
@@ -273,7 +274,7 @@
                                              size_t last_allocation_size,
                                              double last_allocation_duration,
                                              double last_marking_length) {
-  EventGCG1BasicIHOP evt;
+  EventG1BasicIHOP evt;
   if (evt.should_commit()) {
     evt.set_gcId(GCId::current());
     evt.set_threshold(threshold);
@@ -295,7 +296,7 @@
                                                 double predicted_allocation_rate,
                                                 double predicted_marking_length,
                                                 bool prediction_active) {
-  EventGCG1AdaptiveIHOP evt;
+  EventG1AdaptiveIHOP evt;
   if (evt.should_commit()) {
     evt.set_gcId(GCId::current());
     evt.set_threshold(threshold);
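
All of the senders in this file follow the same shape: construct the event, check should_commit() so that field population (and any expensive argument computation) is skipped when recording is off, then commit. A minimal standalone sketch of that guard-then-populate pattern, with a plain struct standing in for the generated HotSpot event classes (all names here are illustrative, not the tracing API):

#include <cstdint>
#include <cstdio>

// Illustrative stand-in for the tracing backend's enabled/disabled state.
static bool g_recording_enabled = true;

struct EventSketch {
  uint32_t gc_id = 0;
  // Construction stays cheap; the expensive work is gated below.
  bool should_commit() const { return g_recording_enabled; }
  void set_gcId(uint32_t id) { gc_id = id; }
  void commit() const { std::printf("committed, gcId=%u\n", gc_id); }
};

int main() {
  EventSketch e;
  if (e.should_commit()) {  // skip all field setters when recording is off
    e.set_gcId(42);
    e.commit();
  }
}
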
--- a/src/share/vm/gc/shared/genCollectedHeap.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/shared/genCollectedHeap.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1256,21 +1256,21 @@
 };
 
 jlong GenCollectedHeap::millis_since_last_gc() {
-  // javaTimeNanos() is guaranteed to be monotonically non-decreasing
-  // provided the underlying platform provides such a time source
-  // (and it is bug free). So we still have to guard against getting
-  // back a time later than 'now'.
+  // We need a monotonically non-decreasing time in ms but
+  // os::javaTimeMillis() does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
   GenTimeOfLastGCClosure tolgc_cl(now);
   // iterate over generations getting the oldest
   // time that a generation was collected
   generation_iterate(&tolgc_cl, false);
 
+  // javaTimeNanos() is guaranteed to be monotonically non-decreasing
+  // provided the underlying platform provides such a time source
+  // (and it is bug free). So we still have to guard against getting
+  // back a time later than 'now'.
   jlong retVal = now - tolgc_cl.time();
   if (retVal < 0) {
-    log_warning(gc)("Detected clock going backwards. "
-      "Milliseconds since last GC would be " JLONG_FORMAT
-      ". returning zero instead.", retVal);
+    NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, retVal);)
     return 0;
   }
   return retVal;
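
The guard above defends against a time source that is not strictly monotonic: if the recorded last-GC time appears to be later than 'now', the delta is clamped to zero rather than reported as a negative duration. A self-contained sketch of that clamp, with made-up timestamps (only the defensive pattern is the point):

#include <cstdint>
#include <cstdio>

// Mirror of the hunk above: never report a negative elapsed time, even if
// the clock (or a racing timestamp update) makes last_event_ms > now_ms.
int64_t millis_since(int64_t now_ms, int64_t last_event_ms) {
  int64_t delta = now_ms - last_event_ms;
  return delta < 0 ? 0 : delta;
}

int main() {
  std::printf("%lld\n", (long long)millis_since(1000, 1500)); // prints 0
}
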
--- a/src/share/vm/gc/shared/objectCountEventSender.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/shared/objectCountEventSender.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -40,7 +40,7 @@
 
   EventObjectCountAfterGC event(UNTIMED);
   event.set_gcId(GCId::current());
-  event.set_class(entry->klass());
+  event.set_objectClass(entry->klass());
   event.set_count(entry->count());
   event.set_totalSize(entry->words() * BytesPerWord);
   event.set_endtime(timestamp);
--- a/src/share/vm/gc/shared/plab.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/shared/plab.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/plab.hpp"
 #include "memory/allocation.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 
 inline HeapWord* PLAB::allocate_aligned(size_t word_sz, unsigned short alignment_in_bytes) {
   HeapWord* res = CollectedHeap::align_allocation_or_fail(_top, _end, alignment_in_bytes);
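
allocate_aligned rounds the current allocation top up to the requested alignment and fails if the padded request no longer fits before _end. A standalone sketch of that round-up-then-bounds-check step, assuming a power-of-two alignment (align_up and the function name are illustrative, not the PLAB internals):

#include <cstddef>
#include <cstdint>

// Round p up to the next multiple of a power-of-two alignment.
inline char* align_up(char* p, size_t alignment) {
  uintptr_t mask = alignment - 1;
  return reinterpret_cast<char*>(
      (reinterpret_cast<uintptr_t>(p) + mask) & ~mask);
}

// Returns the aligned allocation start, or nullptr when the alignment
// padding plus the request would run past 'end'.
char* align_allocation_or_fail_sketch(char* top, char* end,
                                      size_t size, size_t alignment) {
  char* aligned = align_up(top, alignment);
  if (aligned > end || static_cast<size_t>(end - aligned) < size) {
    return nullptr;  // caller falls back to a slower allocation path
  }
  return aligned;
}
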
--- a/src/share/vm/gc/shared/space.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/shared/space.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@
 #include "gc/shared/spaceDecorator.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/java.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/prefetch.inline.hpp"
--- a/src/share/vm/gc/shared/taskqueue.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/shared/taskqueue.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #include "gc/shared/taskqueue.hpp"
 #include "oops/oop.inline.hpp"
 #include "logging/log.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "runtime/thread.inline.hpp"
 #include "utilities/debug.hpp"
--- a/src/share/vm/gc/shared/taskqueue.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/shared/taskqueue.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
 #include "gc/shared/taskqueue.hpp"
 #include "memory/allocation.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/stack.inline.hpp"
--- a/src/share/vm/gc/shared/workgroup.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/gc/shared/workgroup.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -28,7 +28,7 @@
 #include "gc/shared/workerManager.hpp"
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "runtime/semaphore.hpp"
 #include "runtime/thread.inline.hpp"
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -39,7 +39,7 @@
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "prims/jvmtiThreadState.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -48,7 +48,7 @@
 #include "oops/symbol.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "prims/nativeLookup.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/deoptimization.hpp"
--- a/src/share/vm/logging/logDecorations.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/logging/logDecorations.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -53,10 +53,6 @@
 
   LogDecorations(LogLevelType level, const LogTagSet& tagset, const LogDecorators& decorators);
 
-  LogLevelType level() const {
-    return _level;
-  }
-
   void set_level(LogLevelType level) {
     _level = level;
   }
--- a/src/share/vm/logging/logOutputList.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/logging/logOutputList.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #include "logging/logLevel.hpp"
 #include "logging/logOutputList.hpp"
 #include "memory/allocation.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "utilities/globalDefinitions.hpp"
 
--- a/src/share/vm/logging/logOutputList.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/logging/logOutputList.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 
 #include "logging/logLevel.hpp"
 #include "memory/allocation.hpp"
-#include "runtime/atomic.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 class LogOutput;
@@ -61,6 +60,11 @@
   void add_output(LogOutput* output, LogLevelType level);
   void update_output_level(LogOutputNode* node, LogLevelType level);
 
+  // Bookkeeping functions to keep track of number of active readers/iterators for the list.
+  jint increase_readers();
+  jint decrease_readers();
+  void wait_until_no_readers() const;
+
  public:
   LogOutputList() : _active_readers(0) {
     for (size_t i = 0; i < LogLevel::Count; i++) {
@@ -84,11 +88,6 @@
   // Set (add/update/remove) the output to the specified level.
   void set_output_level(LogOutput* output, LogLevelType level);
 
-  // Bookkeeping functions to keep track of number of active readers/iterators for the list.
-  jint increase_readers();
-  jint decrease_readers();
-  void wait_until_no_readers() const;
-
   class Iterator VALUE_OBJ_CLASS_SPEC {
     friend class LogOutputList;
    private:
--- a/src/share/vm/memory/allocation.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/memory/allocation.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "runtime/task.hpp"
 #include "runtime/threadCritical.hpp"
--- a/src/share/vm/memory/allocation.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/memory/allocation.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
 #define SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
 
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/globalDefinitions.hpp"
--- a/src/share/vm/memory/metaspace.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/memory/metaspace.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -37,7 +37,7 @@
 #include "memory/metaspaceTracer.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/init.hpp"
 #include "runtime/java.hpp"
--- a/src/share/vm/memory/universe.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/memory/universe.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -56,7 +56,7 @@
 #include "oops/oop.inline.hpp"
 #include "oops/typeArrayKlass.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/commandLineFlagConstraintList.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/fprofiler.hpp"
@@ -1129,8 +1129,6 @@
       verify_flags |= Verify_MetaspaceAux;
     } else if (strcmp(token, "jni_handles") == 0) {
       verify_flags |= Verify_JNIHandles;
-    } else if (strcmp(token, "c-heap") == 0) {
-      verify_flags |= Verify_CHeap;
     } else if (strcmp(token, "codecache_oops") == 0) {
       verify_flags |= Verify_CodeCacheOops;
     } else {
@@ -1208,10 +1206,6 @@
     log_debug(gc, verify)("JNIHandles");
     JNIHandles::verify();
   }
-  if (should_verify_subset(Verify_CHeap)) {
-    log_debug(gc, verify)("C-heap");
-    os::check_heap();
-  }
   if (should_verify_subset(Verify_CodeCacheOops)) {
     log_debug(gc, verify)("CodeCache Oops");
     CodeCache::verify_oops();
--- a/src/share/vm/memory/universe.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/memory/universe.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -480,8 +480,7 @@
     Verify_ClassLoaderDataGraph = 64,
     Verify_MetaspaceAux = 128,
     Verify_JNIHandles = 256,
-    Verify_CHeap = 512,
-    Verify_CodeCacheOops = 1024,
+    Verify_CodeCacheOops = 512,
     Verify_All = -1
   };
   static void initialize_verify_flags();
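
Because each Verify_* constant is a distinct power of two, removing Verify_CHeap lets Verify_CodeCacheOops slide down to 512 without disturbing the others, and Verify_All = -1 (all bits set) still covers everything. A standalone sketch of the subset test this bitmask enum supports (flag names trimmed down for illustration):

#include <cstdio>

enum VerifyOption {
  Verify_Threads       = 1,
  Verify_Heap          = 2,
  Verify_CodeCacheOops = 4,   // illustrative subset of the real flags
  Verify_All           = -1   // two's complement: every bit set
};

static int verify_flags = Verify_All;

// A subsystem is verified when its bit survives the mask.
static bool should_verify_subset(int subset) {
  return (verify_flags & subset) != 0;
}

int main() {
  if (should_verify_subset(Verify_CodeCacheOops)) {
    std::printf("verifying CodeCache oops\n");
  }
}
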
--- a/src/share/vm/oops/compiledICHolder.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/oops/compiledICHolder.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #include "oops/compiledICHolder.hpp"
 #include "oops/klass.hpp"
 #include "oops/method.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 
 volatile int CompiledICHolder::_live_count;
 volatile int CompiledICHolder::_live_not_claimed_count;
--- a/src/share/vm/oops/cpCache.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/oops/cpCache.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -32,7 +32,7 @@
 #include "oops/objArrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/methodHandles.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "utilities/macros.hpp"
--- a/src/share/vm/oops/instanceKlass.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/oops/instanceKlass.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -56,7 +56,7 @@
 #include "prims/jvmtiRedefineClasses.hpp"
 #include "prims/jvmtiThreadState.hpp"
 #include "prims/methodComparator.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/fieldDescriptor.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
--- a/src/share/vm/oops/klass.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/oops/klass.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -36,7 +36,7 @@
 #include "oops/instanceKlass.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "trace/traceMacros.hpp"
 #include "utilities/macros.hpp"
--- a/src/share/vm/oops/oop.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/oops/oop.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -36,7 +36,7 @@
 #include "oops/klass.inline.hpp"
 #include "oops/markOop.inline.hpp"
 #include "oops/oop.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 #include "utilities/macros.hpp"
--- a/src/share/vm/oops/symbol.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/oops/symbol.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -29,7 +29,7 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/symbol.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 
 Symbol::Symbol(const u1* name, int length, int refcount) {
@@ -229,24 +229,25 @@
 }
 
 void Symbol::increment_refcount() {
-  // Only increment the refcount if positive.  If negative either
+  // Only increment the refcount if non-negative.  If negative either
   // overflow has occurred or it is a permanent symbol in a read only
   // shared archive.
-  if (_refcount >= 0) {
+  if (_refcount >= 0) { // not a permanent symbol
     Atomic::inc(&_refcount);
     NOT_PRODUCT(Atomic::inc(&_total_count);)
   }
 }
 
 void Symbol::decrement_refcount() {
-  if (_refcount >= 0) {
-    Atomic::dec(&_refcount);
+  if (_refcount >= 0) { // not a permanent symbol
+    jshort new_value = Atomic::add(-1, &_refcount);
 #ifdef ASSERT
-    if (_refcount < 0) {
+    if (new_value == -1) { // we have transitioned from 0 -> -1
       print();
       assert(false, "reference count underflow for symbol");
     }
 #endif
+    (void)new_value;
   }
 }
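
The decrement rewrite does more than adopt the new jshort Atomic::add: re-reading _refcount after a separate Atomic::dec races with other threads, so the old assert could fire spuriously or miss a genuine underflow. Keying the check off the value the atomic add itself returns pins it to this thread's own 0 -> -1 transition. A standalone sketch of the same idea using std::atomic (a negative count models the permanent-symbol sentinel):

#include <atomic>
#include <cassert>

std::atomic<int> refcount{0};

void decrement_refcount_sketch() {
  if (refcount.load() >= 0) {  // negative means permanent: never decrement
    // fetch_add returns the prior value, so prior - 1 is *our* result and
    // cannot be perturbed by decrements that land after ours.
    int new_value = refcount.fetch_add(-1) - 1;
    assert(new_value != -1 && "reference count underflow");
    (void)new_value;  // silence unused-variable warnings under NDEBUG
  }
}
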
 
--- a/src/share/vm/oops/symbol.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/oops/symbol.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -26,8 +26,8 @@
 #define SHARE_VM_OOPS_SYMBOL_HPP
 
 #include "memory/allocation.hpp"
-#include "runtime/atomic.hpp"
 #include "utilities/exceptions.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/utf8.hpp"
 
 // A Symbol is a canonicalized string.
--- a/src/share/vm/opto/bytecodeInfo.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/opto/bytecodeInfo.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -508,7 +508,7 @@
 #if INCLUDE_TRACE
   EventCompilerInlining event;
   if (event.should_commit()) {
-    event.set_compileID(C->compile_id());
+    event.set_compileId(C->compile_id());
     event.set_message(inline_msg);
     event.set_succeeded(success);
     event.set_bci(caller_bci);
--- a/src/share/vm/opto/compile.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/opto/compile.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -729,7 +729,7 @@
     if (event.should_commit()) {
       event.set_starttime(C->_latest_stage_start_counter);
       event.set_phase((u1) cpt);
-      event.set_compileID(C->_compile_id);
+      event.set_compileId(C->_compile_id);
       event.set_phaseLevel(level);
       event.commit();
     }
@@ -748,7 +748,7 @@
     if (event.should_commit()) {
       event.set_starttime(C->_latest_stage_start_counter);
       event.set_phase((u1) PHASE_END);
-      event.set_compileID(C->_compile_id);
+      event.set_compileId(C->_compile_id);
       event.set_phaseLevel(level);
       event.commit();
     }
--- a/src/share/vm/opto/runtime.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/opto/runtime.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -60,7 +60,7 @@
 #include "opto/runtime.hpp"
 #include "opto/subnode.hpp"
 #include "prims/jvmtiThreadState.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/fprofiler.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
--- a/src/share/vm/prims/jni.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/prims/jni.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -59,7 +59,7 @@
 #include "prims/jvm_misc.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "prims/jvmtiThreadState.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/fieldDescriptor.hpp"
 #include "runtime/fprofiler.hpp"
--- a/src/share/vm/prims/jvm.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/prims/jvm.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -54,7 +54,7 @@
 #include "prims/privilegedStack.hpp"
 #include "prims/stackwalk.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
--- a/src/share/vm/prims/jvmtiImpl.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/prims/jvmtiImpl.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,7 +36,7 @@
 #include "prims/jvmtiEventController.inline.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "prims/jvmtiRedefineClasses.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/handles.inline.hpp"
--- a/src/share/vm/prims/jvmtiRawMonitor.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/prims/jvmtiRawMonitor.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
 
 #include "precompiled.hpp"
 #include "prims/jvmtiRawMonitor.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/thread.inline.hpp"
--- a/src/share/vm/prims/jvmtiTagMap.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/prims/jvmtiTagMap.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -3119,6 +3119,11 @@
             }
           }
 
+          // Follow oops from compiled nmethod
+          if (jvf->cb() != NULL && jvf->cb()->is_nmethod()) {
+            blk->set_context(thread_tag, tid, depth, method);
+            jvf->cb()->as_nmethod()->oops_do(blk);
+          }
         } else {
           blk->set_context(thread_tag, tid, depth, method);
           if (is_top_frame) {
--- a/src/share/vm/prims/unsafe.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/prims/unsafe.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -32,7 +32,7 @@
 #include "prims/jni.h"
 #include "prims/jvm.h"
 #include "prims/unsafe.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/orderAccess.inline.hpp"
@@ -1047,7 +1047,7 @@
 
   if (event.should_commit()) {
     oop obj = thread->current_park_blocker();
-    event.set_klass((obj != NULL) ? obj->klass() : NULL);
+    event.set_parkedClass((obj != NULL) ? obj->klass() : NULL);
     event.set_timeout(time);
     event.set_address((obj != NULL) ? (TYPE_ADDRESS) cast_from_oop<uintptr_t>(obj) : 0);
     event.commit();
--- a/src/share/vm/runtime/atomic.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/runtime/atomic.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #define SHARE_VM_RUNTIME_ATOMIC_HPP
 
 #include "memory/allocation.hpp"
+#include "utilities/macros.hpp"
 
 enum cmpxchg_memory_order {
   memory_order_relaxed,
@@ -75,6 +76,7 @@
 
   // Atomically add to a location. Returns updated value. add*() provide:
   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
+  inline static jshort   add    (jshort   add_value, volatile jshort*   dest);
   inline static jint     add    (jint     add_value, volatile jint*     dest);
   inline static size_t   add    (size_t   add_value, volatile size_t*   dest);
   inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
@@ -119,24 +121,120 @@
   inline static void*        cmpxchg_ptr(void*        exchange_value, volatile void*         dest, void*        compare_value, cmpxchg_memory_order order = memory_order_conservative);
 };
 
-// To use Atomic::inc(jshort* dest) and Atomic::dec(jshort* dest), the address must be specially
-// aligned, such that (*dest) occupies the upper 16 bits of an aligned 32-bit word. The best way to
-// achieve is to place your short value next to another short value, which doesn't need atomic ops.
-//
-// Example
-//  ATOMIC_SHORT_PAIR(
-//    volatile short _refcount,  // needs atomic operation
-//    unsigned short _length     // number of UTF8 characters in the symbol (does not need atomic op)
-//  );
+// platform specific in-line definitions - must come before shared definitions
 
-#ifdef VM_LITTLE_ENDIAN
-  #define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl)  \
-    non_atomic_decl;                                       \
-    atomic_decl
-#else
-  #define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl)  \
-    atomic_decl;                                           \
-    non_atomic_decl
+#include OS_CPU_HEADER(atomic)
+
+// shared in-line definitions
+
+// size_t casts...
+#if (SIZE_MAX != UINTPTR_MAX)
+#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
 #endif
 
+inline size_t Atomic::add(size_t add_value, volatile size_t* dest) {
+  return (size_t) add_ptr((intptr_t) add_value, (volatile intptr_t*) dest);
+}
+
+inline void Atomic::inc(volatile size_t* dest) {
+  inc_ptr((volatile intptr_t*) dest);
+}
+
+inline void Atomic::dec(volatile size_t* dest) {
+  dec_ptr((volatile intptr_t*) dest);
+}
+
+#ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+/*
+ * This is the default implementation of byte-sized cmpxchg. It emulates jbyte-sized cmpxchg
+ * in terms of jint-sized cmpxchg. Platforms may override this by defining their own inline definition
+ * as well as defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE. This will cause the platform specific
+ * implementation to be used instead.
+ */
+inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest,
+                             jbyte compare_value, cmpxchg_memory_order order) {
+  STATIC_ASSERT(sizeof(jbyte) == 1);
+  volatile jint* dest_int =
+      static_cast<volatile jint*>(align_ptr_down(dest, sizeof(jint)));
+  size_t offset = pointer_delta(dest, dest_int, 1);
+  jint cur = *dest_int;
+  jbyte* cur_as_bytes = reinterpret_cast<jbyte*>(&cur);
+
+  // current value may not be what we are looking for, so force it
+  // to that value so the initial cmpxchg will fail if it is different
+  cur_as_bytes[offset] = compare_value;
+
+  // always execute a real cmpxchg so that we get the required memory
+  // barriers even on initial failure
+  do {
+    // value to swap in matches current value ...
+    jint new_value = cur;
+    // ... except for the one jbyte we want to update
+    reinterpret_cast<jbyte*>(&new_value)[offset] = exchange_value;
+
+    jint res = cmpxchg(new_value, dest_int, cur, order);
+    if (res == cur) break; // success
+
+    // at least one jbyte in the jint changed value, so update
+    // our view of the current jint
+    cur = res;
+    // if our jbyte is still as cur we loop and try again
+  } while (cur_as_bytes[offset] == compare_value);
+
+  return cur_as_bytes[offset];
+}
+
+#endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+
+inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
+  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
+  return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+inline unsigned Atomic::cmpxchg(unsigned int exchange_value,
+                         volatile unsigned int* dest, unsigned int compare_value,
+                         cmpxchg_memory_order order) {
+  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
+  return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
+                                       (jint)compare_value, order);
+}
+
+inline jlong Atomic::add(jlong    add_value, volatile jlong*    dest) {
+  jlong old = load(dest);
+  jlong new_value = old + add_value;
+  while (old != cmpxchg(new_value, dest, old)) {
+    old = load(dest);
+    new_value = old + add_value;
+  }
+  return old;
+}
+
+inline jshort Atomic::add(jshort add_value, volatile jshort* dest) {
+  // Most platforms do not support atomic add on a 2-byte value. However,
+  // if the value occupies the most significant 16 bits of an aligned 32-bit
+  // word, then we can do this with an atomic add of (add_value << 16)
+  // to the 32-bit word.
+  //
+  // The least significant parts of this 32-bit word will never be affected, even
+  // in case of overflow/underflow.
+  //
+  // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
+#ifdef VM_LITTLE_ENDIAN
+  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
+  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
+#else
+  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
+  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
+#endif
+  return (jshort)(new_value >> 16); // preserves sign
+}
+
+inline void Atomic::inc(volatile jshort* dest) {
+  (void)add(1, dest);
+}
+
+inline void Atomic::dec(volatile jshort* dest) {
+  (void)add(-1, dest);
+}
+
 #endif // SHARE_VM_RUNTIME_ATOMIC_HPP
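
A worked example of the high-half trick for the little-endian case: ATOMIC_SHORT_PAIR places the counter at an address congruent to 2 (mod 4), i.e. in the top 16 bits of the aligned 32-bit word, so a 32-bit add of add_value << 16 lets any carry fall off the top of the word instead of corrupting the neighboring 16 bits, and new_value >> 16 recovers the signed result. A standalone sketch with std::atomic standing in for Atomic::add, the word layout hand-built rather than produced by the macro:

#include <atomic>
#include <cstdint>
#include <cstdio>

// 16-bit counter in the high half of an aligned 32-bit word; the low half
// holds unrelated data that the add must not disturb.
std::atomic<uint32_t> word{0x0000ABCDu};  // counter = 0, low half = 0xABCD

int16_t add_to_high_half(int16_t add_value) {
  uint32_t delta =
      static_cast<uint32_t>(static_cast<uint16_t>(add_value)) << 16;
  // fetch_add returns the previous word; adding delta again yields the new
  // word. Carries/borrows from the high half wrap out of the 32-bit value.
  uint32_t new_word = word.fetch_add(delta) + delta;
  return static_cast<int16_t>(new_word >> 16);  // sign preserved
}

int main() {
  add_to_high_half(1);
  int16_t counter = add_to_high_half(1);
  std::printf("counter=%d low=0x%04X\n",
              (int)counter, (unsigned)(word.load() & 0xFFFFu));
  // prints: counter=2 low=0xABCD -- the unrelated low half is untouched
}
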
--- a/src/share/vm/runtime/atomic.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_RUNTIME_ATOMIC_INLINE_HPP
-#define SHARE_VM_RUNTIME_ATOMIC_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "utilities/macros.hpp"
-
-#include OS_CPU_HEADER_INLINE(atomic)
-
-// size_t casts...
-#if (SIZE_MAX != UINTPTR_MAX)
-#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
-#endif
-
-inline size_t Atomic::add(size_t add_value, volatile size_t* dest) {
-  return (size_t) add_ptr((intptr_t) add_value, (volatile intptr_t*) dest);
-}
-
-inline void Atomic::inc(volatile size_t* dest) {
-  inc_ptr((volatile intptr_t*) dest);
-}
-
-inline void Atomic::dec(volatile size_t* dest) {
-  dec_ptr((volatile intptr_t*) dest);
-}
-
-#ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-/*
- * This is the default implementation of byte-sized cmpxchg. It emulates jbyte-sized cmpxchg
- * in terms of jint-sized cmpxchg. Platforms may override this by defining their own inline definition
- * as well as defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE. This will cause the platform specific
- * implementation to be used instead.
- */
-inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte *dest, jbyte comparand, cmpxchg_memory_order order)
-{
-  assert(sizeof(jbyte) == 1, "assumption.");
-  uintptr_t dest_addr = (uintptr_t)dest;
-  uintptr_t offset = dest_addr % sizeof(jint);
-  volatile jint* dest_int = (volatile jint*)(dest_addr - offset);
-  jint cur = *dest_int;
-  jbyte* cur_as_bytes = (jbyte*)(&cur);
-  jint new_val = cur;
-  jbyte* new_val_as_bytes = (jbyte*)(&new_val);
-  new_val_as_bytes[offset] = exchange_value;
-  while (cur_as_bytes[offset] == comparand) {
-    jint res = cmpxchg(new_val, dest_int, cur, order);
-    if (res == cur) break;
-    cur = res;
-    new_val = cur;
-    new_val_as_bytes[offset] = exchange_value;
-  }
-  return cur_as_bytes[offset];
-}
-#endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-
-inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
-  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
-  return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-inline unsigned Atomic::cmpxchg(unsigned int exchange_value,
-                         volatile unsigned int* dest, unsigned int compare_value,
-                         cmpxchg_memory_order order) {
-  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
-  return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
-                                       (jint)compare_value, order);
-}
-
-inline jlong Atomic::add(jlong    add_value, volatile jlong*    dest) {
-  jlong old = load(dest);
-  jlong new_value = old + add_value;
-  while (old != cmpxchg(new_value, dest, old)) {
-    old = load(dest);
-    new_value = old + add_value;
-  }
-  return old;
-}
-
-inline void Atomic::inc(volatile short* dest) {
-  // Most platforms do not support atomic increment on a 2-byte value. However,
-  // if the value occupies the most significant 16 bits of an aligned 32-bit
-  // word, then we can do this with an atomic add of 0x10000 to the 32-bit word.
-  //
-  // The least significant parts of this 32-bit word will never be affected, even
-  // in case of overflow/underflow.
-  //
-  // Use the ATOMIC_SHORT_PAIR macro to get the desired alignment.
-#ifdef VM_LITTLE_ENDIAN
-  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
-  (void)Atomic::add(0x10000, (volatile int*)(dest-1));
-#else
-  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
-  (void)Atomic::add(0x10000, (volatile int*)(dest));
-#endif
-}
-
-inline void Atomic::dec(volatile short* dest) {
-#ifdef VM_LITTLE_ENDIAN
-  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
-  (void)Atomic::add(-0x10000, (volatile int*)(dest-1));
-#else
-  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
-  (void)Atomic::add(-0x10000, (volatile int*)(dest));
-#endif
-}
-
-#endif // SHARE_VM_RUNTIME_ATOMIC_INLINE_HPP
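
The deleted emulation survives above in atomic.hpp in hardened form (STATIC_ASSERT, align_ptr_down, and a guaranteed real cmpxchg on the first pass so the required barriers always fire). The widening idea itself: CAS the aligned 32-bit word containing the byte, proposing a word that differs only in the target byte, and retry while the observed byte still matches the expected value. A standalone sketch with std::atomic, simplified to take the byte's offset directly and to return early when the byte already differs:

#include <atomic>
#include <cstdint>
#include <cstring>

// Emulate a 1-byte CAS on byte 'offset' of a 32-bit word using only a
// word-sized compare-exchange. Returns the byte value observed, which
// equals compare_value exactly when the swap succeeded.
int8_t cmpxchg_byte(std::atomic<uint32_t>& word, size_t offset,
                    int8_t exchange_value, int8_t compare_value) {
  uint32_t cur = word.load();
  for (;;) {
    int8_t cur_byte;
    std::memcpy(&cur_byte, reinterpret_cast<char*>(&cur) + offset, 1);
    if (cur_byte != compare_value) {
      return cur_byte;               // byte changed under us: report it
    }
    uint32_t desired = cur;          // identical word ...
    std::memcpy(reinterpret_cast<char*>(&desired) + offset,
                &exchange_value, 1); // ... except the one byte we update
    if (word.compare_exchange_strong(cur, desired)) {
      return compare_value;          // success
    }
    // On failure compare_exchange_strong reloaded 'cur'; loop re-examines.
  }
}
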
--- a/src/share/vm/runtime/biasedLocking.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/runtime/biasedLocking.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -28,7 +28,7 @@
 #include "oops/klass.inline.hpp"
 #include "oops/markOop.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/basicLock.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/task.hpp"
--- a/src/share/vm/runtime/globals.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/runtime/globals.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -975,8 +975,8 @@
 static void trace_flag_changed(const char* name, const T old_value, const T new_value, const Flag::Flags origin) {
   E e;
   e.set_name(name);
-  e.set_old_value(old_value);
-  e.set_new_value(new_value);
+  e.set_oldValue(old_value);
+  e.set_newValue(new_value);
   e.set_origin(origin);
   e.commit();
 }
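
trace_flag_changed is a single template instantiated per event/payload pair, so the oldValue/newValue rename lands once here rather than in every flag-type setter. A standalone sketch of that shape, with a hand-written stand-in for the generated event class (the real ones come from traceevents.xml, whose fields are renamed later in this changeset):

#include <cstdio>

// Illustrative stand-in for a generated trace event.
struct EventLongFlagChangedSketch {
  void set_name(const char* n) { std::printf("name=%s ", n); }
  void set_oldValue(long v)    { std::printf("old=%ld ", v); }
  void set_newValue(long v)    { std::printf("new=%ld ", v); }
  void commit()                { std::printf("\n"); }
};

// E supplies the event layout, T the payload type; a field rename is
// therefore a single-site change in this helper.
template <typename E, typename T>
static void trace_flag_changed(const char* name, T old_value, T new_value) {
  E e;
  e.set_name(name);
  e.set_oldValue(old_value);
  e.set_newValue(new_value);
  e.commit();
}

int main() {
  trace_flag_changed<EventLongFlagChangedSketch, long>("MaxHeapSize", 64, 128);
}
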
--- a/src/share/vm/runtime/globals.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/runtime/globals.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -2242,7 +2242,7 @@
           "in a comma separated string. Sub-systems are: "                  \
           "threads, heap, symbol_table, string_table, codecache, "          \
           "dictionary, classloader_data_graph, metaspace, jni_handles, "    \
-          "c-heap, codecache_oops")                                         \
+          "codecache_oops")                                                 \
                                                                             \
   diagnostic(bool, GCParallelVerificationEnabled, true,                     \
           "Enable parallel memory system verification")                     \
@@ -3008,16 +3008,6 @@
   notproduct(intx, ZombieALotInterval,     5,                               \
           "Number of exits until ZombieALot kicks in")                      \
                                                                             \
-  diagnostic(intx, MallocVerifyInterval,     0,                             \
-          "If non-zero, verify C heap after every N calls to "              \
-          "malloc/realloc/free")                                            \
-          range(0, max_intx)                                                \
-                                                                            \
-  diagnostic(intx, MallocVerifyStart,     0,                                \
-          "If non-zero, start verifying C heap after Nth call to "          \
-          "malloc/realloc/free")                                            \
-          range(0, max_intx)                                                \
-                                                                            \
   diagnostic(uintx, MallocMaxTestWords,     0,                              \
           "If non-zero, maximum number of words that malloc/realloc can "   \
           "allocate (for testing only)")                                    \
--- a/src/share/vm/runtime/handles.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/runtime/handles.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -26,7 +26,7 @@
 #include "memory/allocation.inline.hpp"
 #include "oops/constantPool.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/thread.inline.hpp"
 
--- a/src/share/vm/runtime/interfaceSupport.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/runtime/interfaceSupport.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "memory/resourceArea.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/orderAccess.inline.hpp"
--- a/src/share/vm/runtime/mutex.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/runtime/mutex.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/mutex.hpp"
 #include "runtime/orderAccess.inline.hpp"
--- a/src/share/vm/runtime/objectMonitor.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/runtime/objectMonitor.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -27,7 +27,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/markOop.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/mutexLocker.hpp"
@@ -390,7 +390,7 @@
   }
 
   if (event.should_commit()) {
-    event.set_klass(((oop)this->object())->klass());
+    event.set_monitorClass(((oop)this->object())->klass());
     event.set_previousOwner((TYPE_THREAD)_previous_owner_tid);
     event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
     event.commit();
@@ -1381,7 +1381,7 @@
                                             jlong timeout,
                                             bool timedout) {
   assert(event != NULL, "invariant");
-  event->set_klass(((oop)this->object())->klass());
+  event->set_monitorClass(((oop)this->object())->klass());
   event->set_timeout(timeout);
   event->set_address((TYPE_ADDRESS)this->object_addr());
   event->set_notifier(notifier_tid);
--- a/src/share/vm/runtime/orderAccess.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/runtime/orderAccess.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -26,7 +26,7 @@
 #ifndef SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP
 #define SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP
 
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "utilities/macros.hpp"
 
--- a/src/share/vm/runtime/os.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/runtime/os.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -44,7 +44,7 @@
 #include "prims/jvm_misc.hpp"
 #include "prims/privilegedStack.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/java.hpp"
@@ -596,8 +596,6 @@
   }
 #endif
 
-  NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
-
   // For the test flag -XX:MallocMaxTestWords
   if (has_reached_max_malloc_test_peak(size)) {
     return NULL;
@@ -658,7 +656,6 @@
   // NMT support
   void* membase = MemTracker::malloc_base(memblock);
   verify_memory(membase);
-  NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
   if (size == 0) {
     return NULL;
   }
@@ -695,7 +692,6 @@
   }
   void* membase = MemTracker::record_free(memblock);
   verify_memory(membase);
-  NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
 
   GuardedMemory guarded(membase);
   size_t size = guarded.get_user_size();
--- a/src/share/vm/runtime/os.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/runtime/os.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -709,7 +709,6 @@
   static void* realloc (void *memblock, size_t size, MEMFLAGS flag);
 
   static void  free    (void *memblock);
-  static bool  check_heap(bool force = false);      // verify C heap integrity
   static char* strdup(const char *, MEMFLAGS flags = mtInternal);  // Like strdup
   // Like strdup, but exit VM when strdup() returns NULL
   static char* strdup_check_oom(const char*, MEMFLAGS flags = mtInternal);
--- a/src/share/vm/runtime/reflection.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/runtime/reflection.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -446,7 +446,7 @@
     (accessor_ik->major_version() < Verifier::STRICTER_ACCESS_CTRL_CHECK_VERSION &&
     accessee_ik->major_version() < Verifier::STRICTER_ACCESS_CTRL_CHECK_VERSION)) {
     return classloader_only &&
-      Verifier::relax_verify_for(accessor_ik->class_loader()) &&
+      Verifier::relax_access_for(accessor_ik->class_loader()) &&
       accessor_ik->protection_domain() == accessee_ik->protection_domain() &&
       accessor_ik->class_loader() == accessee_ik->class_loader();
   }
--- a/src/share/vm/runtime/safepoint.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/runtime/safepoint.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -39,7 +39,7 @@
 #include "memory/universe.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
@@ -173,7 +173,7 @@
   //     block itself when it attempts transitions to a new state.
   //
   {
-    EventSafepointStateSync sync_event;
+    EventSafepointStateSynchronization sync_event;
     int initial_running = 0;
 
     _state            = _synchronizing;
--- a/src/share/vm/runtime/sharedRuntime.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -49,7 +49,7 @@
 #include "prims/methodHandles.hpp"
 #include "prims/nativeLookup.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/handles.inline.hpp"
--- a/src/share/vm/runtime/sweeper.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/runtime/sweeper.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -30,7 +30,7 @@
 #include "compiler/compileBroker.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/method.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.inline.hpp"
@@ -485,7 +485,7 @@
   if (event.should_commit()) {
     event.set_starttime(sweep_start_counter);
     event.set_endtime(sweep_end_counter);
-    event.set_sweepIndex(_traversals);
+    event.set_sweepId(_traversals);
     event.set_sweptCount(swept_count);
     event.set_flushedCount(flushed_count);
     event.set_zombifiedCount(zombified_count);
--- a/src/share/vm/runtime/synchronizer.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/runtime/synchronizer.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -30,7 +30,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/markOop.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -1819,7 +1819,7 @@
                                        const ObjectSynchronizer::InflateCause cause) {
 #if INCLUDE_TRACE
   assert(event.should_commit(), "check outside");
-  event.set_klass(obj->klass());
+  event.set_monitorClass(obj->klass());
   event.set_address((TYPE_ADDRESS)(uintptr_t)(void*)obj);
   event.set_cause((u1)cause);
   event.commit();
--- a/src/share/vm/runtime/thread.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/runtime/thread.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -57,7 +57,7 @@
 #include "prims/jvmtiThreadState.hpp"
 #include "prims/privilegedStack.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/commandLineFlagConstraintList.hpp"
 #include "runtime/commandLineFlagWriteableList.hpp"
--- a/src/share/vm/runtime/thread.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/runtime/thread.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
 
 #define SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
 
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/os.inline.hpp"
 #include "runtime/thread.hpp"
 
--- a/src/share/vm/runtime/vmThread.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/runtime/vmThread.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -279,7 +279,6 @@
     HandleMark hm(VMThread::vm_thread());
     // Among other things, this ensures that Eden top is correct.
     Universe::heap()->prepare_for_verify();
-    os::check_heap();
     // Silent verification so as not to pollute normal output,
     // unless we really asked for it.
     Universe::verify();
--- a/src/share/vm/services/mallocTracker.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/services/mallocTracker.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,6 @@
 #include "precompiled.hpp"
 
 #include "runtime/atomic.hpp"
-#include "runtime/atomic.inline.hpp"
 #include "services/mallocSiteTable.hpp"
 #include "services/mallocTracker.hpp"
 #include "services/mallocTracker.inline.hpp"
--- a/src/share/vm/services/memTracker.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/services/memTracker.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,6 @@
 
 #else
 
-#include "runtime/atomic.hpp"
 #include "runtime/threadCritical.hpp"
 #include "services/mallocTracker.hpp"
 #include "services/virtualMemoryTracker.hpp"
--- a/src/share/vm/services/threadService.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/services/threadService.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -31,7 +31,7 @@
 #include "oops/instanceKlass.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/thread.hpp"
--- a/src/share/vm/services/virtualMemoryTracker.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/services/virtualMemoryTracker.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -23,7 +23,7 @@
  */
 #include "precompiled.hpp"
 
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "runtime/threadCritical.hpp"
 #include "services/memTracker.hpp"
--- a/src/share/vm/shark/sharkRuntime.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/shark/sharkRuntime.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -24,7 +24,7 @@
  */
 
 #include "precompiled.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/thread.hpp"
--- a/src/share/vm/trace/traceDataTypes.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/trace/traceDataTypes.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -32,7 +32,6 @@
 enum {
   CONTENT_TYPE_NONE             = 0,
   CONTENT_TYPE_CLASS            = 20,
-  CONTENT_TYPE_UTF8             = 21,
   CONTENT_TYPE_THREAD           = 22,
   CONTENT_TYPE_STACKTRACE       = 23,
   CONTENT_TYPE_BYTES            = 24,
--- a/src/share/vm/trace/traceMacros.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/trace/traceMacros.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -30,7 +30,6 @@
 #define EVENT_THREAD_EXIT(thread)
 #define EVENT_THREAD_DESTRUCT(thread)
 #define TRACE_KLASS_CREATION(k, p, t)
-#define TRACE_KLASS_DEFINITION(k, t)
 
 #define TRACE_INIT_KLASS_ID(k)
 #define TRACE_REMOVE_KLASS_ID(k)
--- a/src/share/vm/trace/traceevents.xml	Thu Aug 25 02:10:03 2016 -0700
+++ b/src/share/vm/trace/traceevents.xml	Fri Aug 26 14:47:52 2016 -0700
@@ -76,111 +76,112 @@
 
   <event id="ThreadPark" path="java/thread_park" label="Java Thread Park"
           has_thread="true" has_stacktrace="true" is_instant="false">
-    <value type="CLASS" field="klass" label="Class Parked On"/>
+    <value type="CLASS" field="parkedClass" label="Class Parked On"/>
     <value type="MILLIS" field="timeout" label="Park Timeout"/>
-    <value type="ADDRESS" field="address" label="Address of Object Parked" relation="JAVA_MONITOR_ADDRESS"/>
+    <value type="ADDRESS" field="address" label="Address of Object Parked" relation="JavaMonitorAddress"/>
   </event>
 
   <event id="JavaMonitorEnter" path="java/monitor_enter" label="Java Monitor Blocked"
           has_thread="true" has_stacktrace="true" is_instant="false">
-    <value type="CLASS" field="klass" label="Monitor Class"/>
+    <value type="CLASS" field="monitorClass" label="Monitor Class"/>
     <value type="THREAD" field="previousOwner" label="Previous Monitor Owner"/>
-    <value type="ADDRESS" field="address" label="Monitor Address" relation="JAVA_MONITOR_ADDRESS"/>
+    <value type="ADDRESS" field="address" label="Monitor Address" relation="JavaMonitorAddress"/>
   </event>
 
   <event id="JavaMonitorWait" path="java/monitor_wait" label="Java Monitor Wait" description="Waiting on a Java monitor"
           has_thread="true" has_stacktrace="true" is_instant="false">
-    <value type="CLASS" field="klass" label="Monitor Class" description="Class of object waited on"/>
+    <value type="CLASS" field="monitorClass" label="Monitor Class" description="Class of object waited on"/>
     <value type="THREAD" field="notifier" label="Notifier Thread" description="Notifying Thread"/>
     <value type="MILLIS" field="timeout" label="Timeout" description="Maximum wait time"/>
     <value type="BOOLEAN" field="timedOut" label="Timed Out" description="Wait has been timed out"/>
-    <value type="ADDRESS" field="address" label="Monitor Address" description="Address of object waited on" relation="JAVA_MONITOR_ADDRESS"/>
+    <value type="ADDRESS" field="address" label="Monitor Address" description="Address of object waited on" relation="JavaMonitorAddress"/>
   </event>
 
   <event id="JavaMonitorInflate" path="java/monitor_inflate" label="Java Monitor Inflated"
          has_thread="true" has_stacktrace="true" is_instant="false">
-    <value type="CLASS" field="klass" label="Monitor Class"/>
-    <value type="ADDRESS" field="address" label="Monitor Address" relation="JAVA_MONITOR_ADDRESS"/>
-    <value type="INFLATECAUSE" field="cause" label="Cause" description="Cause of inflation"/>
+    <value type="CLASS" field="monitorClass" label="Monitor Class"/>
+    <value type="ADDRESS" field="address" label="Monitor Address" relation="JavaMonitorAddress"/>
+    <value type="INFLATECAUSE" field="cause" label="Monitor Inflation Cause" description="Cause of inflation"/>
   </event>
 
-  <event id="ReservedStackActivation" path="java/reserved_stack_activation" label="Reserved Stack Activation" description="Activation of Reserved Stack Area caused by stack overflow with ReservedStackAccess annotated method in call stack"
-          has_thread="true" has_stacktrace="true" is_instant="true">
+  <event id="ReservedStackActivation" path="vm/runtime/reserved_stack_activation" label="Reserved Stack Activation"
+         description="Activation of Reserved Stack Area caused by stack overflow with ReservedStackAccess annotated method in call stack"
+         has_thread="true" has_stacktrace="true" is_instant="true">
       <value type="METHOD" field="method" label="Java Method"/>
   </event>
 
   <event id="ClassLoad" path="vm/class/load" label="Class Load"
-          has_thread="true" has_stacktrace="true" is_instant="false">
+         has_thread="true" has_stacktrace="true" is_instant="false">
     <value type="CLASS" field="loadedClass" label="Loaded Class"/>
     <value type="CLASS" field="definingClassLoader" label="Defining Class Loader"/>
     <value type="CLASS" field="initiatingClassLoader" label="Initiating Class Loader"/>
   </event>
 
   <event id="ClassDefine" path="vm/class/define" label="Class Define"
-          has_thread="true" has_stacktrace="true" is_instant="true">
+         has_thread="true" has_stacktrace="true" is_instant="true">
     <value type="CLASS" field="definedClass" label="Defined Class"/>
     <value type="CLASS" field="definingClassLoader" label="Defining Class Loader"/>
   </event>
 
   <event id="ClassUnload" path="vm/class/unload" label="Class Unload"
-      has_thread="true" is_instant="true">
+         has_thread="true" is_instant="true">
     <value type="CLASS" field="unloadedClass" label="Unloaded Class"/>
     <value type="CLASS" field="definingClassLoader" label="Defining Class Loader"/>
   </event>
 
   <event id="IntFlagChanged" path="vm/flag/int_changed" label="Int Flag Changed"
-        is_instant="true">
-    <value type="UTF8" field="name" label="Name" />
-    <value type="INTEGER" field="old_value" label="Old Value" />
-    <value type="INTEGER" field="new_value" label="New Value" />
+         is_instant="true">
+    <value type="STRING" field="name" label="Name" />
+    <value type="INTEGER" field="oldValue" label="Old Value" />
+    <value type="INTEGER" field="newValue" label="New Value" />
     <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
   </event>
 
   <event id="UnsignedIntFlagChanged" path="vm/flag/uint_changed" label="Unsigned Int Flag Changed"
-        is_instant="true">
-    <value type="UTF8" field="name" label="Name" />
-    <value type="UINT" field="old_value" label="Old Value" />
-    <value type="UINT" field="new_value" label="New Value" />
+         is_instant="true">
+    <value type="STRING" field="name" label="Name" />
+    <value type="UINT" field="oldValue" label="Old Value" />
+    <value type="UINT" field="newValue" label="New Value" />
     <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
   </event>
 
   <event id="LongFlagChanged" path="vm/flag/long_changed" label="Long Flag Changed"
-        is_instant="true">
-    <value type="UTF8" field="name" label="Name" />
-    <value type="LONG" field="old_value" label="Old Value" />
-    <value type="LONG" field="new_value" label="New Value" />
+         is_instant="true">
+    <value type="STRING" field="name" label="Name" />
+    <value type="LONG" field="oldValue" label="Old Value" />
+    <value type="LONG" field="newValue" label="New Value" />
     <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
   </event>
 
   <event id="UnsignedLongFlagChanged" path="vm/flag/ulong_changed" label="Unsigned Long Flag Changed"
-        is_instant="true">
-    <value type="UTF8" field="name" label="Name" />
-    <value type="ULONG" field="old_value" label="Old Value" />
-    <value type="ULONG" field="new_value" label="New Value" />
+         is_instant="true">
+    <value type="STRING" field="name" label="Name" />
+    <value type="ULONG" field="oldValue" label="Old Value" />
+    <value type="ULONG" field="newValue" label="New Value" />
     <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
   </event>
 
   <event id="DoubleFlagChanged" path="vm/flag/double_changed" label="Double Flag Changed"
-       is_instant="true">
-    <value type="UTF8" field="name" label="Name" />
-    <value type="DOUBLE" field="old_value" label="Old Value" />
-    <value type="DOUBLE" field="new_value" label="New Value" />
+         is_instant="true">
+    <value type="STRING" field="name" label="Name" />
+    <value type="DOUBLE" field="oldValue" label="Old Value" />
+    <value type="DOUBLE" field="newValue" label="New Value" />
     <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
   </event>
 
   <event id="BooleanFlagChanged" path="vm/flag/boolean_changed" label="Boolean Flag Changed"
-       is_instant="true">
-    <value type="UTF8" field="name" label="Name" />
-    <value type="BOOLEAN" field="old_value" label="Old Value" />
-    <value type="BOOLEAN" field="new_value" label="New Value" />
+         is_instant="true">
+    <value type="STRING" field="name" label="Name" />
+    <value type="BOOLEAN" field="oldValue" label="Old Value" />
+    <value type="BOOLEAN" field="newValue" label="New Value" />
     <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
   </event>
 
   <event id="StringFlagChanged" path="vm/flag/string_changed" label="String Flag Changed"
-       is_instant="true">
-    <value type="UTF8" field="name" label="Name" />
-    <value type="UTF8" field="old_value" label="Old Value" />
-    <value type="UTF8" field="new_value" label="New Value" />
+         is_instant="true">
+    <value type="STRING" field="name" label="Name" />
+    <value type="STRING" field="oldValue" label="Old Value" />
+    <value type="STRING" field="newValue" label="New Value" />
     <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
   </event>
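
Two renames run in lockstep through all six flag events: the raw type
UTF8 -> STRING (metadata only; the C++ side still passes a const char*) and the
snake_case -> camelCase field names, which make the generated setters uniform
across the whole family. That uniformity is what lets a single templated helper
emit every variant; a sketch patterned on HotSpot's globals.cpp (the helper
name and the Flag::Flags origin type follow that file, but treat the exact
shape as illustrative):

    #include "trace/tracing.hpp"  // generated event classes (EventIntFlagChanged, ...)

    // Sketch: one templated helper covers every *FlagChanged event because
    // the renamed fields (name/oldValue/newValue/origin) are now spelled
    // identically across all six definitions above.
    template <class EventType, class T>
    static void trace_flag_changed(const char* name, const T old_value,
                                   const T new_value, const Flag::Flags origin) {
      EventType e;
      e.set_name(name);           // STRING still maps to const char* in C++
      e.set_oldValue(old_value);  // was set_old_value()
      e.set_newValue(new_value);  // was set_new_value()
      e.set_origin(origin);
      e.commit();
    }

    // Usage when, e.g., an int flag is changed at runtime:
    //   trace_flag_changed<EventIntFlagChanged, s4>(name, old_value, new_value, origin);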
 
@@ -200,7 +201,7 @@
   </struct>
 
   <event id="GCHeapSummary" path="vm/gc/heap/summary" label="Heap Summary" is_instant="true">
-    <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+    <value type="UINT" field="gcId" label="GC Identifier" relation="GcId"/>
     <value type="GCWHEN" field="when" label="When" />
     <structvalue type="VirtualSpace" field="heapSpace" label="Heap Space"/>
     <value type="BYTES64" field="heapUsed" label="Heap Used" description="Bytes allocated by objects in the heap"/>
@@ -213,7 +214,7 @@
   </struct>
 
   <event id="MetaspaceSummary" path="vm/gc/heap/metaspace_summary" label="Metaspace Summary" is_instant="true">
-    <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+    <value type="UINT" field="gcId" label="GC Identifier" relation="GcId"/>
     <value type="GCWHEN" field="when" label="When" />
     <value type="BYTES64" field="gcThreshold" label="GC Threshold" />