changeset 40883:765d366c28e1

Merge
author iveresov
date Fri, 26 Aug 2016 14:47:52 -0700
parents 23f882a600ce 1c22cfe35580
children 0e28c526f5d3 1a952c00b8e4
files hotspot/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp hotspot/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.inline.hpp hotspot/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.inline.hpp hotspot/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.inline.hpp hotspot/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp hotspot/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp hotspot/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp hotspot/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp hotspot/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp hotspot/src/os_cpu/windows_x86/vm/atomic_windows_x86.inline.hpp hotspot/src/share/vm/c1/c1_GraphBuilder.cpp hotspot/src/share/vm/runtime/atomic.inline.hpp hotspot/src/share/vm/runtime/globals.hpp hotspot/test/serviceability/sa/jmap-hashcode/Test8028623.java jaxp/src/java.xml/share/classes/javax/xml/catalog/CatalogUriResolver.java jaxp/src/java.xml/share/classes/javax/xml/catalog/CatalogUriResolverImpl.java jdk/make/data/cryptopolicy/limited/default_local.policy jdk/make/data/cryptopolicy/limited/exempt_local.policy jdk/make/data/cryptopolicy/unlimited/default_US_export.policy jdk/make/data/cryptopolicy/unlimited/default_local.policy jdk/make/gendata/GendataPolicyJars.gmk langtools/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/util/DocletAbortException.java
diffstat 637 files changed, 19924 insertions(+), 8109 deletions(-)
--- a/.hgtags	Thu Aug 25 02:10:03 2016 -0700
+++ b/.hgtags	Fri Aug 26 14:47:52 2016 -0700
@@ -375,3 +375,4 @@
 e613affb88d178dc7c589f1679db113d589bddb4 jdk-9+130
 4d2a15091124488080d65848b704e25599b2aaeb jdk-9+131
 2e83d21d78cd9c1d52e6cd2599e9c8aa36ea1f52 jdk-9+132
+e17429a7e843c4a4ed3651458d0f950970edcbcc jdk-9+133
--- a/.hgtags-top-repo	Thu Aug 25 02:10:03 2016 -0700
+++ b/.hgtags-top-repo	Fri Aug 26 14:47:52 2016 -0700
@@ -375,3 +375,4 @@
 d94d54a3192fea79234c3ac55cd0b4052d45e954 jdk-9+130
 8728756c2f70a79a90188f4019cfd6b9a275765c jdk-9+131
 a24702d4d5ab0015a5c553ed57f66fce7d85155e jdk-9+132
+be1218f792a450dfb5d4b1f82616b9d95a6a732e jdk-9+133
--- a/corba/.hgtags	Thu Aug 25 02:10:03 2016 -0700
+++ b/corba/.hgtags	Fri Aug 26 14:47:52 2016 -0700
@@ -375,3 +375,4 @@
 77f9692d5976ae155773dd3e07533616bb95bae1 jdk-9+130
 f7e1d5337c2e550fe553df7a3886bbed80292ecd jdk-9+131
 1ab4b9399c4cba584f66c1c088188f2f565fbf9c jdk-9+132
+2021bfedf1c478a4808a7711a6090682a12f4c0e jdk-9+133
--- a/hotspot/.hgtags	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/.hgtags	Fri Aug 26 14:47:52 2016 -0700
@@ -535,3 +535,4 @@
 7d54c7056328b6a2bf4877458b8f4d8cd870f93b jdk-9+130
 943bf73b49c33c2d7cbd796f6a4ae3c7a00ae932 jdk-9+131
 713951c08aa26813375175c2ab6cc99ff2a56903 jdk-9+132
+a25e0fb6033245ab075136e744d362ce765464cd jdk-9+133
--- a/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -28,6 +28,7 @@
 
 #include "runtime/globals_extension.hpp"
 #include "runtime/vm_version.hpp"
+#include "utilities/sizes.hpp"
 
 class VM_Version : public Abstract_VM_Version {
   friend class JVMCIVMStructs;
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -37,7 +37,7 @@
 #include "prims/jvmtiExport.hpp"
 #include "prims/jvmtiThreadState.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
--- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/G1CollectedHeap.java	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/G1CollectedHeap.java	Fri Aug 26 14:47:52 2016 -0700
@@ -24,6 +24,7 @@
 
 package sun.jvm.hotspot.gc.g1;
 
+import java.io.PrintStream;
 import java.util.Iterator;
 import java.util.Observable;
 import java.util.Observer;
@@ -125,6 +126,15 @@
         return CollectedHeapName.G1_COLLECTED_HEAP;
     }
 
+    @Override
+    public void printOn(PrintStream tty) {
+        MemRegion mr = reservedRegion();
+
+        tty.print("garbage-first heap");
+        tty.print(" [" + mr.start() + ", " + mr.end() + "]");
+        tty.println(" region size " + (HeapRegion.grainBytes() / 1024) + "K");
+    }
+
     public G1CollectedHeap(Address addr) {
         super(addr);
     }
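For reference, the printOn override added above emits one summary line per heap. With an illustrative reserved range (the addresses below are made up, not taken from this changeset) and a 1M region grain, the output would be:

  garbage-first heap [0x00000000c0000000, 0x0000000100000000] region size 1024K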
--- a/hotspot/src/os/aix/vm/os_aix.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/os/aix/vm/os_aix.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -52,7 +52,7 @@
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/extendedPC.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -3800,10 +3800,6 @@
   return ::stat(pathbuf, sbuf);
 }
 
-bool os::check_heap(bool force) {
-  return true;
-}
-
 // Is a (classpath) directory empty?
 bool os::dir_is_empty(const char* path) {
   DIR *dir = NULL;
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -42,7 +42,7 @@
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/extendedPC.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -3780,11 +3780,6 @@
   return diff;
 }
 
-
-bool os::check_heap(bool force) {
-  return true;
-}
-
 // Is a (classpath) directory empty?
 bool os::dir_is_empty(const char* path) {
   DIR *dir = NULL;
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -42,7 +42,7 @@
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/extendedPC.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -5174,10 +5174,6 @@
   return ::stat(pathbuf, sbuf);
 }
 
-bool os::check_heap(bool force) {
-  return true;
-}
-
 // Is a (classpath) directory empty?
 bool os::dir_is_empty(const char* path) {
   DIR *dir = NULL;
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -42,7 +42,7 @@
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/extendedPC.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -4589,10 +4589,6 @@
   }
 }
 
-// OS interface.
-
-bool os::check_heap(bool force) { return true; }
-
 // Is a (classpath) directory empty?
 bool os::dir_is_empty(const char* path) {
   DIR *dir = NULL;
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -45,7 +45,7 @@
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/extendedPC.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -5258,75 +5258,6 @@
   }
 }
 
-//--------------------------------------------------------------------------------------------------
-// Non-product code
-
-static int mallocDebugIntervalCounter = 0;
-static int mallocDebugCounter = 0;
-
-// For debugging possible bugs inside HeapWalk (a ring buffer)
-#define SAVE_COUNT 8
-static PROCESS_HEAP_ENTRY saved_heap_entries[SAVE_COUNT];
-static int saved_heap_entry_index;
-
-bool os::check_heap(bool force) {
-  if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
-  if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
-    // Note: HeapValidate executes two hardware breakpoints when it finds something
-    // wrong; at these points, eax contains the address of the offending block (I think).
-    // To get to the explicit error message(s) below, just continue twice.
-    //
-    // Note:  we want to check the CRT heap, which is not necessarily located in the
-    // process default heap.
-    HANDLE heap = (HANDLE) _get_heap_handle();
-    if (!heap) {
-      return true;
-    }
-
-    // If we fail to lock the heap, then gflags.exe has been used
-    // or some other special heap flag has been set that prevents
-    // locking. We don't try to walk a heap we can't lock.
-    if (HeapLock(heap) != 0) {
-      PROCESS_HEAP_ENTRY phe;
-      phe.lpData = NULL;
-      memset(saved_heap_entries, 0, sizeof(saved_heap_entries));
-      saved_heap_entry_index = 0;
-      int count = 0;
-
-      while (HeapWalk(heap, &phe) != 0) {
-        count ++;
-        if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
-            !HeapValidate(heap, 0, phe.lpData)) {
-          tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
-          tty->print_cr("corrupted block near address %#x, length %d, count %d", phe.lpData, phe.cbData, count);
-          HeapUnlock(heap);
-          fatal("corrupted C heap");
-        } else {
-          // Save previous seen entries in a ring buffer. We have seen strange
-          // heap corruption fatal errors that produced mdmp files, but when we load
-          // these mdmp files in WinDBG, "!heap -triage" shows no error.
-          // We can examine the saved_heap_entries[] array in the mdmp file to
-          // diagnose such seemingly spurious errors reported by HeapWalk.
-          saved_heap_entries[saved_heap_entry_index++] = phe;
-          if (saved_heap_entry_index >= SAVE_COUNT) {
-            saved_heap_entry_index = 0;
-          }
-        }
-      }
-      DWORD err = GetLastError();
-      if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED &&
-         (err == ERROR_INVALID_FUNCTION && phe.lpData != NULL)) {
-        HeapUnlock(heap);
-        fatal("heap walk aborted with error %d", err);
-      }
-      HeapUnlock(heap);
-    }
-    mallocDebugIntervalCounter = 0;
-  }
-  return true;
-}
-
-
 bool os::find(address addr, outputStream* st) {
   int offset = -1;
   bool result = false;
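The os::check_heap implementation removed above was built on the Win32 heap-debugging API. For readers who have not used it, here is a minimal standalone sketch of the same HeapWalk pattern (my own reduction, not the removed code: it walks the default process heap rather than the CRT heap returned by _get_heap_handle(), and prints instead of calling fatal()):

#include <windows.h>
#include <cstdio>

int main() {
  HANDLE heap = GetProcessHeap();
  if (HeapLock(heap) != 0) {               // don't walk a heap we can't lock
    PROCESS_HEAP_ENTRY phe;
    phe.lpData = NULL;
    while (HeapWalk(heap, &phe) != 0) {
      // Validate only busy (allocated) blocks, as the removed code did.
      if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
          !HeapValidate(heap, 0, phe.lpData)) {
        std::printf("corrupted block near %p, length %lu\n",
                    phe.lpData, (unsigned long)phe.cbData);
      }
    }
    DWORD err = GetLastError();
    if (err != ERROR_NO_MORE_ITEMS) {      // normal end-of-walk status
      std::printf("heap walk aborted with error %lu\n", (unsigned long)err);
    }
    HeapUnlock(heap);
  }
  return 0;
}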
--- a/hotspot/src/os/windows/vm/threadCritical_windows.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/os/windows/vm/threadCritical_windows.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadCritical.hpp"
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_HPP
+#define OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_HPP
+
+#ifndef _LP64
+#error "Atomic currently only impleneted for PPC64"
+#endif
+
+// Implementation of class atomic
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+//
+//   machine barrier instructions:
+//
+//   - ppc_sync            two-way memory barrier, aka fence
+//   - ppc_lwsync          orders  Store|Store,
+//                                  Load|Store,
+//                                  Load|Load,
+//                         but not Store|Load
+//   - ppc_eieio           orders memory accesses for device memory (only)
+//   - ppc_isync           invalidates speculatively executed instructions
+//                         From the POWER ISA 2.06 documentation:
+//                          "[...] an isync instruction prevents the execution of
+//                         instructions following the isync until instructions
+//                         preceding the isync have completed, [...]"
+//                         From IBM's AIX assembler reference:
+//                          "The isync [...] instructions causes the processor to
+//                         refetch any instructions that might have been fetched
+//                         prior to the isync instruction. The instruction isync
+//                         causes the processor to wait for all previous instructions
+//                         to complete. Then any instructions already fetched are
+//                         discarded and instruction processing continues in the
+//                         environment established by the previous instructions."
+//
+//   semantic barrier instructions:
+//   (as defined in orderAccess.hpp)
+//
+//   - ppc_release         orders Store|Store,       (maps to ppc_lwsync)
+//                                 Load|Store
+//   - ppc_acquire         orders  Load|Store,       (maps to ppc_lwsync)
+//                                 Load|Load
+//   - ppc_fence           orders Store|Store,       (maps to ppc_sync)
+//                                 Load|Store,
+//                                 Load|Load,
+//                                Store|Load
+//
+
+#define strasm_sync                       "\n  sync    \n"
+#define strasm_lwsync                     "\n  lwsync  \n"
+#define strasm_isync                      "\n  isync   \n"
+#define strasm_release                    strasm_lwsync
+#define strasm_acquire                    strasm_lwsync
+#define strasm_fence                      strasm_sync
+#define strasm_nobarrier                  ""
+#define strasm_nobarrier_clobber_memory   ""
+
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+
+  unsigned int result;
+
+  __asm__ __volatile__ (
+    strasm_lwsync
+    "1: lwarx   %0,  0, %2    \n"
+    "   add     %0, %0, %1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_isync
+    : /*%0*/"=&r" (result)
+    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
+    : "cc", "memory" );
+
+  return (jint) result;
+}
+
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+
+  long result;
+
+  __asm__ __volatile__ (
+    strasm_lwsync
+    "1: ldarx   %0,  0, %2    \n"
+    "   add     %0, %0, %1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_isync
+    : /*%0*/"=&r" (result)
+    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
+    : "cc", "memory" );
+
+  return (intptr_t) result;
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+}
+
+
+inline void Atomic::inc    (volatile jint*     dest) {
+
+  unsigned int temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: lwarx   %0,  0, %2    \n"
+    "   addic   %0, %0,  1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+
+  long temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: ldarx   %0,  0, %2    \n"
+    "   addic   %0, %0,  1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::inc_ptr(volatile void*     dest) {
+  inc_ptr((volatile intptr_t*)dest);
+}
+
+
+inline void Atomic::dec    (volatile jint*     dest) {
+
+  unsigned int temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: lwarx   %0,  0, %2    \n"
+    "   addic   %0, %0, -1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+
+  long temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: ldarx   %0,  0, %2    \n"
+    "   addic   %0, %0, -1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::dec_ptr(volatile void*     dest) {
+  dec_ptr((volatile intptr_t*)dest);
+}
+
+inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
+
+  // Note that xchg_ptr doesn't necessarily do an acquire
+  // (see synchronizer.cpp).
+
+  unsigned int old_value;
+  const uint64_t zero = 0;
+
+  __asm__ __volatile__ (
+    /* lwsync */
+    strasm_lwsync
+    /* atomic loop */
+    "1:                                                 \n"
+    "   lwarx   %[old_value], %[dest], %[zero]          \n"
+    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* isync */
+    strasm_sync
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (jint) old_value;
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+
+  // Note that xchg_ptr doesn't necessarily do an acquire
+  // (see synchronizer.cpp).
+
+  long old_value;
+  const uint64_t zero = 0;
+
+  __asm__ __volatile__ (
+    /* lwsync */
+    strasm_lwsync
+    /* atomic loop */
+    "1:                                                 \n"
+    "   ldarx   %[old_value], %[dest], %[zero]          \n"
+    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* isync */
+    strasm_sync
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (intptr_t) old_value;
+}
+
+inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
+  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
+  if (order != memory_order_relaxed) {
+    __asm__ __volatile__ (
+      /* fence */
+      strasm_sync
+      );
+  }
+}
+
+inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
+  if (order != memory_order_relaxed) {
+    __asm__ __volatile__ (
+      /* fence */
+      strasm_sync
+      );
+  }
+}
+
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
+  // specified otherwise (see atomic.hpp).
+
+  // Using 32 bit internally.
+  volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3);
+
+#ifdef VM_LITTLE_ENDIAN
+  const unsigned int shift_amount        = ((uintptr_t)dest & 3) * 8;
+#else
+  const unsigned int shift_amount        = ((~(uintptr_t)dest) & 3) * 8;
+#endif
+  const unsigned int masked_compare_val  = ((unsigned int)(unsigned char)compare_value),
+                     masked_exchange_val = ((unsigned int)(unsigned char)exchange_value),
+                     xor_value           = (masked_compare_val ^ masked_exchange_val) << shift_amount;
+
+  unsigned int old_value, value32;
+
+  cmpxchg_pre_membar(order);
+
+  __asm__ __volatile__ (
+    /* simple guard */
+    "   lbz     %[old_value], 0(%[dest])                  \n"
+    "   cmpw    %[masked_compare_val], %[old_value]       \n"
+    "   bne-    2f                                        \n"
+    /* atomic loop */
+    "1:                                                   \n"
+    "   lwarx   %[value32], 0, %[dest_base]               \n"
+    /* extract byte and compare */
+    "   srd     %[old_value], %[value32], %[shift_amount] \n"
+    "   clrldi  %[old_value], %[old_value], 56            \n"
+    "   cmpw    %[masked_compare_val], %[old_value]       \n"
+    "   bne-    2f                                        \n"
+    /* replace byte and try to store */
+    "   xor     %[value32], %[xor_value], %[value32]      \n"
+    "   stwcx.  %[value32], 0, %[dest_base]               \n"
+    "   bne-    1b                                        \n"
+    /* exit */
+    "2:                                                   \n"
+    /* out */
+    : [old_value]           "=&r"   (old_value),
+      [value32]             "=&r"   (value32),
+                            "=m"    (*dest),
+                            "=m"    (*dest_base)
+    /* in */
+    : [dest]                "b"     (dest),
+      [dest_base]           "b"     (dest_base),
+      [shift_amount]        "r"     (shift_amount),
+      [masked_compare_val]  "r"     (masked_compare_val),
+      [xor_value]           "r"     (xor_value),
+                            "m"     (*dest),
+                            "m"     (*dest_base)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  cmpxchg_post_membar(order);
+
+  return (jbyte)(unsigned char)old_value;
+}
+
+inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
+  // specified otherwise (see atomic.hpp).
+
+  unsigned int old_value;
+  const uint64_t zero = 0;
+
+  cmpxchg_pre_membar(order);
+
+  __asm__ __volatile__ (
+    /* simple guard */
+    "   lwz     %[old_value], 0(%[dest])                \n"
+    "   cmpw    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    /* atomic loop */
+    "1:                                                 \n"
+    "   lwarx   %[old_value], %[dest], %[zero]          \n"
+    "   cmpw    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [compare_value]   "r"     (compare_value),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  cmpxchg_post_membar(order);
+
+  return (jint) old_value;
+}
+
+inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
+  // specified otherwise (see atomic.hpp).
+
+  long old_value;
+  const uint64_t zero = 0;
+
+  cmpxchg_pre_membar(order);
+
+  __asm__ __volatile__ (
+    /* simple guard */
+    "   ld      %[old_value], 0(%[dest])                \n"
+    "   cmpd    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    /* atomic loop */
+    "1:                                                 \n"
+    "   ldarx   %[old_value], %[dest], %[zero]          \n"
+    "   cmpd    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [compare_value]   "r"     (compare_value),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  cmpxchg_post_membar(order);
+
+  return (jlong) old_value;
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+#undef strasm_sync
+#undef strasm_lwsync
+#undef strasm_isync
+#undef strasm_release
+#undef strasm_acquire
+#undef strasm_fence
+#undef strasm_nobarrier
+#undef strasm_nobarrier_clobber_memory
+
+#endif // OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_HPP
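The specialized jbyte cmpxchg added above emulates a one-byte CAS with word-wide lwarx/stwcx. by shifting and masking inside the enclosing aligned 32-bit word. Below is a minimal portable sketch of that masking technique (the cmpxchg_byte helper name is mine, and C++11 atomics stand in for the ll/sc loop; the cast from a plain byte array to std::atomic is sketch-grade, not production-grade):

#include <atomic>
#include <cstdint>
#include <cstdio>

// Emulate a single-byte CAS with a CAS on the enclosing aligned 32-bit
// word, mirroring the lwarx/stwcx. loop above. Little-endian byte
// numbering; a big-endian target derives the shift from the inverted
// low address bits, as in the #ifdef VM_LITTLE_ENDIAN block above.
static uint8_t cmpxchg_byte(uint8_t exchange_value, uint8_t* dest,
                            uint8_t compare_value) {
  std::atomic<uint32_t>* base = reinterpret_cast<std::atomic<uint32_t>*>(
      reinterpret_cast<uintptr_t>(dest) & ~uintptr_t(3));
  const unsigned shift = (reinterpret_cast<uintptr_t>(dest) & 3) * 8;
  const uint32_t mask  = uint32_t(0xff) << shift;
  uint32_t old_word = base->load(std::memory_order_relaxed);
  for (;;) {
    uint8_t old_byte = uint8_t(old_word >> shift);
    if (old_byte != compare_value) return old_byte;  // guard: bail out early
    uint32_t new_word = (old_word & ~mask) | (uint32_t(exchange_value) << shift);
    if (base->compare_exchange_weak(old_word, new_word))
      return old_byte;                               // byte swapped in place
    // Failed CAS refreshed old_word; retry, like the bne- 1b branch above.
  }
}

int main() {
  alignas(4) uint8_t bytes[4] = {0x11, 0x22, 0x33, 0x44};
  uint8_t old_byte = cmpxchg_byte(0x55, &bytes[1], 0x22);  // should succeed
  std::printf("old=%#x new=%#x\n", old_byte, bytes[1]);    // old=0x22 new=0x55
  return 0;
}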
--- a/hotspot/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,482 +0,0 @@
-/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP
-#define OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-#ifndef _LP64
-#error "Atomic currently only impleneted for PPC64"
-#endif
-
-// Implementation of class atomic
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
-
-//
-//   machine barrier instructions:
-//
-//   - ppc_sync            two-way memory barrier, aka fence
-//   - ppc_lwsync          orders  Store|Store,
-//                                  Load|Store,
-//                                  Load|Load,
-//                         but not Store|Load
-//   - ppc_eieio           orders memory accesses for device memory (only)
-//   - ppc_isync           invalidates speculatively executed instructions
-//                         From the POWER ISA 2.06 documentation:
-//                          "[...] an isync instruction prevents the execution of
-//                         instructions following the isync until instructions
-//                         preceding the isync have completed, [...]"
-//                         From IBM's AIX assembler reference:
-//                          "The isync [...] instructions causes the processor to
-//                         refetch any instructions that might have been fetched
-//                         prior to the isync instruction. The instruction isync
-//                         causes the processor to wait for all previous instructions
-//                         to complete. Then any instructions already fetched are
-//                         discarded and instruction processing continues in the
-//                         environment established by the previous instructions."
-//
-//   semantic barrier instructions:
-//   (as defined in orderAccess.hpp)
-//
-//   - ppc_release         orders Store|Store,       (maps to ppc_lwsync)
-//                                 Load|Store
-//   - ppc_acquire         orders  Load|Store,       (maps to ppc_lwsync)
-//                                 Load|Load
-//   - ppc_fence           orders Store|Store,       (maps to ppc_sync)
-//                                 Load|Store,
-//                                 Load|Load,
-//                                Store|Load
-//
-
-#define strasm_sync                       "\n  sync    \n"
-#define strasm_lwsync                     "\n  lwsync  \n"
-#define strasm_isync                      "\n  isync   \n"
-#define strasm_release                    strasm_lwsync
-#define strasm_acquire                    strasm_lwsync
-#define strasm_fence                      strasm_sync
-#define strasm_nobarrier                  ""
-#define strasm_nobarrier_clobber_memory   ""
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-
-  unsigned int result;
-
-  __asm__ __volatile__ (
-    strasm_lwsync
-    "1: lwarx   %0,  0, %2    \n"
-    "   add     %0, %0, %1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_isync
-    : /*%0*/"=&r" (result)
-    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
-    : "cc", "memory" );
-
-  return (jint) result;
-}
-
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-
-  long result;
-
-  __asm__ __volatile__ (
-    strasm_lwsync
-    "1: ldarx   %0,  0, %2    \n"
-    "   add     %0, %0, %1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_isync
-    : /*%0*/"=&r" (result)
-    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
-    : "cc", "memory" );
-
-  return (intptr_t) result;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
-}
-
-
-inline void Atomic::inc    (volatile jint*     dest) {
-
-  unsigned int temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: lwarx   %0,  0, %2    \n"
-    "   addic   %0, %0,  1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-
-  long temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: ldarx   %0,  0, %2    \n"
-    "   addic   %0, %0,  1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
-
-
-inline void Atomic::dec    (volatile jint*     dest) {
-
-  unsigned int temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: lwarx   %0,  0, %2    \n"
-    "   addic   %0, %0, -1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-
-  long temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: ldarx   %0,  0, %2    \n"
-    "   addic   %0, %0, -1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
-
-inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
-
-  // Note that xchg_ptr doesn't necessarily do an acquire
-  // (see synchronizer.cpp).
-
-  unsigned int old_value;
-  const uint64_t zero = 0;
-
-  __asm__ __volatile__ (
-    /* lwsync */
-    strasm_lwsync
-    /* atomic loop */
-    "1:                                                 \n"
-    "   lwarx   %[old_value], %[dest], %[zero]          \n"
-    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* isync */
-    strasm_sync
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  return (jint) old_value;
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-
-  // Note that xchg_ptr doesn't necessarily do an acquire
-  // (see synchronizer.cpp).
-
-  long old_value;
-  const uint64_t zero = 0;
-
-  __asm__ __volatile__ (
-    /* lwsync */
-    strasm_lwsync
-    /* atomic loop */
-    "1:                                                 \n"
-    "   ldarx   %[old_value], %[dest], %[zero]          \n"
-    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* isync */
-    strasm_sync
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  return (intptr_t) old_value;
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
-inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
-}
-
-inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
-
-  // Note that cmpxchg guarantees a two-way memory barrier across
-  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
-  // specified otherwise (see atomic.hpp).
-
-  // Using 32 bit internally.
-  volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3);
-
-#ifdef VM_LITTLE_ENDIAN
-  const unsigned int shift_amount        = ((uintptr_t)dest & 3) * 8;
-#else
-  const unsigned int shift_amount        = ((~(uintptr_t)dest) & 3) * 8;
-#endif
-  const unsigned int masked_compare_val  = ((unsigned int)(unsigned char)compare_value),
-                     masked_exchange_val = ((unsigned int)(unsigned char)exchange_value),
-                     xor_value           = (masked_compare_val ^ masked_exchange_val) << shift_amount;
-
-  unsigned int old_value, value32;
-
-  cmpxchg_pre_membar(order);
-
-  __asm__ __volatile__ (
-    /* simple guard */
-    "   lbz     %[old_value], 0(%[dest])                  \n"
-    "   cmpw    %[masked_compare_val], %[old_value]       \n"
-    "   bne-    2f                                        \n"
-    /* atomic loop */
-    "1:                                                   \n"
-    "   lwarx   %[value32], 0, %[dest_base]               \n"
-    /* extract byte and compare */
-    "   srd     %[old_value], %[value32], %[shift_amount] \n"
-    "   clrldi  %[old_value], %[old_value], 56            \n"
-    "   cmpw    %[masked_compare_val], %[old_value]       \n"
-    "   bne-    2f                                        \n"
-    /* replace byte and try to store */
-    "   xor     %[value32], %[xor_value], %[value32]      \n"
-    "   stwcx.  %[value32], 0, %[dest_base]               \n"
-    "   bne-    1b                                        \n"
-    /* exit */
-    "2:                                                   \n"
-    /* out */
-    : [old_value]           "=&r"   (old_value),
-      [value32]             "=&r"   (value32),
-                            "=m"    (*dest),
-                            "=m"    (*dest_base)
-    /* in */
-    : [dest]                "b"     (dest),
-      [dest_base]           "b"     (dest_base),
-      [shift_amount]        "r"     (shift_amount),
-      [masked_compare_val]  "r"     (masked_compare_val),
-      [xor_value]           "r"     (xor_value),
-                            "m"     (*dest),
-                            "m"     (*dest_base)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  cmpxchg_post_membar(order);
-
-  return (jbyte)(unsigned char)old_value;
-}
-
-inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
-
-  // Note that cmpxchg guarantees a two-way memory barrier across
-  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
-  // specified otherwise (see atomic.hpp).
-
-  unsigned int old_value;
-  const uint64_t zero = 0;
-
-  cmpxchg_pre_membar(order);
-
-  __asm__ __volatile__ (
-    /* simple guard */
-    "   lwz     %[old_value], 0(%[dest])                \n"
-    "   cmpw    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    /* atomic loop */
-    "1:                                                 \n"
-    "   lwarx   %[old_value], %[dest], %[zero]          \n"
-    "   cmpw    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [compare_value]   "r"     (compare_value),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  cmpxchg_post_membar(order);
-
-  return (jint) old_value;
-}
-
-inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
-
-  // Note that cmpxchg guarantees a two-way memory barrier across
-  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
-  // specified otherwise (see atomic.hpp).
-
-  long old_value;
-  const uint64_t zero = 0;
-
-  cmpxchg_pre_membar(order);
-
-  __asm__ __volatile__ (
-    /* simple guard */
-    "   ld      %[old_value], 0(%[dest])                \n"
-    "   cmpd    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    /* atomic loop */
-    "1:                                                 \n"
-    "   ldarx   %[old_value], %[dest], %[zero]          \n"
-    "   cmpd    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [compare_value]   "r"     (compare_value),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  cmpxchg_post_membar(order);
-
-  return (jlong) old_value;
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-#undef strasm_sync
-#undef strasm_lwsync
-#undef strasm_isync
-#undef strasm_release
-#undef strasm_acquire
-#undef strasm_fence
-#undef strasm_nobarrier
-#undef strasm_nobarrier_clobber_memory
-
-#endif // OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
+#define OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
+
+#include "runtime/os.hpp"
+
+// Implementation of class atomic
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+
+// Adding a lock prefix to an instruction on MP machine
+#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
+
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+  jint addend = add_value;
+  int mp = os::is_MP();
+  __asm__ volatile (  LOCK_IF_MP(%3) "xaddl %0,(%2)"
+                    : "=r" (addend)
+                    : "0" (addend), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return addend + add_value;
+}
+
+inline void Atomic::inc    (volatile jint*     dest) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%1) "addl $1,(%0)" :
+                    : "r" (dest), "r" (mp) : "cc", "memory");
+}
+
+inline void Atomic::inc_ptr(volatile void*     dest) {
+  inc_ptr((volatile intptr_t*)dest);
+}
+
+inline void Atomic::dec    (volatile jint*     dest) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%1) "subl $1,(%0)" :
+                    : "r" (dest), "r" (mp) : "cc", "memory");
+}
+
+inline void Atomic::dec_ptr(volatile void*     dest) {
+  dec_ptr((volatile intptr_t*)dest);
+}
+
+inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
+  __asm__ volatile (  "xchgl (%2),%0"
+                    : "=r" (exchange_value)
+                    : "0" (exchange_value), "r" (dest)
+                    : "memory");
+  return exchange_value;
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return exchange_value;
+}
+
+inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return exchange_value;
+}
+
+#ifdef AMD64
+inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  intptr_t addend = add_value;
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
+                        : "=r" (addend)
+                        : "0" (addend), "r" (dest), "r" (mp)
+                        : "cc", "memory");
+  return addend + add_value;
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%1) "addq $1,(%0)"
+                        :
+                        : "r" (dest), "r" (mp)
+                        : "cc", "memory");
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%1) "subq $1,(%0)"
+                        :
+                        : "r" (dest), "r" (mp)
+                        : "cc", "memory");
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  __asm__ __volatile__ ("xchgq (%2),%0"
+                        : "=r" (exchange_value)
+                        : "0" (exchange_value), "r" (dest)
+                        : "memory");
+  return exchange_value;
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
+                        : "=a" (exchange_value)
+                        : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                        : "cc", "memory");
+  return exchange_value;
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+#else // !AMD64
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
+}
+
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  inc((volatile jint*)dest);
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  dec((volatile jint*)dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+extern "C" {
+  // defined in bsd_x86.s
+  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
+  void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, os::is_MP());
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  _Atomic_move_long(src, &dest);
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, dest);
+}
+
+#endif // AMD64
+
+#endif // OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
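The LOCK_IF_MP macro above branches over the lock prefix on uniprocessors. A standalone sketch of the underlying xaddl idiom, assuming GCC or Clang on x86 and unconditionally emitting lock rather than testing os::is_MP() (atomic_add_sketch is my name, not HotSpot's):

#include <cstdio>

// Standalone version of the xaddl body of Atomic::add above.
// lock xadd adds the register to memory and leaves the *old* memory
// value in the register, so old + add_value is the new value returned,
// matching Atomic::add's return-the-new-value contract.
static int atomic_add_sketch(int add_value, volatile int* dest) {
  int addend = add_value;
  __asm__ volatile("lock; xaddl %0,(%1)"
                   : "+r"(addend)
                   : "r"(dest)
                   : "cc", "memory");
  return addend + add_value;
}

int main() {
  volatile int counter = 40;
  std::printf("%d\n", atomic_add_sketch(2, &counter));  // prints 42
  return 0;
}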
--- a/hotspot/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,228 +0,0 @@
-/*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_INLINE_HPP
-#define OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-// Implementation of class atomic
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-
-// Adding a lock prefix to an instruction on an MP machine
-#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  jint addend = add_value;
-  int mp = os::is_MP();
-  __asm__ volatile (  LOCK_IF_MP(%3) "xaddl %0,(%2)"
-                    : "=r" (addend)
-                    : "0" (addend), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-  return addend + add_value;
-}
-
-inline void Atomic::inc    (volatile jint*     dest) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%1) "addl $1,(%0)" :
-                    : "r" (dest), "r" (mp) : "cc", "memory");
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
-
-inline void Atomic::dec    (volatile jint*     dest) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%1) "subl $1,(%0)" :
-                    : "r" (dest), "r" (mp) : "cc", "memory");
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
-  __asm__ volatile (  "xchgl (%2),%0"
-                    : "=r" (exchange_value)
-                    : "0" (exchange_value), "r" (dest)
-                    : "memory");
-  return exchange_value;
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
-                    : "=a" (exchange_value)
-                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-  return exchange_value;
-}
-
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
-                    : "=a" (exchange_value)
-                    : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-  return exchange_value;
-}
-
-#ifdef AMD64
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  intptr_t addend = add_value;
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
-                        : "=r" (addend)
-                        : "0" (addend), "r" (dest), "r" (mp)
-                        : "cc", "memory");
-  return addend + add_value;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%1) "addq $1,(%0)"
-                        :
-                        : "r" (dest), "r" (mp)
-                        : "cc", "memory");
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%1) "subq $1,(%0)"
-                        :
-                        : "r" (dest), "r" (mp)
-                        : "cc", "memory");
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  __asm__ __volatile__ ("xchgq (%2),%0"
-                        : "=r" (exchange_value)
-                        : "0" (exchange_value), "r" (dest)
-                        : "memory");
-  return exchange_value;
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
-                        : "=a" (exchange_value)
-                        : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                        : "cc", "memory");
-  return exchange_value;
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
-
-#else // !AMD64
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  inc((volatile jint*)dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  dec((volatile jint*)dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-extern "C" {
-  // defined in bsd_x86.s
-  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
-  void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, os::is_MP());
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) {
-  volatile jlong dest;
-  _Atomic_move_long(src, &dest);
-  return dest;
-}
-
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, dest);
-}
-
-#endif // AMD64
-
-#endif // OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_INLINE_HPP
--- a/hotspot/src/os_cpu/bsd_x86/vm/orderAccess_bsd_x86.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/os_cpu/bsd_x86/vm/orderAccess_bsd_x86.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
 #define OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
 
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
+#define OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
+
+#include "runtime/os.hpp"
+
+// Implementation of class atomic
+
+#ifdef M68K
+
+/*
+ * __m68k_cmpxchg
+ *
+ * Atomically store newval in *ptr if *ptr is equal to oldval, for user space.
+ * Returns newval on success and oldval if no exchange happened.
+ * This implementation is processor specific and works on the
+ * 68020, 68030, 68040 and 68060.
+ *
+ * It will not work on ColdFire, 68000 or 68010, since they lack the CAS
+ * instruction.
+ * Using a kernel helper would be better for an architecture-complete implementation.
+ *
+ */
+
+static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
+  int ret;
+  __asm __volatile ("cas%.l %0,%2,%1"
+                   : "=d" (ret), "+m" (*(ptr))
+                   : "d" (newval), "0" (oldval));
+  return ret;
+}
+
+/* Perform an atomic compare and swap: if the current value of `*PTR'
+   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
+   `*PTR' before the operation.*/
+static inline int m68k_compare_and_swap(volatile int *ptr,
+                                        int oldval,
+                                        int newval) {
+  for (;;) {
+      int prev = *ptr;
+      if (prev != oldval)
+        return prev;
+
+      if (__m68k_cmpxchg (prev, newval, ptr) == newval)
+        // Success.
+        return prev;
+
+      // We failed even though prev == oldval.  Try again.
+    }
+}
+
+/* Atomically add an int to memory.  */
+static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
+  for (;;) {
+      // Loop until success.
+
+      int prev = *ptr;
+
+      if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
+        return prev + add_value;
+    }
+}
+
+/* Atomically write VALUE into `*PTR' and returns the previous
+   contents of `*PTR'.  */
+static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
+  for (;;) {
+      // Loop until success.
+      int prev = *ptr;
+
+      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
+        return prev;
+    }
+}
+#endif // M68K
+
+#ifdef ARM
+
+/*
+ * __kernel_cmpxchg
+ *
+ * Atomically store newval in *ptr if *ptr is equal to oldval, for user space.
+ * Return zero if *ptr was changed or non-zero if no exchange happened.
+ * The C flag is also set if *ptr was changed to allow for assembly
+ * optimization in the calling code.
+ *
+ */
+
+typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
+#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
+
+
+
+/* Perform an atomic compare and swap: if the current value of `*PTR'
+   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
+   `*PTR' before the operation.*/
+static inline int arm_compare_and_swap(volatile int *ptr,
+                                       int oldval,
+                                       int newval) {
+  for (;;) {
+      int prev = *ptr;
+      if (prev != oldval)
+        return prev;
+
+      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
+        // Success.
+        return prev;
+
+      // We failed even though prev == oldval.  Try again.
+    }
+}
+
+/* Atomically add an int to memory.  */
+static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
+  for (;;) {
+      // Loop until a __kernel_cmpxchg succeeds.
+
+      int prev = *ptr;
+
+      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
+        return prev + add_value;
+    }
+}
+
+/* Atomically write VALUE into `*PTR' and returns the previous
+   contents of `*PTR'.  */
+static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
+  for (;;) {
+      // Loop until a __kernel_cmpxchg succeeds.
+      int prev = *ptr;
+
+      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
+        return prev;
+    }
+}
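A small sketch under the contract documented above (a zero return means the store happened), not part of this changeset; the function name is hypothetical:

// Hypothetical helper: an unconditional atomic store built from the ARM
// kernel helper's retry contract.
static inline void arm_atomic_set(volatile int* ptr, int newval) {
  for (;;) {
    int prev = *ptr;
    if (__kernel_cmpxchg(prev, newval, ptr) == 0)
      return;  // *ptr was changed from prev to newval
  }
}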
+#endif // ARM
+
+inline void Atomic::store(jint store_value, volatile jint* dest) {
+#if !defined(ARM) && !defined(M68K)
+  __sync_synchronize();
+#endif
+  *dest = store_value;
+}
+
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
+#if !defined(ARM) && !defined(M68K)
+  __sync_synchronize();
+#endif
+  *dest = store_value;
+}
+
+inline jint Atomic::add(jint add_value, volatile jint* dest) {
+#ifdef ARM
+  return arm_add_and_fetch(dest, add_value);
+#else
+#ifdef M68K
+  return m68k_add_and_fetch(dest, add_value);
+#else
+  return __sync_add_and_fetch(dest, add_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+#ifdef ARM
+  return arm_add_and_fetch(dest, add_value);
+#else
+#ifdef M68K
+  return m68k_add_and_fetch(dest, add_value);
+#else
+  return __sync_add_and_fetch(dest, add_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
+  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
+}
+
+inline void Atomic::inc(volatile jint* dest) {
+  add(1, dest);
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  add_ptr(1, dest);
+}
+
+inline void Atomic::inc_ptr(volatile void* dest) {
+  add_ptr(1, dest);
+}
+
+inline void Atomic::dec(volatile jint* dest) {
+  add(-1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  add_ptr(-1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile void* dest) {
+  add_ptr(-1, dest);
+}
+
+inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
+#ifdef ARM
+  return arm_lock_test_and_set(dest, exchange_value);
+#else
+#ifdef M68K
+  return m68k_lock_test_and_set(dest, exchange_value);
+#else
+  // __sync_lock_test_and_set is a bizarrely named atomic exchange
+  // operation.  Note that some platforms only support this with the
+  // limitation that the only valid value to store is the immediate
+  // constant 1.  There is a test for this in JNI_CreateJavaVM().
+  jint result = __sync_lock_test_and_set (dest, exchange_value);
+  // All atomic operations are expected to be full memory barriers
+  // (see atomic.hpp). However, __sync_lock_test_and_set is not
+  // a full memory barrier, but an acquire barrier. Hence, this added
+  // barrier.
+  __sync_synchronize();
+  return result;
+#endif // M68K
+#endif // ARM
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
+                                 volatile intptr_t* dest) {
+#ifdef ARM
+  return arm_lock_test_and_set(dest, exchange_value);
+#else
+#ifdef M68K
+  return m68k_lock_test_and_set(dest, exchange_value);
+#else
+  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
+  __sync_synchronize();
+  return result;
+#endif // M68K
+#endif // ARM
+}
+
+inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
+  return (void *) xchg_ptr((intptr_t) exchange_value,
+                           (volatile intptr_t*) dest);
+}
+
+inline jint Atomic::cmpxchg(jint exchange_value,
+                            volatile jint* dest,
+                            jint compare_value,
+                            cmpxchg_memory_order order) {
+#ifdef ARM
+  return arm_compare_and_swap(dest, compare_value, exchange_value);
+#else
+#ifdef M68K
+  return m68k_compare_and_swap(dest, compare_value, exchange_value);
+#else
+  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline jlong Atomic::cmpxchg(jlong exchange_value,
+                             volatile jlong* dest,
+                             jlong compare_value,
+                             cmpxchg_memory_order order) {
+
+  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
+                                    volatile intptr_t* dest,
+                                    intptr_t compare_value,
+                                    cmpxchg_memory_order order) {
+#ifdef ARM
+  return arm_compare_and_swap(dest, compare_value, exchange_value);
+#else
+#ifdef M68K
+  return m68k_compare_and_swap(dest, compare_value, exchange_value);
+#else
+  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline void* Atomic::cmpxchg_ptr(void* exchange_value,
+                                 volatile void* dest,
+                                 void* compare_value,
+                                 cmpxchg_memory_order order) {
+
+  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
+                              (volatile intptr_t*) dest,
+                              (intptr_t) compare_value,
+                              order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  os::atomic_copy64(src, &dest);
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  os::atomic_copy64((volatile jlong*)&store_value, dest);
+}
+
+#endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
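For reference, the barrier-upgrade pattern that Atomic::xchg above relies on, isolated as a standalone sketch (not part of this changeset; the name is hypothetical):

// __sync_lock_test_and_set is only an acquire operation, so a trailing
// __sync_synchronize() is needed to meet the full-fence contract that
// atomic.hpp documents for all atomic operations.
static inline int full_fence_exchange(volatile int* dest, int exchange_value) {
  int old_value = __sync_lock_test_and_set(dest, exchange_value); // acquire only
  __sync_synchronize();                                           // full fence
  return old_value;
}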
--- a/hotspot/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,334 +0,0 @@
-/*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP
-#define OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-// Implementation of class atomic
-
-#ifdef M68K
-
-/*
- * __m68k_cmpxchg
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval, for user space.
- * Returns newval on success and oldval if no exchange happened.
- * This implementation is processor specific and works on the
- * 68020, 68030, 68040 and 68060.
- *
- * It will not work on ColdFire, 68000 or 68010, since they lack the CAS
- * instruction.
- * Using a kernel helper would be better for an architecture-complete implementation.
- *
- */
-
-static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
-  int ret;
-  __asm __volatile ("cas%.l %0,%2,%1"
-                   : "=d" (ret), "+m" (*(ptr))
-                   : "d" (newval), "0" (oldval));
-  return ret;
-}
-
-/* Perform an atomic compare and swap: if the current value of `*PTR'
-   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
-   `*PTR' before the operation.*/
-static inline int m68k_compare_and_swap(volatile int *ptr,
-                                        int oldval,
-                                        int newval) {
-  for (;;) {
-      int prev = *ptr;
-      if (prev != oldval)
-        return prev;
-
-      if (__m68k_cmpxchg (prev, newval, ptr) == newval)
-        // Success.
-        return prev;
-
-      // We failed even though prev == oldval.  Try again.
-    }
-}
-
-/* Atomically add an int to memory.  */
-static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
-  for (;;) {
-      // Loop until success.
-
-      int prev = *ptr;
-
-      if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
-        return prev + add_value;
-    }
-}
-
-/* Atomically write VALUE into `*PTR' and returns the previous
-   contents of `*PTR'.  */
-static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
-  for (;;) {
-      // Loop until success.
-      int prev = *ptr;
-
-      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
-        return prev;
-    }
-}
-#endif // M68K
-
-#ifdef ARM
-
-/*
- * __kernel_cmpxchg
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval, for user space.
- * Return zero if *ptr was changed or non-zero if no exchange happened.
- * The C flag is also set if *ptr was changed to allow for assembly
- * optimization in the calling code.
- *
- */
-
-typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
-#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
-
-
-
-/* Perform an atomic compare and swap: if the current value of `*PTR'
-   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
-   `*PTR' before the operation.*/
-static inline int arm_compare_and_swap(volatile int *ptr,
-                                       int oldval,
-                                       int newval) {
-  for (;;) {
-      int prev = *ptr;
-      if (prev != oldval)
-        return prev;
-
-      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
-        // Success.
-        return prev;
-
-      // We failed even though prev == oldval.  Try again.
-    }
-}
-
-/* Atomically add an int to memory.  */
-static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
-  for (;;) {
-      // Loop until a __kernel_cmpxchg succeeds.
-
-      int prev = *ptr;
-
-      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
-        return prev + add_value;
-    }
-}
-
-/* Atomically write VALUE into `*PTR' and returns the previous
-   contents of `*PTR'.  */
-static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
-  for (;;) {
-      // Loop until a __kernel_cmpxchg succeeds.
-      int prev = *ptr;
-
-      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
-        return prev;
-    }
-}
-#endif // ARM
-
-inline void Atomic::store(jint store_value, volatile jint* dest) {
-#if !defined(ARM) && !defined(M68K)
-  __sync_synchronize();
-#endif
-  *dest = store_value;
-}
-
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
-#if !defined(ARM) && !defined(M68K)
-  __sync_synchronize();
-#endif
-  *dest = store_value;
-}
-
-inline jint Atomic::add(jint add_value, volatile jint* dest) {
-#ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
-#else
-#ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
-#else
-  return __sync_add_and_fetch(dest, add_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-#ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
-#else
-#ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
-#else
-  return __sync_add_and_fetch(dest, add_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
-}
-
-inline void Atomic::inc(volatile jint* dest) {
-  add(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  add_ptr(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile void* dest) {
-  add_ptr(1, dest);
-}
-
-inline void Atomic::dec(volatile jint* dest) {
-  add(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  add_ptr(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile void* dest) {
-  add_ptr(-1, dest);
-}
-
-inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
-#ifdef ARM
-  return arm_lock_test_and_set(dest, exchange_value);
-#else
-#ifdef M68K
-  return m68k_lock_test_and_set(dest, exchange_value);
-#else
-  // __sync_lock_test_and_set is a bizarrely named atomic exchange
-  // operation.  Note that some platforms only support this with the
-  // limitation that the only valid value to store is the immediate
-  // constant 1.  There is a test for this in JNI_CreateJavaVM().
-  jint result = __sync_lock_test_and_set (dest, exchange_value);
-  // All atomic operations are expected to be full memory barriers
-  // (see atomic.hpp). However, __sync_lock_test_and_set is not
-  // a full memory barrier, but an acquire barrier. Hence, this added
-  // barrier.
-  __sync_synchronize();
-  return result;
-#endif // M68K
-#endif // ARM
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
-                                 volatile intptr_t* dest) {
-#ifdef ARM
-  return arm_lock_test_and_set(dest, exchange_value);
-#else
-#ifdef M68K
-  return m68k_lock_test_and_set(dest, exchange_value);
-#else
-  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
-  __sync_synchronize();
-  return result;
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-  return (void *) xchg_ptr((intptr_t) exchange_value,
-                           (volatile intptr_t*) dest);
-}
-
-inline jint Atomic::cmpxchg(jint exchange_value,
-                            volatile jint* dest,
-                            jint compare_value,
-                            cmpxchg_memory_order order) {
-#ifdef ARM
-  return arm_compare_and_swap(dest, compare_value, exchange_value);
-#else
-#ifdef M68K
-  return m68k_compare_and_swap(dest, compare_value, exchange_value);
-#else
-  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline jlong Atomic::cmpxchg(jlong exchange_value,
-                             volatile jlong* dest,
-                             jlong compare_value,
-                             cmpxchg_memory_order order) {
-
-  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
-                                    volatile intptr_t* dest,
-                                    intptr_t compare_value,
-                                    cmpxchg_memory_order order) {
-#ifdef ARM
-  return arm_compare_and_swap(dest, compare_value, exchange_value);
-#else
-#ifdef M68K
-  return m68k_compare_and_swap(dest, compare_value, exchange_value);
-#else
-  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value,
-                                 volatile void* dest,
-                                 void* compare_value,
-                                 cmpxchg_memory_order order) {
-
-  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
-                              (volatile intptr_t*) dest,
-                              (intptr_t) compare_value,
-                              order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) {
-  volatile jlong dest;
-  os::atomic_copy64(src, &dest);
-  return dest;
-}
-
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  os::atomic_copy64((volatile jlong*)&store_value, dest);
-}
-
-#endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP
+#define OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP
+
+#include "vm_version_aarch64.hpp"
+
+// Implementation of class atomic
+
+#define FULL_MEM_BARRIER  __sync_synchronize()
+#define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
+#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+
+inline jint Atomic::add(jint add_value, volatile jint* dest)
+{
+ return __sync_add_and_fetch(dest, add_value);
+}
+
+inline void Atomic::inc(volatile jint* dest)
+{
+ add(1, dest);
+}
+
+inline void Atomic::inc_ptr(volatile void* dest)
+{
+ add_ptr(1, dest);
+}
+
+inline void Atomic::dec (volatile jint* dest)
+{
+ add(-1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile void* dest)
+{
+ add_ptr(-1, dest);
+}
+
+inline jint Atomic::xchg (jint exchange_value, volatile jint* dest)
+{
+  jint res = __sync_lock_test_and_set (dest, exchange_value);
+  FULL_MEM_BARRIER;
+  return res;
+}
+
+inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest)
+{
+  return (void *) xchg_ptr((intptr_t) exchange_value,
+                           (volatile intptr_t*) dest);
+}
+
+template <typename T> T generic_cmpxchg(T exchange_value, volatile T* dest,
+                                        T compare_value, cmpxchg_memory_order order)
+{
+  if (order == memory_order_relaxed) {
+    T value = compare_value;
+    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
+                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+    return value;
+  } else {
+    return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+  }
+}
+
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order)
+{
+  return generic_cmpxchg(exchange_value, dest, compare_value, order);
+}
+
+inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order)
+{
+  return generic_cmpxchg(exchange_value, dest, compare_value, order);
+}
+
+inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
+inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
+{
+ return __sync_add_and_fetch(dest, add_value);
+}
+
+inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest)
+{
+  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest)
+{
+ add_ptr(1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest)
+{
+ add_ptr(-1, dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
+{
+  intptr_t res = __sync_lock_test_and_set (dest, exchange_value);
+  FULL_MEM_BARRIER;
+  return res;
+}
+
+inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order)
+{
+  return generic_cmpxchg(exchange_value, dest, compare_value, order);
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order)
+{
+  return generic_cmpxchg(exchange_value, dest, compare_value, order);
+}
+
+inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order)
+{
+  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
+                              (volatile intptr_t*) dest,
+                              (intptr_t) compare_value,
+                              order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+#endif // OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP
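The relaxed branch of generic_cmpxchg above can be restated with the single-value __atomic builtin; a sketch for clarity (not part of this changeset; the name is hypothetical):

// On failure, __atomic_compare_exchange_n writes the observed value back
// into 'expected', which is exactly the old value the Atomic API returns.
static inline int relaxed_cas(volatile int* dest, int compare_value, int exchange_value) {
  int expected = compare_value;
  __atomic_compare_exchange_n(dest, &expected, exchange_value, /*weak*/ false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return expected;  // old value on failure, compare_value on success
}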
--- a/hotspot/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,164 +0,0 @@
-/*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_INLINE_HPP
-#define OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-#include "vm_version_aarch64.hpp"
-
-// Implementation of class atomic
-
-#define FULL_MEM_BARRIER  __sync_synchronize()
-#define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
-#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-
-inline jint Atomic::add(jint add_value, volatile jint* dest)
-{
- return __sync_add_and_fetch(dest, add_value);
-}
-
-inline void Atomic::inc(volatile jint* dest)
-{
- add(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile void* dest)
-{
- add_ptr(1, dest);
-}
-
-inline void Atomic::dec (volatile jint* dest)
-{
- add(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile void* dest)
-{
- add_ptr(-1, dest);
-}
-
-inline jint Atomic::xchg (jint exchange_value, volatile jint* dest)
-{
-  jint res = __sync_lock_test_and_set (dest, exchange_value);
-  FULL_MEM_BARRIER;
-  return res;
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest)
-{
-  return (void *) xchg_ptr((intptr_t) exchange_value,
-                           (volatile intptr_t*) dest);
-}
-
-template <typename T> T generic_cmpxchg(T exchange_value, volatile T* dest,
-                                        T compare_value, cmpxchg_memory_order order)
-{
-  if (order == memory_order_relaxed) {
-    T value = compare_value;
-    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
-                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
-    return value;
-  } else {
-    return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-  }
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order)
-{
-  return generic_cmpxchg(exchange_value, dest, compare_value, order);
-}
-
-inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order)
-{
-  return generic_cmpxchg(exchange_value, dest, compare_value, order);
-}
-
-inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
-inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
-{
- return __sync_add_and_fetch(dest, add_value);
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest)
-{
-  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest)
-{
- add_ptr(1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest)
-{
- add_ptr(-1, dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
-{
-  intptr_t res = __sync_lock_test_and_set (dest, exchange_value);
-  FULL_MEM_BARRIER;
-  return res;
-}
-
-inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order)
-{
-  return generic_cmpxchg(exchange_value, dest, compare_value, order);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order)
-{
-  return generic_cmpxchg(exchange_value, dest, compare_value, order);
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order)
-{
-  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
-                              (volatile intptr_t*) dest,
-                              (intptr_t) compare_value,
-                              order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
-
-#endif // OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_INLINE_HPP
--- a/hotspot/src/os_cpu/linux_aarch64/vm/orderAccess_linux_aarch64.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/os_cpu/linux_aarch64/vm/orderAccess_linux_aarch64.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -26,7 +26,7 @@
 #ifndef OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_INLINE_HPP
 #define OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_INLINE_HPP
 
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 #include "vm_version_aarch64.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_HPP
+#define OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_HPP
+
+#ifndef PPC64
+#error "Atomic currently only implemented for PPC64"
+#endif
+
+// Implementation of class atomic
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+//
+// machine barrier instructions:
+//
+// - sync            two-way memory barrier, aka fence
+// - lwsync          orders  Store|Store,
+//                            Load|Store,
+//                            Load|Load,
+//                   but not Store|Load
+// - eieio           orders memory accesses for device memory (only)
+// - isync           invalidates speculatively executed instructions
+//                   From the POWER ISA 2.06 documentation:
+//                    "[...] an isync instruction prevents the execution of
+//                   instructions following the isync until instructions
+//                   preceding the isync have completed, [...]"
+//                   From IBM's AIX assembler reference:
+//                    "The isync [...] instructions causes the processor to
+//                   refetch any instructions that might have been fetched
+//                   prior to the isync instruction. The instruction isync
+//                   causes the processor to wait for all previous instructions
+//                   to complete. Then any instructions already fetched are
+//                   discarded and instruction processing continues in the
+//                   environment established by the previous instructions."
+//
+// semantic barrier instructions:
+// (as defined in orderAccess.hpp)
+//
+// - release         orders Store|Store,       (maps to lwsync)
+//                           Load|Store
+// - acquire         orders  Load|Store,       (maps to lwsync)
+//                           Load|Load
+// - fence           orders Store|Store,       (maps to sync)
+//                           Load|Store,
+//                           Load|Load,
+//                          Store|Load
+//
+
+#define strasm_sync                       "\n  sync    \n"
+#define strasm_lwsync                     "\n  lwsync  \n"
+#define strasm_isync                      "\n  isync   \n"
+#define strasm_release                    strasm_lwsync
+#define strasm_acquire                    strasm_lwsync
+#define strasm_fence                      strasm_sync
+#define strasm_nobarrier                  ""
+#define strasm_nobarrier_clobber_memory   ""
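Because the strasm_* fragments are plain string literals, they splice directly into the asm templates that follow; a standalone two-way fence would look like this (illustrative only, hypothetical name):

// Hypothetical helper: issue the 'fence' mapping (sync) from the table above.
inline void ppc_full_fence() {
  __asm__ __volatile__ (strasm_fence : : : "memory");
}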
+
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+
+  unsigned int result;
+
+  __asm__ __volatile__ (
+    strasm_lwsync
+    "1: lwarx   %0,  0, %2    \n"
+    "   add     %0, %0, %1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_isync
+    : /*%0*/"=&r" (result)
+    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
+    : "cc", "memory" );
+
+  return (jint) result;
+}
+
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+
+  long result;
+
+  __asm__ __volatile__ (
+    strasm_lwsync
+    "1: ldarx   %0,  0, %2    \n"
+    "   add     %0, %0, %1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_isync
+    : /*%0*/"=&r" (result)
+    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
+    : "cc", "memory" );
+
+  return (intptr_t) result;
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+}
+
+
+inline void Atomic::inc    (volatile jint*     dest) {
+
+  unsigned int temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: lwarx   %0,  0, %2    \n"
+    "   addic   %0, %0,  1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+
+  long temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: ldarx   %0,  0, %2    \n"
+    "   addic   %0, %0,  1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::inc_ptr(volatile void*     dest) {
+  inc_ptr((volatile intptr_t*)dest);
+}
+
+
+inline void Atomic::dec    (volatile jint*     dest) {
+
+  unsigned int temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: lwarx   %0,  0, %2    \n"
+    "   addic   %0, %0, -1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+
+  long temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: ldarx   %0,  0, %2    \n"
+    "   addic   %0, %0, -1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::dec_ptr(volatile void*     dest) {
+  dec_ptr((volatile intptr_t*)dest);
+}
+
+inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
+
+  // Note that xchg doesn't necessarily do an acquire
+  // (see synchronizer.cpp).
+
+  unsigned int old_value;
+  const uint64_t zero = 0;
+
+  __asm__ __volatile__ (
+    /* lwsync */
+    strasm_lwsync
+    /* atomic loop */
+    "1:                                                 \n"
+    "   lwarx   %[old_value], %[dest], %[zero]          \n"
+    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* sync */
+    strasm_sync
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (jint) old_value;
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+
+  // Note that xchg_ptr doesn't necessarily do an acquire
+  // (see synchronizer.cpp).
+
+  long old_value;
+  const uint64_t zero = 0;
+
+  __asm__ __volatile__ (
+    /* lwsync */
+    strasm_lwsync
+    /* atomic loop */
+    "1:                                                 \n"
+    "   ldarx   %[old_value], %[dest], %[zero]          \n"
+    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* sync */
+    strasm_sync
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (intptr_t) old_value;
+}
+
+inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
+  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
+  if (order != memory_order_relaxed) {
+    __asm__ __volatile__ (
+      /* fence */
+      strasm_sync
+      );
+  }
+}
+
+inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
+  if (order != memory_order_relaxed) {
+    __asm__ __volatile__ (
+      /* fence */
+      strasm_sync
+      );
+  }
+}
+
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
+  // specified otherwise (see atomic.hpp).
+
+  // Implemented using 32-bit word operations internally.
+  volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3);
+
+#ifdef VM_LITTLE_ENDIAN
+  const unsigned int shift_amount        = ((uintptr_t)dest & 3) * 8;
+#else
+  const unsigned int shift_amount        = ((~(uintptr_t)dest) & 3) * 8;
+#endif
+  const unsigned int masked_compare_val  = ((unsigned int)(unsigned char)compare_value),
+                     masked_exchange_val = ((unsigned int)(unsigned char)exchange_value),
+                     xor_value           = (masked_compare_val ^ masked_exchange_val) << shift_amount;
+
+  unsigned int old_value, value32;
+
+  cmpxchg_pre_membar(order);
+
+  __asm__ __volatile__ (
+    /* simple guard */
+    "   lbz     %[old_value], 0(%[dest])                  \n"
+    "   cmpw    %[masked_compare_val], %[old_value]       \n"
+    "   bne-    2f                                        \n"
+    /* atomic loop */
+    "1:                                                   \n"
+    "   lwarx   %[value32], 0, %[dest_base]               \n"
+    /* extract byte and compare */
+    "   srd     %[old_value], %[value32], %[shift_amount] \n"
+    "   clrldi  %[old_value], %[old_value], 56            \n"
+    "   cmpw    %[masked_compare_val], %[old_value]       \n"
+    "   bne-    2f                                        \n"
+    /* replace byte and try to store */
+    "   xor     %[value32], %[xor_value], %[value32]      \n"
+    "   stwcx.  %[value32], 0, %[dest_base]               \n"
+    "   bne-    1b                                        \n"
+    /* exit */
+    "2:                                                   \n"
+    /* out */
+    : [old_value]           "=&r"   (old_value),
+      [value32]             "=&r"   (value32),
+                            "=m"    (*dest),
+                            "=m"    (*dest_base)
+    /* in */
+    : [dest]                "b"     (dest),
+      [dest_base]           "b"     (dest_base),
+      [shift_amount]        "r"     (shift_amount),
+      [masked_compare_val]  "r"     (masked_compare_val),
+      [xor_value]           "r"     (xor_value),
+                            "m"     (*dest),
+                            "m"     (*dest_base)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  cmpxchg_post_membar(order);
+
+  return (jbyte)(unsigned char)old_value;
+}
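The byte-in-word scheme above, restated with a portable CAS as a sketch (not part of this changeset): the aligned containing word is CASed and only the addressed byte lane changes. Names are hypothetical; the lane shift is shown for the little-endian case only.

// Hypothetical illustration of the same technique with a compiler builtin.
static inline unsigned char byte_cas_in_word(volatile unsigned char* dest,
                                             unsigned char compare_value,
                                             unsigned char exchange_value) {
  volatile unsigned int* base =
      (volatile unsigned int*)((uintptr_t)dest & ~(uintptr_t)3);
  const unsigned int shift = ((uintptr_t)dest & 3) * 8;  // little-endian lane
  for (;;) {
    unsigned int old_word = *base;
    unsigned char old_byte = (unsigned char)(old_word >> shift);
    if (old_byte != compare_value)
      return old_byte;                                   // simple guard: fail fast
    unsigned int new_word = (old_word & ~(0xFFu << shift))
                          | ((unsigned int)exchange_value << shift);
    if (__sync_val_compare_and_swap(base, old_word, new_word) == old_word)
      return old_byte;                                   // byte swapped in place
  }
}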
+
+inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
+  // specified otherwise (see atomic.hpp).
+
+  unsigned int old_value;
+  const uint64_t zero = 0;
+
+  cmpxchg_pre_membar(order);
+
+  __asm__ __volatile__ (
+    /* simple guard */
+    "   lwz     %[old_value], 0(%[dest])                \n"
+    "   cmpw    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    /* atomic loop */
+    "1:                                                 \n"
+    "   lwarx   %[old_value], %[dest], %[zero]          \n"
+    "   cmpw    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [compare_value]   "r"     (compare_value),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  cmpxchg_post_membar(order);
+
+  return (jint) old_value;
+}
+
+inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
+  // specified otherwise (see atomic.hpp).
+
+  long old_value;
+  const uint64_t zero = 0;
+
+  cmpxchg_pre_membar(order);
+
+  __asm__ __volatile__ (
+    /* simple guard */
+    "   ld      %[old_value], 0(%[dest])                \n"
+    "   cmpd    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    /* atomic loop */
+    "1:                                                 \n"
+    "   ldarx   %[old_value], %[dest], %[zero]          \n"
+    "   cmpd    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [compare_value]   "r"     (compare_value),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  cmpxchg_post_membar(order);
+
+  return (jlong) old_value;
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+#undef strasm_sync
+#undef strasm_lwsync
+#undef strasm_isync
+#undef strasm_release
+#undef strasm_acquire
+#undef strasm_fence
+#undef strasm_nobarrier
+#undef strasm_nobarrier_clobber_memory
+
+#endif // OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_HPP
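The jbyte cmpxchg added above is worth spelling out: it CASes the aligned
32-bit word containing the byte rather than relying on a byte-wide ll/sc
pair, and uses the xor mask to swap only the addressed byte. A minimal
sketch of the same idea, using GCC __atomic builtins instead of HotSpot's
inline assembly (all names hypothetical, not part of the patch):

#include <stdint.h>

// Sketch only: byte-in-word CAS, mirroring the lwarx/stwcx. loop above.
static inline int8_t byte_cas_via_word(volatile int8_t* dest,
                                       int8_t compare_value,
                                       int8_t exchange_value) {
  volatile uint32_t* base =
      (volatile uint32_t*)((uintptr_t)dest & ~(uintptr_t)3);
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  const unsigned shift = ((uintptr_t)dest & 3) * 8;
#else
  const unsigned shift = ((~(uintptr_t)dest) & 3) * 8;  // big-endian layout
#endif
  const uint32_t mask = 0xffu << shift;

  uint32_t old_word = *base;
  for (;;) {
    uint8_t old_byte = (uint8_t)((old_word & mask) >> shift);
    if (old_byte != (uint8_t)compare_value)
      return (int8_t)old_byte;                  // guard: no store attempted
    uint32_t new_word =
        (old_word & ~mask) | ((uint32_t)(uint8_t)exchange_value << shift);
    if (__atomic_compare_exchange_n(base, &old_word, new_word, /*weak=*/true,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
      return compare_value;                     // only the addressed byte changed
    // old_word was refreshed by the failed CAS; loop and re-check the byte.
  }
}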
--- a/hotspot/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,482 +0,0 @@
-/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP
-#define OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-#ifndef PPC64
-#error "Atomic currently only implemented for PPC64"
-#endif
-
-// Implementation of class atomic
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
-
-//
-// machine barrier instructions:
-//
-// - sync            two-way memory barrier, aka fence
-// - lwsync          orders  Store|Store,
-//                            Load|Store,
-//                            Load|Load,
-//                   but not Store|Load
-// - eieio           orders memory accesses for device memory (only)
-// - isync           invalidates speculatively executed instructions
-//                   From the POWER ISA 2.06 documentation:
-//                    "[...] an isync instruction prevents the execution of
-//                   instructions following the isync until instructions
-//                   preceding the isync have completed, [...]"
-//                   From IBM's AIX assembler reference:
-//                    "The isync [...] instructions causes the processor to
-//                   refetch any instructions that might have been fetched
-//                   prior to the isync instruction. The instruction isync
-//                   causes the processor to wait for all previous instructions
-//                   to complete. Then any instructions already fetched are
-//                   discarded and instruction processing continues in the
-//                   environment established by the previous instructions."
-//
-// semantic barrier instructions:
-// (as defined in orderAccess.hpp)
-//
-// - release         orders Store|Store,       (maps to lwsync)
-//                           Load|Store
-// - acquire         orders  Load|Store,       (maps to lwsync)
-//                           Load|Load
-// - fence           orders Store|Store,       (maps to sync)
-//                           Load|Store,
-//                           Load|Load,
-//                          Store|Load
-//
-
-#define strasm_sync                       "\n  sync    \n"
-#define strasm_lwsync                     "\n  lwsync  \n"
-#define strasm_isync                      "\n  isync   \n"
-#define strasm_release                    strasm_lwsync
-#define strasm_acquire                    strasm_lwsync
-#define strasm_fence                      strasm_sync
-#define strasm_nobarrier                  ""
-#define strasm_nobarrier_clobber_memory   ""
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-
-  unsigned int result;
-
-  __asm__ __volatile__ (
-    strasm_lwsync
-    "1: lwarx   %0,  0, %2    \n"
-    "   add     %0, %0, %1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_isync
-    : /*%0*/"=&r" (result)
-    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
-    : "cc", "memory" );
-
-  return (jint) result;
-}
-
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-
-  long result;
-
-  __asm__ __volatile__ (
-    strasm_lwsync
-    "1: ldarx   %0,  0, %2    \n"
-    "   add     %0, %0, %1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_isync
-    : /*%0*/"=&r" (result)
-    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
-    : "cc", "memory" );
-
-  return (intptr_t) result;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
-}
-
-
-inline void Atomic::inc    (volatile jint*     dest) {
-
-  unsigned int temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: lwarx   %0,  0, %2    \n"
-    "   addic   %0, %0,  1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-
-  long temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: ldarx   %0,  0, %2    \n"
-    "   addic   %0, %0,  1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
-
-
-inline void Atomic::dec    (volatile jint*     dest) {
-
-  unsigned int temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: lwarx   %0,  0, %2    \n"
-    "   addic   %0, %0, -1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-
-  long temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: ldarx   %0,  0, %2    \n"
-    "   addic   %0, %0, -1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
-
-inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
-
-  // Note that xchg_ptr doesn't necessarily do an acquire
-  // (see synchronizer.cpp).
-
-  unsigned int old_value;
-  const uint64_t zero = 0;
-
-  __asm__ __volatile__ (
-    /* lwsync */
-    strasm_lwsync
-    /* atomic loop */
-    "1:                                                 \n"
-    "   lwarx   %[old_value], %[dest], %[zero]          \n"
-    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* isync */
-    strasm_sync
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  return (jint) old_value;
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-
-  // Note that xchg_ptr doesn't necessarily do an acquire
-  // (see synchronizer.cpp).
-
-  long old_value;
-  const uint64_t zero = 0;
-
-  __asm__ __volatile__ (
-    /* lwsync */
-    strasm_lwsync
-    /* atomic loop */
-    "1:                                                 \n"
-    "   ldarx   %[old_value], %[dest], %[zero]          \n"
-    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* isync */
-    strasm_sync
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  return (intptr_t) old_value;
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
-inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
-}
-
-inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
-
-  // Note that cmpxchg guarantees a two-way memory barrier across
-  // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
-  // specified otherwise (see atomic.hpp).
-
-  // Using 32 bit internally.
-  volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3);
-
-#ifdef VM_LITTLE_ENDIAN
-  const unsigned int shift_amount        = ((uintptr_t)dest & 3) * 8;
-#else
-  const unsigned int shift_amount        = ((~(uintptr_t)dest) & 3) * 8;
-#endif
-  const unsigned int masked_compare_val  = ((unsigned int)(unsigned char)compare_value),
-                     masked_exchange_val = ((unsigned int)(unsigned char)exchange_value),
-                     xor_value           = (masked_compare_val ^ masked_exchange_val) << shift_amount;
-
-  unsigned int old_value, value32;
-
-  cmpxchg_pre_membar(order);
-
-  __asm__ __volatile__ (
-    /* simple guard */
-    "   lbz     %[old_value], 0(%[dest])                  \n"
-    "   cmpw    %[masked_compare_val], %[old_value]       \n"
-    "   bne-    2f                                        \n"
-    /* atomic loop */
-    "1:                                                   \n"
-    "   lwarx   %[value32], 0, %[dest_base]               \n"
-    /* extract byte and compare */
-    "   srd     %[old_value], %[value32], %[shift_amount] \n"
-    "   clrldi  %[old_value], %[old_value], 56            \n"
-    "   cmpw    %[masked_compare_val], %[old_value]       \n"
-    "   bne-    2f                                        \n"
-    /* replace byte and try to store */
-    "   xor     %[value32], %[xor_value], %[value32]      \n"
-    "   stwcx.  %[value32], 0, %[dest_base]               \n"
-    "   bne-    1b                                        \n"
-    /* exit */
-    "2:                                                   \n"
-    /* out */
-    : [old_value]           "=&r"   (old_value),
-      [value32]             "=&r"   (value32),
-                            "=m"    (*dest),
-                            "=m"    (*dest_base)
-    /* in */
-    : [dest]                "b"     (dest),
-      [dest_base]           "b"     (dest_base),
-      [shift_amount]        "r"     (shift_amount),
-      [masked_compare_val]  "r"     (masked_compare_val),
-      [xor_value]           "r"     (xor_value),
-                            "m"     (*dest),
-                            "m"     (*dest_base)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  cmpxchg_post_membar(order);
-
-  return (jbyte)(unsigned char)old_value;
-}
-
-inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
-
-  // Note that cmpxchg guarantees a two-way memory barrier across
-  // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
-  // specified otherwise (see atomic.hpp).
-
-  unsigned int old_value;
-  const uint64_t zero = 0;
-
-  cmpxchg_pre_membar(order);
-
-  __asm__ __volatile__ (
-    /* simple guard */
-    "   lwz     %[old_value], 0(%[dest])                \n"
-    "   cmpw    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    /* atomic loop */
-    "1:                                                 \n"
-    "   lwarx   %[old_value], %[dest], %[zero]          \n"
-    "   cmpw    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [compare_value]   "r"     (compare_value),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  cmpxchg_post_membar(order);
-
-  return (jint) old_value;
-}
-
-inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
-
-  // Note that cmpxchg guarantees a two-way memory barrier across
-  // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
-  // specified otherwise (see atomic.hpp).
-
-  long old_value;
-  const uint64_t zero = 0;
-
-  cmpxchg_pre_membar(order);
-
-  __asm__ __volatile__ (
-    /* simple guard */
-    "   ld      %[old_value], 0(%[dest])                \n"
-    "   cmpd    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    /* atomic loop */
-    "1:                                                 \n"
-    "   ldarx   %[old_value], %[dest], %[zero]          \n"
-    "   cmpd    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [compare_value]   "r"     (compare_value),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  cmpxchg_post_membar(order);
-
-  return (jlong) old_value;
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-#undef strasm_sync
-#undef strasm_lwsync
-#undef strasm_isync
-#undef strasm_release
-#undef strasm_acquire
-#undef strasm_fence
-#undef strasm_nobarrier
-#undef strasm_nobarrier_clobber_memory
-
-#endif // OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP
--- a/hotspot/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,9 +25,6 @@
 #ifndef OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP
 #define OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP
 
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
 // Implementation of class atomic
 
 inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os_cpu/linux_x86/vm/atomic_linux_x86.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_HPP
+
+#include "runtime/os.hpp"
+
+// Implementation of class atomic
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+
+// Adding a lock prefix to an instruction on an MP machine
+#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
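+// The mp operand is tested at run time: on a uniprocessor the je branches
+// over the lock prefix, so the following instruction runs unlocked.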
+
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+  jint addend = add_value;
+  int mp = os::is_MP();
+  __asm__ volatile (  LOCK_IF_MP(%3) "xaddl %0,(%2)"
+                    : "=r" (addend)
+                    : "0" (addend), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return addend + add_value;
+}
+
+inline void Atomic::inc    (volatile jint*     dest) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%1) "addl $1,(%0)" :
+                    : "r" (dest), "r" (mp) : "cc", "memory");
+}
+
+inline void Atomic::inc_ptr(volatile void*     dest) {
+  inc_ptr((volatile intptr_t*)dest);
+}
+
+inline void Atomic::dec    (volatile jint*     dest) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%1) "subl $1,(%0)" :
+                    : "r" (dest), "r" (mp) : "cc", "memory");
+}
+
+inline void Atomic::dec_ptr(volatile void*     dest) {
+  dec_ptr((volatile intptr_t*)dest);
+}
+
+inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
+  __asm__ volatile (  "xchgl (%2),%0"
+                    : "=r" (exchange_value)
+                    : "0" (exchange_value), "r" (dest)
+                    : "memory");
+  return exchange_value;
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return exchange_value;
+}
+
+inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return exchange_value;
+}
+
+#ifdef AMD64
+inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  intptr_t addend = add_value;
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
+                        : "=r" (addend)
+                        : "0" (addend), "r" (dest), "r" (mp)
+                        : "cc", "memory");
+  return addend + add_value;
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%1) "addq $1,(%0)"
+                        :
+                        : "r" (dest), "r" (mp)
+                        : "cc", "memory");
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%1) "subq $1,(%0)"
+                        :
+                        : "r" (dest), "r" (mp)
+                        : "cc", "memory");
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  __asm__ __volatile__ ("xchgq (%2),%0"
+                        : "=r" (exchange_value)
+                        : "0" (exchange_value), "r" (dest)
+                        : "memory");
+  return exchange_value;
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
+                        : "=a" (exchange_value)
+                        : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                        : "cc", "memory");
+  return exchange_value;
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+#else // !AMD64
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
+}
+
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  inc((volatile jint*)dest);
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  dec((volatile jint*)dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+extern "C" {
+  // defined in linux_x86.s
+  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
+  void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, os::is_MP());
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  _Atomic_move_long(src, &dest);
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, dest);
+}
+
+#endif // AMD64
+
+#endif // OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_HPP
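Note the return convention used throughout this file: cmpxchg returns
whatever was previously in *dest (cmpxchgb/l/q leave the old value in
eax/rax), so callers detect success by comparing the result against
compare_value. A sketch of that pattern, with an unconditional lock prefix
in place of the LOCK_IF_MP run-time test (names hypothetical, not part of
the patch):

#include <stdint.h>

// Sketch only: the cmpxchgl idiom above, always locked (correct on UP
// machines too, just without the LOCK_IF_MP fast path).
static inline int32_t cas32(volatile int32_t* dest,
                            int32_t exchange_value,
                            int32_t compare_value) {
  __asm__ volatile ("lock; cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;   // prior *dest; equals compare_value on success
}

// Typical caller: a retry loop building fetch-and-add out of cas32.
static inline int32_t add_via_cas(volatile int32_t* dest, int32_t v) {
  for (;;) {
    int32_t old = *dest;
    if (cas32(dest, old + v, old) == old)
      return old + v;      // CAS succeeded
    // Lost the race; reload and retry.
  }
}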
--- a/hotspot/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,228 +0,0 @@
-/*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_INLINE_HPP
-#define OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-// Implementation of class atomic
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-
-// Adding a lock prefix to an instruction on MP machine
-#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  jint addend = add_value;
-  int mp = os::is_MP();
-  __asm__ volatile (  LOCK_IF_MP(%3) "xaddl %0,(%2)"
-                    : "=r" (addend)
-                    : "0" (addend), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-  return addend + add_value;
-}
-
-inline void Atomic::inc    (volatile jint*     dest) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%1) "addl $1,(%0)" :
-                    : "r" (dest), "r" (mp) : "cc", "memory");
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
-
-inline void Atomic::dec    (volatile jint*     dest) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%1) "subl $1,(%0)" :
-                    : "r" (dest), "r" (mp) : "cc", "memory");
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
-  __asm__ volatile (  "xchgl (%2),%0"
-                    : "=r" (exchange_value)
-                    : "0" (exchange_value), "r" (dest)
-                    : "memory");
-  return exchange_value;
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
-                    : "=a" (exchange_value)
-                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-  return exchange_value;
-}
-
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
-                    : "=a" (exchange_value)
-                    : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-  return exchange_value;
-}
-
-#ifdef AMD64
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  intptr_t addend = add_value;
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
-                        : "=r" (addend)
-                        : "0" (addend), "r" (dest), "r" (mp)
-                        : "cc", "memory");
-  return addend + add_value;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%1) "addq $1,(%0)"
-                        :
-                        : "r" (dest), "r" (mp)
-                        : "cc", "memory");
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%1) "subq $1,(%0)"
-                        :
-                        : "r" (dest), "r" (mp)
-                        : "cc", "memory");
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  __asm__ __volatile__ ("xchgq (%2),%0"
-                        : "=r" (exchange_value)
-                        : "0" (exchange_value), "r" (dest)
-                        : "memory");
-  return exchange_value;
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
-                        : "=a" (exchange_value)
-                        : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                        : "cc", "memory");
-  return exchange_value;
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
-
-#else // !AMD64
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  inc((volatile jint*)dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  dec((volatile jint*)dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-extern "C" {
-  // defined in linux_x86.s
-  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
-  void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, os::is_MP());
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) {
-  volatile jlong dest;
-  _Atomic_move_long(src, &dest);
-  return dest;
-}
-
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, dest);
-}
-
-#endif // AMD64
-
-#endif // OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_INLINE_HPP
--- a/hotspot/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
 #define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
 
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
+#define OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
+
+#include "runtime/os.hpp"
+
+// Implementation of class atomic
+
+#ifdef M68K
+
+/*
+ * __m68k_cmpxchg
+ *
+ * Atomically store newval in *ptr if *ptr is equal to oldval, for user space.
+ * Returns newval on success and oldval if no exchange happened.
+ * This implementation is processor specific and works on
+ * 68020 68030 68040 and 68060.
+ *
+ * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
+ * instruction.
+ * Using a kernel helper would be better for a complete implementation
+ * across the architecture family.
+ *
+ */
+
+static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
+  int ret;
+  __asm __volatile ("cas%.l %0,%2,%1"
+                   : "=d" (ret), "+m" (*(ptr))
+                   : "d" (newval), "0" (oldval));
+  return ret;
+}
+
+/* Perform an atomic compare and swap: if the current value of `*PTR'
+   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
+   `*PTR' before the operation.*/
+static inline int m68k_compare_and_swap(volatile int *ptr,
+                                        int oldval,
+                                        int newval) {
+  for (;;) {
+      int prev = *ptr;
+      if (prev != oldval)
+        return prev;
+
+      if (__m68k_cmpxchg (prev, newval, ptr) == newval)
+        // Success.
+        return prev;
+
+      // We failed even though prev == oldval.  Try again.
+    }
+}
+
+/* Atomically add an int to memory.  */
+static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
+  for (;;) {
+      // Loop until success.
+
+      int prev = *ptr;
+
+      if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
+        return prev + add_value;
+    }
+}
+
+/* Atomically write VALUE into `*PTR' and return the previous
+   contents of `*PTR'.  */
+static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
+  for (;;) {
+      // Loop until success.
+      int prev = *ptr;
+
+      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
+        return prev;
+    }
+}
+#endif // M68K
+
+#ifdef ARM
+
+/*
+ * __kernel_cmpxchg
+ *
+ * Atomically store newval in *ptr if *ptr is equal to oldval, for user space.
+ * Return zero if *ptr was changed or non-zero if no exchange happened.
+ * The C flag is also set if *ptr was changed to allow for assembly
+ * optimization in the calling code.
+ *
+ */
+
+typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
+#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
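+// 0xffff0fc0 is the fixed address of the cmpxchg entry in the ARM Linux
+// kuser helper page, which the kernel maps into every user process.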
+
+
+
+/* Perform an atomic compare and swap: if the current value of `*PTR'
+   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
+   `*PTR' before the operation.*/
+static inline int arm_compare_and_swap(volatile int *ptr,
+                                       int oldval,
+                                       int newval) {
+  for (;;) {
+      int prev = *ptr;
+      if (prev != oldval)
+        return prev;
+
+      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
+        // Success.
+        return prev;
+
+      // We failed even though prev == oldval.  Try again.
+    }
+}
+
+/* Atomically add an int to memory.  */
+static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
+  for (;;) {
+      // Loop until a __kernel_cmpxchg succeeds.
+
+      int prev = *ptr;
+
+      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
+        return prev + add_value;
+    }
+}
+
+/* Atomically write VALUE into `*PTR' and return the previous
+   contents of `*PTR'.  */
+static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
+  for (;;) {
+      // Loop until a __kernel_cmpxchg succeeds.
+      int prev = *ptr;
+
+      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
+        return prev;
+    }
+}
+#endif // ARM
+
+inline void Atomic::store(jint store_value, volatile jint* dest) {
+  *dest = store_value;
+}
+
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
+  *dest = store_value;
+}
+
+inline jint Atomic::add(jint add_value, volatile jint* dest) {
+#ifdef ARM
+  return arm_add_and_fetch(dest, add_value);
+#else
+#ifdef M68K
+  return m68k_add_and_fetch(dest, add_value);
+#else
+  return __sync_add_and_fetch(dest, add_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+#ifdef ARM
+  return arm_add_and_fetch(dest, add_value);
+#else
+#ifdef M68K
+  return m68k_add_and_fetch(dest, add_value);
+#else
+  return __sync_add_and_fetch(dest, add_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
+  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
+}
+
+inline void Atomic::inc(volatile jint* dest) {
+  add(1, dest);
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  add_ptr(1, dest);
+}
+
+inline void Atomic::inc_ptr(volatile void* dest) {
+  add_ptr(1, dest);
+}
+
+inline void Atomic::dec(volatile jint* dest) {
+  add(-1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  add_ptr(-1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile void* dest) {
+  add_ptr(-1, dest);
+}
+
+inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
+#ifdef ARM
+  return arm_lock_test_and_set(dest, exchange_value);
+#else
+#ifdef M68K
+  return m68k_lock_test_and_set(dest, exchange_value);
+#else
+  // __sync_lock_test_and_set is a bizarrely named atomic exchange
+  // operation.  Note that some platforms only support this with the
+  // limitation that the only valid value to store is the immediate
+  // constant 1.  There is a test for this in JNI_CreateJavaVM().
+  jint result = __sync_lock_test_and_set (dest, exchange_value);
+  // All atomic operations are expected to be full memory barriers
+  // (see atomic.hpp). However, __sync_lock_test_and_set is not
+  // a full memory barrier, but an acquire barrier. Hence, this added
+  // barrier.
+  __sync_synchronize();
+  return result;
+#endif // M68K
+#endif // ARM
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
+                                 volatile intptr_t* dest) {
+#ifdef ARM
+  return arm_lock_test_and_set(dest, exchange_value);
+#else
+#ifdef M68K
+  return m68k_lock_test_and_set(dest, exchange_value);
+#else
+  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
+  __sync_synchronize();
+  return result;
+#endif // M68K
+#endif // ARM
+}
+
+inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
+  return (void *) xchg_ptr((intptr_t) exchange_value,
+                           (volatile intptr_t*) dest);
+}
+
+inline jint Atomic::cmpxchg(jint exchange_value,
+                            volatile jint* dest,
+                            jint compare_value,
+                            cmpxchg_memory_order order) {
+#ifdef ARM
+  return arm_compare_and_swap(dest, compare_value, exchange_value);
+#else
+#ifdef M68K
+  return m68k_compare_and_swap(dest, compare_value, exchange_value);
+#else
+  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline jlong Atomic::cmpxchg(jlong exchange_value,
+                             volatile jlong* dest,
+                             jlong compare_value,
+                             cmpxchg_memory_order order) {
+
+  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
+                                    volatile intptr_t* dest,
+                                    intptr_t compare_value,
+                                    cmpxchg_memory_order order) {
+#ifdef ARM
+  return arm_compare_and_swap(dest, compare_value, exchange_value);
+#else
+#ifdef M68K
+  return m68k_compare_and_swap(dest, compare_value, exchange_value);
+#else
+  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline void* Atomic::cmpxchg_ptr(void* exchange_value,
+                                 volatile void* dest,
+                                 void* compare_value,
+                                 cmpxchg_memory_order order) {
+
+  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
+                              (volatile intptr_t*) dest,
+                              (intptr_t) compare_value,
+                              order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  os::atomic_copy64(src, &dest);
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  os::atomic_copy64((volatile jlong*)&store_value, dest);
+}
+
+#endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
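The Zero port leans on the GCC __sync builtins. One subtlety called out in
the comments above: __sync_lock_test_and_set is only an acquire barrier,
while HotSpot's Atomic contract (see atomic.hpp) expects full fences around
xchg, hence the trailing __sync_synchronize(). A condensed sketch of that
pairing (hypothetical names, not part of the patch):

// Sketch only: full-fence exchange built from the __sync builtins,
// mirroring Atomic::xchg above.
static inline int full_fence_xchg(volatile int* dest, int new_value) {
  int old = __sync_lock_test_and_set(dest, new_value);  // acquire barrier only
  __sync_synchronize();                                 // upgrade to a full fence
  return old;
}

// CAS is simpler: __sync_val_compare_and_swap is already a full barrier and
// returns the prior value, matching the cmpxchg contract used above.
static inline int full_fence_cas(volatile int* dest, int cmp, int xchg) {
  return __sync_val_compare_and_swap(dest, cmp, xchg);
}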
--- a/hotspot/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,328 +0,0 @@
-/*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP
-#define OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-// Implementation of class atomic
-
-#ifdef M68K
-
-/*
- * __m68k_cmpxchg
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
- * Returns newval on success and oldval if no exchange happened.
- * This implementation is processor specific and works on
- * 68020 68030 68040 and 68060.
- *
- * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
- * instruction.
- * Using a kernelhelper would be better for arch complete implementation.
- *
- */
-
-static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
-  int ret;
-  __asm __volatile ("cas%.l %0,%2,%1"
-                   : "=d" (ret), "+m" (*(ptr))
-                   : "d" (newval), "0" (oldval));
-  return ret;
-}
-
-/* Perform an atomic compare and swap: if the current value of `*PTR'
-   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
-   `*PTR' before the operation.*/
-static inline int m68k_compare_and_swap(volatile int *ptr,
-                                        int oldval,
-                                        int newval) {
-  for (;;) {
-      int prev = *ptr;
-      if (prev != oldval)
-        return prev;
-
-      if (__m68k_cmpxchg (prev, newval, ptr) == newval)
-        // Success.
-        return prev;
-
-      // We failed even though prev == oldval.  Try again.
-    }
-}
-
-/* Atomically add an int to memory.  */
-static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
-  for (;;) {
-      // Loop until success.
-
-      int prev = *ptr;
-
-      if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
-        return prev + add_value;
-    }
-}
-
-/* Atomically write VALUE into `*PTR' and returns the previous
-   contents of `*PTR'.  */
-static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
-  for (;;) {
-      // Loop until success.
-      int prev = *ptr;
-
-      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
-        return prev;
-    }
-}
-#endif // M68K
-
-#ifdef ARM
-
-/*
- * __kernel_cmpxchg
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
- * Return zero if *ptr was changed or non-zero if no exchange happened.
- * The C flag is also set if *ptr was changed to allow for assembly
- * optimization in the calling code.
- *
- */
-
-typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
-#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
-
-
-
-/* Perform an atomic compare and swap: if the current value of `*PTR'
-   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
-   `*PTR' before the operation.*/
-static inline int arm_compare_and_swap(volatile int *ptr,
-                                       int oldval,
-                                       int newval) {
-  for (;;) {
-      int prev = *ptr;
-      if (prev != oldval)
-        return prev;
-
-      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
-        // Success.
-        return prev;
-
-      // We failed even though prev == oldval.  Try again.
-    }
-}
-
-/* Atomically add an int to memory.  */
-static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
-  for (;;) {
-      // Loop until a __kernel_cmpxchg succeeds.
-
-      int prev = *ptr;
-
-      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
-        return prev + add_value;
-    }
-}
-
-/* Atomically write VALUE into `*PTR' and returns the previous
-   contents of `*PTR'.  */
-static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
-  for (;;) {
-      // Loop until a __kernel_cmpxchg succeeds.
-      int prev = *ptr;
-
-      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
-        return prev;
-    }
-}
-#endif // ARM
-
-inline void Atomic::store(jint store_value, volatile jint* dest) {
-  *dest = store_value;
-}
-
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
-  *dest = store_value;
-}
-
-inline jint Atomic::add(jint add_value, volatile jint* dest) {
-#ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
-#else
-#ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
-#else
-  return __sync_add_and_fetch(dest, add_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-#ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
-#else
-#ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
-#else
-  return __sync_add_and_fetch(dest, add_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
-}
-
-inline void Atomic::inc(volatile jint* dest) {
-  add(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  add_ptr(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile void* dest) {
-  add_ptr(1, dest);
-}
-
-inline void Atomic::dec(volatile jint* dest) {
-  add(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  add_ptr(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile void* dest) {
-  add_ptr(-1, dest);
-}
-
-inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
-#ifdef ARM
-  return arm_lock_test_and_set(dest, exchange_value);
-#else
-#ifdef M68K
-  return m68k_lock_test_and_set(dest, exchange_value);
-#else
-  // __sync_lock_test_and_set is a bizarrely named atomic exchange
-  // operation.  Note that some platforms only support this with the
-  // limitation that the only valid value to store is the immediate
-  // constant 1.  There is a test for this in JNI_CreateJavaVM().
-  jint result = __sync_lock_test_and_set (dest, exchange_value);
-  // All atomic operations are expected to be full memory barriers
-  // (see atomic.hpp). However, __sync_lock_test_and_set is not
-  // a full memory barrier, but an acquire barrier. Hence, this added
-  // barrier.
-  __sync_synchronize();
-  return result;
-#endif // M68K
-#endif // ARM
-}
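
The barrier comment above is the central subtlety of this function: __sync_lock_test_and_set only has acquire semantics, so the full-fence contract of atomic.hpp must be restored with an explicit __sync_synchronize. With the newer __atomic builtins (GCC 4.7+; a sketch only, not what this file uses) the same contract is a single call:

static inline jint seq_cst_xchg(volatile jint* dest, jint exchange_value) {
  // __ATOMIC_SEQ_CST requests a full barrier, folding the
  // __sync_lock_test_and_set + __sync_synchronize pair into one builtin.
  return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
}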
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
-                                 volatile intptr_t* dest) {
-#ifdef ARM
-  return arm_lock_test_and_set(dest, exchange_value);
-#else
-#ifdef M68K
-  return m68k_lock_test_and_set(dest, exchange_value);
-#else
-  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
-  __sync_synchronize();
-  return result;
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-  return (void *) xchg_ptr((intptr_t) exchange_value,
-                           (volatile intptr_t*) dest);
-}
-
-inline jint Atomic::cmpxchg(jint exchange_value,
-                            volatile jint* dest,
-                            jint compare_value,
-                            cmpxchg_memory_order order) {
-#ifdef ARM
-  return arm_compare_and_swap(dest, compare_value, exchange_value);
-#else
-#ifdef M68K
-  return m68k_compare_and_swap(dest, compare_value, exchange_value);
-#else
-  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline jlong Atomic::cmpxchg(jlong exchange_value,
-                             volatile jlong* dest,
-                             jlong compare_value,
-                             cmpxchg_memory_order order) {
-
-  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
-                                    volatile intptr_t* dest,
-                                    intptr_t compare_value,
-                                    cmpxchg_memory_order order) {
-#ifdef ARM
-  return arm_compare_and_swap(dest, compare_value, exchange_value);
-#else
-#ifdef M68K
-  return m68k_compare_and_swap(dest, compare_value, exchange_value);
-#else
-  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value,
-                                 volatile void* dest,
-                                 void* compare_value,
-                                 cmpxchg_memory_order order) {
-
-  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
-                              (volatile intptr_t*) dest,
-                              (intptr_t) compare_value,
-                              order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) {
-  volatile jlong dest;
-  os::atomic_copy64(src, &dest);
-  return dest;
-}
-
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  os::atomic_copy64((volatile jlong*)&store_value, dest);
-}
-
-#endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP
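
For context on how this interface is consumed, here is a sketch of a typical cmpxchg caller: a counter that is incremented only while it stays below a cap. The helper is hypothetical and assumes the order parameter's default (conservative) value:

// Hypothetical caller of Atomic::cmpxchg: increment *counter only
// while it is below 'limit', retrying when the CAS loses a race.
static bool bounded_increment(volatile jint* counter, jint limit) {
  for (;;) {
    jint old = *counter;
    if (old >= limit)
      return false;                              // Already at the cap.
    if (Atomic::cmpxchg(old + 1, counter, old) == old)
      return true;                               // Our CAS won.
    // Another thread changed *counter; re-read and retry.
  }
}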
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_SOLARIS_SPARC_VM_ATOMIC_SOLARIS_SPARC_HPP
+#define OS_CPU_SOLARIS_SPARC_VM_ATOMIC_SOLARIS_SPARC_HPP
+
+#include "runtime/os.hpp"
+
+// Implementation of class Atomic
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+inline void Atomic::inc    (volatile jint*     dest) { (void)add    (1, dest); }
+inline void Atomic::inc_ptr(volatile intptr_t* dest) { (void)add_ptr(1, dest); }
+inline void Atomic::inc_ptr(volatile void*     dest) { (void)add_ptr(1, dest); }
+
+inline void Atomic::dec    (volatile jint*     dest) { (void)add    (-1, dest); }
+inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
+inline void Atomic::dec_ptr(volatile void*     dest) { (void)add_ptr(-1, dest); }
+
+
+#ifdef _LP64
+
+inline void Atomic::store(jlong store_value, jlong* dest) { *dest = store_value; }
+inline void Atomic::store(jlong store_value, volatile jlong* dest) { *dest = store_value; }
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+#else
+
+extern "C" void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst);
+
+inline void Atomic_move_long(volatile jlong* src, volatile jlong* dst) {
+  _Atomic_move_long_v9(src, dst);
+}
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  Atomic_move_long(src, &dest);
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  Atomic_move_long((volatile jlong*)&store_value, dest);
+}
+
+#endif
+
+#ifdef _GNU_SOURCE
+
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+  intptr_t rv;
+  __asm__ volatile(
+    "1: \n\t"
+    " ld     [%2], %%o2\n\t"
+    " add    %1, %%o2, %%o3\n\t"
+    " cas    [%2], %%o2, %%o3\n\t"
+    " cmp    %%o2, %%o3\n\t"
+    " bne    1b\n\t"
+    "  nop\n\t"
+    " add    %1, %%o2, %0\n\t"
+    : "=r" (rv)
+    : "r" (add_value), "r" (dest)
+    : "memory", "o2", "o3");
+  return rv;
+}
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  intptr_t rv;
+#ifdef _LP64
+  __asm__ volatile(
+    "1: \n\t"
+    " ldx    [%2], %%o2\n\t"
+    " add    %0, %%o2, %%o3\n\t"
+    " casx   [%2], %%o2, %%o3\n\t"
+    " cmp    %%o2, %%o3\n\t"
+    " bne    %%xcc, 1b\n\t"
+    "  nop\n\t"
+    " add    %0, %%o2, %0\n\t"
+    : "=r" (rv)
+    : "r" (add_value), "r" (dest)
+    : "memory", "o2", "o3");
+#else //_LP64
+  __asm__ volatile(
+    "1: \n\t"
+    " ld     [%2], %%o2\n\t"
+    " add    %1, %%o2, %%o3\n\t"
+    " cas    [%2], %%o2, %%o3\n\t"
+    " cmp    %%o2, %%o3\n\t"
+    " bne    1b\n\t"
+    "  nop\n\t"
+    " add    %1, %%o2, %0\n\t"
+    : "=r" (rv)
+    : "r" (add_value), "r" (dest)
+    : "memory", "o2", "o3");
+#endif // _LP64
+  return rv;
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest);
+}
+
+
+inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
+  intptr_t rv = exchange_value;
+  __asm__ volatile(
+    " swap   [%2],%1\n\t"
+    : "=r" (rv)
+    : "0" (exchange_value) /* we use same register as for return value */, "r" (dest)
+    : "memory");
+  return rv;
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  intptr_t rv = exchange_value;
+#ifdef _LP64
+  __asm__ volatile(
+    "1:\n\t"
+    " mov    %1, %%o3\n\t"
+    " ldx    [%2], %%o2\n\t"
+    " casx   [%2], %%o2, %%o3\n\t"
+    " cmp    %%o2, %%o3\n\t"
+    " bne    %%xcc, 1b\n\t"
+    "  nop\n\t"
+    " mov    %%o2, %0\n\t"
+    : "=r" (rv)
+    : "r" (exchange_value), "r" (dest)
+    : "memory", "o2", "o3");
+#else  //_LP64
+  __asm__ volatile(
+    "swap    [%2],%1\n\t"
+    : "=r" (rv)
+    : "0" (exchange_value) /* we use same register as for return value */, "r" (dest)
+    : "memory");
+#endif // _LP64
+  return rv;
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+
+inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+  jint rv;
+  __asm__ volatile(
+    " cas    [%2], %3, %0"
+    : "=r" (rv)
+    : "0" (exchange_value), "r" (dest), "r" (compare_value)
+    : "memory");
+  return rv;
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+#ifdef _LP64
+  jlong rv;
+  __asm__ volatile(
+    " casx   [%2], %3, %0"
+    : "=r" (rv)
+    : "0" (exchange_value), "r" (dest), "r" (compare_value)
+    : "memory");
+  return rv;
+#else  //_LP64
+  volatile jlong_accessor evl, cvl, rv;
+  evl.long_value = exchange_value;
+  cvl.long_value = compare_value;
+
+  __asm__ volatile(
+    " sllx   %2, 32, %2\n\t"
+    " srl    %3, 0,  %3\n\t"
+    " or     %2, %3, %2\n\t"
+    " sllx   %5, 32, %5\n\t"
+    " srl    %6, 0,  %6\n\t"
+    " or     %5, %6, %5\n\t"
+    " casx   [%4], %5, %2\n\t"
+    " srl    %2, 0, %1\n\t"
+    " srlx   %2, 32, %0\n\t"
+    : "=r" (rv.words[0]), "=r" (rv.words[1])
+    : "r"  (evl.words[0]), "r" (evl.words[1]), "r" (dest), "r" (cvl.words[0]), "r" (cvl.words[1])
+    : "memory");
+
+  return rv.long_value;
+#endif  //_LP64
+}
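
On 32-bit SPARC a jlong arrives as two 32-bit halves, so the sllx/srl/or prologue above glues each pair of words into a single 64-bit register before the casx, and the srl/srlx epilogue splits the result back apart. The recombination step in plain C++ (illustration only; jlong_accessor is HotSpot's union over a jlong and its two 32-bit words):

// What the sllx/srl/or sequence computes: join a high and a low
// 32-bit word into one 64-bit value.
static inline jlong join_words(juint hi, juint lo) {
  return ((jlong)hi << 32) | (jlong)lo;
}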
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  intptr_t rv;
+#ifdef _LP64
+  __asm__ volatile(
+    " casx    [%2], %3, %0"
+    : "=r" (rv)
+    : "0" (exchange_value), "r" (dest), "r" (compare_value)
+    : "memory");
+#else  //_LP64
+  __asm__ volatile(
+    " cas     [%2], %3, %0"
+    : "=r" (rv)
+    : "0" (exchange_value), "r" (dest), "r" (compare_value)
+    : "memory");
+#endif // _LP64
+  return rv;
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
+}
+
+#else // _GNU_SOURCE
+
+#if defined(COMPILER2) || defined(_LP64)
+
+// This is the interface to the atomic instructions in solaris_sparc.il.
+// It is messy because we still need to support SPARC v8, where these
+// instructions are illegal.  Once v8 support is dropped, much of this
+// code can go away.  Compiler2 does not support v8 at all, so the
+// COMPILER2 path omits the instruction-set check.
+
+extern "C" jint     _Atomic_swap32(jint     exchange_value, volatile jint*     dest);
+extern "C" intptr_t _Atomic_swap64(intptr_t exchange_value, volatile intptr_t* dest);
+
+extern "C" jint     _Atomic_cas32(jint     exchange_value, volatile jint*     dest, jint     compare_value);
+extern "C" intptr_t _Atomic_cas64(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value);
+extern "C" jlong    _Atomic_casl (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value);
+
+extern "C" jint     _Atomic_add32(jint     inc,       volatile jint*     dest);
+extern "C" intptr_t _Atomic_add64(intptr_t add_value, volatile intptr_t* dest);
+
+
+inline jint     Atomic::add     (jint    add_value, volatile jint*     dest) {
+  return _Atomic_add32(add_value, dest);
+}
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+#ifdef _LP64
+  return _Atomic_add64(add_value, dest);
+#else  //_LP64
+  return _Atomic_add32(add_value, dest);
+#endif // _LP64
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest);
+}
+
+
+inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
+  return _Atomic_swap32(exchange_value, dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+#ifdef _LP64
+  return _Atomic_swap64(exchange_value, dest);
+#else  // _LP64
+  return _Atomic_swap32(exchange_value, dest);
+#endif // _LP64
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+
+inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+  return _Atomic_cas32(exchange_value, dest, compare_value);
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+#ifdef _LP64
+  // Return 64 bit value in %o0
+  return _Atomic_cas64((intptr_t)exchange_value, (intptr_t *)dest, (intptr_t)compare_value);
+#else  // _LP64
+  // Return 64 bit value in %o0,%o1 by hand
+  return _Atomic_casl(exchange_value, dest, compare_value);
+#endif // _LP64
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+#ifdef _LP64
+  return _Atomic_cas64(exchange_value, dest, compare_value);
+#else  // _LP64
+  return _Atomic_cas32(exchange_value, dest, compare_value);
+#endif // _LP64
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
+}
+
+
+#else // _LP64 || COMPILER2
+
+
+// 32-bit compiler1 only
+
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+  return (*os::atomic_add_func)(add_value, dest);
+}
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add((jint)add_value, (volatile jint*)dest);
+}
+
+
+inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
+  return (*os::atomic_xchg_func)(exchange_value, dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+
+inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+  return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+  return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+#endif // _LP64 || COMPILER2
+
+#endif // _GNU_SOURCE
+
+#endif // OS_CPU_SOLARIS_SPARC_VM_ATOMIC_SOLARIS_SPARC_HPP
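
A reading aid for the GCC extended asm used throughout the _GNU_SOURCE branch: "=r" declares a write-only output in any integer register, plain "r" an input, a digit constraint such as "0" forces an input into the same register as output %0, and the "memory" clobber keeps the compiler from caching memory values across the statement. A minimal annotated sketch in the same style (illustration only):

static inline int sparc_add(int a, int b) {
  int result;
  __asm__ volatile(
    " add    %1, %2, %0\n\t"  // SPARC: %0 = %1 + %2
    : "=r" (result)           // %0: output, any integer register
    : "r" (a), "r" (b));      // %1, %2: inputs, any integer registers
  return result;
}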
--- a/hotspot/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,377 +0,0 @@
-/*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_SOLARIS_SPARC_VM_ATOMIC_SOLARIS_SPARC_INLINE_HPP
-#define OS_CPU_SOLARIS_SPARC_VM_ATOMIC_SOLARIS_SPARC_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-// Implementation of class Atomic
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-inline void Atomic::inc    (volatile jint*     dest) { (void)add    (1, dest); }
-inline void Atomic::inc_ptr(volatile intptr_t* dest) { (void)add_ptr(1, dest); }
-inline void Atomic::inc_ptr(volatile void*     dest) { (void)add_ptr(1, dest); }
-
-inline void Atomic::dec    (volatile jint*     dest) { (void)add    (-1, dest); }
-inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
-inline void Atomic::dec_ptr(volatile void*     dest) { (void)add_ptr(-1, dest); }
-
-
-#ifdef _LP64
-
-inline void Atomic::store(jlong store_value, jlong* dest) { *dest = store_value; }
-inline void Atomic::store(jlong store_value, volatile jlong* dest) { *dest = store_value; }
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
-
-#else
-
-extern "C" void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst);
-
-inline void Atomic_move_long(volatile jlong* src, volatile jlong* dst) {
-  _Atomic_move_long_v9(src, dst);
-}
-
-inline jlong Atomic::load(volatile jlong* src) {
-  volatile jlong dest;
-  Atomic_move_long(src, &dest);
-  return dest;
-}
-
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  Atomic_move_long((volatile jlong*)&store_value, dest);
-}
-
-#endif
-
-#ifdef _GNU_SOURCE
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  intptr_t rv;
-  __asm__ volatile(
-    "1: \n\t"
-    " ld     [%2], %%o2\n\t"
-    " add    %1, %%o2, %%o3\n\t"
-    " cas    [%2], %%o2, %%o3\n\t"
-    " cmp    %%o2, %%o3\n\t"
-    " bne    1b\n\t"
-    "  nop\n\t"
-    " add    %1, %%o2, %0\n\t"
-    : "=r" (rv)
-    : "r" (add_value), "r" (dest)
-    : "memory", "o2", "o3");
-  return rv;
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  intptr_t rv;
-#ifdef _LP64
-  __asm__ volatile(
-    "1: \n\t"
-    " ldx    [%2], %%o2\n\t"
-    " add    %0, %%o2, %%o3\n\t"
-    " casx   [%2], %%o2, %%o3\n\t"
-    " cmp    %%o2, %%o3\n\t"
-    " bne    %%xcc, 1b\n\t"
-    "  nop\n\t"
-    " add    %0, %%o2, %0\n\t"
-    : "=r" (rv)
-    : "r" (add_value), "r" (dest)
-    : "memory", "o2", "o3");
-#else //_LP64
-  __asm__ volatile(
-    "1: \n\t"
-    " ld     [%2], %%o2\n\t"
-    " add    %1, %%o2, %%o3\n\t"
-    " cas    [%2], %%o2, %%o3\n\t"
-    " cmp    %%o2, %%o3\n\t"
-    " bne    1b\n\t"
-    "  nop\n\t"
-    " add    %1, %%o2, %0\n\t"
-    : "=r" (rv)
-    : "r" (add_value), "r" (dest)
-    : "memory", "o2", "o3");
-#endif // _LP64
-  return rv;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest);
-}
-
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
-  intptr_t rv = exchange_value;
-  __asm__ volatile(
-    " swap   [%2],%1\n\t"
-    : "=r" (rv)
-    : "0" (exchange_value) /* we use same register as for return value */, "r" (dest)
-    : "memory");
-  return rv;
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  intptr_t rv = exchange_value;
-#ifdef _LP64
-  __asm__ volatile(
-    "1:\n\t"
-    " mov    %1, %%o3\n\t"
-    " ldx    [%2], %%o2\n\t"
-    " casx   [%2], %%o2, %%o3\n\t"
-    " cmp    %%o2, %%o3\n\t"
-    " bne    %%xcc, 1b\n\t"
-    "  nop\n\t"
-    " mov    %%o2, %0\n\t"
-    : "=r" (rv)
-    : "r" (exchange_value), "r" (dest)
-    : "memory", "o2", "o3");
-#else  //_LP64
-  __asm__ volatile(
-    "swap    [%2],%1\n\t"
-    : "=r" (rv)
-    : "0" (exchange_value) /* we use same register as for return value */, "r" (dest)
-    : "memory");
-#endif // _LP64
-  return rv;
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
-
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  jint rv;
-  __asm__ volatile(
-    " cas    [%2], %3, %0"
-    : "=r" (rv)
-    : "0" (exchange_value), "r" (dest), "r" (compare_value)
-    : "memory");
-  return rv;
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-#ifdef _LP64
-  jlong rv;
-  __asm__ volatile(
-    " casx   [%2], %3, %0"
-    : "=r" (rv)
-    : "0" (exchange_value), "r" (dest), "r" (compare_value)
-    : "memory");
-  return rv;
-#else  //_LP64
-  volatile jlong_accessor evl, cvl, rv;
-  evl.long_value = exchange_value;
-  cvl.long_value = compare_value;
-
-  __asm__ volatile(
-    " sllx   %2, 32, %2\n\t"
-    " srl    %3, 0,  %3\n\t"
-    " or     %2, %3, %2\n\t"
-    " sllx   %5, 32, %5\n\t"
-    " srl    %6, 0,  %6\n\t"
-    " or     %5, %6, %5\n\t"
-    " casx   [%4], %5, %2\n\t"
-    " srl    %2, 0, %1\n\t"
-    " srlx   %2, 32, %0\n\t"
-    : "=r" (rv.words[0]), "=r" (rv.words[1])
-    : "r"  (evl.words[0]), "r" (evl.words[1]), "r" (dest), "r" (cvl.words[0]), "r" (cvl.words[1])
-    : "memory");
-
-  return rv.long_value;
-#endif  //_LP64
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  intptr_t rv;
-#ifdef _LP64
-  __asm__ volatile(
-    " casx    [%2], %3, %0"
-    : "=r" (rv)
-    : "0" (exchange_value), "r" (dest), "r" (compare_value)
-    : "memory");
-#else  //_LP64
-  __asm__ volatile(
-    " cas     [%2], %3, %0"
-    : "=r" (rv)
-    : "0" (exchange_value), "r" (dest), "r" (compare_value)
-    : "memory");
-#endif // _LP64
-  return rv;
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
-}
-
-#else // _GNU_SOURCE
-
-#if defined(COMPILER2) || defined(_LP64)
-
-// This is the interface to the atomic instructions in solaris_sparc.il.
-// It is messy because we still need to support SPARC v8, where these
-// instructions are illegal.  Once v8 support is dropped, much of this
-// code can go away.  Compiler2 does not support v8 at all, so the
-// COMPILER2 path omits the instruction-set check.
-
-extern "C" jint     _Atomic_swap32(jint     exchange_value, volatile jint*     dest);
-extern "C" intptr_t _Atomic_swap64(intptr_t exchange_value, volatile intptr_t* dest);
-
-extern "C" jint     _Atomic_cas32(jint     exchange_value, volatile jint*     dest, jint     compare_value);
-extern "C" intptr_t _Atomic_cas64(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value);
-extern "C" jlong    _Atomic_casl (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value);
-
-extern "C" jint     _Atomic_add32(jint     inc,       volatile jint*     dest);
-extern "C" intptr_t _Atomic_add64(intptr_t add_value, volatile intptr_t* dest);
-
-
-inline jint     Atomic::add     (jint    add_value, volatile jint*     dest) {
-  return _Atomic_add32(add_value, dest);
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-#ifdef _LP64
-  return _Atomic_add64(add_value, dest);
-#else  //_LP64
-  return _Atomic_add32(add_value, dest);
-#endif // _LP64
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest);
-}
-
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
-  return _Atomic_swap32(exchange_value, dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-#ifdef _LP64
-  return _Atomic_swap64(exchange_value, dest);
-#else  // _LP64
-  return _Atomic_swap32(exchange_value, dest);
-#endif // _LP64
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
-
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cas32(exchange_value, dest, compare_value);
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-#ifdef _LP64
-  // Return 64 bit value in %o0
-  return _Atomic_cas64((intptr_t)exchange_value, (intptr_t *)dest, (intptr_t)compare_value);
-#else  // _LP64
-  // Return 64 bit value in %o0,%o1 by hand
-  return _Atomic_casl(exchange_value, dest, compare_value);
-#endif // _LP64
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-#ifdef _LP64
-  return _Atomic_cas64(exchange_value, dest, compare_value);
-#else  // _LP64
-  return _Atomic_cas32(exchange_value, dest, compare_value);
-#endif // _LP64
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
-}
-
-
-#else // _LP64 || COMPILER2
-
-
-// 32-bit compiler1 only
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  return (*os::atomic_add_func)(add_value, dest);
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add((jint)add_value, (volatile jint*)dest);
-}
-
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
-  return (*os::atomic_xchg_func)(exchange_value, dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-#endif // _LP64 || COMPILER2
-
-#endif // _GNU_SOURCE
-
-#endif // OS_CPU_SOLARIS_SPARC_VM_ATOMIC_SOLARIS_SPARC_INLINE_HPP
--- a/hotspot/src/os_cpu/solaris_sparc/vm/orderAccess_solaris_sparc.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/orderAccess_solaris_sparc.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP
 #define OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP
 
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 
 // Compiler version last used for testing: solaris studio 12u3
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
+#define OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
+
+#include "runtime/os.hpp"
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+
+
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+inline void Atomic::inc    (volatile jint*     dest) { (void)add    (1, dest); }
+inline void Atomic::inc_ptr(volatile intptr_t* dest) { (void)add_ptr(1, dest); }
+inline void Atomic::inc_ptr(volatile void*     dest) { (void)add_ptr(1, dest); }
+
+inline void Atomic::dec    (volatile jint*     dest) { (void)add    (-1, dest); }
+inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
+inline void Atomic::dec_ptr(volatile void*     dest) { (void)add_ptr(-1, dest); }
+
+// For Sun Studio - implementation is in solaris_x86_[32/64].il.
+// For gcc - implementation is just below.
+
+// The lock prefix can be omitted for certain instructions on uniprocessors; to
+// facilitate this, os::is_MP() is passed as an additional argument.  64-bit
+// processors are assumed to be multi-threaded and/or multi-core, so the extra
+// argument is unnecessary.
+#ifndef _LP64
+#define IS_MP_DECL() , int is_mp
+#define IS_MP_ARG()  , (int) os::is_MP()
+#else
+#define IS_MP_DECL()
+#define IS_MP_ARG()
+#endif // _LP64
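
The effect of IS_MP_DECL/IS_MP_ARG is easiest to see on a single declaration: on 32-bit builds the extern "C" prototypes gain a trailing is_mp parameter, on 64-bit builds they do not.

// 32-bit (!_LP64): jint _Atomic_add(jint add_value, volatile jint* dest, int is_mp);
// 64-bit ( _LP64): jint _Atomic_add(jint add_value, volatile jint* dest);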
+
+extern "C" {
+  jint _Atomic_add(jint add_value, volatile jint* dest IS_MP_DECL());
+  jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
+  jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest,
+                       jbyte compare_value IS_MP_DECL());
+  jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest,
+                       jint compare_value IS_MP_DECL());
+  jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest,
+                             jlong compare_value IS_MP_DECL());
+}
+
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+  return _Atomic_add(add_value, dest IS_MP_ARG());
+}
+
+inline jint     Atomic::xchg       (jint     exchange_value, volatile jint*     dest) {
+  return _Atomic_xchg(exchange_value, dest);
+}
+
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
+  return _Atomic_cmpxchg_byte(exchange_value, dest, compare_value IS_MP_ARG());
+}
+
+inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+  return _Atomic_cmpxchg(exchange_value, dest, compare_value IS_MP_ARG());
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value IS_MP_ARG());
+}
+
+
+#ifdef AMD64
+inline void Atomic::store    (jlong    store_value, jlong*             dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
+extern "C" jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);
+extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  return (intptr_t)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  return (intptr_t)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void*)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
+}
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+#else // !AMD64
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add((jint)add_value, (volatile jint*)dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+extern "C" void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  _Atomic_move_long(src, &dest);
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, dest);
+}
+
+#endif // AMD64
+
+#ifdef _GNU_SOURCE
+// Add a lock prefix to an instruction on an MP machine
+#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
+
+extern "C" {
+  inline jint _Atomic_add(jint add_value, volatile jint* dest, int mp) {
+    jint addend = add_value;
+    __asm__ volatile (  LOCK_IF_MP(%3) "xaddl %0,(%2)"
+                    : "=r" (addend)
+                    : "0" (addend), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+    return addend + add_value;
+  }
+
+#ifdef AMD64
+  inline jlong _Atomic_add_long(jlong add_value, volatile jlong* dest, int mp) {
+    intptr_t addend = add_value;
+    __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
+                        : "=r" (addend)
+                        : "0" (addend), "r" (dest), "r" (mp)
+                        : "cc", "memory");
+    return addend + add_value;
+  }
+
+  inline jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest) {
+    __asm__ __volatile__ ("xchgq (%2),%0"
+                        : "=r" (exchange_value)
+                        : "0" (exchange_value), "r" (dest)
+                        : "memory");
+    return exchange_value;
+  }
+
+#endif // AMD64
+
+  inline jint _Atomic_xchg(jint exchange_value, volatile jint* dest) {
+    __asm__ __volatile__ ("xchgl (%2),%0"
+                          : "=r" (exchange_value)
+                        : "0" (exchange_value), "r" (dest)
+                        : "memory");
+    return exchange_value;
+  }
+
+  inline jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, int mp) {
+    __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+    return exchange_value;
+  }
+
+
+  inline jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, int mp) {
+    __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+    return exchange_value;
+  }
+
+  // This is the interface to the atomic instruction in solaris_i486.s.
+  jlong _Atomic_cmpxchg_long_gcc(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp);
+
+  inline jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp) {
+#ifdef AMD64
+    __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
+                        : "=a" (exchange_value)
+                        : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                        : "cc", "memory");
+    return exchange_value;
+#else
+    return _Atomic_cmpxchg_long_gcc(exchange_value, dest, compare_value, os::is_MP());
+
+    #if 0
+    // The code below does not work, presumably because of a bug in gcc.
+    // The error message says:
+    //   can't find a register in class BREG while reloading asm
+    // However, this code is kept so that _Atomic_cmpxchg_long_gcc can
+    // later be replaced with inline asm along these lines:
+
+    volatile jlong_accessor evl, cvl, rv;
+    evl.long_value = exchange_value;
+    cvl.long_value = compare_value;
+    int mp = os::is_MP();
+
+    __asm__ volatile ("cmp $0, %%esi\n\t"
+       "je 1f \n\t"
+       "lock\n\t"
+       "1: cmpxchg8b (%%edi)\n\t"
+       : "=a"(cvl.words[0]),   "=d"(cvl.words[1])
+       : "a"(cvl.words[0]), "d"(cvl.words[1]),
+         "b"(evl.words[0]), "c"(evl.words[1]),
+         "D"(dest), "S"(mp)
+       :  "cc", "memory");
+    return cvl.long_value;
+    #endif // if 0
+#endif // AMD64
+  }
+}
+#undef LOCK_IF_MP
+
+#endif // _GNU_SOURCE
+
+#endif // OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
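
LOCK_IF_MP turns the lock prefix itself into a runtime decision: the mp operand is tested before each locked instruction and the prefix is skipped on uniprocessors. Expanded by hand for the cmpxchgl in _Atomic_cmpxchg (mp bound to %4), the template reads:

//    cmp  $0, %4          ; is_mp == 0 means uniprocessor
//    je   1f              ; uniprocessor -> skip the lock prefix
//    lock                 ; MP -> make the next instruction atomic
// 1: cmpxchgl %1, (%3)    ; the compare-and-exchange itself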
--- a/hotspot/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,279 +0,0 @@
-/*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_INLINE_HPP
-#define OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-
-
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-inline void Atomic::inc    (volatile jint*     dest) { (void)add    (1, dest); }
-inline void Atomic::inc_ptr(volatile intptr_t* dest) { (void)add_ptr(1, dest); }
-inline void Atomic::inc_ptr(volatile void*     dest) { (void)add_ptr(1, dest); }
-
-inline void Atomic::dec    (volatile jint*     dest) { (void)add    (-1, dest); }
-inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
-inline void Atomic::dec_ptr(volatile void*     dest) { (void)add_ptr(-1, dest); }
-
-// For Sun Studio - implementation is in solaris_x86_[32/64].il.
-// For gcc - implementation is just below.
-
-// The lock prefix can be omitted for certain instructions on uniprocessors; to
-// facilitate this, os::is_MP() is passed as an additional argument.  64-bit
-// processors are assumed to be multi-threaded and/or multi-core, so the extra
-// argument is unnecessary.
-#ifndef _LP64
-#define IS_MP_DECL() , int is_mp
-#define IS_MP_ARG()  , (int) os::is_MP()
-#else
-#define IS_MP_DECL()
-#define IS_MP_ARG()
-#endif // _LP64
-
-extern "C" {
-  jint _Atomic_add(jint add_value, volatile jint* dest IS_MP_DECL());
-  jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
-  jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest,
-                       jbyte compare_value IS_MP_DECL());
-  jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest,
-                       jint compare_value IS_MP_DECL());
-  jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest,
-                             jlong compare_value IS_MP_DECL());
-}
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  return _Atomic_add(add_value, dest IS_MP_ARG());
-}
-
-inline jint     Atomic::xchg       (jint     exchange_value, volatile jint*     dest) {
-  return _Atomic_xchg(exchange_value, dest);
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg_byte(exchange_value, dest, compare_value IS_MP_ARG());
-}
-
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg(exchange_value, dest, compare_value IS_MP_ARG());
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value IS_MP_ARG());
-}
-
-
-#ifdef AMD64
-inline void Atomic::store    (jlong    store_value, jlong*             dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-extern "C" jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);
-extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
-}
-
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
-
-#else // !AMD64
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add((jint)add_value, (volatile jint*)dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-extern "C" void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
-
-inline jlong Atomic::load(volatile jlong* src) {
-  volatile jlong dest;
-  _Atomic_move_long(src, &dest);
-  return dest;
-}
-
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, dest);
-}
-
-#endif // AMD64
-
-#ifdef _GNU_SOURCE
-// Add a lock prefix to an instruction on an MP machine
-#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
-
-extern "C" {
-  inline jint _Atomic_add(jint add_value, volatile jint* dest, int mp) {
-    jint addend = add_value;
-    __asm__ volatile (  LOCK_IF_MP(%3) "xaddl %0,(%2)"
-                    : "=r" (addend)
-                    : "0" (addend), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-    return addend + add_value;
-  }
-
-#ifdef AMD64
-  inline jlong _Atomic_add_long(jlong add_value, volatile jlong* dest, int mp) {
-    intptr_t addend = add_value;
-    __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
-                        : "=r" (addend)
-                        : "0" (addend), "r" (dest), "r" (mp)
-                        : "cc", "memory");
-    return addend + add_value;
-  }
-
-  inline jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest) {
-    __asm__ __volatile__ ("xchgq (%2),%0"
-                        : "=r" (exchange_value)
-                        : "0" (exchange_value), "r" (dest)
-                        : "memory");
-    return exchange_value;
-  }
-
-#endif // AMD64
-
-  inline jint _Atomic_xchg(jint exchange_value, volatile jint* dest) {
-    __asm__ __volatile__ ("xchgl (%2),%0"
-                          : "=r" (exchange_value)
-                        : "0" (exchange_value), "r" (dest)
-                        : "memory");
-    return exchange_value;
-  }
-
-  inline jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, int mp) {
-    __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
-                    : "=a" (exchange_value)
-                    : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-    return exchange_value;
-  }
-
-
-  inline jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, int mp) {
-    __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
-                    : "=a" (exchange_value)
-                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-    return exchange_value;
-  }
-
-  // This is the interface to the atomic instruction in solaris_i486.s.
-  jlong _Atomic_cmpxchg_long_gcc(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp);
-
-  inline jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp) {
-#ifdef AMD64
-    __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
-                        : "=a" (exchange_value)
-                        : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                        : "cc", "memory");
-    return exchange_value;
-#else
-    return _Atomic_cmpxchg_long_gcc(exchange_value, dest, compare_value, os::is_MP());
-
-    #if 0
-    // The code below does not work, presumably because of a bug in gcc.
-    // The error message says:
-    //   can't find a register in class BREG while reloading asm
-    // However, this code is kept so that _Atomic_cmpxchg_long_gcc can
-    // later be replaced with inline asm along these lines:
-
-    volatile jlong_accessor evl, cvl, rv;
-    evl.long_value = exchange_value;
-    cvl.long_value = compare_value;
-    int mp = os::is_MP();
-
-    __asm__ volatile ("cmp $0, %%esi\n\t"
-       "je 1f \n\t"
-       "lock\n\t"
-       "1: cmpxchg8b (%%edi)\n\t"
-       : "=a"(cvl.words[0]),   "=d"(cvl.words[1])
-       : "a"(cvl.words[0]), "d"(cvl.words[1]),
-         "b"(evl.words[0]), "c"(evl.words[1]),
-         "D"(dest), "S"(mp)
-       :  "cc", "memory");
-    return cvl.long_value;
-    #endif // if 0
-#endif // AMD64
-  }
-}
-#undef LOCK_IF_MP
-
-#endif // _GNU_SOURCE
-
-#endif // OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_INLINE_HPP
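The Solaris/x86 file deleted above implements CAS with hand-written inline asm plus the LOCK_IF_MP prefix trick, including a commented-out cmpxchg8b variant defeated by a gcc register-allocation bug. For comparison only -- this sketch is not part of the changeset -- the same contract can be expressed with GCC's __sync builtin, which emits the locked cmpxchg itself:

    // Illustrative sketch, assuming a GCC-compatible compiler; my_cmpxchg is a
    // hypothetical name. Returns the prior value of *dest, matching the
    // contract of the removed _Atomic_cmpxchg above.
    static inline int my_cmpxchg(int exchange_value, volatile int* dest, int compare_value) {
      return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
    }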
--- a/hotspot/src/os_cpu/solaris_x86/vm/orderAccess_solaris_x86.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/os_cpu/solaris_x86/vm/orderAccess_solaris_x86.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
 #define OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
 
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 
--- a/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -38,7 +38,7 @@
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/extendedPC.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
+#define OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
+
+#include "runtime/os.hpp"
+
+// The following alternative implementations are needed because
+// Windows 95 doesn't support (some of) the corresponding Windows NT
+// calls. Furthermore, these versions allow inlining in the caller.
+// (More precisely: The documentation for InterlockedExchange says
+// it is supported for Windows 95. However, when single-stepping
+// through the assembly code we cannot step into the routine and
+// when looking at the routine address we see only garbage code.
+// Better safe than sorry!). Was bug 7/31/98 (gri).
+//
+// Performance note: On uniprocessors, the 'lock' prefixes are not
+// necessary (and expensive). We should generate separate cases if
+// this becomes a performance problem.
+
+#pragma warning(disable: 4035) // Disables warnings reporting missing return statement
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+
+
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+// Adding a lock prefix to an instruction on an MP machine.
+// VC++ doesn't accept a lock prefix on a line of its own, so we
+// can't place a label directly after a "lock" mnemonic. By emitting
+// the lock-prefix byte (0xF0) ourselves, we can define a label after it.
+#define LOCK_IF_MP(mp) __asm cmp mp, 0  \
+                       __asm je L0      \
+                       __asm _emit 0xF0 \
+                       __asm L0:
+
+#ifdef AMD64
+inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
+
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+  return (jint)(*os::atomic_add_func)(add_value, dest);
+}
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  return (intptr_t)(*os::atomic_add_ptr_func)(add_value, dest);
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)(*os::atomic_add_ptr_func)(add_value, (volatile intptr_t*)dest);
+}
+
+inline void Atomic::inc    (volatile jint*     dest) {
+  (void)add    (1, dest);
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  (void)add_ptr(1, dest);
+}
+
+inline void Atomic::inc_ptr(volatile void*     dest) {
+  (void)add_ptr(1, dest);
+}
+
+inline void Atomic::dec    (volatile jint*     dest) {
+  (void)add    (-1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  (void)add_ptr(-1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile void*     dest) {
+  (void)add_ptr(-1, dest);
+}
+
+inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
+  return (jint)(*os::atomic_xchg_func)(exchange_value, dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest);
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+  return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
+}
+
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
+    return (*os::atomic_cmpxchg_byte_func)(exchange_value, dest, compare_value);
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+  return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+#else // !AMD64
+
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+  int mp = os::is_MP();
+  __asm {
+    mov edx, dest;
+    mov eax, add_value;
+    mov ecx, eax;
+    LOCK_IF_MP(mp)
+    xadd dword ptr [edx], eax;
+    add eax, ecx;
+  }
+}
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add((jint)add_value, (volatile jint*)dest);
+}
+
+inline void Atomic::inc    (volatile jint*     dest) {
+  // alternative for InterlockedIncrement
+  int mp = os::is_MP();
+  __asm {
+    mov edx, dest;
+    LOCK_IF_MP(mp)
+    add dword ptr [edx], 1;
+  }
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  inc((volatile jint*)dest);
+}
+
+inline void Atomic::inc_ptr(volatile void*     dest) {
+  inc((volatile jint*)dest);
+}
+
+inline void Atomic::dec    (volatile jint*     dest) {
+  // alternative for InterlockedDecrement
+  int mp = os::is_MP();
+  __asm {
+    mov edx, dest;
+    LOCK_IF_MP(mp)
+    sub dword ptr [edx], 1;
+  }
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  dec((volatile jint*)dest);
+}
+
+inline void Atomic::dec_ptr(volatile void*     dest) {
+  dec((volatile jint*)dest);
+}
+
+inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
+  // alternative for InterlockedExchange
+  __asm {
+    mov eax, exchange_value;
+    mov ecx, dest;
+    xchg eax, dword ptr [ecx];
+  }
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
+  // alternative for InterlockedCompareExchange
+  int mp = os::is_MP();
+  __asm {
+    mov edx, dest
+    mov cl, exchange_value
+    mov al, compare_value
+    LOCK_IF_MP(mp)
+    cmpxchg byte ptr [edx], cl
+  }
+}
+
+inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+  // alternative for InterlockedCompareExchange
+  int mp = os::is_MP();
+  __asm {
+    mov edx, dest
+    mov ecx, exchange_value
+    mov eax, compare_value
+    LOCK_IF_MP(mp)
+    cmpxchg dword ptr [edx], ecx
+  }
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+  int mp = os::is_MP();
+  jint ex_lo  = (jint)exchange_value;
+  jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
+  jint cmp_lo = (jint)compare_value;
+  jint cmp_hi = *( ((jint*)&compare_value) + 1 );
+  __asm {
+    push ebx
+    push edi
+    mov eax, cmp_lo
+    mov edx, cmp_hi
+    mov edi, dest
+    mov ebx, ex_lo
+    mov ecx, ex_hi
+    LOCK_IF_MP(mp)
+    cmpxchg8b qword ptr [edi]
+    pop edi
+    pop ebx
+  }
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  volatile jlong* pdest = &dest;
+  __asm {
+    mov eax, src
+    fild     qword ptr [eax]
+    mov eax, pdest
+    fistp    qword ptr [eax]
+  }
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  volatile jlong* src = &store_value;
+  __asm {
+    mov eax, src
+    fild     qword ptr [eax]
+    mov eax, dest
+    fistp    qword ptr [eax]
+  }
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  Atomic::store(store_value, (volatile jlong*)dest);
+}
+
+#endif // AMD64
+
+#pragma warning(default: 4035) // Enables warnings reporting missing return statement
+
+#endif // OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
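Callers typically layer a retry loop over the primitives this new header defines. A minimal sketch, assuming the Atomic::cmpxchg declared above (with its defaulted memory order) and a hypothetical helper name -- illustrative, not part of the changeset:

    // Hypothetical helper, not in HotSpot: bump a counter, but never past limit.
    inline jint saturating_inc(volatile jint* counter, jint limit) {
      while (true) {
        jint old = *counter;
        if (old >= limit) return old;                      // saturate, don't wrap
        if (Atomic::cmpxchg(old + 1, counter, old) == old) {
          return old + 1;                                  // CAS won; increment is visible
        }
        // CAS lost a race with another thread; reload and retry.
      }
    }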
--- a/hotspot/src/os_cpu/windows_x86/vm/atomic_windows_x86.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,304 +0,0 @@
-/*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_INLINE_HPP
-#define OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-// The following alternative implementations are needed because
-// Windows 95 doesn't support (some of) the corresponding Windows NT
-// calls. Furthermore, these versions allow inlining in the caller.
-// (More precisely: The documentation for InterlockedExchange says
-// it is supported for Windows 95. However, when single-stepping
-// through the assembly code we cannot step into the routine and
-// when looking at the routine address we see only garbage code.
-// Better safe than sorry!). Was bug 7/31/98 (gri).
-//
-// Performance note: On uniprocessors, the 'lock' prefixes are not
-// necessary (and expensive). We should generate separate cases if
-// this becomes a performance problem.
-
-#pragma warning(disable: 4035) // Disables warnings reporting missing return statement
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-
-
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-// Adding a lock prefix to an instruction on MP machine
-// VC++ doesn't like the lock prefix to be on a single line
-// so we can't insert a label after the lock prefix.
-// By emitting a lock prefix, we can define a label after it.
-#define LOCK_IF_MP(mp) __asm cmp mp, 0  \
-                       __asm je L0      \
-                       __asm _emit 0xF0 \
-                       __asm L0:
-
-#ifdef AMD64
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  return (jint)(*os::atomic_add_func)(add_value, dest);
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)(*os::atomic_add_ptr_func)(add_value, dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)(*os::atomic_add_ptr_func)(add_value, (volatile intptr_t*)dest);
-}
-
-inline void Atomic::inc    (volatile jint*     dest) {
-  (void)add    (1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  (void)add_ptr(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  (void)add_ptr(1, dest);
-}
-
-inline void Atomic::dec    (volatile jint*     dest) {
-  (void)add    (-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  (void)add_ptr(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  (void)add_ptr(-1, dest);
-}
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
-  return (jint)(*os::atomic_xchg_func)(exchange_value, dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest);
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
-    return (*os::atomic_cmpxchg_byte_func)(exchange_value, dest, compare_value);
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
-
-#else // !AMD64
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  int mp = os::is_MP();
-  __asm {
-    mov edx, dest;
-    mov eax, add_value;
-    mov ecx, eax;
-    LOCK_IF_MP(mp)
-    xadd dword ptr [edx], eax;
-    add eax, ecx;
-  }
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void Atomic::inc    (volatile jint*     dest) {
-  // alternative for InterlockedIncrement
-  int mp = os::is_MP();
-  __asm {
-    mov edx, dest;
-    LOCK_IF_MP(mp)
-    add dword ptr [edx], 1;
-  }
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  inc((volatile jint*)dest);
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  inc((volatile jint*)dest);
-}
-
-inline void Atomic::dec    (volatile jint*     dest) {
-  // alternative for InterlockedDecrement
-  int mp = os::is_MP();
-  __asm {
-    mov edx, dest;
-    LOCK_IF_MP(mp)
-    sub dword ptr [edx], 1;
-  }
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  dec((volatile jint*)dest);
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  dec((volatile jint*)dest);
-}
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
-  // alternative for InterlockedExchange
-  __asm {
-    mov eax, exchange_value;
-    mov ecx, dest;
-    xchg eax, dword ptr [ecx];
-  }
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
-  // alternative for InterlockedCompareExchange
-  int mp = os::is_MP();
-  __asm {
-    mov edx, dest
-    mov cl, exchange_value
-    mov al, compare_value
-    LOCK_IF_MP(mp)
-    cmpxchg byte ptr [edx], cl
-  }
-}
-
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  // alternative for InterlockedCompareExchange
-  int mp = os::is_MP();
-  __asm {
-    mov edx, dest
-    mov ecx, exchange_value
-    mov eax, compare_value
-    LOCK_IF_MP(mp)
-    cmpxchg dword ptr [edx], ecx
-  }
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  int mp = os::is_MP();
-  jint ex_lo  = (jint)exchange_value;
-  jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
-  jint cmp_lo = (jint)compare_value;
-  jint cmp_hi = *( ((jint*)&compare_value) + 1 );
-  __asm {
-    push ebx
-    push edi
-    mov eax, cmp_lo
-    mov edx, cmp_hi
-    mov edi, dest
-    mov ebx, ex_lo
-    mov ecx, ex_hi
-    LOCK_IF_MP(mp)
-    cmpxchg8b qword ptr [edi]
-    pop edi
-    pop ebx
-  }
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) {
-  volatile jlong dest;
-  volatile jlong* pdest = &dest;
-  __asm {
-    mov eax, src
-    fild     qword ptr [eax]
-    mov eax, pdest
-    fistp    qword ptr [eax]
-  }
-  return dest;
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  volatile jlong* src = &store_value;
-  __asm {
-    mov eax, src
-    fild     qword ptr [eax]
-    mov eax, dest
-    fistp    qword ptr [eax]
-  }
-}
-
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  Atomic::store(store_value, (volatile jlong*)dest);
-}
-
-#endif // AMD64
-
-#pragma warning(default: 4035) // Enables warnings reporting missing return statement
-
-#endif // OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_INLINE_HPP
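The deletion above pairs with the new atomic_windows_x86.hpp earlier in this changeset: a rename from .inline.hpp to .hpp with the body essentially unchanged. One detail worth spelling out in both versions is the 32-bit jlong path: a pair of plain 32-bit mov instructions would not be atomic, so Atomic::load/store route through the x87 unit, which transfers all 8 bytes in one memory access. Conceptually (an annotation, not actual HotSpot code):

    // What the fild/fistp pair in Atomic::load(volatile jlong*) accomplishes:
    //   fild  qword ptr [src]   ; read 8 bytes onto the x87 stack in one access
    //   fistp qword ptr [dst]   ; write 8 bytes back out in one access
    // i.e. *dst = *src as a single indivisible 64-bit transfer on IA-32.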
--- a/hotspot/src/os_cpu/windows_x86/vm/orderAccess_windows_x86.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/os_cpu/windows_x86/vm/orderAccess_windows_x86.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #define OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
 
 #include <intrin.h>
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 
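This is the recurring motif of the changeset: every include of "runtime/atomic.inline.hpp" becomes "runtime/atomic.hpp". Presumably -- the umbrella header itself is outside this section -- atomic.hpp now pulls in the platform definitions, so call sites need exactly one include. A sketch of the intended shape after the change:

    // Sketch, assuming the post-change include structure:
    #include "runtime/atomic.hpp"   // single entry point; the platform header
                                    // (e.g. the new atomic_windows_x86.hpp) sits behind it
    void bump(volatile jint* ctr) {
      Atomic::inc(ctr);             // usable with only the umbrella include
    }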
--- a/hotspot/src/share/vm/asm/assembler.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/asm/assembler.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #include "asm/codeBuffer.hpp"
 #include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 #include "runtime/thread.hpp"
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -4266,7 +4266,7 @@
 #if INCLUDE_TRACE
   EventCompilerInlining event;
   if (event.should_commit()) {
-    event.set_compileID(compilation()->env()->task()->compile_id());
+    event.set_compileId(compilation()->env()->task()->compile_id());
     event.set_message(msg);
     event.set_succeeded(success);
     event.set_bci(bci());
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -49,7 +49,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/interfaceSupport.hpp"
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1149,10 +1149,10 @@
 
 void ciEnv::report_failure(const char* reason) {
   // Create and fire JFR event
-  EventCompilerFailure event;
+  EventCompilationFailure event;
   if (event.should_commit()) {
-    event.set_compileID(compile_id());
-    event.set_failure(reason);
+    event.set_compileId(compile_id());
+    event.set_failureMessage(reason);
     event.commit();
   }
 }
--- a/hotspot/src/share/vm/ci/ciMethod.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1410,11 +1410,11 @@
 }
 
 #if INCLUDE_TRACE
-TraceStructCiMethod ciMethod::to_trace_struct() const {
-  TraceStructCiMethod result;
-  result.set_class(holder()->name()->as_utf8());
+TraceStructCalleeMethod ciMethod::to_trace_struct() const {
+  TraceStructCalleeMethod result;
+  result.set_type(holder()->name()->as_utf8());
   result.set_name(name()->as_utf8());
-  result.set_signature(signature()->as_symbol()->as_utf8());
+  result.set_descriptor(signature()->as_symbol()->as_utf8());
   return result;
 }
 #endif
--- a/hotspot/src/share/vm/ci/ciMethod.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/ci/ciMethod.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -342,7 +342,7 @@
   void print_short_name(outputStream* st = tty);
 
 #if INCLUDE_TRACE
-  TraceStructCiMethod to_trace_struct() const;
+  TraceStructCalleeMethod to_trace_struct() const;
 #endif
 };
 
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -5402,6 +5402,17 @@
   debug_only(ik->verify();)
 }
 
+static bool relax_format_check_for(ClassLoaderData* loader_data) {
+  bool trusted = (loader_data->is_the_null_class_loader_data() ||
+                  SystemDictionary::is_platform_class_loader(loader_data->class_loader()));
+  bool need_verify =
+    // verifyAll
+    (BytecodeVerificationLocal && BytecodeVerificationRemote) ||
+    // verifyRemote
+    (!BytecodeVerificationLocal && BytecodeVerificationRemote && !trusted);
+  return !need_verify;
+}
+
 ClassFileParser::ClassFileParser(ClassFileStream* stream,
                                  Symbol* name,
                                  ClassLoaderData* loader_data,
@@ -5490,7 +5501,7 @@
 
   // Check if verification needs to be relaxed for this class file
   // Do not restrict it to jdk1.0 or jdk1.1 to maintain backward compatibility (4982376)
-  _relax_verify = Verifier::relax_verify_for(_loader_data->class_loader());
+  _relax_verify = relax_format_check_for(_loader_data);
 
   parse_stream(stream, CHECK);
 
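The relax_format_check_for helper introduced above inverts a need-verify predicate; tabulating it makes the intent clearer (an illustrative comment only, not part of the changeset):

    // relax_format_check_for(loader_data), spelled out:
    //   BytecodeVerificationLocal  BytecodeVerificationRemote  trusted | relaxed?
    //   true                       true                        any     | no   (verifyAll)
    //   false                      true                        false   | no   (verifyRemote, untrusted loader)
    //   false                      true                        true    | yes
    //   any                        false                       any     | yes  (no remote verification requested)

So format checks stay strict exactly when full verification is on, or when remote verification applies to an untrusted loader.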
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -63,7 +63,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutex.hpp"
--- a/hotspot/src/share/vm/classfile/klassFactory.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/classfile/klassFactory.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -31,12 +31,12 @@
 #include "prims/jvmtiEnvBase.hpp"
 #include "trace/traceMacros.hpp"
 
-static ClassFileStream* prologue(ClassFileStream* stream,
-                                 Symbol* name,
-                                 ClassLoaderData* loader_data,
-                                 Handle protection_domain,
-                                 JvmtiCachedClassFileData** cached_class_file,
-                                 TRAPS) {
+static ClassFileStream* check_class_file_load_hook(ClassFileStream* stream,
+                                                   Symbol* name,
+                                                   ClassLoaderData* loader_data,
+                                                   Handle protection_domain,
+                                                   JvmtiCachedClassFileData** cached_class_file,
+                                                   TRAPS) {
 
   assert(stream != NULL, "invariant");
 
@@ -102,8 +102,6 @@
   assert(loader_data != NULL, "invariant");
   assert(THREAD->is_Java_thread(), "must be a JavaThread");
 
-  bool changed_by_loadhook = false;
-
   ResourceMark rm;
   HandleMark hm;
 
@@ -111,12 +109,15 @@
 
   ClassFileStream* old_stream = stream;
 
-  stream = prologue(stream,
-                    name,
-                    loader_data,
-                    protection_domain,
-                    &cached_class_file,
-                    CHECK_NULL);
+  // Skip this processing for VM anonymous classes
+  if (host_klass == NULL) {
+    stream = check_class_file_load_hook(stream,
+                                        name,
+                                        loader_data,
+                                        protection_domain,
+                                        &cached_class_file,
+                                        CHECK_NULL);
+  }
 
   ClassFileParser parser(stream,
                          name,
--- a/hotspot/src/share/vm/classfile/stringTable.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/classfile/stringTable.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -34,7 +34,7 @@
 #include "memory/filemap.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/macros.hpp"
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -34,7 +34,7 @@
 #include "memory/filemap.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "utilities/hashtable.inline.hpp"
 
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1641,7 +1641,6 @@
       JvmtiExport::post_class_load((JavaThread *) THREAD, k());
 
   }
-  TRACE_KLASS_DEFINITION(k, THREAD);
   class_define_event(k);
 }
 
--- a/hotspot/src/share/vm/classfile/verifier.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/classfile/verifier.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -88,7 +88,7 @@
     BytecodeVerificationLocal : BytecodeVerificationRemote;
 }
 
-bool Verifier::relax_verify_for(oop loader) {
+bool Verifier::relax_access_for(oop loader) {
   bool trusted = java_lang_ClassLoader::is_trusted_loader(loader);
   bool need_verify =
     // verifyAll
--- a/hotspot/src/share/vm/classfile/verifier.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/classfile/verifier.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,8 +58,8 @@
   // -Xverify:all/none override this value
   static bool should_verify_for(oop class_loader, bool should_verify_class);
 
-  // Relax certain verifier checks to enable some broken 1.1 apps to run on 1.2.
-  static bool relax_verify_for(oop class_loader);
+  // Relax certain access checks to enable some broken 1.1 apps to run on 1.2.
+  static bool relax_access_for(oop class_loader);
 
   // Print output for class+resolve
   static void trace_class_resolution(Klass* resolve_class, InstanceKlass* verify_class);
--- a/hotspot/src/share/vm/code/nmethod.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -41,7 +41,7 @@
 #include "oops/methodData.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiImpl.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/sharedRuntime.hpp"
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -42,7 +42,7 @@
 #include "prims/nativeLookup.hpp"
 #include "prims/whitebox.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -1755,7 +1755,7 @@
   assert(task->compile_id() != CICrashAt, "just as planned");
   if (event.should_commit()) {
     event.set_method(task->method());
-    event.set_compileID(task->compile_id());
+    event.set_compileId(task->compile_id());
     event.set_compileLevel(task->comp_level());
     event.set_succeded(task->is_success());
     event.set_isOsr(task->osr_bci() != CompileBroker::standard_entry_bci);
@@ -2399,4 +2399,3 @@
     }
   }
 }
-
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -61,7 +61,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/globals_extension.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
--- a/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -50,7 +50,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
--- a/hotspot/src/share/vm/gc/g1/collectionSetChooser.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/collectionSetChooser.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -26,7 +26,7 @@
 #include "gc/g1/collectionSetChooser.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/shared/space.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 
 // Even though we don't use the GC efficiency in our heuristics as
 // much as we used to, we still order according to GC efficiency. This
--- a/hotspot/src/share/vm/gc/g1/dirtyCardQueue.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/dirtyCardQueue.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -27,7 +27,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/workgroup.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.inline.hpp"
--- a/hotspot/src/share/vm/gc/g1/g1Analytics.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1Analytics.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -316,14 +316,10 @@
   return get_new_size_prediction(_pending_cards_seq);
 }
 
-double G1Analytics::oldest_known_gc_end_time_sec() const {
+double G1Analytics::last_known_gc_end_time_sec() const {
   return _recent_prev_end_times_for_all_gcs_sec->oldest();
 }
 
-double G1Analytics::last_known_gc_end_time_sec() const {
-  return _recent_prev_end_times_for_all_gcs_sec->last();
-}
-
 void G1Analytics::update_recent_gc_times(double end_time_sec,
                                          double pause_time_ms) {
   _recent_gc_times_ms->add(pause_time_ms);
--- a/hotspot/src/share/vm/gc/g1/g1Analytics.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1Analytics.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -155,7 +155,6 @@
   void update_recent_gc_times(double end_time_sec, double elapsed_ms);
   void compute_pause_time_ratio(double interval_ms, double pause_time_ms);
 
-  double oldest_known_gc_end_time_sec() const;
   double last_known_gc_end_time_sec() const;
 };
 
--- a/hotspot/src/share/vm/gc/g1/g1CardLiveData.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CardLiveData.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -30,7 +30,7 @@
 #include "gc/shared/workgroup.hpp"
 #include "logging/log.hpp"
 #include "memory/universe.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/os.hpp"
 #include "utilities/bitMap.inline.hpp"
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -28,7 +28,6 @@
 #include "classfile/symbolTable.hpp"
 #include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
-#include "gc/g1/g1Analytics.hpp"
 #include "gc/g1/bufferingOopClosure.hpp"
 #include "gc/g1/concurrentG1Refine.hpp"
 #include "gc/g1/concurrentG1RefineThread.hpp"
@@ -75,7 +74,7 @@
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/init.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
@@ -2474,19 +2473,8 @@
 }
 
 jlong G1CollectedHeap::millis_since_last_gc() {
-  jlong now = os::elapsed_counter() / NANOSECS_PER_MILLISEC;
-  const G1Analytics* analytics = _g1_policy->analytics();
-  double last = analytics->last_known_gc_end_time_sec();
-  jlong ret_val = now - (last * 1000);
-  if (ret_val < 0) {
-    // See the notes in GenCollectedHeap::millis_since_last_gc()
-    // for more information about the implementation.
-    log_warning(gc)("Detected clock going backwards. "
-      "Milliseconds since last GC would be " JLONG_FORMAT
-      ". returning zero instead.", ret_val);
-    return 0;
-  }
-  return ret_val;
+  // assert(false, "NYI");
+  return 0;
 }
 
 void G1CollectedHeap::prepare_for_verify() {
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -52,7 +52,7 @@
 #include "memory/allocation.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/prefetch.inline.hpp"
--- a/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -604,7 +604,7 @@
     _analytics->report_alloc_rate_ms(alloc_rate_ms);
 
     double interval_ms =
-      (end_time_sec - _analytics->oldest_known_gc_end_time_sec()) * 1000.0;
+      (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0;
     _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
     _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
   }
--- a/hotspot/src/share/vm/gc/g1/g1EvacStats.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1EvacStats.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #define SHARE_VM_GC_G1_G1EVACSTATS_INLINE_HPP
 
 #include "gc/g1/g1EvacStats.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 
 inline void G1EvacStats::add_direct_allocated(size_t value) {
   Atomic::add_ptr(value, &_direct_allocated);
--- a/hotspot/src/share/vm/gc/g1/g1HotCardCache.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1HotCardCache.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #include "gc/g1/dirtyCardQueue.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1HotCardCache.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 
 G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
   _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}
--- a/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/fprofiler.hpp"
 #include "runtime/synchronizer.hpp"
--- a/hotspot/src/share/vm/gc/g1/g1SATBCardTableModRefBS.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1SATBCardTableModRefBS.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -30,7 +30,7 @@
 #include "gc/shared/memset_with_concurrent_readers.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/thread.inline.hpp"
--- a/hotspot/src/share/vm/gc/g1/g1StringDedup.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1StringDedup.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -31,7 +31,7 @@
 #include "gc/g1/g1StringDedupStat.hpp"
 #include "gc/g1/g1StringDedupTable.hpp"
 #include "gc/g1/g1StringDedupThread.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 
 bool G1StringDedup::_enabled = false;
 
--- a/hotspot/src/share/vm/gc/g1/g1StringDedup.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1StringDedup.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -84,7 +84,6 @@
 
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
-#include "runtime/atomic.hpp"
 
 class OopClosure;
 class BoolObjectClosure;
--- a/hotspot/src/share/vm/gc/g1/g1StringDedupQueue.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1StringDedupQueue.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -30,7 +30,7 @@
 #include "gc/shared/gcLocker.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "utilities/stack.inline.hpp"
 
--- a/hotspot/src/share/vm/gc/g1/g1StringDedupThread.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1StringDedupThread.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -31,7 +31,7 @@
 #include "gc/g1/suspendibleThreadSet.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 
 G1StringDedupThread* G1StringDedupThread::_thread = NULL;
 
--- a/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -39,7 +39,7 @@
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.inline.hpp"
 
 int    HeapRegion::LogOfHRGrainBytes = 0;
--- a/hotspot/src/share/vm/gc/g1/heapRegion.inline.hpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.inline.hpp	Fri Aug 26 14:47:52 2016 -0700
@@ -30,7 +30,7 @@
 #include "gc/g1/heapRegion.hpp"
 #include "gc/shared/space.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 
 inline HeapWord* G1ContiguousSpace::allocate_impl(size_t min_word_size,
                                                   size_t desired_word_size,
--- a/hotspot/src/share/vm/gc/g1/heapRegionRemSet.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/heapRegionRemSet.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -33,7 +33,7 @@
 #include "memory/allocation.hpp"
 #include "memory/padded.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "utilities/bitMap.inline.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/growableArray.hpp"
--- a/hotspot/src/share/vm/gc/g1/heapRegionTracer.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/heapRegionTracer.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -39,7 +39,7 @@
     e.set_to(to);
     e.set_start(start);
     e.set_used(used);
-    e.set_allocContext(allocationContext);
+    e.set_allocationContext(allocationContext);
     e.commit();
   }
 }
--- a/hotspot/src/share/vm/gc/g1/sparsePRT.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/sparsePRT.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -30,7 +30,7 @@
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "memory/allocation.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
 
 // Check that the size of the SparsePRTEntry is evenly divisible by the maximum
--- a/hotspot/src/share/vm/gc/parallel/gcTaskThread.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/parallel/gcTaskThread.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -30,7 +30,7 @@
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/os.hpp"
--- a/hotspot/src/share/vm/gc/parallel/mutableNUMASpace.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/parallel/mutableNUMASpace.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,6 +1,5 @@
-
 /*
- * Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +27,7 @@
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/spaceDecorator.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/thread.inline.hpp"
 
 MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment) {
--- a/hotspot/src/share/vm/gc/parallel/mutableSpace.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/parallel/mutableSpace.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #include "gc/parallel/mutableSpace.hpp"
 #include "gc/shared/spaceDecorator.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/macros.hpp"
--- a/hotspot/src/share/vm/gc/parallel/parMarkBitMap.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/parallel/parMarkBitMap.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -27,7 +27,7 @@
 #include "gc/parallel/psCompactionManager.inline.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/bitMap.inline.hpp"
--- a/hotspot/src/share/vm/gc/parallel/psCompactionManager.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/parallel/psCompactionManager.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -38,7 +38,7 @@
 #include "oops/instanceMirrorKlass.inline.hpp"
 #include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 
 PSOldGen*            ParCompactionManager::_old_gen = NULL;
 ParCompactionManager**  ParCompactionManager::_manager_array = NULL;
--- a/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -57,7 +57,7 @@
 #include "oops/methodData.hpp"
 #include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/fprofiler.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/vmThread.hpp"
--- a/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -46,7 +46,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/java.hpp"
 #include "runtime/prefetch.inline.hpp"
 #include "runtime/thread.inline.hpp"
--- a/hotspot/src/share/vm/gc/shared/allocTracer.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/shared/allocTracer.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -29,18 +29,18 @@
 #include "utilities/globalDefinitions.hpp"
 
 void AllocTracer::send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size) {
-  EventAllocObjectOutsideTLAB event;
+  EventObjectAllocationOutsideTLAB event;
   if (event.should_commit()) {
-    event.set_class(klass());
+    event.set_objectClass(klass());
     event.set_allocationSize(alloc_size);
     event.commit();
   }
 }
 
 void AllocTracer::send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size) {
-  EventAllocObjectInNewTLAB event;
+  EventObjectAllocationInNewTLAB event;
   if (event.should_commit()) {
-    event.set_class(klass());
+    event.set_objectClass(klass());
     event.set_allocationSize(alloc_size);
     event.set_tlabSize(tlab_size);
     event.commit();
--- a/hotspot/src/share/vm/gc/shared/cardTableRS.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/shared/cardTableRS.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -29,7 +29,7 @@
 #include "gc/shared/space.inline.hpp"
 #include "memory/allocation.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
 #include "utilities/macros.hpp"
--- a/hotspot/src/share/vm/gc/shared/gcLocker.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/shared/gcLocker.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
 #include "gc/shared/gcLocker.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "logging/log.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/thread.inline.hpp"
 
 volatile jint GCLocker::_jni_lock_count = 0;
--- a/hotspot/src/share/vm/gc/shared/gcTraceSend.cpp	Thu Aug 25 02:10:03 2016 -0700
+++ b/hotspot/src/share/vm/gc/shared/gcTraceSend.cpp	Fri Aug 26 14:47:52 2016 -0700
@@ -43,7 +43,7 @@
 typedef uintptr_t TraceAddress;
 
 void GCTracer::send_garbage_collection_event() const {
-  EventGCGarbageCollection event(UNTIMED);
+  EventGarbageCollection event(UNTIMED);
   if (event.should_commit()) {
     event.set_gcId(GCId::current());
     event.set_name(_shared_gc_info.name());
@@ -91,7 +91,7 @@
 }
 
 void ParallelOldTracer::send_parallel_old_event() const {
-  EventGCParallelOld e(UNTIMED);
+  EventParallelOldGarbageCollection e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(GCId::current());
     e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix());
@@ -102,7 +102,7 @@
 }
 
 void YoungGCTracer::send_young_gc_event() const {
-  EventGCYoungGarbageCollection e(UNTIMED);
+  EventYoungGarbageCollection e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(GCId::current());
     e.set_tenuringThreshold(_tenuring_threshold);
@@ -127,7 +127,7 @@
   EventPromoteObjectInNewPLAB event;
   if (event.should_commit()) {
     event.set_gcId(GCId::current());
-    event.set_class(klass);
+    event.set_objectClass(klass);
     event.set_objectSize(obj_size);
     event.set_tenured(tenured);
     event.set_tenuringAge(age);