changeset 57822:d757771b92a4

8236778: Add Atomic::fetch_and_add
Reviewed-by: kbarrett, dholmes
author stefank
date Fri, 24 Jan 2020 09:15:08 +0100
parents 11e188a95589
children 5da30ab42c85
files src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp src/hotspot/share/gc/g1/g1ConcurrentMark.cpp src/hotspot/share/gc/g1/g1HotCardCache.cpp src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp src/hotspot/share/gc/g1/g1RemSet.cpp src/hotspot/share/gc/parallel/psParallelCompact.cpp src/hotspot/share/gc/shared/stringdedup/stringDedupQueue.cpp src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp src/hotspot/share/gc/shenandoah/shenandoahNMethod.inline.hpp src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp src/hotspot/share/gc/z/zArray.inline.hpp src/hotspot/share/gc/z/zMarkStackAllocator.cpp src/hotspot/share/gc/z/zNMethodTableIteration.cpp src/hotspot/share/gc/z/zPageAllocator.cpp src/hotspot/share/gc/z/zRelocationSet.inline.hpp src/hotspot/share/gc/z/zRootsIterator.cpp src/hotspot/share/runtime/atomic.hpp src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp
diffstat 32 files changed, 152 insertions(+), 146 deletions(-)
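
Context for the call-site changes below: Atomic::add() returns the updated value, while the new Atomic::fetch_and_add() returns the previous value, so the widespread "add then subtract the addend" claim idiom can be dropped. A minimal sketch of that idiom (hypothetical helper, assuming a HotSpot source file with runtime/atomic.hpp and the uint typedef in scope; not part of this patch):

    #include "runtime/atomic.hpp"

    // Claim the next index from a shared counter.
    uint claim_next(volatile uint* claimed) {
      // Before this change: post-subtract to recover the pre-increment value.
      //   uint idx = Atomic::add(claimed, 1u) - 1;
      // After this change: fetch_and_add returns the previous value directly.
      uint idx = Atomic::fetch_and_add(claimed, 1u);
      return idx;
    }
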
--- a/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp	Fri Jan 24 09:15:08 2020 +0100
@@ -93,11 +93,14 @@
 
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
--- a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Fri Jan 24 09:15:08 2020 +0100
@@ -28,11 +28,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const;
+
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return fetch_and_add(dest, add_value, order) + add_value;
+  }
 };
 
 template<>
--- a/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	Fri Jan 24 09:15:08 2020 +0100
@@ -160,11 +160,14 @@
 #endif // ARM
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
--- a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	Fri Jan 24 09:15:08 2020 +0100
@@ -33,15 +33,18 @@
 // See https://patchwork.kernel.org/patch/3575821/
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
     D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
     FULL_MEM_BARRIER;
     return res;
   }
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<size_t byte_size>
--- a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	Fri Jan 24 09:15:08 2020 +0100
@@ -67,11 +67,14 @@
 // For ARMv7 we add explicit barriers in the stubs.
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
--- a/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	Fri Jan 24 09:15:08 2020 +0100
@@ -93,11 +93,14 @@
 
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
--- a/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp	Fri Jan 24 09:15:08 2020 +0100
@@ -75,11 +75,14 @@
 }
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
--- a/src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp	Fri Jan 24 09:15:08 2020 +0100
@@ -28,11 +28,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
--- a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	Fri Jan 24 09:15:08 2020 +0100
@@ -28,11 +28,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return fetch_and_add(dest, add_value, order) + add_value;
+  }
 };
 
 template<>
--- a/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp	Fri Jan 24 09:15:08 2020 +0100
@@ -31,11 +31,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
--- a/src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp	Fri Jan 24 09:15:08 2020 +0100
@@ -31,7 +31,7 @@
 template<size_t byte_size>
 struct Atomic::PlatformAdd {
   template<typename D, typename I>
-  inline D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
+  inline D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
     D old_value = *dest;
     while (true) {
       D new_value = old_value + add_value;
@@ -41,6 +41,11 @@
     }
     return old_value + add_value;
   }
+
+  template<typename D, typename I>
+  inline D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
--- a/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp	Fri Jan 24 09:15:08 2020 +0100
@@ -41,11 +41,14 @@
 }
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 // Not using add_using_helper; see comment for cmpxchg.
--- a/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	Fri Jan 24 09:15:08 2020 +0100
@@ -54,11 +54,14 @@
 #pragma warning(disable: 4035) // Disables warnings reporting missing return statement
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 #ifdef AMD64
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Fri Jan 24 09:15:08 2020 +0100
@@ -209,7 +209,7 @@
     return NULL;
   }
 
-  size_t cur_idx = Atomic::add(&_hwm, 1u) - 1;
+  size_t cur_idx = Atomic::fetch_and_add(&_hwm, 1u);
   if (cur_idx >= _chunk_capacity) {
     return NULL;
   }
@@ -282,7 +282,7 @@
 
 void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
   assert_at_safepoint();
-  size_t idx = Atomic::add(&_num_root_regions, (size_t)1) - 1;
+  size_t idx = Atomic::fetch_and_add(&_num_root_regions, 1u);
   assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions);
   assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less or equal to "
          "end (" PTR_FORMAT ")", p2i(start), p2i(end));
@@ -310,7 +310,7 @@
     return NULL;
   }
 
-  size_t claimed_index = Atomic::add(&_claimed_root_regions, (size_t)1) - 1;
+  size_t claimed_index = Atomic::fetch_and_add(&_claimed_root_regions, 1u);
   if (claimed_index < _num_root_regions) {
     return &_root_regions[claimed_index];
   }
--- a/src/hotspot/share/gc/g1/g1HotCardCache.cpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/share/gc/g1/g1HotCardCache.cpp	Fri Jan 24 09:15:08 2020 +0100
@@ -70,7 +70,7 @@
     return card_ptr;
   }
   // Otherwise, the card is hot.
-  size_t index = Atomic::add(&_hot_cache_idx, 1u) - 1;
+  size_t index = Atomic::fetch_and_add(&_hot_cache_idx, 1u);
   if (index == _hot_cache_size) {
     // Can use relaxed store because all racing threads are writing the same
     // value and there aren't any concurrent readers.
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Fri Jan 24 09:15:08 2020 +0100
@@ -261,7 +261,7 @@
   virtual void work(uint worker_id) {
     size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
     while (true) {
-      char* touch_addr = Atomic::add(&_cur_addr, actual_chunk_size) - actual_chunk_size;
+      char* touch_addr = Atomic::fetch_and_add(&_cur_addr, actual_chunk_size);
       if (touch_addr < _start_addr || touch_addr >= _end_addr) {
         break;
       }
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp	Fri Jan 24 09:15:08 2020 +0100
@@ -180,7 +180,7 @@
 
       bool marked_as_dirty = Atomic::cmpxchg(&_contains[region], false, true) == false;
       if (marked_as_dirty) {
-        uint allocated = Atomic::add(&_cur_idx, 1u) - 1;
+        uint allocated = Atomic::fetch_and_add(&_cur_idx, 1u);
         _buffer[allocated] = region;
       }
     }
@@ -232,7 +232,7 @@
 
     void work(uint worker_id) {
       while (_cur_dirty_regions < _regions->size()) {
-        uint next = Atomic::add(&_cur_dirty_regions, _chunk_length) - _chunk_length;
+        uint next = Atomic::fetch_and_add(&_cur_dirty_regions, _chunk_length);
         uint max = MIN2(next + _chunk_length, _regions->size());
 
         for (uint i = next; i < max; i++) {
@@ -429,7 +429,7 @@
 
   uint claim_cards_to_scan(uint region, uint increment) {
     assert(region < _max_regions, "Tried to access invalid region %u", region);
-    return Atomic::add(&_card_table_scan_state[region], increment) - increment;
+    return Atomic::fetch_and_add(&_card_table_scan_state[region], increment);
   }
 
   void add_dirty_region(uint const region) {
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Fri Jan 24 09:15:08 2020 +0100
@@ -2452,7 +2452,7 @@
   }
 
   bool try_claim(PSParallelCompact::UpdateDensePrefixTask& reference) {
-    uint claimed = Atomic::add(&_counter, 1u) - 1; // -1 is so that we start with zero
+    uint claimed = Atomic::fetch_and_add(&_counter, 1u);
     if (claimed < _insert_index) {
       reference = _backing_array[claimed];
       return true;
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupQueue.cpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupQueue.cpp	Fri Jan 24 09:15:08 2020 +0100
@@ -32,7 +32,7 @@
 volatile size_t   StringDedupQueue::_claimed_index = 0;
 
 size_t StringDedupQueue::claim() {
-  return Atomic::add(&_claimed_index, size_t(1)) - 1;
+  return Atomic::fetch_and_add(&_claimed_index, 1u);
 }
 
 void StringDedupQueue::unlink_or_oops_do(StringDedupUnlinkOrOopsDoClosure* cl) {
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp	Fri Jan 24 09:15:08 2020 +0100
@@ -592,7 +592,7 @@
 }
 
 size_t StringDedupTable::claim_table_partition(size_t partition_size) {
-  return Atomic::add(&_claimed_index, partition_size) - partition_size;
+  return Atomic::fetch_and_add(&_claimed_index, partition_size);
 }
 
 void StringDedupTable::verify() {
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Fri Jan 24 09:15:08 2020 +0100
@@ -1362,7 +1362,7 @@
 
     size_t max = _heap->num_regions();
     while (_index < max) {
-      size_t cur = Atomic::add(&_index, stride) - stride;
+      size_t cur = Atomic::fetch_and_add(&_index, stride);
       size_t start = cur;
       size_t end = MIN2(cur + stride, max);
       if (start >= max) break;
--- a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp	Fri Jan 24 09:15:08 2020 +0100
@@ -484,7 +484,7 @@
   ShenandoahNMethod** list = _array;
   size_t max = (size_t)_length;
   while (_claimed < max) {
-    size_t cur = Atomic::add(&_claimed, stride) - stride;
+    size_t cur = Atomic::fetch_and_add(&_claimed, stride);
     size_t start = cur;
     size_t end = MIN2(cur + stride, max);
     if (start >= max) break;
--- a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.inline.hpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.inline.hpp	Fri Jan 24 09:15:08 2020 +0100
@@ -90,7 +90,7 @@
 
   size_t max = (size_t)_length;
   while (_claimed < max) {
-    size_t cur = Atomic::add(&_claimed, stride) - stride;
+    size_t cur = Atomic::fetch_and_add(&_claimed, stride);
     size_t start = cur;
     size_t end = MIN2(cur + stride, max);
     if (start >= max) break;
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp	Fri Jan 24 09:15:08 2020 +0100
@@ -522,7 +522,7 @@
                                   _options);
 
     while (true) {
-      size_t v = Atomic::add(&_claimed, 1u) - 1;
+      size_t v = Atomic::fetch_and_add(&_claimed, 1u);
       if (v < _heap->num_regions()) {
         ShenandoahHeapRegion* r = _heap->get_region(v);
         if (!r->is_humongous() && !r->is_trash()) {
--- a/src/hotspot/share/gc/z/zArray.inline.hpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/share/gc/z/zArray.inline.hpp	Fri Jan 24 09:15:08 2020 +0100
@@ -101,7 +101,7 @@
 template <typename T, bool parallel>
 inline bool ZArrayIteratorImpl<T, parallel>::next(T* elem) {
   if (parallel) {
-    const size_t next = Atomic::add(&_next, 1u) - 1u;
+    const size_t next = Atomic::fetch_and_add(&_next, 1u);
     if (next < _array->size()) {
       *elem = _array->at(next);
       return true;
--- a/src/hotspot/share/gc/z/zMarkStackAllocator.cpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/share/gc/z/zMarkStackAllocator.cpp	Fri Jan 24 09:15:08 2020 +0100
@@ -110,7 +110,7 @@
 
   // Increment top before end to make sure another
   // thread can't steal out newly expanded space.
-  addr = Atomic::add(&_top, size) - size;
+  addr = Atomic::fetch_and_add(&_top, size);
   Atomic::add(&_end, expand_size);
 
   return addr;
--- a/src/hotspot/share/gc/z/zNMethodTableIteration.cpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/share/gc/z/zNMethodTableIteration.cpp	Fri Jan 24 09:15:08 2020 +0100
@@ -58,7 +58,7 @@
     // Claim table partition. Each partition is currently sized to span
     // two cache lines. This number is just a guess, but seems to work well.
     const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
-    const size_t partition_start = MIN2(Atomic::add(&_claimed, partition_size) - partition_size, _size);
+    const size_t partition_start = MIN2(Atomic::fetch_and_add(&_claimed, partition_size), _size);
     const size_t partition_end = MIN2(partition_start + partition_size, _size);
     if (partition_start == partition_end) {
       // End of table
--- a/src/hotspot/share/gc/z/zPageAllocator.cpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/share/gc/z/zPageAllocator.cpp	Fri Jan 24 09:15:08 2020 +0100
@@ -176,7 +176,7 @@
     for (;;) {
       // Get granule offset
       const size_t size = ZGranuleSize;
-      const uintptr_t offset = Atomic::add(&_start, size) - size;
+      const uintptr_t offset = Atomic::fetch_and_add(&_start, size);
       if (offset >= _end) {
         // Done
         break;
--- a/src/hotspot/share/gc/z/zRelocationSet.inline.hpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/share/gc/z/zRelocationSet.inline.hpp	Fri Jan 24 09:15:08 2020 +0100
@@ -38,7 +38,7 @@
 
   if (parallel) {
     if (_next < nforwardings) {
-      const size_t next = Atomic::add(&_next, 1u) - 1u;
+      const size_t next = Atomic::fetch_and_add(&_next, 1u);
       if (next < nforwardings) {
         *forwarding = _relocation_set->_forwardings[next];
         return true;
--- a/src/hotspot/share/gc/z/zRootsIterator.cpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/share/gc/z/zRootsIterator.cpp	Fri Jan 24 09:15:08 2020 +0100
@@ -185,7 +185,7 @@
     _claimed(0) {}
 
 uint ZJavaThreadsIterator::claim() {
-  return Atomic::add(&_claimed, 1u) - 1u;
+  return Atomic::fetch_and_add(&_claimed, 1u);
 }
 
 void ZJavaThreadsIterator::threads_do(ThreadClosure* cl) {
--- a/src/hotspot/share/runtime/atomic.hpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/share/runtime/atomic.hpp	Fri Jan 24 09:15:08 2020 +0100
@@ -98,13 +98,19 @@
   template <typename T>
   inline static T load_acquire(const volatile T* dest);
 
-  // Atomically add to a location. Returns updated value. add*() provide:
+  // Atomically add to a location. *add*() provide:
   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
 
+  // Returns updated value.
   template<typename D, typename I>
   inline static D add(D volatile* dest, I add_value,
                       atomic_memory_order order = memory_order_conservative);
 
+  // Returns previous value.
+  template<typename D, typename I>
+  inline static D fetch_and_add(D volatile* dest, I add_value,
+                                atomic_memory_order order = memory_order_conservative);
+
   template<typename D, typename I>
   inline static D sub(D volatile* dest, I sub_value,
                       atomic_memory_order order = memory_order_conservative);
@@ -230,54 +236,34 @@
 
   // Platform-specific implementation of add.  Support for sizes of 4
   // bytes and (if different) pointer size bytes are required.  The
-  // class is a function object that must be default constructable,
-  // with these requirements:
+  // class must be default constructable, with these requirements:
   //
   // - dest is of type D*, an integral or pointer type.
   // - add_value is of type I, an integral type.
   // - sizeof(I) == sizeof(D).
   // - if D is an integral type, I == D.
+  // - order is of type atomic_memory_order.
   // - platform_add is an object of type PlatformAdd<sizeof(D)>.
   //
-  // Then
-  //   platform_add(dest, add_value)
-  // must be a valid expression, returning a result convertible to D.
+  // Then both
+  //   platform_add.add_and_fetch(dest, add_value, order)
+  //   platform_add.fetch_and_add(dest, add_value, order)
+  // must be valid expressions returning a result convertible to D.
+  //
+  // add_and_fetch atomically adds add_value to the value of dest,
+  // returning the new value.
+  //
+  // fetch_and_add atomically adds add_value to the value of dest,
+  // returning the old value.
+  //
+  // When D is a pointer type P*, both add_and_fetch and fetch_and_add
+  // treat it as if it were an uintptr_t; they do not perform any
+  // scaling of add_value, as that has already been done by the caller.
   //
   // No definition is provided; all platforms must explicitly define
   // this class and any needed specializations.
   template<size_t byte_size> struct PlatformAdd;
 
-  // Helper base classes for defining PlatformAdd.  To use, define
-  // PlatformAdd or a specialization that derives from one of these,
-  // and include in the PlatformAdd definition the support function
-  // (described below) required by the base class.
-  //
-  // These classes implement the required function object protocol for
-  // PlatformAdd, using a support function template provided by the
-  // derived class.  Let add_value (of type I) and dest (of type D) be
-  // the arguments the object is called with.  If D is a pointer type
-  // P*, then let addend (of type I) be add_value * sizeof(P);
-  // otherwise, addend is add_value.
-  //
-  // FetchAndAdd requires the derived class to provide
-  //   fetch_and_add(dest, addend)
-  // atomically adding addend to the value of dest, and returning the
-  // old value.
-  //
-  // AddAndFetch requires the derived class to provide
-  //   add_and_fetch(dest, addend)
-  // atomically adding addend to the value of dest, and returning the
-  // new value.
-  //
-  // When D is a pointer type P*, both fetch_and_add and add_and_fetch
-  // treat it as if it were a uintptr_t; they do not perform any
-  // scaling of the addend, as that has already been done by the
-  // caller.
-public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
-  template<typename Derived> struct FetchAndAdd;
-  template<typename Derived> struct AddAndFetch;
-private:
-
   // Support for platforms that implement some variants of add using a
   // (typically out of line) non-template helper function.  The
   // generic arguments passed to PlatformAdd need to be translated to
@@ -512,22 +498,6 @@
   }
 };
 
-// Define FetchAndAdd and AddAndFetch helper classes before including
-// platform file, which may use these as base classes, requiring they
-// be complete.
-
-template<typename Derived>
-struct Atomic::FetchAndAdd {
-  template<typename D, typename I>
-  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
-};
-
-template<typename Derived>
-struct Atomic::AddAndFetch {
-  template<typename D, typename I>
-  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
-};
-
 template<typename D>
 inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
@@ -684,7 +654,13 @@
 template<typename D, typename I>
 inline D Atomic::add(D volatile* dest, I add_value,
                      atomic_memory_order order) {
-  return AddImpl<D, I>()(dest, add_value, order);
+  return AddImpl<D, I>::add_and_fetch(dest, add_value, order);
+}
+
+template<typename D, typename I>
+inline D Atomic::fetch_and_add(D volatile* dest, I add_value,
+                               atomic_memory_order order) {
+  return AddImpl<D, I>::fetch_and_add(dest, add_value, order);
 }
 
 template<typename D, typename I>
@@ -695,9 +671,13 @@
                     (sizeof(I) <= sizeof(D)) &&
                     (IsSigned<I>::value == IsSigned<D>::value)>::type>
 {
-  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
+  static D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) {
     D addend = add_value;
-    return PlatformAdd<sizeof(D)>()(dest, addend, order);
+    return PlatformAdd<sizeof(D)>().add_and_fetch(dest, addend, order);
+  }
+  static D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) {
+    D addend = add_value;
+    return PlatformAdd<sizeof(D)>().fetch_and_add(dest, addend, order);
   }
 };
 
@@ -706,41 +686,26 @@
   P*, I,
   typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
 {
-  P* operator()(P* volatile* dest, I add_value, atomic_memory_order order) const {
-    STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
-    STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
-    typedef typename Conditional<IsSigned<I>::value,
-                                 intptr_t,
-                                 uintptr_t>::type CI;
+  STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
+  STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
+  typedef typename Conditional<IsSigned<I>::value,
+                               intptr_t,
+                               uintptr_t>::type CI;
+
+  static CI scale_addend(CI add_value) {
+    return add_value * sizeof(P);
+  }
+
+  static P* add_and_fetch(P* volatile* dest, I add_value, atomic_memory_order order) {
     CI addend = add_value;
-    return PlatformAdd<sizeof(P*)>()(dest, addend, order);
+    return PlatformAdd<sizeof(P*)>().add_and_fetch(dest, scale_addend(addend), order);
+  }
+  static P* fetch_and_add(P* volatile* dest, I add_value, atomic_memory_order order) {
+    CI addend = add_value;
+    return PlatformAdd<sizeof(P*)>().fetch_and_add(dest, scale_addend(addend), order);
   }
 };
 
-template<typename Derived>
-template<typename D, typename I>
-inline D Atomic::FetchAndAdd<Derived>::operator()(D volatile* dest, I add_value,
-                                                  atomic_memory_order order) const {
-  I addend = add_value;
-  // If D is a pointer type P*, scale by sizeof(P).
-  if (IsPointer<D>::value) {
-    addend *= sizeof(typename RemovePointer<D>::type);
-  }
-  D old = static_cast<const Derived*>(this)->fetch_and_add(dest, addend, order);
-  return old + add_value;
-}
-
-template<typename Derived>
-template<typename D, typename I>
-inline D Atomic::AddAndFetch<Derived>::operator()(D volatile* dest, I add_value,
-                                                  atomic_memory_order order) const {
-  // If D is a pointer type P*, scale by sizeof(P).
-  if (IsPointer<D>::value) {
-    add_value *= sizeof(typename RemovePointer<D>::type);
-  }
-  return static_cast<const Derived*>(this)->add_and_fetch(dest, add_value, order);
-}
-
 template<typename Type, typename Fn, typename D, typename I>
 inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) {
   return PrimitiveConversions::cast<D>(
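
With the FetchAndAdd/AddAndFetch CRTP bases removed, each platform's PlatformAdd now exposes both member templates directly, typically implementing one natively and deriving the other. A sketch of a conforming specialization under the contract documented in the rewritten atomic.hpp comments (illustrative only, loosely modeled on the linux_aarch64 file above; not part of this changeset):

    // Hypothetical platform file: satisfies the PlatformAdd contract by
    // implementing add_and_fetch with a GCC __atomic builtin and deriving
    // fetch_and_add from it, as the add_and_fetch-based platforms do.
    template<size_t byte_size>
    struct Atomic::PlatformAdd {
      template<typename D, typename I>
      D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
        // Conservative ordering for illustration; real platform files map
        // `order` onto their own barrier/ordering scheme.
        return __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);
      }

      template<typename D, typename I>
      D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
        // Recover the old value from the new one, mirroring the pattern used
        // by the add_and_fetch-based platform files in this patch.
        return add_and_fetch(dest, add_value, order) - add_value;
      }
    };
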
--- a/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp	Thu Jan 23 21:14:16 2020 -0500
+++ b/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp	Fri Jan 24 09:15:08 2020 +0100
@@ -54,7 +54,7 @@
 
   // Returns true if you succeeded to claim the range start -> (stop-1).
   bool claim(size_t* start, size_t* stop) {
-    size_t claimed = Atomic::add(&_next_to_claim, (size_t)1) - 1;
+    size_t claimed = Atomic::fetch_and_add(&_next_to_claim, 1u);
     if (claimed >= _stop_task) {
       return false;
     }