OpenJDK jdk/jdk changeset 50332:d0d933d61610
8204179: [BACKOUT] OopStorage should use GlobalCounter
Summary: Backout JDK-8202945
Reviewed-by: eosterlund, pliden
author | kbarrett
---|---
date | Thu, 31 May 2018 15:37:18 -0400
parents | 2712cf4711d1
children | 7cea35f78b50 dd9ec7e7e35a
files | src/hotspot/share/gc/shared/oopStorage.cpp src/hotspot/share/gc/shared/oopStorage.hpp
diffstat | 2 files changed, 63 insertions(+), 5 deletions(-)
```diff
--- a/src/hotspot/share/gc/shared/oopStorage.cpp	Thu May 31 11:41:25 2018 -0700
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp	Thu May 31 15:37:18 2018 -0400
@@ -40,7 +40,6 @@
 #include "utilities/align.hpp"
 #include "utilities/count_trailing_zeros.hpp"
 #include "utilities/debug.hpp"
-#include "utilities/globalCounter.inline.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
@@ -502,6 +501,48 @@
   return true;
 }
 
+OopStorage::ProtectActive::ProtectActive() : _enter(0), _exit() {}
+
+// Begin read-side critical section.
+uint OopStorage::ProtectActive::read_enter() {
+  return Atomic::add(2u, &_enter);
+}
+
+// End read-side critical section.
+void OopStorage::ProtectActive::read_exit(uint enter_value) {
+  Atomic::add(2u, &_exit[enter_value & 1]);
+}
+
+// Wait until all readers that entered the critical section before
+// synchronization have exited that critical section.
+void OopStorage::ProtectActive::write_synchronize() {
+  SpinYield spinner;
+  // Determine old and new exit counters, based on bit0 of the
+  // on-entry _enter counter.
+  uint value = OrderAccess::load_acquire(&_enter);
+  volatile uint* new_ptr = &_exit[(value + 1) & 1];
+  // Atomically change the in-use exit counter to the new counter, by
+  // adding 1 to the _enter counter (flipping bit0 between 0 and 1)
+  // and initializing the new exit counter to that enter value.  Note:
+  // The new exit counter is not being used by read operations until
+  // this change succeeds.
+  uint old;
+  do {
+    old = value;
+    *new_ptr = ++value;
+    value = Atomic::cmpxchg(value, &_enter, old);
+  } while (old != value);
+  // Readers that entered the critical section before we changed the
+  // selected exit counter will use the old exit counter.  Readers
+  // entering after the change will use the new exit counter.  Wait
+  // for all the critical sections started before the change to
+  // complete, e.g. for the value of old_ptr to catch up with old.
+  volatile uint* old_ptr = &_exit[old & 1];
+  while (old != OrderAccess::load_acquire(old_ptr)) {
+    spinner.wait();
+  }
+}
+
 // Make new_array the _active_array.  Increments new_array's refcount
 // to account for the new reference.  The assignment is atomic wrto
 // obtain_active_array; once this function returns, it is safe for the
@@ -513,9 +554,9 @@
   // Install new_array, ensuring its initialization is complete first.
   OrderAccess::release_store(&_active_array, new_array);
   // Wait for any readers that could read the old array from _active_array.
-  GlobalCounter::write_synchronize();
-  // All obtain_active_array critical sections that could see the old array
-  // have completed, having incremented the refcount of the old array.  The
+  _protect_active.write_synchronize();
+  // All obtain critical sections that could see the old array have
+  // completed, having incremented the refcount of the old array.  The
   // caller can now safely relinquish the old array.
 }
 
@@ -525,9 +566,10 @@
 // _active_array.  The caller must relinquish the array when done
 // using it.
OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
-  GlobalCounter::CriticalSection cs(Thread::current());
+  uint enter_value = _protect_active.read_enter();
   ActiveArray* result = OrderAccess::load_acquire(&_active_array);
   result->increment_refcount();
+  _protect_active.read_exit(enter_value);
   return result;
 }
```
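For context, the restored ProtectActive class is a hand-rolled, RCU-style epoch scheme: readers add 2 to a shared enter counter (the stride of 2 keeps bit0 free to carry the epoch parity), and the low bit of the value they observe selects which of two exit counters they bump on exit; a writer flips bit0, seeding a fresh exit counter for new readers, then spins until the retired counter catches up with the enter count captured at the flip. Below is a minimal standalone sketch of that scheme, assuming C++11 std::atomic in place of HotSpot's Atomic, OrderAccess, and SpinYield primitives. It illustrates the technique only; the memory orderings are conservative assumptions, not the HotSpot code.

```cpp
#include <atomic>
#include <thread>

// Standalone sketch of the restored ProtectActive scheme (illustration
// only, not the HotSpot implementation).
class ProtectActive {
  std::atomic<unsigned> _enter;
  std::atomic<unsigned> _exit[2];

public:
  ProtectActive() : _enter(0) {
    _exit[0].store(0);
    _exit[1].store(0);
  }

  // Begin a read-side critical section.  Adds 2 so bit0 stays reserved
  // for the epoch parity; the returned value's low bit names the exit
  // counter this reader must bump on exit.  (HotSpot's Atomic::add
  // returns the new value, hence the "+ 2".)
  unsigned read_enter() {
    return _enter.fetch_add(2, std::memory_order_acquire) + 2;
  }

  // End a read-side critical section.
  void read_exit(unsigned enter_value) {
    _exit[enter_value & 1].fetch_add(2, std::memory_order_release);
  }

  // Wait until every reader that entered before this call has exited.
  // Assumes writers are serialized externally (HotSpot calls this with a
  // mutex held), so only reader increments can make the CAS fail, and
  // those never change bit0 of _enter.
  void write_synchronize() {
    unsigned old = _enter.load(std::memory_order_acquire);
    std::atomic<unsigned>& new_exit = _exit[(old + 1) & 1];
    do {
      // Seed the soon-to-be-current exit counter; no reader uses it
      // until the CAS below flips bit0 of _enter.
      new_exit.store(old + 1, std::memory_order_relaxed);
    } while (!_enter.compare_exchange_weak(old, old + 1,
                                           std::memory_order_acq_rel));
    // Each pre-flip reader adds 2 to the old epoch's exit counter, which
    // was seeded with the enter value at the previous flip, so it reaches
    // 'old' exactly when the last pre-flip reader has exited.
    std::atomic<unsigned>& old_exit = _exit[old & 1];
    while (old_exit.load(std::memory_order_acquire) != old) {
      std::this_thread::yield();  // stands in for HotSpot's SpinYield
    }
  }
};
```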
```diff
--- a/src/hotspot/share/gc/shared/oopStorage.hpp	Thu May 31 11:41:25 2018 -0700
+++ b/src/hotspot/share/gc/shared/oopStorage.hpp	Thu May 31 15:37:18 2018 -0400
@@ -204,6 +204,19 @@
     void unlink(const Block& block);
   };
 
+  // RCU-inspired protection of access to _active_array.
+  class ProtectActive {
+    volatile uint _enter;
+    volatile uint _exit[2];
+
+  public:
+    ProtectActive();
+
+    uint read_enter();
+    void read_exit(uint enter_value);
+    void write_synchronize();
+  };
+
 private:
   const char* _name;
   ActiveArray* _active_array;
@@ -216,6 +229,9 @@
   // Volatile for racy unlocked accesses.
   volatile size_t _allocation_count;
 
+  // Protection for _active_array.
+  mutable ProtectActive _protect_active;
+
   // mutable because this gets set even for const iteration.
   mutable bool _concurrent_iteration_active;
 
```
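To show how the two pieces fit together, here is a hedged usage sketch in the same standalone style, reusing the ProtectActive class from the sketch above. It mirrors the obtain_active_array / replace_active_array pairing in the patch: a reader takes a refcounted snapshot of the active array inside a read section, and the writer publishes a replacement and calls write_synchronize() before dropping its reference to the old array, so no reader can be stranded between loading the old pointer and incrementing its refcount. The ActiveArray stand-in, g_active, g_protect, and the refcount helpers are illustrative names, not the HotSpot API.

```cpp
#include <atomic>
// Reuses the ProtectActive class from the sketch above.

// Illustrative stand-in for OopStorage's ActiveArray; the real HotSpot
// type manages a block array and uses its own atomics.
struct ActiveArray {
  std::atomic<int> refcount{1};
  void increment_refcount() { refcount.fetch_add(1); }
  // Returns true when the last reference has been released.
  bool decrement_refcount() { return refcount.fetch_sub(1) == 1; }
};

static std::atomic<ActiveArray*> g_active;  // plays the role of _active_array
static ProtectActive g_protect;             // plays the role of _protect_active

// Mirrors obtain_active_array(): the read section guarantees the array
// loaded here cannot be freed before its refcount is incremented.
ActiveArray* obtain_active_array() {
  unsigned enter_value = g_protect.read_enter();
  ActiveArray* result = g_active.load(std::memory_order_acquire);
  result->increment_refcount();
  g_protect.read_exit(enter_value);
  return result;
}

// Mirrors replace_active_array(): publish the new array, then wait out
// any reader still between its pointer load and refcount increment
// before releasing the storage's reference to the old array.
void replace_active_array(ActiveArray* new_array) {
  ActiveArray* old_array = g_active.exchange(new_array,
                                             std::memory_order_acq_rel);
  g_protect.write_synchronize();
  // All read sections that could have seen old_array have completed, so
  // every remaining reference is tracked by the refcount.
  if (old_array->decrement_refcount()) {
    delete old_array;
  }
}

int main() {
  g_active.store(new ActiveArray());
  ActiveArray* snapshot = obtain_active_array();  // reader's stable view
  replace_active_array(new ActiveArray());        // safe concurrent swap
  if (snapshot->decrement_refcount()) delete snapshot;
  if (g_active.load()->decrement_refcount()) delete g_active.load();
  return 0;
}
```

Note how small the read side is: it covers only the pointer load and the refcount bump, so write_synchronize() never waits on more than those few instructions. Long-running users of the array, such as concurrent iteration, are protected by the refcount they took inside the read section rather than by the critical section itself.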