changeset 58852:dbb94c2ceaf8

8238220: Rename OWSTTaskTerminator to TaskTerminator
Reviewed-by: sjohanss, sangheki
author tschatzl
date Mon, 03 Feb 2020 10:45:44 +0100
parents c3d2fc56206f
children 1617236f5cbb
files src/hotspot/share/gc/g1/g1CollectedHeap.cpp src/hotspot/share/gc/g1/g1CollectedHeap.hpp src/hotspot/share/gc/g1/g1ConcurrentMark.cpp src/hotspot/share/gc/g1/g1ConcurrentMark.hpp src/hotspot/share/gc/g1/g1FullGCMarkTask.hpp src/hotspot/share/gc/g1/g1FullGCMarker.cpp src/hotspot/share/gc/g1/g1FullGCMarker.hpp src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp src/hotspot/share/gc/parallel/psParallelCompact.cpp src/hotspot/share/gc/parallel/psScavenge.cpp src/hotspot/share/gc/shared/genCollectedHeap.cpp src/hotspot/share/gc/shared/owstTaskTerminator.cpp src/hotspot/share/gc/shared/owstTaskTerminator.hpp src/hotspot/share/gc/shared/taskTerminator.cpp src/hotspot/share/gc/shared/taskTerminator.hpp src/hotspot/share/gc/shared/taskqueue.cpp src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp
diffstat 17 files changed, 407 insertions(+), 406 deletions(-)
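
Note for readers skimming the diff below: this change is a pure rename, so the termination protocol and its call pattern are unchanged; only the type name and file names differ. A minimal sketch of what call sites look like after the rename (the worker function and its body are placeholder illustrations, not part of this changeset):

    // One terminator per parallel phase, sized to the worker count and bound
    // to that phase's task queue set (compare the G1STWRefProcTaskExecutor
    // hunk below).
    TaskTerminator terminator(ergo_workers, _queues);

    // Hypothetical worker body: drain local work and steal from peers, then
    // offer termination; offer_termination() returns false while more work
    // is observed, true once all workers have terminated.
    static void example_worker(TaskTerminator& terminator, uint worker_id) {
      do {
        // ... drain the local queue, then try to steal from other queues ...
      } while (!terminator.offer_termination());
    }
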
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Mon Feb 03 10:45:43 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Mon Feb 03 10:45:44 2020 +0100
@@ -76,10 +76,10 @@
 #include "gc/shared/isGCActiveMark.hpp"
 #include "gc/shared/locationPrinter.inline.hpp"
 #include "gc/shared/oopStorageParState.hpp"
-#include "gc/shared/owstTaskTerminator.hpp"
 #include "gc/shared/preservedMarks.inline.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/referenceProcessor.inline.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "gc/shared/weakProcessor.inline.hpp"
 #include "gc/shared/workerPolicy.hpp"
@@ -1133,7 +1133,7 @@
   print_heap_after_gc();
   print_heap_regions();
 #ifdef TRACESPINNING
-  OWSTTaskTerminator::print_termination_counts();
+  TaskTerminator::print_termination_counts();
 #endif
 }
 
@@ -3141,7 +3141,7 @@
       verify_after_young_collection(verify_type);
 
 #ifdef TRACESPINNING
-      OWSTTaskTerminator::print_termination_counts();
+      TaskTerminator::print_termination_counts();
 #endif
 
       gc_epilogue(false);
@@ -3477,14 +3477,14 @@
   G1CollectedHeap* _g1h;
   G1ParScanThreadStateSet* _pss;
   RefToScanQueueSet* _task_queues;
-  OWSTTaskTerminator* _terminator;
+  TaskTerminator* _terminator;
 
 public:
   G1STWRefProcTaskProxy(ProcessTask& proc_task,
                         G1CollectedHeap* g1h,
                         G1ParScanThreadStateSet* per_thread_states,
                         RefToScanQueueSet *task_queues,
-                        OWSTTaskTerminator* terminator) :
+                        TaskTerminator* terminator) :
     AbstractGangTask("Process reference objects in parallel"),
     _proc_task(proc_task),
     _g1h(g1h),
@@ -3528,7 +3528,7 @@
   assert(_workers->active_workers() >= ergo_workers,
          "Ergonomically chosen workers (%u) should be less than or equal to active workers (%u)",
          ergo_workers, _workers->active_workers());
-  OWSTTaskTerminator terminator(ergo_workers, _queues);
+  TaskTerminator terminator(ergo_workers, _queues);
   G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _pss, _queues, &terminator);
 
   _workers->run_task(&proc_task_proxy, ergo_workers);
@@ -3815,7 +3815,7 @@
   G1CollectedHeap* _g1h;
   G1ParScanThreadStateSet* _per_thread_states;
   RefToScanQueueSet* _task_queues;
-  OWSTTaskTerminator _terminator;
+  TaskTerminator _terminator;
   uint _num_workers;
 
   void evacuate_live_objects(G1ParScanThreadState* pss,
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Mon Feb 03 10:45:43 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Mon Feb 03 10:45:44 2020 +0100
@@ -1482,18 +1482,18 @@
   G1CollectedHeap*              _g1h;
   G1ParScanThreadState*         _par_scan_state;
   RefToScanQueueSet*            _queues;
-  OWSTTaskTerminator*           _terminator;
+  TaskTerminator*               _terminator;
   G1GCPhaseTimes::GCParPhases   _phase;
 
   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
   RefToScanQueueSet*      queues()         { return _queues; }
-  OWSTTaskTerminator*     terminator()     { return _terminator; }
+  TaskTerminator*         terminator()     { return _terminator; }
 
 public:
   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
                                 G1ParScanThreadState* par_scan_state,
                                 RefToScanQueueSet* queues,
-                                OWSTTaskTerminator* terminator,
+                                TaskTerminator* terminator,
                                 G1GCPhaseTimes::GCParPhases phase)
     : _start_term(0.0), _term_time(0.0), _term_attempts(0),
       _g1h(g1h), _par_scan_state(par_scan_state),
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Mon Feb 03 10:45:43 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Mon Feb 03 10:45:44 2020 +0100
@@ -46,10 +46,10 @@
 #include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/gcVMOperations.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
-#include "gc/shared/owstTaskTerminator.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/strongRootsScope.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "gc/shared/weakProcessor.inline.hpp"
 #include "gc/shared/workerPolicy.hpp"
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Mon Feb 03 10:45:43 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Mon Feb 03 10:45:44 2020 +0100
@@ -30,7 +30,7 @@
 #include "gc/g1/g1HeapVerifier.hpp"
 #include "gc/g1/g1RegionMarkStatsCache.hpp"
 #include "gc/g1/heapRegionSet.hpp"
-#include "gc/shared/owstTaskTerminator.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/taskqueue.hpp"
 #include "gc/shared/verifyOption.hpp"
 #include "gc/shared/workgroup.hpp"
@@ -329,7 +329,7 @@
   G1CMTask**              _tasks;            // Task queue array (max_worker_id length)
 
   G1CMTaskQueueSet*       _task_queues; // Task queue set
-  OWSTTaskTerminator      _terminator;  // For termination
+  TaskTerminator          _terminator;  // For termination
 
   // Two sync barriers that are used to synchronize tasks when an
   // overflow occurs. The algorithm is the following. All tasks enter
@@ -418,7 +418,7 @@
   HeapWord*           finger()       { return _finger;   }
   bool                concurrent()   { return _concurrent; }
   uint                active_tasks() { return _num_active_tasks; }
-  OWSTTaskTerminator* terminator()   { return &_terminator; }
+  TaskTerminator*     terminator()   { return &_terminator; }
 
   // Claims the next available region to be scanned by a marking
   // task/thread. It might return NULL if the next region is empty or
--- a/src/hotspot/share/gc/g1/g1FullGCMarkTask.hpp	Mon Feb 03 10:45:43 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1FullGCMarkTask.hpp	Mon Feb 03 10:45:44 2020 +0100
@@ -36,7 +36,7 @@
 
 class G1FullGCMarkTask : public G1FullGCTask {
   G1RootProcessor          _root_processor;
-  OWSTTaskTerminator       _terminator;
+  TaskTerminator           _terminator;
 
 public:
   G1FullGCMarkTask(G1FullCollector* collector);
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.cpp	Mon Feb 03 10:45:43 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.cpp	Mon Feb 03 10:45:44 2020 +0100
@@ -25,8 +25,8 @@
 #include "precompiled.hpp"
 #include "classfile/classLoaderData.hpp"
 #include "gc/g1/g1FullGCMarker.inline.hpp"
-#include "gc/shared/owstTaskTerminator.hpp"
 #include "gc/shared/referenceProcessor.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/verifyOption.hpp"
 #include "memory/iterator.inline.hpp"
 
@@ -50,7 +50,7 @@
 
 void G1FullGCMarker::complete_marking(OopQueueSet* oop_stacks,
                                       ObjArrayTaskQueueSet* array_stacks,
-                                      OWSTTaskTerminator* terminator) {
+                                      TaskTerminator* terminator) {
   do {
     drain_stack();
     ObjArrayTask steal_array;
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.hpp	Mon Feb 03 10:45:43 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.hpp	Mon Feb 03 10:45:44 2020 +0100
@@ -87,7 +87,7 @@
   inline void drain_stack();
   void complete_marking(OopQueueSet* oop_stacks,
                         ObjArrayTaskQueueSet* array_stacks,
-                        OWSTTaskTerminator* terminator);
+                        TaskTerminator* terminator);
 
   // Closure getters
   CLDToOopClosure*      cld_closure()   { return &_cld_closure; }
--- a/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp	Mon Feb 03 10:45:43 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp	Mon Feb 03 10:45:44 2020 +0100
@@ -61,7 +61,7 @@
     typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
     ProcessTask&                  _proc_task;
     G1FullCollector*              _collector;
-    OWSTTaskTerminator            _terminator;
+    TaskTerminator                _terminator;
 
   public:
     G1RefProcTaskProxy(ProcessTask& proc_task,
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Mon Feb 03 10:45:43 2020 +0100
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Mon Feb 03 10:45:44 2020 +0100
@@ -49,11 +49,11 @@
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
-#include "gc/shared/owstTaskTerminator.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
 #include "gc/shared/spaceDecorator.inline.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/weakProcessor.hpp"
 #include "gc/shared/workerPolicy.hpp"
 #include "gc/shared/workgroup.hpp"
@@ -1970,7 +1970,7 @@
                          collection_exit.ticks());
 
 #ifdef TRACESPINNING
-  OWSTTaskTerminator::print_termination_counts();
+  TaskTerminator::print_termination_counts();
 #endif
 
   AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
@@ -2150,7 +2150,7 @@
   cm->follow_marking_stacks();
 }
 
-static void steal_marking_work(OWSTTaskTerminator& terminator, uint worker_id) {
+static void steal_marking_work(TaskTerminator& terminator, uint worker_id) {
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   ParCompactionManager* cm =
@@ -2174,7 +2174,7 @@
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
   StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
   SequentialSubTasksDone _subtasks;
-  OWSTTaskTerminator _terminator;
+  TaskTerminator _terminator;
   uint _active_workers;
 
 public:
@@ -2207,7 +2207,7 @@
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
   ProcessTask& _task;
   uint _ergo_workers;
-  OWSTTaskTerminator _terminator;
+  TaskTerminator _terminator;
 
 public:
   PCRefProcTask(ProcessTask& task, uint ergo_workers) :
@@ -2587,7 +2587,7 @@
 }
 #endif // #ifdef ASSERT
 
-static void compaction_with_stealing_work(OWSTTaskTerminator* terminator, uint worker_id) {
+static void compaction_with_stealing_work(TaskTerminator* terminator, uint worker_id) {
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   ParCompactionManager* cm =
@@ -2623,7 +2623,7 @@
 class UpdateDensePrefixAndCompactionTask: public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
   TaskQueue& _tq;
-  OWSTTaskTerminator _terminator;
+  TaskTerminator _terminator;
   uint _active_workers;
 
 public:
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp	Mon Feb 03 10:45:43 2020 +0100
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp	Mon Feb 03 10:45:44 2020 +0100
@@ -43,12 +43,12 @@
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
-#include "gc/shared/owstTaskTerminator.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
 #include "gc/shared/scavengableNMethods.hpp"
 #include "gc/shared/spaceDecorator.inline.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/weakProcessor.hpp"
 #include "gc/shared/workerPolicy.hpp"
 #include "gc/shared/workgroup.hpp"
@@ -139,7 +139,7 @@
   pm->drain_stacks(false);
 }
 
-static void steal_work(OWSTTaskTerminator& terminator, uint worker_id) {
+static void steal_work(TaskTerminator& terminator, uint worker_id) {
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   PSPromotionManager* pm =
@@ -219,7 +219,7 @@
 
 class PSRefProcTask : public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
-  OWSTTaskTerminator _terminator;
+  TaskTerminator _terminator;
   ProcessTask& _task;
   uint _active_workers;
 
@@ -315,7 +315,7 @@
   HeapWord* _gen_top;
   uint _active_workers;
   bool _is_empty;
-  OWSTTaskTerminator _terminator;
+  TaskTerminator _terminator;
 
 public:
   ScavengeRootsTask(PSOldGen* old_gen,
@@ -732,7 +732,7 @@
                             scavenge_exit.ticks());
 
 #ifdef TRACESPINNING
-  OWSTTaskTerminator::print_termination_counts();
+  TaskTerminator::print_termination_counts();
 #endif
 
   AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Mon Feb 03 10:45:43 2020 +0100
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Mon Feb 03 10:45:44 2020 +0100
@@ -678,7 +678,7 @@
   }
 
 #ifdef TRACESPINNING
-  OWSTTaskTerminator::print_termination_counts();
+  TaskTerminator::print_termination_counts();
 #endif
 }
 
--- a/src/hotspot/share/gc/shared/owstTaskTerminator.cpp	Mon Feb 03 10:45:43 2020 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,250 +0,0 @@
-/*
- * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-
-#include "gc/shared/owstTaskTerminator.hpp"
-#include "gc/shared/taskqueue.hpp"
-#include "logging/log.hpp"
-
-#ifdef TRACESPINNING
-uint OWSTTaskTerminator::_total_yields = 0;
-uint OWSTTaskTerminator::_total_spins = 0;
-uint OWSTTaskTerminator::_total_peeks = 0;
-#endif
-
-OWSTTaskTerminator::OWSTTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
-  _n_threads(n_threads),
-  _queue_set(queue_set),
-  _offered_termination(0),
-  _spin_master(NULL) {
-
-  _blocker = new Monitor(Mutex::leaf, "OWSTTaskTerminator", false, Monitor::_safepoint_check_never);
-}
-
-OWSTTaskTerminator::~OWSTTaskTerminator() {
-  assert(_offered_termination == 0 || !peek_in_queue_set(), "Precondition");
-  assert(_offered_termination == 0 || _offered_termination == _n_threads, "Terminated or aborted" );
-
-  assert(_spin_master == NULL, "Should have been reset");
-  assert(_blocker != NULL, "Can not be NULL");
-  delete _blocker;
-}
-
-#ifdef ASSERT
-bool OWSTTaskTerminator::peek_in_queue_set() {
-  return _queue_set->peek();
-}
-#endif
-
-void OWSTTaskTerminator::yield() {
-  assert(_offered_termination <= _n_threads, "Invariant");
-  os::naked_yield();
-}
-
-#ifdef TRACESPINNING
-void OWSTTaskTerminator::print_termination_counts() {
-  log_trace(gc, task)("TaskTerminator Yields: %u Spins: %u Peeks: %u",
-                      total_yields(), total_spins(), total_peeks());
-}
-#endif
-
-void OWSTTaskTerminator::reset_for_reuse() {
-  if (_offered_termination != 0) {
-    assert(_offered_termination == _n_threads,
-           "Terminator may still be in use");
-    _offered_termination = 0;
-  }
-}
-
-void OWSTTaskTerminator::reset_for_reuse(uint n_threads) {
-  reset_for_reuse();
-  _n_threads = n_threads;
-}
-
-bool OWSTTaskTerminator::exit_termination(size_t tasks, TerminatorTerminator* terminator) {
-  return tasks > 0 || (terminator != NULL && terminator->should_exit_termination());
-}
-
-size_t OWSTTaskTerminator::tasks_in_queue_set() const {
-  return _queue_set->tasks();
-}
-
-bool OWSTTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
-  assert(_n_threads > 0, "Initialization is incorrect");
-  assert(_offered_termination < _n_threads, "Invariant");
-  assert(_blocker != NULL, "Invariant");
-
-  // Single worker, done
-  if (_n_threads == 1) {
-    _offered_termination = 1;
-    assert(!peek_in_queue_set(), "Precondition");
-    return true;
-  }
-
-  _blocker->lock_without_safepoint_check();
-  _offered_termination++;
-  // All arrived, done
-  if (_offered_termination == _n_threads) {
-    _blocker->notify_all();
-    _blocker->unlock();
-    assert(!peek_in_queue_set(), "Precondition");
-    return true;
-  }
-
-  Thread* the_thread = Thread::current();
-  while (true) {
-    if (_spin_master == NULL) {
-      _spin_master = the_thread;
-
-      _blocker->unlock();
-
-      if (do_spin_master_work(terminator)) {
-        assert(_offered_termination == _n_threads, "termination condition");
-        assert(!peek_in_queue_set(), "Precondition");
-        return true;
-      } else {
-        _blocker->lock_without_safepoint_check();
-        // There is possibility that termination is reached between dropping the lock
-        // before returning from do_spin_master_work() and acquiring lock above.
-        if (_offered_termination == _n_threads) {
-          _blocker->unlock();
-          assert(!peek_in_queue_set(), "Precondition");
-          return true;
-        }
-      }
-    } else {
-      _blocker->wait_without_safepoint_check(WorkStealingSleepMillis);
-
-      if (_offered_termination == _n_threads) {
-        _blocker->unlock();
-        assert(!peek_in_queue_set(), "Precondition");
-        return true;
-      }
-    }
-
-    size_t tasks = tasks_in_queue_set();
-    if (exit_termination(tasks, terminator)) {
-      assert_lock_strong(_blocker);
-      _offered_termination--;
-      _blocker->unlock();
-      return false;
-    }
-  }
-}
-
-bool OWSTTaskTerminator::do_spin_master_work(TerminatorTerminator* terminator) {
-  uint yield_count = 0;
-  // Number of hard spin loops done since last yield
-  uint hard_spin_count = 0;
-  // Number of iterations in the hard spin loop.
-  uint hard_spin_limit = WorkStealingHardSpins;
-
-  // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done.
-  // If it is greater than 0, then start with a small number
-  // of spins and increase number with each turn at spinning until
-  // the count of hard spins exceeds WorkStealingSpinToYieldRatio.
-  // Then do a yield() call and start spinning afresh.
-  if (WorkStealingSpinToYieldRatio > 0) {
-    hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
-    hard_spin_limit = MAX2(hard_spin_limit, 1U);
-  }
-  // Remember the initial spin limit.
-  uint hard_spin_start = hard_spin_limit;
-
-  // Loop waiting for all threads to offer termination or
-  // more work.
-  while (true) {
-    // Look for more work.
-    // Periodically sleep() instead of yield() to give threads
-    // waiting on the cores the chance to grab this code
-    if (yield_count <= WorkStealingYieldsBeforeSleep) {
-      // Do a yield or hardspin.  For purposes of deciding whether
-      // to sleep, count this as a yield.
-      yield_count++;
-
-      // Periodically call yield() instead spinning
-      // After WorkStealingSpinToYieldRatio spins, do a yield() call
-      // and reset the counts and starting limit.
-      if (hard_spin_count > WorkStealingSpinToYieldRatio) {
-        yield();
-        hard_spin_count = 0;
-        hard_spin_limit = hard_spin_start;
-#ifdef TRACESPINNING
-        _total_yields++;
-#endif
-      } else {
-        // Hard spin this time
-        // Increase the hard spinning period but only up to a limit.
-        hard_spin_limit = MIN2(2*hard_spin_limit,
-                               (uint) WorkStealingHardSpins);
-        for (uint j = 0; j < hard_spin_limit; j++) {
-          SpinPause();
-        }
-        hard_spin_count++;
-#ifdef TRACESPINNING
-        _total_spins++;
-#endif
-      }
-    } else {
-      log_develop_trace(gc, task)("OWSTTaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields",
-                                  p2i(Thread::current()), yield_count);
-      yield_count = 0;
-
-      MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
-      _spin_master = NULL;
-      locker.wait(WorkStealingSleepMillis);
-      if (_spin_master == NULL) {
-        _spin_master = Thread::current();
-      } else {
-        return false;
-      }
-    }
-
-#ifdef TRACESPINNING
-    _total_peeks++;
-#endif
-    size_t tasks = tasks_in_queue_set();
-    bool exit = exit_termination(tasks, terminator);
-    {
-      MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
-      // Termination condition reached
-      if (_offered_termination == _n_threads) {
-        _spin_master = NULL;
-        return true;
-      } else if (exit) {
-        if (tasks >= _offered_termination - 1) {
-          locker.notify_all();
-        } else {
-          for (; tasks > 1; tasks--) {
-            locker.notify();
-          }
-        }
-        _spin_master = NULL;
-        return false;
-      }
-    }
-  }
-}
--- a/src/hotspot/share/gc/shared/owstTaskTerminator.hpp	Mon Feb 03 10:45:43 2020 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-#ifndef SHARE_GC_SHARED_OWSTTASKTERMINATOR_HPP
-#define SHARE_GC_SHARED_OWSTTASKTERMINATOR_HPP
-
-#include "memory/allocation.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/thread.hpp"
-
-// Define this to enable additional tracing probes.
-#undef TRACESPINNING
-
-class TaskQueueSetSuper;
-class TerminatorTerminator;
-
-/*
- * Provides a task termination protocol. OWST stands for Optimized Work Stealing Threads
- *
- * This is an enhanced implementation of Google's work stealing task termination
- * protocol, which is described in the paper:
- * "Wessam Hassanein. 2016. Understanding and improving JVM GC work
- * stealing at the data center scale. In Proceedings of the 2016 ACM
- * SIGPLAN International Symposium on Memory Management (ISMM 2016). ACM,
- * New York, NY, USA, 46-54. DOI: https://doi.org/10.1145/2926697.2926706"
- *
- * Instead of a dedicated spin-master, our implementation will let spin-master relinquish
- * the role before it goes to sleep/wait, allowing newly arrived threads to compete for the role.
- * The intention of above enhancement is to reduce spin-master's latency on detecting new tasks
- * for stealing and termination condition.
- */
-class OWSTTaskTerminator : public CHeapObj<mtGC> {
-  uint _n_threads;
-  TaskQueueSetSuper* _queue_set;
-
-  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
-  volatile uint _offered_termination;
-  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile uint));
-
-#ifdef ASSERT
-  bool peek_in_queue_set();
-#endif
-  void yield();
-
-  Monitor*    _blocker;
-  Thread*     _spin_master;
-
-#ifdef TRACESPINNING
-  static uint _total_yields;
-  static uint _total_spins;
-  static uint _total_peeks;
-#endif
-
-  // If we should exit current termination protocol
-  bool exit_termination(size_t tasks, TerminatorTerminator* terminator);
-
-  size_t tasks_in_queue_set() const;
-
-  // Perform spin-master task.
-  // Return true if termination condition is detected, otherwise return false
-  bool do_spin_master_work(TerminatorTerminator* terminator);
-
-  NONCOPYABLE(OWSTTaskTerminator);
-
-public:
-  OWSTTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set);
-  ~OWSTTaskTerminator();
-
-  // The current thread has no work, and is ready to terminate if everyone
-  // else is.  If returns "true", all threads are terminated.  If returns
-  // "false", available work has been observed in one of the task queues,
-  // so the global task is not complete.
-  bool offer_termination() {
-    return offer_termination(NULL);
-  }
-
-  // As above, but it also terminates if the should_exit_termination()
-  // method of the terminator parameter returns true. If terminator is
-  // NULL, then it is ignored.
-  bool offer_termination(TerminatorTerminator* terminator);
-
-  // Reset the terminator, so that it may be reused again.
-  // The caller is responsible for ensuring that this is done
-  // in an MT-safe manner, once the previous round of use of
-  // the terminator is finished.
-  void reset_for_reuse();
-  // Same as above but the number of parallel threads is set to the
-  // given number.
-  void reset_for_reuse(uint n_threads);
-
-#ifdef TRACESPINNING
-  static uint total_yields() { return _total_yields; }
-  static uint total_spins() { return _total_spins; }
-  static uint total_peeks() { return _total_peeks; }
-  static void print_termination_counts();
-#endif
-};
-
-
-#endif // SHARE_GC_SHARED_OWSTTASKTERMINATOR_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/taskTerminator.cpp	Mon Feb 03 10:45:44 2020 +0100
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shared/taskTerminator.hpp"
+#include "gc/shared/taskqueue.hpp"
+#include "logging/log.hpp"
+
+#ifdef TRACESPINNING
+uint TaskTerminator::_total_yields = 0;
+uint TaskTerminator::_total_spins = 0;
+uint TaskTerminator::_total_peeks = 0;
+#endif
+
+TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
+  _n_threads(n_threads),
+  _queue_set(queue_set),
+  _offered_termination(0),
+  _spin_master(NULL) {
+
+  _blocker = new Monitor(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never);
+}
+
+TaskTerminator::~TaskTerminator() {
+  assert(_offered_termination == 0 || !peek_in_queue_set(), "Precondition");
+  assert(_offered_termination == 0 || _offered_termination == _n_threads, "Terminated or aborted" );
+
+  assert(_spin_master == NULL, "Should have been reset");
+  assert(_blocker != NULL, "Can not be NULL");
+  delete _blocker;
+}
+
+#ifdef ASSERT
+bool TaskTerminator::peek_in_queue_set() {
+  return _queue_set->peek();
+}
+#endif
+
+void TaskTerminator::yield() {
+  assert(_offered_termination <= _n_threads, "Invariant");
+  os::naked_yield();
+}
+
+#ifdef TRACESPINNING
+void TaskTerminator::print_termination_counts() {
+  log_trace(gc, task)("TaskTerminator Yields: %u Spins: %u Peeks: %u",
+                      total_yields(), total_spins(), total_peeks());
+}
+#endif
+
+void TaskTerminator::reset_for_reuse() {
+  if (_offered_termination != 0) {
+    assert(_offered_termination == _n_threads,
+           "Terminator may still be in use");
+    _offered_termination = 0;
+  }
+}
+
+void TaskTerminator::reset_for_reuse(uint n_threads) {
+  reset_for_reuse();
+  _n_threads = n_threads;
+}
+
+bool TaskTerminator::exit_termination(size_t tasks, TerminatorTerminator* terminator) {
+  return tasks > 0 || (terminator != NULL && terminator->should_exit_termination());
+}
+
+size_t TaskTerminator::tasks_in_queue_set() const {
+  return _queue_set->tasks();
+}
+
+bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
+  assert(_n_threads > 0, "Initialization is incorrect");
+  assert(_offered_termination < _n_threads, "Invariant");
+  assert(_blocker != NULL, "Invariant");
+
+  // Single worker, done
+  if (_n_threads == 1) {
+    _offered_termination = 1;
+    assert(!peek_in_queue_set(), "Precondition");
+    return true;
+  }
+
+  _blocker->lock_without_safepoint_check();
+  _offered_termination++;
+  // All arrived, done
+  if (_offered_termination == _n_threads) {
+    _blocker->notify_all();
+    _blocker->unlock();
+    assert(!peek_in_queue_set(), "Precondition");
+    return true;
+  }
+
+  Thread* the_thread = Thread::current();
+  while (true) {
+    if (_spin_master == NULL) {
+      _spin_master = the_thread;
+
+      _blocker->unlock();
+
+      if (do_spin_master_work(terminator)) {
+        assert(_offered_termination == _n_threads, "termination condition");
+        assert(!peek_in_queue_set(), "Precondition");
+        return true;
+      } else {
+        _blocker->lock_without_safepoint_check();
+        // There is possibility that termination is reached between dropping the lock
+        // before returning from do_spin_master_work() and acquiring lock above.
+        if (_offered_termination == _n_threads) {
+          _blocker->unlock();
+          assert(!peek_in_queue_set(), "Precondition");
+          return true;
+        }
+      }
+    } else {
+      _blocker->wait_without_safepoint_check(WorkStealingSleepMillis);
+
+      if (_offered_termination == _n_threads) {
+        _blocker->unlock();
+        assert(!peek_in_queue_set(), "Precondition");
+        return true;
+      }
+    }
+
+    size_t tasks = tasks_in_queue_set();
+    if (exit_termination(tasks, terminator)) {
+      assert_lock_strong(_blocker);
+      _offered_termination--;
+      _blocker->unlock();
+      return false;
+    }
+  }
+}
+
+bool TaskTerminator::do_spin_master_work(TerminatorTerminator* terminator) {
+  uint yield_count = 0;
+  // Number of hard spin loops done since last yield
+  uint hard_spin_count = 0;
+  // Number of iterations in the hard spin loop.
+  uint hard_spin_limit = WorkStealingHardSpins;
+
+  // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done.
+  // If it is greater than 0, then start with a small number
+  // of spins and increase number with each turn at spinning until
+  // the count of hard spins exceeds WorkStealingSpinToYieldRatio.
+  // Then do a yield() call and start spinning afresh.
+  if (WorkStealingSpinToYieldRatio > 0) {
+    hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
+    hard_spin_limit = MAX2(hard_spin_limit, 1U);
+  }
+  // Remember the initial spin limit.
+  uint hard_spin_start = hard_spin_limit;
+
+  // Loop waiting for all threads to offer termination or
+  // more work.
+  while (true) {
+    // Look for more work.
+    // Periodically sleep() instead of yield() to give threads
+    // waiting on the cores the chance to grab this code
+    if (yield_count <= WorkStealingYieldsBeforeSleep) {
+      // Do a yield or hardspin.  For purposes of deciding whether
+      // to sleep, count this as a yield.
+      yield_count++;
+
+      // Periodically call yield() instead spinning
+      // After WorkStealingSpinToYieldRatio spins, do a yield() call
+      // and reset the counts and starting limit.
+      if (hard_spin_count > WorkStealingSpinToYieldRatio) {
+        yield();
+        hard_spin_count = 0;
+        hard_spin_limit = hard_spin_start;
+#ifdef TRACESPINNING
+        _total_yields++;
+#endif
+      } else {
+        // Hard spin this time
+        // Increase the hard spinning period but only up to a limit.
+        hard_spin_limit = MIN2(2*hard_spin_limit,
+                               (uint) WorkStealingHardSpins);
+        for (uint j = 0; j < hard_spin_limit; j++) {
+          SpinPause();
+        }
+        hard_spin_count++;
+#ifdef TRACESPINNING
+        _total_spins++;
+#endif
+      }
+    } else {
+      log_develop_trace(gc, task)("TaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields",
+                                  p2i(Thread::current()), yield_count);
+      yield_count = 0;
+
+      MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
+      _spin_master = NULL;
+      locker.wait(WorkStealingSleepMillis);
+      if (_spin_master == NULL) {
+        _spin_master = Thread::current();
+      } else {
+        return false;
+      }
+    }
+
+#ifdef TRACESPINNING
+    _total_peeks++;
+#endif
+    size_t tasks = tasks_in_queue_set();
+    bool exit = exit_termination(tasks, terminator);
+    {
+      MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
+      // Termination condition reached
+      if (_offered_termination == _n_threads) {
+        _spin_master = NULL;
+        return true;
+      } else if (exit) {
+        if (tasks >= _offered_termination - 1) {
+          locker.notify_all();
+        } else {
+          for (; tasks > 1; tasks--) {
+            locker.notify();
+          }
+        }
+        _spin_master = NULL;
+        return false;
+      }
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/taskTerminator.hpp	Mon Feb 03 10:45:44 2020 +0100
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#ifndef SHARE_GC_SHARED_TASKTERMINATOR_HPP
+#define SHARE_GC_SHARED_TASKTERMINATOR_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/thread.hpp"
+
+// Define this to enable additional tracing probes.
+#undef TRACESPINNING
+
+class TaskQueueSetSuper;
+class TerminatorTerminator;
+
+/*
+ * Provides a task termination protocol.
+ *
+ * This is an enhanced implementation of Google's OWST work stealing task termination
+ * protocol (OWST stands for Optimized Work Stealing Threads).
+ *
+ * It is described in the paper:
+ * "Wessam Hassanein. 2016. Understanding and improving JVM GC work
+ * stealing at the data center scale. In Proceedings of the 2016 ACM
+ * SIGPLAN International Symposium on Memory Management (ISMM 2016). ACM,
+ * New York, NY, USA, 46-54. DOI: https://doi.org/10.1145/2926697.2926706"
+ *
+ * Instead of a dedicated spin-master, our implementation will let spin-master relinquish
+ * the role before it goes to sleep/wait, allowing newly arrived threads to compete for the role.
+ * The intention of above enhancement is to reduce spin-master's latency on detecting new tasks
+ * for stealing and termination condition.
+ */
+class TaskTerminator : public CHeapObj<mtGC> {
+  uint _n_threads;
+  TaskQueueSetSuper* _queue_set;
+
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
+  volatile uint _offered_termination;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile uint));
+
+#ifdef ASSERT
+  bool peek_in_queue_set();
+#endif
+  void yield();
+
+  Monitor*    _blocker;
+  Thread*     _spin_master;
+
+#ifdef TRACESPINNING
+  static uint _total_yields;
+  static uint _total_spins;
+  static uint _total_peeks;
+#endif
+
+  // If we should exit current termination protocol
+  bool exit_termination(size_t tasks, TerminatorTerminator* terminator);
+
+  size_t tasks_in_queue_set() const;
+
+  // Perform spin-master task.
+  // Return true if termination condition is detected, otherwise return false
+  bool do_spin_master_work(TerminatorTerminator* terminator);
+
+  NONCOPYABLE(TaskTerminator);
+
+public:
+  TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set);
+  ~TaskTerminator();
+
+  // The current thread has no work, and is ready to terminate if everyone
+  // else is.  If returns "true", all threads are terminated.  If returns
+  // "false", available work has been observed in one of the task queues,
+  // so the global task is not complete.
+  bool offer_termination() {
+    return offer_termination(NULL);
+  }
+
+  // As above, but it also terminates if the should_exit_termination()
+  // method of the terminator parameter returns true. If terminator is
+  // NULL, then it is ignored.
+  bool offer_termination(TerminatorTerminator* terminator);
+
+  // Reset the terminator, so that it may be reused again.
+  // The caller is responsible for ensuring that this is done
+  // in an MT-safe manner, once the previous round of use of
+  // the terminator is finished.
+  void reset_for_reuse();
+  // Same as above but the number of parallel threads is set to the
+  // given number.
+  void reset_for_reuse(uint n_threads);
+
+#ifdef TRACESPINNING
+  static uint total_yields() { return _total_yields; }
+  static uint total_spins() { return _total_spins; }
+  static uint total_peeks() { return _total_peeks; }
+  static void print_termination_counts();
+#endif
+};
+
+#endif // SHARE_GC_SHARED_TASKTERMINATOR_HPP
--- a/src/hotspot/share/gc/shared/taskqueue.cpp	Mon Feb 03 10:45:43 2020 +0100
+++ b/src/hotspot/share/gc/shared/taskqueue.cpp	Mon Feb 03 10:45:44 2020 +0100
@@ -24,7 +24,6 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/taskqueue.hpp"
-#include "gc/shared/owstTaskTerminator.hpp"
 #include "oops/oop.inline.hpp"
 #include "logging/log.hpp"
 #include "runtime/atomic.hpp"
--- a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp	Mon Feb 03 10:45:43 2020 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp	Mon Feb 03 10:45:44 2020 +0100
@@ -24,7 +24,8 @@
 
 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP
 #define SHARE_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP
-#include "gc/shared/owstTaskTerminator.hpp"
+
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/taskqueue.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/atomic.hpp"
@@ -340,7 +341,7 @@
 
 class ShenandoahTaskTerminator : public StackObj {
 private:
-  OWSTTaskTerminator _terminator;
+  TaskTerminator _terminator;
 public:
   ShenandoahTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set);