changeset 30568:d0a24d56d8ea

Merge
author stefank
date Tue, 28 Apr 2015 12:17:56 +0000
parents ebd5af27fe02 497b3caa8243
children bc171531c562
files hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
diffstat 20 files changed, 335 insertions(+), 275 deletions(-)
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp	Mon Apr 27 10:04:26 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp	Tue Apr 28 12:17:56 2015 +0000
@@ -28,6 +28,7 @@
 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp"
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
 #include "oops/oop.inline.hpp"
+#include "utilities/taskqueue.inline.hpp"
 
 // Trim our work_queue so its length is below max at return
 inline void Par_MarkRefsIntoAndScanClosure::trim_queue(uint max) {
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Apr 27 10:04:26 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Apr 28 12:17:56 2015 +0000
@@ -66,6 +66,7 @@
 #include "services/memoryService.hpp"
 #include "services/runtimeService.hpp"
 #include "utilities/stack.inline.hpp"
+#include "utilities/taskqueue.inline.hpp"
 
 // statics
 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Apr 27 10:04:26 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Apr 28 12:17:56 2015 +0000
@@ -54,6 +54,7 @@
 #include "runtime/atomic.inline.hpp"
 #include "runtime/prefetch.inline.hpp"
 #include "services/memTracker.hpp"
+#include "utilities/taskqueue.inline.hpp"
 
 // Concurrent marking bit map wrapper
 
@@ -3758,6 +3759,10 @@
 #endif // _MARKING_STATS_
 }
 
+bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
+  return _task_queues->steal(worker_id, hash_seed, obj);
+}
+
 /*****************************************************************************
 
     The do_marking_step(time_target_ms, ...) method is the building
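
For context: try_stealing() is now defined out-of-line above so that concurrentMark.hpp (next hunk) no longer has to pull in taskqueue.inline.hpp. A minimal sketch of how a marking worker might combine draining its own queue with stealing; only try_stealing() is from this changeset, while CMTaskQueue, process(), and the loop shape are illustrative assumptions:

    // Illustrative only: drain local work first, then steal from peers.
    // CMTaskQueue and process() are hypothetical stand-ins.
    void mark_loop(ConcurrentMark* cm, CMTaskQueue* q,
                   uint worker_id, int* hash_seed) {
      oop obj;
      while (q->pop_local(obj)) {
        process(obj);                      // hypothetical marking step
      }
      // Local queue empty: take work from other workers' queues.
      while (cm->try_stealing(worker_id, hash_seed, obj)) {
        process(obj);
      }
    }
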
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Mon Apr 27 10:04:26 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Tue Apr 28 12:17:56 2015 +0000
@@ -676,9 +676,7 @@
   }
 
   // Attempts to steal an object from the task queues of other tasks
-  bool try_stealing(uint worker_id, int* hash_seed, oop& obj) {
-    return _task_queues->steal(worker_id, hash_seed, obj);
-  }
+  bool try_stealing(uint worker_id, int* hash_seed, oop& obj);
 
   ConcurrentMark(G1CollectedHeap* g1h,
                  G1RegionToSpaceMapper* prev_bitmap_storage,
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Mon Apr 27 10:04:26 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Tue Apr 28 12:17:56 2015 +0000
@@ -27,6 +27,7 @@
 
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "utilities/taskqueue.inline.hpp"
 
 // Utility routine to set an exclusive range of cards on the given
 // card liveness bitmap
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Apr 27 10:04:26 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Apr 28 12:17:56 2015 +0000
@@ -66,6 +66,7 @@
 #include "runtime/vmThread.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/stack.inline.hpp"
+#include "utilities/taskqueue.inline.hpp"
 
 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Mon Apr 27 10:04:26 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Tue Apr 28 12:17:56 2015 +0000
@@ -29,7 +29,7 @@
 #include "gc_implementation/g1/g1StringDedup.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/prefetch.inline.hpp"
-#include "utilities/stack.inline.hpp"
+#include "utilities/taskqueue.inline.hpp"
 
 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
   : _g1h(g1h),
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Mon Apr 27 10:04:26 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Tue Apr 28 12:17:56 2015 +0000
@@ -100,10 +100,7 @@
   bool verify_task(StarTask ref) const;
 #endif // ASSERT
 
-  template <class T> void push_on_queue(T* ref) {
-    assert(verify_ref(ref), "sanity");
-    _refs->push(ref);
-  }
+  template <class T> void push_on_queue(T* ref);
 
   template <class T> void update_rs(HeapRegion* from, T* p, uint tid) {
     // If the new value of the field points to the same region or
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp	Mon Apr 27 10:04:26 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp	Tue Apr 28 12:17:56 2015 +0000
@@ -59,6 +59,11 @@
   update_rs(from, p, queue_num());
 }
 
+template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {
+  assert(verify_ref(ref), "sanity");
+  _refs->push(ref);
+}
+
 inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
   assert(has_partial_array_mask(p), "invariant");
   oop from_obj = clear_partial_array_mask(p);
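
The pattern in the two hunks above recurs throughout this changeset: the declaration stays in the .hpp file, and the template definition moves to the matching .inline.hpp, which is the only place that includes utilities/taskqueue.inline.hpp. A minimal sketch of that layout, with Foo and _refs as hypothetical stand-ins:

    // foo.hpp -- declaration only; taskqueue.inline.hpp not needed here.
    class Foo {
      RefToScanQueue* _refs;                       // hypothetical member
     public:
      template <class T> void push_on_queue(T* ref);
    };

    // foo.inline.hpp -- definition; pulls in the heavy include itself.
    #include "utilities/taskqueue.inline.hpp"
    template <class T> inline void Foo::push_on_queue(T* ref) {
      _refs->push(ref);
    }
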
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Mon Apr 27 10:04:26 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Apr 28 12:17:56 2015 +0000
@@ -54,6 +54,7 @@
 #include "utilities/copy.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/stack.inline.hpp"
+#include "utilities/taskqueue.inline.hpp"
 #include "utilities/workgroup.hpp"
 
 #ifdef _MSC_VER
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	Mon Apr 27 10:04:26 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	Tue Apr 28 12:17:56 2015 +0000
@@ -37,7 +37,7 @@
 #include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.inline.hpp"
-#include "utilities/stack.inline.hpp"
+#include "utilities/taskqueue.inline.hpp"
 
 PSOldGen*            ParCompactionManager::_old_gen = NULL;
 ParCompactionManager**  ParCompactionManager::_manager_array = NULL;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp	Mon Apr 27 10:04:26 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp	Tue Apr 28 12:17:56 2015 +0000
@@ -179,17 +179,9 @@
   // Access function for compaction managers
   static ParCompactionManager* gc_thread_compaction_manager(int index);
 
-  static bool steal(int queue_num, int* seed, oop& t) {
-    return stack_array()->steal(queue_num, seed, t);
-  }
-
-  static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t) {
-    return _objarray_queues->steal(queue_num, seed, t);
-  }
-
-  static bool steal(int queue_num, int* seed, size_t& region) {
-    return region_array()->steal(queue_num, seed, region);
-  }
+  static bool steal(int queue_num, int* seed, oop& t);
+  static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t);
+  static bool steal(int queue_num, int* seed, size_t& region);
 
   // Process tasks remaining on any marking stack
   void follow_marking_stacks();
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp	Mon Apr 27 10:04:26 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp	Tue Apr 28 12:17:56 2015 +0000
@@ -31,6 +31,19 @@
 #include "oops/oop.inline.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/taskqueue.inline.hpp"
+
+inline bool ParCompactionManager::steal(int queue_num, int* seed, oop& t) {
+  return stack_array()->steal(queue_num, seed, t);
+}
+
+inline bool ParCompactionManager::steal_objarray(int queue_num, int* seed, ObjArrayTask& t) {
+  return _objarray_queues->steal(queue_num, seed, t);
+}
+
+inline bool ParCompactionManager::steal(int queue_num, int* seed, size_t& region) {
+  return region_array()->steal(queue_num, seed, region);
+}
 
 inline void ParCompactionManager::push(oop obj) {
   _marking_stack.push(obj);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Mon Apr 27 10:04:26 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Tue Apr 28 12:17:56 2015 +0000
@@ -36,7 +36,7 @@
 #include "oops/instanceMirrorKlass.inline.hpp"
 #include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "utilities/stack.inline.hpp"
+#include "utilities/taskqueue.inline.hpp"
 
 PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
 OopStarTaskQueueSet*           PSPromotionManager::_stack_array_depth = NULL;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Mon Apr 27 10:04:26 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Tue Apr 28 12:17:56 2015 +0000
@@ -139,9 +139,7 @@
                                                     int start, int end);
   void process_array_chunk(oop old);
 
-  template <class T> void push_depth(T* p) {
-    claimed_stack_depth()->push(p);
-  }
+  template <class T> void push_depth(T* p);
 
   inline void promotion_trace_event(oop new_obj, oop old_obj, size_t obj_size,
                                     uint age, bool tenured,
@@ -159,9 +157,7 @@
   static PSPromotionManager* gc_thread_promotion_manager(int index);
   static PSPromotionManager* vm_thread_promotion_manager();
 
-  static bool steal_depth(int queue_num, int* seed, StarTask& t) {
-    return stack_array_depth()->steal(queue_num, seed, t);
-  }
+  static bool steal_depth(int queue_num, int* seed, StarTask& t);
 
   PSPromotionManager();
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Mon Apr 27 10:04:26 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Tue Apr 28 12:17:56 2015 +0000
@@ -31,6 +31,7 @@
 #include "gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 #include "oops/oop.inline.hpp"
+#include "utilities/taskqueue.inline.hpp"
 
 inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
   assert(_manager_array != NULL, "access of NULL manager_array");
@@ -39,6 +40,11 @@
 }
 
 template <class T>
+inline void PSPromotionManager::push_depth(T* p) {
+  claimed_stack_depth()->push(p);
+}
+
+template <class T>
 inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
   if (p != NULL) { // XXX: error if p != NULL here
     oop o = oopDesc::load_decode_heap_oop_not_null(p);
@@ -99,7 +105,7 @@
 // performance.
 //
 template<bool promote_immediately>
-oop PSPromotionManager::copy_to_survivor_space(oop o) {
+inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
   assert(should_scavenge(&o), "Sanity");
 
   oop new_obj = NULL;
@@ -317,6 +323,10 @@
   }
 }
 
+inline bool PSPromotionManager::steal_depth(int queue_num, int* seed, StarTask& t) {
+  return stack_array_depth()->steal(queue_num, seed, t);
+}
+
 #if TASKQUEUE_STATS
 void PSPromotionManager::record_steal(StarTask& p) {
   if (is_oop_masked(p)) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Mon Apr 27 10:04:26 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Tue Apr 28 12:17:56 2015 +0000
@@ -39,8 +39,7 @@
 #include "runtime/thread.hpp"
 #include "runtime/vmThread.hpp"
 #include "services/management.hpp"
-#include "utilities/stack.inline.hpp"
-#include "utilities/taskqueue.hpp"
+#include "utilities/taskqueue.inline.hpp"
 
 //
 // ScavengeRootsTask
--- a/hotspot/src/share/vm/runtime/thread.cpp	Mon Apr 27 10:04:26 2015 +0200
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Tue Apr 28 12:17:56 2015 +0000
@@ -4210,13 +4210,13 @@
                Abstract_VM_Version::vm_info_string());
   st->cr();
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_SERVICES
   // Dump concurrent locks
   ConcurrentLocksDump concurrent_locks;
   if (print_concurrent_locks) {
     concurrent_locks.dump_at_safepoint();
   }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_SERVICES
 
   ALL_JAVA_THREADS(p) {
     ResourceMark rm;
@@ -4229,11 +4229,11 @@
       }
     }
     st->cr();
-#if INCLUDE_ALL_GCS
+#if INCLUDE_SERVICES
     if (print_concurrent_locks) {
       concurrent_locks.print_locks_on(p, st);
     }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_SERVICES
   }
 
   VMThread::vm_thread()->print_on(st);
--- a/hotspot/src/share/vm/utilities/taskqueue.hpp	Mon Apr 27 10:04:26 2015 +0200
+++ b/hotspot/src/share/vm/utilities/taskqueue.hpp	Tue Apr 28 12:17:56 2015 +0000
@@ -26,9 +26,6 @@
 #define SHARE_VM_UTILITIES_TASKQUEUE_HPP
 
 #include "memory/allocation.hpp"
-#include "memory/allocation.inline.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/orderAccess.inline.hpp"
 #include "utilities/stack.hpp"
 
 // Simple TaskQueue stats that are collected by default in debug builds.
@@ -134,11 +131,7 @@
       if (_fields._top == 0) ++_fields._tag;
     }
 
-    Age cmpxchg(const Age new_age, const Age old_age) volatile {
-      return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data,
-                                          (volatile intptr_t *)&_data,
-                                          (intptr_t)old_age._data);
-    }
+    Age cmpxchg(const Age new_age, const Age old_age) volatile;
 
     bool operator ==(const Age& other) const { return _data == other._data; }
 
@@ -315,121 +308,6 @@
   assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
 }
 
-template<class E, MEMFLAGS F, unsigned int N>
-void GenericTaskQueue<E, F, N>::initialize() {
-  _elems = _array_allocator.allocate(N);
-}
-
-template<class E, MEMFLAGS F, unsigned int N>
-void GenericTaskQueue<E, F, N>::oops_do(OopClosure* f) {
-  // tty->print_cr("START OopTaskQueue::oops_do");
-  uint iters = size();
-  uint index = _bottom;
-  for (uint i = 0; i < iters; ++i) {
-    index = decrement_index(index);
-    // tty->print_cr("  doing entry %d," INTPTR_T " -> " INTPTR_T,
-    //            index, &_elems[index], _elems[index]);
-    E* t = (E*)&_elems[index];      // cast away volatility
-    oop* p = (oop*)t;
-    assert((*t)->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(*t)));
-    f->do_oop(p);
-  }
-  // tty->print_cr("END OopTaskQueue::oops_do");
-}
-
-template<class E, MEMFLAGS F, unsigned int N>
-bool GenericTaskQueue<E, F, N>::push_slow(E t, uint dirty_n_elems) {
-  if (dirty_n_elems == N - 1) {
-    // Actually means 0, so do the push.
-    uint localBot = _bottom;
-    // g++ complains if the volatile result of the assignment is
-    // unused, so we cast the volatile away.  We cannot cast directly
-    // to void, because gcc treats that as not using the result of the
-    // assignment.  However, casting to E& means that we trigger an
-    // unused-value warning.  So, we cast the E& to void.
-    (void)const_cast<E&>(_elems[localBot] = t);
-    OrderAccess::release_store(&_bottom, increment_index(localBot));
-    TASKQUEUE_STATS_ONLY(stats.record_push());
-    return true;
-  }
-  return false;
-}
-
-// pop_local_slow() is done by the owning thread and is trying to
-// get the last task in the queue.  It will compete with pop_global()
-// that will be used by other threads.  The tag age is incremented
-// whenever the queue goes empty which it will do here if this thread
-// gets the last task or in pop_global() if the queue wraps (top == 0
-// and pop_global() succeeds, see pop_global()).
-template<class E, MEMFLAGS F, unsigned int N>
-bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
-  // This queue was observed to contain exactly one element; either this
-  // thread will claim it, or a competing "pop_global".  In either case,
-  // the queue will be logically empty afterwards.  Create a new Age value
-  // that represents the empty queue for the given value of "_bottom".  (We
-  // must also increment "tag" because of the case where "bottom == 1",
-  // "top == 0".  A pop_global could read the queue element in that case,
-  // then have the owner thread do a pop followed by another push.  Without
-  // the incrementing of "tag", the pop_global's CAS could succeed,
-  // allowing it to believe it has claimed the stale element.)
-  Age newAge((idx_t)localBot, oldAge.tag() + 1);
-  // Perhaps a competing pop_global has already incremented "top", in which
-  // case it wins the element.
-  if (localBot == oldAge.top()) {
-    // No competing pop_global has yet incremented "top"; we'll try to
-    // install new_age, thus claiming the element.
-    Age tempAge = _age.cmpxchg(newAge, oldAge);
-    if (tempAge == oldAge) {
-      // We win.
-      assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
-      TASKQUEUE_STATS_ONLY(stats.record_pop_slow());
-      return true;
-    }
-  }
-  // We lose; a competing pop_global gets the element.  But the queue is empty
-  // and top is greater than bottom.  Fix this representation of the empty queue
-  // to become the canonical one.
-  _age.set(newAge);
-  assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
-  return false;
-}
-
-template<class E, MEMFLAGS F, unsigned int N>
-bool GenericTaskQueue<E, F, N>::pop_global(volatile E& t) {
-  Age oldAge = _age.get();
-  // Architectures with a weak memory model require a barrier here
-  // to guarantee that bottom is not older than age,
-  // which is crucial for the correctness of the algorithm.
-#if !(defined SPARC || defined IA32 || defined AMD64)
-  OrderAccess::fence();
-#endif
-  uint localBot = OrderAccess::load_acquire((volatile juint*)&_bottom);
-  uint n_elems = size(localBot, oldAge.top());
-  if (n_elems == 0) {
-    return false;
-  }
-
-  // g++ complains if the volatile result of the assignment is
-  // unused, so we cast the volatile away.  We cannot cast directly
-  // to void, because gcc treats that as not using the result of the
-  // assignment.  However, casting to E& means that we trigger an
-  // unused-value warning.  So, we cast the E& to void.
-  (void) const_cast<E&>(t = _elems[oldAge.top()]);
-  Age newAge(oldAge);
-  newAge.increment();
-  Age resAge = _age.cmpxchg(newAge, oldAge);
-
-  // Note that using "_bottom" here might fail, since a pop_local might
-  // have decremented it.
-  assert(dirty_size(localBot, newAge.top()) != N - 1, "sanity");
-  return resAge == oldAge;
-}
-
-template<class E, MEMFLAGS F, unsigned int N>
-GenericTaskQueue<E, F, N>::~GenericTaskQueue() {
-  FREE_C_HEAP_ARRAY(E, _elems);
-}
-
 // OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
 // elements that do not fit in the TaskQueue.
 //
@@ -468,24 +346,6 @@
   overflow_t _overflow_stack;
 };
 
-template <class E, MEMFLAGS F, unsigned int N>
-bool OverflowTaskQueue<E, F, N>::push(E t)
-{
-  if (!taskqueue_t::push(t)) {
-    overflow_stack()->push(t);
-    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size()));
-  }
-  return true;
-}
-
-template <class E, MEMFLAGS F, unsigned int N>
-bool OverflowTaskQueue<E, F, N>::pop_overflow(E& t)
-{
-  if (overflow_empty()) return false;
-  t = overflow_stack()->pop();
-  return true;
-}
-
 class TaskQueueSetSuper {
 protected:
   static int randomParkAndMiller(int* seed0);
@@ -506,13 +366,7 @@
 public:
   typedef typename T::element_type E;
 
-  GenericTaskQueueSet(int n) : _n(n) {
-    typedef T* GenericTaskQueuePtr;
-    _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F);
-    for (int i = 0; i < n; i++) {
-      _queues[i] = NULL;
-    }
-  }
+  GenericTaskQueueSet(int n);
 
   bool steal_best_of_2(uint queue_num, int* seed, E& t);
 
@@ -541,40 +395,6 @@
   return _queues[i];
 }
 
-template<class T, MEMFLAGS F> bool
-GenericTaskQueueSet<T, F>::steal(uint queue_num, int* seed, E& t) {
-  for (uint i = 0; i < 2 * _n; i++) {
-    if (steal_best_of_2(queue_num, seed, t)) {
-      TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true));
-      return true;
-    }
-  }
-  TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false));
-  return false;
-}
-
-template<class T, MEMFLAGS F> bool
-GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, int* seed, E& t) {
-  if (_n > 2) {
-    uint k1 = queue_num;
-    while (k1 == queue_num) k1 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
-    uint k2 = queue_num;
-    while (k2 == queue_num || k2 == k1) k2 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
-    // Sample both and try the larger.
-    uint sz1 = _queues[k1]->size();
-    uint sz2 = _queues[k2]->size();
-    if (sz2 > sz1) return _queues[k2]->pop_global(t);
-    else return _queues[k1]->pop_global(t);
-  } else if (_n == 2) {
-    // Just try the other one.
-    uint k = (queue_num + 1) % 2;
-    return _queues[k]->pop_global(t);
-  } else {
-    assert(_n == 1, "can't be zero.");
-    return false;
-  }
-}
-
 template<class T, MEMFLAGS F>
 bool GenericTaskQueueSet<T, F>::peek() {
   // Try all the queues.
@@ -649,65 +469,6 @@
 #endif
 };
 
-template<class E, MEMFLAGS F, unsigned int N> inline bool
-GenericTaskQueue<E, F, N>::push(E t) {
-  uint localBot = _bottom;
-  assert(localBot < N, "_bottom out of range.");
-  idx_t top = _age.top();
-  uint dirty_n_elems = dirty_size(localBot, top);
-  assert(dirty_n_elems < N, "n_elems out of range.");
-  if (dirty_n_elems < max_elems()) {
-    // g++ complains if the volatile result of the assignment is
-    // unused, so we cast the volatile away.  We cannot cast directly
-    // to void, because gcc treats that as not using the result of the
-    // assignment.  However, casting to E& means that we trigger an
-    // unused-value warning.  So, we cast the E& to void.
-    (void) const_cast<E&>(_elems[localBot] = t);
-    OrderAccess::release_store(&_bottom, increment_index(localBot));
-    TASKQUEUE_STATS_ONLY(stats.record_push());
-    return true;
-  } else {
-    return push_slow(t, dirty_n_elems);
-  }
-}
-
-template<class E, MEMFLAGS F, unsigned int N> inline bool
-GenericTaskQueue<E, F, N>::pop_local(volatile E& t) {
-  uint localBot = _bottom;
-  // This value cannot be N-1.  That can only occur as a result of
-  // the assignment to bottom in this method.  If it does, this method
-  // resets the size to 0 before the next call (which is sequential,
-  // since this is pop_local.)
-  uint dirty_n_elems = dirty_size(localBot, _age.top());
-  assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
-  if (dirty_n_elems == 0) return false;
-  localBot = decrement_index(localBot);
-  _bottom = localBot;
-  // This is necessary to prevent any read below from being reordered
-  // before the store just above.
-  OrderAccess::fence();
-  // g++ complains if the volatile result of the assignment is
-  // unused, so we cast the volatile away.  We cannot cast directly
-  // to void, because gcc treats that as not using the result of the
-  // assignment.  However, casting to E& means that we trigger an
-  // unused-value warning.  So, we cast the E& to void.
-  (void) const_cast<E&>(t = _elems[localBot]);
-  // This is a second read of "age"; the "size()" above is the first.
-  // If there's still at least one element in the queue, based on the
-  // "_bottom" and "age" we've read, then there can be no interference with
-  // a "pop_global" operation, and we're done.
-  idx_t tp = _age.top();    // XXX
-  if (size(localBot, tp) > 0) {
-    assert(dirty_size(localBot, tp) != N - 1, "sanity");
-    TASKQUEUE_STATS_ONLY(stats.record_pop());
-    return true;
-  } else {
-    // Otherwise, the queue contained exactly one element; we take the slow
-    // path.
-    return pop_local_slow(localBot, _age.get());
-  }
-}
-
 typedef GenericTaskQueue<oop, mtGC>             OopTaskQueue;
 typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;
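
The declarations that remain above still carry the combined (top, tag) Age word. The tag exists to defeat an ABA race on the CAS in pop_local_slow()/pop_global(); a worked walkthrough of the bottom == 1, top == 0 case named in the pop_local_slow() comment (the step numbering is illustrative):

    1. The queue holds one element: bottom == 1, top == 0, tag == t.
    2. A thief in pop_global() reads age (top == 0, tag == t) and the
       element at index 0, then stalls before its cmpxchg.
    3. The owner pops the last element, installing the empty age
       (top == 0, tag == t + 1), and then pushes a new element, so
       bottom == 1 and top == 0 again.
    4. The thief resumes. On top alone its cmpxchg would succeed and it
       would return the stale element read in step 2; because tag is now
       t + 1, the compare of the combined word fails, pop_global()
       returns false, and the caller retries.
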
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/utilities/taskqueue.inline.hpp	Tue Apr 28 12:17:56 2015 +0000
@@ -0,0 +1,279 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_TASKQUEUE_INLINE_HPP
+#define SHARE_VM_UTILITIES_TASKQUEUE_INLINE_HPP
+
+#include "memory/allocation.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/taskqueue.hpp"
+#include "utilities/stack.inline.hpp"
+#include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
+
+template <class T, MEMFLAGS F>
+inline GenericTaskQueueSet<T, F>::GenericTaskQueueSet(int n) : _n(n) {
+  typedef T* GenericTaskQueuePtr;
+  _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F);
+  for (int i = 0; i < n; i++) {
+    _queues[i] = NULL;
+  }
+}
+
+template<class E, MEMFLAGS F, unsigned int N>
+inline void GenericTaskQueue<E, F, N>::initialize() {
+  _elems = _array_allocator.allocate(N);
+}
+
+template<class E, MEMFLAGS F, unsigned int N>
+inline GenericTaskQueue<E, F, N>::~GenericTaskQueue() {
+  FREE_C_HEAP_ARRAY(E, _elems);
+}
+
+template<class E, MEMFLAGS F, unsigned int N>
+bool GenericTaskQueue<E, F, N>::push_slow(E t, uint dirty_n_elems) {
+  if (dirty_n_elems == N - 1) {
+    // Actually means 0, so do the push.
+    uint localBot = _bottom;
+    // g++ complains if the volatile result of the assignment is
+    // unused, so we cast the volatile away.  We cannot cast directly
+    // to void, because gcc treats that as not using the result of the
+    // assignment.  However, casting to E& means that we trigger an
+    // unused-value warning.  So, we cast the E& to void.
+    (void)const_cast<E&>(_elems[localBot] = t);
+    OrderAccess::release_store(&_bottom, increment_index(localBot));
+    TASKQUEUE_STATS_ONLY(stats.record_push());
+    return true;
+  }
+  return false;
+}
+
+template<class E, MEMFLAGS F, unsigned int N> inline bool
+GenericTaskQueue<E, F, N>::push(E t) {
+  uint localBot = _bottom;
+  assert(localBot < N, "_bottom out of range.");
+  idx_t top = _age.top();
+  uint dirty_n_elems = dirty_size(localBot, top);
+  assert(dirty_n_elems < N, "n_elems out of range.");
+  if (dirty_n_elems < max_elems()) {
+    // g++ complains if the volatile result of the assignment is
+    // unused, so we cast the volatile away.  We cannot cast directly
+    // to void, because gcc treats that as not using the result of the
+    // assignment.  However, casting to E& means that we trigger an
+    // unused-value warning.  So, we cast the E& to void.
+    (void) const_cast<E&>(_elems[localBot] = t);
+    OrderAccess::release_store(&_bottom, increment_index(localBot));
+    TASKQUEUE_STATS_ONLY(stats.record_push());
+    return true;
+  } else {
+    return push_slow(t, dirty_n_elems);
+  }
+}
+
+template <class E, MEMFLAGS F, unsigned int N>
+inline bool OverflowTaskQueue<E, F, N>::push(E t)
+{
+  if (!taskqueue_t::push(t)) {
+    overflow_stack()->push(t);
+    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size()));
+  }
+  return true;
+}
+
+// pop_local_slow() is done by the owning thread and is trying to
+// get the last task in the queue.  It will compete with pop_global()
+// that will be used by other threads.  The tag age is incremented
+// whenever the queue goes empty which it will do here if this thread
+// gets the last task or in pop_global() if the queue wraps (top == 0
+// and pop_global() succeeds, see pop_global()).
+template<class E, MEMFLAGS F, unsigned int N>
+bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
+  // This queue was observed to contain exactly one element; either this
+  // thread will claim it, or a competing "pop_global".  In either case,
+  // the queue will be logically empty afterwards.  Create a new Age value
+  // that represents the empty queue for the given value of "_bottom".  (We
+  // must also increment "tag" because of the case where "bottom == 1",
+  // "top == 0".  A pop_global could read the queue element in that case,
+  // then have the owner thread do a pop followed by another push.  Without
+  // the incrementing of "tag", the pop_global's CAS could succeed,
+  // allowing it to believe it has claimed the stale element.)
+  Age newAge((idx_t)localBot, oldAge.tag() + 1);
+  // Perhaps a competing pop_global has already incremented "top", in which
+  // case it wins the element.
+  if (localBot == oldAge.top()) {
+    // No competing pop_global has yet incremented "top"; we'll try to
+    // install new_age, thus claiming the element.
+    Age tempAge = _age.cmpxchg(newAge, oldAge);
+    if (tempAge == oldAge) {
+      // We win.
+      assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
+      TASKQUEUE_STATS_ONLY(stats.record_pop_slow());
+      return true;
+    }
+  }
+  // We lose; a competing pop_global gets the element.  But the queue is empty
+  // and top is greater than bottom.  Fix this representation of the empty queue
+  // to become the canonical one.
+  _age.set(newAge);
+  assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
+  return false;
+}
+
+template<class E, MEMFLAGS F, unsigned int N> inline bool
+GenericTaskQueue<E, F, N>::pop_local(volatile E& t) {
+  uint localBot = _bottom;
+  // This value cannot be N-1.  That can only occur as a result of
+  // the assignment to bottom in this method.  If it does, this method
+  // resets the size to 0 before the next call (which is sequential,
+  // since this is pop_local.)
+  uint dirty_n_elems = dirty_size(localBot, _age.top());
+  assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
+  if (dirty_n_elems == 0) return false;
+  localBot = decrement_index(localBot);
+  _bottom = localBot;
+  // This is necessary to prevent any read below from being reordered
+  // before the store just above.
+  OrderAccess::fence();
+  // g++ complains if the volatile result of the assignment is
+  // unused, so we cast the volatile away.  We cannot cast directly
+  // to void, because gcc treats that as not using the result of the
+  // assignment.  However, casting to E& means that we trigger an
+  // unused-value warning.  So, we cast the E& to void.
+  (void) const_cast<E&>(t = _elems[localBot]);
+  // This is a second read of "age"; the "size()" above is the first.
+  // If there's still at least one element in the queue, based on the
+  // "_bottom" and "age" we've read, then there can be no interference with
+  // a "pop_global" operation, and we're done.
+  idx_t tp = _age.top();    // XXX
+  if (size(localBot, tp) > 0) {
+    assert(dirty_size(localBot, tp) != N - 1, "sanity");
+    TASKQUEUE_STATS_ONLY(stats.record_pop());
+    return true;
+  } else {
+    // Otherwise, the queue contained exactly one element; we take the slow
+    // path.
+    return pop_local_slow(localBot, _age.get());
+  }
+}
+
+template <class E, MEMFLAGS F, unsigned int N>
+bool OverflowTaskQueue<E, F, N>::pop_overflow(E& t)
+{
+  if (overflow_empty()) return false;
+  t = overflow_stack()->pop();
+  return true;
+}
+
+template<class E, MEMFLAGS F, unsigned int N>
+bool GenericTaskQueue<E, F, N>::pop_global(volatile E& t) {
+  Age oldAge = _age.get();
+  // Architectures with a weak memory model require a barrier here
+  // to guarantee that bottom is not older than age,
+  // which is crucial for the correctness of the algorithm.
+#if !(defined SPARC || defined IA32 || defined AMD64)
+  OrderAccess::fence();
+#endif
+  uint localBot = OrderAccess::load_acquire((volatile juint*)&_bottom);
+  uint n_elems = size(localBot, oldAge.top());
+  if (n_elems == 0) {
+    return false;
+  }
+
+  // g++ complains if the volatile result of the assignment is
+  // unused, so we cast the volatile away.  We cannot cast directly
+  // to void, because gcc treats that as not using the result of the
+  // assignment.  However, casting to E& means that we trigger an
+  // unused-value warning.  So, we cast the E& to void.
+  (void) const_cast<E&>(t = _elems[oldAge.top()]);
+  Age newAge(oldAge);
+  newAge.increment();
+  Age resAge = _age.cmpxchg(newAge, oldAge);
+
+  // Note that using "_bottom" here might fail, since a pop_local might
+  // have decremented it.
+  assert(dirty_size(localBot, newAge.top()) != N - 1, "sanity");
+  return resAge == oldAge;
+}
+
+template<class T, MEMFLAGS F> bool
+GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, int* seed, E& t) {
+  if (_n > 2) {
+    uint k1 = queue_num;
+    while (k1 == queue_num) k1 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
+    uint k2 = queue_num;
+    while (k2 == queue_num || k2 == k1) k2 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
+    // Sample both and try the larger.
+    uint sz1 = _queues[k1]->size();
+    uint sz2 = _queues[k2]->size();
+    if (sz2 > sz1) return _queues[k2]->pop_global(t);
+    else return _queues[k1]->pop_global(t);
+  } else if (_n == 2) {
+    // Just try the other one.
+    uint k = (queue_num + 1) % 2;
+    return _queues[k]->pop_global(t);
+  } else {
+    assert(_n == 1, "can't be zero.");
+    return false;
+  }
+}
+
+template<class T, MEMFLAGS F> bool
+GenericTaskQueueSet<T, F>::steal(uint queue_num, int* seed, E& t) {
+  for (uint i = 0; i < 2 * _n; i++) {
+    if (steal_best_of_2(queue_num, seed, t)) {
+      TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true));
+      return true;
+    }
+  }
+  TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false));
+  return false;
+}
+
+template <unsigned int N, MEMFLAGS F>
+inline typename TaskQueueSuper<N, F>::Age TaskQueueSuper<N, F>::Age::cmpxchg(const Age new_age, const Age old_age) volatile {
+  return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data,
+                                      (volatile intptr_t *)&_data,
+                                      (intptr_t)old_age._data);
+}
+
+template<class E, MEMFLAGS F, unsigned int N>
+inline void GenericTaskQueue<E, F, N>::oops_do(OopClosure* f) {
+  // tty->print_cr("START OopTaskQueue::oops_do");
+  uint iters = size();
+  uint index = _bottom;
+  for (uint i = 0; i < iters; ++i) {
+    index = decrement_index(index);
+    // tty->print_cr("  doing entry %d," INTPTR_T " -> " INTPTR_T,
+    //            index, &_elems[index], _elems[index]);
+    E* t = (E*)&_elems[index];      // cast away volatility
+    oop* p = (oop*)t;
+    assert((*t)->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(*t)));
+    f->do_oop(p);
+  }
+  // tty->print_cr("END OopTaskQueue::oops_do");
+}
+
+
+#endif // SHARE_VM_UTILITIES_TASKQUEUE_INLINE_HPP
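
Taken together, the new file gathers every definition that needs allocation.inline.hpp, atomic.inline.hpp, or orderAccess.inline.hpp, so clients that only name the queue types can keep including plain taskqueue.hpp. A minimal usage sketch of the moved API, using the OopTaskQueue/OopTaskQueueSet typedefs from taskqueue.hpp; the worker ids, seed value, and make_work() helper are illustrative assumptions:

    #include "utilities/taskqueue.inline.hpp"

    void two_worker_sketch() {
      OopTaskQueueSet* set = new OopTaskQueueSet(2);
      OopTaskQueue q0;
      OopTaskQueue q1;
      q0.initialize();
      q1.initialize();
      set->register_queue(0, &q0);
      set->register_queue(1, &q1);

      q0.push(make_work());            // worker 0 produces a task
      oop task;
      int seed = 17;                   // per-thread steal seed (hypothetical value)
      if (!q1.pop_local(task)) {       // worker 1 finds its own queue empty...
        if (set->steal(1, &seed, task)) {
          // ...and takes worker 0's task via steal_best_of_2().
        }
      }
    }
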