changeset 53550:6df094be7f58

8213352: Separate BufferNode allocation from PtrQueueSet Summary: Move free-list management and allocation to new class. Reviewed-by: tschatzl, sjohanss
author kbarrett
date Thu, 15 Nov 2018 19:59:10 -0500
parents d402a406bbc3
children a3aa8d5380d9
files src/hotspot/share/gc/g1/dirtyCardQueue.cpp src/hotspot/share/gc/g1/dirtyCardQueue.hpp src/hotspot/share/gc/g1/g1BarrierSet.cpp src/hotspot/share/gc/g1/g1BarrierSet.hpp src/hotspot/share/gc/g1/g1CollectedHeap.cpp src/hotspot/share/gc/g1/g1ConcurrentMark.cpp src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp src/hotspot/share/gc/g1/g1SATBMarkQueueSet.hpp src/hotspot/share/gc/shared/ptrQueue.cpp src/hotspot/share/gc/shared/ptrQueue.hpp src/hotspot/share/gc/shared/satbMarkQueue.cpp src/hotspot/share/gc/shared/satbMarkQueue.hpp test/hotspot/gtest/gc/shared/test_ptrQueueBufferAllocator.cpp
diffstat 13 files changed, 245 insertions(+), 123 deletions(-) [+]
line wrap: on
line diff
--- a/src/hotspot/share/gc/g1/dirtyCardQueue.cpp	Thu Nov 15 17:55:41 2018 -0500
+++ b/src/hotspot/share/gc/g1/dirtyCardQueue.cpp	Thu Nov 15 19:59:10 2018 -0500
@@ -147,18 +147,15 @@
 }
 
 void DirtyCardQueueSet::initialize(Monitor* cbl_mon,
-                                   Mutex* fl_lock,
+                                   BufferNode::Allocator* allocator,
                                    int process_completed_threshold,
                                    int max_completed_queue,
                                    Mutex* lock,
-                                   DirtyCardQueueSet* fl_owner,
                                    bool init_free_ids) {
   PtrQueueSet::initialize(cbl_mon,
-                          fl_lock,
+                          allocator,
                           process_completed_threshold,
-                          max_completed_queue,
-                          fl_owner);
-  set_buffer_size(G1UpdateBufferSize);
+                          max_completed_queue);
   _shared_dirty_card_queue.set_lock(lock);
   if (init_free_ids) {
     _free_ids = new FreeIdSet(num_par_ids(), _cbl_mon);
--- a/src/hotspot/share/gc/g1/dirtyCardQueue.hpp	Thu Nov 15 17:55:41 2018 -0500
+++ b/src/hotspot/share/gc/g1/dirtyCardQueue.hpp	Thu Nov 15 19:59:10 2018 -0500
@@ -118,11 +118,10 @@
   DirtyCardQueueSet(bool notify_when_complete = true);
 
   void initialize(Monitor* cbl_mon,
-                  Mutex* fl_lock,
+                  BufferNode::Allocator* allocator,
                   int process_completed_threshold,
                   int max_completed_queue,
                   Mutex* lock,
-                  DirtyCardQueueSet* fl_owner,
                   bool init_free_ids = false);
 
   // The number of parallel ids that can be claimed to allow collector or
--- a/src/hotspot/share/gc/g1/g1BarrierSet.cpp	Thu Nov 15 17:55:41 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.cpp	Thu Nov 15 19:59:10 2018 -0500
@@ -55,6 +55,8 @@
                       make_barrier_set_c2<G1BarrierSetC2>(),
                       card_table,
                       BarrierSet::FakeRtti(BarrierSet::G1BarrierSet)),
+  _satb_mark_queue_buffer_allocator(G1SATBBufferSize, SATB_Q_FL_lock),
+  _dirty_card_queue_buffer_allocator(G1UpdateBufferSize, DirtyCardQ_FL_lock),
   _satb_mark_queue_set(),
   _dirty_card_queue_set()
 {}
@@ -202,3 +204,11 @@
   G1ThreadLocalData::satb_mark_queue(thread).flush();
   G1ThreadLocalData::dirty_card_queue(thread).flush();
 }
+
+BufferNode::Allocator& G1BarrierSet::satb_mark_queue_buffer_allocator() {
+  return _satb_mark_queue_buffer_allocator;
+}
+
+BufferNode::Allocator& G1BarrierSet::dirty_card_queue_buffer_allocator() {
+  return _dirty_card_queue_buffer_allocator;
+}
--- a/src/hotspot/share/gc/g1/g1BarrierSet.hpp	Thu Nov 15 17:55:41 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.hpp	Thu Nov 15 19:59:10 2018 -0500
@@ -39,6 +39,8 @@
 class G1BarrierSet: public CardTableBarrierSet {
   friend class VMStructs;
  private:
+  BufferNode::Allocator _satb_mark_queue_buffer_allocator;
+  BufferNode::Allocator _dirty_card_queue_buffer_allocator;
   G1SATBMarkQueueSet _satb_mark_queue_set;
   DirtyCardQueueSet _dirty_card_queue_set;
 
@@ -79,6 +81,9 @@
   virtual void on_thread_attach(JavaThread* thread);
   virtual void on_thread_detach(JavaThread* thread);
 
+  BufferNode::Allocator& satb_mark_queue_buffer_allocator();
+  BufferNode::Allocator& dirty_card_queue_buffer_allocator();
+
   static G1SATBMarkQueueSet& satb_mark_queue_set() {
     return g1_barrier_set()->_satb_mark_queue_set;
   }
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Thu Nov 15 17:55:41 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Thu Nov 15 19:59:10 2018 -0500
@@ -1653,6 +1653,28 @@
   BarrierSet::set_barrier_set(bs);
   _card_table = ct;
 
+  G1BarrierSet::satb_mark_queue_set().initialize(this,
+                                                 SATB_Q_CBL_mon,
+                                                 &bs->satb_mark_queue_buffer_allocator(),
+                                                 G1SATBProcessCompletedThreshold,
+                                                 G1SATBBufferEnqueueingThresholdPercent,
+                                                 Shared_SATB_Q_lock);
+
+  // process_completed_threshold and max_completed_queue are updated
+  // later, based on the concurrent refinement object.
+  G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
+                                                  &bs->dirty_card_queue_buffer_allocator(),
+                                                  -1, // temp. never trigger
+                                                  -1, // temp. no limit
+                                                  Shared_DirtyCardQ_lock,
+                                                  true); // init_free_ids
+
+  dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
+                                    &bs->dirty_card_queue_buffer_allocator(),
+                                    -1, // never trigger processing
+                                    -1, // no limit on length
+                                    Shared_DirtyCardQ_lock);
+
   // Create the hot card cache.
   _hot_card_cache = new G1HotCardCache(this);
 
@@ -1749,13 +1771,6 @@
   // Perform any initialization actions delegated to the policy.
   g1_policy()->init(this, &_collection_set);
 
-  G1BarrierSet::satb_mark_queue_set().initialize(this,
-                                                 SATB_Q_CBL_mon,
-                                                 SATB_Q_FL_lock,
-                                                 G1SATBProcessCompletedThreshold,
-                                                 G1SATBBufferEnqueueingThresholdPercent,
-                                                 Shared_SATB_Q_lock);
-
   jint ecode = initialize_concurrent_refinement();
   if (ecode != JNI_OK) {
     return ecode;
@@ -1766,20 +1781,11 @@
     return ecode;
   }
 
-  G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
-                                                  DirtyCardQ_FL_lock,
-                                                  (int)concurrent_refine()->yellow_zone(),
-                                                  (int)concurrent_refine()->red_zone(),
-                                                  Shared_DirtyCardQ_lock,
-                                                  NULL,  // fl_owner
-                                                  true); // init_free_ids
-
-  dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
-                                    DirtyCardQ_FL_lock,
-                                    -1, // never trigger processing
-                                    -1, // no limit on length
-                                    Shared_DirtyCardQ_lock,
-                                    &G1BarrierSet::dirty_card_queue_set());
+  {
+    DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
+    dcqs.set_process_completed_threshold((int)concurrent_refine()->yellow_zone());
+    dcqs.set_max_completed_queue((int)concurrent_refine()->red_zone());
+  }
 
   // Here we allocate the dummy HeapRegion that is required by the
   // G1AllocRegion class.
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Thu Nov 15 17:55:41 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Thu Nov 15 19:59:10 2018 -0500
@@ -406,9 +406,6 @@
 
   assert(CGC_lock != NULL, "CGC_lock must be initialized");
 
-  SATBMarkQueueSet& satb_qs = G1BarrierSet::satb_mark_queue_set();
-  satb_qs.set_buffer_size(G1SATBBufferSize);
-
   _root_regions.init(_g1h->survivor(), this);
 
   if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
--- a/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp	Thu Nov 15 17:55:41 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp	Thu Nov 15 19:59:10 2018 -0500
@@ -35,11 +35,13 @@
 G1SATBMarkQueueSet::G1SATBMarkQueueSet() : _g1h(NULL) {}
 
 void G1SATBMarkQueueSet::initialize(G1CollectedHeap* g1h,
-                                    Monitor* cbl_mon, Mutex* fl_lock,
+                                    Monitor* cbl_mon,
+                                    BufferNode::Allocator* allocator,
                                     int process_completed_threshold,
                                     uint buffer_enqueue_threshold_percentage,
                                     Mutex* lock) {
-  SATBMarkQueueSet::initialize(cbl_mon, fl_lock,
+  SATBMarkQueueSet::initialize(cbl_mon,
+                               allocator,
                                process_completed_threshold,
                                buffer_enqueue_threshold_percentage,
                                lock);
--- a/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.hpp	Thu Nov 15 17:55:41 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.hpp	Thu Nov 15 19:59:10 2018 -0500
@@ -37,7 +37,8 @@
   G1SATBMarkQueueSet();
 
   void initialize(G1CollectedHeap* g1h,
-                  Monitor* cbl_mon, Mutex* fl_lock,
+                  Monitor* cbl_mon,
+                  BufferNode::Allocator* allocator,
                   int process_completed_threshold,
                   uint buffer_enqueue_threshold_percentage,
                   Mutex* lock);
--- a/src/hotspot/share/gc/shared/ptrQueue.cpp	Thu Nov 15 17:55:41 2018 -0500
+++ b/src/hotspot/share/gc/shared/ptrQueue.cpp	Thu Nov 15 19:59:10 2018 -0500
@@ -26,6 +26,7 @@
 #include "gc/shared/ptrQueue.hpp"
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/mutex.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/thread.inline.hpp"
@@ -90,25 +91,92 @@
   FREE_C_HEAP_ARRAY(char, node);
 }
 
+BufferNode::Allocator::Allocator(size_t buffer_size, Mutex* lock) :
+  _buffer_size(buffer_size),
+  _lock(lock),
+  _free_list(NULL),
+  _free_count(0)
+{
+  assert(lock != NULL, "precondition");
+}
+
+BufferNode::Allocator::~Allocator() {
+  while (_free_list != NULL) {
+    BufferNode* node = _free_list;
+    _free_list = node->next();
+    BufferNode::deallocate(node);
+  }
+}
+
+size_t BufferNode::Allocator::free_count() const {
+  return Atomic::load(&_free_count);
+}
+
+BufferNode* BufferNode::Allocator::allocate() {
+  BufferNode* node = NULL;
+  {
+    MutexLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
+    node = _free_list;
+    if (node != NULL) {
+      _free_list = node->next();
+      --_free_count;
+      node->set_next(NULL);
+      node->set_index(0);
+      return node;
+    }
+  }
+  return BufferNode::allocate(_buffer_size);
+}
+
+void BufferNode::Allocator::release(BufferNode* node) {
+  MutexLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
+  node->set_next(_free_list);
+  _free_list = node;
+  ++_free_count;
+}
+
+void BufferNode::Allocator::reduce_free_list() {
+  BufferNode* head = NULL;
+  {
+    MutexLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
+    // For now, delete half.
+    size_t remove = _free_count / 2;
+    if (remove > 0) {
+      head = _free_list;
+      BufferNode* tail = head;
+      BufferNode* prev = NULL;
+      for (size_t i = 0; i < remove; ++i) {
+        assert(tail != NULL, "free list size is wrong");
+        prev = tail;
+        tail = tail->next();
+      }
+      assert(prev != NULL, "invariant");
+      assert(prev->next() == tail, "invariant");
+      prev->set_next(NULL);
+      _free_list = tail;
+      _free_count -= remove;
+    }
+  }
+  while (head != NULL) {
+    BufferNode* next = head->next();
+    BufferNode::deallocate(head);
+    head = next;
+  }
+}
+
 PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
-  _buffer_size(0),
+  _allocator(NULL),
   _cbl_mon(NULL),
   _completed_buffers_head(NULL),
   _completed_buffers_tail(NULL),
   _n_completed_buffers(0),
   _process_completed_threshold(0),
   _process_completed(false),
-  _fl_lock(NULL),
-  _buf_free_list(NULL),
-  _buf_free_list_sz(0),
-  _fl_owner(NULL),
   _all_active(false),
   _notify_when_complete(notify_when_complete),
   _max_completed_queue(0),
   _completed_queue_padding(0)
-{
-  _fl_owner = this;
-}
+{}
 
 PtrQueueSet::~PtrQueueSet() {
   // There are presently only a couple (derived) instances ever
@@ -117,59 +185,24 @@
 }
 
 void PtrQueueSet::initialize(Monitor* cbl_mon,
-                             Mutex* fl_lock,
+                             BufferNode::Allocator* allocator,
                              int process_completed_threshold,
-                             int max_completed_queue,
-                             PtrQueueSet *fl_owner) {
+                             int max_completed_queue) {
   _max_completed_queue = max_completed_queue;
   _process_completed_threshold = process_completed_threshold;
   _completed_queue_padding = 0;
-  assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
+  assert(cbl_mon != NULL && allocator != NULL, "Init order issue?");
   _cbl_mon = cbl_mon;
-  _fl_lock = fl_lock;
-  _fl_owner = (fl_owner != NULL) ? fl_owner : this;
+  _allocator = allocator;
 }
 
 void** PtrQueueSet::allocate_buffer() {
-  BufferNode* node = NULL;
-  {
-    MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
-    node = _fl_owner->_buf_free_list;
-    if (node != NULL) {
-      _fl_owner->_buf_free_list = node->next();
-      _fl_owner->_buf_free_list_sz--;
-    }
-  }
-  if (node == NULL) {
-    node = BufferNode::allocate(buffer_size());
-  } else {
-    // Reinitialize buffer obtained from free list.
-    node->set_index(0);
-    node->set_next(NULL);
-  }
+  BufferNode* node = _allocator->allocate();
   return BufferNode::make_buffer_from_node(node);
 }
 
 void PtrQueueSet::deallocate_buffer(BufferNode* node) {
-  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
-  node->set_next(_fl_owner->_buf_free_list);
-  _fl_owner->_buf_free_list = node;
-  _fl_owner->_buf_free_list_sz++;
-}
-
-void PtrQueueSet::reduce_free_list() {
-  assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
-  // For now we'll adopt the strategy of deleting half.
-  MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
-  size_t n = _buf_free_list_sz / 2;
-  for (size_t i = 0; i < n; ++i) {
-    assert(_buf_free_list != NULL,
-           "_buf_free_list_sz is wrong: " SIZE_FORMAT, _buf_free_list_sz);
-    BufferNode* node = _buf_free_list;
-    _buf_free_list = node->next();
-    _buf_free_list_sz--;
-    BufferNode::deallocate(node);
-  }
+  _allocator->release(node);
 }
 
 void PtrQueue::handle_zero_index() {
@@ -270,11 +303,6 @@
             "Completed buffer length is wrong.");
 }
 
-void PtrQueueSet::set_buffer_size(size_t sz) {
-  assert(_buffer_size == 0 && sz > 0, "Should be called only once.");
-  _buffer_size = sz;
-}
-
 // Merge lists of buffers. Notify the processing threads.
 // The source queue is emptied as a result. The queues
 // must share the monitor.
--- a/src/hotspot/share/gc/shared/ptrQueue.hpp	Thu Nov 15 17:55:41 2018 -0500
+++ b/src/hotspot/share/gc/shared/ptrQueue.hpp	Thu Nov 15 19:59:10 2018 -0500
@@ -28,6 +28,8 @@
 #include "utilities/align.hpp"
 #include "utilities/sizes.hpp"
 
+class Mutex;
+
 // There are various techniques that require threads to be able to log
 // addresses.  For example, a generational write barrier might log
 // the addresses of modified old-generation objects.  This type supports
@@ -223,18 +225,19 @@
     return offset_of(BufferNode, _buffer);
   }
 
+AIX_ONLY(public:)               // xlC 12 on AIX doesn't implement C++ DR45.
+  // Allocate a new BufferNode with the "buffer" having size elements.
+  static BufferNode* allocate(size_t size);
+
+  // Free a BufferNode.
+  static void deallocate(BufferNode* node);
+
 public:
   BufferNode* next() const     { return _next;  }
   void set_next(BufferNode* n) { _next = n;     }
   size_t index() const         { return _index; }
   void set_index(size_t i)     { _index = i; }
 
-  // Allocate a new BufferNode with the "buffer" having size elements.
-  static BufferNode* allocate(size_t size);
-
-  // Free a BufferNode.
-  static void deallocate(BufferNode* node);
-
   // Return the BufferNode containing the buffer, after setting its index.
   static BufferNode* make_node_from_buffer(void** buffer, size_t index) {
     BufferNode* node =
@@ -250,6 +253,24 @@
     return reinterpret_cast<void**>(
       reinterpret_cast<char*>(node) + buffer_offset());
   }
+
+  // Free-list based allocator.
+  class Allocator {
+    size_t _buffer_size;
+    Mutex* _lock;
+    BufferNode* _free_list;
+    volatile size_t _free_count;
+
+  public:
+    Allocator(size_t buffer_size, Mutex* lock);
+    ~Allocator();
+
+    size_t buffer_size() const { return _buffer_size; }
+    size_t free_count() const;
+    BufferNode* allocate();
+    void release(BufferNode* node);
+    void reduce_free_list();
+  };
 };
 
 // A PtrQueueSet represents resources common to a set of pointer queues.
@@ -257,8 +278,7 @@
 // set, and return completed buffers to the set.
 // All these variables are are protected by the TLOQ_CBL_mon. XXX ???
 class PtrQueueSet {
-  // The size of all buffers in the set.
-  size_t _buffer_size;
+  BufferNode::Allocator* _allocator;
 
 protected:
   Monitor* _cbl_mon;  // Protects the fields below.
@@ -268,15 +288,6 @@
   int _process_completed_threshold;
   volatile bool _process_completed;
 
-  // This (and the interpretation of the first element as a "next"
-  // pointer) are protected by the TLOQ_FL_lock.
-  Mutex* _fl_lock;
-  BufferNode* _buf_free_list;
-  size_t _buf_free_list_sz;
-  // Queue set can share a freelist. The _fl_owner variable
-  // specifies the owner. It is set to "this" by default.
-  PtrQueueSet* _fl_owner;
-
   bool _all_active;
 
   // If true, notify_all on _cbl_mon when the threshold is reached.
@@ -307,10 +318,9 @@
   // Because of init-order concerns, we can't pass these as constructor
   // arguments.
   void initialize(Monitor* cbl_mon,
-                  Mutex* fl_lock,
+                  BufferNode::Allocator* allocator,
                   int process_completed_threshold,
-                  int max_completed_queue,
-                  PtrQueueSet *fl_owner = NULL);
+                  int max_completed_queue);
 
 public:
 
@@ -336,24 +346,14 @@
 
   bool is_active() { return _all_active; }
 
-  // Set the buffer size.  Should be called before any "enqueue" operation
-  // can be called.  And should only be called once.
-  void set_buffer_size(size_t sz);
-
-  // Get the buffer size.  Must have been set.
   size_t buffer_size() const {
-    assert(_buffer_size > 0, "buffer size not set");
-    return _buffer_size;
+    return _allocator->buffer_size();
   }
 
   // Get/Set the number of completed buffers that triggers log processing.
   void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
   int process_completed_threshold() const { return _process_completed_threshold; }
 
-  // Must only be called at a safe point.  Indicates that the buffer free
-  // list size may be reduced, if that is deemed desirable.
-  void reduce_free_list();
-
   size_t completed_buffers_num() { return _n_completed_buffers; }
 
   void merge_bufferlists(PtrQueueSet* src);
--- a/src/hotspot/share/gc/shared/satbMarkQueue.cpp	Thu Nov 15 17:55:41 2018 -0500
+++ b/src/hotspot/share/gc/shared/satbMarkQueue.cpp	Thu Nov 15 19:59:10 2018 -0500
@@ -111,11 +111,12 @@
   _buffer_enqueue_threshold(0)
 {}
 
-void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
+void SATBMarkQueueSet::initialize(Monitor* cbl_mon,
+                                  BufferNode::Allocator* allocator,
                                   int process_completed_threshold,
                                   uint buffer_enqueue_threshold_percentage,
                                   Mutex* lock) {
-  PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
+  PtrQueueSet::initialize(cbl_mon, allocator, process_completed_threshold, -1);
   _shared_satb_queue.set_lock(lock);
   assert(buffer_size() != 0, "buffer size not initialized");
   // Minimum threshold of 1 ensures enqueuing of completely full buffers.
--- a/src/hotspot/share/gc/shared/satbMarkQueue.hpp	Thu Nov 15 17:55:41 2018 -0500
+++ b/src/hotspot/share/gc/shared/satbMarkQueue.hpp	Thu Nov 15 19:59:10 2018 -0500
@@ -108,7 +108,8 @@
     queue->apply_filter(filter);
   }
 
-  void initialize(Monitor* cbl_mon, Mutex* fl_lock,
+  void initialize(Monitor* cbl_mon,
+                  BufferNode::Allocator* allocator,
                   int process_completed_threshold,
                   uint buffer_enqueue_threshold_percentage,
                   Mutex* lock);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/gc/shared/test_ptrQueueBufferAllocator.cpp	Thu Nov 15 19:59:10 2018 -0500
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/ptrQueue.hpp"
+#include "runtime/mutex.hpp"
+#include "unittest.hpp"
+
+// Some basic testing of BufferNode::Allocator.
+TEST_VM(PtrQueueBufferAllocatorTest, test) {
+  Mutex m(Mutex::leaf, "PtrQueueBufferAllocatorTest",
+          false, Mutex::_safepoint_check_never);
+  BufferNode::Allocator allocator(256, &m);
+
+  // Allocate some new nodes for use in testing.
+  BufferNode* nodes[10] = {};
+  const size_t node_count = ARRAY_SIZE(nodes);
+  for (size_t i = 0; i < node_count; ++i) {
+    ASSERT_EQ(0u, allocator.free_count());
+    nodes[i] = allocator.allocate();
+    ASSERT_EQ(NULL, nodes[i]->next());
+  }
+
+  // Release the nodes, adding them to the allocator's free list.
+  for (size_t i = 0; i < node_count; ++i) {
+    ASSERT_EQ(i, allocator.free_count());
+    allocator.release(nodes[i]);
+    if (i == 0) {
+      ASSERT_EQ(NULL, nodes[i]->next());
+    } else {
+      ASSERT_EQ(nodes[i - 1], nodes[i]->next());
+    }
+  }
+
+  // Allocate nodes from the free list.
+  for (size_t i = 0; i < node_count; ++i) {
+    size_t j = node_count - i;
+    ASSERT_EQ(j, allocator.free_count());
+    ASSERT_EQ(nodes[j - 1], allocator.allocate());
+  }
+  ASSERT_EQ(0u, allocator.free_count());
+
+  // Release nodes back to the free list.
+  for (size_t i = 0; i < node_count; ++i) {
+    allocator.release(nodes[i]);
+  }
+  ASSERT_EQ(node_count, allocator.free_count());
+
+  // Destroy some nodes in the free list.
+  // We don't have a way to verify destruction, but we can at
+ * least verify we don't crash along the way.
+  allocator.reduce_free_list();
+  // destroy allocator.
+}