changeset 10711:fb31034a45ea

8151670: Unexpected concurrent refinement deactivation and reactivation
Summary: Refinement threads now use SuspendibleThreadSet::yield.
Reviewed-by: jmasa, mgerdin
author kbarrett
date Fri, 25 Mar 2016 15:50:31 -0400
parents 9489df4f2b94
children 5518d3ad6fa7
files src/share/vm/gc/g1/concurrentG1RefineThread.cpp src/share/vm/gc/g1/dirtyCardQueue.cpp src/share/vm/gc/g1/dirtyCardQueue.hpp
diffstat 3 files changed, 66 insertions(+), 40 deletions(-) [+]
line wrap: on
line diff
--- a/src/share/vm/gc/g1/concurrentG1RefineThread.cpp	Fri Mar 25 15:54:18 2016 +0300
+++ b/src/share/vm/gc/g1/concurrentG1RefineThread.cpp	Fri Mar 25 15:50:31 2016 -0400
@@ -76,7 +76,6 @@
 }
 
 void ConcurrentG1RefineThread::wait_for_completed_buffers() {
-  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
   while (!should_terminate() && !is_active()) {
     _monitor->wait(Mutex::_no_safepoint_check_flag);
@@ -126,7 +125,12 @@
     {
       SuspendibleThreadSetJoiner sts_join;
 
-      do {
+      while (!should_terminate()) {
+        if (sts_join.should_yield()) {
+          sts_join.yield();
+          continue;             // Re-check for termination after yield delay.
+        }
+
         size_t curr_buffer_num = dcqs.completed_buffers_num();
         // If the number of the buffers falls down into the yellow zone,
         // that means that the transition period after the evacuation pause has ended.
@@ -138,17 +142,23 @@
         if (_next != NULL && !_next->is_active() && curr_buffer_num > _next->_threshold) {
           _next->activate();
         }
-      } while (dcqs.apply_closure_to_completed_buffer(_refine_closure,
-                                                      _worker_id + _worker_id_offset,
-                                                      _deactivation_threshold,
-                                                      false /* during_pause */));
 
-      deactivate();
-      log_debug(gc, refine)("Deactivated %d, off threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT,
-                            _worker_id, _deactivation_threshold,
-                            dcqs.completed_buffers_num());
+        // Process the next buffer, if there are enough left.
+        if (!dcqs.apply_closure_to_completed_buffer(_refine_closure,
+                                                    _worker_id + _worker_id_offset,
+                                                    _deactivation_threshold,
+                                                    false /* during_pause */)) {
+          break; // Deactivate, number of buffers fell below threshold.
+        }
+      }
     }
 
+    deactivate();
+    log_debug(gc, refine)("Deactivated %d, off threshold: " SIZE_FORMAT
+                          ", current: " SIZE_FORMAT,
+                          _worker_id, _deactivation_threshold,
+                          dcqs.completed_buffers_num());
+
     if (os::supports_vtime()) {
       _vtime_accum = (os::elapsedVTime() - _vtime_start);
     } else {
--- a/src/share/vm/gc/g1/dirtyCardQueue.cpp	Fri Mar 25 15:54:18 2016 +0300
+++ b/src/share/vm/gc/g1/dirtyCardQueue.cpp	Fri Mar 25 15:50:31 2016 -0400
@@ -155,42 +155,52 @@
                                                 bool consume,
                                                 uint worker_i) {
   if (cl == NULL) return true;
+  bool result = true;
   void** buf = BufferNode::make_buffer_from_node(node);
   size_t limit = DirtyCardQueue::byte_index_to_index(buffer_size());
-  size_t start = DirtyCardQueue::byte_index_to_index(node->index());
-  for (size_t i = start; i < limit; ++i) {
+  size_t i = DirtyCardQueue::byte_index_to_index(node->index());
+  for ( ; i < limit; ++i) {
     jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
     assert(card_ptr != NULL, "invariant");
     if (!cl->do_card_ptr(card_ptr, worker_i)) {
-      if (consume) {
-        size_t new_index = DirtyCardQueue::index_to_byte_index(i + 1);
-        assert(new_index <= buffer_size(), "invariant");
-        node->set_index(new_index);
-      }
-      return false;
+      result = false;           // Incomplete processing.
+      break;
     }
   }
   if (consume) {
-    node->set_index(buffer_size());
+    size_t new_index = DirtyCardQueue::index_to_byte_index(i);
+    assert(new_index <= buffer_size(), "invariant");
+    node->set_index(new_index);
   }
-  return true;
+  return result;
 }
 
+#ifndef ASSERT
+#define assert_fully_consumed(node, buffer_size)
+#else
+#define assert_fully_consumed(node, buffer_size)                \
+  do {                                                          \
+    size_t _afc_index = (node)->index();                        \
+    size_t _afc_size = (buffer_size);                           \
+    assert(_afc_index == _afc_size,                             \
+           "Buffer was not fully consumed as claimed: index: "  \
+           SIZE_FORMAT ", size: " SIZE_FORMAT,                  \
+            _afc_index, _afc_size);                             \
+  } while (0)
+#endif // ASSERT
+
 bool DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
   guarantee(_free_ids != NULL, "must be");
 
-  // claim a par id
-  uint worker_i = _free_ids->claim_par_id();
+  uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id
+  bool result = apply_closure_to_buffer(_mut_process_closure, node, true, worker_i);
+  _free_ids->release_par_id(worker_i); // release the id
 
-  bool b = apply_closure_to_buffer(_mut_process_closure, node, true, worker_i);
-  if (b) {
+  if (result) {
+    assert_fully_consumed(node, buffer_size());
     Atomic::inc(&_processed_buffers_mut);
   }
-
-  // release the id
-  _free_ids->release_par_id(worker_i);
-
-  return b;
+  return result;
 }
 
 
@@ -227,15 +237,16 @@
     return false;
   } else {
     if (apply_closure_to_buffer(cl, nd, true, worker_i)) {
+      assert_fully_consumed(nd, buffer_size());
       // Done with fully processed buffer.
       deallocate_buffer(nd);
       Atomic::inc(&_processed_buffers_rs_thread);
-      return true;
     } else {
       // Return partially processed buffer to the queue.
+      guarantee(!during_pause, "Should never stop early");
       enqueue_complete_buffer(nd);
-      return false;
     }
+    return true;
   }
 }
 
--- a/src/share/vm/gc/g1/dirtyCardQueue.hpp	Fri Mar 25 15:54:18 2016 +0300
+++ b/src/share/vm/gc/g1/dirtyCardQueue.hpp	Fri Mar 25 15:50:31 2016 -0400
@@ -82,7 +82,8 @@
   // returns true.  Stops processing after the first closure
   // application that returns false, and returns false from this
   // function.  If "consume" is true, the node's index is updated to
-  // follow the last processed element.
+  // exclude the processed elements, e.g. up to the element for which
+  // the closure returned false.
   bool apply_closure_to_buffer(CardTableEntryClosure* cl,
                                BufferNode* node,
                                bool consume,
@@ -121,14 +122,18 @@
 
   static void handle_zero_index_for_thread(JavaThread* t);
 
-  // If there exists some completed buffer, pop it, then apply the
-  // specified closure to its active elements.  If all active elements
-  // are processed, returns "true".  If no completed buffers exist,
-  // returns false.  If a completed buffer exists, but is only
-  // partially completed before a "yield" happens, the partially
-  // completed buffer (with its index updated to exclude the processed
-  // elements) is returned to the completed buffer set, and this call
-  // returns false.
+  // If there are more than stop_at completed buffers, pop one, apply
+  // the specified closure to its active elements, and return true.
+  // Otherwise return false.
+  //
+  // A completely processed buffer is freed.  However, if a closure
+  // invocation returns false, processing is stopped and the partially
+  // processed buffer (with its index updated to exclude the processed
+  // elements, e.g. up to the element for which the closure returned
+  // false) is returned to the completed buffer set.
+  //
+  // If during_pause is true, stop_at must be zero, and the closure
+  // must never return false.
   bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
                                          uint worker_i,
                                          size_t stop_at,