annotate src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp @ 5188:5afc5a089c2c

8014555: G1: Memory ordering problem with Conc refinement and card marking Summary: Add a StoreLoad barrier in the G1 post-barrier to fix a race with concurrent refinement. Reviewed-by: brutisso, tschatzl, roland
author mgerdin
date Tue, 08 Oct 2013 17:35:51 +0200
parents f95d63e2154a
children 935f879e4eb0
rev   line source
ysr@345 1 /*
johnc@1727 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
ysr@345 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@345 4 *
ysr@345 5 * This code is free software; you can redistribute it and/or modify it
ysr@345 6 * under the terms of the GNU General Public License version 2 only, as
ysr@345 7 * published by the Free Software Foundation.
ysr@345 8 *
ysr@345 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@345 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@345 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@345 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@345 13 * accompanied this code).
ysr@345 14 *
ysr@345 15 * You should have received a copy of the GNU General Public License version
ysr@345 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@345 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@345 18 *
trims@1563 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1563 20 * or visit www.oracle.com if you need additional information or have any
trims@1563 21 * questions.
ysr@345 22 *
ysr@345 23 */
ysr@345 24
stefank@1992 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_DIRTYCARDQUEUE_HPP
stefank@1992 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_DIRTYCARDQUEUE_HPP
stefank@1992 27
stefank@1992 28 #include "gc_implementation/g1/ptrQueue.hpp"
stefank@1992 29 #include "memory/allocation.hpp"
stefank@1992 30
ysr@345 31 class FreeIdSet;
ysr@345 32
ysr@345 33 // A closure class for processing card table entries. Note that we don't
ysr@345 34 // require these closure objects to be stack-allocated.
class CardTableEntryClosure: public CHeapObj<mtGC> {
public:
  // Process the card whose card table entry is "card_ptr". If this
  // returns "false", the caller terminates the iteration early.
  // "worker_i" presumably identifies the worker thread doing the
  // processing during parallel phases (defaults to 0) — confirm against
  // callers in the refinement/remembered-set code.
  virtual bool do_card_ptr(jbyte* card_ptr, int worker_i = 0) = 0;
};
ysr@345 41
// A ptrQueue whose elements are pointers into the card table (jbyte*
// card-table entries), i.e. dirty cards logged by the G1 post-barrier.
class DirtyCardQueue: public PtrQueue {
public:
  // "perm" is forwarded to PtrQueue and presumably marks a permanent
  // (non-thread-local) queue such as the set's shared queue — confirm
  // against ptrQueue.hpp.
  DirtyCardQueue(PtrQueueSet* qset_, bool perm = false) :
    // Dirty card queues are always active, so we create them with their
    // active field set to true.
    PtrQueue(qset_, perm, true /* active */) { }

  // Apply the closure to all elements, and reset the index to make the
  // buffer empty. If a closure application returns "false", return
  // "false" immediately, halting the iteration. If "consume" is true,
  // deletes processed entries from logs.
  // NOTE(review): "worker_i" is size_t here but int in
  // apply_closure_to_buffer and in CardTableEntryClosure::do_card_ptr —
  // the types should be unified (requires touching the .cpp as well).
  bool apply_closure(CardTableEntryClosure* cl,
                     bool consume = true,
                     size_t worker_i = 0);

  // Apply the closure to all elements of "buf", down to "index"
  // (inclusive.) If returns "false", then a closure application returned
  // "false", and we return immediately. If "consume" is true, entries are
  // set to NULL as they are processed, so they will not be processed again
  // later.
  static bool apply_closure_to_buffer(CardTableEntryClosure* cl,
                                      void** buf, size_t index, size_t sz,
                                      bool consume = true,
                                      int worker_i = 0);

  // Raw access to the underlying buffer and index, used when buffers are
  // handed between queues/sets (fields inherited from PtrQueue).
  void **get_buf() { return _buf;}
  void set_buf(void **buf) {_buf = buf;}
  size_t get_index() { return _index;}

  // Detach from the current buffer without deallocating it: the caller
  // is expected to have taken ownership of the buffer beforehand.
  void reinitialize() { _buf = 0; _sz = 0; _index = 0;}
};
ysr@345 72
ysr@345 73
ysr@345 74
// The global set of dirty card queues: owns the per-thread queues'
// completed buffers, the shared queue, and the single registered
// processing closure.
class DirtyCardQueueSet: public PtrQueueSet {
  // The one closure applied to completed buffers (see set_closure below).
  CardTableEntryClosure* _closure;

  // Queue used by code not running in a JavaThread (shared among such
  // callers; access is presumably synchronized externally — confirm).
  DirtyCardQueue _shared_dirty_card_queue;

  // Override.
  bool mut_process_buffer(void** buf);

  // Protected by the _cbl_mon.
  FreeIdSet* _free_ids;

  // The number of completed buffers processed by mutator and rs thread,
  // respectively.
  jint _processed_buffers_mut;
  jint _processed_buffers_rs_thread;

public:
  DirtyCardQueueSet(bool notify_when_complete = true);

  // Second-phase initialization (monitors/locks, thresholds, and an
  // optional free-list owner this set shares buffers with).
  void initialize(Monitor* cbl_mon, Mutex* fl_lock,
                  int process_completed_threshold,
                  int max_completed_queue,
                  Mutex* lock, PtrQueueSet* fl_owner = NULL);

  // The number of parallel ids that can be claimed to allow collector or
  // mutator threads to do card-processing work.
  static size_t num_par_ids();

  // Called when a thread's queue buffer fills; hands the buffer to the
  // set on behalf of "t".
  static void handle_zero_index_for_thread(JavaThread* t);

  // Register "blk" as "the closure" for all queues. Only one such closure
  // is allowed. The "apply_closure_to_completed_buffer" method will apply
  // this closure to a completed buffer, and "iterate_closure_all_threads"
  // applies it to partially-filled buffers (the latter should only be done
  // with the world stopped).
  void set_closure(CardTableEntryClosure* closure);

  // If there is a registered closure for buffers, apply it to all entries
  // in all currently-active buffers. This should only be applied at a
  // safepoint. (Currently must not be called in parallel; this should
  // change in the future.) If "consume" is true, processed entries are
  // discarded.
  void iterate_closure_all_threads(bool consume = true,
                                   size_t worker_i = 0);

  // If there exists some completed buffer, pop it, then apply the
  // registered closure to all its elements, nulling out those elements
  // processed. If all elements are processed, returns "true". If no
  // completed buffers exist, returns false. If a completed buffer exists,
  // but is only partially completed before a "yield" happens, the
  // partially completed buffer (with its processed elements set to NULL)
  // is returned to the completed buffer set, and this call returns false.
  bool apply_closure_to_completed_buffer(int worker_i = 0,
                                         int stop_at = 0,
                                         bool during_pause = false);

  // As above, but applies the explicitly supplied closure "cl" instead
  // of the registered one.
  bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
                                         int worker_i = 0,
                                         int stop_at = 0,
                                         bool during_pause = false);

  // Helper routine for the above: processes the already-popped buffer
  // node "nd" with "cl".
  bool apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
                                                int worker_i,
                                                BufferNode* nd);

  // Pop a completed buffer, or return NULL if no more than "stop_at"
  // completed buffers remain (presumably to leave work for concurrent
  // refinement — confirm against the .cpp).
  BufferNode* get_completed_buffer(int stop_at);

  // Applies the current closure to all completed buffers,
  // non-consumptively.
  void apply_closure_to_all_completed_buffers();

  DirtyCardQueue* shared_dirty_card_queue() {
    return &_shared_dirty_card_queue;
  }

  // Deallocate any completed log buffers
  void clear();

  // If a full collection is happening, reset partial logs, and ignore
  // completed ones: the full collection will make them all irrelevant.
  void abandon_logs();

  // If any threads have partial logs, add them to the global list of logs.
  void concatenate_logs();
  void clear_n_completed_buffers() { _n_completed_buffers = 0;}

  // Statistics accessors for the counters above.
  jint processed_buffers_mut() {
    return _processed_buffers_mut;
  }
  jint processed_buffers_rs_thread() {
    return _processed_buffers_rs_thread;
  }

};
stefank@1992 177
stefank@1992 178 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_DIRTYCARDQUEUE_HPP