annotate hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp @ 3262:30d1c247fc25

6700789: G1: Enable use of compressed oops with G1 heaps Summary: Modifications to G1 so as to allow the use of compressed oops. Reviewed-by: apetrusenko, coleenp, jmasa, kvn, never, phh, tonyp
author ysr
date Tue, 14 Jul 2009 15:40:39 -0700
parents 74a1337e4acc
children a268fa66d7fb
rev   line source
ysr@1374 1 /*
xdono@2105 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
ysr@1374 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@1374 4 *
ysr@1374 5 * This code is free software; you can redistribute it and/or modify it
ysr@1374 6 * under the terms of the GNU General Public License version 2 only, as
ysr@1374 7 * published by the Free Software Foundation.
ysr@1374 8 *
ysr@1374 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@1374 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@1374 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@1374 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@1374 13 * accompanied this code).
ysr@1374 14 *
ysr@1374 15 * You should have received a copy of the GNU General Public License version
ysr@1374 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@1374 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@1374 18 *
ysr@1374 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
ysr@1374 20 * CA 95054 USA or visit www.sun.com if you need additional information or
ysr@1374 21 * have any questions.
ysr@1374 22 *
ysr@1374 23 */
ysr@1374 24
ysr@1374 25 // A G1RemSet provides ways of iterating over pointers into a selected
ysr@1374 26 // collection set.
ysr@1374 27
// Forward declarations; full definitions live in other G1/GC headers.
class G1CollectedHeap;
class CardTableModRefBarrierSet;
class HRInto_G1RemSet;
class ConcurrentG1Refine;
ysr@1374 32
apetrusenko@2013 33 class G1RemSet: public CHeapObj {
ysr@1374 34 protected:
ysr@1374 35 G1CollectedHeap* _g1;
ysr@1374 36 unsigned _conc_refine_cards;
ysr@1374 37 size_t n_workers();
ysr@1374 38
ysr@1374 39 public:
ysr@1374 40 G1RemSet(G1CollectedHeap* g1) :
iveresov@2881 41 _g1(g1), _conc_refine_cards(0)
ysr@1374 42 {}
ysr@1374 43
ysr@1374 44 // Invoke "blk->do_oop" on all pointers into the CS in object in regions
ysr@1374 45 // outside the CS (having invoked "blk->set_region" to set the "from"
ysr@1374 46 // region correctly beforehand.) The "worker_i" param is for the
ysr@1374 47 // parallel case where the number of the worker thread calling this
ysr@1374 48 // function can be helpful in partitioning the work to be done. It
ysr@1374 49 // should be the same as the "i" passed to the calling thread's
ysr@1374 50 // work(i) function. In the sequential case this param will be ingored.
ysr@1374 51 virtual void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
ysr@1374 52 int worker_i) = 0;
ysr@1374 53
ysr@1374 54 // Prepare for and cleanup after an oops_into_collection_set_do
ysr@1374 55 // call. Must call each of these once before and after (in sequential
ysr@1374 56 // code) any threads call oops into collection set do. (This offers an
ysr@1374 57 // opportunity to sequential setup and teardown of structures needed by a
ysr@1374 58 // parallel iteration over the CS's RS.)
ysr@1374 59 virtual void prepare_for_oops_into_collection_set_do() = 0;
ysr@1374 60 virtual void cleanup_after_oops_into_collection_set_do() = 0;
ysr@1374 61
ysr@1374 62 // If "this" is of the given subtype, return "this", else "NULL".
ysr@1374 63 virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; }
ysr@1374 64
ysr@3262 65 // Record, if necessary, the fact that *p (where "p" is in region "from",
ysr@3262 66 // and is, a fortiori, required to be non-NULL) has changed to its new value.
ysr@1374 67 virtual void write_ref(HeapRegion* from, oop* p) = 0;
ysr@3262 68 virtual void write_ref(HeapRegion* from, narrowOop* p) = 0;
ysr@1374 69 virtual void par_write_ref(HeapRegion* from, oop* p, int tid) = 0;
ysr@3262 70 virtual void par_write_ref(HeapRegion* from, narrowOop* p, int tid) = 0;
ysr@1374 71
ysr@1374 72 // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
ysr@1374 73 // or card, respectively, such that a region or card with a corresponding
ysr@1374 74 // 0 bit contains no part of any live object. Eliminates any remembered
ysr@1374 75 // set entries that correspond to dead heap ranges.
ysr@1374 76 virtual void scrub(BitMap* region_bm, BitMap* card_bm) = 0;
ysr@1374 77 // Like the above, but assumes is called in parallel: "worker_num" is the
ysr@1374 78 // parallel thread id of the current thread, and "claim_val" is the
ysr@1374 79 // value that should be used to claim heap regions.
ysr@1374 80 virtual void scrub_par(BitMap* region_bm, BitMap* card_bm,
ysr@1374 81 int worker_num, int claim_val) = 0;
ysr@1374 82
ysr@1374 83 // Refine the card corresponding to "card_ptr". If "sts" is non-NULL,
ysr@1374 84 // join and leave around parts that must be atomic wrt GC. (NULL means
ysr@1374 85 // being done at a safepoint.)
ysr@1374 86 virtual void concurrentRefineOneCard(jbyte* card_ptr, int worker_i) {}
ysr@1374 87
ysr@1374 88 // Print any relevant summary info.
ysr@1374 89 virtual void print_summary_info() {}
ysr@1374 90
ysr@1374 91 // Prepare remebered set for verification.
ysr@1374 92 virtual void prepare_for_verify() {};
ysr@1374 93 };
ysr@1374 94
ysr@1374 95
ysr@1374 96 // The simplest possible G1RemSet: iterates over all objects in non-CS
ysr@1374 97 // regions, searching for pointers into the CS.
ysr@1374 98 class StupidG1RemSet: public G1RemSet {
ysr@1374 99 public:
ysr@1374 100 StupidG1RemSet(G1CollectedHeap* g1) : G1RemSet(g1) {}
ysr@1374 101
ysr@1374 102 void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
ysr@1374 103 int worker_i);
ysr@1374 104
ysr@1374 105 void prepare_for_oops_into_collection_set_do() {}
ysr@1374 106 void cleanup_after_oops_into_collection_set_do() {}
ysr@1374 107
ysr@1374 108 // Nothing is necessary in the version below.
ysr@1374 109 void write_ref(HeapRegion* from, oop* p) {}
ysr@3262 110 void write_ref(HeapRegion* from, narrowOop* p) {}
ysr@1374 111 void par_write_ref(HeapRegion* from, oop* p, int tid) {}
ysr@3262 112 void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {}
ysr@1374 113
ysr@1374 114 void scrub(BitMap* region_bm, BitMap* card_bm) {}
ysr@1374 115 void scrub_par(BitMap* region_bm, BitMap* card_bm,
ysr@1374 116 int worker_num, int claim_val) {}
ysr@1374 117
ysr@1374 118 };
ysr@1374 119
ysr@1374 120 // A G1RemSet in which each heap region has a rem set that records the
ysr@1374 121 // external heap references into it. Uses a mod ref bs to track updates,
ysr@1374 122 // so that they can be used to update the individual region remsets.
ysr@1374 123
ysr@1374 124 class HRInto_G1RemSet: public G1RemSet {
ysr@1374 125 protected:
ysr@1374 126 enum SomePrivateConstants {
ysr@1374 127 UpdateRStoMergeSync = 0,
ysr@1374 128 MergeRStoDoDirtySync = 1,
ysr@1374 129 DoDirtySync = 2,
ysr@1374 130 LastSync = 3,
ysr@1374 131
ysr@1374 132 SeqTask = 0,
ysr@1374 133 NumSeqTasks = 1
ysr@1374 134 };
ysr@1374 135
ysr@1374 136 CardTableModRefBS* _ct_bs;
ysr@1374 137 SubTasksDone* _seq_task;
ysr@1374 138 G1CollectorPolicy* _g1p;
ysr@1374 139
ysr@1374 140 ConcurrentG1Refine* _cg1r;
ysr@1374 141
ysr@1374 142 size_t* _cards_scanned;
ysr@1374 143 size_t _total_cards_scanned;
ysr@1374 144
ysr@1374 145 // _par_traversal_in_progress is "true" iff a parallel traversal is in
ysr@1374 146 // progress. If so, then cards added to remembered sets should also have
ysr@1374 147 // their references into the collection summarized in "_new_refs".
ysr@1374 148 bool _par_traversal_in_progress;
iveresov@2881 149 void set_par_traversal(bool b) { _par_traversal_in_progress = b; }
ysr@3262 150 GrowableArray<OopOrNarrowOopStar>** _new_refs;
ysr@3262 151 template <class T> void new_refs_iterate_work(OopClosure* cl);
ysr@3262 152 void new_refs_iterate(OopClosure* cl) {
ysr@3262 153 if (UseCompressedOops) {
ysr@3262 154 new_refs_iterate_work<narrowOop>(cl);
ysr@3262 155 } else {
ysr@3262 156 new_refs_iterate_work<oop>(cl);
ysr@3262 157 }
ysr@3262 158 }
ysr@3262 159
ysr@3262 160 protected:
ysr@3262 161 template <class T> void write_ref_nv(HeapRegion* from, T* p);
ysr@3262 162 template <class T> void par_write_ref_nv(HeapRegion* from, T* p, int tid);
ysr@1374 163
ysr@1374 164 public:
ysr@1374 165 // This is called to reset dual hash tables after the gc pause
ysr@1374 166 // is finished and the initial hash table is no longer being
ysr@1374 167 // scanned.
ysr@1374 168 void cleanupHRRS();
ysr@1374 169
ysr@1374 170 HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
ysr@1374 171 ~HRInto_G1RemSet();
ysr@1374 172
ysr@1374 173 void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
ysr@1374 174 int worker_i);
ysr@1374 175
ysr@1374 176 void prepare_for_oops_into_collection_set_do();
ysr@1374 177 void cleanup_after_oops_into_collection_set_do();
ysr@1374 178 void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
ysr@3262 179 template <class T> void scanNewRefsRS_work(OopsInHeapRegionClosure* oc, int worker_i);
ysr@3262 180 void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i) {
ysr@3262 181 if (UseCompressedOops) {
ysr@3262 182 scanNewRefsRS_work<narrowOop>(oc, worker_i);
ysr@3262 183 } else {
ysr@3262 184 scanNewRefsRS_work<oop>(oc, worker_i);
ysr@3262 185 }
ysr@3262 186 }
ysr@1374 187 void updateRS(int worker_i);
ysr@1374 188 HeapRegion* calculateStartRegion(int i);
ysr@1374 189
ysr@1374 190 HRInto_G1RemSet* as_HRInto_G1RemSet() { return this; }
ysr@1374 191
ysr@1374 192 CardTableModRefBS* ct_bs() { return _ct_bs; }
ysr@1374 193 size_t cardsScanned() { return _total_cards_scanned; }
ysr@1374 194
ysr@1374 195 // Record, if necessary, the fact that *p (where "p" is in region "from",
ysr@1374 196 // which is required to be non-NULL) has changed to a new non-NULL value.
ysr@3262 197 // [Below the virtual version calls a non-virtual protected
ysr@3262 198 // workhorse that is templatified for narrow vs wide oop.]
ysr@3262 199 inline void write_ref(HeapRegion* from, oop* p) {
ysr@3262 200 write_ref_nv(from, p);
ysr@3262 201 }
ysr@3262 202 inline void write_ref(HeapRegion* from, narrowOop* p) {
ysr@3262 203 write_ref_nv(from, p);
ysr@3262 204 }
ysr@3262 205 inline void par_write_ref(HeapRegion* from, oop* p, int tid) {
ysr@3262 206 par_write_ref_nv(from, p, tid);
ysr@3262 207 }
ysr@3262 208 inline void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {
ysr@3262 209 par_write_ref_nv(from, p, tid);
ysr@3262 210 }
ysr@1374 211
ysr@3262 212 bool self_forwarded(oop obj);
ysr@1374 213
ysr@1374 214 void scrub(BitMap* region_bm, BitMap* card_bm);
ysr@1374 215 void scrub_par(BitMap* region_bm, BitMap* card_bm,
ysr@1374 216 int worker_num, int claim_val);
ysr@1374 217
ysr@1374 218 virtual void concurrentRefineOneCard(jbyte* card_ptr, int worker_i);
ysr@1374 219
ysr@1374 220 virtual void print_summary_info();
ysr@1374 221 virtual void prepare_for_verify();
ysr@1374 222 };
ysr@1374 223
// Compile-time switch for extra remembered-set logging (0 = disabled).
#define G1_REM_SET_LOGGING 0
ysr@1374 225
ysr@1374 226 class CountNonCleanMemRegionClosure: public MemRegionClosure {
ysr@1374 227 G1CollectedHeap* _g1;
ysr@1374 228 int _n;
ysr@1374 229 HeapWord* _start_first;
ysr@1374 230 public:
ysr@1374 231 CountNonCleanMemRegionClosure(G1CollectedHeap* g1) :
ysr@1374 232 _g1(g1), _n(0), _start_first(NULL)
ysr@1374 233 {}
ysr@1374 234 void do_MemRegion(MemRegion mr);
ysr@1374 235 int n() { return _n; };
ysr@1374 236 HeapWord* start_first() { return _start_first; }
ysr@1374 237 };
apetrusenko@2152 238
apetrusenko@2152 239 class UpdateRSOopClosure: public OopClosure {
apetrusenko@2152 240 HeapRegion* _from;
apetrusenko@2152 241 HRInto_G1RemSet* _rs;
apetrusenko@2152 242 int _worker_i;
ysr@3262 243
ysr@3262 244 template <class T> void do_oop_work(T* p);
ysr@3262 245
apetrusenko@2152 246 public:
apetrusenko@2152 247 UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) :
apetrusenko@2152 248 _from(NULL), _rs(rs), _worker_i(worker_i) {
apetrusenko@2152 249 guarantee(_rs != NULL, "Requires an HRIntoG1RemSet");
apetrusenko@2152 250 }
apetrusenko@2152 251
apetrusenko@2152 252 void set_from(HeapRegion* from) {
apetrusenko@2152 253 assert(from != NULL, "from region must be non-NULL");
apetrusenko@2152 254 _from = from;
apetrusenko@2152 255 }
apetrusenko@2152 256
ysr@3262 257 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
ysr@3262 258 virtual void do_oop(oop* p) { do_oop_work(p); }
apetrusenko@2152 259
apetrusenko@2152 260 // Override: this closure is idempotent.
apetrusenko@2152 261 // bool idempotent() { return true; }
apetrusenko@2152 262 bool apply_to_weak_ref_discovered_field() { return true; }
apetrusenko@2152 263 };