annotate src/share/vm/memory/cardTableRS.cpp @ 196:d1605aabd0a1

6719955: Update copyright year Summary: Update copyright year for files that have been modified in 2008 Reviewed-by: ohair, tbell
author xdono
date Wed, 02 Jul 2008 12:55:16 -0700
parents ba764ed4b6f2
children 1ee8caae33af
rev   line source
duke@0 1 /*
xdono@196 2 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
duke@0 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@0 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@0 21 * have any questions.
duke@0 22 *
duke@0 23 */
duke@0 24
duke@0 25 # include "incls/_precompiled.incl"
duke@0 26 # include "incls/_cardTableRS.cpp.incl"
duke@0 27
duke@0 28 CardTableRS::CardTableRS(MemRegion whole_heap,
duke@0 29 int max_covered_regions) :
duke@0 30 GenRemSet(&_ct_bs),
duke@0 31 _ct_bs(whole_heap, max_covered_regions),
duke@0 32 _cur_youngergen_card_val(youngergenP1_card)
duke@0 33 {
duke@0 34 _last_cur_val_in_gen = new jbyte[GenCollectedHeap::max_gens + 1];
duke@0 35 if (_last_cur_val_in_gen == NULL) {
duke@0 36 vm_exit_during_initialization("Could not last_cur_val_in_gen array.");
duke@0 37 }
duke@0 38 for (int i = 0; i < GenCollectedHeap::max_gens + 1; i++) {
duke@0 39 _last_cur_val_in_gen[i] = clean_card_val();
duke@0 40 }
duke@0 41 _ct_bs.set_CTRS(this);
duke@0 42 }
duke@0 43
// Resize the covered region; simply delegates to the underlying
// card-table barrier set, which owns the card array.
void CardTableRS::resize_covered_region(MemRegion new_region) {
  _ct_bs.resize_covered_region(new_region);
}
duke@0 47
duke@0 48 jbyte CardTableRS::find_unused_youngergenP_card_value() {
duke@0 49 GenCollectedHeap* gch = GenCollectedHeap::heap();
duke@0 50 for (jbyte v = youngergenP1_card;
duke@0 51 v < cur_youngergen_and_prev_nonclean_card;
duke@0 52 v++) {
duke@0 53 bool seen = false;
duke@0 54 for (int g = 0; g < gch->n_gens()+1; g++) {
duke@0 55 if (_last_cur_val_in_gen[g] == v) {
duke@0 56 seen = true;
duke@0 57 break;
duke@0 58 }
duke@0 59 }
duke@0 60 if (!seen) return v;
duke@0 61 }
duke@0 62 ShouldNotReachHere();
duke@0 63 return 0;
duke@0 64 }
duke@0 65
// Choose the card value that the GC write barrier will install during
// the upcoming younger-refs traversal.
void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) {
  // Parallel or sequential, we must always set the prev to equal the
  // last one written.
  if (parallel) {
    // Find a parallel value to be used next.
    jbyte next_val = find_unused_youngergenP_card_value();
    set_cur_youngergen_card_val(next_val);

  } else {
    // In a sequential traversal we will always write youngergen, so that
    // the inline barrier is correct.
    set_cur_youngergen_card_val(youngergen_card);
  }
}
duke@0 80
// Record the card value in use while scanning generation "g" (the +1
// skips the youngest gen's slot), then do the actual iteration.
void CardTableRS::younger_refs_iterate(Generation* g,
                                       OopsInGenClosure* blk) {
  _last_cur_val_in_gen[g->level()+1] = cur_youngergen_card_val();
  g->younger_refs_iterate(blk);
}
duke@0 86
duke@0 87 class ClearNoncleanCardWrapper: public MemRegionClosure {
duke@0 88 MemRegionClosure* _dirty_card_closure;
duke@0 89 CardTableRS* _ct;
duke@0 90 bool _is_par;
duke@0 91 private:
duke@0 92 // Clears the given card, return true if the corresponding card should be
duke@0 93 // processed.
duke@0 94 bool clear_card(jbyte* entry) {
duke@0 95 if (_is_par) {
duke@0 96 while (true) {
duke@0 97 // In the parallel case, we may have to do this several times.
duke@0 98 jbyte entry_val = *entry;
duke@0 99 assert(entry_val != CardTableRS::clean_card_val(),
duke@0 100 "We shouldn't be looking at clean cards, and this should "
duke@0 101 "be the only place they get cleaned.");
duke@0 102 if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
duke@0 103 || _ct->is_prev_youngergen_card_val(entry_val)) {
duke@0 104 jbyte res =
duke@0 105 Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
duke@0 106 if (res == entry_val) {
duke@0 107 break;
duke@0 108 } else {
duke@0 109 assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
duke@0 110 "The CAS above should only fail if another thread did "
duke@0 111 "a GC write barrier.");
duke@0 112 }
duke@0 113 } else if (entry_val ==
duke@0 114 CardTableRS::cur_youngergen_and_prev_nonclean_card) {
duke@0 115 // Parallelism shouldn't matter in this case. Only the thread
duke@0 116 // assigned to scan the card should change this value.
duke@0 117 *entry = _ct->cur_youngergen_card_val();
duke@0 118 break;
duke@0 119 } else {
duke@0 120 assert(entry_val == _ct->cur_youngergen_card_val(),
duke@0 121 "Should be the only possibility.");
duke@0 122 // In this case, the card was clean before, and become
duke@0 123 // cur_youngergen only because of processing of a promoted object.
duke@0 124 // We don't have to look at the card.
duke@0 125 return false;
duke@0 126 }
duke@0 127 }
duke@0 128 return true;
duke@0 129 } else {
duke@0 130 jbyte entry_val = *entry;
duke@0 131 assert(entry_val != CardTableRS::clean_card_val(),
duke@0 132 "We shouldn't be looking at clean cards, and this should "
duke@0 133 "be the only place they get cleaned.");
duke@0 134 assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
duke@0 135 "This should be possible in the sequential case.");
duke@0 136 *entry = CardTableRS::clean_card_val();
duke@0 137 return true;
duke@0 138 }
duke@0 139 }
duke@0 140
duke@0 141 public:
duke@0 142 ClearNoncleanCardWrapper(MemRegionClosure* dirty_card_closure,
duke@0 143 CardTableRS* ct) :
duke@0 144 _dirty_card_closure(dirty_card_closure), _ct(ct) {
duke@0 145 _is_par = (SharedHeap::heap()->n_par_threads() > 0);
duke@0 146 }
duke@0 147 void do_MemRegion(MemRegion mr) {
duke@0 148 // We start at the high end of "mr", walking backwards
duke@0 149 // while accumulating a contiguous dirty range of cards in
duke@0 150 // [start_of_non_clean, end_of_non_clean) which we then
duke@0 151 // process en masse.
duke@0 152 HeapWord* end_of_non_clean = mr.end();
duke@0 153 HeapWord* start_of_non_clean = end_of_non_clean;
duke@0 154 jbyte* entry = _ct->byte_for(mr.last());
duke@0 155 const jbyte* first_entry = _ct->byte_for(mr.start());
duke@0 156 while (entry >= first_entry) {
duke@0 157 HeapWord* cur = _ct->addr_for(entry);
duke@0 158 if (!clear_card(entry)) {
duke@0 159 // We hit a clean card; process any non-empty
duke@0 160 // dirty range accumulated so far.
duke@0 161 if (start_of_non_clean < end_of_non_clean) {
duke@0 162 MemRegion mr2(start_of_non_clean, end_of_non_clean);
duke@0 163 _dirty_card_closure->do_MemRegion(mr2);
duke@0 164 }
duke@0 165 // Reset the dirty window while continuing to
duke@0 166 // look for the next dirty window to process.
duke@0 167 end_of_non_clean = cur;
duke@0 168 start_of_non_clean = end_of_non_clean;
duke@0 169 }
duke@0 170 // Open the left end of the window one card to the left.
duke@0 171 start_of_non_clean = cur;
duke@0 172 // Note that "entry" leads "start_of_non_clean" in
duke@0 173 // its leftward excursion after this point
duke@0 174 // in the loop and, when we hit the left end of "mr",
duke@0 175 // will point off of the left end of the card-table
duke@0 176 // for "mr".
duke@0 177 entry--;
duke@0 178 }
duke@0 179 // If the first card of "mr" was dirty, we will have
duke@0 180 // been left with a dirty window, co-initial with "mr",
duke@0 181 // which we now process.
duke@0 182 if (start_of_non_clean < end_of_non_clean) {
duke@0 183 MemRegion mr2(start_of_non_clean, end_of_non_clean);
duke@0 184 _dirty_card_closure->do_MemRegion(mr2);
duke@0 185 }
duke@0 186 }
duke@0 187 };
// GC write barrier used by parallel promotion: record that "field" now
// holds a younger-gen reference.  Card value transitions performed:
// clean (by dirty->clean before) ==> cur_younger_gen
// dirty ==> cur_youngergen_and_prev_nonclean_card
// precleaned ==> cur_youngergen_and_prev_nonclean_card
// prev-younger-gen ==> cur_youngergen_and_prev_nonclean_card
// cur-younger-gen ==> cur_younger_gen
// cur_youngergen_and_prev_nonclean_card ==> no change.
void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
  jbyte* entry = ct_bs()->byte_for(field);
  do {
    jbyte entry_val = *entry;
    // We put this first because it's probably the most common case.
    if (entry_val == clean_card_val()) {
      // No threat of contention with cleaning threads.
      *entry = cur_youngergen_card_val();
      return;
    } else if (card_is_dirty_wrt_gen_iter(entry_val)
               || is_prev_youngergen_card_val(entry_val)) {
      // Mark it as both cur and prev youngergen; card cleaning thread will
      // eventually remove the previous stuff.
      jbyte new_val = cur_youngergen_and_prev_nonclean_card;
      jbyte res = Atomic::cmpxchg(new_val, entry, entry_val);
      // Did the CAS succeed?
      if (res == entry_val) return;
      // Otherwise, retry, to see the new value.
      continue;
    } else {
      assert(entry_val == cur_youngergen_and_prev_nonclean_card
             || entry_val == cur_youngergen_card_val(),
             "should be only possibilities.");
      // Card already records a current younger-gen ref; nothing to do.
      return;
    }
  } while (true);
}
duke@0 221
// Apply "cl" to younger refs found via non-clean cards covering "sp"
// (up to its saved marks), clearing the cards as they are scanned.
void CardTableRS::younger_refs_in_space_iterate(Space* sp,
                                                OopsInGenClosure* cl) {
  // Space-appropriate dirty-card-to-oop closure for the barrier set's
  // card-scanning precision.
  DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, _ct_bs.precision(),
                                                   cl->gen_boundary());
  // Wrap it so cards are cleared (and batched) before being processed.
  ClearNoncleanCardWrapper clear_cl(dcto_cl, this);

  _ct_bs.non_clean_card_iterate(sp, sp->used_region_at_save_marks(),
                                dcto_cl, &clear_cl, false);
}
duke@0 231
duke@0 232 void CardTableRS::clear_into_younger(Generation* gen, bool clear_perm) {
duke@0 233 GenCollectedHeap* gch = GenCollectedHeap::heap();
duke@0 234 // Generations younger than gen have been evacuated. We can clear
duke@0 235 // card table entries for gen (we know that it has no pointers
duke@0 236 // to younger gens) and for those below. The card tables for
duke@0 237 // the youngest gen need never be cleared, and those for perm gen
duke@0 238 // will be cleared based on the parameter clear_perm.
duke@0 239 // There's a bit of subtlety in the clear() and invalidate()
duke@0 240 // methods that we exploit here and in invalidate_or_clear()
duke@0 241 // below to avoid missing cards at the fringes. If clear() or
duke@0 242 // invalidate() are changed in the future, this code should
duke@0 243 // be revisited. 20040107.ysr
duke@0 244 Generation* g = gen;
duke@0 245 for(Generation* prev_gen = gch->prev_gen(g);
duke@0 246 prev_gen != NULL;
duke@0 247 g = prev_gen, prev_gen = gch->prev_gen(g)) {
duke@0 248 MemRegion to_be_cleared_mr = g->prev_used_region();
duke@0 249 clear(to_be_cleared_mr);
duke@0 250 }
duke@0 251 // Clear perm gen cards if asked to do so.
duke@0 252 if (clear_perm) {
duke@0 253 MemRegion to_be_cleared_mr = gch->perm_gen()->prev_used_region();
duke@0 254 clear(to_be_cleared_mr);
duke@0 255 }
duke@0 256 }
duke@0 257
// Invalidate cards for the occupied part and clear cards for the
// newly-unoccupied part of "gen" (and, if "younger", of each older
// generation down to -- but excluding -- the youngest; and, if "perm",
// of the perm gen).
void CardTableRS::invalidate_or_clear(Generation* gen, bool younger,
                                      bool perm) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // For each generation gen (and younger and/or perm)
  // invalidate the cards for the currently occupied part
  // of that generation and clear the cards for the
  // unoccupied part of the generation (if any, making use
  // of that generation's prev_used_region to determine that
  // region). No need to do anything for the youngest
  // generation. Also see note#20040107.ysr above.
  Generation* g = gen;
  for(Generation* prev_gen = gch->prev_gen(g); prev_gen != NULL;
      g = prev_gen, prev_gen = gch->prev_gen(g)) {
    MemRegion used_mr = g->used_region();
    // Cards covering the previously-used-but-now-free fringe are cleared.
    MemRegion to_be_cleared_mr = g->prev_used_region().minus(used_mr);
    if (!to_be_cleared_mr.is_empty()) {
      clear(to_be_cleared_mr);
    }
    invalidate(used_mr);
    // Without "younger" we only process "gen" itself.
    if (!younger) break;
  }
  // Clear perm gen cards if asked to do so.
  if (perm) {
    g = gch->perm_gen();
    MemRegion used_mr = g->used_region();
    MemRegion to_be_cleared_mr = g->prev_used_region().minus(used_mr);
    if (!to_be_cleared_mr.is_empty()) {
      clear(to_be_cleared_mr);
    }
    invalidate(used_mr);
  }
}
duke@0 290
duke@0 291
// Oop closure used during verification: for pointer locations inside
// [_begin, _end) -- the words covered by a run of clean cards -- check
// that no old->young reference crosses _boundary, since such a ref
// would require the covering card to be non-clean.
class VerifyCleanCardClosure: public OopClosure {
private:
  HeapWord* _boundary;  // start of the younger region(s) of interest
  HeapWord* _begin;     // first word covered by the clean-card run
  HeapWord* _end;       // end (exclusive) of the clean-card run
protected:
  template <class T> void do_oop_work(T* p) {
    HeapWord* jp = (HeapWord*)p;
    // Only check pointer fields that actually lie on the clean cards.
    if (jp >= _begin && jp < _end) {
      oop obj = oopDesc::load_decode_heap_oop(p);
      guarantee(obj == NULL ||
                (HeapWord*)p < _boundary ||
                (HeapWord*)obj >= _boundary,
                "pointer on clean card crosses boundary");
    }
  }
public:
  VerifyCleanCardClosure(HeapWord* b, HeapWord* begin, HeapWord* end) :
    _boundary(b), _begin(begin), _end(end) {}
  virtual void do_oop(oop* p) { VerifyCleanCardClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); }
};
duke@0 314
duke@0 315 class VerifyCTSpaceClosure: public SpaceClosure {
coleenp@113 316 private:
duke@0 317 CardTableRS* _ct;
duke@0 318 HeapWord* _boundary;
duke@0 319 public:
duke@0 320 VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) :
duke@0 321 _ct(ct), _boundary(boundary) {}
coleenp@113 322 virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); }
duke@0 323 };
duke@0 324
duke@0 325 class VerifyCTGenClosure: public GenCollectedHeap::GenClosure {
duke@0 326 CardTableRS* _ct;
duke@0 327 public:
duke@0 328 VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {}
duke@0 329 void do_generation(Generation* gen) {
duke@0 330 // Skip the youngest generation.
duke@0 331 if (gen->level() == 0) return;
duke@0 332 // Normally, we're interested in pointers to younger generations.
duke@0 333 VerifyCTSpaceClosure blk(_ct, gen->reserved().start());
duke@0 334 gen->space_iterate(&blk, true);
duke@0 335 }
duke@0 336 };
duke@0 337
// Verify the card-table state for space "s": every run of clean cards
// must cover no old->young (across "gen_boundary") references.  Cards
// holding stale youngergenP_n values are tolerated -- see the long
// discussion in the else-branch below.
void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
  // We don't need to do young-gen spaces.
  if (s->end() <= gen_boundary) return;
  MemRegion used = s->used_region();

  jbyte* cur_entry = byte_for(used.start());
  jbyte* limit = byte_after(used.last());
  while (cur_entry < limit) {
    if (*cur_entry == CardTableModRefBS::clean_card) {
      // Found a clean card; extend to the maximal run of clean cards.
      jbyte* first_dirty = cur_entry+1;
      while (first_dirty < limit &&
             *first_dirty == CardTableModRefBS::clean_card) {
        first_dirty++;
      }
      // If the first object is a regular object, and it has a
      // young-to-old field, that would mark the previous card.
      HeapWord* boundary = addr_for(cur_entry);
      HeapWord* end = (first_dirty >= limit) ? used.end() : addr_for(first_dirty);
      HeapWord* boundary_block = s->block_start(boundary);
      HeapWord* begin = boundary;             // Until proven otherwise.
      HeapWord* start_block = boundary_block; // Until proven otherwise.
      if (boundary_block < boundary) {
        // A block straddles the left edge of the clean run.
        if (s->block_is_obj(boundary_block) && s->obj_is_alive(boundary_block)) {
          oop boundary_obj = oop(boundary_block);
          if (!boundary_obj->is_objArray() &&
              !boundary_obj->is_typeArray()) {
            guarantee(cur_entry > byte_for(used.start()),
                      "else boundary would be boundary_block");
            if (*byte_for(boundary_block) != CardTableModRefBS::clean_card) {
              // The straddling object's card is non-clean; any young refs
              // in it are accounted for, so start checking after it.
              begin = boundary_block + s->block_size(boundary_block);
              start_block = begin;
            }
          }
        }
      }
      // Now traverse objects until end.
      HeapWord* cur = start_block;
      VerifyCleanCardClosure verify_blk(gen_boundary, begin, end);
      while (cur < end) {
        if (s->block_is_obj(cur) && s->obj_is_alive(cur)) {
          oop(cur)->oop_iterate(&verify_blk);
        }
        cur += s->block_size(cur);
      }
      cur_entry = first_dirty;
    } else {
      // We'd normally expect that cur_youngergen_and_prev_nonclean_card
      // is a transient value, that cannot be in the card table
      // except during GC, and thus assert that:
      // guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card,
      //        "Illegal CT value");
      // That however, need not hold, as will become clear in the
      // following...

      // We'd normally expect that if we are in the parallel case,
      // we can't have left a prev value (which would be different
      // from the current value) in the card table, and so we'd like to
      // assert that:
      // guarantee(cur_youngergen_card_val() == youngergen_card
      //           || !is_prev_youngergen_card_val(*cur_entry),
      //           "Illegal CT value");
      // That, however, may not hold occasionally, because of
      // CMS or MSC in the old gen. To wit, consider the
      // following two simple illustrative scenarios:
      // (a) CMS: Consider the case where a large object L
      //     spanning several cards is allocated in the old
      //     gen, and has a young gen reference stored in it, dirtying
      //     some interior cards. A young collection scans the card,
      //     finds a young ref and installs a youngergenP_n value.
      //     L then goes dead. Now a CMS collection starts,
      //     finds L dead and sweeps it up. Assume that L is
      //     abutting _unallocated_blk, so _unallocated_blk is
      //     adjusted down to (below) L. Assume further that
      //     no young collection intervenes during this CMS cycle.
      //     The next young gen cycle will not get to look at this
      //     youngergenP_n card since it lies in the unoccupied
      //     part of the space.
      //     Some young collections later the blocks on this
      //     card can be re-allocated either due to direct allocation
      //     or due to absorbing promotions. At this time, the
      //     before-gc verification will fail the above assert.
      // (b) MSC: In this case, an object L with a young reference
      //     is on a card that (therefore) holds a youngergen_n value.
      //     Suppose also that L lies towards the end of the
      //     used space before GC. An MSC collection
      //     occurs that compacts to such an extent that this
      //     card is no longer in the occupied part of the space.
      //     Since current code in MSC does not always clear cards
      //     in the unused part of old gen, this stale youngergen_n
      //     value is left behind and can later be covered by
      //     an object when promotion or direct allocation
      //     re-allocates that part of the heap.
      //
      // Fortunately, the presence of such stale card values is
      // "only" a minor annoyance in that subsequent young collections
      // might needlessly scan such cards, but would still never corrupt
      // the heap as a result. However, it's likely not to be a significant
      // performance inhibitor in practice. For instance,
      // some recent measurements with unoccupied cards eagerly cleared
      // out to maintain this invariant, showed next to no
      // change in young collection times; of course one can construct
      // degenerate examples where the cost can be significant.)
      // Note, in particular, that if the "stale" card is modified
      // after re-allocation, it would be dirty, not "stale". Thus,
      // we can never have a younger ref in such a card and it is
      // safe not to scan that card in any collection. [As we see
      // below, we do some unnecessary scanning
      // in some cases in the current parallel scanning algorithm.]
      //
      // The main point below is that the parallel card scanning code
      // deals correctly with these stale card values. There are two main
      // cases to consider where we have a stale "younger gen" value and a
      // "derivative" case to consider, where we have a stale
      // "cur_younger_gen_and_prev_non_clean" value, as will become
      // apparent in the case analysis below.
      // o Case 1. If the stale value corresponds to a younger_gen_n
      //   value other than the cur_younger_gen value then the code
      //   treats this as being tantamount to a prev_younger_gen
      //   card. This means that the card may be unnecessarily scanned.
      //   There are two sub-cases to consider:
      //   o Case 1a. Let us say that the card is in the occupied part
      //     of the generation at the time the collection begins. In
      //     that case the card will be either cleared when it is scanned
      //     for young pointers, or will be set to cur_younger_gen as a
      //     result of promotion. (We have elided the normal case where
      //     the scanning thread and the promoting thread interleave
      //     possibly resulting in a transient
      //     cur_younger_gen_and_prev_non_clean value before settling
      //     to cur_younger_gen. [End Case 1a.]
      //   o Case 1b. Consider now the case when the card is in the unoccupied
      //     part of the space which becomes occupied because of promotions
      //     into it during the current young GC. In this case the card
      //     will never be scanned for young references. The current
      //     code will set the card value to either
      //     cur_younger_gen_and_prev_non_clean or leave
      //     it with its stale value -- because the promotions didn't
      //     result in any younger refs on that card. Of these two
      //     cases, the latter will be covered in Case 1a during
      //     a subsequent scan. To deal with the former case, we need
      //     to further consider how we deal with a stale value of
      //     cur_younger_gen_and_prev_non_clean in our case analysis
      //     below. This we do in Case 3 below. [End Case 1b]
      //   [End Case 1]
      // o Case 2. If the stale value corresponds to cur_younger_gen being
      //   a value not necessarily written by a current promotion, the
      //   card will not be scanned by the younger refs scanning code.
      //   (This is OK since as we argued above such cards cannot contain
      //   any younger refs.) The result is that this value will be
      //   treated as a prev_younger_gen value in a subsequent collection,
      //   which is addressed in Case 1 above. [End Case 2]
      // o Case 3. We here consider the "derivative" case from Case 1b. above
      //   because of which we may find a stale
      //   cur_younger_gen_and_prev_non_clean card value in the table.
      //   Once again, as in Case 1, we consider two subcases, depending
      //   on whether the card lies in the occupied or unoccupied part
      //   of the space at the start of the young collection.
      //   o Case 3a. Let us say the card is in the occupied part of
      //     the old gen at the start of the young collection. In that
      //     case, the card will be scanned by the younger refs scanning
      //     code which will set it to cur_younger_gen. In a subsequent
      //     scan, the card will be considered again and get its final
      //     correct value. [End Case 3a]
      //   o Case 3b. Now consider the case where the card is in the
      //     unoccupied part of the old gen, and is occupied as a result
      //     of promotions during this young gc. In that case,
      //     the card will not be scanned for younger refs. The presence
      //     of newly promoted objects on the card will then result in
      //     its keeping the value cur_younger_gen_and_prev_non_clean
      //     value, which we have dealt with in Case 3 here. [End Case 3b]
      //   [End Case 3]
      //
      // (Please refer to the code in the helper class
      // ClearNonCleanCardWrapper and in CardTableModRefBS for details.)
      //
      // The informal arguments above can be tightened into a formal
      // correctness proof and it behooves us to write up such a proof,
      // or to use model checking to prove that there are no lingering
      // concerns.
      //
      // Clearly because of Case 3b one cannot bound the time for
      // which a card will retain what we have called a "stale" value.
      // However, one can obtain a Loose upper bound on the redundant
      // work as a result of such stale values. Note first that any
      // time a stale card lies in the occupied part of the space at
      // the start of the collection, it is scanned by younger refs
      // code and we can define a rank function on card values that
      // declines when this is so. Note also that when a card does not
      // lie in the occupied part of the space at the beginning of a
      // young collection, its rank can either decline or stay unchanged.
      // In this case, no extra work is done in terms of redundant
      // younger refs scanning of that card.
      // Then, the case analysis above reveals that, in the worst case,
      // any such stale card will be scanned unnecessarily at most twice.
      //
      // It is nonetheless advisable to try and get rid of some of this
      // redundant work in a subsequent (low priority) re-design of
      // the card-scanning code, if only to simplify the underlying
      // state machine analysis/proof. ysr 1/28/2002. XXX
      cur_entry++;
    }
  }
}
duke@0 540
// Verify the whole card-table remembered set, including the perm-gen
// portion; only generational heaps are handled.
void CardTableRS::verify() {
  // At present, we only know how to verify the card table RS for
  // generational heaps.
  VerifyCTGenClosure blk(this);
  CollectedHeap* ch = Universe::heap();
  // We will do the perm-gen portion of the card table, too.
  Generation* pg = SharedHeap::heap()->perm_gen();
  HeapWord* pg_boundary = pg->reserved().start();

  if (ch->kind() == CollectedHeap::GenCollectedHeap) {
    GenCollectedHeap::heap()->generation_iterate(&blk, false);
    _ct_bs.verify();

    // If the old gen collections also collect perm, then we are only
    // interested in perm-to-young pointers, not perm-to-old pointers.
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    CollectorPolicy* cp = gch->collector_policy();
    if (cp->is_mark_sweep_policy() || cp->is_concurrent_mark_sweep_policy()) {
      // Move the boundary up to gen 1, so perm-to-old refs are ignored.
      pg_boundary = gch->get_gen(1)->reserved().start();
    }
  }
  VerifyCTSpaceClosure perm_space_blk(this, pg_boundary);
  SharedHeap::heap()->perm_gen()->space_iterate(&perm_space_blk, true);
}
duke@0 565
duke@0 566
jmasa@6 567 void CardTableRS::verify_aligned_region_empty(MemRegion mr) {
duke@0 568 if (!mr.is_empty()) {
duke@0 569 jbyte* cur_entry = byte_for(mr.start());
duke@0 570 jbyte* limit = byte_after(mr.last());
jmasa@6 571 // The region mr may not start on a card boundary so
jmasa@6 572 // the first card may reflect a write to the space
jmasa@6 573 // just prior to mr.
jmasa@6 574 if (!is_aligned(mr.start())) {
jmasa@6 575 cur_entry++;
jmasa@6 576 }
duke@0 577 for (;cur_entry < limit; cur_entry++) {
duke@0 578 guarantee(*cur_entry == CardTableModRefBS::clean_card,
duke@0 579 "Unexpected dirty card found");
duke@0 580 }
duke@0 581 }
duke@0 582 }