annotate src/share/vm/memory/cardTableModRefBS.cpp @ 887:ff004bcd2596

6843292: "Expect to be beyond new region unless impacting another region" assertion too strong Summary: In the assertion allow for collision with the guard page. Reviewed-by: tonyp, ysr, jcoomes
author jmasa
date Sun, 02 Aug 2009 19:10:31 -0700
parents 7bb995fbd3c0
children 8624da129f0b
rev   line source
duke@0 1 /*
xdono@579 2 * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
duke@0 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@0 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@0 21 * have any questions.
duke@0 22 *
duke@0 23 */
duke@0 24
duke@0 25 // This kind of "BarrierSet" allows a "CollectedHeap" to detect and
duke@0 26 // enumerate ref fields that have been modified (since the last
duke@0 27 // enumeration).
duke@0 28
duke@0 29 # include "incls/_precompiled.incl"
duke@0 30 # include "incls/_cardTableModRefBS.cpp.incl"
duke@0 31
duke@0 32 size_t CardTableModRefBS::cards_required(size_t covered_words)
duke@0 33 {
duke@0 34 // Add one for a guard card, used to detect errors.
duke@0 35 const size_t words = align_size_up(covered_words, card_size_in_words);
duke@0 36 return words / card_size_in_words + 1;
duke@0 37 }
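// Illustrative sketch, not part of the original source: with the usual
// 512-byte cards and 8-byte HeapWords, card_size_in_words is 64, so a
// 64 MB covered space (8,388,608 words) needs 8,388,608 / 64 = 131,072
// cards plus the one guard card, i.e. cards_required() returns 131,073.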
duke@0 38
duke@0 39 size_t CardTableModRefBS::compute_byte_map_size()
duke@0 40 {
duke@0 41 assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
duke@0 42 "unitialized, check declaration order");
duke@0 43 assert(_page_size != 0, "unitialized, check declaration order");
duke@0 44 const size_t granularity = os::vm_allocation_granularity();
duke@0 45 return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
duke@0 46 }
duke@0 47
duke@0 48 CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
duke@0 49 int max_covered_regions):
duke@0 50 ModRefBarrierSet(max_covered_regions),
duke@0 51 _whole_heap(whole_heap),
duke@0 52 _guard_index(cards_required(whole_heap.word_size()) - 1),
duke@0 53 _last_valid_index(_guard_index - 1),
jcoomes@21 54 _page_size(os::vm_page_size()),
duke@0 55 _byte_map_size(compute_byte_map_size())
duke@0 56 {
duke@0 57 _kind = BarrierSet::CardTableModRef;
duke@0 58
duke@0 59 HeapWord* low_bound = _whole_heap.start();
duke@0 60 HeapWord* high_bound = _whole_heap.end();
duke@0 61 assert((uintptr_t(low_bound) & (card_size - 1)) == 0, "heap must start at card boundary");
duke@0 62 assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary");
duke@0 63
duke@0 64 assert(card_size <= 512, "card_size must be at most 512"); // why?
duke@0 65
duke@0 66 _covered = new MemRegion[max_covered_regions];
duke@0 67 _committed = new MemRegion[max_covered_regions];
duke@0 68 if (_covered == NULL || _committed == NULL)
duke@0 69 vm_exit_during_initialization("couldn't alloc card table covered region set.");
duke@0 70 int i;
duke@0 71 for (i = 0; i < max_covered_regions; i++) {
duke@0 72 _covered[i].set_word_size(0);
duke@0 73 _committed[i].set_word_size(0);
duke@0 74 }
duke@0 75 _cur_covered_regions = 0;
duke@0 76
duke@0 77 const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
duke@0 78 MAX2(_page_size, (size_t) os::vm_allocation_granularity());
duke@0 79 ReservedSpace heap_rs(_byte_map_size, rs_align, false);
duke@0 80 os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
duke@0 81 _page_size, heap_rs.base(), heap_rs.size());
duke@0 82 if (!heap_rs.is_reserved()) {
duke@0 83 vm_exit_during_initialization("Could not reserve enough space for the "
duke@0 84 "card marking array");
duke@0 85 }
duke@0 86
duke@0 87 // The assembler store_check code will do an unsigned shift of the oop,
duke@0 88 // then add it to byte_map_base, i.e.
duke@0 89 //
duke@0 90 // _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
duke@0 91 _byte_map = (jbyte*) heap_rs.base();
duke@0 92 byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
duke@0 93 assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
duke@0 94 assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
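// Illustrative sketch of the mapping above, assuming the typical
// card_shift of 9 (512-byte cards): if low_bound is 0x20000000, then
// byte_map_base = _byte_map - 0x100000, so byte_for(0x20000000) =
// byte_map_base + (0x20000000 >> 9) = &_byte_map[0], and every covered
// address maps into [&_byte_map[0], &_byte_map[_last_valid_index]].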
duke@0 95
duke@0 96 jbyte* guard_card = &_byte_map[_guard_index];
duke@0 97 uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
duke@0 98 _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
duke@0 99 if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
duke@0 100 // Do better than this for Merlin
duke@0 101 vm_exit_out_of_memory(_page_size, "card table last card");
duke@0 102 }
duke@0 103 *guard_card = last_card;
duke@0 104
duke@0 105 _lowest_non_clean =
duke@0 106 NEW_C_HEAP_ARRAY(CardArr, max_covered_regions);
duke@0 107 _lowest_non_clean_chunk_size =
duke@0 108 NEW_C_HEAP_ARRAY(size_t, max_covered_regions);
duke@0 109 _lowest_non_clean_base_chunk_index =
duke@0 110 NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions);
duke@0 111 _last_LNC_resizing_collection =
duke@0 112 NEW_C_HEAP_ARRAY(int, max_covered_regions);
duke@0 113 if (_lowest_non_clean == NULL
duke@0 114 || _lowest_non_clean_chunk_size == NULL
duke@0 115 || _lowest_non_clean_base_chunk_index == NULL
duke@0 116 || _last_LNC_resizing_collection == NULL)
duke@0 117 vm_exit_during_initialization("couldn't allocate an LNC array.");
duke@0 118 for (i = 0; i < max_covered_regions; i++) {
duke@0 119 _lowest_non_clean[i] = NULL;
duke@0 120 _lowest_non_clean_chunk_size[i] = 0;
duke@0 121 _last_LNC_resizing_collection[i] = -1;
duke@0 122 }
duke@0 123
duke@0 124 if (TraceCardTableModRefBS) {
duke@0 125 gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
duke@0 126 gclog_or_tty->print_cr(" "
duke@0 127 " &_byte_map[0]: " INTPTR_FORMAT
duke@0 128 " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
duke@0 129 &_byte_map[0],
duke@0 130 &_byte_map[_last_valid_index]);
duke@0 131 gclog_or_tty->print_cr(" "
duke@0 132 " byte_map_base: " INTPTR_FORMAT,
duke@0 133 byte_map_base);
duke@0 134 }
duke@0 135 }
duke@0 136
duke@0 137 int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
duke@0 138 int i;
duke@0 139 for (i = 0; i < _cur_covered_regions; i++) {
duke@0 140 if (_covered[i].start() == base) return i;
duke@0 141 if (_covered[i].start() > base) break;
duke@0 142 }
duke@0 143 // If we didn't find it, create a new one.
duke@0 144 assert(_cur_covered_regions < _max_covered_regions,
duke@0 145 "too many covered regions");
duke@0 146 // Move the ones above up, to maintain sorted order.
duke@0 147 for (int j = _cur_covered_regions; j > i; j--) {
duke@0 148 _covered[j] = _covered[j-1];
duke@0 149 _committed[j] = _committed[j-1];
duke@0 150 }
duke@0 151 int res = i;
duke@0 152 _cur_covered_regions++;
duke@0 153 _covered[res].set_start(base);
duke@0 154 _covered[res].set_word_size(0);
duke@0 155 jbyte* ct_start = byte_for(base);
duke@0 156 uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
duke@0 157 _committed[res].set_start((HeapWord*)ct_start_aligned);
duke@0 158 _committed[res].set_word_size(0);
duke@0 159 return res;
duke@0 160 }
duke@0 161
duke@0 162 int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
duke@0 163 for (int i = 0; i < _cur_covered_regions; i++) {
duke@0 164 if (_covered[i].contains(addr)) {
duke@0 165 return i;
duke@0 166 }
duke@0 167 }
duke@0 168 assert(0, "address outside of heap?");
duke@0 169 return -1;
duke@0 170 }
duke@0 171
duke@0 172 HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
duke@0 173 HeapWord* max_end = NULL;
duke@0 174 for (int j = 0; j < ind; j++) {
duke@0 175 HeapWord* this_end = _committed[j].end();
duke@0 176 if (this_end > max_end) max_end = this_end;
duke@0 177 }
duke@0 178 return max_end;
duke@0 179 }
duke@0 180
duke@0 181 MemRegion CardTableModRefBS::committed_unique_to_self(int self,
duke@0 182 MemRegion mr) const {
duke@0 183 MemRegion result = mr;
duke@0 184 for (int r = 0; r < _cur_covered_regions; r += 1) {
duke@0 185 if (r != self) {
duke@0 186 result = result.minus(_committed[r]);
duke@0 187 }
duke@0 188 }
duke@0 189 // Never include the guard page.
duke@0 190 result = result.minus(_guard_region);
duke@0 191 return result;
duke@0 192 }
duke@0 193
duke@0 194 void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
duke@0 195 // We don't change the start of a region, only the end.
duke@0 196 assert(_whole_heap.contains(new_region),
duke@0 197 "attempt to cover area not in reserved area");
duke@0 198 debug_only(verify_guard();)
jmasa@208 199 // collided is true if the expansion would push into another committed region
jmasa@208 200 debug_only(bool collided = false;)
jmasa@6 201 int const ind = find_covering_region_by_base(new_region.start());
jmasa@6 202 MemRegion const old_region = _covered[ind];
duke@0 203 assert(old_region.start() == new_region.start(), "just checking");
duke@0 204 if (new_region.word_size() != old_region.word_size()) {
duke@0 205 // Commit new or uncommit old pages, if necessary.
duke@0 206 MemRegion cur_committed = _committed[ind];
duke@0 207 // Extend the end of this _committed region
duke@0 208 // to cover the end of any lower _committed regions.
duke@0 209 // This forms overlapping regions, but never interior regions.
jmasa@6 210 HeapWord* const max_prev_end = largest_prev_committed_end(ind);
duke@0 211 if (max_prev_end > cur_committed.end()) {
duke@0 212 cur_committed.set_end(max_prev_end);
duke@0 213 }
duke@0 214 // Align the end up to a page size (starts are already aligned).
jmasa@6 215 jbyte* const new_end = byte_after(new_region.last());
jmasa@208 216 HeapWord* new_end_aligned =
jmasa@6 217 (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
duke@0 218 assert(new_end_aligned >= (HeapWord*) new_end,
duke@0 219 "align up, but less");
jmasa@581 220 // Check the other regions (excludes "ind") to ensure that
jmasa@581 221 // the new_end_aligned does not intrude onto the committed
jmasa@581 222 // space of another region.
jmasa@208 223 int ri = 0;
jmasa@208 224 for (ri = 0; ri < _cur_covered_regions; ri++) {
jmasa@208 225 if (ri != ind) {
jmasa@208 226 if (_committed[ri].contains(new_end_aligned)) {
jmasa@581 227 // The prior check included in the assert
jmasa@581 228 // (new_end_aligned >= _committed[ri].start())
jmasa@581 229 // is redundant with the "contains" test.
jmasa@581 230 // Any region containing the new end
jmasa@581 231 // should start at or beyond the region found (ind)
jmasa@581 232 // for the new end (committed regions are not expected to
jmasa@581 233 // be proper subsets of other committed regions).
jmasa@581 234 assert(_committed[ri].start() >= _committed[ind].start(),
jmasa@208 235 "New end of committed region is inconsistent");
jmasa@208 236 new_end_aligned = _committed[ri].start();
jmasa@581 237 // new_end_aligned can be equal to the start of its
jmasa@581 238 // committed region (i.e., of "ind") if a second
jmasa@581 239 // region following "ind" also starts at the same location
jmasa@581 240 // as "ind".
jmasa@581 241 assert(new_end_aligned >= _committed[ind].start(),
jmasa@208 242 "New end of committed region is before start");
jmasa@208 243 debug_only(collided = true;)
jmasa@208 244 // Should only collide with 1 region
jmasa@208 245 break;
jmasa@208 246 }
jmasa@208 247 }
jmasa@208 248 }
jmasa@208 249 #ifdef ASSERT
jmasa@208 250 for (++ri; ri < _cur_covered_regions; ri++) {
jmasa@208 251 assert(!_committed[ri].contains(new_end_aligned),
jmasa@208 252 "New end of committed region is in a second committed region");
jmasa@208 253 }
jmasa@208 254 #endif
duke@0 255 // The guard page is always committed and should not be committed over.
jmasa@887 256 // "guarded" is used for assertion checking below and recalls the fact
jmasa@887 257 // that the would-be end of the new committed region would have
jmasa@887 258 // penetrated the guard page.
jmasa@887 259 HeapWord* new_end_for_commit = new_end_aligned;
jmasa@887 260
jmasa@887 261 DEBUG_ONLY(bool guarded = false;)
jmasa@887 262 if (new_end_for_commit > _guard_region.start()) {
jmasa@887 263 new_end_for_commit = _guard_region.start();
jmasa@887 264 DEBUG_ONLY(guarded = true;)
jmasa@887 265 }
jmasa@208 266
duke@0 267 if (new_end_for_commit > cur_committed.end()) {
duke@0 268 // Must commit new pages.
jmasa@6 269 MemRegion const new_committed =
duke@0 270 MemRegion(cur_committed.end(), new_end_for_commit);
duke@0 271
duke@0 272 assert(!new_committed.is_empty(), "Region should not be empty here");
duke@0 273 if (!os::commit_memory((char*)new_committed.start(),
duke@0 274 new_committed.byte_size(), _page_size)) {
duke@0 275 // Do better than this for Merlin
duke@0 276 vm_exit_out_of_memory(new_committed.byte_size(),
duke@0 277 "card table expansion");
duke@0 278 }
duke@0 279 // Use new_end_aligned (as opposed to new_end_for_commit) because
duke@0 280 // the cur_committed region may include the guard region.
duke@0 281 } else if (new_end_aligned < cur_committed.end()) {
duke@0 282 // Must uncommit pages.
jmasa@6 283 MemRegion const uncommit_region =
duke@0 284 committed_unique_to_self(ind, MemRegion(new_end_aligned,
duke@0 285 cur_committed.end()));
duke@0 286 if (!uncommit_region.is_empty()) {
duke@0 287 if (!os::uncommit_memory((char*)uncommit_region.start(),
duke@0 288 uncommit_region.byte_size())) {
jmasa@208 289 assert(false, "Card table contraction failed");
jmasa@208 290 // The call failed so don't change the end of the
jmasa@208 291 // committed region. This is better than taking the
jmasa@208 292 // VM down.
jmasa@208 293 new_end_aligned = _committed[ind].end();
duke@0 294 }
duke@0 295 }
duke@0 296 }
duke@0 297 // In any case, we can reset the end of the current committed entry.
duke@0 298 _committed[ind].set_end(new_end_aligned);
duke@0 299
duke@0 300 // The default of 0 is not necessarily clean cards.
duke@0 301 jbyte* entry;
duke@0 302 if (old_region.last() < _whole_heap.start()) {
duke@0 303 entry = byte_for(_whole_heap.start());
duke@0 304 } else {
duke@0 305 entry = byte_after(old_region.last());
duke@0 306 }
swamyv@489 307 assert(index_for(new_region.last()) < _guard_index,
duke@0 308 "The guard card will be overwritten");
jmasa@208 309 // The commented-out line below would clean only the newly expanded
jmasa@208 310 // region, not the aligned-up expanded region.
jmasa@208 311 // jbyte* const end = byte_after(new_region.last());
jmasa@208 312 jbyte* const end = (jbyte*) new_end_for_commit;
jmasa@887 313 assert((end >= byte_after(new_region.last())) || collided || guarded,
jmasa@208 314 "Expect to be beyond new region unless impacting another region");
duke@0 315 // do nothing if we resized downward.
jmasa@208 316 #ifdef ASSERT
jmasa@208 317 for (int ri = 0; ri < _cur_covered_regions; ri++) {
jmasa@208 318 if (ri != ind) {
jmasa@208 319 // The end of the new committed region should not
jmasa@208 320 // be in any existing region unless it matches
jmasa@208 321 // the start of the next region.
jmasa@208 322 assert(!_committed[ri].contains(end) ||
jmasa@208 323 (_committed[ri].start() == (HeapWord*) end),
jmasa@208 324 "Overlapping committed regions");
jmasa@208 325 }
jmasa@208 326 }
jmasa@208 327 #endif
duke@0 328 if (entry < end) {
duke@0 329 memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
duke@0 330 }
duke@0 331 }
duke@0 332 // In any case, the covered size changes.
duke@0 333 _covered[ind].set_word_size(new_region.word_size());
duke@0 334 if (TraceCardTableModRefBS) {
duke@0 335 gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
duke@0 336 gclog_or_tty->print_cr(" "
duke@0 337 " _covered[%d].start(): " INTPTR_FORMAT
duke@0 338 " _covered[%d].last(): " INTPTR_FORMAT,
duke@0 339 ind, _covered[ind].start(),
duke@0 340 ind, _covered[ind].last());
duke@0 341 gclog_or_tty->print_cr(" "
duke@0 342 " _committed[%d].start(): " INTPTR_FORMAT
duke@0 343 " _committed[%d].last(): " INTPTR_FORMAT,
duke@0 344 ind, _committed[ind].start(),
duke@0 345 ind, _committed[ind].last());
duke@0 346 gclog_or_tty->print_cr(" "
duke@0 347 " byte_for(start): " INTPTR_FORMAT
duke@0 348 " byte_for(last): " INTPTR_FORMAT,
duke@0 349 byte_for(_covered[ind].start()),
duke@0 350 byte_for(_covered[ind].last()));
duke@0 351 gclog_or_tty->print_cr(" "
duke@0 352 " addr_for(start): " INTPTR_FORMAT
duke@0 353 " addr_for(last): " INTPTR_FORMAT,
duke@0 354 addr_for((jbyte*) _committed[ind].start()),
duke@0 355 addr_for((jbyte*) _committed[ind].last()));
duke@0 356 }
duke@0 357 debug_only(verify_guard();)
duke@0 358 }
duke@0 359
duke@0 360 // Note that these versions are precise! The scanning code has to handle the
duke@0 361 // fact that the write barrier may be either precise or imprecise.
duke@0 362
coleenp@113 363 void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
duke@0 364 inline_write_ref_field(field, newVal);
duke@0 365 }
duke@0 366
iveresov@616 367 /*
iveresov@616 368 Claimed and deferred bits are used together in G1 during the evacuation
iveresov@616 369 pause. These bits can have the following state transitions:
iveresov@616 370 1. The claimed bit can be set over any other card state, except that
iveresov@616 371 the "dirty -> dirty and claimed" transition is checked for in
iveresov@616 372 G1 code and is not used.
iveresov@616 373 2. The deferred bit can be set only if the previous state of the card
iveresov@616 374 was either clean or claimed. mark_card_deferred() is wait-free.
iveresov@616 375 We do not care whether the operation is successful, because if
iveresov@616 376 it is not it will only result in a duplicate entry in the update
iveresov@616 377 buffer because of the "cache-miss". So it's not worth spinning.
iveresov@616 378 */
iveresov@616 379
duke@0 380
ysr@342 381 bool CardTableModRefBS::claim_card(size_t card_index) {
ysr@342 382 jbyte val = _byte_map[card_index];
iveresov@616 383 assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
iveresov@616 384 while (val == clean_card_val() ||
iveresov@616 385 (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
iveresov@616 386 jbyte new_val = val;
iveresov@616 387 if (val == clean_card_val()) {
iveresov@616 388 new_val = (jbyte)claimed_card_val();
iveresov@616 389 } else {
iveresov@616 390 new_val = val | (jbyte)claimed_card_val();
iveresov@616 391 }
iveresov@616 392 jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
iveresov@616 393 if (res == val) {
ysr@342 394 return true;
iveresov@616 395 }
iveresov@616 396 val = res;
ysr@342 397 }
ysr@342 398 return false;
ysr@342 399 }
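// Illustrative usage sketch (hypothetical caller, not from the original
// source): several GC worker threads may race to claim the same card;
// only the thread whose cmpxchg installs the claimed bit returns true
// and goes on to process the card, while the losers observe the updated
// value and return false.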
ysr@342 400
iveresov@616 401 bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
iveresov@616 402 jbyte val = _byte_map[card_index];
iveresov@616 403 // It's already processed
iveresov@616 404 if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
iveresov@616 405 return false;
iveresov@616 406 }
iveresov@616 407 // The cached bit can be installed either on a clean card or on a claimed card.
iveresov@616 408 jbyte new_val = val;
iveresov@616 409 if (val == clean_card_val()) {
iveresov@616 410 new_val = (jbyte)deferred_card_val();
iveresov@616 411 } else {
iveresov@616 412 if (val & claimed_card_val()) {
iveresov@616 413 new_val = val | (jbyte)deferred_card_val();
iveresov@616 414 }
iveresov@616 415 }
iveresov@616 416 if (new_val != val) {
iveresov@616 417 Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
iveresov@616 418 }
iveresov@616 419 return true;
iveresov@616 420 }
iveresov@616 421
iveresov@616 422
duke@0 423 void CardTableModRefBS::non_clean_card_iterate(Space* sp,
duke@0 424 MemRegion mr,
duke@0 425 DirtyCardToOopClosure* dcto_cl,
duke@0 426 MemRegionClosure* cl,
duke@0 427 bool clear) {
duke@0 428 if (!mr.is_empty()) {
duke@0 429 int n_threads = SharedHeap::heap()->n_par_threads();
duke@0 430 if (n_threads > 0) {
duke@0 431 #ifndef SERIALGC
duke@0 432 par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, clear, n_threads);
duke@0 433 #else // SERIALGC
duke@0 434 fatal("Parallel gc not supported here.");
duke@0 435 #endif // SERIALGC
duke@0 436 } else {
duke@0 437 non_clean_card_iterate_work(mr, cl, clear);
duke@0 438 }
duke@0 439 }
duke@0 440 }
duke@0 441
duke@0 442 // NOTE: For this to work correctly, it is important that
duke@0 443 // we look for non-clean cards below (so as to catch those
duke@0 444 // marked precleaned), rather than look explicitly for dirty
duke@0 445 // cards (and miss those marked precleaned). In that sense,
duke@0 446 // the name precleaned is currently somewhat of a misnomer.
duke@0 447 void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
duke@0 448 MemRegionClosure* cl,
duke@0 449 bool clear) {
duke@0 450 // Figure out whether we have to worry about parallelism.
duke@0 451 bool is_par = (SharedHeap::heap()->n_par_threads() > 1);
duke@0 452 for (int i = 0; i < _cur_covered_regions; i++) {
duke@0 453 MemRegion mri = mr.intersection(_covered[i]);
duke@0 454 if (mri.word_size() > 0) {
duke@0 455 jbyte* cur_entry = byte_for(mri.last());
duke@0 456 jbyte* limit = byte_for(mri.start());
duke@0 457 while (cur_entry >= limit) {
duke@0 458 jbyte* next_entry = cur_entry - 1;
duke@0 459 if (*cur_entry != clean_card) {
duke@0 460 size_t non_clean_cards = 1;
duke@0 461 // Should the next card be included in this range of dirty cards?
duke@0 462 while (next_entry >= limit && *next_entry != clean_card) {
duke@0 463 non_clean_cards++;
duke@0 464 cur_entry = next_entry;
duke@0 465 next_entry--;
duke@0 466 }
duke@0 467 // The memory region may not be on a card boundary. So that
duke@0 468 // objects beyond the end of the region are not processed, make
duke@0 469 // cur_cards precise with regard to the end of the memory region.
duke@0 470 MemRegion cur_cards(addr_for(cur_entry),
duke@0 471 non_clean_cards * card_size_in_words);
duke@0 472 MemRegion dirty_region = cur_cards.intersection(mri);
duke@0 473 if (clear) {
duke@0 474 for (size_t i = 0; i < non_clean_cards; i++) {
duke@0 475 // Clean the dirty cards (but leave the other non-clean
duke@0 476 // cards alone). If parallel, do the cleaning atomically.
duke@0 477 jbyte cur_entry_val = cur_entry[i];
duke@0 478 if (card_is_dirty_wrt_gen_iter(cur_entry_val)) {
duke@0 479 if (is_par) {
duke@0 480 jbyte res = Atomic::cmpxchg(clean_card, &cur_entry[i], cur_entry_val);
duke@0 481 assert(res != clean_card,
duke@0 482 "Dirty card mysteriously cleaned");
duke@0 483 } else {
duke@0 484 cur_entry[i] = clean_card;
duke@0 485 }
duke@0 486 }
duke@0 487 }
duke@0 488 }
duke@0 489 cl->do_MemRegion(dirty_region);
duke@0 490 }
duke@0 491 cur_entry = next_entry;
duke@0 492 }
duke@0 493 }
duke@0 494 }
duke@0 495 }
duke@0 496
duke@0 497 void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp,
duke@0 498 OopClosure* cl,
duke@0 499 bool clear,
duke@0 500 bool before_save_marks) {
duke@0 501 // Note that dcto_cl is resource-allocated, so there is no
duke@0 502 // corresponding "delete".
duke@0 503 DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision());
duke@0 504 MemRegion used_mr;
duke@0 505 if (before_save_marks) {
duke@0 506 used_mr = sp->used_region_at_save_marks();
duke@0 507 } else {
duke@0 508 used_mr = sp->used_region();
duke@0 509 }
duke@0 510 non_clean_card_iterate(sp, used_mr, dcto_cl, dcto_cl, clear);
duke@0 511 }
duke@0 512
duke@0 513 void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
duke@0 514 jbyte* cur = byte_for(mr.start());
duke@0 515 jbyte* last = byte_after(mr.last());
duke@0 516 while (cur < last) {
duke@0 517 *cur = dirty_card;
duke@0 518 cur++;
duke@0 519 }
duke@0 520 }
duke@0 521
ysr@342 522 void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
duke@0 523 for (int i = 0; i < _cur_covered_regions; i++) {
duke@0 524 MemRegion mri = mr.intersection(_covered[i]);
duke@0 525 if (!mri.is_empty()) dirty_MemRegion(mri);
duke@0 526 }
duke@0 527 }
duke@0 528
duke@0 529 void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
duke@0 530 // Be conservative: only clean cards entirely contained within the
duke@0 531 // region.
duke@0 532 jbyte* cur;
duke@0 533 if (mr.start() == _whole_heap.start()) {
duke@0 534 cur = byte_for(mr.start());
duke@0 535 } else {
duke@0 536 assert(mr.start() > _whole_heap.start(), "mr is not covered.");
duke@0 537 cur = byte_after(mr.start() - 1);
duke@0 538 }
duke@0 539 jbyte* last = byte_after(mr.last());
duke@0 540 memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
duke@0 541 }
duke@0 542
duke@0 543 void CardTableModRefBS::clear(MemRegion mr) {
duke@0 544 for (int i = 0; i < _cur_covered_regions; i++) {
duke@0 545 MemRegion mri = mr.intersection(_covered[i]);
duke@0 546 if (!mri.is_empty()) clear_MemRegion(mri);
duke@0 547 }
duke@0 548 }
duke@0 549
ysr@342 550 void CardTableModRefBS::dirty(MemRegion mr) {
ysr@342 551 jbyte* first = byte_for(mr.start());
ysr@342 552 jbyte* last = byte_after(mr.last());
ysr@342 553 memset(first, dirty_card, last-first);
ysr@342 554 }
ysr@342 555
duke@0 556 // NOTES:
duke@0 557 // (1) Unlike mod_oop_in_space_iterate() above, dirty_card_iterate()
duke@0 558 // iterates over dirty card ranges in increasing address order.
duke@0 559 void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
duke@0 560 MemRegionClosure* cl) {
duke@0 561 for (int i = 0; i < _cur_covered_regions; i++) {
duke@0 562 MemRegion mri = mr.intersection(_covered[i]);
duke@0 563 if (!mri.is_empty()) {
duke@0 564 jbyte *cur_entry, *next_entry, *limit;
duke@0 565 for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
duke@0 566 cur_entry <= limit;
duke@0 567 cur_entry = next_entry) {
duke@0 568 next_entry = cur_entry + 1;
duke@0 569 if (*cur_entry == dirty_card) {
duke@0 570 size_t dirty_cards;
duke@0 571 // Accumulate maximal dirty card range, starting at cur_entry
duke@0 572 for (dirty_cards = 1;
duke@0 573 next_entry <= limit && *next_entry == dirty_card;
duke@0 574 dirty_cards++, next_entry++);
duke@0 575 MemRegion cur_cards(addr_for(cur_entry),
duke@0 576 dirty_cards*card_size_in_words);
duke@0 577 cl->do_MemRegion(cur_cards);
duke@0 578 }
duke@0 579 }
duke@0 580 }
duke@0 581 }
duke@0 582 }
duke@0 583
ysr@342 584 MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
ysr@342 585 bool reset,
ysr@342 586 int reset_val) {
duke@0 587 for (int i = 0; i < _cur_covered_regions; i++) {
duke@0 588 MemRegion mri = mr.intersection(_covered[i]);
duke@0 589 if (!mri.is_empty()) {
duke@0 590 jbyte* cur_entry, *next_entry, *limit;
duke@0 591 for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
duke@0 592 cur_entry <= limit;
duke@0 593 cur_entry = next_entry) {
duke@0 594 next_entry = cur_entry + 1;
duke@0 595 if (*cur_entry == dirty_card) {
duke@0 596 size_t dirty_cards;
duke@0 597 // Accumulate maximal dirty card range, starting at cur_entry
duke@0 598 for (dirty_cards = 1;
duke@0 599 next_entry <= limit && *next_entry == dirty_card;
duke@0 600 dirty_cards++, next_entry++);
duke@0 601 MemRegion cur_cards(addr_for(cur_entry),
duke@0 602 dirty_cards*card_size_in_words);
ysr@342 603 if (reset) {
ysr@342 604 for (size_t i = 0; i < dirty_cards; i++) {
ysr@342 605 cur_entry[i] = reset_val;
ysr@342 606 }
duke@0 607 }
duke@0 608 return cur_cards;
duke@0 609 }
duke@0 610 }
duke@0 611 }
duke@0 612 }
duke@0 613 return MemRegion(mr.end(), mr.end());
duke@0 614 }
duke@0 615
duke@0 616 // Set all the dirty cards in the given region to "precleaned" state.
duke@0 617 void CardTableModRefBS::preclean_dirty_cards(MemRegion mr) {
duke@0 618 for (int i = 0; i < _cur_covered_regions; i++) {
duke@0 619 MemRegion mri = mr.intersection(_covered[i]);
duke@0 620 if (!mri.is_empty()) {
duke@0 621 jbyte *cur_entry, *limit;
duke@0 622 for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
duke@0 623 cur_entry <= limit;
duke@0 624 cur_entry++) {
duke@0 625 if (*cur_entry == dirty_card) {
duke@0 626 *cur_entry = precleaned_card;
duke@0 627 }
duke@0 628 }
duke@0 629 }
duke@0 630 }
duke@0 631 }
duke@0 632
duke@0 633 uintx CardTableModRefBS::ct_max_alignment_constraint() {
duke@0 634 return card_size * os::vm_page_size();
duke@0 635 }
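// Illustrative sketch, assuming a 512-byte card_size and a 4 KB
// vm_page_size: the constraint is 512 * 4096 = 2 MB; one page of card
// table bytes then covers exactly 2 MB of heap, so heap boundaries
// aligned to this value correspond to page-aligned card table addresses.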
duke@0 636
duke@0 637 void CardTableModRefBS::verify_guard() {
duke@0 638 // For product build verification
duke@0 639 guarantee(_byte_map[_guard_index] == last_card,
duke@0 640 "card table guard has been modified");
duke@0 641 }
duke@0 642
duke@0 643 void CardTableModRefBS::verify() {
duke@0 644 verify_guard();
duke@0 645 }
duke@0 646
duke@0 647 #ifndef PRODUCT
duke@0 648 class GuaranteeNotModClosure: public MemRegionClosure {
duke@0 649 CardTableModRefBS* _ct;
duke@0 650 public:
duke@0 651 GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {}
duke@0 652 void do_MemRegion(MemRegion mr) {
duke@0 653 jbyte* entry = _ct->byte_for(mr.start());
duke@0 654 guarantee(*entry != CardTableModRefBS::clean_card,
duke@0 655 "Dirty card in region that should be clean");
duke@0 656 }
duke@0 657 };
duke@0 658
duke@0 659 void CardTableModRefBS::verify_clean_region(MemRegion mr) {
duke@0 660 GuaranteeNotModClosure blk(this);
duke@0 661 non_clean_card_iterate_work(mr, &blk, false);
duke@0 662 }
duke@0 663 #endif
duke@0 664
duke@0 665 bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
duke@0 666 return
duke@0 667 CardTableModRefBS::card_will_be_scanned(cv) ||
duke@0 668 _rs->is_prev_nonclean_card_val(cv);
duke@0 669 };
duke@0 670
duke@0 671 bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
duke@0 672 return
duke@0 673 cv != clean_card &&
duke@0 674 (CardTableModRefBS::card_may_have_been_dirty(cv) ||
duke@0 675 CardTableRS::youngergen_may_have_been_dirty(cv));
duke@0 676 };