annotate src/share/vm/memory/cardTableModRefBS.cpp @ 489:2494ab195856

6653214: MemoryPoolMXBean.setUsageThreshold() does not support large heap sizes. Reviewed-by: ysr, mchung
author swamyv
date Mon, 15 Dec 2008 13:58:57 -0800
parents 1ee8caae33af
children 0fbdb4381b99 9e5a6ed08fc9
rev   line source
duke@0 1 /*
xdono@196 2 * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
duke@0 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@0 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@0 21 * have any questions.
duke@0 22 *
duke@0 23 */
duke@0 24
duke@0 25 // This kind of "BarrierSet" allows a "CollectedHeap" to detect and
duke@0 26 // enumerate ref fields that have been modified (since the last
duke@0 27 // enumeration).
duke@0 28
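// A minimal sketch of the mechanism (illustrative only; a 512-byte card,
// i.e. card_shift == 9, is the usual HotSpot default, and field_addr stands
// for the address of the updated ref field):
//
//   // one byte of the card table covers one card_size-byte "card" of heap
//   jbyte* card = byte_map_base + (uintptr_t(field_addr) >> card_shift);
//   *card = dirty_card;            // the write barrier dirties the card
//
// A later scan then only needs to visit objects on non-clean cards to find
// the ref fields modified since the previous enumeration.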
duke@0 29 # include "incls/_precompiled.incl"
duke@0 30 # include "incls/_cardTableModRefBS.cpp.incl"
duke@0 31
duke@0 32 size_t CardTableModRefBS::cards_required(size_t covered_words)
duke@0 33 {
duke@0 34 // Add one for a guard card, used to detect errors.
duke@0 35 const size_t words = align_size_up(covered_words, card_size_in_words);
duke@0 36 return words / card_size_in_words + 1;
duke@0 37 }
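// Worked example (assuming an LP64 build with 8-byte HeapWords and the usual
// 512-byte cards, so card_size_in_words == 64): covering a 64 MB heap gives
// covered_words == 8388608, already a multiple of 64, so this returns
// 8388608 / 64 + 1 == 131073 -- 131072 data cards plus the one guard card.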
duke@0 38
duke@0 39 size_t CardTableModRefBS::compute_byte_map_size()
duke@0 40 {
duke@0 41 assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
duke@0 42 "unitialized, check declaration order");
duke@0 43 assert(_page_size != 0, "unitialized, check declaration order");
duke@0 44 const size_t granularity = os::vm_allocation_granularity();
duke@0 45 return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
duke@0 46 }
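// Continuing the example above: _guard_index + 1 == 131073 card bytes.
// Assuming a 4 KB _page_size and a 4 KB vm_allocation_granularity, this
// rounds up to 33 pages, i.e. a 135168-byte card table.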
duke@0 47
duke@0 48 CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
duke@0 49 int max_covered_regions):
duke@0 50 ModRefBarrierSet(max_covered_regions),
duke@0 51 _whole_heap(whole_heap),
duke@0 52 _guard_index(cards_required(whole_heap.word_size()) - 1),
duke@0 53 _last_valid_index(_guard_index - 1),
jcoomes@21 54 _page_size(os::vm_page_size()),
duke@0 55 _byte_map_size(compute_byte_map_size())
duke@0 56 {
duke@0 57 _kind = BarrierSet::CardTableModRef;
duke@0 58
duke@0 59 HeapWord* low_bound = _whole_heap.start();
duke@0 60 HeapWord* high_bound = _whole_heap.end();
duke@0 61 assert((uintptr_t(low_bound) & (card_size - 1)) == 0, "heap must start at card boundary");
duke@0 62 assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary");
duke@0 63
duke@0 64 assert(card_size <= 512, "card_size must be at most 512"); // why?
duke@0 65
duke@0 66 _covered = new MemRegion[max_covered_regions];
duke@0 67 _committed = new MemRegion[max_covered_regions];
duke@0 68 if (_covered == NULL || _committed == NULL)
duke@0 69 vm_exit_during_initialization("couldn't alloc card table covered region set.");
duke@0 70 int i;
duke@0 71 for (i = 0; i < max_covered_regions; i++) {
duke@0 72 _covered[i].set_word_size(0);
duke@0 73 _committed[i].set_word_size(0);
duke@0 74 }
duke@0 75 _cur_covered_regions = 0;
duke@0 76
duke@0 77 const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
duke@0 78 MAX2(_page_size, (size_t) os::vm_allocation_granularity());
duke@0 79 ReservedSpace heap_rs(_byte_map_size, rs_align, false);
duke@0 80 os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
duke@0 81 _page_size, heap_rs.base(), heap_rs.size());
duke@0 82 if (!heap_rs.is_reserved()) {
duke@0 83 vm_exit_during_initialization("Could not reserve enough space for the "
duke@0 84 "card marking array");
duke@0 85 }
duke@0 86
duke@0 87 // The assembler store_check code will do an unsigned shift of the oop,
duke@0 88 // then add it to byte_map_base, i.e.
duke@0 89 //
duke@0 90 // _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
duke@0 91 _byte_map = (jbyte*) heap_rs.base();
duke@0 92 byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
duke@0 93 assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
duke@0 94 assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
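// Worked example of the biasing above (assuming 512-byte cards, so
// card_shift == 9): with low_bound == 0x20000000, byte_map_base is
// _byte_map - 0x100000, and for p == 0x20000200 (one card above low_bound)
//   byte_map_base + (uintptr_t(p) >> card_shift) == &_byte_map[1],
// so the generated store check never has to subtract low_bound explicitly.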
duke@0 95
duke@0 96 jbyte* guard_card = &_byte_map[_guard_index];
duke@0 97 uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
duke@0 98 _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
duke@0 99 if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
duke@0 100 // Do better than this for Merlin
duke@0 101 vm_exit_out_of_memory(_page_size, "card table last card");
duke@0 102 }
duke@0 103 *guard_card = last_card;
duke@0 104
duke@0 105 _lowest_non_clean =
duke@0 106 NEW_C_HEAP_ARRAY(CardArr, max_covered_regions);
duke@0 107 _lowest_non_clean_chunk_size =
duke@0 108 NEW_C_HEAP_ARRAY(size_t, max_covered_regions);
duke@0 109 _lowest_non_clean_base_chunk_index =
duke@0 110 NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions);
duke@0 111 _last_LNC_resizing_collection =
duke@0 112 NEW_C_HEAP_ARRAY(int, max_covered_regions);
duke@0 113 if (_lowest_non_clean == NULL
duke@0 114 || _lowest_non_clean_chunk_size == NULL
duke@0 115 || _lowest_non_clean_base_chunk_index == NULL
duke@0 116 || _last_LNC_resizing_collection == NULL)
duke@0 117 vm_exit_during_initialization("couldn't allocate an LNC array.");
duke@0 118 for (i = 0; i < max_covered_regions; i++) {
duke@0 119 _lowest_non_clean[i] = NULL;
duke@0 120 _lowest_non_clean_chunk_size[i] = 0;
duke@0 121 _last_LNC_resizing_collection[i] = -1;
duke@0 122 }
duke@0 123
duke@0 124 if (TraceCardTableModRefBS) {
duke@0 125 gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
duke@0 126 gclog_or_tty->print_cr(" "
duke@0 127 " &_byte_map[0]: " INTPTR_FORMAT
duke@0 128 " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
duke@0 129 &_byte_map[0],
duke@0 130 &_byte_map[_last_valid_index]);
duke@0 131 gclog_or_tty->print_cr(" "
duke@0 132 " byte_map_base: " INTPTR_FORMAT,
duke@0 133 byte_map_base);
duke@0 134 }
duke@0 135 }
duke@0 136
duke@0 137 int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
duke@0 138 int i;
duke@0 139 for (i = 0; i < _cur_covered_regions; i++) {
duke@0 140 if (_covered[i].start() == base) return i;
duke@0 141 if (_covered[i].start() > base) break;
duke@0 142 }
duke@0 143 // If we didn't find it, create a new one.
duke@0 144 assert(_cur_covered_regions < _max_covered_regions,
duke@0 145 "too many covered regions");
duke@0 146 // Move the ones above up, to maintain sorted order.
duke@0 147 for (int j = _cur_covered_regions; j > i; j--) {
duke@0 148 _covered[j] = _covered[j-1];
duke@0 149 _committed[j] = _committed[j-1];
duke@0 150 }
duke@0 151 int res = i;
duke@0 152 _cur_covered_regions++;
duke@0 153 _covered[res].set_start(base);
duke@0 154 _covered[res].set_word_size(0);
duke@0 155 jbyte* ct_start = byte_for(base);
duke@0 156 uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
duke@0 157 _committed[res].set_start((HeapWord*)ct_start_aligned);
duke@0 158 _committed[res].set_word_size(0);
duke@0 159 return res;
duke@0 160 }
duke@0 161
duke@0 162 int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
duke@0 163 for (int i = 0; i < _cur_covered_regions; i++) {
duke@0 164 if (_covered[i].contains(addr)) {
duke@0 165 return i;
duke@0 166 }
duke@0 167 }
duke@0 168 assert(0, "address outside of heap?");
duke@0 169 return -1;
duke@0 170 }
duke@0 171
duke@0 172 HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
duke@0 173 HeapWord* max_end = NULL;
duke@0 174 for (int j = 0; j < ind; j++) {
duke@0 175 HeapWord* this_end = _committed[j].end();
duke@0 176 if (this_end > max_end) max_end = this_end;
duke@0 177 }
duke@0 178 return max_end;
duke@0 179 }
duke@0 180
duke@0 181 MemRegion CardTableModRefBS::committed_unique_to_self(int self,
duke@0 182 MemRegion mr) const {
duke@0 183 MemRegion result = mr;
duke@0 184 for (int r = 0; r < _cur_covered_regions; r += 1) {
duke@0 185 if (r != self) {
duke@0 186 result = result.minus(_committed[r]);
duke@0 187 }
duke@0 188 }
duke@0 189 // Never include the guard page.
duke@0 190 result = result.minus(_guard_region);
duke@0 191 return result;
duke@0 192 }
duke@0 193
duke@0 194 void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
duke@0 195 // We don't change the start of a region, only the end.
duke@0 196 assert(_whole_heap.contains(new_region),
duke@0 197 "attempt to cover area not in reserved area");
duke@0 198 debug_only(verify_guard();)
jmasa@208 199 // collided is true if the expansion would push into another committed region
jmasa@208 200 debug_only(bool collided = false;)
jmasa@6 201 int const ind = find_covering_region_by_base(new_region.start());
jmasa@6 202 MemRegion const old_region = _covered[ind];
duke@0 203 assert(old_region.start() == new_region.start(), "just checking");
duke@0 204 if (new_region.word_size() != old_region.word_size()) {
duke@0 205 // Commit new or uncommit old pages, if necessary.
duke@0 206 MemRegion cur_committed = _committed[ind];
duke@0 207 // Extend the end of this _committed region
duke@0 208 // to cover the end of any lower _committed regions.
duke@0 209 // This forms overlapping regions, but never interior regions.
jmasa@6 210 HeapWord* const max_prev_end = largest_prev_committed_end(ind);
duke@0 211 if (max_prev_end > cur_committed.end()) {
duke@0 212 cur_committed.set_end(max_prev_end);
duke@0 213 }
duke@0 214 // Align the end up to a page size (starts are already aligned).
jmasa@6 215 jbyte* const new_end = byte_after(new_region.last());
jmasa@208 216 HeapWord* new_end_aligned =
jmasa@6 217 (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
duke@0 218 assert(new_end_aligned >= (HeapWord*) new_end,
duke@0 219 "align up, but less");
jmasa@208 220 int ri = 0;
jmasa@208 221 for (ri = 0; ri < _cur_covered_regions; ri++) {
jmasa@208 222 if (ri != ind) {
jmasa@208 223 if (_committed[ri].contains(new_end_aligned)) {
jmasa@208 224 assert((new_end_aligned >= _committed[ri].start()) &&
jmasa@208 225 (_committed[ri].start() > _committed[ind].start()),
jmasa@208 226 "New end of committed region is inconsistent");
jmasa@208 227 new_end_aligned = _committed[ri].start();
jmasa@208 228 assert(new_end_aligned > _committed[ind].start(),
jmasa@208 229 "New end of committed region is before start");
jmasa@208 230 debug_only(collided = true;)
jmasa@208 231 // Should only collide with 1 region
jmasa@208 232 break;
jmasa@208 233 }
jmasa@208 234 }
jmasa@208 235 }
jmasa@208 236 #ifdef ASSERT
jmasa@208 237 for (++ri; ri < _cur_covered_regions; ri++) {
jmasa@208 238 assert(!_committed[ri].contains(new_end_aligned),
jmasa@208 239 "New end of committed region is in a second committed region");
jmasa@208 240 }
jmasa@208 241 #endif
duke@0 242 // The guard page is always committed and should not be committed over.
jmasa@208 243 HeapWord* const new_end_for_commit = MIN2(new_end_aligned,
jmasa@208 244 _guard_region.start());
jmasa@208 245
duke@0 246 if (new_end_for_commit > cur_committed.end()) {
duke@0 247 // Must commit new pages.
jmasa@6 248 MemRegion const new_committed =
duke@0 249 MemRegion(cur_committed.end(), new_end_for_commit);
duke@0 250
duke@0 251 assert(!new_committed.is_empty(), "Region should not be empty here");
duke@0 252 if (!os::commit_memory((char*)new_committed.start(),
duke@0 253 new_committed.byte_size(), _page_size)) {
duke@0 254 // Do better than this for Merlin
duke@0 255 vm_exit_out_of_memory(new_committed.byte_size(),
duke@0 256 "card table expansion");
duke@0 257 }
duke@0 258 // Use new_end_aligned (as opposed to new_end_for_commit) because
duke@0 259 // the cur_committed region may include the guard region.
duke@0 260 } else if (new_end_aligned < cur_committed.end()) {
duke@0 261 // Must uncommit pages.
jmasa@6 262 MemRegion const uncommit_region =
duke@0 263 committed_unique_to_self(ind, MemRegion(new_end_aligned,
duke@0 264 cur_committed.end()));
duke@0 265 if (!uncommit_region.is_empty()) {
duke@0 266 if (!os::uncommit_memory((char*)uncommit_region.start(),
duke@0 267 uncommit_region.byte_size())) {
jmasa@208 268 assert(false, "Card table contraction failed");
jmasa@208 269 // The call failed so don't change the end of the
jmasa@208 270 // committed region. This is better than taking the
jmasa@208 271 // VM down.
jmasa@208 272 new_end_aligned = _committed[ind].end();
duke@0 273 }
duke@0 274 }
duke@0 275 }
duke@0 276 // In any case, we can reset the end of the current committed entry.
duke@0 277 _committed[ind].set_end(new_end_aligned);
duke@0 278
duke@0 279 // Newly committed memory defaults to 0, which is not necessarily the clean card value.
duke@0 280 jbyte* entry;
duke@0 281 if (old_region.last() < _whole_heap.start()) {
duke@0 282 entry = byte_for(_whole_heap.start());
duke@0 283 } else {
duke@0 284 entry = byte_after(old_region.last());
duke@0 285 }
swamyv@489 286 assert(index_for(new_region.last()) < _guard_index,
duke@0 287 "The guard card will be overwritten");
jmasa@208 288 // The commented-out line below would clean only the newly expanded
jmasa@208 289 // region, not the aligned-up expanded region.
jmasa@208 290 // jbyte* const end = byte_after(new_region.last());
jmasa@208 291 jbyte* const end = (jbyte*) new_end_for_commit;
jmasa@208 292 assert((end >= byte_after(new_region.last())) || collided,
jmasa@208 293 "Expect to be beyond new region unless impacting another region");
duke@0 294 // do nothing if we resized downward.
jmasa@208 295 #ifdef ASSERT
jmasa@208 296 for (int ri = 0; ri < _cur_covered_regions; ri++) {
jmasa@208 297 if (ri != ind) {
jmasa@208 298 // The end of the new committed region should not
jmasa@208 299 // be in any existing region unless it matches
jmasa@208 300 // the start of the next region.
jmasa@208 301 assert(!_committed[ri].contains(end) ||
jmasa@208 302 (_committed[ri].start() == (HeapWord*) end),
jmasa@208 303 "Overlapping committed regions");
jmasa@208 304 }
jmasa@208 305 }
jmasa@208 306 #endif
duke@0 307 if (entry < end) {
duke@0 308 memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
duke@0 309 }
duke@0 310 }
duke@0 311 // In any case, the covered size changes.
duke@0 312 _covered[ind].set_word_size(new_region.word_size());
duke@0 313 if (TraceCardTableModRefBS) {
duke@0 314 gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
duke@0 315 gclog_or_tty->print_cr(" "
duke@0 316 " _covered[%d].start(): " INTPTR_FORMAT
duke@0 317 " _covered[%d].last(): " INTPTR_FORMAT,
duke@0 318 ind, _covered[ind].start(),
duke@0 319 ind, _covered[ind].last());
duke@0 320 gclog_or_tty->print_cr(" "
duke@0 321 " _committed[%d].start(): " INTPTR_FORMAT
duke@0 322 " _committed[%d].last(): " INTPTR_FORMAT,
duke@0 323 ind, _committed[ind].start(),
duke@0 324 ind, _committed[ind].last());
duke@0 325 gclog_or_tty->print_cr(" "
duke@0 326 " byte_for(start): " INTPTR_FORMAT
duke@0 327 " byte_for(last): " INTPTR_FORMAT,
duke@0 328 byte_for(_covered[ind].start()),
duke@0 329 byte_for(_covered[ind].last()));
duke@0 330 gclog_or_tty->print_cr(" "
duke@0 331 " addr_for(start): " INTPTR_FORMAT
duke@0 332 " addr_for(last): " INTPTR_FORMAT,
duke@0 333 addr_for((jbyte*) _committed[ind].start()),
duke@0 334 addr_for((jbyte*) _committed[ind].last()));
duke@0 335 }
duke@0 336 debug_only(verify_guard();)
duke@0 337 }
duke@0 338
duke@0 339 // Note that these versions are precise! The scanning code has to handle the
duke@0 340 // fact that the write barrier may be either precise or imprecise.
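// Rough illustration (not a definition taken from this file): a precise
// barrier dirties the card of the field address itself,
//   *byte_for(field) = dirty_card;
// which is what inline_write_ref_field(), called below, amounts to. An
// imprecise barrier may instead dirty only the card of the containing
// object's header, so the scanning code cannot assume the dirty card
// itself holds the modified field.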
duke@0 341
coleenp@113 342 void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
duke@0 343 inline_write_ref_field(field, newVal);
duke@0 344 }
duke@0 345
duke@0 346
ysr@342 347 bool CardTableModRefBS::claim_card(size_t card_index) {
ysr@342 348 jbyte val = _byte_map[card_index];
ysr@342 349 if (val != claimed_card_val()) {
ysr@342 350 jbyte res = Atomic::cmpxchg((jbyte) claimed_card_val(), &_byte_map[card_index], val);
ysr@342 351 if (res == val)
ysr@342 352 return true;
ysr@342 353 else return false;
ysr@342 354 }
ysr@342 355 return false;
ysr@342 356 }
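// Hypothetical usage sketch (process_card() is not a function in this
// file): among threads racing on the same card, at most one sees its
// cmpxchg succeed, so each card is handed to at most one worker:
//
//   if (claim_card(card_index)) {
//     process_card(card_index);   // only the winning thread gets here
//   }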
ysr@342 357
duke@0 358 void CardTableModRefBS::non_clean_card_iterate(Space* sp,
duke@0 359 MemRegion mr,
duke@0 360 DirtyCardToOopClosure* dcto_cl,
duke@0 361 MemRegionClosure* cl,
duke@0 362 bool clear) {
duke@0 363 if (!mr.is_empty()) {
duke@0 364 int n_threads = SharedHeap::heap()->n_par_threads();
duke@0 365 if (n_threads > 0) {
duke@0 366 #ifndef SERIALGC
duke@0 367 par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, clear, n_threads);
duke@0 368 #else // SERIALGC
duke@0 369 fatal("Parallel gc not supported here.");
duke@0 370 #endif // SERIALGC
duke@0 371 } else {
duke@0 372 non_clean_card_iterate_work(mr, cl, clear);
duke@0 373 }
duke@0 374 }
duke@0 375 }
duke@0 376
duke@0 377 // NOTE: For this to work correctly, it is important that
duke@0 378 // we look for non-clean cards below (so as to catch those
duke@0 379 // marked precleaned), rather than look explicitly for dirty
duke@0 380 // cards (and miss those marked precleaned). In that sense,
duke@0 381 // the name precleaned is currently somewhat of a misnomer.
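// Put differently: the card table has more states than just clean/dirty
// (for example precleaned_card and the claimed value), and everything
// other than clean_card must still be scanned, hence the
// "*cur_entry != clean_card" test below rather than a test against dirty_card.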
duke@0 382 void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
duke@0 383 MemRegionClosure* cl,
duke@0 384 bool clear) {
duke@0 385 // Figure out whether we have to worry about parallelism.
duke@0 386 bool is_par = (SharedHeap::heap()->n_par_threads() > 1);
duke@0 387 for (int i = 0; i < _cur_covered_regions; i++) {
duke@0 388 MemRegion mri = mr.intersection(_covered[i]);
duke@0 389 if (mri.word_size() > 0) {
duke@0 390 jbyte* cur_entry = byte_for(mri.last());
duke@0 391 jbyte* limit = byte_for(mri.start());
duke@0 392 while (cur_entry >= limit) {
duke@0 393 jbyte* next_entry = cur_entry - 1;
duke@0 394 if (*cur_entry != clean_card) {
duke@0 395 size_t non_clean_cards = 1;
duke@0 396 // Should the next card be included in this range of dirty cards?
duke@0 397 while (next_entry >= limit && *next_entry != clean_card) {
duke@0 398 non_clean_cards++;
duke@0 399 cur_entry = next_entry;
duke@0 400 next_entry--;
duke@0 401 }
duke@0 402 // The memory region may not end on a card boundary. To avoid
duke@0 403 // processing objects beyond the end of the region, make
duke@0 404 // cur_cards precise with regard to the end of the memory region.
duke@0 405 MemRegion cur_cards(addr_for(cur_entry),
duke@0 406 non_clean_cards * card_size_in_words);
duke@0 407 MemRegion dirty_region = cur_cards.intersection(mri);
duke@0 408 if (clear) {
duke@0 409 for (size_t i = 0; i < non_clean_cards; i++) {
duke@0 410 // Clean the dirty cards (but leave the other non-clean
duke@0 411 // cards alone). If parallel, do the cleaning atomically.
duke@0 412 jbyte cur_entry_val = cur_entry[i];
duke@0 413 if (card_is_dirty_wrt_gen_iter(cur_entry_val)) {
duke@0 414 if (is_par) {
duke@0 415 jbyte res = Atomic::cmpxchg(clean_card, &cur_entry[i], cur_entry_val);
duke@0 416 assert(res != clean_card,
duke@0 417 "Dirty card mysteriously cleaned");
duke@0 418 } else {
duke@0 419 cur_entry[i] = clean_card;
duke@0 420 }
duke@0 421 }
duke@0 422 }
duke@0 423 }
duke@0 424 cl->do_MemRegion(dirty_region);
duke@0 425 }
duke@0 426 cur_entry = next_entry;
duke@0 427 }
duke@0 428 }
duke@0 429 }
duke@0 430 }
duke@0 431
duke@0 432 void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp,
duke@0 433 OopClosure* cl,
duke@0 434 bool clear,
duke@0 435 bool before_save_marks) {
duke@0 436 // Note that dcto_cl is resource-allocated, so there is no
duke@0 437 // corresponding "delete".
duke@0 438 DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision());
duke@0 439 MemRegion used_mr;
duke@0 440 if (before_save_marks) {
duke@0 441 used_mr = sp->used_region_at_save_marks();
duke@0 442 } else {
duke@0 443 used_mr = sp->used_region();
duke@0 444 }
duke@0 445 non_clean_card_iterate(sp, used_mr, dcto_cl, dcto_cl, clear);
duke@0 446 }
duke@0 447
duke@0 448 void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
duke@0 449 jbyte* cur = byte_for(mr.start());
duke@0 450 jbyte* last = byte_after(mr.last());
duke@0 451 while (cur < last) {
duke@0 452 *cur = dirty_card;
duke@0 453 cur++;
duke@0 454 }
duke@0 455 }
duke@0 456
ysr@342 457 void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
duke@0 458 for (int i = 0; i < _cur_covered_regions; i++) {
duke@0 459 MemRegion mri = mr.intersection(_covered[i]);
duke@0 460 if (!mri.is_empty()) dirty_MemRegion(mri);
duke@0 461 }
duke@0 462 }
duke@0 463
duke@0 464 void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
duke@0 465 // Be conservative: only clean cards entirely contained within the
duke@0 466 // region.
duke@0 467 jbyte* cur;
duke@0 468 if (mr.start() == _whole_heap.start()) {
duke@0 469 cur = byte_for(mr.start());
duke@0 470 } else {
duke@0 471 assert(mr.start() > _whole_heap.start(), "mr is not covered.");
duke@0 472 cur = byte_after(mr.start() - 1);
duke@0 473 }
duke@0 474 jbyte* last = byte_after(mr.last());
duke@0 475 memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
duke@0 476 }
duke@0 477
duke@0 478 void CardTableModRefBS::clear(MemRegion mr) {
duke@0 479 for (int i = 0; i < _cur_covered_regions; i++) {
duke@0 480 MemRegion mri = mr.intersection(_covered[i]);
duke@0 481 if (!mri.is_empty()) clear_MemRegion(mri);
duke@0 482 }
duke@0 483 }
duke@0 484
ysr@342 485 void CardTableModRefBS::dirty(MemRegion mr) {
ysr@342 486 jbyte* first = byte_for(mr.start());
ysr@342 487 jbyte* last = byte_after(mr.last());
ysr@342 488 memset(first, dirty_card, last-first);
ysr@342 489 }
ysr@342 490
duke@0 491 // NOTES:
duke@0 492 // (1) Unlike mod_oop_in_space_iterate() above, dirty_card_iterate()
duke@0 493 // iterates over dirty card ranges in increasing address order.
duke@0 494 void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
duke@0 495 MemRegionClosure* cl) {
duke@0 496 for (int i = 0; i < _cur_covered_regions; i++) {
duke@0 497 MemRegion mri = mr.intersection(_covered[i]);
duke@0 498 if (!mri.is_empty()) {
duke@0 499 jbyte *cur_entry, *next_entry, *limit;
duke@0 500 for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
duke@0 501 cur_entry <= limit;
duke@0 502 cur_entry = next_entry) {
duke@0 503 next_entry = cur_entry + 1;
duke@0 504 if (*cur_entry == dirty_card) {
duke@0 505 size_t dirty_cards;
duke@0 506 // Accumulate maximal dirty card range, starting at cur_entry
duke@0 507 for (dirty_cards = 1;
duke@0 508 next_entry <= limit && *next_entry == dirty_card;
duke@0 509 dirty_cards++, next_entry++);
duke@0 510 MemRegion cur_cards(addr_for(cur_entry),
duke@0 511 dirty_cards*card_size_in_words);
duke@0 512 cl->do_MemRegion(cur_cards);
duke@0 513 }
duke@0 514 }
duke@0 515 }
duke@0 516 }
duke@0 517 }
duke@0 518
ysr@342 519 MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
ysr@342 520 bool reset,
ysr@342 521 int reset_val) {
duke@0 522 for (int i = 0; i < _cur_covered_regions; i++) {
duke@0 523 MemRegion mri = mr.intersection(_covered[i]);
duke@0 524 if (!mri.is_empty()) {
duke@0 525 jbyte* cur_entry, *next_entry, *limit;
duke@0 526 for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
duke@0 527 cur_entry <= limit;
duke@0 528 cur_entry = next_entry) {
duke@0 529 next_entry = cur_entry + 1;
duke@0 530 if (*cur_entry == dirty_card) {
duke@0 531 size_t dirty_cards;
duke@0 532 // Accumulate maximal dirty card range, starting at cur_entry
duke@0 533 for (dirty_cards = 1;
duke@0 534 next_entry <= limit && *next_entry == dirty_card;
duke@0 535 dirty_cards++, next_entry++);
duke@0 536 MemRegion cur_cards(addr_for(cur_entry),
duke@0 537 dirty_cards*card_size_in_words);
ysr@342 538 if (reset) {
ysr@342 539 for (size_t i = 0; i < dirty_cards; i++) {
ysr@342 540 cur_entry[i] = reset_val;
ysr@342 541 }
duke@0 542 }
duke@0 543 return cur_cards;
duke@0 544 }
duke@0 545 }
duke@0 546 }
duke@0 547 }
duke@0 548 return MemRegion(mr.end(), mr.end());
duke@0 549 }
duke@0 550
duke@0 551 // Set all the dirty cards in the given region to the "precleaned" state.
duke@0 552 void CardTableModRefBS::preclean_dirty_cards(MemRegion mr) {
duke@0 553 for (int i = 0; i < _cur_covered_regions; i++) {
duke@0 554 MemRegion mri = mr.intersection(_covered[i]);
duke@0 555 if (!mri.is_empty()) {
duke@0 556 jbyte *cur_entry, *limit;
duke@0 557 for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
duke@0 558 cur_entry <= limit;
duke@0 559 cur_entry++) {
duke@0 560 if (*cur_entry == dirty_card) {
duke@0 561 *cur_entry = precleaned_card;
duke@0 562 }
duke@0 563 }
duke@0 564 }
duke@0 565 }
duke@0 566 }
duke@0 567
duke@0 568 uintx CardTableModRefBS::ct_max_alignment_constraint() {
duke@0 569 return card_size * os::vm_page_size();
duke@0 570 }
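// Worked example (assuming 512-byte cards and a 4 KB page size): the
// constraint is 512 * 4096 bytes == 2 MB. One committed page of the byte
// map covers 4096 cards, i.e. 2 MB of heap, so boundaries aligned this
// coarsely fall on card-table page boundaries.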
duke@0 571
duke@0 572 void CardTableModRefBS::verify_guard() {
duke@0 573 // For product build verification
duke@0 574 guarantee(_byte_map[_guard_index] == last_card,
duke@0 575 "card table guard has been modified");
duke@0 576 }
duke@0 577
duke@0 578 void CardTableModRefBS::verify() {
duke@0 579 verify_guard();
duke@0 580 }
duke@0 581
duke@0 582 #ifndef PRODUCT
duke@0 583 class GuaranteeNotModClosure: public MemRegionClosure {
duke@0 584 CardTableModRefBS* _ct;
duke@0 585 public:
duke@0 586 GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {}
duke@0 587 void do_MemRegion(MemRegion mr) {
duke@0 588 jbyte* entry = _ct->byte_for(mr.start());
duke@0 589 guarantee(*entry != CardTableModRefBS::clean_card,
duke@0 590 "Dirty card in region that should be clean");
duke@0 591 }
duke@0 592 };
duke@0 593
duke@0 594 void CardTableModRefBS::verify_clean_region(MemRegion mr) {
duke@0 595 GuaranteeNotModClosure blk(this);
duke@0 596 non_clean_card_iterate_work(mr, &blk, false);
duke@0 597 }
duke@0 598 #endif
duke@0 599
duke@0 600 bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
duke@0 601 return
duke@0 602 CardTableModRefBS::card_will_be_scanned(cv) ||
duke@0 603 _rs->is_prev_nonclean_card_val(cv);
duke@0 604 };
duke@0 605
duke@0 606 bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
duke@0 607 return
duke@0 608 cv != clean_card &&
duke@0 609 (CardTableModRefBS::card_may_have_been_dirty(cv) ||
duke@0 610 CardTableRS::youngergen_may_have_been_dirty(cv));
duke@0 611 };