annotate src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp @ 2076:c33825b68624

6923430: G1: assert(res != 0,"This should have worked.")
7007446: G1: expand the heap with a single step, not one region at a time
Summary: Changed G1CollectedHeap::expand() to expand the committed space by calling VirtualSpace::expand_by() once rather than for every region in the expansion amount. This allows the success or failure of the expansion to be determined before creating any heap regions. Introduced a develop flag G1ExitOnExpansionFailure (false by default) that, when true, will exit the VM if the expansion of the committed space fails. Finally, G1CollectedHeap::expand() returns a status to its caller so that the caller knows whether to attempt the allocation.
Reviewed-by: brutisso, tonyp
author johnc
date Wed, 02 Feb 2011 10:41:20 -0800
parents f95d63e2154a
children 04d1138b4cce
rev   line source
ysr@342 1 /*
johnc@2076 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@342 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@342 4 *
ysr@342 5 * This code is free software; you can redistribute it and/or modify it
ysr@342 6 * under the terms of the GNU General Public License version 2 only, as
ysr@342 7 * published by the Free Software Foundation.
ysr@342 8 *
ysr@342 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@342 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@342 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@342 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@342 13 * accompanied this code).
ysr@342 14 *
ysr@342 15 * You should have received a copy of the GNU General Public License version
ysr@342 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@342 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@342 18 *
trims@1472 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1472 20 * or visit www.oracle.com if you need additional information or have any
trims@1472 21 * questions.
ysr@342 22 *
ysr@342 23 */
ysr@342 24
stefank@1885 25 #include "precompiled.hpp"
stefank@1885 26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@1885 27 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
stefank@1885 28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@1885 29 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
stefank@1885 30 #include "gc_implementation/g1/g1RemSet.hpp"
stefank@1885 31 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
stefank@1885 32 #include "memory/space.inline.hpp"
stefank@1885 33 #include "runtime/atomic.hpp"
stefank@1885 34 #include "utilities/copy.hpp"
ysr@342 35
johnc@890 36 // Possible sizes for the card counts cache: odd primes that roughly
johnc@890 37 // double in size; the table is terminated by -1. (See jvmtiTagMap.cpp.)
johnc@890 38 int ConcurrentG1Refine::_cc_cache_sizes[] = {
johnc@890 39 16381, 32771, 76831, 150001, 307261,
johnc@890 40 614563, 1228891, 2457733, 4915219, 9830479,
johnc@890 41 19660831, 39321619, 78643219, 157286461, -1
johnc@890 42 };
johnc@890 43
ysr@342 44 ConcurrentG1Refine::ConcurrentG1Refine() :
johnc@890 45 _card_counts(NULL), _card_epochs(NULL),
johnc@890 46 _n_card_counts(0), _max_n_card_counts(0),
johnc@890 47 _cache_size_index(0), _expand_card_counts(false),
ysr@342 48 _hot_cache(NULL),
ysr@342 49 _def_use_cache(false), _use_cache(false),
johnc@890 50 _n_periods(0),
iveresov@794 51 _threads(NULL), _n_threads(0)
ysr@342 52 {
iveresov@1111 53
iveresov@1111 54 // Ergonomically select initial concurrent refinement parameters
tonyp@1282 55 if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
tonyp@1282 56 FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
iveresov@1111 57 }
tonyp@1282 58 set_green_zone(G1ConcRefinementGreenZone);
iveresov@1111 59
tonyp@1282 60 if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
tonyp@1282 61 FLAG_SET_DEFAULT(G1ConcRefinementYellowZone, green_zone() * 3);
iveresov@1111 62 }
tonyp@1282 63 set_yellow_zone(MAX2<int>(G1ConcRefinementYellowZone, green_zone()));
iveresov@1111 64
tonyp@1282 65 if (FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
tonyp@1282 66 FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
iveresov@1111 67 }
tonyp@1282 68 set_red_zone(MAX2<int>(G1ConcRefinementRedZone, yellow_zone()));
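// For illustration (values not from the source): with the default flags and,
// say, ParallelGCThreads == 4, the zones above resolve to green == 4,
// yellow == 4 * 3 == 12 and red == 12 * 2 == 24, all measured in completed
// dirty card buffers.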
iveresov@1111 69 _n_worker_threads = thread_num();
iveresov@1111 70 // We need one extra thread to do the young gen rset size sampling.
iveresov@1111 71 _n_threads = _n_worker_threads + 1;
iveresov@1111 72 reset_threshold_step();
iveresov@1111 73
iveresov@1111 74 _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads);
iveresov@1111 75 int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
iveresov@1111 76 ConcurrentG1RefineThread *next = NULL;
iveresov@1111 77 for (int i = _n_threads - 1; i >= 0; i--) {
iveresov@1111 78 ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
iveresov@1111 79 assert(t != NULL, "Conc refine should have been created");
iveresov@1111 80 assert(t->cg1r() == this, "Conc refine thread should refer to this");
iveresov@1111 81 _threads[i] = t;
iveresov@1111 82 next = t;
ysr@342 83 }
ysr@342 84 }
ysr@342 85
iveresov@1111 86 void ConcurrentG1Refine::reset_threshold_step() {
tonyp@1282 87 if (FLAG_IS_DEFAULT(G1ConcRefinementThresholdStep)) {
iveresov@1111 88 _thread_threshold_step = (yellow_zone() - green_zone()) / (worker_thread_num() + 1);
iveresov@1111 89 } else {
tonyp@1282 90 _thread_threshold_step = G1ConcRefinementThresholdStep;
iveresov@795 91 }
iveresov@1111 92 }
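// The step computed above spaces the worker activation thresholds evenly
// across the (green, yellow] range, so workers are woken one at a time as
// the number of completed buffers grows; see
// ConcurrentG1RefineThread::initialize() for how each thread applies it.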
iveresov@1111 93
iveresov@1111 94 int ConcurrentG1Refine::thread_num() {
tonyp@1282 95 return MAX2<int>((G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads : ParallelGCThreads, 1);
iveresov@795 96 }
iveresov@795 97
ysr@342 98 void ConcurrentG1Refine::init() {
johnc@890 99 if (G1ConcRSLogCacheSize > 0) {
johnc@890 100 _g1h = G1CollectedHeap::heap();
johnc@890 101 _max_n_card_counts =
johnc@2076 102 (unsigned) (_g1h->max_capacity() >> CardTableModRefBS::card_shift);
johnc@890 103
johnc@890 104 size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1;
johnc@890 105 guarantee(_max_n_card_counts < max_card_num, "card_num representation");
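// Rough sizing example (assuming the usual 512-byte cards, card_shift == 9):
// a 1 GB maximum heap covers 2^30 >> 9 == 2^21 (~2M) cards, comfortably
// below the max_card_num bound checked above.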
johnc@890 106
johnc@890 107 int desired = _max_n_card_counts / InitialCacheFraction;
johnc@890 108 for (_cache_size_index = 0;
johnc@890 109 _cc_cache_sizes[_cache_size_index] >= 0; _cache_size_index++) {
johnc@890 110 if (_cc_cache_sizes[_cache_size_index] >= desired) break;
johnc@890 111 }
johnc@890 112 _cache_size_index = MAX2(0, (_cache_size_index - 1));
johnc@890 113
johnc@890 114 int initial_size = _cc_cache_sizes[_cache_size_index];
johnc@890 115 if (initial_size < 0) initial_size = _max_n_card_counts;
johnc@890 116
johnc@890 117 // Make sure we don't go bigger than we will ever need
johnc@890 118 _n_card_counts = MIN2((unsigned) initial_size, _max_n_card_counts);
johnc@890 119
johnc@890 120 _card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts);
johnc@890 121 _card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts);
johnc@890 122
johnc@890 123 Copy::fill_to_bytes(&_card_counts[0],
johnc@890 124 _n_card_counts * sizeof(CardCountCacheEntry));
johnc@890 125 Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
johnc@890 126
johnc@890 127 ModRefBarrierSet* bs = _g1h->mr_bs();
ysr@342 128 guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
johnc@890 129 _ct_bs = (CardTableModRefBS*)bs;
johnc@890 130 _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
johnc@890 131
ysr@342 132 _def_use_cache = true;
ysr@342 133 _use_cache = true;
ysr@342 134 _hot_cache_size = (1 << G1ConcRSLogCacheSize);
ysr@342 135 _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size);
ysr@342 136 _n_hot = 0;
ysr@342 137 _hot_cache_idx = 0;
johnc@889 138
johnc@889 139 // For refining the cards in the hot cache in parallel
johnc@889 140 int n_workers = (ParallelGCThreads > 0 ?
johnc@890 141 _g1h->workers()->total_workers() : 1);
johnc@889 142 _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
johnc@889 143 _hot_cache_par_claimed_idx = 0;
ysr@342 144 }
ysr@342 145 }
ysr@342 146
iveresov@794 147 void ConcurrentG1Refine::stop() {
iveresov@794 148 if (_threads != NULL) {
iveresov@794 149 for (int i = 0; i < _n_threads; i++) {
iveresov@794 150 _threads[i]->stop();
iveresov@794 151 }
iveresov@794 152 }
iveresov@794 153 }
iveresov@794 154
iveresov@1111 155 void ConcurrentG1Refine::reinitialize_threads() {
iveresov@1111 156 reset_threshold_step();
iveresov@1111 157 if (_threads != NULL) {
iveresov@1111 158 for (int i = 0; i < _n_threads; i++) {
iveresov@1111 159 _threads[i]->initialize();
iveresov@1111 160 }
iveresov@1111 161 }
iveresov@1111 162 }
iveresov@1111 163
ysr@342 164 ConcurrentG1Refine::~ConcurrentG1Refine() {
johnc@890 165 if (G1ConcRSLogCacheSize > 0) {
ysr@342 166 assert(_card_counts != NULL, "Logic");
johnc@890 167 FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts);
johnc@890 168 assert(_card_epochs != NULL, "Logic");
johnc@890 169 FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs);
ysr@342 170 assert(_hot_cache != NULL, "Logic");
ysr@342 171 FREE_C_HEAP_ARRAY(jbyte*, _hot_cache);
ysr@342 172 }
iveresov@794 173 if (_threads != NULL) {
iveresov@794 174 for (int i = 0; i < _n_threads; i++) {
iveresov@794 175 delete _threads[i];
iveresov@794 176 }
iveresov@799 177 FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads);
ysr@342 178 }
ysr@342 179 }
ysr@342 180
iveresov@794 181 void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
iveresov@794 182 if (_threads != NULL) {
iveresov@794 183 for (int i = 0; i < _n_threads; i++) {
iveresov@794 184 tc->do_thread(_threads[i]);
iveresov@794 185 }
ysr@342 186 }
ysr@342 187 }
ysr@342 188
johnc@890 189 bool ConcurrentG1Refine::is_young_card(jbyte* card_ptr) {
johnc@890 190 HeapWord* start = _ct_bs->addr_for(card_ptr);
johnc@890 191 HeapRegion* r = _g1h->heap_region_containing(start);
johnc@890 192 if (r != NULL && r->is_young()) {
johnc@890 193 return true;
johnc@890 194 }
johnc@890 195 // This card is not associated with a heap region
johnc@890 196 // so can't be young.
johnc@890 197 return false;
ysr@342 198 }
ysr@342 199
johnc@890 200 jbyte* ConcurrentG1Refine::add_card_count(jbyte* card_ptr, int* count, bool* defer) {
johnc@890 201 unsigned new_card_num = ptr_2_card_num(card_ptr);
johnc@890 202 unsigned bucket = hash(new_card_num);
johnc@890 203 assert(0 <= bucket && bucket < _n_card_counts, "Bounds");
johnc@890 204
johnc@890 205 CardCountCacheEntry* count_ptr = &_card_counts[bucket];
johnc@890 206 CardEpochCacheEntry* epoch_ptr = &_card_epochs[bucket];
johnc@890 207
johnc@890 208 // We have to construct a new entry if we haven't updated the counts
johnc@890 209 // during the current period, or if the count was updated for a
johnc@890 210 // different card number.
johnc@890 211 unsigned int new_epoch = (unsigned int) _n_periods;
johnc@890 212 julong new_epoch_entry = make_epoch_entry(new_card_num, new_epoch);
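// Note: the card number and the epoch are packed into a single julong so
// that the CAS below can publish both atomically; a cached count is only
// meaningful while its companion epoch entry carries the current period.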
johnc@890 213
johnc@890 214 while (true) {
johnc@890 215 // Fetch the previous epoch value
johnc@890 216 julong prev_epoch_entry = epoch_ptr->_value;
johnc@890 217 julong cas_res;
johnc@890 218
johnc@890 219 if (extract_epoch(prev_epoch_entry) != new_epoch) {
johnc@890 220 // This entry has not yet been updated during this period.
johnc@890 221 // Note: we update the epoch value atomically to ensure
johnc@890 222 // that there is only one winner that updates the cached
johnc@890 223 // card_ptr value even though all the refine threads share
johnc@890 224 // the same epoch value.
johnc@890 225
johnc@890 226 cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
johnc@890 227 (volatile jlong*)&epoch_ptr->_value,
johnc@890 228 (jlong) prev_epoch_entry);
johnc@890 229
johnc@890 230 if (cas_res == prev_epoch_entry) {
johnc@890 231 // We have successfully won the race to update the
johnc@890 232 // epoch and card_num value. Make it look like the
johnc@890 233 // count and eviction count were previously cleared.
johnc@890 234 count_ptr->_count = 1;
johnc@890 235 count_ptr->_evict_count = 0;
johnc@890 236 *count = 0;
johnc@890 237 // We can defer the processing of card_ptr
johnc@890 238 *defer = true;
johnc@890 239 return card_ptr;
johnc@890 240 }
johnc@890 241 // We did not win the race to update the epoch field, so some other
johnc@890 242 // thread must have done it. The value that gets returned by CAS
johnc@890 243 // should be the new epoch value.
johnc@890 244 assert(extract_epoch(cas_res) == new_epoch, "unexpected epoch");
johnc@890 245 // We could 'continue' here or just re-read the previous epoch value
johnc@890 246 prev_epoch_entry = epoch_ptr->_value;
johnc@890 247 }
johnc@890 248
johnc@890 249 // The epoch entry for card_ptr has been updated during this period.
johnc@890 250 unsigned old_card_num = extract_card_num(prev_epoch_entry);
johnc@890 251
johnc@890 252 // The card count that will be returned to caller
johnc@890 253 *count = count_ptr->_count;
johnc@890 254
johnc@890 255 // Are we updating the count for the same card?
johnc@890 256 if (new_card_num == old_card_num) {
johnc@890 257 // Same card - just update the count. We could have more than one
johnc@890 258 // thread racing to update count for the current card. It should be
johnc@890 259 // OK not to use a CAS as the only penalty should be some missed
johnc@890 260 // increments of the count which delays identifying the card as "hot".
johnc@890 261
johnc@890 262 if (*count < max_jubyte) count_ptr->_count++;
johnc@890 263 // We can defer the processing of card_ptr
johnc@890 264 *defer = true;
johnc@890 265 return card_ptr;
johnc@890 266 }
johnc@890 267
johnc@890 268 // Different card - evict old card info
johnc@890 269 if (count_ptr->_evict_count < max_jubyte) count_ptr->_evict_count++;
johnc@890 270 if (count_ptr->_evict_count > G1CardCountCacheExpandThreshold) {
johnc@890 271 // Trigger a resize the next time we clear
johnc@890 272 _expand_card_counts = true;
johnc@890 273 }
johnc@890 274
johnc@890 275 cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
johnc@890 276 (volatile jlong*)&epoch_ptr->_value,
johnc@890 277 (jlong) prev_epoch_entry);
johnc@890 278
johnc@890 279 if (cas_res == prev_epoch_entry) {
johnc@890 280 // We successfully updated the card num value in the epoch entry
johnc@890 281 count_ptr->_count = 0; // initialize counter for new card num
johnc@1589 282 jbyte* old_card_ptr = card_num_2_ptr(old_card_num);
johnc@890 283
johnc@890 284 // Even though the region containing the card at old_card_num was
johnc@890 285 // not in the young list when old_card_num was recorded in the epoch
johnc@890 286 // cache, it could have been added to the free list and subsequently
johnc@1589 287 // added to the young list in the intervening time. See CR 6817995.
johnc@1589 288 // We do not deal with this case here - it will be handled in
johnc@1589 289 // HeapRegion::oops_on_card_seq_iterate_careful after it has been
johnc@1589 290 // determined that the region containing the card has been allocated
johnc@1589 291 // to, and it's safe to check the young type of the region.
johnc@890 292
johnc@890 293 // We do not want to defer processing of card_ptr in this case
johnc@890 294 // (we need to refine old_card_ptr and card_ptr)
johnc@890 295 *defer = false;
johnc@890 296 return old_card_ptr;
johnc@890 297 }
johnc@890 298 // Someone else beat us - try again.
johnc@890 299 }
johnc@890 300 }
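// To summarize the contract sketched in the comments above: add_card_count()
// either returns card_ptr itself with *defer == true (its slot was claimed
// or refreshed for this card), or returns the displaced old_card_ptr with
// *defer == false, since both cards then need refinement.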
johnc@890 301
johnc@890 302 jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr, bool* defer) {
johnc@890 303 int count;
johnc@890 304 jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
johnc@890 305 assert(cached_ptr != NULL, "bad cached card ptr");
johnc@1246 306
johnc@1589 307 // We've just inserted a card pointer into the card count cache
johnc@1589 308 // and got back either the card we just inserted or the (evicted)
johnc@1589 309 // previous contents of that count slot.
johnc@1246 310
johnc@1589 311 // The card we got back could be in a young region. When the
johnc@1589 312 // returned card (if evicted) was originally inserted, we had
johnc@1589 313 // determined that its containing region was not young. However
johnc@1589 314 // it is possible for the region to be freed during a cleanup
johnc@1589 315 // pause, then reallocated and tagged as young which will result
johnc@1589 316 // in the returned card residing in a young region.
johnc@1589 317 //
johnc@1589 318 // We do not deal with this case here - the change from non-young
johnc@1589 319 // to young could be observed at any time - it will be handled in
johnc@1589 320 // HeapRegion::oops_on_card_seq_iterate_careful after it has been
johnc@1589 321 // determined that the region containing the card has been allocated
johnc@1589 322 // to.
johnc@890 323
johnc@890 324 // If the count is below the hot card limit, the card pointer we
johnc@890 325 // obtained from the card count cache is not hot, so do not store
johnc@890 326 // it in the cache; return it for immediate refining.
ysr@342 327 if (count < G1ConcRSHotCardLimit) {
johnc@890 328 return cached_ptr;
ysr@342 329 }
johnc@890 330
johnc@1589 331 // Otherwise, the pointer we got from the _card_counts cache is hot.
ysr@342 332 jbyte* res = NULL;
ysr@342 333 MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
ysr@342 334 if (_n_hot == _hot_cache_size) {
ysr@342 335 res = _hot_cache[_hot_cache_idx];
ysr@342 336 _n_hot--;
ysr@342 337 }
ysr@342 338 // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
johnc@890 339 _hot_cache[_hot_cache_idx] = cached_ptr;
ysr@342 340 _hot_cache_idx++;
ysr@342 341 if (_hot_cache_idx == _hot_cache_size) _hot_cache_idx = 0;
ysr@342 342 _n_hot++;
johnc@890 343
johnc@1589 344 // The card obtained from the hot card cache could be in a young
johnc@1589 345 // region. See above on how this can happen.
johnc@890 346
ysr@342 347 return res;
ysr@342 348 }
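// Worked example (hypothetical values): with G1ConcRSLogCacheSize == 2 the
// hot cache holds 4 entries. A 5th hot card overwrites the slot at
// _hot_cache_idx, and the previous occupant - the oldest entry - is returned
// to the caller for immediate refinement; the cache thus behaves as a
// fixed-size FIFO ring of deferred hot cards.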
ysr@342 349
johnc@1629 350 void ConcurrentG1Refine::clean_up_cache(int worker_i,
johnc@1629 351 G1RemSet* g1rs,
johnc@1629 352 DirtyCardQueue* into_cset_dcq) {
ysr@342 353 assert(!use_cache(), "cache should be disabled");
johnc@889 354 int start_idx;
johnc@889 355
johnc@889 356 while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
johnc@889 357 int end_idx = start_idx + _hot_cache_par_chunk_size;
johnc@889 358
johnc@889 359 if (start_idx ==
johnc@889 360 Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
johnc@889 361 // The current worker has successfully claimed the chunk [start_idx..end_idx)
johnc@889 362 end_idx = MIN2(end_idx, _n_hot);
johnc@889 363 for (int i = start_idx; i < end_idx; i++) {
johnc@889 364 jbyte* entry = _hot_cache[i];
johnc@889 365 if (entry != NULL) {
johnc@1629 366 if (g1rs->concurrentRefineOneCard(entry, worker_i, true)) {
johnc@1629 367 // 'entry' contains references that point into the current
johnc@1629 368 // collection set. We need to record 'entry' in the DCQS
johnc@1629 369 // that's used for that purpose.
johnc@1629 370 //
johnc@1629 371 // The only time we care about recording cards that contain
johnc@1629 372 // references that point into the collection set is during
johnc@1629 373 // RSet updating while within an evacuation pause.
johnc@1629 374 // In this case worker_i should be the id of a GC worker thread
johnc@1629 375 assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
johnc@1629 376 assert(worker_i < (int) DirtyCardQueueSet::num_par_ids(), "incorrect worker id");
johnc@1629 377 into_cset_dcq->enqueue(entry);
johnc@1629 378 }
johnc@889 379 }
johnc@889 380 }
ysr@342 381 }
ysr@342 382 }
ysr@342 383 }
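// The claiming loop above is a simple lock-free protocol: each worker CASes
// _hot_cache_par_claimed_idx forward by one chunk and refines only the
// [start_idx, end_idx) range it won, so draining the hot cache parallelizes
// without taking a lock.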
ysr@342 384
johnc@890 385 void ConcurrentG1Refine::expand_card_count_cache() {
johnc@890 386 if (_n_card_counts < _max_n_card_counts) {
johnc@890 387 int new_idx = _cache_size_index+1;
johnc@890 388 int new_size = _cc_cache_sizes[new_idx];
johnc@890 389 if (new_size < 0) new_size = _max_n_card_counts;
johnc@890 390
johnc@890 391 // Make sure we don't go bigger than we will ever need
johnc@890 392 new_size = MIN2((unsigned) new_size, _max_n_card_counts);
johnc@890 393
johnc@890 394 // Expand the card count and card epoch tables
johnc@890 395 if (new_size > (int)_n_card_counts) {
johnc@890 396 // We can just free and allocate a new array as we're
johnc@890 397 // not interested in preserving the contents
johnc@890 398 assert(_card_counts != NULL, "Logic!");
johnc@890 399 assert(_card_epochs != NULL, "Logic!");
johnc@890 400 FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts);
johnc@890 401 FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs);
johnc@890 402 _n_card_counts = new_size;
johnc@890 403 _card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts);
johnc@890 404 _card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts);
johnc@890 405 _cache_size_index = new_idx;
ysr@342 406 }
ysr@342 407 }
ysr@342 408 }
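// Dropping the old tables rather than rehashing them is safe because a
// count is trusted only while its epoch entry matches the current period;
// the caller (clear_and_record_card_counts) fills the newly allocated epoch
// table immediately after an expansion, so every stale count is ignored.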
ysr@342 409
johnc@890 410 void ConcurrentG1Refine::clear_and_record_card_counts() {
johnc@890 411 if (G1ConcRSLogCacheSize == 0) return;
johnc@890 412
johnc@890 413 #ifndef PRODUCT
johnc@890 414 double start = os::elapsedTime();
johnc@890 415 #endif
johnc@890 416
johnc@890 417 if (_expand_card_counts) {
johnc@890 418 expand_card_count_cache();
johnc@890 419 _expand_card_counts = false;
johnc@890 420 // Only need to clear the epochs; a count is only used when its epoch matches the current period.
johnc@890 421 Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
ysr@342 422 }
ysr@342 423
johnc@890 424 int this_epoch = (int) _n_periods;
johnc@890 425 assert((this_epoch+1) <= max_jint, "too many periods");
johnc@890 426 // Update epoch
johnc@890 427 _n_periods++;
johnc@890 428
johnc@890 429 #ifndef PRODUCT
johnc@890 430 double elapsed = os::elapsedTime() - start;
johnc@890 431 _g1h->g1_policy()->record_cc_clear_time(elapsed * 1000.0);
johnc@890 432 #endif
ysr@342 433 }
tonyp@1019 434
tonyp@1019 435 void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
tonyp@1019 436 for (int i = 0; i < _n_threads; ++i) {
tonyp@1019 437 _threads[i]->print_on(st);
tonyp@1019 438 st->cr();
tonyp@1019 439 }
tonyp@1019 440 }