annotate src/share/vm/gc_implementation/g1/heapRegion.cpp @ 1911:72a161e62cc4

6991377: G1: race between concurrent refinement and humongous object allocation Summary: There is a race between the concurrent refinement threads and humongous object allocation that can cause the concurrent refinement threads to corrupt the part of the BOT that is being initialized by the humongous object allocation operation. The solution is to do the humongous object allocation in careful steps to ensure that the concurrent refinement threads always have a consistent view of the BOT, the region contents, and top. The fix includes some very minor tidying up in sparsePRT. Reviewed-by: jcoomes, johnc, ysr
author tonyp
date Sat, 16 Oct 2010 17:12:19 -0400
parents bb847e31b836
children f95d63e2154a
rev   line source
ysr@345 1 /*
trims@1563 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
ysr@345 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@345 4 *
ysr@345 5 * This code is free software; you can redistribute it and/or modify it
ysr@345 6 * under the terms of the GNU General Public License version 2 only, as
ysr@345 7 * published by the Free Software Foundation.
ysr@345 8 *
ysr@345 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@345 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@345 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@345 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@345 13 * accompanied this code).
ysr@345 14 *
ysr@345 15 * You should have received a copy of the GNU General Public License version
ysr@345 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@345 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@345 18 *
trims@1563 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1563 20 * or visit www.oracle.com if you need additional information or have any
trims@1563 21 * questions.
ysr@345 22 *
ysr@345 23 */
ysr@345 24
ysr@345 25 #include "incls/_precompiled.incl"
ysr@345 26 #include "incls/_heapRegion.cpp.incl"
ysr@345 27
tonyp@996 28 int HeapRegion::LogOfHRGrainBytes = 0;
tonyp@996 29 int HeapRegion::LogOfHRGrainWords = 0;
tonyp@996 30 int HeapRegion::GrainBytes = 0;
tonyp@996 31 int HeapRegion::GrainWords = 0;
tonyp@996 32 int HeapRegion::CardsPerRegion = 0;
tonyp@996 33
ysr@345 34 HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
ysr@345 35 HeapRegion* hr, OopClosure* cl,
ysr@345 36 CardTableModRefBS::PrecisionStyle precision,
ysr@345 37 FilterKind fk) :
ysr@345 38 ContiguousSpaceDCTOC(hr, cl, precision, NULL),
ysr@345 39 _hr(hr), _fk(fk), _g1(g1)
ysr@345 40 {}
ysr@345 41
ysr@345 42 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
ysr@345 43 OopClosure* oc) :
ysr@345 44 _r_bottom(r->bottom()), _r_end(r->end()),
ysr@345 45 _oc(oc), _out_of_region(0)
ysr@345 46 {}
ysr@345 47
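// Verification closure: checks that every field of a live object points
// into the heap and to a live object, and that any old-to-old
// cross-region reference is recorded in the destination region's
// remembered set (or, if the log buffers were not flushed for
// verification, is covered by a dirty card).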
ysr@345 48 class VerifyLiveClosure: public OopClosure {
tonyp@860 49 private:
ysr@345 50 G1CollectedHeap* _g1h;
ysr@345 51 CardTableModRefBS* _bs;
ysr@345 52 oop _containing_obj;
ysr@345 53 bool _failures;
ysr@345 54 int _n_failures;
tonyp@860 55 bool _use_prev_marking;
ysr@345 56 public:
tonyp@860 57 // use_prev_marking == true -> use "prev" marking information,
tonyp@860 58 // use_prev_marking == false -> use "next" marking information
tonyp@860 59 VerifyLiveClosure(G1CollectedHeap* g1h, bool use_prev_marking) :
ysr@345 60 _g1h(g1h), _bs(NULL), _containing_obj(NULL),
tonyp@860 61 _failures(false), _n_failures(0), _use_prev_marking(use_prev_marking)
ysr@345 62 {
ysr@345 63 BarrierSet* bs = _g1h->barrier_set();
ysr@345 64 if (bs->is_a(BarrierSet::CardTableModRef))
ysr@345 65 _bs = (CardTableModRefBS*)bs;
ysr@345 66 }
ysr@345 67
ysr@345 68 void set_containing_obj(oop obj) {
ysr@345 69 _containing_obj = obj;
ysr@345 70 }
ysr@345 71
ysr@345 72 bool failures() { return _failures; }
ysr@345 73 int n_failures() { return _n_failures; }
ysr@345 74
ysr@896 75 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
ysr@896 76 virtual void do_oop( oop* p) { do_oop_work(p); }
ysr@345 77
tonyp@1477 78 void print_object(outputStream* out, oop obj) {
tonyp@1477 79 #ifdef PRODUCT
tonyp@1477 80 klassOop k = obj->klass();
tonyp@1477 81 const char* class_name = instanceKlass::cast(k)->external_name();
tonyp@1477 82 out->print_cr("class name %s", class_name);
tonyp@1477 83 #else // PRODUCT
tonyp@1477 84 obj->print_on(out);
tonyp@1477 85 #endif // PRODUCT
tonyp@1477 86 }
tonyp@1477 87
ysr@896 88 template <class T> void do_oop_work(T* p) {
ysr@345 89 assert(_containing_obj != NULL, "Precondition");
tonyp@860 90 assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
tonyp@860 91 "Precondition");
ysr@896 92 T heap_oop = oopDesc::load_heap_oop(p);
ysr@896 93 if (!oopDesc::is_null(heap_oop)) {
ysr@896 94 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
ysr@345 95 bool failed = false;
tonyp@860 96 if (!_g1h->is_in_closed_subset(obj) ||
tonyp@860 97 _g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
ysr@345 98 if (!_failures) {
ysr@345 99 gclog_or_tty->print_cr("");
ysr@345 100 gclog_or_tty->print_cr("----------");
ysr@345 101 }
ysr@345 102 if (!_g1h->is_in_closed_subset(obj)) {
tonyp@1477 103 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
ysr@345 104 gclog_or_tty->print_cr("Field "PTR_FORMAT
tonyp@1477 105 " of live obj "PTR_FORMAT" in region "
tonyp@1477 106 "["PTR_FORMAT", "PTR_FORMAT")",
tonyp@1477 107 p, (void*) _containing_obj,
tonyp@1477 108 from->bottom(), from->end());
tonyp@1477 109 print_object(gclog_or_tty, _containing_obj);
tonyp@1477 110 gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
tonyp@1477 111 (void*) obj);
ysr@345 112 } else {
tonyp@1477 113 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
tonyp@1477 114 HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
ysr@345 115 gclog_or_tty->print_cr("Field "PTR_FORMAT
tonyp@1477 116 " of live obj "PTR_FORMAT" in region "
tonyp@1477 117 "["PTR_FORMAT", "PTR_FORMAT")",
tonyp@1477 118 p, (void*) _containing_obj,
tonyp@1477 119 from->bottom(), from->end());
tonyp@1477 120 print_object(gclog_or_tty, _containing_obj);
tonyp@1477 121 gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
tonyp@1477 122 "["PTR_FORMAT", "PTR_FORMAT")",
tonyp@1477 123 (void*) obj, to->bottom(), to->end());
tonyp@1477 124 print_object(gclog_or_tty, obj);
ysr@345 125 }
ysr@345 126 gclog_or_tty->print_cr("----------");
ysr@345 127 _failures = true;
ysr@345 128 failed = true;
ysr@345 129 _n_failures++;
ysr@345 130 }
ysr@345 131
ysr@345 132 if (!_g1h->full_collection()) {
ysr@896 133 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
ysr@896 134 HeapRegion* to = _g1h->heap_region_containing(obj);
ysr@345 135 if (from != NULL && to != NULL &&
ysr@345 136 from != to &&
ysr@345 137 !to->isHumongous()) {
ysr@345 138 jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
ysr@345 139 jbyte cv_field = *_bs->byte_for_const(p);
ysr@345 140 const jbyte dirty = CardTableModRefBS::dirty_card_val();
ysr@345 141
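// The reference is acceptable if the source region is young, if the
// destination's remembered set contains it, or, when the log buffers
// were not flushed for verification, if the relevant card is still
// dirty (the entry may still be sitting in an unflushed buffer).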
ysr@345 142 bool is_bad = !(from->is_young()
ysr@345 143 || to->rem_set()->contains_reference(p)
ysr@345 144 || (!G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
ysr@345 145 (_containing_obj->is_objArray() ?
ysr@345 146 cv_field == dirty
ysr@345 147 : cv_obj == dirty || cv_field == dirty)));
ysr@345 148 if (is_bad) {
ysr@345 149 if (!_failures) {
ysr@345 150 gclog_or_tty->print_cr("");
ysr@345 151 gclog_or_tty->print_cr("----------");
ysr@345 152 }
ysr@345 153 gclog_or_tty->print_cr("Missing rem set entry:");
ysr@345 154 gclog_or_tty->print_cr("Field "PTR_FORMAT
ysr@345 155 " of obj "PTR_FORMAT
ysr@345 156 ", in region %d ["PTR_FORMAT
ysr@345 157 ", "PTR_FORMAT"),",
ysr@345 158 p, (void*) _containing_obj,
ysr@345 159 from->hrs_index(),
ysr@345 160 from->bottom(),
ysr@345 161 from->end());
ysr@345 162 _containing_obj->print_on(gclog_or_tty);
ysr@345 163 gclog_or_tty->print_cr("points to obj "PTR_FORMAT
ysr@345 164 " in region %d ["PTR_FORMAT
ysr@345 165 ", "PTR_FORMAT").",
ysr@345 166 (void*) obj, to->hrs_index(),
ysr@345 167 to->bottom(), to->end());
ysr@345 168 obj->print_on(gclog_or_tty);
ysr@345 169 gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
ysr@345 170 cv_obj, cv_field);
ysr@345 171 gclog_or_tty->print_cr("----------");
ysr@345 172 _failures = true;
ysr@345 173 if (!failed) _n_failures++;
ysr@345 174 }
ysr@345 175 }
ysr@345 176 }
ysr@345 177 }
ysr@345 178 }
ysr@345 179 };
ysr@345 180
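// Walks the objects in [cur, top), applying cl to each live one. This is
// templated on the closure type so that each filter kind used below gets
// its own specialized copy of the loop (no virtual call per object). The
// last object, which may extend up to or beyond top, is left to the caller.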
ysr@345 181 template<class ClosureType>
ysr@345 182 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
ysr@345 183 HeapRegion* hr,
ysr@345 184 HeapWord* cur, HeapWord* top) {
ysr@345 185 oop cur_oop = oop(cur);
ysr@345 186 int oop_size = cur_oop->size();
ysr@345 187 HeapWord* next_obj = cur + oop_size;
ysr@345 188 while (next_obj < top) {
ysr@345 189 // Keep filtering the remembered set.
ysr@345 190 if (!g1h->is_obj_dead(cur_oop, hr)) {
ysr@345 191 // Bottom lies entirely below top, so we can call the
ysr@345 192 // non-memRegion version of oop_iterate below.
ysr@345 193 cur_oop->oop_iterate(cl);
ysr@345 194 }
ysr@345 195 cur = next_obj;
ysr@345 196 cur_oop = oop(cur);
ysr@345 197 oop_size = cur_oop->size();
ysr@345 198 next_obj = cur + oop_size;
ysr@345 199 }
ysr@345 200 return cur;
ysr@345 201 }
ysr@345 202
ysr@345 203 void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
ysr@345 204 HeapWord* bottom,
ysr@345 205 HeapWord* top,
ysr@345 206 OopClosure* cl) {
ysr@345 207 G1CollectedHeap* g1h = _g1;
ysr@345 208
ysr@345 209 int oop_size;
ysr@345 210
ysr@345 211 OopClosure* cl2 = cl;
ysr@345 212 FilterIntoCSClosure intoCSFilt(this, g1h, cl);
ysr@345 213 FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
ysr@345 214 switch (_fk) {
ysr@345 215 case IntoCSFilterKind: cl2 = &intoCSFilt; break;
ysr@345 216 case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
ysr@345 217 }
ysr@345 218
ysr@345 219 // Start filtering what we add to the remembered set. If the object is
ysr@345 220 // not considered dead, either because it is marked (in the mark bitmap)
ysr@345 221 // or it was allocated after marking finished, then we add it. Otherwise
ysr@345 222 // we can safely ignore the object.
ysr@345 223 if (!g1h->is_obj_dead(oop(bottom), _hr)) {
ysr@345 224 oop_size = oop(bottom)->oop_iterate(cl2, mr);
ysr@345 225 } else {
ysr@345 226 oop_size = oop(bottom)->size();
ysr@345 227 }
ysr@345 228
ysr@345 229 bottom += oop_size;
ysr@345 230
ysr@345 231 if (bottom < top) {
ysr@345 232 // We replicate the loop below for several kinds of possible filters.
ysr@345 233 switch (_fk) {
ysr@345 234 case NoFilterKind:
ysr@345 235 bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
ysr@345 236 break;
ysr@345 237 case IntoCSFilterKind: {
ysr@345 238 FilterIntoCSClosure filt(this, g1h, cl);
ysr@345 239 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
ysr@345 240 break;
ysr@345 241 }
ysr@345 242 case OutOfRegionFilterKind: {
ysr@345 243 FilterOutOfRegionClosure filt(_hr, cl);
ysr@345 244 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
ysr@345 245 break;
ysr@345 246 }
ysr@345 247 default:
ysr@345 248 ShouldNotReachHere();
ysr@345 249 }
ysr@345 250
ysr@345 251 // Last object. Need to do dead-obj filtering here too.
ysr@345 252 if (!g1h->is_obj_dead(oop(bottom), _hr)) {
ysr@345 253 oop(bottom)->oop_iterate(cl2, mr);
ysr@345 254 }
ysr@345 255 }
ysr@345 256 }
ysr@345 257
tonyp@996 258 // Minimum region size; we won't go lower than that.
tonyp@996 259 // We might want to decrease this in the future, to deal with small
tonyp@996 260 // heaps a bit more efficiently.
tonyp@996 261 #define MIN_REGION_SIZE ( 1024 * 1024 )
tonyp@996 262
tonyp@996 263 // Maximum region size; we don't go higher than that. There's a good
tonyp@996 264 // reason for having an upper bound. We don't want regions to get too
tonyp@996 265 // large, otherwise cleanup's effectiveness would decrease as there
tonyp@996 266 // will be fewer opportunities to find totally empty regions after
tonyp@996 267 // marking.
tonyp@996 268 #define MAX_REGION_SIZE ( 32 * 1024 * 1024 )
tonyp@996 269
tonyp@996 270 // The automatic region size calculation will try to have around this
tonyp@996 271 // many regions in the heap (based on the min heap size).
tonyp@996 272 #define TARGET_REGION_NUMBER 2048
tonyp@996 273
tonyp@996 274 void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
tonyp@996 275 // region_size in bytes
tonyp@996 276 uintx region_size = G1HeapRegionSize;
tonyp@996 277 if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
tonyp@996 278 // We base the automatic calculation on the min heap size. This
tonyp@996 279 // can be problematic if the spread between min and max is quite
tonyp@996 280 // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
tonyp@996 281 // the max size, the region size might be way too large for the
tonyp@996 282 // min size. Either way, some users might have to set the region
tonyp@996 283 // size manually for some -Xms / -Xmx combos.
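// For example, with the defaults above: -Xms1g gives
// 1g / 2048 = 512K, which is below the 1M minimum, so the region
// size becomes 1M; -Xms8g gives 8g / 2048 = 4M, which is already
// a power of 2 and within the limits.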
tonyp@996 284
tonyp@996 285 region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
tonyp@996 286 (uintx) MIN_REGION_SIZE);
tonyp@996 287 }
tonyp@996 288
tonyp@996 289 int region_size_log = log2_long((jlong) region_size);
tonyp@996 290 // Recalculate the region size to make sure it's a power of
tonyp@996 291 // 2. This means that region_size is the largest power of 2 that's
tonyp@996 292 // <= what we've calculated so far.
prr@1496 293 region_size = ((uintx)1 << region_size_log);
tonyp@996 294
tonyp@996 295 // Now make sure that we don't go over or under our limits.
tonyp@996 296 if (region_size < MIN_REGION_SIZE) {
tonyp@996 297 region_size = MIN_REGION_SIZE;
tonyp@996 298 } else if (region_size > MAX_REGION_SIZE) {
tonyp@996 299 region_size = MAX_REGION_SIZE;
tonyp@996 300 }
tonyp@996 301
tonyp@996 302 // And recalculate the log.
tonyp@996 303 region_size_log = log2_long((jlong) region_size);
tonyp@996 304
tonyp@996 305 // Now, set up the globals.
tonyp@996 306 guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
tonyp@996 307 LogOfHRGrainBytes = region_size_log;
tonyp@996 308
tonyp@996 309 guarantee(LogOfHRGrainWords == 0, "we should only set it once");
tonyp@996 310 LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;
tonyp@996 311
tonyp@996 312 guarantee(GrainBytes == 0, "we should only set it once");
tonyp@996 313 // The cast to int is safe, given that we've bounded region_size by
tonyp@996 314 // MIN_REGION_SIZE and MAX_REGION_SIZE.
tonyp@996 315 GrainBytes = (int) region_size;
tonyp@996 316
tonyp@996 317 guarantee(GrainWords == 0, "we should only set it once");
tonyp@996 318 GrainWords = GrainBytes >> LogHeapWordSize;
tonyp@996 319 guarantee(1 << LogOfHRGrainWords == GrainWords, "sanity");
tonyp@996 320
tonyp@996 321 guarantee(CardsPerRegion == 0, "we should only set it once");
tonyp@996 322 CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
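// For example, assuming the usual 512 byte cards (card_shift == 9),
// a 1M region has 2048 cards.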
tonyp@996 323 }
tonyp@996 324
ysr@345 325 void HeapRegion::reset_after_compaction() {
ysr@345 326 G1OffsetTableContigSpace::reset_after_compaction();
ysr@345 327 // After a compaction the mark bitmap is invalid, so we must
ysr@345 328 // treat all objects as being inside the unmarked area.
ysr@345 329 zero_marked_bytes();
ysr@345 330 init_top_at_mark_start();
ysr@345 331 }
ysr@345 332
ysr@345 333 DirtyCardToOopClosure*
ysr@345 334 HeapRegion::new_dcto_closure(OopClosure* cl,
ysr@345 335 CardTableModRefBS::PrecisionStyle precision,
ysr@345 336 HeapRegionDCTOC::FilterKind fk) {
ysr@345 337 return new HeapRegionDCTOC(G1CollectedHeap::heap(),
ysr@345 338 this, cl, precision, fk);
ysr@345 339 }
ysr@345 340
ysr@345 341 void HeapRegion::hr_clear(bool par, bool clear_space) {
tonyp@358 342 _humongous_type = NotHumongous;
ysr@345 343 _humongous_start_region = NULL;
ysr@345 344 _in_collection_set = false;
ysr@345 345 _is_gc_alloc_region = false;
ysr@345 346
ysr@345 347 // Age stuff (if parallel, this will be done separately, since it needs
ysr@345 348 // to be sequential).
ysr@345 349 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@345 350
ysr@345 351 set_young_index_in_cset(-1);
ysr@345 352 uninstall_surv_rate_group();
ysr@345 353 set_young_type(NotYoung);
ysr@345 354
ysr@345 355 // In case it had been the start of a humongous sequence, reset its end.
ysr@345 356 set_end(_orig_end);
ysr@345 357
ysr@345 358 if (!par) {
ysr@345 359 // If this is parallel, this will be done later.
ysr@345 360 HeapRegionRemSet* hrrs = rem_set();
ysr@345 361 if (hrrs != NULL) hrrs->clear();
tonyp@358 362 _claimed = InitialClaimValue;
ysr@345 363 }
ysr@345 364 zero_marked_bytes();
ysr@345 365 set_sort_index(-1);
ysr@345 366
ysr@345 367 _offsets.resize(HeapRegion::GrainWords);
ysr@345 368 init_top_at_mark_start();
tonyp@359 369 if (clear_space) clear(SpaceDecorator::Mangle);
ysr@345 370 }
ysr@345 371
ysr@345 372 // <PREDICTION>
ysr@345 373 void HeapRegion::calc_gc_efficiency() {
ysr@345 374 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@345 375 _gc_efficiency = (double) garbage_bytes() /
ysr@345 376 g1h->predict_region_elapsed_time_ms(this, false);
ysr@345 377 }
ysr@345 378 // </PREDICTION>
ysr@345 379
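// As described in the changeset summary above (6991377), humongous
// regions are set up in careful steps so that concurrent refinement
// threads always see a consistent view of the BOT, the region
// contents, and top; the asserts below check the expected state at
// each step.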
tonyp@1911 380 void HeapRegion::set_startsHumongous(HeapWord* new_end) {
tonyp@1911 381 assert(end() == _orig_end,
tonyp@1911 382 "Should be normal before the humongous object allocation");
tonyp@1911 383 assert(top() == bottom(), "should be empty");
tonyp@1911 384
tonyp@358 385 _humongous_type = StartsHumongous;
ysr@345 386 _humongous_start_region = this;
tonyp@1911 387
tonyp@1911 388 set_end(new_end);
tonyp@1911 389 _offsets.set_for_starts_humongous(new_end);
tonyp@1911 390 }
tonyp@1911 391
tonyp@1911 392 void HeapRegion::set_continuesHumongous(HeapRegion* start) {
tonyp@1911 393 assert(end() == _orig_end,
tonyp@1911 394 "Should be normal before the humongous object allocation");
tonyp@1911 395 assert(top() == bottom(), "should be empty");
tonyp@1911 396 assert(start->startsHumongous(), "pre-condition");
tonyp@1911 397
tonyp@1911 398 _humongous_type = ContinuesHumongous;
tonyp@1911 399 _humongous_start_region = start;
ysr@345 400 }
ysr@345 401
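// A single compare-and-swap attempt to claim the region: returns true
// only if this thread transitioned _claimed to claimValue; returns
// false if the region already had claimValue or if the CAS lost a
// race with another thread.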
ysr@345 402 bool HeapRegion::claimHeapRegion(jint claimValue) {
ysr@345 403 jint current = _claimed;
ysr@345 404 if (current != claimValue) {
ysr@345 405 jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
ysr@345 406 if (res == current) {
ysr@345 407 return true;
ysr@345 408 }
ysr@345 409 }
ysr@345 410 return false;
ysr@345 411 }
ysr@345 412
ysr@345 413 HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
ysr@345 414 HeapWord* low = addr;
ysr@345 415 HeapWord* high = end();
ysr@345 416 while (low < high) {
ysr@345 417 size_t diff = pointer_delta(high, low);
ysr@345 418 // Must add one below to bias toward the high amount. Otherwise, if
ysr@345 419 // "high" were at the desired value, and "low" were one less, we
ysr@345 420 // would not converge on "high". This is not symmetric, because
ysr@345 421 // we set "high" to a block start, which might be the right one,
ysr@345 422 // which we don't do for "low".
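// For example, with low == 10 and high == 11 where 11 is the
// answer: without the +1 bias, middle would equal low and the
// search would make no progress; with it, middle == high and we
// return immediately.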
ysr@345 423 HeapWord* middle = low + (diff+1)/2;
ysr@345 424 if (middle == high) return high;
ysr@345 425 HeapWord* mid_bs = block_start_careful(middle);
ysr@345 426 if (mid_bs < addr) {
ysr@345 427 low = middle;
ysr@345 428 } else {
ysr@345 429 high = mid_bs;
ysr@345 430 }
ysr@345 431 }
ysr@345 432 assert(low == high && low >= addr, "Didn't work.");
ysr@345 433 return low;
ysr@345 434 }
ysr@345 435
ysr@345 436 void HeapRegion::set_next_on_unclean_list(HeapRegion* r) {
ysr@345 437 assert(r == NULL || r->is_on_unclean_list(), "Malformed unclean list.");
ysr@345 438 _next_in_special_set = r;
ysr@345 439 }
ysr@345 440
ysr@345 441 void HeapRegion::set_on_unclean_list(bool b) {
ysr@345 442 _is_on_unclean_list = b;
ysr@345 443 }
ysr@345 444
tonyp@359 445 void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
tonyp@359 446 G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
ysr@345 447 hr_clear(false/*par*/, clear_space);
ysr@345 448 }
ysr@345 449 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
ysr@345 450 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
ysr@345 451 #endif // _MSC_VER
ysr@345 452
ysr@345 453
ysr@345 454 HeapRegion::
ysr@345 455 HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
ysr@345 456 MemRegion mr, bool is_zeroed)
ysr@345 457 : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
ysr@345 458 _next_fk(HeapRegionDCTOC::NoFilterKind),
ysr@345 459 _hrs_index(-1),
tonyp@358 460 _humongous_type(NotHumongous), _humongous_start_region(NULL),
ysr@345 461 _in_collection_set(false), _is_gc_alloc_region(false),
ysr@345 462 _is_on_free_list(false), _is_on_unclean_list(false),
ysr@345 463 _next_in_special_set(NULL), _orig_end(NULL),
tonyp@358 464 _claimed(InitialClaimValue), _evacuation_failed(false),
ysr@345 465 _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
ysr@345 466 _young_type(NotYoung), _next_young_region(NULL),
apetrusenko@844 467 _next_dirty_cards_region(NULL),
ysr@345 468 _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
johnc@1483 469 _rem_set(NULL), _zfs(NotZeroFilled),
johnc@1483 470 _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
johnc@1483 471 _predicted_bytes_to_copy(0)
ysr@345 472 {
ysr@345 473 _orig_end = mr.end();
ysr@345 474 // Note that initialize() will set the start of the unmarked area of the
ysr@345 475 // region.
tonyp@359 476 this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
tonyp@359 477 set_top(bottom());
tonyp@359 478 set_saved_mark();
ysr@345 479
ysr@345 480 _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
ysr@345 481
ysr@345 482 assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
ysr@345 483 // In case the region is allocated during a pause, note the top.
ysr@345 484 // We haven't done any counting on a brand new region.
ysr@345 485 _top_at_conc_mark_count = bottom();
ysr@345 486 }
ysr@345 487
ysr@345 488 class NextCompactionHeapRegionClosure: public HeapRegionClosure {
ysr@345 489 const HeapRegion* _target;
ysr@345 490 bool _target_seen;
ysr@345 491 HeapRegion* _last;
ysr@345 492 CompactibleSpace* _res;
ysr@345 493 public:
ysr@345 494 NextCompactionHeapRegionClosure(const HeapRegion* target) :
ysr@345 495 _target(target), _target_seen(false), _res(NULL) {}
ysr@345 496 bool doHeapRegion(HeapRegion* cur) {
ysr@345 497 if (_target_seen) {
ysr@345 498 if (!cur->isHumongous()) {
ysr@345 499 _res = cur;
ysr@345 500 return true;
ysr@345 501 }
ysr@345 502 } else if (cur == _target) {
ysr@345 503 _target_seen = true;
ysr@345 504 }
ysr@345 505 return false;
ysr@345 506 }
ysr@345 507 CompactibleSpace* result() { return _res; }
ysr@345 508 };
ysr@345 509
ysr@345 510 CompactibleSpace* HeapRegion::next_compaction_space() const {
ysr@345 511 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@345 512 // cast away const-ness
ysr@345 513 HeapRegion* r = (HeapRegion*) this;
ysr@345 514 NextCompactionHeapRegionClosure blk(r);
ysr@345 515 g1h->heap_region_iterate_from(r, &blk);
ysr@345 516 return blk.result();
ysr@345 517 }
ysr@345 518
ysr@345 519 void HeapRegion::save_marks() {
ysr@345 520 set_saved_mark();
ysr@345 521 }
ysr@345 522
ysr@345 523 void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) {
ysr@345 524 HeapWord* p = mr.start();
ysr@345 525 HeapWord* e = mr.end();
ysr@345 526 oop obj;
ysr@345 527 while (p < e) {
ysr@345 528 obj = oop(p);
ysr@345 529 p += obj->oop_iterate(cl);
ysr@345 530 }
ysr@345 531 assert(p == e, "bad memregion: doesn't end on obj boundary");
ysr@345 532 }
ysr@345 533
ysr@345 534 #define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
ysr@345 535 void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
ysr@345 536 ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
ysr@345 537 }
ysr@345 538 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)
ysr@345 539
ysr@345 540
ysr@345 541 void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
ysr@345 542 oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
ysr@345 543 }
ysr@345 544
ysr@345 545 #ifdef DEBUG
ysr@345 546 HeapWord* HeapRegion::allocate(size_t size) {
ysr@345 547 jint state = zero_fill_state();
ysr@345 548 assert(!G1CollectedHeap::heap()->allocs_are_zero_filled() ||
ysr@345 549 zero_fill_is_allocated(),
ysr@345 550 "When ZF is on, only alloc in ZF'd regions");
ysr@345 551 return G1OffsetTableContigSpace::allocate(size);
ysr@345 552 }
ysr@345 553 #endif
ysr@345 554
ysr@345 555 void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) {
ysr@345 556 assert(ZF_mon->owned_by_self() ||
ysr@345 557 Universe::heap()->is_gc_active(),
ysr@345 558 "Must hold the lock or be a full GC to modify.");
apetrusenko@1556 559 #ifdef ASSERT
apetrusenko@1556 560 if (top() != bottom() && zfs != Allocated) {
apetrusenko@1556 561 ResourceMark rm;
apetrusenko@1556 562 stringStream region_str;
apetrusenko@1556 563 print_on(&region_str);
apetrusenko@1556 564 assert(top() == bottom() || zfs == Allocated,
apetrusenko@1556 565 err_msg("Region must be empty, or we must be setting it to allocated. "
apetrusenko@1556 566 "_zfs=%d, zfs=%d, region: %s", _zfs, zfs, region_str.as_string()));
apetrusenko@1556 567 }
apetrusenko@1556 568 #endif
ysr@345 569 _zfs = zfs;
ysr@345 570 }
ysr@345 571
ysr@345 572 void HeapRegion::set_zero_fill_complete() {
ysr@345 573 set_zero_fill_state_work(ZeroFilled);
ysr@345 574 if (ZF_mon->owned_by_self()) {
ysr@345 575 ZF_mon->notify_all();
ysr@345 576 }
ysr@345 577 }
ysr@345 578
ysr@345 579
ysr@345 580 void HeapRegion::ensure_zero_filled() {
ysr@345 581 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
ysr@345 582 ensure_zero_filled_locked();
ysr@345 583 }
ysr@345 584
ysr@345 585 void HeapRegion::ensure_zero_filled_locked() {
ysr@345 586 assert(ZF_mon->owned_by_self(), "Precondition");
ysr@345 587 bool should_ignore_zf = SafepointSynchronize::is_at_safepoint();
ysr@345 588 assert(should_ignore_zf || Heap_lock->is_locked(),
ysr@345 589 "Either we're in a GC or we're allocating a region.");
ysr@345 590 switch (zero_fill_state()) {
ysr@345 591 case HeapRegion::NotZeroFilled:
ysr@345 592 set_zero_fill_in_progress(Thread::current());
ysr@345 593 {
ysr@345 594 ZF_mon->unlock();
ysr@345 595 Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
ysr@345 596 ZF_mon->lock_without_safepoint_check();
ysr@345 597 }
ysr@345 598 // A tripwire: the state must not have changed while we dropped the lock.
ysr@345 599 guarantee(zero_fill_state() == HeapRegion::ZeroFilling
ysr@345 600 && zero_filler() == Thread::current(),
ysr@345 601 "AHA! Tell Dave D if you see this...");
ysr@345 602 set_zero_fill_complete();
ysr@345 603 // gclog_or_tty->print_cr("Did sync ZF.");
ysr@345 604 ConcurrentZFThread::note_sync_zfs();
ysr@345 605 break;
ysr@345 606 case HeapRegion::ZeroFilling:
ysr@345 607 if (should_ignore_zf) {
ysr@345 608 // We can "break" the lock and take over the work.
ysr@345 609 Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
ysr@345 610 set_zero_fill_complete();
ysr@345 611 ConcurrentZFThread::note_sync_zfs();
ysr@345 612 break;
ysr@345 613 } else {
ysr@345 614 ConcurrentZFThread::wait_for_ZF_completed(this);
ysr@345 615 }
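// Deliberate fall-through into the ZeroFilled case once the wait
// completes.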
ysr@345 616 case HeapRegion::ZeroFilled:
ysr@345 617 // Nothing to do.
ysr@345 618 break;
ysr@345 619 case HeapRegion::Allocated:
ysr@345 620 guarantee(false, "Should not call on allocated regions.");
ysr@345 621 }
ysr@345 622 assert(zero_fill_state() == HeapRegion::ZeroFilled, "Post");
ysr@345 623 }
ysr@345 624
ysr@345 625 HeapWord*
ysr@345 626 HeapRegion::object_iterate_mem_careful(MemRegion mr,
ysr@345 627 ObjectClosure* cl) {
ysr@345 628 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@345 629 // We used to use "block_start_careful" here. But we're actually happy
ysr@345 630 // to update the BOT while we do this...
ysr@345 631 HeapWord* cur = block_start(mr.start());
ysr@345 632 mr = mr.intersection(used_region());
ysr@345 633 if (mr.is_empty()) return NULL;
ysr@345 634 // Otherwise, find the obj that extends onto mr.start().
ysr@345 635
ysr@345 636 assert(cur <= mr.start()
ysr@896 637 && (oop(cur)->klass_or_null() == NULL ||
ysr@345 638 cur + oop(cur)->size() > mr.start()),
ysr@345 639 "postcondition of block_start");
ysr@345 640 oop obj;
ysr@345 641 while (cur < mr.end()) {
ysr@345 642 obj = oop(cur);
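// A NULL klass means the object's header has not been fully
// initialized yet (we may be racing with an allocating thread),
// so the region is not parseable from here on.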
ysr@896 643 if (obj->klass_or_null() == NULL) {
ysr@345 644 // Ran into an unparseable point.
ysr@345 645 return cur;
ysr@345 646 } else if (!g1h->is_obj_dead(obj)) {
ysr@345 647 cl->do_object(obj);
ysr@345 648 }
ysr@345 649 if (cl->abort()) return cur;
ysr@345 650 // The check above must occur before the operation below, since an
ysr@345 651 // abort might invalidate the "size" operation.
ysr@345 652 cur += obj->size();
ysr@345 653 }
ysr@345 654 return NULL;
ysr@345 655 }
ysr@345 656
ysr@345 657 HeapWord*
ysr@345 658 HeapRegion::
ysr@345 659 oops_on_card_seq_iterate_careful(MemRegion mr,
johnc@1685 660 FilterOutOfRegionClosure* cl,
johnc@1685 661 bool filter_young) {
ysr@345 662 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@345 663
ysr@345 664 // If we're within a stop-world GC, then we might look at a card in a
ysr@345 665 // GC alloc region that extends onto a GC LAB, which may not be
ysr@345 666 // parseable. Stop such at the "saved_mark" of the region.
ysr@345 667 if (G1CollectedHeap::heap()->is_gc_active()) {
ysr@345 668 mr = mr.intersection(used_region_at_save_marks());
ysr@345 669 } else {
ysr@345 670 mr = mr.intersection(used_region());
ysr@345 671 }
ysr@345 672 if (mr.is_empty()) return NULL;
ysr@345 673 // Otherwise, find the obj that extends onto mr.start().
ysr@345 674
johnc@1685 675 // The intersection of the incoming mr (for the card) and the
johnc@1685 676 // allocated part of the region is non-empty. This implies that
johnc@1685 677 // we have actually allocated into this region. The code in
johnc@1685 678 // G1CollectedHeap.cpp that allocates a new region sets the
johnc@1685 679 // is_young tag on the region before allocating. Thus we can
johnc@1685 680 // safely rely on it to tell whether this region is young.
johnc@1685 681 if (is_young() && filter_young) {
johnc@1685 682 return NULL;
johnc@1685 683 }
johnc@1685 684
johnc@1727 685 assert(!is_young(), "check value of filter_young");
johnc@1727 686
ysr@345 687 // We used to use "block_start_careful" here. But we're actually happy
ysr@345 688 // to update the BOT while we do this...
ysr@345 689 HeapWord* cur = block_start(mr.start());
ysr@345 690 assert(cur <= mr.start(), "Postcondition");
ysr@345 691
ysr@345 692 while (cur <= mr.start()) {
ysr@896 693 if (oop(cur)->klass_or_null() == NULL) {
ysr@345 694 // Ran into an unparseable point.
ysr@345 695 return cur;
ysr@345 696 }
ysr@345 697 // Otherwise...
ysr@345 698 int sz = oop(cur)->size();
ysr@345 699 if (cur + sz > mr.start()) break;
ysr@345 700 // Otherwise, go on.
ysr@345 701 cur = cur + sz;
ysr@345 702 }
ysr@345 703 oop obj;
ysr@345 704 obj = oop(cur);
ysr@345 705 // If we finish this loop...
ysr@345 706 assert(cur <= mr.start()
ysr@896 707 && obj->klass_or_null() != NULL
ysr@345 708 && cur + obj->size() > mr.start(),
ysr@345 709 "Loop postcondition");
ysr@345 710 if (!g1h->is_obj_dead(obj)) {
ysr@345 711 obj->oop_iterate(cl, mr);
ysr@345 712 }
ysr@345 713
ysr@345 714 HeapWord* next;
ysr@345 715 while (cur < mr.end()) {
ysr@345 716 obj = oop(cur);
ysr@896 717 if (obj->klass_or_null() == NULL) {
ysr@345 718 // Ran into an unparseable point.
ysr@345 719 return cur;
ysr@345 720 }
ysr@345 721 // Otherwise:
ysr@345 722 next = (cur + obj->size());
ysr@345 723 if (!g1h->is_obj_dead(obj)) {
ysr@345 724 if (next < mr.end()) {
ysr@345 725 obj->oop_iterate(cl);
ysr@345 726 } else {
ysr@345 727 // this obj spans the boundary. If it's an array, stop at the
ysr@345 728 // boundary.
ysr@345 729 if (obj->is_objArray()) {
ysr@345 730 obj->oop_iterate(cl, mr);
ysr@345 731 } else {
ysr@345 732 obj->oop_iterate(cl);
ysr@345 733 }
ysr@345 734 }
ysr@345 735 }
ysr@345 736 cur = next;
ysr@345 737 }
ysr@345 738 return NULL;
ysr@345 739 }
ysr@345 740
ysr@345 741 void HeapRegion::print() const { print_on(gclog_or_tty); }
ysr@345 742 void HeapRegion::print_on(outputStream* st) const {
ysr@345 743 if (isHumongous()) {
ysr@345 744 if (startsHumongous())
ysr@345 745 st->print(" HS");
ysr@345 746 else
ysr@345 747 st->print(" HC");
ysr@345 748 } else {
ysr@345 749 st->print(" ");
ysr@345 750 }
ysr@345 751 if (in_collection_set())
ysr@345 752 st->print(" CS");
ysr@345 753 else if (is_gc_alloc_region())
ysr@345 754 st->print(" A ");
ysr@345 755 else
ysr@345 756 st->print(" ");
ysr@345 757 if (is_young())
johnc@1483 758 st->print(is_survivor() ? " SU" : " Y ");
ysr@345 759 else
ysr@345 760 st->print(" ");
ysr@345 761 if (is_empty())
ysr@345 762 st->print(" F");
ysr@345 763 else
ysr@345 764 st->print(" ");
tonyp@1079 765 st->print(" %5d", _gc_time_stamp);
tonyp@1477 766 st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
tonyp@1477 767 prev_top_at_mark_start(), next_top_at_mark_start());
ysr@345 768 G1OffsetTableContigSpace::print_on(st);
ysr@345 769 }
ysr@345 770
tonyp@860 771 void HeapRegion::verify(bool allow_dirty) const {
tonyp@1079 772 bool dummy = false;
tonyp@1079 773 verify(allow_dirty, /* use_prev_marking */ true, /* failures */ &dummy);
tonyp@860 774 }
tonyp@860 775
ysr@345 776 #define OBJ_SAMPLE_INTERVAL 0
ysr@345 777 #define BLOCK_SAMPLE_INTERVAL 100
ysr@345 778
ysr@345 779 // This really ought to be commoned up into OffsetTableContigSpace somehow.
ysr@345 780 // We would need a mechanism to make that code skip dead objects.
ysr@345 781
tonyp@1079 782 void HeapRegion::verify(bool allow_dirty,
tonyp@1079 783 bool use_prev_marking,
tonyp@1079 784 bool* failures) const {
ysr@345 785 G1CollectedHeap* g1 = G1CollectedHeap::heap();
tonyp@1079 786 *failures = false;
ysr@345 787 HeapWord* p = bottom();
ysr@345 788 HeapWord* prev_p = NULL;
ysr@345 789 int objs = 0;
ysr@345 790 int blocks = 0;
tonyp@860 791 VerifyLiveClosure vl_cl(g1, use_prev_marking);
tonyp@1740 792 bool is_humongous = isHumongous();
tonyp@1740 793 size_t object_num = 0;
ysr@345 794 while (p < top()) {
ysr@345 795 size_t size = oop(p)->size();
tonyp@1740 796 if (is_humongous != g1->isHumongous(size)) {
tonyp@1740 797 gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
tonyp@1740 798 SIZE_FORMAT" words) in a %shumongous region",
tonyp@1740 799 p, g1->isHumongous(size) ? "" : "non-",
tonyp@1740 800 size, is_humongous ? "" : "non-");
tonyp@1740 801 *failures = true;
tonyp@1740 802 }
tonyp@1740 803 object_num += 1;
ysr@345 804 if (blocks == BLOCK_SAMPLE_INTERVAL) {
tonyp@1079 805 HeapWord* res = block_start_const(p + (size/2));
tonyp@1079 806 if (p != res) {
tonyp@1079 807 gclog_or_tty->print_cr("offset computation 1 for "PTR_FORMAT" and "
tonyp@1079 808 SIZE_FORMAT" returned "PTR_FORMAT,
tonyp@1079 809 p, size, res);
tonyp@1079 810 *failures = true;
tonyp@1079 811 return;
tonyp@1079 812 }
ysr@345 813 blocks = 0;
ysr@345 814 } else {
ysr@345 815 blocks++;
ysr@345 816 }
ysr@345 817 if (objs == OBJ_SAMPLE_INTERVAL) {
ysr@345 818 oop obj = oop(p);
tonyp@860 819 if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
tonyp@1079 820 if (obj->is_oop()) {
tonyp@1079 821 klassOop klass = obj->klass();
tonyp@1079 822 if (!klass->is_perm()) {
tonyp@1079 823 gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
tonyp@1079 824 "not in perm", klass, obj);
tonyp@1079 825 *failures = true;
tonyp@1079 826 return;
tonyp@1079 827 } else if (!klass->is_klass()) {
tonyp@1079 828 gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
tonyp@1079 829 "not a klass", klass, obj);
tonyp@1079 830 *failures = true;
tonyp@1079 831 return;
tonyp@1079 832 } else {
tonyp@1079 833 vl_cl.set_containing_obj(obj);
tonyp@1079 834 obj->oop_iterate(&vl_cl);
tonyp@1079 835 if (vl_cl.failures()) {
tonyp@1079 836 *failures = true;
tonyp@1079 837 }
tonyp@1079 838 if (G1MaxVerifyFailures >= 0 &&
tonyp@1079 839 vl_cl.n_failures() >= G1MaxVerifyFailures) {
tonyp@1079 840 return;
tonyp@1079 841 }
tonyp@1079 842 }
tonyp@1079 843 } else {
tonyp@1079 844 gclog_or_tty->print_cr(PTR_FORMAT" not an oop", obj);
tonyp@1079 845 *failures = true;
tonyp@1079 846 return;
tonyp@1079 847 }
ysr@345 848 }
ysr@345 849 objs = 0;
ysr@345 850 } else {
ysr@345 851 objs++;
ysr@345 852 }
ysr@345 853 prev_p = p;
ysr@345 854 p += size;
ysr@345 855 }
ysr@345 856 HeapWord* rend = end();
ysr@345 857 HeapWord* rtop = top();
ysr@345 858 if (rtop < rend) {
tonyp@1079 859 HeapWord* res = block_start_const(rtop + (rend - rtop) / 2);
tonyp@1079 860 if (res != rtop) {
tonyp@1079 861 gclog_or_tty->print_cr("offset computation 2 for "PTR_FORMAT" and "
tonyp@1079 862 PTR_FORMAT" returned "PTR_FORMAT,
tonyp@1079 863 rtop, rend, res);
tonyp@1079 864 *failures = true;
tonyp@1079 865 return;
tonyp@1079 866 }
ysr@345 867 }
tonyp@1079 868
tonyp@1740 869 if (is_humongous && object_num > 1) {
tonyp@1740 870 gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
tonyp@1740 871 "but has "SIZE_FORMAT" objects",
tonyp@1740 872 bottom(), end(), object_num);
tonyp@1740 873 *failures = true;
tonyp@1740 874 }
tonyp@1740 875
tonyp@1079 876 if (p != top()) {
tonyp@1079 877 gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
tonyp@1079 878 "does not match top "PTR_FORMAT, p, top());
tonyp@1079 879 *failures = true;
tonyp@1079 880 return;
ysr@345 881 }
ysr@345 882 }
ysr@345 883
ysr@345 884 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
ysr@345 885 // away eventually.
ysr@345 886
tonyp@359 887 void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
ysr@345 888 // false ==> we'll do the clearing if there's clearing to be done.
tonyp@359 889 ContiguousSpace::initialize(mr, false, mangle_space);
ysr@345 890 _offsets.zero_bottom_entry();
ysr@345 891 _offsets.initialize_threshold();
tonyp@359 892 if (clear_space) clear(mangle_space);
ysr@345 893 }
ysr@345 894
tonyp@359 895 void G1OffsetTableContigSpace::clear(bool mangle_space) {
tonyp@359 896 ContiguousSpace::clear(mangle_space);
ysr@345 897 _offsets.zero_bottom_entry();
ysr@345 898 _offsets.initialize_threshold();
ysr@345 899 }
ysr@345 900
ysr@345 901 void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
ysr@345 902 Space::set_bottom(new_bottom);
ysr@345 903 _offsets.set_bottom(new_bottom);
ysr@345 904 }
ysr@345 905
ysr@345 906 void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
ysr@345 907 Space::set_end(new_end);
ysr@345 908 _offsets.resize(new_end - bottom());
ysr@345 909 }
ysr@345 910
ysr@345 911 void G1OffsetTableContigSpace::print() const {
ysr@345 912 print_short();
ysr@345 913 gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
ysr@345 914 INTPTR_FORMAT ", " INTPTR_FORMAT ")",
ysr@345 915 bottom(), top(), _offsets.threshold(), end());
ysr@345 916 }
ysr@345 917
ysr@345 918 HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
ysr@345 919 return _offsets.initialize_threshold();
ysr@345 920 }
ysr@345 921
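// Called when an allocation crosses the current BOT threshold: record
// the new block [start, end) in the offset table and return the next
// threshold to watch for.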
ysr@345 922 HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
ysr@345 923 HeapWord* end) {
ysr@345 924 _offsets.alloc_block(start, end);
ysr@345 925 return _offsets.threshold();
ysr@345 926 }
ysr@345 927
ysr@345 928 HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
ysr@345 929 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@345 930 assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
ysr@345 931 if (_gc_time_stamp < g1h->get_gc_time_stamp())
ysr@345 932 return top();
ysr@345 933 else
ysr@345 934 return ContiguousSpace::saved_mark_word();
ysr@345 935 }
ysr@345 936
ysr@345 937 void G1OffsetTableContigSpace::set_saved_mark() {
ysr@345 938 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@345 939 unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
ysr@345 940
ysr@345 941 if (_gc_time_stamp < curr_gc_time_stamp) {
ysr@345 942 // The order of these is important, as another thread might be
ysr@345 943 // about to start scanning this region. If it does so after
ysr@345 944 // set_saved_mark and before _gc_time_stamp is updated, it will
ysr@345 945 // still see a stale time stamp and pick up top() as the high
ysr@345 946 // water mark of the region. If it does so after _gc_time_stamp
ysr@345 947 // is updated, it will pick up the right saved_mark_word() as the
ysr@345 948 // high water mark of the region. Either way, the behaviour is correct.
ysr@345 949 ContiguousSpace::set_saved_mark();
ysr@896 950 OrderAccess::storestore();
iveresov@356 951 _gc_time_stamp = curr_gc_time_stamp;
ysr@896 952 // The following fence is to force a flush of the writes above, but
ysr@896 953 // is strictly not needed because when an allocating worker thread
ysr@896 954 // calls set_saved_mark() it does so under the ParGCRareEvent_lock;
ysr@896 955 // when the lock is released, the write will be flushed.
ysr@896 956 // OrderAccess::fence();
ysr@345 957 }
ysr@345 958 }
ysr@345 959
ysr@345 960 G1OffsetTableContigSpace::
ysr@345 961 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
ysr@345 962 MemRegion mr, bool is_zeroed) :
ysr@345 963 _offsets(sharedOffsetArray, mr),
ysr@345 964 _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
ysr@345 965 _gc_time_stamp(0)
ysr@345 966 {
ysr@345 967 _offsets.set_space(this);
tonyp@359 968 initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
ysr@345 969 }
ysr@345 970
ysr@345 971 size_t RegionList::length() {
ysr@345 972 size_t len = 0;
ysr@345 973 HeapRegion* cur = hd();
ysr@345 974 DEBUG_ONLY(HeapRegion* last = NULL);
ysr@345 975 while (cur != NULL) {
ysr@345 976 len++;
ysr@345 977 DEBUG_ONLY(last = cur);
ysr@345 978 cur = get_next(cur);
ysr@345 979 }
ysr@345 980 assert(last == tl(), "Invariant");
ysr@345 981 return len;
ysr@345 982 }
ysr@345 983
ysr@345 984 void RegionList::insert_before_head(HeapRegion* r) {
ysr@345 985 assert(well_formed(), "Inv");
ysr@345 986 set_next(r, hd());
ysr@345 987 _hd = r;
ysr@345 988 _sz++;
ysr@345 989 if (tl() == NULL) _tl = r;
ysr@345 990 assert(well_formed(), "Inv");
ysr@345 991 }
ysr@345 992
ysr@345 993 void RegionList::prepend_list(RegionList* new_list) {
ysr@345 994 assert(well_formed(), "Precondition");
ysr@345 995 assert(new_list->well_formed(), "Precondition");
ysr@345 996 HeapRegion* new_tl = new_list->tl();
ysr@345 997 if (new_tl != NULL) {
ysr@345 998 set_next(new_tl, hd());
ysr@345 999 _hd = new_list->hd();
ysr@345 1000 _sz += new_list->sz();
ysr@345 1001 if (tl() == NULL) _tl = new_list->tl();
ysr@345 1002 } else {
ysr@345 1003 assert(new_list->hd() == NULL && new_list->sz() == 0, "Inv");
ysr@345 1004 }
ysr@345 1005 assert(well_formed(), "Inv");
ysr@345 1006 }
ysr@345 1007
ysr@345 1008 void RegionList::delete_after(HeapRegion* r) {
ysr@345 1009 assert(well_formed(), "Precondition");
ysr@345 1010 assert(r != NULL, "Precondition");
ysr@345 1011 HeapRegion* next = get_next(r);
ysr@345 1012 HeapRegion* next_tl = get_next(next);
ysr@345 1013 set_next(r, next_tl);
ysr@345 1014 dec_sz();
ysr@345 1015 if (next == tl()) {
ysr@345 1016 assert(next_tl == NULL, "Inv");
ysr@345 1017 _tl = r;
ysr@345 1018 }
ysr@345 1019 assert(well_formed(), "Inv");
ysr@345 1020 }
ysr@345 1021
ysr@345 1022 HeapRegion* RegionList::pop() {
ysr@345 1023 assert(well_formed(), "Inv");
ysr@345 1024 HeapRegion* res = hd();
ysr@345 1025 if (res != NULL) {
ysr@345 1026 _hd = get_next(res);
ysr@345 1027 _sz--;
ysr@345 1028 set_next(res, NULL);
ysr@345 1029 if (sz() == 0) _tl = NULL;
ysr@345 1030 }
ysr@345 1031 assert(well_formed(), "Inv");
ysr@345 1032 return res;
ysr@345 1033 }