annotate src/share/vm/gc_implementation/g1/heapRegion.cpp @ 1727:2d160770d2e5

6814437: G1: remove the _new_refs array Summary: The per-worker _new_refs array is used to hold references that point into the collection set. It is populated during RSet updating and subsequently processed. In the event of an evacuation failure it is processed again to recreate the RSets of regions in the collection set. Remove the per-worker _new_refs array by processing the references directly. Use a DirtyCardQueue to hold the cards containing the references so that the RSets of regions in the collection set can be recreated when handling an evacuation failure. Reviewed-by: iveresov, jmasa, tonyp
author johnc
date Mon, 02 Aug 2010 12:51:43 -0700
parents 5cbac8938c4c
children bb847e31b836
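
The change described above replaces the per-worker _new_refs arrays with a dirty card queue that records the cards spanning references into the collection set. As a rough sketch of that scheme (CardQueue, record_reference, and handle_evacuation_failure below are illustrative stand-ins, not the HotSpot types used in the source that follows):

    #include <cstddef>
    #include <vector>

    typedef signed char jbyte;

    // Hypothetical stand-in for G1's dirty card queue: a buffer of card addresses.
    struct CardQueue {
      std::vector<jbyte*> cards;
      void enqueue(jbyte* card) { cards.push_back(card); }
    };

    // RSet updating: process a reference into the collection set directly, but
    // remember its card so the work can be replayed after an evacuation failure.
    void record_reference(void** ref, jbyte* card_for_ref, CardQueue* into_cset_dcq) {
      (void) ref;                            // ... push *ref onto the work queues ...
      into_cset_dcq->enqueue(card_for_ref);  // retain the card, not the reference
    }

    // Evacuation failure: rescan the queued cards to recreate the RSets of the
    // regions that remain in the collection set.
    void handle_evacuation_failure(CardQueue* into_cset_dcq) {
      for (std::size_t i = 0; i < into_cset_dcq->cards.size(); ++i) {
        jbyte* card = into_cset_dcq->cards[i];
        (void) card;                         // ... rescan card, update region RSets ...
      }
    }
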
rev   line source
ysr@345 1 /*
trims@1563 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
ysr@345 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@345 4 *
ysr@345 5 * This code is free software; you can redistribute it and/or modify it
ysr@345 6 * under the terms of the GNU General Public License version 2 only, as
ysr@345 7 * published by the Free Software Foundation.
ysr@345 8 *
ysr@345 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@345 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@345 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@345 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@345 13 * accompanied this code).
ysr@345 14 *
ysr@345 15 * You should have received a copy of the GNU General Public License version
ysr@345 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@345 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@345 18 *
trims@1563 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1563 20 * or visit www.oracle.com if you need additional information or have any
trims@1563 21 * questions.
ysr@345 22 *
ysr@345 23 */
ysr@345 24
ysr@345 25 #include "incls/_precompiled.incl"
ysr@345 26 #include "incls/_heapRegion.cpp.incl"
ysr@345 27
tonyp@996 28 int HeapRegion::LogOfHRGrainBytes = 0;
tonyp@996 29 int HeapRegion::LogOfHRGrainWords = 0;
tonyp@996 30 int HeapRegion::GrainBytes = 0;
tonyp@996 31 int HeapRegion::GrainWords = 0;
tonyp@996 32 int HeapRegion::CardsPerRegion = 0;
tonyp@996 33
ysr@345 34 HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
ysr@345 35 HeapRegion* hr, OopClosure* cl,
ysr@345 36 CardTableModRefBS::PrecisionStyle precision,
ysr@345 37 FilterKind fk) :
ysr@345 38 ContiguousSpaceDCTOC(hr, cl, precision, NULL),
ysr@345 39 _hr(hr), _fk(fk), _g1(g1)
ysr@345 40 {}
ysr@345 41
ysr@345 42 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
ysr@345 43 OopClosure* oc) :
ysr@345 44 _r_bottom(r->bottom()), _r_end(r->end()),
ysr@345 45 _oc(oc), _out_of_region(0)
ysr@345 46 {}
ysr@345 47
ysr@345 48 class VerifyLiveClosure: public OopClosure {
tonyp@860 49 private:
ysr@345 50 G1CollectedHeap* _g1h;
ysr@345 51 CardTableModRefBS* _bs;
ysr@345 52 oop _containing_obj;
ysr@345 53 bool _failures;
ysr@345 54 int _n_failures;
tonyp@860 55 bool _use_prev_marking;
ysr@345 56 public:
tonyp@860 57 // use_prev_marking == true -> use "prev" marking information,
tonyp@860 58 // use_prev_marking == false -> use "next" marking information
tonyp@860 59 VerifyLiveClosure(G1CollectedHeap* g1h, bool use_prev_marking) :
ysr@345 60 _g1h(g1h), _bs(NULL), _containing_obj(NULL),
tonyp@860 61 _failures(false), _n_failures(0), _use_prev_marking(use_prev_marking)
ysr@345 62 {
ysr@345 63 BarrierSet* bs = _g1h->barrier_set();
ysr@345 64 if (bs->is_a(BarrierSet::CardTableModRef))
ysr@345 65 _bs = (CardTableModRefBS*)bs;
ysr@345 66 }
ysr@345 67
ysr@345 68 void set_containing_obj(oop obj) {
ysr@345 69 _containing_obj = obj;
ysr@345 70 }
ysr@345 71
ysr@345 72 bool failures() { return _failures; }
ysr@345 73 int n_failures() { return _n_failures; }
ysr@345 74
ysr@896 75 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
ysr@896 76 virtual void do_oop( oop* p) { do_oop_work(p); }
ysr@345 77
tonyp@1477 78 void print_object(outputStream* out, oop obj) {
tonyp@1477 79 #ifdef PRODUCT
tonyp@1477 80 klassOop k = obj->klass();
tonyp@1477 81 const char* class_name = instanceKlass::cast(k)->external_name();
tonyp@1477 82 out->print_cr("class name %s", class_name);
tonyp@1477 83 #else // PRODUCT
tonyp@1477 84 obj->print_on(out);
tonyp@1477 85 #endif // PRODUCT
tonyp@1477 86 }
tonyp@1477 87
ysr@896 88 template <class T> void do_oop_work(T* p) {
ysr@345 89 assert(_containing_obj != NULL, "Precondition");
tonyp@860 90 assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
tonyp@860 91 "Precondition");
ysr@896 92 T heap_oop = oopDesc::load_heap_oop(p);
ysr@896 93 if (!oopDesc::is_null(heap_oop)) {
ysr@896 94 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
ysr@345 95 bool failed = false;
tonyp@860 96 if (!_g1h->is_in_closed_subset(obj) ||
tonyp@860 97 _g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
ysr@345 98 if (!_failures) {
ysr@345 99 gclog_or_tty->print_cr("");
ysr@345 100 gclog_or_tty->print_cr("----------");
ysr@345 101 }
ysr@345 102 if (!_g1h->is_in_closed_subset(obj)) {
tonyp@1477 103 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
ysr@345 104 gclog_or_tty->print_cr("Field "PTR_FORMAT
tonyp@1477 105 " of live obj "PTR_FORMAT" in region "
tonyp@1477 106 "["PTR_FORMAT", "PTR_FORMAT")",
tonyp@1477 107 p, (void*) _containing_obj,
tonyp@1477 108 from->bottom(), from->end());
tonyp@1477 109 print_object(gclog_or_tty, _containing_obj);
tonyp@1477 110 gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
tonyp@1477 111 (void*) obj);
ysr@345 112 } else {
tonyp@1477 113 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
tonyp@1477 114 HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
ysr@345 115 gclog_or_tty->print_cr("Field "PTR_FORMAT
tonyp@1477 116 " of live obj "PTR_FORMAT" in region "
tonyp@1477 117 "["PTR_FORMAT", "PTR_FORMAT")",
tonyp@1477 118 p, (void*) _containing_obj,
tonyp@1477 119 from->bottom(), from->end());
tonyp@1477 120 print_object(gclog_or_tty, _containing_obj);
tonyp@1477 121 gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
tonyp@1477 122 "["PTR_FORMAT", "PTR_FORMAT")",
tonyp@1477 123 (void*) obj, to->bottom(), to->end());
tonyp@1477 124 print_object(gclog_or_tty, obj);
ysr@345 125 }
ysr@345 126 gclog_or_tty->print_cr("----------");
ysr@345 127 _failures = true;
ysr@345 128 failed = true;
ysr@345 129 _n_failures++;
ysr@345 130 }
ysr@345 131
ysr@345 132 if (!_g1h->full_collection()) {
ysr@896 133 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
ysr@896 134 HeapRegion* to = _g1h->heap_region_containing(obj);
ysr@345 135 if (from != NULL && to != NULL &&
ysr@345 136 from != to &&
ysr@345 137 !to->isHumongous()) {
ysr@345 138 jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
ysr@345 139 jbyte cv_field = *_bs->byte_for_const(p);
ysr@345 140 const jbyte dirty = CardTableModRefBS::dirty_card_val();
ysr@345 141
ysr@345 142 bool is_bad = !(from->is_young()
ysr@345 143 || to->rem_set()->contains_reference(p)
ysr@345 144 || (!G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
ysr@345 145 (_containing_obj->is_objArray() ?
ysr@345 146 cv_field == dirty
ysr@345 147 : cv_obj == dirty || cv_field == dirty)));
ysr@345 148 if (is_bad) {
ysr@345 149 if (!_failures) {
ysr@345 150 gclog_or_tty->print_cr("");
ysr@345 151 gclog_or_tty->print_cr("----------");
ysr@345 152 }
ysr@345 153 gclog_or_tty->print_cr("Missing rem set entry:");
ysr@345 154 gclog_or_tty->print_cr("Field "PTR_FORMAT
ysr@345 155 " of obj "PTR_FORMAT
ysr@345 156 ", in region %d ["PTR_FORMAT
ysr@345 157 ", "PTR_FORMAT"),",
ysr@345 158 p, (void*) _containing_obj,
ysr@345 159 from->hrs_index(),
ysr@345 160 from->bottom(),
ysr@345 161 from->end());
ysr@345 162 _containing_obj->print_on(gclog_or_tty);
ysr@345 163 gclog_or_tty->print_cr("points to obj "PTR_FORMAT
ysr@345 164 " in region %d ["PTR_FORMAT
ysr@345 165 ", "PTR_FORMAT").",
ysr@345 166 (void*) obj, to->hrs_index(),
ysr@345 167 to->bottom(), to->end());
ysr@345 168 obj->print_on(gclog_or_tty);
ysr@345 169 gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
ysr@345 170 cv_obj, cv_field);
ysr@345 171 gclog_or_tty->print_cr("----------");
ysr@345 172 _failures = true;
ysr@345 173 if (!failed) _n_failures++;
ysr@345 174 }
ysr@345 175 }
ysr@345 176 }
ysr@345 177 }
ysr@345 178 }
ysr@345 179 };
ysr@345 180
ysr@345 181 template<class ClosureType>
ysr@345 182 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
ysr@345 183 HeapRegion* hr,
ysr@345 184 HeapWord* cur, HeapWord* top) {
ysr@345 185 oop cur_oop = oop(cur);
ysr@345 186 int oop_size = cur_oop->size();
ysr@345 187 HeapWord* next_obj = cur + oop_size;
ysr@345 188 while (next_obj < top) {
ysr@345 189 // Keep filtering the remembered set.
ysr@345 190 if (!g1h->is_obj_dead(cur_oop, hr)) {
ysr@345 191 // Bottom lies entirely below top, so we can call the
ysr@345 192 // non-memRegion version of oop_iterate below.
ysr@345 193 cur_oop->oop_iterate(cl);
ysr@345 194 }
ysr@345 195 cur = next_obj;
ysr@345 196 cur_oop = oop(cur);
ysr@345 197 oop_size = cur_oop->size();
ysr@345 198 next_obj = cur + oop_size;
ysr@345 199 }
ysr@345 200 return cur;
ysr@345 201 }
ysr@345 202
ysr@345 203 void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
ysr@345 204 HeapWord* bottom,
ysr@345 205 HeapWord* top,
ysr@345 206 OopClosure* cl) {
ysr@345 207 G1CollectedHeap* g1h = _g1;
ysr@345 208
ysr@345 209 int oop_size;
ysr@345 210
ysr@345 211 OopClosure* cl2 = cl;
ysr@345 212 FilterIntoCSClosure intoCSFilt(this, g1h, cl);
ysr@345 213 FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
ysr@345 214 switch (_fk) {
ysr@345 215 case IntoCSFilterKind: cl2 = &intoCSFilt; break;
ysr@345 216 case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
ysr@345 217 }
ysr@345 218
ysr@345 219 // Start filtering what we add to the remembered set. If the object is
ysr@345 220 // not considered dead, either because it is marked (in the mark bitmap)
ysr@345 221 // or it was allocated after marking finished, then we add it. Otherwise
ysr@345 222 // we can safely ignore the object.
ysr@345 223 if (!g1h->is_obj_dead(oop(bottom), _hr)) {
ysr@345 224 oop_size = oop(bottom)->oop_iterate(cl2, mr);
ysr@345 225 } else {
ysr@345 226 oop_size = oop(bottom)->size();
ysr@345 227 }
ysr@345 228
ysr@345 229 bottom += oop_size;
ysr@345 230
ysr@345 231 if (bottom < top) {
ysr@345 232 // We replicate the loop below for several kinds of possible filters.
ysr@345 233 switch (_fk) {
ysr@345 234 case NoFilterKind:
ysr@345 235 bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
ysr@345 236 break;
ysr@345 237 case IntoCSFilterKind: {
ysr@345 238 FilterIntoCSClosure filt(this, g1h, cl);
ysr@345 239 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
ysr@345 240 break;
ysr@345 241 }
ysr@345 242 case OutOfRegionFilterKind: {
ysr@345 243 FilterOutOfRegionClosure filt(_hr, cl);
ysr@345 244 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
ysr@345 245 break;
ysr@345 246 }
ysr@345 247 default:
ysr@345 248 ShouldNotReachHere();
ysr@345 249 }
ysr@345 250
ysr@345 251 // Last object. Need to do dead-obj filtering here too.
ysr@345 252 if (!g1h->is_obj_dead(oop(bottom), _hr)) {
ysr@345 253 oop(bottom)->oop_iterate(cl2, mr);
ysr@345 254 }
ysr@345 255 }
ysr@345 256 }
ysr@345 257
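
A note on walk_mem_region_with_cl above: the switch deliberately replicates the same loop once per filter kind, so that walk_mem_region_loop is instantiated with the concrete closure type and the compiler can inline the per-oop callback instead of dispatching through a virtual OopClosure pointer. A minimal sketch of that pattern, with toy closures standing in for the G1 filters:

    #include <cstdio>

    struct EveryOop {   // stands in for the unfiltered closure
      void do_oop(int v) { std::printf("oop %d\n", v); }
    };

    struct OnlyEven {   // stands in for a filtering closure
      void do_oop(int v) { if (v % 2 == 0) std::printf("even oop %d\n", v); }
    };

    template <class ClosureType>
    void walk_loop(ClosureType* cl, const int* cur, const int* top) {
      for (; cur < top; ++cur) {
        cl->do_oop(*cur);   // statically bound, so the compiler can inline it
      }
    }

    void walk(const int* cur, const int* top, bool filter) {
      // Replicate the loop per closure kind, as the switch above does.
      if (filter) { OnlyEven f; walk_loop(&f, cur, top); }
      else        { EveryOop f; walk_loop(&f, cur, top); }
    }
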
tonyp@996 258 // Minimum region size; we won't go lower than that.
tonyp@996 259 // We might want to decrease this in the future, to deal with small
tonyp@996 260 // heaps a bit more efficiently.
tonyp@996 261 #define MIN_REGION_SIZE ( 1024 * 1024 )
tonyp@996 262
tonyp@996 263 // Maximum region size; we don't go higher than that. There's a good
tonyp@996 264 // reason for having an upper bound. We don't want regions to get too
tonyp@996 265 // large, otherwise cleanup's effectiveness would decrease as there
tonyp@996 266 // will be fewer opportunities to find totally empty regions after
tonyp@996 267 // marking.
tonyp@996 268 #define MAX_REGION_SIZE ( 32 * 1024 * 1024 )
tonyp@996 269
tonyp@996 270 // The automatic region size calculation will try to have around this
tonyp@996 271 // many regions in the heap (based on the min heap size).
tonyp@996 272 #define TARGET_REGION_NUMBER 2048
tonyp@996 273
tonyp@996 274 void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
tonyp@996 275 // region_size in bytes
tonyp@996 276 uintx region_size = G1HeapRegionSize;
tonyp@996 277 if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
tonyp@996 278 // We base the automatic calculation on the min heap size. This
tonyp@996 279 // can be problematic if the spread between min and max is quite
tonyp@996 280 // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
tonyp@996 281 // the max size, the region size might be way too large for the
tonyp@996 282 // min size. Either way, some users might have to set the region
tonyp@996 283 // size manually for some -Xms / -Xmx combos.
tonyp@996 284
tonyp@996 285 region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
tonyp@996 286 (uintx) MIN_REGION_SIZE);
tonyp@996 287 }
tonyp@996 288
tonyp@996 289 int region_size_log = log2_long((jlong) region_size);
tonyp@996 290 // Recalculate the region size to make sure it's a power of
tonyp@996 291 // 2. This means that region_size is the largest power of 2 that's
tonyp@996 292 // <= what we've calculated so far.
prr@1496 293 region_size = ((uintx)1 << region_size_log);
tonyp@996 294
tonyp@996 295 // Now make sure that we don't go over or under our limits.
tonyp@996 296 if (region_size < MIN_REGION_SIZE) {
tonyp@996 297 region_size = MIN_REGION_SIZE;
tonyp@996 298 } else if (region_size > MAX_REGION_SIZE) {
tonyp@996 299 region_size = MAX_REGION_SIZE;
tonyp@996 300 }
tonyp@996 301
tonyp@996 302 // And recalculate the log.
tonyp@996 303 region_size_log = log2_long((jlong) region_size);
tonyp@996 304
tonyp@996 305 // Now, set up the globals.
tonyp@996 306 guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
tonyp@996 307 LogOfHRGrainBytes = region_size_log;
tonyp@996 308
tonyp@996 309 guarantee(LogOfHRGrainWords == 0, "we should only set it once");
tonyp@996 310 LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;
tonyp@996 311
tonyp@996 312 guarantee(GrainBytes == 0, "we should only set it once");
tonyp@996 313 // The cast to int is safe, given that we've bounded region_size by
tonyp@996 314 // MIN_REGION_SIZE and MAX_REGION_SIZE.
tonyp@996 315 GrainBytes = (int) region_size;
tonyp@996 316
tonyp@996 317 guarantee(GrainWords == 0, "we should only set it once");
tonyp@996 318 GrainWords = GrainBytes >> LogHeapWordSize;
tonyp@996 319 guarantee(1 << LogOfHRGrainWords == GrainWords, "sanity");
tonyp@996 320
tonyp@996 321 guarantee(CardsPerRegion == 0, "we should only set it once");
tonyp@996 322 CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
tonyp@996 323 }
tonyp@996 324
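
To make the sizing arithmetic concrete: for -Xms8g the calculation above gives 8g / 2048 = 4M, which is already a power of two and within the [1M, 32M] bounds, so the region size is 4M (2^22 bytes). A standalone model of the same steps (constants copied from the #defines; a 64-bit type is assumed so large heap sizes don't overflow):

    #include <cstdio>

    int main() {
      const unsigned long long MIN_RS = 1024 * 1024;       // MIN_REGION_SIZE
      const unsigned long long MAX_RS = 32 * 1024 * 1024;  // MAX_REGION_SIZE
      const unsigned long long TARGET = 2048;              // TARGET_REGION_NUMBER

      unsigned long long min_heap = 8ULL * 1024 * 1024 * 1024;  // -Xms8g
      unsigned long long rs = min_heap / TARGET;                // 4M
      if (rs < MIN_RS) rs = MIN_RS;                             // the MAX2 above

      int log = 0;                          // floor(log2(rs)), like log2_long
      while ((rs >> (log + 1)) != 0) log++;
      rs = 1ULL << log;                     // largest power of 2 <= rs

      if (rs < MIN_RS) rs = MIN_RS;         // clamp to the limits; the real
      if (rs > MAX_RS) rs = MAX_RS;         // code recomputes the log after this
      std::printf("region size = %lluM (2^%d bytes)\n", rs / (1024 * 1024), log);
      return 0;                             // prints: region size = 4M (2^22 bytes)
    }
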
ysr@345 325 void HeapRegion::reset_after_compaction() {
ysr@345 326 G1OffsetTableContigSpace::reset_after_compaction();
ysr@345 327 // After a compaction the mark bitmap is invalid, so we must
ysr@345 328 // treat all objects as being inside the unmarked area.
ysr@345 329 zero_marked_bytes();
ysr@345 330 init_top_at_mark_start();
ysr@345 331 }
ysr@345 332
ysr@345 333 DirtyCardToOopClosure*
ysr@345 334 HeapRegion::new_dcto_closure(OopClosure* cl,
ysr@345 335 CardTableModRefBS::PrecisionStyle precision,
ysr@345 336 HeapRegionDCTOC::FilterKind fk) {
ysr@345 337 return new HeapRegionDCTOC(G1CollectedHeap::heap(),
ysr@345 338 this, cl, precision, fk);
ysr@345 339 }
ysr@345 340
ysr@345 341 void HeapRegion::hr_clear(bool par, bool clear_space) {
tonyp@358 342 _humongous_type = NotHumongous;
ysr@345 343 _humongous_start_region = NULL;
ysr@345 344 _in_collection_set = false;
ysr@345 345 _is_gc_alloc_region = false;
ysr@345 346
ysr@345 347 // Age stuff (if parallel, this will be done separately, since it needs
ysr@345 348 // to be sequential).
ysr@345 349 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@345 350
ysr@345 351 set_young_index_in_cset(-1);
ysr@345 352 uninstall_surv_rate_group();
ysr@345 353 set_young_type(NotYoung);
ysr@345 354
ysr@345 355 // In case it had been the start of a humongous sequence, reset its end.
ysr@345 356 set_end(_orig_end);
ysr@345 357
ysr@345 358 if (!par) {
ysr@345 359 // If this is parallel, this will be done later.
ysr@345 360 HeapRegionRemSet* hrrs = rem_set();
ysr@345 361 if (hrrs != NULL) hrrs->clear();
tonyp@358 362 _claimed = InitialClaimValue;
ysr@345 363 }
ysr@345 364 zero_marked_bytes();
ysr@345 365 set_sort_index(-1);
ysr@345 366
ysr@345 367 _offsets.resize(HeapRegion::GrainWords);
ysr@345 368 init_top_at_mark_start();
tonyp@359 369 if (clear_space) clear(SpaceDecorator::Mangle);
ysr@345 370 }
ysr@345 371
ysr@345 372 // <PREDICTION>
ysr@345 373 void HeapRegion::calc_gc_efficiency() {
ysr@345 374 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@345 375 _gc_efficiency = (double) garbage_bytes() /
ysr@345 376 g1h->predict_region_elapsed_time_ms(this, false);
ysr@345 377 }
ysr@345 378 // </PREDICTION>
ysr@345 379
ysr@345 380 void HeapRegion::set_startsHumongous() {
tonyp@358 381 _humongous_type = StartsHumongous;
ysr@345 382 _humongous_start_region = this;
ysr@345 383 assert(end() == _orig_end, "Should be normal before alloc.");
ysr@345 384 }
ysr@345 385
ysr@345 386 bool HeapRegion::claimHeapRegion(jint claimValue) {
ysr@345 387 jint current = _claimed;
ysr@345 388 if (current != claimValue) {
ysr@345 389 jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
ysr@345 390 if (res == current) {
ysr@345 391 return true;
ysr@345 392 }
ysr@345 393 }
ysr@345 394 return false;
ysr@345 395 }
ysr@345 396
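
claimHeapRegion above is a single-shot compare-and-swap claim: a worker reads the current claim value once and makes one CAS attempt, so at most one thread installs a given claimValue and losers simply return false and move on. A sketch of the same protocol in C++11 atomics (names are illustrative, not HotSpot's):

    #include <atomic>

    struct Region {
      std::atomic<int> claimed;   // starts at InitialClaimValue (0 here)
      Region() : claimed(0) {}
    };

    bool claim_region(Region* r, int claim_value) {
      int current = r->claimed.load(std::memory_order_relaxed);
      if (current == claim_value) {
        return false;             // already claimed for this phase
      }
      // Exactly one of the threads racing to install claim_value succeeds.
      return r->claimed.compare_exchange_strong(current, claim_value);
    }
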
ysr@345 397 HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
ysr@345 398 HeapWord* low = addr;
ysr@345 399 HeapWord* high = end();
ysr@345 400 while (low < high) {
ysr@345 401 size_t diff = pointer_delta(high, low);
ysr@345 402 // Must add one below to bias toward the high amount. Otherwise, if
ysr@345 403 // "high" were at the desired value, and "low" were one less, we
ysr@345 404 // would not converge on "high". This is not symmetric, because
ysr@345 405 // we set "high" to a block start, which might be the right one,
ysr@345 406 // which we don't do for "low".
ysr@345 407 HeapWord* middle = low + (diff+1)/2;
ysr@345 408 if (middle == high) return high;
ysr@345 409 HeapWord* mid_bs = block_start_careful(middle);
ysr@345 410 if (mid_bs < addr) {
ysr@345 411 low = middle;
ysr@345 412 } else {
ysr@345 413 high = mid_bs;
ysr@345 414 }
ysr@345 415 }
ysr@345 416 assert(low == high && low >= addr, "Didn't work.");
ysr@345 417 return low;
ysr@345 418 }
ysr@345 419
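
The "+1" bias in next_block_start_careful is what guarantees progress: with low = 4 and high = 5, the unbiased midpoint low + diff/2 recomputes 4 forever, while the biased midpoint reaches high and the (middle == high) test terminates the loop. A toy model of the worst case, where every probe forces low upward:

    #include <cassert>

    // Worst case: each probe lands below the target, so the only exit is
    // middle reaching high. The +1 bias makes that happen.
    int upper_search(int low, int high) {   // requires low < high
      while (low < high) {
        int diff = high - low;
        int middle = low + (diff + 1) / 2;  // bias toward the high end
        if (middle == high) return high;
        low = middle;                       // strict progress: diff >= 2 here
      }
      return low;
    }

    int main() {
      assert(upper_search(4, 5) == 5);      // unbiased midpoint would spin forever
      assert(upper_search(0, 100) == 100);
      return 0;
    }
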
ysr@345 420 void HeapRegion::set_next_on_unclean_list(HeapRegion* r) {
ysr@345 421 assert(r == NULL || r->is_on_unclean_list(), "Malformed unclean list.");
ysr@345 422 _next_in_special_set = r;
ysr@345 423 }
ysr@345 424
ysr@345 425 void HeapRegion::set_on_unclean_list(bool b) {
ysr@345 426 _is_on_unclean_list = b;
ysr@345 427 }
ysr@345 428
tonyp@359 429 void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
tonyp@359 430 G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
ysr@345 431 hr_clear(false/*par*/, clear_space);
ysr@345 432 }
ysr@345 433 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
ysr@345 434 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
ysr@345 435 #endif // _MSC_VER
ysr@345 436
ysr@345 437
ysr@345 438 HeapRegion::
ysr@345 439 HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
ysr@345 440 MemRegion mr, bool is_zeroed)
ysr@345 441 : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
ysr@345 442 _next_fk(HeapRegionDCTOC::NoFilterKind),
ysr@345 443 _hrs_index(-1),
tonyp@358 444 _humongous_type(NotHumongous), _humongous_start_region(NULL),
ysr@345 445 _in_collection_set(false), _is_gc_alloc_region(false),
ysr@345 446 _is_on_free_list(false), _is_on_unclean_list(false),
ysr@345 447 _next_in_special_set(NULL), _orig_end(NULL),
tonyp@358 448 _claimed(InitialClaimValue), _evacuation_failed(false),
ysr@345 449 _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
ysr@345 450 _young_type(NotYoung), _next_young_region(NULL),
apetrusenko@844 451 _next_dirty_cards_region(NULL),
ysr@345 452 _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
johnc@1483 453 _rem_set(NULL), _zfs(NotZeroFilled),
johnc@1483 454 _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
johnc@1483 455 _predicted_bytes_to_copy(0)
ysr@345 456 {
ysr@345 457 _orig_end = mr.end();
ysr@345 458 // Note that initialize() will set the start of the unmarked area of the
ysr@345 459 // region.
tonyp@359 460 this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
tonyp@359 461 set_top(bottom());
tonyp@359 462 set_saved_mark();
ysr@345 463
ysr@345 464 _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
ysr@345 465
ysr@345 466 assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
ysr@345 467 // In case the region is allocated during a pause, note the top.
ysr@345 468 // We haven't done any counting on a brand new region.
ysr@345 469 _top_at_conc_mark_count = bottom();
ysr@345 470 }
ysr@345 471
ysr@345 472 class NextCompactionHeapRegionClosure: public HeapRegionClosure {
ysr@345 473 const HeapRegion* _target;
ysr@345 474 bool _target_seen;
ysr@345 475 HeapRegion* _last;
ysr@345 476 CompactibleSpace* _res;
ysr@345 477 public:
ysr@345 478 NextCompactionHeapRegionClosure(const HeapRegion* target) :
ysr@345 479 _target(target), _target_seen(false), _res(NULL) {}
ysr@345 480 bool doHeapRegion(HeapRegion* cur) {
ysr@345 481 if (_target_seen) {
ysr@345 482 if (!cur->isHumongous()) {
ysr@345 483 _res = cur;
ysr@345 484 return true;
ysr@345 485 }
ysr@345 486 } else if (cur == _target) {
ysr@345 487 _target_seen = true;
ysr@345 488 }
ysr@345 489 return false;
ysr@345 490 }
ysr@345 491 CompactibleSpace* result() { return _res; }
ysr@345 492 };
ysr@345 493
ysr@345 494 CompactibleSpace* HeapRegion::next_compaction_space() const {
ysr@345 495 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@345 496 // cast away const-ness
ysr@345 497 HeapRegion* r = (HeapRegion*) this;
ysr@345 498 NextCompactionHeapRegionClosure blk(r);
ysr@345 499 g1h->heap_region_iterate_from(r, &blk);
ysr@345 500 return blk.result();
ysr@345 501 }
ysr@345 502
ysr@345 503 void HeapRegion::set_continuesHumongous(HeapRegion* start) {
ysr@345 504 // The order is important here.
ysr@345 505 start->add_continuingHumongousRegion(this);
tonyp@358 506 _humongous_type = ContinuesHumongous;
ysr@345 507 _humongous_start_region = start;
ysr@345 508 }
ysr@345 509
ysr@345 510 void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) {
ysr@345 511 // Must join the blocks of the current H region seq with the block of the
ysr@345 512 // added region.
ysr@345 513 offsets()->join_blocks(bottom(), cont->bottom());
ysr@345 514 arrayOop obj = (arrayOop)(bottom());
ysr@345 515 obj->set_length((int) (obj->length() + cont->capacity()/jintSize));
ysr@345 516 set_end(cont->end());
ysr@345 517 set_top(cont->end());
ysr@345 518 }
ysr@345 519
ysr@345 520 void HeapRegion::save_marks() {
ysr@345 521 set_saved_mark();
ysr@345 522 }
ysr@345 523
ysr@345 524 void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) {
ysr@345 525 HeapWord* p = mr.start();
ysr@345 526 HeapWord* e = mr.end();
ysr@345 527 oop obj;
ysr@345 528 while (p < e) {
ysr@345 529 obj = oop(p);
ysr@345 530 p += obj->oop_iterate(cl);
ysr@345 531 }
ysr@345 532 assert(p == e, "bad memregion: doesn't end on obj boundary");
ysr@345 533 }
ysr@345 534
ysr@345 535 #define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
ysr@345 536 void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
ysr@345 537 ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
ysr@345 538 }
ysr@345 539 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)
ysr@345 540
ysr@345 541
ysr@345 542 void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
ysr@345 543 oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
ysr@345 544 }
ysr@345 545
ysr@345 546 #ifdef DEBUG
ysr@345 547 HeapWord* HeapRegion::allocate(size_t size) {
ysr@345 548 jint state = zero_fill_state();
ysr@345 549 assert(!G1CollectedHeap::heap()->allocs_are_zero_filled() ||
ysr@345 550 zero_fill_is_allocated(),
ysr@345 551 "When ZF is on, only alloc in ZF'd regions");
ysr@345 552 return G1OffsetTableContigSpace::allocate(size);
ysr@345 553 }
ysr@345 554 #endif
ysr@345 555
ysr@345 556 void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) {
ysr@345 557 assert(ZF_mon->owned_by_self() ||
ysr@345 558 Universe::heap()->is_gc_active(),
ysr@345 559 "Must hold the lock or be a full GC to modify.");
apetrusenko@1556 560 #ifdef ASSERT
apetrusenko@1556 561 if (top() != bottom() && zfs != Allocated) {
apetrusenko@1556 562 ResourceMark rm;
apetrusenko@1556 563 stringStream region_str;
apetrusenko@1556 564 print_on(&region_str);
apetrusenko@1556 565 assert(top() == bottom() || zfs == Allocated,
apetrusenko@1556 566 err_msg("Region must be empty, or we must be setting it to allocated. "
apetrusenko@1556 567 "_zfs=%d, zfs=%d, region: %s", _zfs, zfs, region_str.as_string()));
apetrusenko@1556 568 }
apetrusenko@1556 569 #endif
ysr@345 570 _zfs = zfs;
ysr@345 571 }
ysr@345 572
ysr@345 573 void HeapRegion::set_zero_fill_complete() {
ysr@345 574 set_zero_fill_state_work(ZeroFilled);
ysr@345 575 if (ZF_mon->owned_by_self()) {
ysr@345 576 ZF_mon->notify_all();
ysr@345 577 }
ysr@345 578 }
ysr@345 579
ysr@345 580
ysr@345 581 void HeapRegion::ensure_zero_filled() {
ysr@345 582 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
ysr@345 583 ensure_zero_filled_locked();
ysr@345 584 }
ysr@345 585
ysr@345 586 void HeapRegion::ensure_zero_filled_locked() {
ysr@345 587 assert(ZF_mon->owned_by_self(), "Precondition");
ysr@345 588 bool should_ignore_zf = SafepointSynchronize::is_at_safepoint();
ysr@345 589 assert(should_ignore_zf || Heap_lock->is_locked(),
ysr@345 590 "Either we're in a GC or we're allocating a region.");
ysr@345 591 switch (zero_fill_state()) {
ysr@345 592 case HeapRegion::NotZeroFilled:
ysr@345 593 set_zero_fill_in_progress(Thread::current());
ysr@345 594 {
ysr@345 595 ZF_mon->unlock();
ysr@345 596 Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
ysr@345 597 ZF_mon->lock_without_safepoint_check();
ysr@345 598 }
ysr@345 599 // A trap.
ysr@345 600 guarantee(zero_fill_state() == HeapRegion::ZeroFilling
ysr@345 601 && zero_filler() == Thread::current(),
ysr@345 602 "AHA! Tell Dave D if you see this...");
ysr@345 603 set_zero_fill_complete();
ysr@345 604 // gclog_or_tty->print_cr("Did sync ZF.");
ysr@345 605 ConcurrentZFThread::note_sync_zfs();
ysr@345 606 break;
ysr@345 607 case HeapRegion::ZeroFilling:
ysr@345 608 if (should_ignore_zf) {
ysr@345 609 // We can "break" the lock and take over the work.
ysr@345 610 Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
ysr@345 611 set_zero_fill_complete();
ysr@345 612 ConcurrentZFThread::note_sync_zfs();
ysr@345 613 break;
ysr@345 614 } else {
ysr@345 615 ConcurrentZFThread::wait_for_ZF_completed(this);
ysr@345 616 }
ysr@345 617 case HeapRegion::ZeroFilled:
ysr@345 618 // Nothing to do.
ysr@345 619 break;
ysr@345 620 case HeapRegion::Allocated:
ysr@345 621 guarantee(false, "Should not call on allocated regions.");
ysr@345 622 }
ysr@345 623 assert(zero_fill_state() == HeapRegion::ZeroFilled, "Post");
ysr@345 624 }
ysr@345 625
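
For orientation, a compact model of the zero-fill lifecycle that ensure_zero_filled_locked drives. The transition set is inferred from the switch above and from set_zero_fill_state_work, so treat it as a reading aid rather than the authoritative state machine:

    // State names follow the ZeroFillState enum used above.
    enum ZeroFillState { NotZeroFilled, ZeroFilling, ZeroFilled, Allocated };

    // Transitions ensure_zero_filled_locked relies on:
    //   NotZeroFilled -> ZeroFilling   (a filler thread claims the region)
    //   ZeroFilling   -> ZeroFilled    (the fill completes)
    //   ZeroFilled    -> Allocated     (the region is handed out)
    bool can_transition(ZeroFillState from, ZeroFillState to) {
      return (from == NotZeroFilled && to == ZeroFilling) ||
             (from == ZeroFilling   && to == ZeroFilled)  ||
             (from == ZeroFilled    && to == Allocated);
    }
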
ysr@345 626 HeapWord*
ysr@345 627 HeapRegion::object_iterate_mem_careful(MemRegion mr,
ysr@345 628 ObjectClosure* cl) {
ysr@345 629 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@345 630 // We used to use "block_start_careful" here. But we're actually happy
ysr@345 631 // to update the BOT while we do this...
ysr@345 632 HeapWord* cur = block_start(mr.start());
ysr@345 633 mr = mr.intersection(used_region());
ysr@345 634 if (mr.is_empty()) return NULL;
ysr@345 635 // Otherwise, find the obj that extends onto mr.start().
ysr@345 636
ysr@345 637 assert(cur <= mr.start()
ysr@896 638 && (oop(cur)->klass_or_null() == NULL ||
ysr@345 639 cur + oop(cur)->size() > mr.start()),
ysr@345 640 "postcondition of block_start");
ysr@345 641 oop obj;
ysr@345 642 while (cur < mr.end()) {
ysr@345 643 obj = oop(cur);
ysr@896 644 if (obj->klass_or_null() == NULL) {
ysr@345 645 // Ran into an unparseable point.
ysr@345 646 return cur;
ysr@345 647 } else if (!g1h->is_obj_dead(obj)) {
ysr@345 648 cl->do_object(obj);
ysr@345 649 }
ysr@345 650 if (cl->abort()) return cur;
ysr@345 651 // The check above must occur before the operation below, since an
ysr@345 652 // abort might invalidate the "size" operation.
ysr@345 653 cur += obj->size();
ysr@345 654 }
ysr@345 655 return NULL;
ysr@345 656 }
ysr@345 657
ysr@345 658 HeapWord*
ysr@345 659 HeapRegion::
ysr@345 660 oops_on_card_seq_iterate_careful(MemRegion mr,
johnc@1685 661 FilterOutOfRegionClosure* cl,
johnc@1685 662 bool filter_young) {
ysr@345 663 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@345 664
ysr@345 665 // If we're within a stop-world GC, then we might look at a card in a
ysr@345 666 // GC alloc region that extends onto a GC LAB, which may not be
ysr@345 667 // parseable. Stop the iteration at the "saved_mark" of the region.
ysr@345 668 if (G1CollectedHeap::heap()->is_gc_active()) {
ysr@345 669 mr = mr.intersection(used_region_at_save_marks());
ysr@345 670 } else {
ysr@345 671 mr = mr.intersection(used_region());
ysr@345 672 }
ysr@345 673 if (mr.is_empty()) return NULL;
ysr@345 674 // Otherwise, find the obj that extends onto mr.start().
ysr@345 675
johnc@1685 676 // The intersection of the incoming mr (for the card) and the
johnc@1685 677 // allocated part of the region is non-empty. This implies that
johnc@1685 678 // we have actually allocated into this region. The code in
johnc@1685 679 // G1CollectedHeap.cpp that allocates a new region sets the
johnc@1685 680 // is_young tag on the region before allocating. Thus we can
johnc@1685 681 // safely determine whether this region is young.
johnc@1685 682 if (is_young() && filter_young) {
johnc@1685 683 return NULL;
johnc@1685 684 }
johnc@1685 685
johnc@1727 686 assert(!is_young(), "check value of filter_young");
johnc@1727 687
ysr@345 688 // We used to use "block_start_careful" here. But we're actually happy
ysr@345 689 // to update the BOT while we do this...
ysr@345 690 HeapWord* cur = block_start(mr.start());
ysr@345 691 assert(cur <= mr.start(), "Postcondition");
ysr@345 692
ysr@345 693 while (cur <= mr.start()) {
ysr@896 694 if (oop(cur)->klass_or_null() == NULL) {
ysr@345 695 // Ran into an unparseable point.
ysr@345 696 return cur;
ysr@345 697 }
ysr@345 698 // Otherwise...
ysr@345 699 int sz = oop(cur)->size();
ysr@345 700 if (cur + sz > mr.start()) break;
ysr@345 701 // Otherwise, go on.
ysr@345 702 cur = cur + sz;
ysr@345 703 }
ysr@345 704 oop obj;
ysr@345 705 obj = oop(cur);
ysr@345 706 // If we finish this loop...
ysr@345 707 assert(cur <= mr.start()
ysr@896 708 && obj->klass_or_null() != NULL
ysr@345 709 && cur + obj->size() > mr.start(),
ysr@345 710 "Loop postcondition");
ysr@345 711 if (!g1h->is_obj_dead(obj)) {
ysr@345 712 obj->oop_iterate(cl, mr);
ysr@345 713 }
ysr@345 714
ysr@345 715 HeapWord* next;
ysr@345 716 while (cur < mr.end()) {
ysr@345 717 obj = oop(cur);
ysr@896 718 if (obj->klass_or_null() == NULL) {
ysr@345 719 // Ran into an unparseable point.
ysr@345 720 return cur;
ysr@345 721 }
ysr@345 722 // Otherwise:
ysr@345 723 next = (cur + obj->size());
ysr@345 724 if (!g1h->is_obj_dead(obj)) {
ysr@345 725 if (next < mr.end()) {
ysr@345 726 obj->oop_iterate(cl);
ysr@345 727 } else {
ysr@345 728 // this obj spans the boundary. If it's an array, stop at the
ysr@345 729 // boundary.
ysr@345 730 if (obj->is_objArray()) {
ysr@345 731 obj->oop_iterate(cl, mr);
ysr@345 732 } else {
ysr@345 733 obj->oop_iterate(cl);
ysr@345 734 }
ysr@345 735 }
ysr@345 736 }
ysr@345 737 cur = next;
ysr@345 738 }
ysr@345 739 return NULL;
ysr@345 740 }
ysr@345 741
ysr@345 742 void HeapRegion::print() const { print_on(gclog_or_tty); }
ysr@345 743 void HeapRegion::print_on(outputStream* st) const {
ysr@345 744 if (isHumongous()) {
ysr@345 745 if (startsHumongous())
ysr@345 746 st->print(" HS");
ysr@345 747 else
ysr@345 748 st->print(" HC");
ysr@345 749 } else {
ysr@345 750 st->print(" ");
ysr@345 751 }
ysr@345 752 if (in_collection_set())
ysr@345 753 st->print(" CS");
ysr@345 754 else if (is_gc_alloc_region())
ysr@345 755 st->print(" A ");
ysr@345 756 else
ysr@345 757 st->print(" ");
ysr@345 758 if (is_young())
johnc@1483 759 st->print(is_survivor() ? " SU" : " Y ");
ysr@345 760 else
ysr@345 761 st->print(" ");
ysr@345 762 if (is_empty())
ysr@345 763 st->print(" F");
ysr@345 764 else
ysr@345 765 st->print(" ");
tonyp@1079 766 st->print(" %5d", _gc_time_stamp);
tonyp@1477 767 st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
tonyp@1477 768 prev_top_at_mark_start(), next_top_at_mark_start());
ysr@345 769 G1OffsetTableContigSpace::print_on(st);
ysr@345 770 }
ysr@345 771
tonyp@860 772 void HeapRegion::verify(bool allow_dirty) const {
tonyp@1079 773 bool dummy = false;
tonyp@1079 774 verify(allow_dirty, /* use_prev_marking */ true, /* failures */ &dummy);
tonyp@860 775 }
tonyp@860 776
ysr@345 777 #define OBJ_SAMPLE_INTERVAL 0
ysr@345 778 #define BLOCK_SAMPLE_INTERVAL 100
ysr@345 779
ysr@345 780 // This really ought to be commoned up into OffsetTableContigSpace somehow.
ysr@345 781 // We would need a mechanism to make that code skip dead objects.
ysr@345 782
tonyp@1079 783 void HeapRegion::verify(bool allow_dirty,
tonyp@1079 784 bool use_prev_marking,
tonyp@1079 785 bool* failures) const {
ysr@345 786 G1CollectedHeap* g1 = G1CollectedHeap::heap();
tonyp@1079 787 *failures = false;
ysr@345 788 HeapWord* p = bottom();
ysr@345 789 HeapWord* prev_p = NULL;
ysr@345 790 int objs = 0;
ysr@345 791 int blocks = 0;
tonyp@860 792 VerifyLiveClosure vl_cl(g1, use_prev_marking);
ysr@345 793 while (p < top()) {
ysr@345 794 size_t size = oop(p)->size();
ysr@345 795 if (blocks == BLOCK_SAMPLE_INTERVAL) {
tonyp@1079 796 HeapWord* res = block_start_const(p + (size/2));
tonyp@1079 797 if (p != res) {
tonyp@1079 798 gclog_or_tty->print_cr("offset computation 1 for "PTR_FORMAT" and "
tonyp@1079 799 SIZE_FORMAT" returned "PTR_FORMAT,
tonyp@1079 800 p, size, res);
tonyp@1079 801 *failures = true;
tonyp@1079 802 return;
tonyp@1079 803 }
ysr@345 804 blocks = 0;
ysr@345 805 } else {
ysr@345 806 blocks++;
ysr@345 807 }
ysr@345 808 if (objs == OBJ_SAMPLE_INTERVAL) {
ysr@345 809 oop obj = oop(p);
tonyp@860 810 if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
tonyp@1079 811 if (obj->is_oop()) {
tonyp@1079 812 klassOop klass = obj->klass();
tonyp@1079 813 if (!klass->is_perm()) {
tonyp@1079 814 gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
tonyp@1079 815 "not in perm", klass, obj);
tonyp@1079 816 *failures = true;
tonyp@1079 817 return;
tonyp@1079 818 } else if (!klass->is_klass()) {
tonyp@1079 819 gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
tonyp@1079 820 "not a klass", klass, obj);
tonyp@1079 821 *failures = true;
tonyp@1079 822 return;
tonyp@1079 823 } else {
tonyp@1079 824 vl_cl.set_containing_obj(obj);
tonyp@1079 825 obj->oop_iterate(&vl_cl);
tonyp@1079 826 if (vl_cl.failures()) {
tonyp@1079 827 *failures = true;
tonyp@1079 828 }
tonyp@1079 829 if (G1MaxVerifyFailures >= 0 &&
tonyp@1079 830 vl_cl.n_failures() >= G1MaxVerifyFailures) {
tonyp@1079 831 return;
tonyp@1079 832 }
tonyp@1079 833 }
tonyp@1079 834 } else {
tonyp@1079 835 gclog_or_tty->print_cr(PTR_FORMAT" not an oop", obj);
tonyp@1079 836 *failures = true;
tonyp@1079 837 return;
tonyp@1079 838 }
ysr@345 839 }
ysr@345 840 objs = 0;
ysr@345 841 } else {
ysr@345 842 objs++;
ysr@345 843 }
ysr@345 844 prev_p = p;
ysr@345 845 p += size;
ysr@345 846 }
ysr@345 847 HeapWord* rend = end();
ysr@345 848 HeapWord* rtop = top();
ysr@345 849 if (rtop < rend) {
tonyp@1079 850 HeapWord* res = block_start_const(rtop + (rend - rtop) / 2);
tonyp@1079 851 if (res != rtop) {
tonyp@1079 852 gclog_or_tty->print_cr("offset computation 2 for "PTR_FORMAT" and "
tonyp@1079 853 PTR_FORMAT" returned "PTR_FORMAT,
tonyp@1079 854 rtop, rend, res);
tonyp@1079 855 *failures = true;
tonyp@1079 856 return;
tonyp@1079 857 }
ysr@345 858 }
tonyp@1079 859
tonyp@1079 860 if (p != top()) {
tonyp@1079 861 gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
tonyp@1079 862 "does not match top "PTR_FORMAT, p, top());
tonyp@1079 863 *failures = true;
tonyp@1079 864 return;
ysr@345 865 }
ysr@345 866 }
ysr@345 867
ysr@345 868 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
ysr@345 869 // away eventually.
ysr@345 870
tonyp@359 871 void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
ysr@345 872 // false ==> we'll do the clearing if there's clearing to be done.
tonyp@359 873 ContiguousSpace::initialize(mr, false, mangle_space);
ysr@345 874 _offsets.zero_bottom_entry();
ysr@345 875 _offsets.initialize_threshold();
tonyp@359 876 if (clear_space) clear(mangle_space);
ysr@345 877 }
ysr@345 878
tonyp@359 879 void G1OffsetTableContigSpace::clear(bool mangle_space) {
tonyp@359 880 ContiguousSpace::clear(mangle_space);
ysr@345 881 _offsets.zero_bottom_entry();
ysr@345 882 _offsets.initialize_threshold();
ysr@345 883 }
ysr@345 884
ysr@345 885 void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
ysr@345 886 Space::set_bottom(new_bottom);
ysr@345 887 _offsets.set_bottom(new_bottom);
ysr@345 888 }
ysr@345 889
ysr@345 890 void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
ysr@345 891 Space::set_end(new_end);
ysr@345 892 _offsets.resize(new_end - bottom());
ysr@345 893 }
ysr@345 894
ysr@345 895 void G1OffsetTableContigSpace::print() const {
ysr@345 896 print_short();
ysr@345 897 gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
ysr@345 898 INTPTR_FORMAT ", " INTPTR_FORMAT ")",
ysr@345 899 bottom(), top(), _offsets.threshold(), end());
ysr@345 900 }
ysr@345 901
ysr@345 902 HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
ysr@345 903 return _offsets.initialize_threshold();
ysr@345 904 }
ysr@345 905
ysr@345 906 HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
ysr@345 907 HeapWord* end) {
ysr@345 908 _offsets.alloc_block(start, end);
ysr@345 909 return _offsets.threshold();
ysr@345 910 }
ysr@345 911
ysr@345 912 HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
ysr@345 913 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@345 914 assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
ysr@345 915 if (_gc_time_stamp < g1h->get_gc_time_stamp())
ysr@345 916 return top();
ysr@345 917 else
ysr@345 918 return ContiguousSpace::saved_mark_word();
ysr@345 919 }
ysr@345 920
ysr@345 921 void G1OffsetTableContigSpace::set_saved_mark() {
ysr@345 922 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@345 923 unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
ysr@345 924
ysr@345 925 if (_gc_time_stamp < curr_gc_time_stamp) {
ysr@345 926 // The order of these is important, as another thread might be
ysr@345 927 // about to start scanning this region. If it does so after
ysr@345 928 // set_saved_mark and before _gc_time_stamp = ..., then it will see
ysr@345 929 // a stale time stamp, and it will pick up top() as the high water mark
ysr@345 930 // of region. If it does so after _gc_time_stamp = ..., then it
ysr@345 931 // will pick up the right saved_mark_word() as the high water mark
ysr@345 932 // of the region. Either way, the behaviour will be correct.
ysr@345 933 ContiguousSpace::set_saved_mark();
ysr@896 934 OrderAccess::storestore();
iveresov@356 935 _gc_time_stamp = curr_gc_time_stamp;
ysr@896 936 // The following fence is to force a flush of the writes above, but
ysr@896 937 // is strictly not needed because when an allocating worker thread
ysr@896 938 // calls set_saved_mark() it does so under the ParGCRareEvent_lock;
ysr@896 939 // when the lock is released, the write will be flushed.
ysr@896 940 // OrderAccess::fence();
ysr@345 941 }
ysr@345 942 }
ysr@345 943
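
The ordering comment in set_saved_mark describes the classic publish pattern: write the payload (the saved mark), then publish the version (the time stamp), with a store-store barrier in between. A C++11 rendering of the same idea with illustrative names (OrderAccess::storestore() plays the role of the release ordering):

    #include <atomic>

    struct SpaceModel {
      void* saved_mark;
      std::atomic<unsigned> gc_time_stamp;
      SpaceModel() : saved_mark(0), gc_time_stamp(0) {}
    };

    void publish_saved_mark(SpaceModel* s, void* mark, unsigned curr_stamp) {
      s->saved_mark = mark;                                  // 1. write payload
      s->gc_time_stamp.store(curr_stamp,
                             std::memory_order_release);     // 2. publish
    }

    void* read_saved_mark(SpaceModel* s, unsigned curr_stamp, void* top) {
      // A reader that observes the current stamp (acquire) also observes the
      // saved mark written before it; a stale stamp falls back to top(),
      // mirroring saved_mark_word() above.
      if (s->gc_time_stamp.load(std::memory_order_acquire) < curr_stamp) {
        return top;
      }
      return s->saved_mark;
    }
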
ysr@345 944 G1OffsetTableContigSpace::
ysr@345 945 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
ysr@345 946 MemRegion mr, bool is_zeroed) :
ysr@345 947 _offsets(sharedOffsetArray, mr),
ysr@345 948 _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
ysr@345 949 _gc_time_stamp(0)
ysr@345 950 {
ysr@345 951 _offsets.set_space(this);
tonyp@359 952 initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
ysr@345 953 }
ysr@345 954
ysr@345 955 size_t RegionList::length() {
ysr@345 956 size_t len = 0;
ysr@345 957 HeapRegion* cur = hd();
ysr@345 958 DEBUG_ONLY(HeapRegion* last = NULL);
ysr@345 959 while (cur != NULL) {
ysr@345 960 len++;
ysr@345 961 DEBUG_ONLY(last = cur);
ysr@345 962 cur = get_next(cur);
ysr@345 963 }
ysr@345 964 assert(last == tl(), "Invariant");
ysr@345 965 return len;
ysr@345 966 }
ysr@345 967
ysr@345 968 void RegionList::insert_before_head(HeapRegion* r) {
ysr@345 969 assert(well_formed(), "Inv");
ysr@345 970 set_next(r, hd());
ysr@345 971 _hd = r;
ysr@345 972 _sz++;
ysr@345 973 if (tl() == NULL) _tl = r;
ysr@345 974 assert(well_formed(), "Inv");
ysr@345 975 }
ysr@345 976
ysr@345 977 void RegionList::prepend_list(RegionList* new_list) {
ysr@345 978 assert(well_formed(), "Precondition");
ysr@345 979 assert(new_list->well_formed(), "Precondition");
ysr@345 980 HeapRegion* new_tl = new_list->tl();
ysr@345 981 if (new_tl != NULL) {
ysr@345 982 set_next(new_tl, hd());
ysr@345 983 _hd = new_list->hd();
ysr@345 984 _sz += new_list->sz();
ysr@345 985 if (tl() == NULL) _tl = new_list->tl();
ysr@345 986 } else {
ysr@345 987 assert(new_list->hd() == NULL && new_list->sz() == 0, "Inv");
ysr@345 988 }
ysr@345 989 assert(well_formed(), "Inv");
ysr@345 990 }
ysr@345 991
ysr@345 992 void RegionList::delete_after(HeapRegion* r) {
ysr@345 993 assert(well_formed(), "Precondition");
ysr@345 994 HeapRegion* next = get_next(r);
ysr@345 995 assert(r != NULL, "Precondition");
ysr@345 996 HeapRegion* next_tl = get_next(next);
ysr@345 997 set_next(r, next_tl);
ysr@345 998 dec_sz();
ysr@345 999 if (next == tl()) {
ysr@345 1000 assert(next_tl == NULL, "Inv");
ysr@345 1001 _tl = r;
ysr@345 1002 }
ysr@345 1003 assert(well_formed(), "Inv");
ysr@345 1004 }
ysr@345 1005
ysr@345 1006 HeapRegion* RegionList::pop() {
ysr@345 1007 assert(well_formed(), "Inv");
ysr@345 1008 HeapRegion* res = hd();
ysr@345 1009 if (res != NULL) {
ysr@345 1010 _hd = get_next(res);
ysr@345 1011 _sz--;
ysr@345 1012 set_next(res, NULL);
ysr@345 1013 if (sz() == 0) _tl = NULL;
ysr@345 1014 }
ysr@345 1015 assert(well_formed(), "Inv");
ysr@345 1016 return res;
ysr@345 1017 }