annotate src/share/vm/gc_implementation/g1/heapRegion.cpp @ 2967:d320dd70ca40

6484982: G1: process references during evacuation pauses Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate. Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
author johnc
date Thu, 22 Sep 2011 10:57:37 -0700
parents c20e006ee26a
children 7afaeffa5d9b
rev   line source
ysr@345 1 /*
tonyp@2146 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@345 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@345 4 *
ysr@345 5 * This code is free software; you can redistribute it and/or modify it
ysr@345 6 * under the terms of the GNU General Public License version 2 only, as
ysr@345 7 * published by the Free Software Foundation.
ysr@345 8 *
ysr@345 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@345 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@345 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@345 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@345 13 * accompanied this code).
ysr@345 14 *
ysr@345 15 * You should have received a copy of the GNU General Public License version
ysr@345 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@345 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@345 18 *
trims@1563 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1563 20 * or visit www.oracle.com if you need additional information or have any
trims@1563 21 * questions.
ysr@345 22 *
ysr@345 23 */
ysr@345 24
stefank@1992 25 #include "precompiled.hpp"
stefank@1992 26 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
stefank@1992 27 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@1992 28 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
stefank@1992 29 #include "gc_implementation/g1/heapRegion.inline.hpp"
stefank@1992 30 #include "gc_implementation/g1/heapRegionRemSet.hpp"
stefank@1992 31 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
stefank@1992 32 #include "memory/genOopClosures.inline.hpp"
stefank@1992 33 #include "memory/iterator.hpp"
stefank@1992 34 #include "oops/oop.inline.hpp"
ysr@345 35
// Region sizing parameters shared by all heap regions; all are computed
// exactly once by HeapRegion::setup_heap_region_size() (see below), which
// guarantees they are still zero when it runs.
int HeapRegion::LogOfHRGrainBytes = 0;  // log2 of the region size in bytes
int HeapRegion::LogOfHRGrainWords = 0;  // log2 of the region size in HeapWords
int HeapRegion::GrainBytes = 0;         // region size in bytes
int HeapRegion::GrainWords = 0;         // region size in HeapWords
int HeapRegion::CardsPerRegion = 0;     // card-table cards covering one region
tonyp@996 41
// Constructs a dirty-card-to-oop closure for a single region. The NULL
// passed to the ContiguousSpaceDCTOC base is its boundary argument, which
// this G1 subclass does not use; filtering is selected via fk instead.
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, OopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1)
{ }
ysr@345 49
// Caches the bounds of r so that the per-oop containment test needs only
// two pointer compares; oops outside [r->bottom(), r->end()) are passed
// on to oc.
FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()),
  _oc(oc), _out_of_region(0)
{}
ysr@345 55
// Heap-verification closure: applied to every reference field of every
// live object, it checks (a) that the referenced object is inside the
// heap and not dead under the chosen verification option, and (b) for
// cross-region references outside a full collection, that a remembered
// set entry (or a dirty card / young source region) accounts for the
// reference. Failures are logged and counted rather than aborting, so a
// single verification pass can report every problem it finds.
class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;     // card table, for the missing-rem-set check
  oop _containing_obj;        // object whose fields are being scanned
  bool _failures;             // any failure seen so far?
  int _n_failures;            // one per failing object (not per field)
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord -> use mark word from object header.
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _vo(vo)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  // Must be called before scanning each object's fields.
  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop( oop* p) { do_oop_work(p); }

  // In product builds full oop printing is unavailable, so only the
  // class name is printed there.
  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    klassOop k = obj->klass();
    const char* class_name = instanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T> void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      // Check 1: the referenced object must be in the heap and live.
      if (!_g1h->is_in_closed_subset(obj) ||
          _g1h->is_obj_dead_cond(obj, _vo)) {
        if (!_failures) {
          gclog_or_tty->print_cr("");
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          // Reference points outside the heap entirely.
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 (void*) obj);
        } else {
          // Reference points to a dead object.
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        _failures = true;
        failed = true;
        _n_failures++;
      }

      // Check 2 (skipped during full collections): a cross-region,
      // non-humongous-target reference must be covered by the target
      // region's remembered set, unless the source region is young or
      // an unflushed log buffer could still account for a dirty card.
      if (!_g1h->full_collection()) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          // For object arrays only the field's own card matters; for
          // other objects a dirty card at the object head also excuses
          // the missing rem-set entry.
          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                              (_containing_obj->is_objArray() ?
                                  cv_field == dirty
                               : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            if (!_failures) {
              gclog_or_tty->print_cr("");
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
                                   "of obj "PTR_FORMAT", "
                                   "in region "HR_FORMAT,
                                   p, (void*) _containing_obj,
                                   HR_FORMAT_PARAMS(from));
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
                                   "in region "HR_FORMAT,
                                   (void*) obj,
                                   HR_FORMAT_PARAMS(to));
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                   cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            _failures = true;
            // Count each failing object at most once across both checks.
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};
ysr@345 185
// Applies cl to every live object starting in [cur, top) EXCEPT the last
// one, whose address is returned instead. The caller must handle that
// final object itself, because it may extend beyond top and therefore
// needs the MemRegion-bounded variant of oop_iterate. Precondition
// (implied by the initial unchecked oop(cur) access): at least one
// parseable object starts at cur.
template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                               HeapRegion* hr,
                               HeapWord* cur, HeapWord* top) {
  oop cur_oop = oop(cur);
  int oop_size = cur_oop->size();
  HeapWord* next_obj = cur + oop_size;
  // Loop while cur is not the last object starting below top.
  while (next_obj < top) {
    // Keep filtering the remembered set.
    if (!g1h->is_obj_dead(cur_oop, hr)) {
      // Bottom lies entirely below top, so we can call the
      // non-memRegion version of oop_iterate below.
      cur_oop->oop_iterate(cl);
    }
    cur = next_obj;
    cur_oop = oop(cur);
    oop_size = cur_oop->size();
    next_obj = cur + oop_size;
  }
  return cur;
}
ysr@345 207
// Applies the (possibly filtered) closure to the objects in [bottom, top)
// that intersect the card region mr. The first object (which may start
// before mr) and the last object (which may extend past top) are scanned
// with the MemRegion-bounded oop_iterate; objects strictly in between are
// handled by walk_mem_region_loop.
void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              OopClosure* cl) {
  G1CollectedHeap* g1h = _g1;

  int oop_size;

  // cl2 is cl wrapped in the filter selected by _fk (or cl itself for
  // NoFilterKind).
  OopClosure* cl2 = cl;

  // If we are scanning the remembered sets looking for refs
  // into the collection set during an evacuation pause then
  // we will want to 'discover' reference objects that point
  // to referents in the collection set.
  //
  // Unfortunately it is an instance of FilterIntoCSClosure
  // that is iterated over the reference fields of oops in
  // mr (and not the G1ParPushHeapRSClosure - which is the
  // cl parameter).
  // If we set the _ref_processor field in the FilterIntoCSClosure
  // instance, all the reference objects that are walked
  // (regardless of whether their referent object's are in
  // the cset) will be 'discovered'.
  //
  // The G1STWIsAlive closure considers a referent object that
  // is outside the cset as alive. The G1CopyingKeepAliveClosure
  // skips referents that are not in the cset.
  //
  // Therefore reference objects in mr with a referent that is
  // outside the cset should be OK.

  ReferenceProcessor* rp = _cl->_ref_processor;
  if (rp != NULL) {
    assert(rp == _g1->ref_processor_stw(), "should be stw");
    assert(_fk == IntoCSFilterKind, "should be looking for refs into CS");
  }

  FilterIntoCSClosure intoCSFilt(this, g1h, cl, rp);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);

  switch (_fk) {
  case IntoCSFilterKind: cl2 = &intoCSFilt; break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
    // A fresh filter instance is constructed per case so that
    // walk_mem_region_loop can be instantiated on the concrete closure
    // type (enabling non-virtual dispatch).
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;

    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl, rp);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}
ysr@345 294
// Constants used by setup_heap_region_size() below. Both size bounds are
// powers of two, matching the power-of-two rounding done there.

// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE ( 1024 * 1024 )

// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE ( 32 * 1024 * 1024 )

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER 2048
tonyp@996 310
tonyp@996 311 void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
tonyp@996 312 // region_size in bytes
tonyp@996 313 uintx region_size = G1HeapRegionSize;
tonyp@996 314 if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
tonyp@996 315 // We base the automatic calculation on the min heap size. This
tonyp@996 316 // can be problematic if the spread between min and max is quite
tonyp@996 317 // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
tonyp@996 318 // the max size, the region size might be way too large for the
tonyp@996 319 // min size. Either way, some users might have to set the region
tonyp@996 320 // size manually for some -Xms / -Xmx combos.
tonyp@996 321
tonyp@996 322 region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
tonyp@996 323 (uintx) MIN_REGION_SIZE);
tonyp@996 324 }
tonyp@996 325
tonyp@996 326 int region_size_log = log2_long((jlong) region_size);
tonyp@996 327 // Recalculate the region size to make sure it's a power of
tonyp@996 328 // 2. This means that region_size is the largest power of 2 that's
tonyp@996 329 // <= what we've calculated so far.
prr@1496 330 region_size = ((uintx)1 << region_size_log);
tonyp@996 331
tonyp@996 332 // Now make sure that we don't go over or under our limits.
tonyp@996 333 if (region_size < MIN_REGION_SIZE) {
tonyp@996 334 region_size = MIN_REGION_SIZE;
tonyp@996 335 } else if (region_size > MAX_REGION_SIZE) {
tonyp@996 336 region_size = MAX_REGION_SIZE;
tonyp@996 337 }
tonyp@996 338
tonyp@996 339 // And recalculate the log.
tonyp@996 340 region_size_log = log2_long((jlong) region_size);
tonyp@996 341
tonyp@996 342 // Now, set up the globals.
tonyp@996 343 guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
tonyp@996 344 LogOfHRGrainBytes = region_size_log;
tonyp@996 345
tonyp@996 346 guarantee(LogOfHRGrainWords == 0, "we should only set it once");
tonyp@996 347 LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;
tonyp@996 348
tonyp@996 349 guarantee(GrainBytes == 0, "we should only set it once");
tonyp@996 350 // The cast to int is safe, given that we've bounded region_size by
tonyp@996 351 // MIN_REGION_SIZE and MAX_REGION_SIZE.
tonyp@996 352 GrainBytes = (int) region_size;
tonyp@996 353
tonyp@996 354 guarantee(GrainWords == 0, "we should only set it once");
tonyp@996 355 GrainWords = GrainBytes >> LogHeapWordSize;
tonyp@996 356 guarantee(1 << LogOfHRGrainWords == GrainWords, "sanity");
tonyp@996 357
tonyp@996 358 guarantee(CardsPerRegion == 0, "we should only set it once");
tonyp@996 359 CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
tonyp@996 360 }
tonyp@996 361
// Called once full-GC compaction has rearranged this region's contents.
void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}
ysr@345 369
// Factory for the G1-specific dirty-card-to-oop closure over this region;
// the caller owns the returned (heap-allocated) closure.
DirtyCardToOopClosure*
HeapRegion::new_dcto_closure(OopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapRegionDCTOC::FilterKind fk) {
  return new HeapRegionDCTOC(G1CollectedHeap::heap(),
                             this, cl, precision, fk);
}
ysr@345 377
// Resets this region to a clean, non-young, unclaimed state. Humongous
// regions must already have been reverted via set_notHumongous() (the
// asserts enforce this). When par is true, clearing the remembered set
// and the claim value is deferred to the caller (see par_clear()).
void HeapRegion::hr_clear(bool par, bool clear_space) {
  assert(_humongous_type == NotHumongous,
         "we should have already filtered out humongous regions");
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == _orig_end,
         "we should have already filtered out humongous regions");

  _in_collection_set = false;

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (hrrs != NULL) hrrs->clear();
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();
  set_sort_index(-1);

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}
ysr@345 406
// Completes the clearing deferred by hr_clear(par = true, ...): resets
// the remembered set and clears the card table over the whole region.
// The region must already be empty and back to its normal size.
void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == (size_t) HeapRegion::GrainBytes,
         "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}
tonyp@2558 417
// <PREDICTION>
// GC efficiency = reclaimable garbage bytes per predicted millisecond of
// (non-young) collection time; used to rank regions for collection-set
// selection.
void HeapRegion::calc_gc_efficiency() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  _gc_efficiency = (double) garbage_bytes() /
                     g1h->predict_region_elapsed_time_ms(this, false);
}
// </PREDICTION>
ysr@345 425
// Converts this (empty, normal-sized) region into the first region of a
// humongous object: stretches its end to new_end so it covers any
// "continues humongous" tail regions, and informs the offset table where
// the object's last block starts (new_top).
void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _humongous_type = StartsHumongous;
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}
tonyp@1911 439
// Marks this (empty, normal-sized) region as a continuation of the
// humongous object that begins in first_hr.
void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->startsHumongous(), "pre-condition");

  _humongous_type = ContinuesHumongous;
  _humongous_start_region = first_hr;
}
ysr@345 450
// Reverts a humongous region (either kind) back to a normal region,
// undoing the end-stretching done by set_startsHumongous().
void HeapRegion::set_notHumongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == (size_t) HeapRegion::GrainBytes, "pre-condition");
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
}
tonyp@2165 470
ysr@345 471 bool HeapRegion::claimHeapRegion(jint claimValue) {
ysr@345 472 jint current = _claimed;
ysr@345 473 if (current != claimValue) {
ysr@345 474 jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
ysr@345 475 if (res == current) {
ysr@345 476 return true;
ysr@345 477 }
ysr@345 478 }
ysr@345 479 return false;
ysr@345 480 }
ysr@345 481
// Returns the first block start at or after addr, located by binary
// search between addr and end() using only "careful" (non-BOT-updating)
// block-start queries.
HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount. Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high". This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      // Block containing middle starts before addr: answer lies above.
      low = middle;
    } else {
      // mid_bs is itself a candidate block start at or after addr.
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}
ysr@345 504
// (Re)initializes the underlying space over mr and resets all
// region-local state. The base class is passed false for its clear flag
// because any requested clearing is performed by hr_clear() instead.
void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
  hr_clear(false/*par*/, clear_space);
}
ysr@345 509 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
ysr@345 510 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
ysr@345 511 #endif // _MSC_VER
ysr@345 512
ysr@345 513
// Constructs a region covering mr at index hrs_index in the region
// sequence. All per-region state starts out in the "clean, non-young,
// unclaimed" configuration; a fresh remembered set is allocated here.
HeapRegion::
HeapRegion(size_t hrs_index, G1BlockOffsetSharedArray* sharedOffsetArray,
           MemRegion mr, bool is_zeroed)
  : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
    _hrs_index(hrs_index),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
    _gc_efficiency(0.0),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  // Remember the unstretched end so set_startsHumongous()/set_notHumongous()
  // can stretch and restore it.
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
  set_top(bottom());
  set_saved_mark();

  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
  // In case the region is allocated during a pause, note the top.
  // We haven't done any counting on a brand new region.
  _top_at_conc_mark_count = bottom();
}
ysr@345 548
ysr@345 549 class NextCompactionHeapRegionClosure: public HeapRegionClosure {
ysr@345 550 const HeapRegion* _target;
ysr@345 551 bool _target_seen;
ysr@345 552 HeapRegion* _last;
ysr@345 553 CompactibleSpace* _res;
ysr@345 554 public:
ysr@345 555 NextCompactionHeapRegionClosure(const HeapRegion* target) :
ysr@345 556 _target(target), _target_seen(false), _res(NULL) {}
ysr@345 557 bool doHeapRegion(HeapRegion* cur) {
ysr@345 558 if (_target_seen) {
ysr@345 559 if (!cur->isHumongous()) {
ysr@345 560 _res = cur;
ysr@345 561 return true;
ysr@345 562 }
ysr@345 563 } else if (cur == _target) {
ysr@345 564 _target_seen = true;
ysr@345 565 }
ysr@345 566 return false;
ysr@345 567 }
ysr@345 568 CompactibleSpace* result() { return _res; }
ysr@345 569 };
ysr@345 570
ysr@345 571 CompactibleSpace* HeapRegion::next_compaction_space() const {
ysr@345 572 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@345 573 // cast away const-ness
ysr@345 574 HeapRegion* r = (HeapRegion*) this;
ysr@345 575 NextCompactionHeapRegionClosure blk(r);
ysr@345 576 g1h->heap_region_iterate_from(r, &blk);
ysr@345 577 return blk.result();
ysr@345 578 }
ysr@345 579
// Records the current allocation point as the saved mark; iteration
// helpers below use it to bound "allocated before this pause" scans.
void HeapRegion::save_marks() {
  set_saved_mark();
}
ysr@345 583
ysr@345 584 void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) {
ysr@345 585 HeapWord* p = mr.start();
ysr@345 586 HeapWord* e = mr.end();
ysr@345 587 oop obj;
ysr@345 588 while (p < e) {
ysr@345 589 obj = oop(p);
ysr@345 590 p += obj->oop_iterate(cl);
ysr@345 591 }
ysr@345 592 assert(p == e, "bad memregion: doesn't end on obj boundary");
ysr@345 593 }
ysr@345 594
// Generates the type-specialized oop_since_save_marks_iterate##nv_suffix
// definitions for every closure type in SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES;
// each one simply delegates to the ContiguousSpace implementation.
#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)
ysr@345 600
ysr@345 601
// Applies cl to all oops in the part of the region allocated before the
// most recent save_marks() call (i.e. [bottom, saved mark)).
void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}
ysr@345 605
// Applies cl to every live object intersecting mr, stopping early when
// an unparseable point (an object whose klass is not yet installed) is
// reached or when the closure requests abort. Returns the address where
// iteration stopped, or NULL if all of mr was processed.
HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  // NOTE(review): cur is computed from the original mr.start(), before
  // the intersection below possibly moves mr.start() forward - confirm
  // callers pass an mr starting within the used region.
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}
ysr@345 637
// Scans the objects intersecting a single card's memory region (mr) with
// cl, being careful about unparseable points. Returns NULL on success,
// or the address of an unparseable point so the caller can defer/retry
// the card. When filter_young is true, young regions are skipped (NULL
// returned) and card_ptr must be non-NULL so the card can be cleaned
// here, after the not-young decision is made.
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "saved_mark" of the region.
  if (G1CollectedHeap::heap()->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  assert(cur <= mr.start(), "Postcondition");

  // Advance from the block start to the object that actually extends
  // onto mr.start(); bail out at any unparseable point.
  while (cur <= mr.start()) {
    if (oop(cur)->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    int sz = oop(cur)->size();
    if (cur + sz > mr.start()) break;
    // Otherwise, go on.
    cur = cur + sz;
  }
  oop obj;
  obj = oop(cur);
  // If we finish this loop...
  assert(cur <= mr.start()
         && obj->klass_or_null() != NULL
         && cur + obj->size() > mr.start(),
         "Loop postcondition");
  // First object: may start before mr, so bound the scan to mr.
  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  // NOTE(review): cur still points at the object just scanned above, so
  // the loop below appears to apply cl to that first object a second
  // time (unbounded, if it ends before mr.end()). With the filtering
  // closures used here that is redundant rather than incorrect work -
  // confirm whether this duplication is intended.
  HeapWord* next;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    };
    // Otherwise:
    next = (cur + obj->size());
    if (!g1h->is_obj_dead(obj)) {
      if (next < mr.end()) {
        obj->oop_iterate(cl);
      } else {
        // this obj spans the boundary. If it's an array, stop at the
        // boundary.
        if (obj->is_objArray()) {
          obj->oop_iterate(cl, mr);
        } else {
          obj->oop_iterate(cl);
        }
      }
    }
    cur = next;
  }
  return NULL;
}
ysr@345 738
ysr@345 739 void HeapRegion::print() const { print_on(gclog_or_tty); }
ysr@345 740 void HeapRegion::print_on(outputStream* st) const {
ysr@345 741 if (isHumongous()) {
ysr@345 742 if (startsHumongous())
ysr@345 743 st->print(" HS");
ysr@345 744 else
ysr@345 745 st->print(" HC");
ysr@345 746 } else {
ysr@345 747 st->print(" ");
ysr@345 748 }
ysr@345 749 if (in_collection_set())
ysr@345 750 st->print(" CS");
ysr@345 751 else
ysr@345 752 st->print(" ");
ysr@345 753 if (is_young())
johnc@1483 754 st->print(is_survivor() ? " SU" : " Y ");
ysr@345 755 else
ysr@345 756 st->print(" ");
ysr@345 757 if (is_empty())
ysr@345 758 st->print(" F");
ysr@345 759 else
ysr@345 760 st->print(" ");
tonyp@1079 761 st->print(" %5d", _gc_time_stamp);
tonyp@1477 762 st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
tonyp@1477 763 prev_top_at_mark_start(), next_top_at_mark_start());
ysr@345 764 G1OffsetTableContigSpace::print_on(st);
ysr@345 765 }
ysr@345 766
tonyp@860 767 void HeapRegion::verify(bool allow_dirty) const {
tonyp@1079 768 bool dummy = false;
johnc@2767 769 verify(allow_dirty, VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
tonyp@860 770 }
tonyp@860 771
ysr@345 772 // This really ought to be commoned up into OffsetTableContigSpace somehow.
ysr@345 773 // We would need a mechanism to make that code skip dead objects.
ysr@345 774
tonyp@1079 775 void HeapRegion::verify(bool allow_dirty,
johnc@2767 776 VerifyOption vo,
tonyp@1079 777 bool* failures) const {
ysr@345 778 G1CollectedHeap* g1 = G1CollectedHeap::heap();
tonyp@1079 779 *failures = false;
ysr@345 780 HeapWord* p = bottom();
ysr@345 781 HeapWord* prev_p = NULL;
johnc@2767 782 VerifyLiveClosure vl_cl(g1, vo);
tonyp@1740 783 bool is_humongous = isHumongous();
tonyp@2146 784 bool do_bot_verify = !is_young();
tonyp@1740 785 size_t object_num = 0;
ysr@345 786 while (p < top()) {
tonyp@2146 787 oop obj = oop(p);
tonyp@2146 788 size_t obj_size = obj->size();
tonyp@2146 789 object_num += 1;
tonyp@2146 790
tonyp@2146 791 if (is_humongous != g1->isHumongous(obj_size)) {
tonyp@1740 792 gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
tonyp@1740 793 SIZE_FORMAT" words) in a %shumongous region",
tonyp@2146 794 p, g1->isHumongous(obj_size) ? "" : "non-",
tonyp@2146 795 obj_size, is_humongous ? "" : "non-");
tonyp@1740 796 *failures = true;
tonyp@2146 797 return;
tonyp@1740 798 }
tonyp@2146 799
tonyp@2146 800 // If it returns false, verify_for_object() will output the
tonyp@2146 801 // appropriate messasge.
tonyp@2146 802 if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
tonyp@2146 803 *failures = true;
tonyp@2146 804 return;
tonyp@2146 805 }
tonyp@2146 806
johnc@2767 807 if (!g1->is_obj_dead_cond(obj, this, vo)) {
tonyp@2146 808 if (obj->is_oop()) {
tonyp@2146 809 klassOop klass = obj->klass();
tonyp@2146 810 if (!klass->is_perm()) {
tonyp@2146 811 gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
tonyp@2146 812 "not in perm", klass, obj);
tonyp@2146 813 *failures = true;
tonyp@2146 814 return;
tonyp@2146 815 } else if (!klass->is_klass()) {
tonyp@2146 816 gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
tonyp@2146 817 "not a klass", klass, obj);
tonyp@2146 818 *failures = true;
tonyp@2146 819 return;
tonyp@2146 820 } else {
tonyp@2146 821 vl_cl.set_containing_obj(obj);
tonyp@2146 822 obj->oop_iterate(&vl_cl);
tonyp@2146 823 if (vl_cl.failures()) {
tonyp@2146 824 *failures = true;
tonyp@2146 825 }
tonyp@2146 826 if (G1MaxVerifyFailures >= 0 &&
tonyp@2146 827 vl_cl.n_failures() >= G1MaxVerifyFailures) {
tonyp@2146 828 return;
tonyp@2146 829 }
tonyp@2146 830 }
tonyp@2146 831 } else {
tonyp@2146 832 gclog_or_tty->print_cr(PTR_FORMAT" no an oop", obj);
tonyp@1079 833 *failures = true;
tonyp@1079 834 return;
tonyp@1079 835 }
ysr@345 836 }
ysr@345 837 prev_p = p;
tonyp@2146 838 p += obj_size;
ysr@345 839 }
tonyp@2146 840
tonyp@2146 841 if (p != top()) {
tonyp@2146 842 gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
tonyp@2146 843 "does not match top "PTR_FORMAT, p, top());
tonyp@2146 844 *failures = true;
tonyp@2146 845 return;
tonyp@2146 846 }
tonyp@2146 847
tonyp@2146 848 HeapWord* the_end = end();
tonyp@2146 849 assert(p == top(), "it should still hold");
tonyp@2146 850 // Do some extra BOT consistency checking for addresses in the
tonyp@2146 851 // range [top, end). BOT look-ups in this range should yield
tonyp@2146 852 // top. No point in doing that if top == end (there's nothing there).
tonyp@2146 853 if (p < the_end) {
tonyp@2146 854 // Look up top
tonyp@2146 855 HeapWord* addr_1 = p;
tonyp@2146 856 HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
tonyp@2146 857 if (b_start_1 != p) {
tonyp@2146 858 gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
tonyp@2146 859 " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
tonyp@2146 860 addr_1, b_start_1, p);
tonyp@2146 861 *failures = true;
tonyp@2146 862 return;
tonyp@2146 863 }
tonyp@2146 864
tonyp@2146 865 // Look up top + 1
tonyp@2146 866 HeapWord* addr_2 = p + 1;
tonyp@2146 867 if (addr_2 < the_end) {
tonyp@2146 868 HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
tonyp@2146 869 if (b_start_2 != p) {
tonyp@2146 870 gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
tonyp@2146 871 " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
tonyp@2146 872 addr_2, b_start_2, p);
tonyp@1079 873 *failures = true;
tonyp@1079 874 return;
tonyp@2146 875 }
tonyp@2146 876 }
tonyp@2146 877
tonyp@2146 878 // Look up an address between top and end
tonyp@2146 879 size_t diff = pointer_delta(the_end, p) / 2;
tonyp@2146 880 HeapWord* addr_3 = p + diff;
tonyp@2146 881 if (addr_3 < the_end) {
tonyp@2146 882 HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
tonyp@2146 883 if (b_start_3 != p) {
tonyp@2146 884 gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
tonyp@2146 885 " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
tonyp@2146 886 addr_3, b_start_3, p);
tonyp@2146 887 *failures = true;
tonyp@2146 888 return;
tonyp@2146 889 }
tonyp@2146 890 }
tonyp@2146 891
tonyp@2146 892 // Loook up end - 1
tonyp@2146 893 HeapWord* addr_4 = the_end - 1;
tonyp@2146 894 HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
tonyp@2146 895 if (b_start_4 != p) {
tonyp@2146 896 gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
tonyp@2146 897 " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
tonyp@2146 898 addr_4, b_start_4, p);
tonyp@2146 899 *failures = true;
tonyp@2146 900 return;
tonyp@1079 901 }
ysr@345 902 }
tonyp@1079 903
tonyp@1740 904 if (is_humongous && object_num > 1) {
tonyp@1740 905 gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
tonyp@1740 906 "but has "SIZE_FORMAT", objects",
tonyp@1740 907 bottom(), end(), object_num);
tonyp@1740 908 *failures = true;
tonyp@1079 909 return;
ysr@345 910 }
ysr@345 911 }
ysr@345 912
ysr@345 913 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
ysr@345 914 // away eventually.
ysr@345 915
tonyp@359 916 void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
ysr@345 917 // false ==> we'll do the clearing if there's clearing to be done.
tonyp@359 918 ContiguousSpace::initialize(mr, false, mangle_space);
ysr@345 919 _offsets.zero_bottom_entry();
ysr@345 920 _offsets.initialize_threshold();
tonyp@359 921 if (clear_space) clear(mangle_space);
ysr@345 922 }
ysr@345 923
tonyp@359 924 void G1OffsetTableContigSpace::clear(bool mangle_space) {
tonyp@359 925 ContiguousSpace::clear(mangle_space);
ysr@345 926 _offsets.zero_bottom_entry();
ysr@345 927 _offsets.initialize_threshold();
ysr@345 928 }
ysr@345 929
ysr@345 930 void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
ysr@345 931 Space::set_bottom(new_bottom);
ysr@345 932 _offsets.set_bottom(new_bottom);
ysr@345 933 }
ysr@345 934
ysr@345 935 void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
ysr@345 936 Space::set_end(new_end);
ysr@345 937 _offsets.resize(new_end - bottom());
ysr@345 938 }
ysr@345 939
ysr@345 940 void G1OffsetTableContigSpace::print() const {
ysr@345 941 print_short();
ysr@345 942 gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
ysr@345 943 INTPTR_FORMAT ", " INTPTR_FORMAT ")",
ysr@345 944 bottom(), top(), _offsets.threshold(), end());
ysr@345 945 }
ysr@345 946
ysr@345 947 HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
ysr@345 948 return _offsets.initialize_threshold();
ysr@345 949 }
ysr@345 950
ysr@345 951 HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
ysr@345 952 HeapWord* end) {
ysr@345 953 _offsets.alloc_block(start, end);
ysr@345 954 return _offsets.threshold();
ysr@345 955 }
ysr@345 956
ysr@345 957 HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
ysr@345 958 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@345 959 assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
ysr@345 960 if (_gc_time_stamp < g1h->get_gc_time_stamp())
ysr@345 961 return top();
ysr@345 962 else
ysr@345 963 return ContiguousSpace::saved_mark_word();
ysr@345 964 }
ysr@345 965
ysr@345 966 void G1OffsetTableContigSpace::set_saved_mark() {
ysr@345 967 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@345 968 unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
ysr@345 969
ysr@345 970 if (_gc_time_stamp < curr_gc_time_stamp) {
ysr@345 971 // The order of these is important, as another thread might be
ysr@345 972 // about to start scanning this region. If it does so after
ysr@345 973 // set_saved_mark and before _gc_time_stamp = ..., then the latter
ysr@345 974 // will be false, and it will pick up top() as the high water mark
ysr@345 975 // of region. If it does so after _gc_time_stamp = ..., then it
ysr@345 976 // will pick up the right saved_mark_word() as the high water mark
ysr@345 977 // of the region. Either way, the behaviour will be correct.
ysr@345 978 ContiguousSpace::set_saved_mark();
ysr@896 979 OrderAccess::storestore();
iveresov@356 980 _gc_time_stamp = curr_gc_time_stamp;
tonyp@2417 981 // No need to do another barrier to flush the writes above. If
tonyp@2417 982 // this is called in parallel with other threads trying to
tonyp@2417 983 // allocate into the region, the caller should call this while
tonyp@2417 984 // holding a lock and when the lock is released the writes will be
tonyp@2417 985 // flushed.
ysr@345 986 }
ysr@345 987 }
ysr@345 988
ysr@345 989 G1OffsetTableContigSpace::
ysr@345 990 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
ysr@345 991 MemRegion mr, bool is_zeroed) :
ysr@345 992 _offsets(sharedOffsetArray, mr),
ysr@345 993 _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
ysr@345 994 _gc_time_stamp(0)
ysr@345 995 {
ysr@345 996 _offsets.set_space(this);
tonyp@359 997 initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
ysr@345 998 }