annotate src/share/vm/gc_implementation/g1/heapRegion.cpp @ 4233:7383557659bd

7185699: G1: Prediction model discrepancies Summary: Correct the result value of G1CollectedHeap::pending_card_num(). Change the code that calculates the GC efficiency of a non-young heap region to use historical data from mixed GCs and the actual number of live bytes when predicting how long it would take to collect the region. Changes were also reviewed by Thomas Schatzl. Reviewed-by: azeemj, brutisso
author johnc
date Tue, 21 Aug 2012 14:10:39 -0700
parents a2f7274eb6ef
children 7afe50dc6b9f
rev   line source
ysr@345 1 /*
tonyp@3280 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
ysr@345 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@345 4 *
ysr@345 5 * This code is free software; you can redistribute it and/or modify it
ysr@345 6 * under the terms of the GNU General Public License version 2 only, as
ysr@345 7 * published by the Free Software Foundation.
ysr@345 8 *
ysr@345 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@345 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@345 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@345 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@345 13 * accompanied this code).
ysr@345 14 *
ysr@345 15 * You should have received a copy of the GNU General Public License version
ysr@345 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@345 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@345 18 *
trims@1563 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1563 20 * or visit www.oracle.com if you need additional information or have any
trims@1563 21 * questions.
ysr@345 22 *
ysr@345 23 */
ysr@345 24
stefank@1992 25 #include "precompiled.hpp"
stefank@1992 26 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
stefank@1992 27 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@1992 28 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
stefank@1992 29 #include "gc_implementation/g1/heapRegion.inline.hpp"
stefank@1992 30 #include "gc_implementation/g1/heapRegionRemSet.hpp"
stefank@1992 31 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
stefank@1992 32 #include "memory/genOopClosures.inline.hpp"
stefank@1992 33 #include "memory/iterator.hpp"
stefank@1992 34 #include "oops/oop.inline.hpp"
ysr@345 35
// Global heap-region geometry. All five fields are zero-initialized here
// and set exactly once by HeapRegion::setup_heap_region_size() (which
// guarantees "we should only set it once" for each of them).
int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes = 0;
size_t HeapRegion::GrainWords = 0;
size_t HeapRegion::CardsPerRegion = 0;
tonyp@996 41
// Dirty-card-to-oop closure bound to a single heap region. The filter
// kind (_fk) selects how oops found on dirty cards are filtered before
// being handed to the wrapped closure (see walk_mem_region_with_cl).
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, OopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1) { }
ysr@345 48
// Captures the bounds of region r; the closure's do_oop implementation
// (defined elsewhere) presumably forwards to _oc only references that
// fall outside [_r_bottom, _r_end) — confirm against the header.
FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
ysr@345 52
// Heap-verification closure. For each reference field visited it checks
// that the referenced object is inside the heap and live (under the
// marking information selected by _vo), and — outside of full
// collections — that cross-region references have a remembered-set
// entry (or are excused by a young source region / dirty card).
// Failures are logged under ParGCRareEvent_lock and counted rather than
// aborting immediately, so all problems are reported in one pass.
class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;  // card table; used to excuse missing RS entries via dirty cards
  oop _containing_obj;     // object whose fields are currently being scanned
  bool _failures;          // true once any verification failure has been seen
  int _n_failures;         // number of failing fields found so far
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord -> use mark word from object header.
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _vo(vo)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  // Must be called before iterating the fields of obj so that failure
  // messages (and the liveness precondition) refer to the right holder.
  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  // Describe obj on the given stream: just the class name in PRODUCT
  // builds, the full object printout otherwise.
  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    klassOop k = obj->klass();
    const char* class_name = instanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      // Check 1: the referent must be in the heap and live.
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        // Serialize logging: verification may run in multiple GC threads.
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          gclog_or_tty->print_cr("");
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          // Referent is outside the heap entirely.
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 (void*) obj);
        } else {
          // Referent is in the heap but dead under _vo.
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        gclog_or_tty->flush();
        _failures = true;
        failed = true;
        _n_failures++;
      }

      // Check 2: remembered-set completeness (skipped during full GC).
      if (!_g1h->full_collection()) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          // A cross-region reference is acceptable if the source region
          // is young, the target's rem set records it, or (when log
          // buffers were not flushed before verification) the relevant
          // card is still dirty and would supply the entry later.
          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                              (_containing_obj->is_objArray() ?
                                  cv_field == dirty
                               : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            MutexLockerEx x(ParGCRareEvent_lock,
                            Mutex::_no_safepoint_check_flag);

            if (!_failures) {
              gclog_or_tty->print_cr("");
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
                                   "of obj "PTR_FORMAT", "
                                   "in region "HR_FORMAT,
                                   p, (void*) _containing_obj,
                                   HR_FORMAT_PARAMS(from));
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
                                   "in region "HR_FORMAT,
                                   (void*) obj,
                                   HR_FORMAT_PARAMS(to));
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                   cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            gclog_or_tty->flush();
            _failures = true;
            // Count each field at most once across both checks.
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};
ysr@345 190
ysr@345 191 template<class ClosureType>
ysr@345 192 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
ysr@345 193 HeapRegion* hr,
ysr@345 194 HeapWord* cur, HeapWord* top) {
ysr@345 195 oop cur_oop = oop(cur);
ysr@345 196 int oop_size = cur_oop->size();
ysr@345 197 HeapWord* next_obj = cur + oop_size;
ysr@345 198 while (next_obj < top) {
ysr@345 199 // Keep filtering the remembered set.
ysr@345 200 if (!g1h->is_obj_dead(cur_oop, hr)) {
ysr@345 201 // Bottom lies entirely below top, so we can call the
ysr@345 202 // non-memRegion version of oop_iterate below.
ysr@345 203 cur_oop->oop_iterate(cl);
ysr@345 204 }
ysr@345 205 cur = next_obj;
ysr@345 206 cur_oop = oop(cur);
ysr@345 207 oop_size = cur_oop->size();
ysr@345 208 next_obj = cur + oop_size;
ysr@345 209 }
ysr@345 210 return cur;
ysr@345 211 }
ysr@345 212
// Apply cl (wrapped in the filtering closure selected by _fk) to the
// oops of all live objects in [bottom, top), bounding the first and
// last objects to mr since they may extend beyond it. bottom must be
// the start of an object.
void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              OopClosure* cl) {
  G1CollectedHeap* g1h = _g1;
  int oop_size;
  OopClosure* cl2 = NULL;

  // Both candidate wrappers are constructed up front; cl2 points at the
  // one selected by the filter kind (or at cl itself for NoFilterKind).
  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);

  switch (_fk) {
  case NoFilterKind:          cl2 = cl; break;
  case IntoCSFilterKind:      cl2 = &intoCSFilt; break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  default:                    ShouldNotReachHere();
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  // First object: may start before mr, so use the bounded iteration.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
    // The loop is instantiated per concrete closure type so the calls
    // can be statically bound; a fresh filter instance is built in each
    // branch for that purpose.
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;

    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    // It may extend past mr, hence the bounded iteration again.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}
ysr@345 272
tonyp@996 273 // Minimum region size; we won't go lower than that.
tonyp@996 274 // We might want to decrease this in the future, to deal with small
tonyp@996 275 // heaps a bit more efficiently.
tonyp@996 276 #define MIN_REGION_SIZE ( 1024 * 1024 )
tonyp@996 277
tonyp@996 278 // Maximum region size; we don't go higher than that. There's a good
tonyp@996 279 // reason for having an upper bound. We don't want regions to get too
tonyp@996 280 // large, otherwise cleanup's effectiveness would decrease as there
tonyp@996 281 // will be fewer opportunities to find totally empty regions after
tonyp@996 282 // marking.
tonyp@996 283 #define MAX_REGION_SIZE ( 32 * 1024 * 1024 )
tonyp@996 284
tonyp@996 285 // The automatic region size calculation will try to have around this
tonyp@996 286 // many regions in the heap (based on the min heap size).
tonyp@996 287 #define TARGET_REGION_NUMBER 2048
tonyp@996 288
tonyp@996 289 void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
tonyp@996 290 // region_size in bytes
tonyp@996 291 uintx region_size = G1HeapRegionSize;
tonyp@996 292 if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
tonyp@996 293 // We base the automatic calculation on the min heap size. This
tonyp@996 294 // can be problematic if the spread between min and max is quite
tonyp@996 295 // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
tonyp@996 296 // the max size, the region size might be way too large for the
tonyp@996 297 // min size. Either way, some users might have to set the region
tonyp@996 298 // size manually for some -Xms / -Xmx combos.
tonyp@996 299
tonyp@996 300 region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
tonyp@996 301 (uintx) MIN_REGION_SIZE);
tonyp@996 302 }
tonyp@996 303
tonyp@996 304 int region_size_log = log2_long((jlong) region_size);
tonyp@996 305 // Recalculate the region size to make sure it's a power of
tonyp@996 306 // 2. This means that region_size is the largest power of 2 that's
tonyp@996 307 // <= what we've calculated so far.
prr@1496 308 region_size = ((uintx)1 << region_size_log);
tonyp@996 309
tonyp@996 310 // Now make sure that we don't go over or under our limits.
tonyp@996 311 if (region_size < MIN_REGION_SIZE) {
tonyp@996 312 region_size = MIN_REGION_SIZE;
tonyp@996 313 } else if (region_size > MAX_REGION_SIZE) {
tonyp@996 314 region_size = MAX_REGION_SIZE;
tonyp@996 315 }
tonyp@996 316
tonyp@996 317 // And recalculate the log.
tonyp@996 318 region_size_log = log2_long((jlong) region_size);
tonyp@996 319
tonyp@996 320 // Now, set up the globals.
tonyp@996 321 guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
tonyp@996 322 LogOfHRGrainBytes = region_size_log;
tonyp@996 323
tonyp@996 324 guarantee(LogOfHRGrainWords == 0, "we should only set it once");
tonyp@996 325 LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;
tonyp@996 326
tonyp@996 327 guarantee(GrainBytes == 0, "we should only set it once");
tonyp@996 328 // The cast to int is safe, given that we've bounded region_size by
tonyp@996 329 // MIN_REGION_SIZE and MAX_REGION_SIZE.
johnc@3035 330 GrainBytes = (size_t)region_size;
tonyp@996 331
tonyp@996 332 guarantee(GrainWords == 0, "we should only set it once");
tonyp@996 333 GrainWords = GrainBytes >> LogHeapWordSize;
tonyp@3953 334 guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");
tonyp@996 335
tonyp@996 336 guarantee(CardsPerRegion == 0, "we should only set it once");
tonyp@996 337 CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
tonyp@996 338 }
tonyp@996 339
// Restore the region's bookkeeping after a (full-GC) compaction has
// moved objects within it.
void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}
ysr@345 347
// Reset this (non-humongous) region to a clean, unused state: drop it
// from the collection set, clear young-generation bookkeeping, marking
// data and (unless clearing in parallel) the remembered set and claim
// value. If clear_space is set, the underlying space is cleared too.
void HeapRegion::hr_clear(bool par, bool clear_space) {
  assert(_humongous_type == NotHumongous,
         "we should have already filtered out humongous regions");
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == _orig_end,
         "we should have already filtered out humongous regions");

  _in_collection_set = false;

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (hrrs != NULL) hrrs->clear();
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}
ysr@345 375
// Parallel-phase counterpart of the clearing skipped in
// hr_clear(true, ...): clears this region's remembered set and its
// card-table entries. The region itself must already be empty.
void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}
tonyp@2558 385
// Recompute _gc_efficiency (reclaimable bytes per predicted millisecond
// of collection time) for this region.
void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}
ysr@345 399
// Turn this empty, normal region into the first ("starts humongous")
// region of a humongous object series. new_end becomes this region's
// end (it covers the whole series; see set_notHumongous, which restores
// _orig_end) and new_top is handed to the block offset table for the
// humongous layout.
void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _humongous_type = StartsHumongous;
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}
tonyp@1911 413
// Turn this empty, normal region into a "continues humongous" region,
// i.e. one of the tail regions of the humongous series started by
// first_hr.
void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->startsHumongous(), "pre-condition");

  _humongous_type = ContinuesHumongous;
  _humongous_start_region = first_hr;
}
ysr@345 424
// Undo set_startsHumongous / set_continuesHumongous: restore the
// region's original end (and cap top at it) and clear the humongous
// bookkeeping.
void HeapRegion::set_notHumongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    // end() currently spans the whole humongous series; shrink it back.
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
}
tonyp@2165 444
ysr@345 445 bool HeapRegion::claimHeapRegion(jint claimValue) {
ysr@345 446 jint current = _claimed;
ysr@345 447 if (current != claimValue) {
ysr@345 448 jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
ysr@345 449 if (res == current) {
ysr@345 450 return true;
ysr@345 451 }
ysr@345 452 }
ysr@345 453 return false;
ysr@345 454 }
ysr@345 455
ysr@345 456 HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
ysr@345 457 HeapWord* low = addr;
ysr@345 458 HeapWord* high = end();
ysr@345 459 while (low < high) {
ysr@345 460 size_t diff = pointer_delta(high, low);
ysr@345 461 // Must add one below to bias toward the high amount. Otherwise, if
ysr@345 462 // "high" were at the desired value, and "low" were one less, we
ysr@345 463 // would not converge on "high". This is not symmetric, because
ysr@345 464 // we set "high" to a block start, which might be the right one,
ysr@345 465 // which we don't do for "low".
ysr@345 466 HeapWord* middle = low + (diff+1)/2;
ysr@345 467 if (middle == high) return high;
ysr@345 468 HeapWord* mid_bs = block_start_careful(middle);
ysr@345 469 if (mid_bs < addr) {
ysr@345 470 low = middle;
ysr@345 471 } else {
ysr@345 472 high = mid_bs;
ysr@345 473 }
ysr@345 474 }
ysr@345 475 assert(low == high && low >= addr, "Didn't work.");
ysr@345 476 return low;
ysr@345 477 }
ysr@345 478
// Initialize the underlying contiguous space for mr (without clearing
// it there — note the hard-coded "false") and reset the region's own
// state; the space is cleared via hr_clear iff clear_space is set.
void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
  hr_clear(false/*par*/, clear_space);
}
ysr@345 483 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
ysr@345 484 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
ysr@345 485 #endif // _MSC_VER
ysr@345 486
ysr@345 487
// Construct a heap region covering mr at index hrs_index in the region
// sequence. All policy/bookkeeping fields start in their "unused
// region" state; the remembered set is allocated here.
HeapRegion::HeapRegion(uint hrs_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr, bool is_zeroed) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
    _hrs_index(hrs_index),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
     _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  // Remember the original end; set_startsHumongous temporarily moves
  // end() past it and set_notHumongous restores it from here.
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
  set_top(bottom());
  set_saved_mark();

  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
}
ysr@345 518
tonyp@4192 519 CompactibleSpace* HeapRegion::next_compaction_space() const {
tonyp@4192 520 // We're not using an iterator given that it will wrap around when
tonyp@4192 521 // it reaches the last region and this is not what we want here.
tonyp@4192 522 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@4192 523 uint index = hrs_index() + 1;
tonyp@4192 524 while (index < g1h->n_regions()) {
tonyp@4192 525 HeapRegion* hr = g1h->region_at(index);
tonyp@4192 526 if (!hr->isHumongous()) {
tonyp@4192 527 return hr;
ysr@345 528 }
tonyp@4192 529 index += 1;
ysr@345 530 }
tonyp@4192 531 return NULL;
ysr@345 532 }
ysr@345 533
// Record the current allocation point (saved mark); later iteration can
// then be bounded to objects allocated before/after this point (see
// oop_before_save_marks_iterate and used_region_at_save_marks users).
void HeapRegion::save_marks() {
  set_saved_mark();
}
ysr@345 537
ysr@345 538 void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) {
ysr@345 539 HeapWord* p = mr.start();
ysr@345 540 HeapWord* e = mr.end();
ysr@345 541 oop obj;
ysr@345 542 while (p < e) {
ysr@345 543 obj = oop(p);
ysr@345 544 p += obj->oop_iterate(cl);
ysr@345 545 }
ysr@345 546 assert(p == e, "bad memregion: doesn't end on obj boundary");
ysr@345 547 }
ysr@345 548
// Generate the family of oop_since_save_marks_iterate##nv_suffix
// methods (one per specialized closure type); each simply delegates to
// the ContiguousSpace implementation.
#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)
ysr@345 554
ysr@345 555
// Apply cl to the oops of all objects allocated before the saved mark,
// i.e. in [bottom(), saved_mark_word()).
void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}
ysr@345 559
// Prepare this region's TAMS/marked-bytes state before evacuation
// failure handling removes self-forwarding pointers from it.
void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}
tonyp@3280 582
// Record, after self-forwarding-pointer removal, how many bytes were
// marked live on the prev bitmap for this region. The _initial_mark /
// _conc_mark flags are currently unused here; only the byte count is
// consumed.
void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(0 <= marked_bytes && marked_bytes <= used(),
         err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
                 marked_bytes, used()));
  _prev_marked_bytes = marked_bytes;
}
tonyp@3280 591
// Apply cl to every live object intersecting mr, being careful about
// unparseable areas. Returns NULL when the whole range was processed,
// or the address at which iteration stopped: either an unparseable
// point (an object whose klass is not yet installed) or the object at
// which the closure requested an abort.
HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}
ysr@345 623
// Apply cl to the oops of the objects that intersect mr (a single card's
// worth of memory), being careful about unparseable areas. Young
// regions are skipped when filter_young is set; the card itself is
// cleaned (card_ptr) only after deciding the region is not young.
// Returns NULL on success, or the address of an unparseable point at
// which the scan had to stop (so the caller can retry the card later).
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "saved_mark" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  // Walk forward from the block start until we reach the object that
  // covers (or starts at) mr.start().
  HeapWord* next = cur;
  while (next <= start) {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = (cur + obj->size());
  }

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.

  assert(obj == oop(cur), "sanity");
  assert(cur <= start &&
         obj->klass_or_null() != NULL &&
         (cur + obj->size()) > start,
         "Loop postcondition");

  // First object: may span mr.start(), so bound the iteration to mr.
  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  // Remaining objects that start inside mr.
  while (cur < end) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    };

    // Otherwise:
    next = (cur + obj->size());

    if (!g1h->is_obj_dead(obj)) {
      if (next < end || !obj->is_objArray()) {
        // This object either does not span the MemRegion
        // boundary, or if it does it's not an array.
        // Apply closure to whole object.
        obj->oop_iterate(cl);
      } else {
        // This obj is an array that spans the boundary.
        // Stop at the boundary.
        obj->oop_iterate(cl, mr);
      }
    }
    cur = next;
  }
  return NULL;
}
ysr@345 734
ysr@345 735 void HeapRegion::print() const { print_on(gclog_or_tty); }
ysr@345 736 void HeapRegion::print_on(outputStream* st) const {
ysr@345 737 if (isHumongous()) {
ysr@345 738 if (startsHumongous())
ysr@345 739 st->print(" HS");
ysr@345 740 else
ysr@345 741 st->print(" HC");
ysr@345 742 } else {
ysr@345 743 st->print(" ");
ysr@345 744 }
ysr@345 745 if (in_collection_set())
ysr@345 746 st->print(" CS");
ysr@345 747 else
ysr@345 748 st->print(" ");
ysr@345 749 if (is_young())
johnc@1483 750 st->print(is_survivor() ? " SU" : " Y ");
ysr@345 751 else
ysr@345 752 st->print(" ");
ysr@345 753 if (is_empty())
ysr@345 754 st->print(" F");
ysr@345 755 else
ysr@345 756 st->print(" ");
tonyp@3124 757 st->print(" TS %5d", _gc_time_stamp);
tonyp@1477 758 st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
tonyp@1477 759 prev_top_at_mark_start(), next_top_at_mark_start());
ysr@345 760 G1OffsetTableContigSpace::print_on(st);
ysr@345 761 }
ysr@345 762
brutisso@3951 763 void HeapRegion::verify() const {
tonyp@1079 764 bool dummy = false;
brutisso@3951 765 verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
tonyp@860 766 }
tonyp@860 767
ysr@345 768 // This really ought to be commoned up into OffsetTableContigSpace somehow.
ysr@345 769 // We would need a mechanism to make that code skip dead objects.
ysr@345 770
brutisso@3951 771 void HeapRegion::verify(VerifyOption vo,
tonyp@1079 772 bool* failures) const {
ysr@345 773 G1CollectedHeap* g1 = G1CollectedHeap::heap();
tonyp@1079 774 *failures = false;
ysr@345 775 HeapWord* p = bottom();
ysr@345 776 HeapWord* prev_p = NULL;
johnc@2767 777 VerifyLiveClosure vl_cl(g1, vo);
tonyp@1740 778 bool is_humongous = isHumongous();
tonyp@2146 779 bool do_bot_verify = !is_young();
tonyp@1740 780 size_t object_num = 0;
ysr@345 781 while (p < top()) {
tonyp@2146 782 oop obj = oop(p);
tonyp@2146 783 size_t obj_size = obj->size();
tonyp@2146 784 object_num += 1;
tonyp@2146 785
tonyp@2146 786 if (is_humongous != g1->isHumongous(obj_size)) {
tonyp@1740 787 gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
tonyp@1740 788 SIZE_FORMAT" words) in a %shumongous region",
tonyp@2146 789 p, g1->isHumongous(obj_size) ? "" : "non-",
tonyp@2146 790 obj_size, is_humongous ? "" : "non-");
tonyp@1740 791 *failures = true;
tonyp@2146 792 return;
tonyp@1740 793 }
tonyp@2146 794
tonyp@2146 795 // If it returns false, verify_for_object() will output the
tonyp@2146 796 // appropriate messasge.
tonyp@2146 797 if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
tonyp@2146 798 *failures = true;
tonyp@2146 799 return;
tonyp@2146 800 }
tonyp@2146 801
johnc@2767 802 if (!g1->is_obj_dead_cond(obj, this, vo)) {
tonyp@2146 803 if (obj->is_oop()) {
tonyp@2146 804 klassOop klass = obj->klass();
tonyp@2146 805 if (!klass->is_perm()) {
tonyp@2146 806 gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
tonyp@2146 807 "not in perm", klass, obj);
tonyp@2146 808 *failures = true;
tonyp@2146 809 return;
tonyp@2146 810 } else if (!klass->is_klass()) {
tonyp@2146 811 gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
tonyp@2146 812 "not a klass", klass, obj);
tonyp@2146 813 *failures = true;
tonyp@2146 814 return;
tonyp@2146 815 } else {
tonyp@2146 816 vl_cl.set_containing_obj(obj);
tonyp@2146 817 obj->oop_iterate(&vl_cl);
tonyp@2146 818 if (vl_cl.failures()) {
tonyp@2146 819 *failures = true;
tonyp@2146 820 }
tonyp@2146 821 if (G1MaxVerifyFailures >= 0 &&
tonyp@2146 822 vl_cl.n_failures() >= G1MaxVerifyFailures) {
tonyp@2146 823 return;
tonyp@2146 824 }
tonyp@2146 825 }
tonyp@2146 826 } else {
tonyp@2146 827 gclog_or_tty->print_cr(PTR_FORMAT" no an oop", obj);
tonyp@1079 828 *failures = true;
tonyp@1079 829 return;
tonyp@1079 830 }
ysr@345 831 }
ysr@345 832 prev_p = p;
tonyp@2146 833 p += obj_size;
ysr@345 834 }
tonyp@2146 835
tonyp@2146 836 if (p != top()) {
tonyp@2146 837 gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
tonyp@2146 838 "does not match top "PTR_FORMAT, p, top());
tonyp@2146 839 *failures = true;
tonyp@2146 840 return;
tonyp@2146 841 }
tonyp@2146 842
tonyp@2146 843 HeapWord* the_end = end();
tonyp@2146 844 assert(p == top(), "it should still hold");
tonyp@2146 845 // Do some extra BOT consistency checking for addresses in the
tonyp@2146 846 // range [top, end). BOT look-ups in this range should yield
tonyp@2146 847 // top. No point in doing that if top == end (there's nothing there).
tonyp@2146 848 if (p < the_end) {
tonyp@2146 849 // Look up top
tonyp@2146 850 HeapWord* addr_1 = p;
tonyp@2146 851 HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
tonyp@2146 852 if (b_start_1 != p) {
tonyp@2146 853 gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
tonyp@2146 854 " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
tonyp@2146 855 addr_1, b_start_1, p);
tonyp@2146 856 *failures = true;
tonyp@2146 857 return;
tonyp@2146 858 }
tonyp@2146 859
tonyp@2146 860 // Look up top + 1
tonyp@2146 861 HeapWord* addr_2 = p + 1;
tonyp@2146 862 if (addr_2 < the_end) {
tonyp@2146 863 HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
tonyp@2146 864 if (b_start_2 != p) {
tonyp@2146 865 gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
tonyp@2146 866 " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
tonyp@2146 867 addr_2, b_start_2, p);
tonyp@1079 868 *failures = true;
tonyp@1079 869 return;
tonyp@2146 870 }
tonyp@2146 871 }
tonyp@2146 872
tonyp@2146 873 // Look up an address between top and end
tonyp@2146 874 size_t diff = pointer_delta(the_end, p) / 2;
tonyp@2146 875 HeapWord* addr_3 = p + diff;
tonyp@2146 876 if (addr_3 < the_end) {
tonyp@2146 877 HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
tonyp@2146 878 if (b_start_3 != p) {
tonyp@2146 879 gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
tonyp@2146 880 " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
tonyp@2146 881 addr_3, b_start_3, p);
tonyp@2146 882 *failures = true;
tonyp@2146 883 return;
tonyp@2146 884 }
tonyp@2146 885 }
tonyp@2146 886
tonyp@2146 887 // Loook up end - 1
tonyp@2146 888 HeapWord* addr_4 = the_end - 1;
tonyp@2146 889 HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
tonyp@2146 890 if (b_start_4 != p) {
tonyp@2146 891 gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
tonyp@2146 892 " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
tonyp@2146 893 addr_4, b_start_4, p);
tonyp@2146 894 *failures = true;
tonyp@2146 895 return;
tonyp@1079 896 }
ysr@345 897 }
tonyp@1079 898
tonyp@1740 899 if (is_humongous && object_num > 1) {
tonyp@1740 900 gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
tonyp@1740 901 "but has "SIZE_FORMAT", objects",
tonyp@1740 902 bottom(), end(), object_num);
tonyp@1740 903 *failures = true;
tonyp@1079 904 return;
ysr@345 905 }
ysr@345 906 }
ysr@345 907
ysr@345 908 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
ysr@345 909 // away eventually.
ysr@345 910
tonyp@359 911 void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
ysr@345 912 // false ==> we'll do the clearing if there's clearing to be done.
tonyp@359 913 ContiguousSpace::initialize(mr, false, mangle_space);
ysr@345 914 _offsets.zero_bottom_entry();
ysr@345 915 _offsets.initialize_threshold();
tonyp@359 916 if (clear_space) clear(mangle_space);
ysr@345 917 }
ysr@345 918
tonyp@359 919 void G1OffsetTableContigSpace::clear(bool mangle_space) {
tonyp@359 920 ContiguousSpace::clear(mangle_space);
ysr@345 921 _offsets.zero_bottom_entry();
ysr@345 922 _offsets.initialize_threshold();
ysr@345 923 }
ysr@345 924
ysr@345 925 void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
ysr@345 926 Space::set_bottom(new_bottom);
ysr@345 927 _offsets.set_bottom(new_bottom);
ysr@345 928 }
ysr@345 929
ysr@345 930 void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
ysr@345 931 Space::set_end(new_end);
ysr@345 932 _offsets.resize(new_end - bottom());
ysr@345 933 }
ysr@345 934
ysr@345 935 void G1OffsetTableContigSpace::print() const {
ysr@345 936 print_short();
ysr@345 937 gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
ysr@345 938 INTPTR_FORMAT ", " INTPTR_FORMAT ")",
ysr@345 939 bottom(), top(), _offsets.threshold(), end());
ysr@345 940 }
ysr@345 941
ysr@345 942 HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
ysr@345 943 return _offsets.initialize_threshold();
ysr@345 944 }
ysr@345 945
ysr@345 946 HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
ysr@345 947 HeapWord* end) {
ysr@345 948 _offsets.alloc_block(start, end);
ysr@345 949 return _offsets.threshold();
ysr@345 950 }
ysr@345 951
ysr@345 952 HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
ysr@345 953 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@345 954 assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
ysr@345 955 if (_gc_time_stamp < g1h->get_gc_time_stamp())
ysr@345 956 return top();
ysr@345 957 else
ysr@345 958 return ContiguousSpace::saved_mark_word();
ysr@345 959 }
ysr@345 960
ysr@345 961 void G1OffsetTableContigSpace::set_saved_mark() {
ysr@345 962 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@345 963 unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
ysr@345 964
ysr@345 965 if (_gc_time_stamp < curr_gc_time_stamp) {
ysr@345 966 // The order of these is important, as another thread might be
ysr@345 967 // about to start scanning this region. If it does so after
ysr@345 968 // set_saved_mark and before _gc_time_stamp = ..., then the latter
ysr@345 969 // will be false, and it will pick up top() as the high water mark
ysr@345 970 // of region. If it does so after _gc_time_stamp = ..., then it
ysr@345 971 // will pick up the right saved_mark_word() as the high water mark
ysr@345 972 // of the region. Either way, the behaviour will be correct.
ysr@345 973 ContiguousSpace::set_saved_mark();
ysr@896 974 OrderAccess::storestore();
iveresov@356 975 _gc_time_stamp = curr_gc_time_stamp;
tonyp@2417 976 // No need to do another barrier to flush the writes above. If
tonyp@2417 977 // this is called in parallel with other threads trying to
tonyp@2417 978 // allocate into the region, the caller should call this while
tonyp@2417 979 // holding a lock and when the lock is released the writes will be
tonyp@2417 980 // flushed.
ysr@345 981 }
ysr@345 982 }
ysr@345 983
ysr@345 984 G1OffsetTableContigSpace::
ysr@345 985 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
ysr@345 986 MemRegion mr, bool is_zeroed) :
ysr@345 987 _offsets(sharedOffsetArray, mr),
ysr@345 988 _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
ysr@345 989 _gc_time_stamp(0)
ysr@345 990 {
ysr@345 991 _offsets.set_space(this);
tonyp@359 992 initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
ysr@345 993 }