comparison src/share/vm/gc_implementation/g1/heapRegion.cpp @ 5169:0114a0a4434c

7145569: G1: optimize nmethods scanning
Summary: Add a list of nmethods to the RSet for a region that contain references into the region. Skip scanning the code cache during root scanning and scan the nmethod lists during RSet scanning instead.
Reviewed-by: tschatzl, brutisso, mgerdin, twisti, kvn
author johnc
date Wed, 22 Jan 2014 13:28:27 +0100
parents 7afe50dc6b9f
children 05e7f9c0c822
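
For readers skimming the diff below: this change teaches each region's remembered set to keep a list of the nmethods (compiled methods) whose embedded oops point into that region, so the collector can scan those lists during RSet scanning instead of walking the entire code cache during root scanning (see add_strong_code_root, remove_strong_code_root, strong_code_roots_do and migrate_strong_code_roots in the new code). The sketch below is an editor's illustration of that bookkeeping under simplified assumptions; the names RegionCodeRoots and Nmethod are made up and this is not HotSpot code.

#include <algorithm>
#include <cstddef>
#include <functional>
#include <vector>

struct Nmethod {};  // stand-in for a compiled method with oops embedded in its code

class RegionCodeRoots {
  std::vector<Nmethod*> _list;  // nmethods known to reference objects in this region
public:
  // Analogue of HeapRegion::add_strong_code_root(): record nm at most once.
  void add(Nmethod* nm) {
    if (std::find(_list.begin(), _list.end(), nm) == _list.end()) {
      _list.push_back(nm);
    }
  }
  // Analogue of HeapRegion::remove_strong_code_root().
  void remove(Nmethod* nm) {
    _list.erase(std::remove(_list.begin(), _list.end(), nm), _list.end());
  }
  // Analogue of HeapRegion::strong_code_roots_do(): applied while scanning
  // this region's RSet, rather than walking the whole code cache at
  // root-scanning time.
  void do_roots(const std::function<void(Nmethod*)>& f) const {
    for (Nmethod* nm : _list) {
      f(nm);
    }
  }
  std::size_t length() const { return _list.size(); }
};

In the real patch the list lives in HeapRegionRemSet, and migrate_strong_code_roots() handles entries for collection-set regions after their objects have been evacuated.
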
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 #include "code/nmethod.hpp"
26 27 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
27 28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
28 29 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
29 30 #include "gc_implementation/g1/heapRegion.inline.hpp"
30 31 #include "gc_implementation/g1/heapRegionRemSet.hpp"
47 48 _hr(hr), _fk(fk), _g1(g1) { }
48 49
49 50 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
50 51 OopClosure* oc) :
51 52 _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
53
54 template<class ClosureType>
55 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
56 HeapRegion* hr,
57 HeapWord* cur, HeapWord* top) {
58 oop cur_oop = oop(cur);
59 int oop_size = cur_oop->size();
60 HeapWord* next_obj = cur + oop_size;
61 while (next_obj < top) {
62 // Keep filtering the remembered set.
63 if (!g1h->is_obj_dead(cur_oop, hr)) {
64 // Bottom lies entirely below top, so we can call the
65 // non-memRegion version of oop_iterate below.
66 cur_oop->oop_iterate(cl);
67 }
68 cur = next_obj;
69 cur_oop = oop(cur);
70 oop_size = cur_oop->size();
71 next_obj = cur + oop_size;
72 }
73 return cur;
74 }
75
76 void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
77 HeapWord* bottom,
78 HeapWord* top,
79 OopClosure* cl) {
80 G1CollectedHeap* g1h = _g1;
81 int oop_size;
82 OopClosure* cl2 = NULL;
83
84 FilterIntoCSClosure intoCSFilt(this, g1h, cl);
85 FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
86
87 switch (_fk) {
88 case NoFilterKind: cl2 = cl; break;
89 case IntoCSFilterKind: cl2 = &intoCSFilt; break;
90 case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
91 default: ShouldNotReachHere();
92 }
93
94 // Start filtering what we add to the remembered set. If the object is
95 // not considered dead, either because it is marked (in the mark bitmap)
96 // or it was allocated after marking finished, then we add it. Otherwise
97 // we can safely ignore the object.
98 if (!g1h->is_obj_dead(oop(bottom), _hr)) {
99 oop_size = oop(bottom)->oop_iterate(cl2, mr);
100 } else {
101 oop_size = oop(bottom)->size();
102 }
103
104 bottom += oop_size;
105
106 if (bottom < top) {
107 // We replicate the loop below for several kinds of possible filters.
108 switch (_fk) {
109 case NoFilterKind:
110 bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
111 break;
112
113 case IntoCSFilterKind: {
114 FilterIntoCSClosure filt(this, g1h, cl);
115 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
116 break;
117 }
118
119 case OutOfRegionFilterKind: {
120 FilterOutOfRegionClosure filt(_hr, cl);
121 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
122 break;
123 }
124
125 default:
126 ShouldNotReachHere();
127 }
128
129 // Last object. Need to do dead-obj filtering here too.
130 if (!g1h->is_obj_dead(oop(bottom), _hr)) {
131 oop(bottom)->oop_iterate(cl2, mr);
132 }
133 }
134 }
135
136 // Minimum region size; we won't go lower than that.
137 // We might want to decrease this in the future, to deal with small
138 // heaps a bit more efficiently.
139 #define MIN_REGION_SIZE ( 1024 * 1024 )
140
141 // Maximum region size; we don't go higher than that. There's a good
142 // reason for having an upper bound. We don't want regions to get too
143 // large, otherwise cleanup's effectiveness would decrease as there
144 // will be fewer opportunities to find totally empty regions after
145 // marking.
146 #define MAX_REGION_SIZE ( 32 * 1024 * 1024 )
147
148 // The automatic region size calculation will try to have around this
149 // many regions in the heap (based on the min heap size).
150 #define TARGET_REGION_NUMBER 2048
151
152 void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
153 // region_size in bytes
154 uintx region_size = G1HeapRegionSize;
155 if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
156 // We base the automatic calculation on the min heap size. This
157 // can be problematic if the spread between min and max is quite
158 // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
159 // the max size, the region size might be way too large for the
160 // min size. Either way, some users might have to set the region
161 // size manually for some -Xms / -Xmx combos.
162
163 region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
164 (uintx) MIN_REGION_SIZE);
165 }
166
167 int region_size_log = log2_long((jlong) region_size);
168 // Recalculate the region size to make sure it's a power of
169 // 2. This means that region_size is the largest power of 2 that's
170 // <= what we've calculated so far.
171 region_size = ((uintx)1 << region_size_log);
172
173 // Now make sure that we don't go over or under our limits.
174 if (region_size < MIN_REGION_SIZE) {
175 region_size = MIN_REGION_SIZE;
176 } else if (region_size > MAX_REGION_SIZE) {
177 region_size = MAX_REGION_SIZE;
178 }
179
180 if (region_size != G1HeapRegionSize) {
181 // Update the flag to make sure that PrintFlagsFinal logs the correct value
182 FLAG_SET_ERGO(uintx, G1HeapRegionSize, region_size);
183 }
184
185 // And recalculate the log.
186 region_size_log = log2_long((jlong) region_size);
187
188 // Now, set up the globals.
189 guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
190 LogOfHRGrainBytes = region_size_log;
191
192 guarantee(LogOfHRGrainWords == 0, "we should only set it once");
193 LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;
194
195 guarantee(GrainBytes == 0, "we should only set it once");
196 // The cast to int is safe, given that we've bounded region_size by
197 // MIN_REGION_SIZE and MAX_REGION_SIZE.
198 GrainBytes = (size_t)region_size;
199
200 guarantee(GrainWords == 0, "we should only set it once");
201 GrainWords = GrainBytes >> LogHeapWordSize;
202 guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");
203
204 guarantee(CardsPerRegion == 0, "we should only set it once");
205 CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
206 }
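// [Editor's aside, not part of the diff] The ergonomics above aim for roughly
// TARGET_REGION_NUMBER regions based on the minimum heap size, round the
// result down to a power of two, and clamp it to [MIN_REGION_SIZE,
// MAX_REGION_SIZE]. A standalone restatement of that arithmetic follows
// (hypothetical function name, not HotSpot code):
#include <cstdint>
#include <cstdio>

static uint64_t ergonomic_region_size(uint64_t min_heap_bytes) {
  const uint64_t min_sz = 1024 * 1024;        // MIN_REGION_SIZE
  const uint64_t max_sz = 32 * 1024 * 1024;   // MAX_REGION_SIZE
  const uint64_t target = 2048;               // TARGET_REGION_NUMBER
  uint64_t size = min_heap_bytes / target;
  if (size < min_sz) size = min_sz;
  uint64_t pow2 = min_sz;                     // largest power of two <= size
  while (pow2 * 2 <= size) pow2 *= 2;
  return pow2 > max_sz ? max_sz : pow2;
}

int main() {
  // -Xms128m -> 1 MB (floor), -Xms6g -> 2 MB, -Xms64g -> 32 MB (cap).
  std::printf("%llu %llu %llu\n",
              (unsigned long long) ergonomic_region_size(128ULL << 20),
              (unsigned long long) ergonomic_region_size(6ULL << 30),
              (unsigned long long) ergonomic_region_size(64ULL << 30));
  return 0;
}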
207
208 void HeapRegion::reset_after_compaction() {
209 G1OffsetTableContigSpace::reset_after_compaction();
210 // After a compaction the mark bitmap is invalid, so we must
211 // treat all objects as being inside the unmarked area.
212 zero_marked_bytes();
213 init_top_at_mark_start();
214 }
215
216 void HeapRegion::hr_clear(bool par, bool clear_space) {
217 assert(_humongous_type == NotHumongous,
218 "we should have already filtered out humongous regions");
219 assert(_humongous_start_region == NULL,
220 "we should have already filtered out humongous regions");
221 assert(_end == _orig_end,
222 "we should have already filtered out humongous regions");
223
224 _in_collection_set = false;
225
226 set_young_index_in_cset(-1);
227 uninstall_surv_rate_group();
228 set_young_type(NotYoung);
229 reset_pre_dummy_top();
230
231 if (!par) {
232 // If this is parallel, this will be done later.
233 HeapRegionRemSet* hrrs = rem_set();
234 hrrs->clear();
235 _claimed = InitialClaimValue;
236 }
237 zero_marked_bytes();
238
239 _offsets.resize(HeapRegion::GrainWords);
240 init_top_at_mark_start();
241 if (clear_space) clear(SpaceDecorator::Mangle);
242 }
243
244 void HeapRegion::par_clear() {
245 assert(used() == 0, "the region should have been already cleared");
246 assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
247 HeapRegionRemSet* hrrs = rem_set();
248 hrrs->clear();
249 CardTableModRefBS* ct_bs =
250 (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
251 ct_bs->clear(MemRegion(bottom(), end()));
252 }
253
254 void HeapRegion::calc_gc_efficiency() {
255 // GC efficiency is the ratio of how much space would be
256 // reclaimed over how long we predict it would take to reclaim it.
257 G1CollectedHeap* g1h = G1CollectedHeap::heap();
258 G1CollectorPolicy* g1p = g1h->g1_policy();
259
260 // Retrieve a prediction of the elapsed time for this region for
261 // a mixed gc because the region will only be evacuated during a
262 // mixed gc.
263 double region_elapsed_time_ms =
264 g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
265 _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
266 }
267
268 void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
269 assert(!isHumongous(), "sanity / pre-condition");
270 assert(end() == _orig_end,
271 "Should be normal before the humongous object allocation");
272 assert(top() == bottom(), "should be empty");
273 assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
274
275 _humongous_type = StartsHumongous;
276 _humongous_start_region = this;
277
278 set_end(new_end);
279 _offsets.set_for_starts_humongous(new_top);
280 }
281
282 void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
283 assert(!isHumongous(), "sanity / pre-condition");
284 assert(end() == _orig_end,
285 "Should be normal before the humongous object allocation");
286 assert(top() == bottom(), "should be empty");
287 assert(first_hr->startsHumongous(), "pre-condition");
288
289 _humongous_type = ContinuesHumongous;
290 _humongous_start_region = first_hr;
291 }
292
293 void HeapRegion::set_notHumongous() {
294 assert(isHumongous(), "pre-condition");
295
296 if (startsHumongous()) {
297 assert(top() <= end(), "pre-condition");
298 set_end(_orig_end);
299 if (top() > end()) {
300 // at least one "continues humongous" region after it
301 set_top(end());
302 }
303 } else {
304 // continues humongous
305 assert(end() == _orig_end, "sanity");
306 }
307
308 assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
309 _humongous_type = NotHumongous;
310 _humongous_start_region = NULL;
311 }
312
313 bool HeapRegion::claimHeapRegion(jint claimValue) {
314 jint current = _claimed;
315 if (current != claimValue) {
316 jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
317 if (res == current) {
318 return true;
319 }
320 }
321 return false;
322 }
323
324 HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
325 HeapWord* low = addr;
326 HeapWord* high = end();
327 while (low < high) {
328 size_t diff = pointer_delta(high, low);
329 // Must add one below to bias toward the high amount. Otherwise, if
330 // "high" were at the desired value, and "low" were one less, we
331 // would not converge on "high". This is not symmetric, because
332 // we set "high" to a block start, which might be the right one,
333 // which we don't do for "low".
334 HeapWord* middle = low + (diff+1)/2;
335 if (middle == high) return high;
336 HeapWord* mid_bs = block_start_careful(middle);
337 if (mid_bs < addr) {
338 low = middle;
339 } else {
340 high = mid_bs;
341 }
342 }
343 assert(low == high && low >= addr, "Didn't work.");
344 return low;
345 }
346
347 void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
348 G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
349 hr_clear(false/*par*/, clear_space);
350 }
351 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
352 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
353 #endif // _MSC_VER
354
355
356 HeapRegion::HeapRegion(uint hrs_index,
357 G1BlockOffsetSharedArray* sharedOffsetArray,
358 MemRegion mr, bool is_zeroed) :
359 G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
360 _hrs_index(hrs_index),
361 _humongous_type(NotHumongous), _humongous_start_region(NULL),
362 _in_collection_set(false),
363 _next_in_special_set(NULL), _orig_end(NULL),
364 _claimed(InitialClaimValue), _evacuation_failed(false),
365 _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
366 _young_type(NotYoung), _next_young_region(NULL),
367 _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
368 #ifdef ASSERT
369 _containing_set(NULL),
370 #endif // ASSERT
371 _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
372 _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
373 _predicted_bytes_to_copy(0)
374 {
375 _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
376 _orig_end = mr.end();
377 // Note that initialize() will set the start of the unmarked area of the
378 // region.
379 this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
380 set_top(bottom());
381 set_saved_mark();
382
383 assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
384 }
385
386 CompactibleSpace* HeapRegion::next_compaction_space() const {
387 // We're not using an iterator given that it will wrap around when
388 // it reaches the last region and this is not what we want here.
389 G1CollectedHeap* g1h = G1CollectedHeap::heap();
390 uint index = hrs_index() + 1;
391 while (index < g1h->n_regions()) {
392 HeapRegion* hr = g1h->region_at(index);
393 if (!hr->isHumongous()) {
394 return hr;
395 }
396 index += 1;
397 }
398 return NULL;
399 }
400
401 void HeapRegion::save_marks() {
402 set_saved_mark();
403 }
404
405 void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) {
406 HeapWord* p = mr.start();
407 HeapWord* e = mr.end();
408 oop obj;
409 while (p < e) {
410 obj = oop(p);
411 p += obj->oop_iterate(cl);
412 }
413 assert(p == e, "bad memregion: doesn't end on obj boundary");
414 }
415
416 #define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
417 void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
418 ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
419 }
420 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)
421
422
423 void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
424 oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
425 }
426
427 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
428 bool during_conc_mark) {
429 // We always recreate the prev marking info and we'll explicitly
430 // mark all objects we find to be self-forwarded on the prev
431 // bitmap. So all objects need to be below PTAMS.
432 _prev_top_at_mark_start = top();
433 _prev_marked_bytes = 0;
434
435 if (during_initial_mark) {
436 // During initial-mark, we'll also explicitly mark all objects
437 // we find to be self-forwarded on the next bitmap. So all
438 // objects need to be below NTAMS.
439 _next_top_at_mark_start = top();
440 _next_marked_bytes = 0;
441 } else if (during_conc_mark) {
442 // During concurrent mark, all objects in the CSet (including
443 // the ones we find to be self-forwarded) are implicitly live.
444 // So all objects need to be above NTAMS.
445 _next_top_at_mark_start = bottom();
446 _next_marked_bytes = 0;
447 }
448 }
449
450 void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
451 bool during_conc_mark,
452 size_t marked_bytes) {
453 assert(0 <= marked_bytes && marked_bytes <= used(),
454 err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
455 marked_bytes, used()));
456 _prev_marked_bytes = marked_bytes;
457 }
458
459 HeapWord*
460 HeapRegion::object_iterate_mem_careful(MemRegion mr,
461 ObjectClosure* cl) {
462 G1CollectedHeap* g1h = G1CollectedHeap::heap();
463 // We used to use "block_start_careful" here. But we're actually happy
464 // to update the BOT while we do this...
465 HeapWord* cur = block_start(mr.start());
466 mr = mr.intersection(used_region());
467 if (mr.is_empty()) return NULL;
468 // Otherwise, find the obj that extends onto mr.start().
469
470 assert(cur <= mr.start()
471 && (oop(cur)->klass_or_null() == NULL ||
472 cur + oop(cur)->size() > mr.start()),
473 "postcondition of block_start");
474 oop obj;
475 while (cur < mr.end()) {
476 obj = oop(cur);
477 if (obj->klass_or_null() == NULL) {
478 // Ran into an unparseable point.
479 return cur;
480 } else if (!g1h->is_obj_dead(obj)) {
481 cl->do_object(obj);
482 }
483 if (cl->abort()) return cur;
484 // The check above must occur before the operation below, since an
485 // abort might invalidate the "size" operation.
486 cur += obj->size();
487 }
488 return NULL;
489 }
490
491 HeapWord*
492 HeapRegion::
493 oops_on_card_seq_iterate_careful(MemRegion mr,
494 FilterOutOfRegionClosure* cl,
495 bool filter_young,
496 jbyte* card_ptr) {
497 // Currently, we should only have to clean the card if filter_young
498 // is true and vice versa.
499 if (filter_young) {
500 assert(card_ptr != NULL, "pre-condition");
501 } else {
502 assert(card_ptr == NULL, "pre-condition");
503 }
504 G1CollectedHeap* g1h = G1CollectedHeap::heap();
505
506 // If we're within a stop-world GC, then we might look at a card in a
507 // GC alloc region that extends onto a GC LAB, which may not be
508 // parseable. Stop such at the "saved_mark" of the region.
509 if (g1h->is_gc_active()) {
510 mr = mr.intersection(used_region_at_save_marks());
511 } else {
512 mr = mr.intersection(used_region());
513 }
514 if (mr.is_empty()) return NULL;
515 // Otherwise, find the obj that extends onto mr.start().
516
517 // The intersection of the incoming mr (for the card) and the
518 // allocated part of the region is non-empty. This implies that
519 // we have actually allocated into this region. The code in
520 // G1CollectedHeap.cpp that allocates a new region sets the
521 // is_young tag on the region before allocating. Thus we
522 // safely know if this region is young.
523 if (is_young() && filter_young) {
524 return NULL;
525 }
526
527 assert(!is_young(), "check value of filter_young");
528
529 // We can only clean the card here, after we make the decision that
530 // the card is not young. And we only clean the card if we have been
531 // asked to (i.e., card_ptr != NULL).
532 if (card_ptr != NULL) {
533 *card_ptr = CardTableModRefBS::clean_card_val();
534 // We must complete this write before we do any of the reads below.
535 OrderAccess::storeload();
536 }
537
538 // Cache the boundaries of the memory region in some const locals
539 HeapWord* const start = mr.start();
540 HeapWord* const end = mr.end();
541
542 // We used to use "block_start_careful" here. But we're actually happy
543 // to update the BOT while we do this...
544 HeapWord* cur = block_start(start);
545 assert(cur <= start, "Postcondition");
546
547 oop obj;
548
549 HeapWord* next = cur;
550 while (next <= start) {
551 cur = next;
552 obj = oop(cur);
553 if (obj->klass_or_null() == NULL) {
554 // Ran into an unparseable point.
555 return cur;
556 }
557 // Otherwise...
558 next = (cur + obj->size());
559 }
560
561 // If we finish the above loop...We have a parseable object that
562 // begins on or before the start of the memory region, and ends
563 // inside or spans the entire region.
564
565 assert(obj == oop(cur), "sanity");
566 assert(cur <= start &&
567 obj->klass_or_null() != NULL &&
568 (cur + obj->size()) > start,
569 "Loop postcondition");
570
571 if (!g1h->is_obj_dead(obj)) {
572 obj->oop_iterate(cl, mr);
573 }
574
575 while (cur < end) {
576 obj = oop(cur);
577 if (obj->klass_or_null() == NULL) {
578 // Ran into an unparseable point.
579 return cur;
580 };
581
582 // Otherwise:
583 next = (cur + obj->size());
584
585 if (!g1h->is_obj_dead(obj)) {
586 if (next < end || !obj->is_objArray()) {
587 // This object either does not span the MemRegion
588 // boundary, or if it does it's not an array.
589 // Apply closure to whole object.
590 obj->oop_iterate(cl);
591 } else {
592 // This obj is an array that spans the boundary.
593 // Stop at the boundary.
594 obj->oop_iterate(cl, mr);
595 }
596 }
597 cur = next;
598 }
599 return NULL;
600 }
601
602 // Code roots support
603
604 void HeapRegion::add_strong_code_root(nmethod* nm) {
605 HeapRegionRemSet* hrrs = rem_set();
606 hrrs->add_strong_code_root(nm);
607 }
608
609 void HeapRegion::remove_strong_code_root(nmethod* nm) {
610 HeapRegionRemSet* hrrs = rem_set();
611 hrrs->remove_strong_code_root(nm);
612 }
613
614 void HeapRegion::migrate_strong_code_roots() {
615 assert(in_collection_set(), "only collection set regions");
616 assert(!isHumongous(), "not humongous regions");
617
618 HeapRegionRemSet* hrrs = rem_set();
619 hrrs->migrate_strong_code_roots();
620 }
621
622 void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
623 HeapRegionRemSet* hrrs = rem_set();
624 hrrs->strong_code_roots_do(blk);
625 }
626
627 class VerifyStrongCodeRootOopClosure: public OopClosure {
628 const HeapRegion* _hr;
629 nmethod* _nm;
630 bool _failures;
631 bool _has_oops_in_region;
632
633 template <class T> void do_oop_work(T* p) {
634 T heap_oop = oopDesc::load_heap_oop(p);
635 if (!oopDesc::is_null(heap_oop)) {
636 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
637
638 // Note: not all the oops embedded in the nmethod are in the
639 // current region. We only look at those which are.
640 if (_hr->is_in(obj)) {
641 // Object is in the region. Check that it's below top
642 if (_hr->top() <= (HeapWord*)obj) {
643 // Object is above top
644 gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
645 "["PTR_FORMAT", "PTR_FORMAT") is above "
646 "top "PTR_FORMAT,
647 obj, _hr->bottom(), _hr->end(), _hr->top());
648 _failures = true;
649 return;
650 }
651 // Nmethod has at least one oop in the current region
652 _has_oops_in_region = true;
653 }
654 }
655 }
656
657 public:
658 VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
659 _hr(hr), _failures(false), _has_oops_in_region(false) {}
660
661 void do_oop(narrowOop* p) { do_oop_work(p); }
662 void do_oop(oop* p) { do_oop_work(p); }
663
664 bool failures() { return _failures; }
665 bool has_oops_in_region() { return _has_oops_in_region; }
666 };
667
668 class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
669 const HeapRegion* _hr;
670 bool _failures;
671 public:
672 VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
673 _hr(hr), _failures(false) {}
674
675 void do_code_blob(CodeBlob* cb) {
676 nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
677 if (nm != NULL) {
678 // Verify that the nmethod is live
679 if (!nm->is_alive()) {
680 gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod "
681 PTR_FORMAT" in its strong code roots",
682 _hr->bottom(), _hr->end(), nm);
683 _failures = true;
684 } else {
685 VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
686 nm->oops_do(&oop_cl);
687 if (!oop_cl.has_oops_in_region()) {
688 gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod "
689 PTR_FORMAT" in its strong code roots "
690 "with no pointers into region",
691 _hr->bottom(), _hr->end(), nm);
692 _failures = true;
693 } else if (oop_cl.failures()) {
694 gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other "
695 "failures for nmethod "PTR_FORMAT,
696 _hr->bottom(), _hr->end(), nm);
697 _failures = true;
698 }
699 }
700 }
701 }
702
703 bool failures() { return _failures; }
704 };
705
706 void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
707 if (!G1VerifyHeapRegionCodeRoots) {
708 // We're not verifying code roots.
709 return;
710 }
711 if (vo == VerifyOption_G1UseMarkWord) {
712 // Marking verification during a full GC is performed after class
713 // unloading, code cache unloading, etc., so the strong code roots
714 // attached to each heap region are in an inconsistent state. They won't
715 // be consistent until the strong code roots are rebuilt after the
716 // actual GC. Skip verifying the strong code roots at this particular
717 // time.
718 assert(VerifyDuringGC, "only way to get here");
719 return;
720 }
721
722 HeapRegionRemSet* hrrs = rem_set();
723 int strong_code_roots_length = hrrs->strong_code_roots_list_length();
724
725 // if this region is empty then there should be no entries
726 // on its strong code root list
727 if (is_empty()) {
728 if (strong_code_roots_length > 0) {
729 gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
730 "but has "INT32_FORMAT" code root entries",
731 bottom(), end(), strong_code_roots_length);
732 *failures = true;
733 }
734 return;
735 }
736
737 // An H-region should have an empty strong code root list
738 if (isHumongous()) {
739 if (strong_code_roots_length > 0) {
740 gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
741 "but has "INT32_FORMAT" code root entries",
742 bottom(), end(), strong_code_roots_length);
743 *failures = true;
744 }
745 return;
746 }
747
748 VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
749 strong_code_roots_do(&cb_cl);
750
751 if (cb_cl.failures()) {
752 *failures = true;
753 }
754 }
755
756 void HeapRegion::print() const { print_on(gclog_or_tty); }
757 void HeapRegion::print_on(outputStream* st) const {
758 if (isHumongous()) {
759 if (startsHumongous())
760 st->print(" HS");
761 else
762 st->print(" HC");
763 } else {
764 st->print(" ");
765 }
766 if (in_collection_set())
767 st->print(" CS");
768 else
769 st->print(" ");
770 if (is_young())
771 st->print(is_survivor() ? " SU" : " Y ");
772 else
773 st->print(" ");
774 if (is_empty())
775 st->print(" F");
776 else
777 st->print(" ");
778 st->print(" TS %5d", _gc_time_stamp);
779 st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
780 prev_top_at_mark_start(), next_top_at_mark_start());
781 G1OffsetTableContigSpace::print_on(st);
782 }
52 783
53 784 class VerifyLiveClosure: public OopClosure {
54 785 private:
55 786 G1CollectedHeap* _g1h;
56 787 CardTableModRefBS* _bs;
186 917 }
187 918 }
188 919 }
189 920 };
190 921
191 template<class ClosureType>
192 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
193 HeapRegion* hr,
194 HeapWord* cur, HeapWord* top) {
195 oop cur_oop = oop(cur);
196 int oop_size = cur_oop->size();
197 HeapWord* next_obj = cur + oop_size;
198 while (next_obj < top) {
199 // Keep filtering the remembered set.
200 if (!g1h->is_obj_dead(cur_oop, hr)) {
201 // Bottom lies entirely below top, so we can call the
202 // non-memRegion version of oop_iterate below.
203 cur_oop->oop_iterate(cl);
204 }
205 cur = next_obj;
206 cur_oop = oop(cur);
207 oop_size = cur_oop->size();
208 next_obj = cur + oop_size;
209 }
210 return cur;
211 }
212
213 void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
214 HeapWord* bottom,
215 HeapWord* top,
216 OopClosure* cl) {
217 G1CollectedHeap* g1h = _g1;
218 int oop_size;
219 OopClosure* cl2 = NULL;
220
221 FilterIntoCSClosure intoCSFilt(this, g1h, cl);
222 FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
223
224 switch (_fk) {
225 case NoFilterKind: cl2 = cl; break;
226 case IntoCSFilterKind: cl2 = &intoCSFilt; break;
227 case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
228 default: ShouldNotReachHere();
229 }
230
231 // Start filtering what we add to the remembered set. If the object is
232 // not considered dead, either because it is marked (in the mark bitmap)
233 // or it was allocated after marking finished, then we add it. Otherwise
234 // we can safely ignore the object.
235 if (!g1h->is_obj_dead(oop(bottom), _hr)) {
236 oop_size = oop(bottom)->oop_iterate(cl2, mr);
237 } else {
238 oop_size = oop(bottom)->size();
239 }
240
241 bottom += oop_size;
242
243 if (bottom < top) {
244 // We replicate the loop below for several kinds of possible filters.
245 switch (_fk) {
246 case NoFilterKind:
247 bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
248 break;
249
250 case IntoCSFilterKind: {
251 FilterIntoCSClosure filt(this, g1h, cl);
252 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
253 break;
254 }
255
256 case OutOfRegionFilterKind: {
257 FilterOutOfRegionClosure filt(_hr, cl);
258 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
259 break;
260 }
261
262 default:
263 ShouldNotReachHere();
264 }
265
266 // Last object. Need to do dead-obj filtering here too.
267 if (!g1h->is_obj_dead(oop(bottom), _hr)) {
268 oop(bottom)->oop_iterate(cl2, mr);
269 }
270 }
271 }
272
273 // Minimum region size; we won't go lower than that.
274 // We might want to decrease this in the future, to deal with small
275 // heaps a bit more efficiently.
276 #define MIN_REGION_SIZE ( 1024 * 1024 )
277
278 // Maximum region size; we don't go higher than that. There's a good
279 // reason for having an upper bound. We don't want regions to get too
280 // large, otherwise cleanup's effectiveness would decrease as there
281 // will be fewer opportunities to find totally empty regions after
282 // marking.
283 #define MAX_REGION_SIZE ( 32 * 1024 * 1024 )
284
285 // The automatic region size calculation will try to have around this
286 // many regions in the heap (based on the min heap size).
287 #define TARGET_REGION_NUMBER 2048
288
289 void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
290 // region_size in bytes
291 uintx region_size = G1HeapRegionSize;
292 if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
293 // We base the automatic calculation on the min heap size. This
294 // can be problematic if the spread between min and max is quite
295 // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
296 // the max size, the region size might be way too large for the
297 // min size. Either way, some users might have to set the region
298 // size manually for some -Xms / -Xmx combos.
299
300 region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
301 (uintx) MIN_REGION_SIZE);
302 }
303
304 int region_size_log = log2_long((jlong) region_size);
305 // Recalculate the region size to make sure it's a power of
306 // 2. This means that region_size is the largest power of 2 that's
307 // <= what we've calculated so far.
308 region_size = ((uintx)1 << region_size_log);
309
310 // Now make sure that we don't go over or under our limits.
311 if (region_size < MIN_REGION_SIZE) {
312 region_size = MIN_REGION_SIZE;
313 } else if (region_size > MAX_REGION_SIZE) {
314 region_size = MAX_REGION_SIZE;
315 }
316
317 // And recalculate the log.
318 region_size_log = log2_long((jlong) region_size);
319
320 // Now, set up the globals.
321 guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
322 LogOfHRGrainBytes = region_size_log;
323
324 guarantee(LogOfHRGrainWords == 0, "we should only set it once");
325 LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;
326
327 guarantee(GrainBytes == 0, "we should only set it once");
328 // The cast to int is safe, given that we've bounded region_size by
329 // MIN_REGION_SIZE and MAX_REGION_SIZE.
330 GrainBytes = (size_t)region_size;
331
332 guarantee(GrainWords == 0, "we should only set it once");
333 GrainWords = GrainBytes >> LogHeapWordSize;
334 guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");
335
336 guarantee(CardsPerRegion == 0, "we should only set it once");
337 CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
338 }
339
340 void HeapRegion::reset_after_compaction() {
341 G1OffsetTableContigSpace::reset_after_compaction();
342 // After a compaction the mark bitmap is invalid, so we must
343 // treat all objects as being inside the unmarked area.
344 zero_marked_bytes();
345 init_top_at_mark_start();
346 }
347
348 void HeapRegion::hr_clear(bool par, bool clear_space) {
349 assert(_humongous_type == NotHumongous,
350 "we should have already filtered out humongous regions");
351 assert(_humongous_start_region == NULL,
352 "we should have already filtered out humongous regions");
353 assert(_end == _orig_end,
354 "we should have already filtered out humongous regions");
355
356 _in_collection_set = false;
357
358 set_young_index_in_cset(-1);
359 uninstall_surv_rate_group();
360 set_young_type(NotYoung);
361 reset_pre_dummy_top();
362
363 if (!par) {
364 // If this is parallel, this will be done later.
365 HeapRegionRemSet* hrrs = rem_set();
366 if (hrrs != NULL) hrrs->clear();
367 _claimed = InitialClaimValue;
368 }
369 zero_marked_bytes();
370
371 _offsets.resize(HeapRegion::GrainWords);
372 init_top_at_mark_start();
373 if (clear_space) clear(SpaceDecorator::Mangle);
374 }
375
376 void HeapRegion::par_clear() {
377 assert(used() == 0, "the region should have been already cleared");
378 assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
379 HeapRegionRemSet* hrrs = rem_set();
380 hrrs->clear();
381 CardTableModRefBS* ct_bs =
382 (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
383 ct_bs->clear(MemRegion(bottom(), end()));
384 }
385
386 void HeapRegion::calc_gc_efficiency() {
387 // GC efficiency is the ratio of how much space would be
388 // reclaimed over how long we predict it would take to reclaim it.
389 G1CollectedHeap* g1h = G1CollectedHeap::heap();
390 G1CollectorPolicy* g1p = g1h->g1_policy();
391
392 // Retrieve a prediction of the elapsed time for this region for
393 // a mixed gc because the region will only be evacuated during a
394 // mixed gc.
395 double region_elapsed_time_ms =
396 g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
397 _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
398 }
399
400 void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
401 assert(!isHumongous(), "sanity / pre-condition");
402 assert(end() == _orig_end,
403 "Should be normal before the humongous object allocation");
404 assert(top() == bottom(), "should be empty");
405 assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
406
407 _humongous_type = StartsHumongous;
408 _humongous_start_region = this;
409
410 set_end(new_end);
411 _offsets.set_for_starts_humongous(new_top);
412 }
413
414 void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
415 assert(!isHumongous(), "sanity / pre-condition");
416 assert(end() == _orig_end,
417 "Should be normal before the humongous object allocation");
418 assert(top() == bottom(), "should be empty");
419 assert(first_hr->startsHumongous(), "pre-condition");
420
421 _humongous_type = ContinuesHumongous;
422 _humongous_start_region = first_hr;
423 }
424
425 void HeapRegion::set_notHumongous() {
426 assert(isHumongous(), "pre-condition");
427
428 if (startsHumongous()) {
429 assert(top() <= end(), "pre-condition");
430 set_end(_orig_end);
431 if (top() > end()) {
432 // at least one "continues humongous" region after it
433 set_top(end());
434 }
435 } else {
436 // continues humongous
437 assert(end() == _orig_end, "sanity");
438 }
439
440 assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
441 _humongous_type = NotHumongous;
442 _humongous_start_region = NULL;
443 }
444
445 bool HeapRegion::claimHeapRegion(jint claimValue) {
446 jint current = _claimed;
447 if (current != claimValue) {
448 jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
449 if (res == current) {
450 return true;
451 }
452 }
453 return false;
454 }
455
456 HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
457 HeapWord* low = addr;
458 HeapWord* high = end();
459 while (low < high) {
460 size_t diff = pointer_delta(high, low);
461 // Must add one below to bias toward the high amount. Otherwise, if
462 // "high" were at the desired value, and "low" were one less, we
463 // would not converge on "high". This is not symmetric, because
464 // we set "high" to a block start, which might be the right one,
465 // which we don't do for "low".
466 HeapWord* middle = low + (diff+1)/2;
467 if (middle == high) return high;
468 HeapWord* mid_bs = block_start_careful(middle);
469 if (mid_bs < addr) {
470 low = middle;
471 } else {
472 high = mid_bs;
473 }
474 }
475 assert(low == high && low >= addr, "Didn't work.");
476 return low;
477 }
478
479 void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
480 G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
481 hr_clear(false/*par*/, clear_space);
482 }
483 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
484 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
485 #endif // _MSC_VER
486
487
488 HeapRegion::HeapRegion(uint hrs_index,
489 G1BlockOffsetSharedArray* sharedOffsetArray,
490 MemRegion mr, bool is_zeroed) :
491 G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
492 _hrs_index(hrs_index),
493 _humongous_type(NotHumongous), _humongous_start_region(NULL),
494 _in_collection_set(false),
495 _next_in_special_set(NULL), _orig_end(NULL),
496 _claimed(InitialClaimValue), _evacuation_failed(false),
497 _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
498 _young_type(NotYoung), _next_young_region(NULL),
499 _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
500 #ifdef ASSERT
501 _containing_set(NULL),
502 #endif // ASSERT
503 _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
504 _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
505 _predicted_bytes_to_copy(0)
506 {
507 _orig_end = mr.end();
508 // Note that initialize() will set the start of the unmarked area of the
509 // region.
510 this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
511 set_top(bottom());
512 set_saved_mark();
513
514 _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
515
516 assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
517 }
518
519 CompactibleSpace* HeapRegion::next_compaction_space() const {
520 // We're not using an iterator given that it will wrap around when
521 // it reaches the last region and this is not what we want here.
522 G1CollectedHeap* g1h = G1CollectedHeap::heap();
523 uint index = hrs_index() + 1;
524 while (index < g1h->n_regions()) {
525 HeapRegion* hr = g1h->region_at(index);
526 if (!hr->isHumongous()) {
527 return hr;
528 }
529 index += 1;
530 }
531 return NULL;
532 }
533
534 void HeapRegion::save_marks() {
535 set_saved_mark();
536 }
537
538 void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) {
539 HeapWord* p = mr.start();
540 HeapWord* e = mr.end();
541 oop obj;
542 while (p < e) {
543 obj = oop(p);
544 p += obj->oop_iterate(cl);
545 }
546 assert(p == e, "bad memregion: doesn't end on obj boundary");
547 }
548
549 #define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
550 void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
551 ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
552 }
553 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)
554
555
556 void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
557 oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
558 }
559
560 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
561 bool during_conc_mark) {
562 // We always recreate the prev marking info and we'll explicitly
563 // mark all objects we find to be self-forwarded on the prev
564 // bitmap. So all objects need to be below PTAMS.
565 _prev_top_at_mark_start = top();
566 _prev_marked_bytes = 0;
567
568 if (during_initial_mark) {
569 // During initial-mark, we'll also explicitly mark all objects
570 // we find to be self-forwarded on the next bitmap. So all
571 // objects need to be below NTAMS.
572 _next_top_at_mark_start = top();
573 _next_marked_bytes = 0;
574 } else if (during_conc_mark) {
575 // During concurrent mark, all objects in the CSet (including
576 // the ones we find to be self-forwarded) are implicitly live.
577 // So all objects need to be above NTAMS.
578 _next_top_at_mark_start = bottom();
579 _next_marked_bytes = 0;
580 }
581 }
582
583 void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
584 bool during_conc_mark,
585 size_t marked_bytes) {
586 assert(0 <= marked_bytes && marked_bytes <= used(),
587 err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
588 marked_bytes, used()));
589 _prev_marked_bytes = marked_bytes;
590 }
591
592 HeapWord*
593 HeapRegion::object_iterate_mem_careful(MemRegion mr,
594 ObjectClosure* cl) {
595 G1CollectedHeap* g1h = G1CollectedHeap::heap();
596 // We used to use "block_start_careful" here. But we're actually happy
597 // to update the BOT while we do this...
598 HeapWord* cur = block_start(mr.start());
599 mr = mr.intersection(used_region());
600 if (mr.is_empty()) return NULL;
601 // Otherwise, find the obj that extends onto mr.start().
602
603 assert(cur <= mr.start()
604 && (oop(cur)->klass_or_null() == NULL ||
605 cur + oop(cur)->size() > mr.start()),
606 "postcondition of block_start");
607 oop obj;
608 while (cur < mr.end()) {
609 obj = oop(cur);
610 if (obj->klass_or_null() == NULL) {
611 // Ran into an unparseable point.
612 return cur;
613 } else if (!g1h->is_obj_dead(obj)) {
614 cl->do_object(obj);
615 }
616 if (cl->abort()) return cur;
617 // The check above must occur before the operation below, since an
618 // abort might invalidate the "size" operation.
619 cur += obj->size();
620 }
621 return NULL;
622 }
623
624 HeapWord*
625 HeapRegion::
626 oops_on_card_seq_iterate_careful(MemRegion mr,
627 FilterOutOfRegionClosure* cl,
628 bool filter_young,
629 jbyte* card_ptr) {
630 // Currently, we should only have to clean the card if filter_young
631 // is true and vice versa.
632 if (filter_young) {
633 assert(card_ptr != NULL, "pre-condition");
634 } else {
635 assert(card_ptr == NULL, "pre-condition");
636 }
637 G1CollectedHeap* g1h = G1CollectedHeap::heap();
638
639 // If we're within a stop-world GC, then we might look at a card in a
640 // GC alloc region that extends onto a GC LAB, which may not be
641 // parseable. Stop such at the "saved_mark" of the region.
642 if (g1h->is_gc_active()) {
643 mr = mr.intersection(used_region_at_save_marks());
644 } else {
645 mr = mr.intersection(used_region());
646 }
647 if (mr.is_empty()) return NULL;
648 // Otherwise, find the obj that extends onto mr.start().
649
650 // The intersection of the incoming mr (for the card) and the
651 // allocated part of the region is non-empty. This implies that
652 // we have actually allocated into this region. The code in
653 // G1CollectedHeap.cpp that allocates a new region sets the
654 // is_young tag on the region before allocating. Thus we
655 // safely know if this region is young.
656 if (is_young() && filter_young) {
657 return NULL;
658 }
659
660 assert(!is_young(), "check value of filter_young");
661
662 // We can only clean the card here, after we make the decision that
663 // the card is not young. And we only clean the card if we have been
664 // asked to (i.e., card_ptr != NULL).
665 if (card_ptr != NULL) {
666 *card_ptr = CardTableModRefBS::clean_card_val();
667 // We must complete this write before we do any of the reads below.
668 OrderAccess::storeload();
669 }
670
671 // Cache the boundaries of the memory region in some const locals
672 HeapWord* const start = mr.start();
673 HeapWord* const end = mr.end();
674
675 // We used to use "block_start_careful" here. But we're actually happy
676 // to update the BOT while we do this...
677 HeapWord* cur = block_start(start);
678 assert(cur <= start, "Postcondition");
679
680 oop obj;
681
682 HeapWord* next = cur;
683 while (next <= start) {
684 cur = next;
685 obj = oop(cur);
686 if (obj->klass_or_null() == NULL) {
687 // Ran into an unparseable point.
688 return cur;
689 }
690 // Otherwise...
691 next = (cur + obj->size());
692 }
693
694 // If we finish the above loop...We have a parseable object that
695 // begins on or before the start of the memory region, and ends
696 // inside or spans the entire region.
697
698 assert(obj == oop(cur), "sanity");
699 assert(cur <= start &&
700 obj->klass_or_null() != NULL &&
701 (cur + obj->size()) > start,
702 "Loop postcondition");
703
704 if (!g1h->is_obj_dead(obj)) {
705 obj->oop_iterate(cl, mr);
706 }
707
708 while (cur < end) {
709 obj = oop(cur);
710 if (obj->klass_or_null() == NULL) {
711 // Ran into an unparseable point.
712 return cur;
713 };
714
715 // Otherwise:
716 next = (cur + obj->size());
717
718 if (!g1h->is_obj_dead(obj)) {
719 if (next < end || !obj->is_objArray()) {
720 // This object either does not span the MemRegion
721 // boundary, or if it does it's not an array.
722 // Apply closure to whole object.
723 obj->oop_iterate(cl);
724 } else {
725 // This obj is an array that spans the boundary.
726 // Stop at the boundary.
727 obj->oop_iterate(cl, mr);
728 }
729 }
730 cur = next;
731 }
732 return NULL;
733 }
734
735 void HeapRegion::print() const { print_on(gclog_or_tty); }
736 void HeapRegion::print_on(outputStream* st) const {
737 if (isHumongous()) {
738 if (startsHumongous())
739 st->print(" HS");
740 else
741 st->print(" HC");
742 } else {
743 st->print(" ");
744 }
745 if (in_collection_set())
746 st->print(" CS");
747 else
748 st->print(" ");
749 if (is_young())
750 st->print(is_survivor() ? " SU" : " Y ");
751 else
752 st->print(" ");
753 if (is_empty())
754 st->print(" F");
755 else
756 st->print(" ");
757 st->print(" TS %5d", _gc_time_stamp);
758 st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
759 prev_top_at_mark_start(), next_top_at_mark_start());
760 G1OffsetTableContigSpace::print_on(st);
761 }
762
763 void HeapRegion::verify() const {
764 bool dummy = false;
765 verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
766 }
767
768 922 // This really ought to be commoned up into OffsetTableContigSpace somehow.
769 923 // We would need a mechanism to make that code skip dead objects.
770 924
771 925 void HeapRegion::verify(VerifyOption vo,
772 926 bool* failures) const {
901 1055 "but has "SIZE_FORMAT", objects",
902 1056 bottom(), end(), object_num);
903 1057 *failures = true;
904 1058 return;
905 1059 }
1060
1061 verify_strong_code_roots(vo, failures);
1062 }
1063
1064 void HeapRegion::verify() const {
1065 bool dummy = false;
1066 verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
906 1067 }
907 1068
908 1069 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
909 1070 // away eventually.
910 1071