annotate src/hotspot/share/gc/g1/g1EvacFailure.cpp @ 53912:bb051ca06e9e

8159440: Move marking of promoted objects during initial mark into the concurrent phase Reviewed-by: sjohanss, kbarrett
author tschatzl
date Thu, 06 Dec 2018 13:55:22 +0100
parents c25572739e7c
children 13acc8e38a29
rev   line source
johnc@11451 1 /*
eosterlund@49595 2 * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
johnc@11451 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
johnc@11451 4 *
johnc@11451 5 * This code is free software; you can redistribute it and/or modify it
johnc@11451 6 * under the terms of the GNU General Public License version 2 only, as
johnc@11451 7 * published by the Free Software Foundation.
johnc@11451 8 *
johnc@11451 9 * This code is distributed in the hope that it will be useful, but WITHOUT
johnc@11451 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
johnc@11451 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
johnc@11451 12 * version 2 for more details (a copy is included in the LICENSE file that
johnc@11451 13 * accompanied this code).
johnc@11451 14 *
johnc@11451 15 * You should have received a copy of the GNU General Public License version
johnc@11451 16 * 2 along with this work; if not, write to the Free Software Foundation,
johnc@11451 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
johnc@11451 18 *
johnc@11451 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
johnc@11451 20 * or visit www.oracle.com if you need additional information or have any
johnc@11451 21 * questions.
johnc@11451 22 *
johnc@11451 23 */
johnc@11451 24
stefank@29080 25 #include "precompiled.hpp"
pliden@30764 26 #include "gc/g1/dirtyCardQueue.hpp"
pliden@30764 27 #include "gc/g1/g1CollectedHeap.inline.hpp"
drwhite@31331 28 #include "gc/g1/g1CollectorState.hpp"
ehelin@35943 29 #include "gc/g1/g1ConcurrentMark.inline.hpp"
pliden@30764 30 #include "gc/g1/g1EvacFailure.hpp"
david@35851 31 #include "gc/g1/g1HeapVerifier.hpp"
pliden@30764 32 #include "gc/g1/g1OopClosures.inline.hpp"
pliden@30764 33 #include "gc/g1/g1_globals.hpp"
pliden@30764 34 #include "gc/g1/heapRegion.hpp"
pliden@30764 35 #include "gc/g1/heapRegionRemSet.hpp"
tonyp@38081 36 #include "gc/shared/preservedMarks.inline.hpp"
stefank@50087 37 #include "oops/access.inline.hpp"
stefank@50087 38 #include "oops/compressedOops.inline.hpp"
rkennke@50217 39 #include "oops/oop.inline.hpp"
johnc@11451 40
stefank@51386 41 class UpdateRSetDeferred : public BasicOopIterateClosure {
johnc@11451 42 private:
tschatzl@50301 43 G1CollectedHeap* _g1h;
eosterlund@49595 44 DirtyCardQueue* _dcq;
eosterlund@49595 45 G1CardTable* _ct;
johnc@11451 46
johnc@11451 47 public:
tschatzl@31631 48 UpdateRSetDeferred(DirtyCardQueue* dcq) :
tschatzl@52033 49 _g1h(G1CollectedHeap::heap()), _dcq(dcq), _ct(_g1h->card_table()) {}
johnc@11451 50
johnc@11451 51 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
johnc@11451 52 virtual void do_oop( oop* p) { do_oop_work(p); }
johnc@11451 53 template <class T> void do_oop_work(T* p) {
tschatzl@50301 54 assert(_g1h->heap_region_containing(p)->is_in_reserved(p), "paranoia");
tschatzl@50301 55 assert(!_g1h->heap_region_containing(p)->is_survivor(), "Unexpected evac failure in survivor region");
ehelin@34614 56
stefank@50087 57 T const o = RawAccess<>::oop_load(p);
stefank@50087 58 if (CompressedOops::is_null(o)) {
tschatzl@46634 59 return;
tschatzl@46634 60 }
tschatzl@46634 61
stefank@50087 62 if (HeapRegion::is_in_same_region(p, CompressedOops::decode(o))) {
tschatzl@46634 63 return;
tschatzl@46634 64 }
eosterlund@49595 65 size_t card_index = _ct->index_for(p);
eosterlund@49595 66 if (_ct->mark_card_deferred(card_index)) {
eosterlund@49595 67 _dcq->enqueue((jbyte*)_ct->byte_for_index(card_index));
johnc@11451 68 }
johnc@11451 69 }
johnc@11451 70 };
johnc@11451 71
// Object closure applied to a region that had at least one evacuation
// failure. Self-forwarded objects (the ones that failed to move) are kept
// live; the gaps between them — objects that were dead, or that evacuated
// successfully and whose old copies are now garbage — are overwritten with
// filler objects so the region stays parseable.
class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  HeapRegion* _hr;                      // The failed region being processed.
  size_t _marked_bytes;                 // Running total of bytes kept live.
  UpdateRSetDeferred* _update_rset_cl;  // Recreates remembered set entries for kept objects.
  bool _during_initial_mark;            // True if this GC is an initial-mark pause.
  uint _worker_id;
  // End of the previous self-forwarded object; everything from here up to the
  // next self-forwarded object is dead and will be zapped.
  HeapWord* _last_forwarded_object_end;

public:
  RemoveSelfForwardPtrObjClosure(HeapRegion* hr,
                                 UpdateRSetDeferred* update_rset_cl,
                                 bool during_initial_mark,
                                 uint worker_id) :
    _g1h(G1CollectedHeap::heap()),
    _cm(_g1h->concurrent_mark()),
    _hr(hr),
    _marked_bytes(0),
    _update_rset_cl(update_rset_cl),
    _during_initial_mark(during_initial_mark),
    _worker_id(worker_id),
    _last_forwarded_object_end(hr->bottom()) { }

  size_t marked_bytes() { return _marked_bytes; }

  // Iterate over the live objects in the region to find self-forwarded objects
  // that need to be kept live. We need to update the remembered sets of these
  // objects. Further update the BOT and marks.
  // We can coalesce and overwrite the remaining heap contents with dummy objects
  // as they have either been dead or evacuated (which are unreferenced now, i.e.
  // dead too) already.
  void do_object(oop obj) {
    HeapWord* obj_addr = (HeapWord*) obj;
    assert(_hr->is_in(obj_addr), "sanity");

    // A self-forwarded object (forwardee() == obj) is one whose evacuation
    // failed; it stays at its current address.
    if (obj->is_forwarded() && obj->forwardee() == obj) {
      // The object failed to move.

      // First fill the dead gap between the previous kept object and this one.
      zap_dead_objects(_last_forwarded_object_end, obj_addr);
      // We consider all objects that we find self-forwarded to be
      // live. What we'll do is that we'll update the prev marking
      // info so that they are all under PTAMS and explicitly marked.
      if (!_cm->is_marked_in_prev_bitmap(obj)) {
        _cm->mark_in_prev_bitmap(obj);
      }
      if (_during_initial_mark) {
        // For the next marking info we'll only mark the
        // self-forwarded objects explicitly if we are during
        // initial-mark (since, normally, we only mark objects pointed
        // to by roots if we succeed in copying them). By marking all
        // self-forwarded objects we ensure that we mark any that are
        // still pointed to be roots. During concurrent marking, and
        // after initial-mark, we don't need to mark any objects
        // explicitly and all objects in the CSet are considered
        // (implicitly) live. So, we won't mark them explicitly and
        // we'll leave them over NTAMS.
        _cm->mark_in_next_bitmap(_worker_id, _hr, obj);
      }
      size_t obj_size = obj->size();

      _marked_bytes += (obj_size * HeapWordSize);
      // Restore the object's original mark word, which the self-forwarding
      // installation displaced.
      PreservedMarks::init_forwarded_mark(obj);

      // While we were processing RSet buffers during the collection,
      // we actually didn't scan any cards on the collection set,
      // since we didn't want to update remembered sets with entries
      // that point into the collection set, given that live objects
      // from the collection set are about to move and such entries
      // will be stale very soon.
      // This change also dealt with a reliability issue which
      // involved scanning a card in the collection set and coming
      // across an array that was being chunked and looking malformed.
      // The problem is that, if evacuation fails, we might have
      // remembered set entries missing given that we skipped cards on
      // the collection set. So, we'll recreate such entries now.
      obj->oop_iterate(_update_rset_cl);

      HeapWord* obj_end = obj_addr + obj_size;
      _last_forwarded_object_end = obj_end;
      // Keep the block offset table consistent with the kept object.
      _hr->cross_threshold(obj_addr, obj_end);
    }
  }

  // Fill the memory area from start to end with filler objects, and update the BOT
  // and the mark bitmap accordingly.
  void zap_dead_objects(HeapWord* start, HeapWord* end) {
    if (start == end) {
      return;
    }

    size_t gap_size = pointer_delta(end, start);
    MemRegion mr(start, gap_size);
    // Gaps smaller than min_fill_size() cannot hold a filler object; they are
    // only cleared in the prev bitmap below.
    if (gap_size >= CollectedHeap::min_fill_size()) {
      CollectedHeap::fill_with_objects(start, gap_size);

      HeapWord* end_first_obj = start + ((oop)start)->size();
      _hr->cross_threshold(start, end_first_obj);
      // Fill_with_objects() may have created multiple (i.e. two)
      // objects, as the max_fill_size() is half a region.
      // After updating the BOT for the first object, also update the
      // BOT for the second object to make the BOT complete.
      if (end_first_obj != end) {
        _hr->cross_threshold(end_first_obj, end);
#ifdef ASSERT
        size_t size_second_obj = ((oop)end_first_obj)->size();
        HeapWord* end_of_second_obj = end_first_obj + size_second_obj;
        assert(end == end_of_second_obj,
               "More than two objects were used to fill the area from " PTR_FORMAT " to " PTR_FORMAT ", "
               "second objects size " SIZE_FORMAT " ends at " PTR_FORMAT,
               p2i(start), p2i(end), size_second_obj, p2i(end_of_second_obj));
#endif
      }
    }
    // The zapped range is dead: clear any stale prev-bitmap marks over it.
    _cm->clear_range_in_prev_bitmap(mr);
  }

  // Zap the dead tail of the region, from the last kept object to top().
  void zap_remainder() {
    zap_dead_objects(_last_forwarded_object_end, _hr->top());
  }
};
johnc@11451 193
// Region closure run by each worker of G1ParRemoveSelfForwardPtrsTask over
// the collection set. For every claimed region that had an evacuation
// failure it removes the self-forwarding pointers and restores the region's
// metadata (BOT, marks, remembered set).
class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  uint _worker_id;
  HeapRegionClaimer* _hrclaimer;  // Ensures each region is processed by exactly one worker.

  // Per-worker queue and closure for deferred remembered set updates.
  DirtyCardQueue _dcq;
  UpdateRSetDeferred _update_rset_cl;

public:
  RemoveSelfForwardPtrHRClosure(uint worker_id,
                                HeapRegionClaimer* hrclaimer) :
    _g1h(G1CollectedHeap::heap()),
    _worker_id(worker_id),
    _hrclaimer(hrclaimer),
    _dcq(&_g1h->dirty_card_queue_set()),
    _update_rset_cl(&_dcq){
  }

  // Walk all live objects of hr with RemoveSelfForwardPtrObjClosure, zap the
  // dead remainder, and return the number of bytes kept live.
  size_t remove_self_forward_ptr_by_walking_hr(HeapRegion* hr,
                                               bool during_initial_mark) {
    RemoveSelfForwardPtrObjClosure rspc(hr,
                                        &_update_rset_cl,
                                        during_initial_mark,
                                        _worker_id);
    hr->object_iterate(&rspc);
    // Need to zap the remainder area of the processed region.
    rspc.zap_remainder();

    return rspc.marked_bytes();
  }

  bool do_heap_region(HeapRegion *hr) {
    assert(!hr->is_pinned(), "Unexpected pinned region at index %u", hr->hrm_index());
    assert(hr->in_collection_set(), "bad CS");

    // Only one worker processes each region; others skip it.
    if (_hrclaimer->claim_region(hr->hrm_index())) {
      if (hr->evacuation_failed()) {
        bool during_initial_mark = _g1h->collector_state()->in_initial_mark_gc();
        bool during_conc_mark = _g1h->collector_state()->mark_or_rebuild_in_progress();

        hr->note_self_forwarding_removal_start(during_initial_mark,
                                               during_conc_mark);
        _g1h->verifier()->check_bitmaps("Self-Forwarding Ptr Removal", hr);

        // The BOT is stale after the failed evacuation; rebuild it while
        // walking the region below via cross_threshold() calls.
        hr->reset_bot();

        size_t live_bytes = remove_self_forward_ptr_by_walking_hr(hr, during_initial_mark);

        hr->rem_set()->clean_strong_code_roots(hr);
        hr->rem_set()->clear_locked(true);

        hr->note_self_forwarding_removal_end(live_bytes);
      }
    }
    // Returning false continues the iteration over the collection set.
    return false;
  }
};
johnc@11451 251
// Parallel gang task that removes self-forwarding pointers installed during
// evacuation failure. One claimer slot per active worker.
G1ParRemoveSelfForwardPtrsTask::G1ParRemoveSelfForwardPtrsTask() :
  AbstractGangTask("G1 Remove Self-forwarding Pointers"),
  _g1h(G1CollectedHeap::heap()),
  _hrclaimer(_g1h->workers()->active_workers()) { }
johnc@11451 256
stefank@29080 257 void G1ParRemoveSelfForwardPtrsTask::work(uint worker_id) {
tschatzl@31631 258 RemoveSelfForwardPtrHRClosure rsfp_cl(worker_id, &_hrclaimer);
johnc@11451 259
tschatzl@39698 260 _g1h->collection_set_iterate_from(&rsfp_cl, worker_id);
stefank@29080 261 }