annotate src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.inline.hpp @ 56679:55319b27b346

8225357: Rewire ShenandoahHeap::maybe_update_with_forwarded for contending fixups
Reviewed-by: rkennke
author shade
date Fri, 07 Jun 2019 11:47:53 +0200
parents 785a12e0f89b
children 14c78683c9f0
rev   line source
rkennke@53962 1 /*
zgu@54471 2 * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
rkennke@53962 3 *
rkennke@53962 4 * This code is free software; you can redistribute it and/or modify it
rkennke@53962 5 * under the terms of the GNU General Public License version 2 only, as
rkennke@53962 6 * published by the Free Software Foundation.
rkennke@53962 7 *
rkennke@53962 8 * This code is distributed in the hope that it will be useful, but WITHOUT
rkennke@53962 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
rkennke@53962 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
rkennke@53962 11 * version 2 for more details (a copy is included in the LICENSE file that
rkennke@53962 12 * accompanied this code).
rkennke@53962 13 *
rkennke@53962 14 * You should have received a copy of the GNU General Public License version
rkennke@53962 15 * 2 along with this work; if not, write to the Free Software Foundation,
rkennke@53962 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
rkennke@53962 17 *
rkennke@53962 18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
rkennke@53962 19 * or visit www.oracle.com if you need additional information or have any
rkennke@53962 20 * questions.
rkennke@53962 21 *
rkennke@53962 22 */
rkennke@53962 23
coleenp@54304 24 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP
coleenp@54304 25 #define SHARE_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP
rkennke@53962 26
rkennke@53962 27 #include "gc/shenandoah/shenandoahAsserts.hpp"
rkennke@53962 28 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
rkennke@53962 29 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
rkennke@53962 30 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
zgu@54471 31 #include "gc/shenandoah/shenandoahStringDedup.inline.hpp"
rkennke@53962 32 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
rkennke@53962 33 #include "memory/iterator.inline.hpp"
stefank@56133 34 #include "oops/compressedOops.inline.hpp"
rkennke@53962 35 #include "oops/oop.inline.hpp"
rkennke@53962 36 #include "runtime/prefetch.inline.hpp"
rkennke@53962 37
rkennke@53962 38 template <class T>
rkennke@53962 39 void ShenandoahConcurrentMark::do_task(ShenandoahObjToScanQueue* q, T* cl, jushort* live_data, ShenandoahMarkTask* task) {
rkennke@53962 40 oop obj = task->obj();
rkennke@53962 41
rkennke@53962 42 shenandoah_assert_not_forwarded_except(NULL, obj, _heap->is_concurrent_traversal_in_progress() && _heap->cancelled_gc());
rkennke@53962 43 shenandoah_assert_marked(NULL, obj);
rkennke@53962 44 shenandoah_assert_not_in_cset_except(NULL, obj, _heap->cancelled_gc());
rkennke@53962 45
rkennke@53962 46 if (task->is_not_chunked()) {
rkennke@53962 47 if (obj->is_instance()) {
rkennke@53962 48 // Case 1: Normal oop, process as usual.
rkennke@53962 49 obj->oop_iterate(cl);
rkennke@53962 50 } else if (obj->is_objArray()) {
rkennke@53962 51 // Case 2: Object array instance and no chunk is set. This must be the first
rkennke@53962 52 // time we visit it; start the chunked processing.
rkennke@53962 53 do_chunked_array_start<T>(q, cl, obj);
rkennke@53962 54 } else {
rkennke@53962 55 // Case 3: Primitive array. Do nothing, no oops there. We use the same
rkennke@53962 56 // performance tweak TypeArrayKlass::oop_oop_iterate_impl is using:
rkennke@53962 57 // We skip iterating over the klass pointer since we know that
rkennke@53962 58 // Universe::TypeArrayKlass never moves.
rkennke@53962 59 assert (obj->is_typeArray(), "should be type array");
rkennke@53962 60 }
rkennke@53962 61 // Count liveness last: push the outstanding work to the queues first
rkennke@53962 62 count_liveness(live_data, obj);
rkennke@53962 63 } else {
rkennke@53962 64 // Case 4: Array chunk, has sensible chunk id. Process it.
rkennke@53962 65 do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow());
rkennke@53962 66 }
rkennke@53962 67 }
rkennke@53962 68
rkennke@53962 69 inline void ShenandoahConcurrentMark::count_liveness(jushort* live_data, oop obj) {
rkennke@53962 70 size_t region_idx = _heap->heap_region_index_containing(obj);
rkennke@53962 71 ShenandoahHeapRegion* region = _heap->get_region(region_idx);
rkennke@56437 72 size_t size = obj->size();
rkennke@53962 73
rkennke@53962 74 if (!region->is_humongous_start()) {
rkennke@53962 75 assert(!region->is_humongous(), "Cannot have continuations here");
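// Illustrative note: with 16-bit jushort counters, max works out to 2^16 - 1 = 65535 heap
// words (roughly 512 KB with 8-byte heap words). Objects at or above that size, and any
// counter overflow, are flushed straight into the region's live data.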
rkennke@53962 76 size_t max = (1 << (sizeof(jushort) * 8)) - 1;
rkennke@53962 77 if (size >= max) {
rkennke@53962 78 // too big, add to region data directly
rkennke@53962 79 region->increase_live_data_gc_words(size);
rkennke@53962 80 } else {
rkennke@53962 81 jushort cur = live_data[region_idx];
rkennke@53962 82 size_t new_val = cur + size;
rkennke@53962 83 if (new_val >= max) {
rkennke@53962 84 // overflow, flush to region data
rkennke@53962 85 region->increase_live_data_gc_words(new_val);
rkennke@53962 86 live_data[region_idx] = 0;
rkennke@53962 87 } else {
rkennke@53962 88 // still good, remember in locals
rkennke@53962 89 live_data[region_idx] = (jushort) new_val;
rkennke@53962 90 }
rkennke@53962 91 }
rkennke@53962 92 } else {
rkennke@53962 93 shenandoah_assert_in_correct_region(NULL, obj);
rkennke@53962 94 size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
rkennke@53962 95
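// Illustrative example (hypothetical 1 MB regions): a 3.5 MB humongous object spans
// required_regions = 4 regions (one start plus three continuations); each region is
// credited with its own used() footprint in words, so the chain contributes about
// 3.5 MB of live data in total.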
rkennke@53962 96 for (size_t i = region_idx; i < region_idx + num_regions; i++) {
rkennke@53962 97 ShenandoahHeapRegion* chain_reg = _heap->get_region(i);
rkennke@53962 98 assert(chain_reg->is_humongous(), "Expecting a humongous region");
rkennke@53962 99 chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize);
rkennke@53962 100 }
rkennke@53962 101 }
rkennke@53962 102 }
rkennke@53962 103
rkennke@53962 104 template <class T>
rkennke@53962 105 inline void ShenandoahConcurrentMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj) {
rkennke@53962 106 assert(obj->is_objArray(), "expect object array");
rkennke@53962 107 objArrayOop array = objArrayOop(obj);
rkennke@53962 108 int len = array->length();
rkennke@53962 109
rkennke@53962 110 if (len <= (int) ObjArrayMarkingStride*2) {
rkennke@53962 111 // A few slices only, process directly
rkennke@53962 112 array->oop_iterate_range(cl, 0, len);
rkennke@53962 113 } else {
rkennke@53962 114 int bits = log2_long((size_t) len);
rkennke@53962 115 // Compensate for non-power-of-two arrays by covering the array in excess:
rkennke@53962 116 if (len != (1 << bits)) bits++;
rkennke@53962 117
rkennke@53962 118 // Only allow full chunks on the queue. This frees do_chunked_array() from checking from/to
rkennke@53962 119 // boundaries against array->length(), which would touch the array header on every chunk.
rkennke@53962 120 //
rkennke@53962 121 // To do this, we cut the prefix into full-sized chunks and submit them on the queue.
rkennke@53962 122 // If the array does not divide evenly into chunks, there will be an irregular tail,
rkennke@53962 123 // which we process separately.
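//
// Illustrative example (hypothetical ObjArrayMarkingStride of 512): for len = 2500 we get
// bits = 12, so chunk = 1, pow = 12 nominally covers [0, 4096). A task (chunk, pow) stands
// for elements [(chunk-1) << pow, chunk << pow). The first split (pow = 11) pushes
// (chunk = 1, pow = 11), i.e. [0, 2048), and records last_idx = 2048; the next candidate
// boundaries (3072, then 2560) already lie beyond the array, so nothing more is pushed,
// and the irregular tail [2048, 2500) is processed directly below.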
rkennke@53962 124
rkennke@53962 125 int last_idx = 0;
rkennke@53962 126
rkennke@53962 127 int chunk = 1;
rkennke@53962 128 int pow = bits;
rkennke@53962 129
rkennke@53962 130 // Handle overflow
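// (pow can reach 31 only for arrays longer than 2^30 elements; since 1 << 31 does not fit
// in a positive 32-bit int, we immediately split off the first half, [0, 2^30), as its own
// task and continue with the second half.)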
rkennke@53962 131 if (pow >= 31) {
rkennke@53962 132 assert (pow == 31, "sanity");
rkennke@53962 133 pow--;
rkennke@53962 134 chunk = 2;
rkennke@53962 135 last_idx = (1 << pow);
rkennke@53962 136 bool pushed = q->push(ShenandoahMarkTask(array, 1, pow));
rkennke@53962 137 assert(pushed, "overflow queue should always succeed pushing");
rkennke@53962 138 }
rkennke@53962 139
rkennke@53962 140 // Split out tasks, as suggested in ObjArrayChunkedTask docs. Record the last
rkennke@53962 141 // successful right boundary to figure out the irregular tail.
rkennke@53962 142 while ((1 << pow) > (int)ObjArrayMarkingStride &&
rkennke@53962 143 (chunk*2 < ShenandoahMarkTask::chunk_size())) {
rkennke@53962 144 pow--;
rkennke@53962 145 int left_chunk = chunk*2 - 1;
rkennke@53962 146 int right_chunk = chunk*2;
rkennke@53962 147 int left_chunk_end = left_chunk * (1 << pow);
rkennke@53962 148 if (left_chunk_end < len) {
rkennke@53962 149 bool pushed = q->push(ShenandoahMarkTask(array, left_chunk, pow));
rkennke@53962 150 assert(pushed, "overflow queue should always succeed pushing");
rkennke@53962 151 chunk = right_chunk;
rkennke@53962 152 last_idx = left_chunk_end;
rkennke@53962 153 } else {
rkennke@53962 154 chunk = left_chunk;
rkennke@53962 155 }
rkennke@53962 156 }
rkennke@53962 157
rkennke@53962 158 // Process the irregular tail, if present
rkennke@53962 159 int from = last_idx;
rkennke@53962 160 if (from < len) {
rkennke@53962 161 array->oop_iterate_range(cl, from, len);
rkennke@53962 162 }
rkennke@53962 163 }
rkennke@53962 164 }
rkennke@53962 165
rkennke@53962 166 template <class T>
rkennke@53962 167 inline void ShenandoahConcurrentMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow) {
rkennke@53962 168 assert(obj->is_objArray(), "expect object array");
rkennke@53962 169 objArrayOop array = objArrayOop(obj);
rkennke@53962 170
rkennke@53962 171 assert (ObjArrayMarkingStride > 0, "sanity");
rkennke@53962 172
rkennke@53962 173 // Split out tasks, as suggested in ObjArrayChunkedTask docs. Avoid pushing tasks that
rkennke@53962 174 // are known to start beyond the array.
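// Continuing the illustrative stride-512 example from do_chunked_array_start: popping
// (chunk = 1, pow = 11), i.e. [0, 2048), re-pushes (chunk = 1, pow = 10) for [0, 1024)
// and (chunk = 3, pow = 9) for [1024, 1536), then this invocation scans the remaining
// [1536, 2048) itself.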
rkennke@53962 175 while ((1 << pow) > (int)ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) {
rkennke@53962 176 pow--;
rkennke@53962 177 chunk *= 2;
rkennke@53962 178 bool pushed = q->push(ShenandoahMarkTask(array, chunk - 1, pow));
rkennke@53962 179 assert(pushed, "overflow queue should always succeed pushing");
rkennke@53962 180 }
rkennke@53962 181
rkennke@53962 182 int chunk_size = 1 << pow;
rkennke@53962 183
rkennke@53962 184 int from = (chunk - 1) * chunk_size;
rkennke@53962 185 int to = chunk * chunk_size;
rkennke@53962 186
rkennke@53962 187 #ifdef ASSERT
rkennke@53962 188 int len = array->length();
rkennke@53962 189 assert (0 <= from && from < len, "from is sane: %d/%d", from, len);
rkennke@53962 190 assert (0 < to && to <= len, "to is sane: %d/%d", to, len);
rkennke@53962 191 #endif
rkennke@53962 192
rkennke@53962 193 array->oop_iterate_range(cl, from, to);
rkennke@53962 194 }
rkennke@53962 195
rkennke@53962 196 class ShenandoahSATBBufferClosure : public SATBBufferClosure {
rkennke@53962 197 private:
rkennke@53962 198 ShenandoahObjToScanQueue* _queue;
rkennke@53962 199 ShenandoahHeap* _heap;
rkennke@53962 200 ShenandoahMarkingContext* const _mark_context;
rkennke@53962 201 public:
rkennke@53962 202 ShenandoahSATBBufferClosure(ShenandoahObjToScanQueue* q) :
rkennke@53962 203 _queue(q),
rkennke@53962 204 _heap(ShenandoahHeap::heap()),
rkennke@53962 205 _mark_context(_heap->marking_context())
rkennke@53962 206 {
rkennke@53962 207 }
rkennke@53962 208
rkennke@53962 209 void do_buffer(void **buffer, size_t size) {
rkennke@53962 210 if (_heap->has_forwarded_objects()) {
rkennke@53962 211 if (ShenandoahStringDedup::is_enabled()) {
rkennke@53962 212 do_buffer_impl<RESOLVE, ENQUEUE_DEDUP>(buffer, size);
rkennke@53962 213 } else {
rkennke@53962 214 do_buffer_impl<RESOLVE, NO_DEDUP>(buffer, size);
rkennke@53962 215 }
rkennke@53962 216 } else {
rkennke@53962 217 if (ShenandoahStringDedup::is_enabled()) {
rkennke@53962 218 do_buffer_impl<NONE, ENQUEUE_DEDUP>(buffer, size);
rkennke@53962 219 } else {
rkennke@53962 220 do_buffer_impl<NONE, NO_DEDUP>(buffer, size);
rkennke@53962 221 }
rkennke@53962 222 }
rkennke@53962 223 }
rkennke@53962 224
rkennke@53962 225 template<UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
rkennke@53962 226 void do_buffer_impl(void **buffer, size_t size) {
rkennke@53962 227 for (size_t i = 0; i < size; ++i) {
rkennke@53962 228 oop *p = (oop *) &buffer[i];
rkennke@53962 229 ShenandoahConcurrentMark::mark_through_ref<oop, UPDATE_REFS, STRING_DEDUP>(p, _heap, _queue, _mark_context);
rkennke@53962 230 }
rkennke@53962 231 }
rkennke@53962 232 };
rkennke@53962 233
rkennke@53962 234 template<class T, UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
rkennke@53962 235 inline void ShenandoahConcurrentMark::mark_through_ref(T *p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context) {
rkennke@53962 236 T o = RawAccess<>::oop_load(p);
rkennke@53962 237 if (!CompressedOops::is_null(o)) {
rkennke@53962 238 oop obj = CompressedOops::decode_not_null(o);
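// A rough summary of the modes (see the heap methods for the authoritative semantics):
// NONE leaves the slot untouched; RESOLVE follows the forwarding pointer without writing
// the slot back; SIMPLE writes the forwardee back without expecting contention; CONCURRENT
// uses maybe_update_with_forwarded_not_null, which tolerates racing mutator updates
// (see the note below).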
rkennke@53962 239 switch (UPDATE_REFS) {
rkennke@53962 240 case NONE:
rkennke@53962 241 break;
rkennke@53962 242 case RESOLVE:
rkennke@53962 243 obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
rkennke@53962 244 break;
rkennke@53962 245 case SIMPLE:
rkennke@53962 246 // We piggy-back reference updating onto the marking tasks.
rkennke@53962 247 obj = heap->update_with_forwarded_not_null(p, obj);
rkennke@53962 248 break;
rkennke@53962 249 case CONCURRENT:
rkennke@53962 250 obj = heap->maybe_update_with_forwarded_not_null(p, obj);
rkennke@53962 251 break;
rkennke@53962 252 default:
rkennke@53962 253 ShouldNotReachHere();
rkennke@53962 254 }
rkennke@53962 255
shade@56679 256 // Note: Only when concurrently updating references can obj be different
shade@56679 257 // (that is, really different, not just a from-/to-space copy of the same object)
shade@56679 258 // from the one we originally loaded. A mutator thread can beat us by writing something
shade@56679 259 // else into the location. In that case, we would mark through that updated value,
shade@56679 260 // on the off-chance it is not handled by other means (e.g. via SATB). However,
shade@56679 261 // if that write was NULL, we don't need to do anything else.
rkennke@53962 262 if (UPDATE_REFS != CONCURRENT || !CompressedOops::is_null(obj)) {
rkennke@53962 263 shenandoah_assert_not_forwarded(p, obj);
rkennke@53962 264 shenandoah_assert_not_in_cset_except(p, obj, heap->cancelled_gc());
rkennke@53962 265
rkennke@53962 266 if (mark_context->mark(obj)) {
rkennke@53962 267 bool pushed = q->push(ShenandoahMarkTask(obj));
rkennke@53962 268 assert(pushed, "overflow queue should always succeed pushing");
rkennke@53962 269
rkennke@53962 270 if ((STRING_DEDUP == ENQUEUE_DEDUP) && ShenandoahStringDedup::is_candidate(obj)) {
rkennke@53962 271 assert(ShenandoahStringDedup::is_enabled(), "Must be enabled");
rkennke@53962 272 ShenandoahStringDedup::enqueue_candidate(obj);
rkennke@53962 273 }
rkennke@53962 274 }
rkennke@53962 275
rkennke@53962 276 shenandoah_assert_marked(p, obj);
rkennke@53962 277 }
rkennke@53962 278 }
rkennke@53962 279 }
rkennke@53962 280
coleenp@54304 281 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP