annotate src/hotspot/share/gc/shared/plab.cpp @ 50590:4fa726f796f5

8202781: Fix typo in DiscoveredListIterator::complete_enqeue
Reviewed-by: kbarrett
author tschatzl
date Tue, 08 May 2018 16:49:20 +0200
parents d503911aa948
children d12828b7cd64
rev   line source
jprovino@30275 1 /*
aharlap@46290 2 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
jprovino@30275 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
jprovino@30275 4 *
jprovino@30275 5 * This code is free software; you can redistribute it and/or modify it
jprovino@30275 6 * under the terms of the GNU General Public License version 2 only, as
jprovino@30275 7 * published by the Free Software Foundation.
jprovino@30275 8 *
jprovino@30275 9 * This code is distributed in the hope that it will be useful, but WITHOUT
jprovino@30275 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
jprovino@30275 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
jprovino@30275 12 * version 2 for more details (a copy is included in the LICENSE file that
jprovino@30275 13 * accompanied this code).
jprovino@30275 14 *
jprovino@30275 15 * You should have received a copy of the GNU General Public License version
jprovino@30275 16 * 2 along with this work; if not, write to the Free Software Foundation,
jprovino@30275 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
jprovino@30275 18 *
jprovino@30275 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
jprovino@30275 20 * or visit www.oracle.com if you need additional information or have any
jprovino@30275 21 * questions.
jprovino@30275 22 *
jprovino@30275 23 */
jprovino@30275 24
jprovino@30275 25 #include "precompiled.hpp"
pliden@30764 26 #include "gc/shared/collectedHeap.hpp"
tschatzl@32378 27 #include "gc/shared/plab.inline.hpp"
pliden@30764 28 #include "gc/shared/threadLocalAllocBuffer.hpp"
brutisso@35061 29 #include "logging/log.hpp"
jprovino@30275 30 #include "oops/arrayOop.hpp"
jprovino@30275 31 #include "oops/oop.inline.hpp"
jprovino@30275 32
jprovino@30275 33 size_t PLAB::min_size() {
jprovino@30275 34 // Make sure that we return something that is larger than AlignmentReserve
jprovino@30275 35 return align_object_size(MAX2(MinTLABSize / HeapWordSize, (uintx)oopDesc::header_size())) + AlignmentReserve;
jprovino@30275 36 }
jprovino@30275 37
jprovino@30275 38 size_t PLAB::max_size() {
jprovino@30275 39 return ThreadLocalAllocBuffer::max_size();
jprovino@30275 40 }
jprovino@30275 41
jprovino@30275 42 PLAB::PLAB(size_t desired_plab_sz_) :
jprovino@30275 43 _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
tschatzl@30564 44 _end(NULL), _hard_end(NULL), _allocated(0), _wasted(0), _undo_wasted(0)
jprovino@30275 45 {
jprovino@30275 46 // arrayOopDesc::header_size depends on command line initialization.
jprovino@30275 47 AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? align_object_size(arrayOopDesc::header_size(T_INT)) : 0;
jprovino@30275 48 assert(min_size() > AlignmentReserve,
david@33105 49 "Minimum PLAB size " SIZE_FORMAT " must be larger than alignment reserve " SIZE_FORMAT " "
david@33105 50 "to be able to contain objects", min_size(), AlignmentReserve);
jprovino@30275 51 }
jprovino@30275 52
jprovino@30275 53 // If the minimum object size is greater than MinObjAlignment, we can
jprovino@30275 54 // end up with a shard at the end of the buffer that's smaller than
jprovino@30275 55 // the smallest object. We can't allow that because the buffer must
jprovino@30275 56 // look like it's full of objects when we retire it, so we make
jprovino@30275 57 // sure we have enough space for a filler int array object.
jprovino@30275 58 size_t PLAB::AlignmentReserve;
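// A worked sizing example for the reserve above (a sketch only; the exact
// numbers depend on flags and platform, so treat them as assumptions): on a
// 64-bit VM with default flags, MinTLABSize is 2K bytes = 256 words,
// oopDesc::header_size() is 2 words and MinObjAlignment is 1 word, so
//
//   AlignmentReserve  = align_object_size(arrayOopDesc::header_size(T_INT))  // a few words
//   PLAB::min_size()  = align_object_size(MAX2(256, 2)) + AlignmentReserve   // ~258 words
//
// which satisfies the constructor's assert that min_size() > AlignmentReserve:
// even the smallest PLAB has room for a real object plus a trailing filler
// int array when it is retired.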
jprovino@30275 59
jprovino@30275 60 void PLAB::flush_and_retire_stats(PLABStats* stats) {
jprovino@30275 61 // Retire the last allocation buffer.
jprovino@30275 62 size_t unused = retire_internal();
jprovino@30275 63
jprovino@30275 64 // Now flush the statistics.
jprovino@30275 65 stats->add_allocated(_allocated);
jprovino@30275 66 stats->add_wasted(_wasted);
tschatzl@30564 67 stats->add_undo_wasted(_undo_wasted);
jprovino@30275 68 stats->add_unused(unused);
jprovino@30275 69
jprovino@30275 70 // Since we have flushed the stats we need to clear the _allocated and _wasted
jprovino@30275 71 // fields in case somebody retains an instance of this over GCs. Not doing so
jprovino@30275 72 // will artificially inflate the values in the statistics.
tschatzl@30564 73 _allocated = 0;
tschatzl@30564 74 _wasted = 0;
tschatzl@30564 75 _undo_wasted = 0;
jprovino@30275 76 }
jprovino@30275 77
jprovino@30275 78 void PLAB::retire() {
jprovino@30275 79 _wasted += retire_internal();
jprovino@30275 80 }
jprovino@30275 81
jprovino@30275 82 size_t PLAB::retire_internal() {
jprovino@30275 83 size_t result = 0;
jprovino@30275 84 if (_top < _hard_end) {
jprovino@30275 85 CollectedHeap::fill_with_object(_top, _hard_end);
jprovino@30275 86 result += invalidate();
jprovino@30275 87 }
jprovino@30275 88 return result;
jprovino@30275 89 }
jprovino@30275 90
tschatzl@30564 91 void PLAB::add_undo_waste(HeapWord* obj, size_t word_sz) {
tschatzl@30564 92 CollectedHeap::fill_with_object(obj, word_sz);
tschatzl@30564 93 _undo_wasted += word_sz;
tschatzl@30564 94 }
tschatzl@30564 95
tschatzl@30564 96 void PLAB::undo_last_allocation(HeapWord* obj, size_t word_sz) {
tschatzl@30564 97 assert(pointer_delta(_top, _bottom) >= word_sz, "Bad undo");
tschatzl@30564 98 assert(pointer_delta(_top, obj) == word_sz, "Bad undo");
tschatzl@30564 99 _top = obj;
tschatzl@30564 100 }
tschatzl@30564 101
tschatzl@30564 102 void PLAB::undo_allocation(HeapWord* obj, size_t word_sz) {
tschatzl@30564 103 // Is the alloc in the current alloc buffer?
tschatzl@30564 104 if (contains(obj)) {
tschatzl@30564 105 assert(contains(obj + word_sz - 1),
tschatzl@30564 106 "should contain whole object");
tschatzl@30564 107 undo_last_allocation(obj, word_sz);
tschatzl@30564 108 } else {
tschatzl@30564 109 add_undo_waste(obj, word_sz);
tschatzl@30564 110 }
tschatzl@30564 111 }
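// Typical caller pattern for the undo path (a sketch, not code from this
// file): a GC worker speculatively allocates copy space from its PLAB and
// undoes the allocation if it loses the forwarding race. PLAB::allocate() is
// declared in plab.hpp; try_install_forwarding() below is a hypothetical
// stand-in for the collector-specific CAS on the object's mark word:
//
//   HeapWord* dest = plab->allocate(word_sz);
//   if (dest != NULL) {
//     Copy::aligned_disjoint_words((HeapWord*)obj, dest, word_sz);
//     if (!try_install_forwarding(obj, dest)) {
//       // Another worker copied the object first: give the words back.
//       plab->undo_allocation(dest, word_sz);
//     }
//   }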
tschatzl@30564 112
tschatzl@36390 113 void PLABStats::log_plab_allocation() {
tschatzl@36390 114 log_debug(gc, plab)("%s PLAB allocation: "
tschatzl@36390 115 "allocated: " SIZE_FORMAT "B, "
tschatzl@36390 116 "wasted: " SIZE_FORMAT "B, "
tschatzl@36390 117 "unused: " SIZE_FORMAT "B, "
tschatzl@36390 118 "used: " SIZE_FORMAT "B, "
tschatzl@36390 119 "undo waste: " SIZE_FORMAT "B",
tschatzl@36390 120 _description,
tschatzl@36390 121 _allocated * HeapWordSize,
tschatzl@36390 122 _wasted * HeapWordSize,
tschatzl@36390 123 _unused * HeapWordSize,
tschatzl@36390 124 used() * HeapWordSize,
tschatzl@36390 125 _undo_wasted * HeapWordSize);
tschatzl@36390 126 }
tschatzl@36390 127
tschatzl@36390 128 void PLABStats::log_sizing(size_t calculated_words, size_t net_desired_words) {
tschatzl@36390 129 log_debug(gc, plab)("%s sizing: "
tschatzl@36390 130 "calculated: " SIZE_FORMAT "B, "
tschatzl@36390 131 "actual: " SIZE_FORMAT "B",
tschatzl@36390 132 _description,
tschatzl@36390 133 calculated_words * HeapWordSize,
tschatzl@36390 134 net_desired_words * HeapWordSize);
tschatzl@36390 135 }
tschatzl@36390 136
sangheki@31632 137 // Calculates the PLAB size for the current number of GC worker threads.
sangheki@31632 138 size_t PLABStats::desired_plab_sz(uint no_of_gc_workers) {
stefank@46618 139 return align_object_size(MIN2(MAX2(min_size(), _desired_net_plab_sz / no_of_gc_workers), max_size()));
sangheki@31632 140 }
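// For example (illustrative numbers, not measurements): with
// _desired_net_plab_sz = 32768 words and no_of_gc_workers = 8, each worker
// is handed 32768 / 8 = 4096 words, clamped into [min_size(), max_size()]
// and then object-aligned.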
sangheki@31632 141
sangheki@31632 142 // Compute the desired PLAB size for one GC worker thread and latch the result
jprovino@30275 143 // for later use. This should be called once at the end of a parallel
jprovino@30275 144 // scavenge; it clears the sensor accumulators.
sangheki@31632 145 void PLABStats::adjust_desired_plab_sz() {
tschatzl@36390 146 log_plab_allocation();
tschatzl@36390 147
tschatzl@36390 148 if (!ResizePLAB) {
tschatzl@36390 149 // Clear accumulators for next round.
tschatzl@36390 150 reset();
tschatzl@36390 151 return;
tschatzl@36390 152 }
jprovino@30275 153
jprovino@30275 154 assert(is_object_aligned(max_size()) && min_size() <= max_size(),
jprovino@30275 155 "PLAB clipping computation may be incorrect");
jprovino@30275 156
aharlap@46290 157 assert(_allocated != 0 || _unused == 0,
aharlap@46290 158 "Inconsistency in PLAB stats: "
aharlap@46290 159 "_allocated: " SIZE_FORMAT ", "
aharlap@46290 160 "_wasted: " SIZE_FORMAT ", "
aharlap@46290 161 "_unused: " SIZE_FORMAT ", "
aharlap@46290 162 "_undo_wasted: " SIZE_FORMAT,
aharlap@46290 163 _allocated, _wasted, _unused, _undo_wasted);
jprovino@30275 164
aharlap@46290 165 size_t plab_sz = compute_desired_plab_sz();
aharlap@46290 166 // Take historical weighted average
aharlap@46290 167 _filter.sample(plab_sz);
aharlap@46290 168 _desired_net_plab_sz = MAX2(min_size(), (size_t)_filter.average());
aharlap@46290 169
aharlap@46290 170 log_sizing(plab_sz, _desired_net_plab_sz);
aharlap@46290 171 // Clear accumulators for next round
aharlap@46290 172 reset();
aharlap@46290 173 }
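// The filter above is an exponentially weighted moving average; assuming it
// weights the newest sample by PLABWeight percent (the numbers below are
// illustrative assumptions, not measurements):
//
//   // PLABWeight = 75, previous average = 4000 words, new sample = 8000 words
//   //   average' = 0.25 * 4000 + 0.75 * 8000 = 7000 words
//
// so _desired_net_plab_sz becomes MAX2(min_size(), 7000) for the next cycle.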
aharlap@46290 174
aharlap@46290 175 size_t PLABStats::compute_desired_plab_sz() {
aharlap@46290 176 size_t allocated = MAX2(_allocated, size_t(1));
aharlap@46290 177 double wasted_frac = (double)_unused / (double)allocated;
jprovino@30275 178 size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);
jprovino@30275 179 if (target_refills == 0) {
jprovino@30275 180 target_refills = 1;
jprovino@30275 181 }
aharlap@46290 182 size_t used = allocated - _wasted - _unused;
sangheki@31632 183 // Assumes a single GC worker thread.
sangheki@31632 184 size_t recent_plab_sz = used / target_refills;
aharlap@46290 185 return recent_plab_sz;
jprovino@30275 186 }
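// A worked example of the sizing math above (the flag values are the usual
// defaults, but treat them and the sample counts as assumptions): with
// _allocated = 100000 words, _unused = 40000, _wasted = 1000,
// TargetSurvivorRatio = 50 and TargetPLABWastePct = 10:
//
//   wasted_frac    = 40000 / 100000          = 0.4
//   target_refills = (size_t)(0.4 * 50 / 10) = 2
//   used           = 100000 - 1000 - 40000   = 59000
//   recent_plab_sz = 59000 / 2               = 29500 words
//
// i.e. the larger the fraction of PLAB space that goes unused, the more
// refills are targeted and the smaller the next desired PLAB size becomes.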