annotate src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp @ 5649:46d7652b223c

8026853: Prepare GC code for collector policy regression fix Summary: Cleanup related to the NewSize and MaxNewSize bugs Reviewed-by: tschatzl, jcoomes, ehelin
author jwilhelm
date Mon, 21 Oct 2013 18:56:20 +0200
parents f95d63e2154a
children 8f07aa079343
rev   line source
duke@0 1 /*
stefank@1879 2 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
trims@1472 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1472 20 * or visit www.oracle.com if you need additional information or have any
trims@1472 21 * questions.
duke@0 22 *
duke@0 23 */
duke@0 24
stefank@1879 25 #include "precompiled.hpp"
stefank@1879 26 #include "gc_implementation/parallelScavenge/asPSYoungGen.hpp"
stefank@1879 27 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
stefank@1879 28 #include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
stefank@1879 29 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
stefank@1879 30 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
stefank@1879 31 #include "gc_implementation/shared/gcUtil.hpp"
stefank@1879 32 #include "gc_implementation/shared/spaceDecorator.hpp"
stefank@1879 33 #include "oops/oop.inline.hpp"
stefank@1879 34 #include "runtime/java.hpp"
duke@0 35
duke@0 36 ASPSYoungGen::ASPSYoungGen(size_t init_byte_size,
duke@0 37 size_t minimum_byte_size,
duke@0 38 size_t byte_size_limit) :
duke@0 39 PSYoungGen(init_byte_size, minimum_byte_size, byte_size_limit),
duke@0 40 _gen_size_limit(byte_size_limit) {
duke@0 41 }
duke@0 42
duke@0 43
duke@0 44 ASPSYoungGen::ASPSYoungGen(PSVirtualSpace* vs,
duke@0 45 size_t init_byte_size,
duke@0 46 size_t minimum_byte_size,
duke@0 47 size_t byte_size_limit) :
duke@0 48 //PSYoungGen(init_byte_size, minimum_byte_size, byte_size_limit),
duke@0 49 PSYoungGen(vs->committed_size(), minimum_byte_size, byte_size_limit),
duke@0 50 _gen_size_limit(byte_size_limit) {
duke@0 51
duke@0 52 assert(vs->committed_size() == init_byte_size, "Cannot replace with");
duke@0 53
duke@0 54 _virtual_space = vs;
duke@0 55 }
duke@0 56
duke@0 57 void ASPSYoungGen::initialize_virtual_space(ReservedSpace rs,
duke@0 58 size_t alignment) {
duke@0 59 assert(_init_gen_size != 0, "Should have a finite size");
duke@0 60 _virtual_space = new PSVirtualSpaceHighToLow(rs, alignment);
duke@0 61 if (!_virtual_space->expand_by(_init_gen_size)) {
duke@0 62 vm_exit_during_initialization("Could not reserve enough space for "
duke@0 63 "object heap");
duke@0 64 }
duke@0 65 }
duke@0 66
duke@0 67 void ASPSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
duke@0 68 initialize_virtual_space(rs, alignment);
duke@0 69 initialize_work();
duke@0 70 }
duke@0 71
duke@0 72 size_t ASPSYoungGen::available_for_expansion() {
duke@0 73 size_t current_committed_size = virtual_space()->committed_size();
duke@0 74 assert((gen_size_limit() >= current_committed_size),
duke@0 75 "generation size limit is wrong");
duke@0 76 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@0 77 size_t result = gen_size_limit() - current_committed_size;
duke@0 78 size_t result_aligned = align_size_down(result, heap->young_gen_alignment());
duke@0 79 return result_aligned;
duke@0 80 }
duke@0 81
duke@0 82 // Return the number of bytes the young gen is willing give up.
duke@0 83 //
duke@0 84 // Future implementations could check the survivors and if to_space is in the
duke@0 85 // right place (below from_space), take a chunk from to_space.
duke@0 86 size_t ASPSYoungGen::available_for_contraction() {
duke@0 87 size_t uncommitted_bytes = virtual_space()->uncommitted_size();
duke@0 88 if (uncommitted_bytes != 0) {
duke@0 89 return uncommitted_bytes;
duke@0 90 }
duke@0 91
duke@0 92 if (eden_space()->is_empty()) {
duke@0 93 // Respect the minimum size for eden and for the young gen as a whole.
duke@0 94 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
jmasa@13 95 const size_t eden_alignment = heap->intra_heap_alignment();
duke@0 96 const size_t gen_alignment = heap->young_gen_alignment();
duke@0 97
duke@0 98 assert(eden_space()->capacity_in_bytes() >= eden_alignment,
duke@0 99 "Alignment is wrong");
duke@0 100 size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
duke@0 101 eden_avail = align_size_down(eden_avail, gen_alignment);
duke@0 102
duke@0 103 assert(virtual_space()->committed_size() >= min_gen_size(),
duke@0 104 "minimum gen size is wrong");
duke@0 105 size_t gen_avail = virtual_space()->committed_size() - min_gen_size();
duke@0 106 assert(virtual_space()->is_aligned(gen_avail), "not aligned");
duke@0 107
duke@0 108 const size_t max_contraction = MIN2(eden_avail, gen_avail);
duke@0 109 // See comment for ASPSOldGen::available_for_contraction()
duke@0 110 // for reasons the "increment" fraction is used.
duke@0 111 PSAdaptiveSizePolicy* policy = heap->size_policy();
duke@0 112 size_t result = policy->eden_increment_aligned_down(max_contraction);
duke@0 113 size_t result_aligned = align_size_down(result, gen_alignment);
duke@0 114 if (PrintAdaptiveSizePolicy && Verbose) {
duke@0 115 gclog_or_tty->print_cr("ASPSYoungGen::available_for_contraction: %d K",
duke@0 116 result_aligned/K);
duke@0 117 gclog_or_tty->print_cr(" max_contraction %d K", max_contraction/K);
duke@0 118 gclog_or_tty->print_cr(" eden_avail %d K", eden_avail/K);
duke@0 119 gclog_or_tty->print_cr(" gen_avail %d K", gen_avail/K);
duke@0 120 }
duke@0 121 return result_aligned;
duke@0 122 }
duke@0 123
duke@0 124 return 0;
duke@0 125 }
duke@0 126
duke@0 127 // The current implementation only considers to the end of eden.
duke@0 128 // If to_space is below from_space, to_space is not considered.
duke@0 129 // to_space can be.
duke@0 130 size_t ASPSYoungGen::available_to_live() {
duke@0 131 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
jmasa@13 132 const size_t alignment = heap->intra_heap_alignment();
duke@0 133
duke@0 134 // Include any space that is committed but is not in eden.
duke@0 135 size_t available = pointer_delta(eden_space()->bottom(),
duke@0 136 virtual_space()->low(),
duke@0 137 sizeof(char));
duke@0 138
duke@0 139 const size_t eden_capacity = eden_space()->capacity_in_bytes();
duke@0 140 if (eden_space()->is_empty() && eden_capacity > alignment) {
duke@0 141 available += eden_capacity - alignment;
duke@0 142 }
duke@0 143 return available;
duke@0 144 }
duke@0 145
duke@0 146 // Similar to PSYoungGen::resize_generation() but
duke@0 147 // allows sum of eden_size and 2 * survivor_size to exceed _max_gen_size
duke@0 148 // expands at the low end of the virtual space
duke@0 149 // moves the boundary between the generations in order to expand
duke@0 150 // some additional diagnostics
duke@0 151 // If no additional changes are required, this can be deleted
duke@0 152 // and the changes factored back into PSYoungGen::resize_generation().
duke@0 153 bool ASPSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
duke@0 154 const size_t alignment = virtual_space()->alignment();
duke@0 155 size_t orig_size = virtual_space()->committed_size();
duke@0 156 bool size_changed = false;
duke@0 157
duke@0 158 // There used to be a guarantee here that
duke@0 159 // (eden_size + 2*survivor_size) <= _max_gen_size
duke@0 160 // This requirement is enforced by the calculation of desired_size
duke@0 161 // below. It may not be true on entry since the size of the
duke@0 162 // eden_size is no bounded by the generation size.
duke@0 163
duke@0 164 assert(max_size() == reserved().byte_size(), "max gen size problem?");
duke@0 165 assert(min_gen_size() <= orig_size && orig_size <= max_size(),
duke@0 166 "just checking");
duke@0 167
duke@0 168 // Adjust new generation size
duke@0 169 const size_t eden_plus_survivors =
duke@0 170 align_size_up(eden_size + 2 * survivor_size, alignment);
duke@0 171 size_t desired_size = MAX2(MIN2(eden_plus_survivors, gen_size_limit()),
duke@0 172 min_gen_size());
duke@0 173 assert(desired_size <= gen_size_limit(), "just checking");
duke@0 174
duke@0 175 if (desired_size > orig_size) {
duke@0 176 // Grow the generation
duke@0 177 size_t change = desired_size - orig_size;
jmasa@263 178 HeapWord* prev_low = (HeapWord*) virtual_space()->low();
duke@0 179 if (!virtual_space()->expand_by(change)) {
duke@0 180 return false;
duke@0 181 }
jmasa@263 182 if (ZapUnusedHeapArea) {
jmasa@263 183 // Mangle newly committed space immediately because it
jmasa@263 184 // can be done here more simply that after the new
jmasa@263 185 // spaces have been computed.
jmasa@263 186 HeapWord* new_low = (HeapWord*) virtual_space()->low();
jmasa@263 187 assert(new_low < prev_low, "Did not grow");
jmasa@263 188
jmasa@263 189 MemRegion mangle_region(new_low, prev_low);
jmasa@263 190 SpaceMangler::mangle_region(mangle_region);
jmasa@263 191 }
duke@0 192 size_changed = true;
duke@0 193 } else if (desired_size < orig_size) {
duke@0 194 size_t desired_change = orig_size - desired_size;
duke@0 195
duke@0 196 // How much is available for shrinking.
duke@0 197 size_t available_bytes = limit_gen_shrink(desired_change);
duke@0 198 size_t change = MIN2(desired_change, available_bytes);
duke@0 199 virtual_space()->shrink_by(change);
duke@0 200 size_changed = true;
duke@0 201 } else {
duke@0 202 if (Verbose && PrintGC) {
duke@0 203 if (orig_size == gen_size_limit()) {
duke@0 204 gclog_or_tty->print_cr("ASPSYoung generation size at maximum: "
duke@0 205 SIZE_FORMAT "K", orig_size/K);
duke@0 206 } else if (orig_size == min_gen_size()) {
duke@0 207 gclog_or_tty->print_cr("ASPSYoung generation size at minium: "
duke@0 208 SIZE_FORMAT "K", orig_size/K);
duke@0 209 }
duke@0 210 }
duke@0 211 }
duke@0 212
duke@0 213 if (size_changed) {
duke@0 214 reset_after_change();
duke@0 215 if (Verbose && PrintGC) {
duke@0 216 size_t current_size = virtual_space()->committed_size();
duke@0 217 gclog_or_tty->print_cr("ASPSYoung generation size changed: "
duke@0 218 SIZE_FORMAT "K->" SIZE_FORMAT "K",
duke@0 219 orig_size/K, current_size/K);
duke@0 220 }
duke@0 221 }
duke@0 222
duke@0 223 guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
duke@0 224 virtual_space()->committed_size() == max_size(), "Sanity");
duke@0 225
duke@0 226 return true;
duke@0 227 }
duke@0 228
duke@0 229 // Similar to PSYoungGen::resize_spaces() but
duke@0 230 // eden always starts at the low end of the committed virtual space
duke@0 231 // current implementation does not allow holes between the spaces
duke@0 232 // _young_generation_boundary has to be reset because it changes,
duke@0 233 // so additional verification is done below.
jmasa@263 234
duke@0 235 void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
duke@0 236 size_t requested_survivor_size) {
jmasa@263 237 assert(UseAdaptiveSizePolicy, "sanity check");
duke@0 238 assert(requested_eden_size > 0 && requested_survivor_size > 0,
duke@0 239 "just checking");
duke@0 240
duke@0 241 space_invariants();
duke@0 242
duke@0 243 // We require eden and to space to be empty
duke@0 244 if ((!eden_space()->is_empty()) || (!to_space()->is_empty())) {
duke@0 245 return;
duke@0 246 }
duke@0 247
duke@0 248 if (PrintAdaptiveSizePolicy && Verbose) {
duke@0 249 gclog_or_tty->print_cr("PSYoungGen::resize_spaces(requested_eden_size: "
duke@0 250 SIZE_FORMAT
duke@0 251 ", requested_survivor_size: " SIZE_FORMAT ")",
duke@0 252 requested_eden_size, requested_survivor_size);
duke@0 253 gclog_or_tty->print_cr(" eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
duke@0 254 SIZE_FORMAT,
duke@0 255 eden_space()->bottom(),
duke@0 256 eden_space()->end(),
duke@0 257 pointer_delta(eden_space()->end(),
duke@0 258 eden_space()->bottom(),
duke@0 259 sizeof(char)));
duke@0 260 gclog_or_tty->print_cr(" from: [" PTR_FORMAT ".." PTR_FORMAT ") "
duke@0 261 SIZE_FORMAT,
duke@0 262 from_space()->bottom(),
duke@0 263 from_space()->end(),
duke@0 264 pointer_delta(from_space()->end(),
duke@0 265 from_space()->bottom(),
duke@0 266 sizeof(char)));
duke@0 267 gclog_or_tty->print_cr(" to: [" PTR_FORMAT ".." PTR_FORMAT ") "
duke@0 268 SIZE_FORMAT,
duke@0 269 to_space()->bottom(),
duke@0 270 to_space()->end(),
duke@0 271 pointer_delta( to_space()->end(),
duke@0 272 to_space()->bottom(),
duke@0 273 sizeof(char)));
duke@0 274 }
duke@0 275
duke@0 276 // There's nothing to do if the new sizes are the same as the current
duke@0 277 if (requested_survivor_size == to_space()->capacity_in_bytes() &&
duke@0 278 requested_survivor_size == from_space()->capacity_in_bytes() &&
duke@0 279 requested_eden_size == eden_space()->capacity_in_bytes()) {
duke@0 280 if (PrintAdaptiveSizePolicy && Verbose) {
duke@0 281 gclog_or_tty->print_cr(" capacities are the right sizes, returning");
duke@0 282 }
duke@0 283 return;
duke@0 284 }
duke@0 285
// Snapshot the current space boundaries; eden is pinned to the low end
// of the committed virtual space in this adaptive-size variant.
duke@0 286 char* eden_start = (char*)virtual_space()->low();
duke@0 287 char* eden_end = (char*)eden_space()->end();
duke@0 288 char* from_start = (char*)from_space()->bottom();
duke@0 289 char* from_end = (char*)from_space()->end();
duke@0 290 char* to_start = (char*)to_space()->bottom();
duke@0 291 char* to_end = (char*)to_space()->end();
duke@0 292
duke@0 293 assert(eden_start < from_start, "Cannot push into from_space");
duke@0 294
duke@0 295 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
jmasa@13 296 const size_t alignment = heap->intra_heap_alignment();
jmasa@263 297 const bool maintain_minimum =
jmasa@263 298 (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
duke@0 299
jmasa@263 300 bool eden_from_to_order = from_start < to_start;
duke@0 301 // Check whether from space is below to space
jmasa@263 302 if (eden_from_to_order) {
duke@0 303 // Eden, from, to
jmasa@263 304
duke@0 305 if (PrintAdaptiveSizePolicy && Verbose) {
duke@0 306 gclog_or_tty->print_cr(" Eden, from, to:");
duke@0 307 }
duke@0 308
duke@0 309 // Set eden
jmasa@263 310 // "requested_eden_size" is a goal for the size of eden
jmasa@263 311 // and may not be attainable. "eden_size" below is
jmasa@263 312 // calculated based on the location of from-space and
jmasa@263 313 // the goal for the size of eden. from-space is
jmasa@263 314 // fixed in place because it contains live data.
jmasa@263 315 // The calculation is done this way to avoid 32bit
jmasa@263 316 // overflow (i.e., eden_start + requested_eden_size
jmasa@263 317 // may be too large for representation in 32bits).
jmasa@263 318 size_t eden_size;
jmasa@263 319 if (maintain_minimum) {
jmasa@263 320 // Only make eden larger than the requested size if
jmasa@263 321 // the minimum size of the generation has to be maintained.
jmasa@263 322 // This could be done in general but policy at a higher
jmasa@263 323 // level is determining a requested size for eden and that
jmasa@263 324 // should be honored unless there is a fundamental reason.
jmasa@263 325 eden_size = pointer_delta(from_start,
jmasa@263 326 eden_start,
jmasa@263 327 sizeof(char));
jmasa@263 328 } else {
jmasa@263 329 eden_size = MIN2(requested_eden_size,
jmasa@263 330 pointer_delta(from_start, eden_start, sizeof(char)));
jmasa@263 331 }
jmasa@263 332
duke@0 333 eden_end = eden_start + eden_size;
jcoomes@1409 334 assert(eden_end >= eden_start, "addition overflowed");
duke@0 335
duke@0 336 // To may resize into from space as long as it is clear of live data.
duke@0 337 // From space must remain page aligned, though, so we need to do some
duke@0 338 // extra calculations.
duke@0 339
duke@0 340 // First calculate an optimal to-space
duke@0 341 to_end = (char*)virtual_space()->high();
duke@0 342 to_start = (char*)pointer_delta(to_end,
duke@0 343 (char*)requested_survivor_size,
duke@0 344 sizeof(char));
duke@0 345
duke@0 346 // Does the optimal to-space overlap from-space?
duke@0 347 if (to_start < (char*)from_space()->end()) {
duke@0 348 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@0 349
duke@0 350 // Calculate the minimum offset possible for from_end
duke@0 351 size_t from_size =
duke@0 352 pointer_delta(from_space()->top(), from_start, sizeof(char));
duke@0 353
duke@0 354 // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
duke@0 355 if (from_size == 0) {
duke@0 356 from_size = alignment;
duke@0 357 } else {
duke@0 358 from_size = align_size_up(from_size, alignment);
duke@0 359 }
duke@0 360
duke@0 361 from_end = from_start + from_size;
duke@0 362 assert(from_end > from_start, "addition overflow or from_size problem");
duke@0 363
duke@0 364 guarantee(from_end <= (char*)from_space()->end(),
duke@0 365 "from_end moved to the right");
duke@0 366
duke@0 367 // Now update to_start with the new from_end
duke@0 368 to_start = MAX2(from_end, to_start);
duke@0 369 }
duke@0 370
duke@0 371 guarantee(to_start != to_end, "to space is zero sized");
duke@0 372
duke@0 373 if (PrintAdaptiveSizePolicy && Verbose) {
duke@0 374 gclog_or_tty->print_cr(" [eden_start .. eden_end): "
duke@0 375 "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
duke@0 376 eden_start,
duke@0 377 eden_end,
duke@0 378 pointer_delta(eden_end, eden_start, sizeof(char)));
duke@0 379 gclog_or_tty->print_cr(" [from_start .. from_end): "
duke@0 380 "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
duke@0 381 from_start,
duke@0 382 from_end,
duke@0 383 pointer_delta(from_end, from_start, sizeof(char)));
duke@0 384 gclog_or_tty->print_cr(" [ to_start .. to_end): "
duke@0 385 "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
duke@0 386 to_start,
duke@0 387 to_end,
duke@0 388 pointer_delta( to_end, to_start, sizeof(char)));
duke@0 389 }
duke@0 390 } else {
duke@0 391 // Eden, to, from
duke@0 392 if (PrintAdaptiveSizePolicy && Verbose) {
duke@0 393 gclog_or_tty->print_cr(" Eden, to, from:");
duke@0 394 }
duke@0 395
duke@0 396 // To space gets priority over eden resizing. Note that we position
duke@0 397 // to space as if we were able to resize from space, even though from
duke@0 398 // space is not modified.
duke@0 399 // Giving eden priority was tried and gave poorer performance.
duke@0 400 to_end = (char*)pointer_delta(virtual_space()->high(),
duke@0 401 (char*)requested_survivor_size,
duke@0 402 sizeof(char));
// Don't let to-space extend into from-space, which holds live data.
duke@0 403 to_end = MIN2(to_end, from_start);
duke@0 404 to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
duke@0 405 sizeof(char));
duke@0 406 // if the space sizes are to be increased by several times then
duke@0 407 // 'to_start' will point beyond the young generation. In this case
duke@0 408 // 'to_start' should be adjusted.
duke@0 409 to_start = MAX2(to_start, eden_start + alignment);
duke@0 410
duke@0 411 // Compute how big eden can be, then adjust end.
jmasa@263 412 // See comments above on calculating eden_end.
jmasa@263 413 size_t eden_size;
jmasa@263 414 if (maintain_minimum) {
jmasa@263 415 eden_size = pointer_delta(to_start, eden_start, sizeof(char));
jmasa@263 416 } else {
jmasa@263 417 eden_size = MIN2(requested_eden_size,
jmasa@263 418 pointer_delta(to_start, eden_start, sizeof(char)));
jmasa@263 419 }
duke@0 420 eden_end = eden_start + eden_size;
jcoomes@1409 421 assert(eden_end >= eden_start, "addition overflowed");
duke@0 422
duke@0 423 // Don't let eden shrink down to 0 or less.
duke@0 424 eden_end = MAX2(eden_end, eden_start + alignment);
duke@0 425 to_start = MAX2(to_start, eden_end);
duke@0 426
duke@0 427 if (PrintAdaptiveSizePolicy && Verbose) {
duke@0 428 gclog_or_tty->print_cr(" [eden_start .. eden_end): "
duke@0 429 "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
duke@0 430 eden_start,
duke@0 431 eden_end,
duke@0 432 pointer_delta(eden_end, eden_start, sizeof(char)));
duke@0 433 gclog_or_tty->print_cr(" [ to_start .. to_end): "
duke@0 434 "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
duke@0 435 to_start,
duke@0 436 to_end,
duke@0 437 pointer_delta( to_end, to_start, sizeof(char)));
duke@0 438 gclog_or_tty->print_cr(" [from_start .. from_end): "
duke@0 439 "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
duke@0 440 from_start,
duke@0 441 from_end,
duke@0 442 pointer_delta(from_end, from_start, sizeof(char)));
duke@0 443 }
duke@0 444 }
duke@0 445
duke@0 446
// From-space holds live data, so it may only have grown outward.
duke@0 447 guarantee((HeapWord*)from_start <= from_space()->bottom(),
duke@0 448 "from start moved to the right");
duke@0 449 guarantee((HeapWord*)from_end >= from_space()->top(),
duke@0 450 "from end moved into live data");
duke@0 451 assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
duke@0 452 assert(is_object_aligned((intptr_t)from_start), "checking alignment");
duke@0 453 assert(is_object_aligned((intptr_t)to_start), "checking alignment");
duke@0 454
duke@0 455 MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
duke@0 456 MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
duke@0 457 MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);
duke@0 458
duke@0 459 // Let's make sure the call to initialize doesn't reset "top"!
duke@0 460 DEBUG_ONLY(HeapWord* old_from_top = from_space()->top();)
duke@0 461
duke@0 462 // For PrintAdaptiveSizePolicy block below
duke@0 463 size_t old_from = from_space()->capacity_in_bytes();
duke@0 464 size_t old_to = to_space()->capacity_in_bytes();
duke@0 465
jmasa@263 466 if (ZapUnusedHeapArea) {
jmasa@263 467 // NUMA is a special case because a numa space is not mangled
jmasa@263 468 // in order to not prematurely bind its address to memory to
jmasa@263 469 // the wrong memory (i.e., don't want the GC thread to first
jmasa@263 470 // touch the memory). The survivor spaces are not numa
jmasa@263 471 // spaces and are mangled.
jmasa@263 472 if (UseNUMA) {
jmasa@263 473 if (eden_from_to_order) {
jmasa@263 474 mangle_survivors(from_space(), fromMR, to_space(), toMR);
jmasa@263 475 } else {
jmasa@263 476 mangle_survivors(to_space(), toMR, from_space(), fromMR);
jmasa@263 477 }
jmasa@263 478 }
jmasa@263 479
jmasa@263 480 // If not mangling the spaces, do some checking to verify that
jmasa@263 481 // the spaces are already mangled.
jmasa@263 482 // The spaces should be correctly mangled at this point so
jmasa@263 483 // do some checking here. Note that they are not being mangled
jmasa@263 484 // in the calls to initialize().
jmasa@263 485 // Must check mangling before the spaces are reshaped. Otherwise,
jmasa@263 486 // the bottom or end of one space may have moved into an area
jmasa@263 487 // covered by another space and a failure of the check may
jmasa@263 488 // not correctly indicate which space is not properly mangled.
jmasa@263 489
jmasa@263 490 HeapWord* limit = (HeapWord*) virtual_space()->high();
jmasa@263 491 eden_space()->check_mangled_unused_area(limit);
jmasa@263 492 from_space()->check_mangled_unused_area(limit);
jmasa@263 493 to_space()->check_mangled_unused_area(limit);
jmasa@263 494 }
jmasa@263 495 // When an existing space is being initialized, it is not
jmasa@263 496 // mangled because the space has been previously mangled.
jmasa@263 497 eden_space()->initialize(edenMR,
jmasa@263 498 SpaceDecorator::Clear,
jmasa@263 499 SpaceDecorator::DontMangle);
jmasa@263 500 to_space()->initialize(toMR,
jmasa@263 501 SpaceDecorator::Clear,
jmasa@263 502 SpaceDecorator::DontMangle);
jmasa@263 503 from_space()->initialize(fromMR,
jmasa@263 504 SpaceDecorator::DontClear,
jmasa@263 505 SpaceDecorator::DontMangle);
jmasa@263 506
// Eden's bottom may have moved; republish the boundary the scavenger
// uses to distinguish young-gen addresses.
duke@0 507 PSScavenge::set_young_generation_boundary(eden_space()->bottom());
duke@0 508
duke@0 509 assert(from_space()->top() == old_from_top, "from top changed!");
duke@0 510
duke@0 511 if (PrintAdaptiveSizePolicy) {
duke@0 512 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@0 513 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@0 514
duke@0 515 gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
duke@0 516 "collection: %d "
duke@0 517 "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
duke@0 518 "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
duke@0 519 heap->total_collections(),
duke@0 520 old_from, old_to,
duke@0 521 from_space()->capacity_in_bytes(),
duke@0 522 to_space()->capacity_in_bytes());
duke@0 523 gclog_or_tty->cr();
duke@0 524 }
duke@0 525 space_invariants();
duke@0 526 }
duke@0 527 void ASPSYoungGen::reset_after_change() {
duke@0 528 assert_locked_or_safepoint(Heap_lock);
duke@0 529
duke@0 530 _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
duke@0 531 (HeapWord*)virtual_space()->high_boundary());
duke@0 532 PSScavenge::reference_processor()->set_span(_reserved);
duke@0 533
duke@0 534 HeapWord* new_eden_bottom = (HeapWord*)virtual_space()->low();
duke@0 535 HeapWord* eden_bottom = eden_space()->bottom();
duke@0 536 if (new_eden_bottom != eden_bottom) {
duke@0 537 MemRegion eden_mr(new_eden_bottom, eden_space()->end());
jmasa@263 538 eden_space()->initialize(eden_mr,
jmasa@263 539 SpaceDecorator::Clear,
jmasa@263 540 SpaceDecorator::Mangle);
duke@0 541 PSScavenge::set_young_generation_boundary(eden_space()->bottom());
duke@0 542 }
duke@0 543 MemRegion cmr((HeapWord*)virtual_space()->low(),
duke@0 544 (HeapWord*)virtual_space()->high());
duke@0 545 Universe::heap()->barrier_set()->resize_covered_region(cmr);
duke@0 546
duke@0 547 space_invariants();
duke@0 548 }