annotate src/share/vm/runtime/virtualspace.cpp @ 6000:58c34ffeff58

7102489: RFE: cleanup jlong typedef on __APPLE__ and _LP64 systems. Summary: Define jlong as long on all LP64 platforms and add JLONG_FORMAT macro. Reviewed-by: dholmes, coleenp, mikael, kvn
author hseigel
date Wed, 19 Apr 2017 06:00:00 +0100
parents de5e8c8a9b87
children 72453885979f
rev   line source
duke@0 1 /*
dcubed@4744 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
trims@1563 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1563 20 * or visit www.oracle.com if you need additional information or have any
trims@1563 21 * questions.
duke@0 22 *
duke@0 23 */
duke@0 24
stefank@1992 25 #include "precompiled.hpp"
stefank@1992 26 #include "oops/markOop.hpp"
stefank@1992 27 #include "oops/oop.inline.hpp"
stefank@1992 28 #include "runtime/virtualspace.hpp"
zgu@4135 29 #include "services/memTracker.hpp"
stefank@1992 30 #ifdef TARGET_OS_FAMILY_linux
stefank@1992 31 # include "os_linux.inline.hpp"
stefank@1992 32 #endif
stefank@1992 33 #ifdef TARGET_OS_FAMILY_solaris
stefank@1992 34 # include "os_solaris.inline.hpp"
stefank@1992 35 #endif
stefank@1992 36 #ifdef TARGET_OS_FAMILY_windows
stefank@1992 37 # include "os_windows.inline.hpp"
stefank@1992 38 #endif
andrew@5935 39 #ifdef TARGET_OS_FAMILY_aix
andrew@5935 40 # include "os_aix.inline.hpp"
andrew@5935 41 #endif
never@3009 42 #ifdef TARGET_OS_FAMILY_bsd
never@3009 43 # include "os_bsd.inline.hpp"
never@3009 44 #endif
duke@0 45
duke@0 46
duke@0 47 // ReservedSpace
stefank@5112 48
stefank@5112 49 // Dummy constructor
stefank@5112 50 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
stefank@5112 51 _alignment(0), _special(false), _executable(false) {
stefank@5112 52 }
stefank@5112 53
duke@0 54 ReservedSpace::ReservedSpace(size_t size) {
stefank@5112 55 size_t page_size = os::page_size_for_region(size, size, 1);
stefank@5112 56 bool large_pages = page_size != (size_t)os::vm_page_size();
stefank@5112 57 // Don't force the alignment to be large page aligned,
stefank@5112 58 // since that will waste memory.
stefank@5112 59 size_t alignment = os::vm_allocation_granularity();
stefank@5112 60 initialize(size, alignment, large_pages, NULL, 0, false);
duke@0 61 }
duke@0 62
duke@0 63 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
coleenp@237 64 bool large,
coleenp@237 65 char* requested_address,
coleenp@237 66 const size_t noaccess_prefix) {
coleenp@237 67 initialize(size+noaccess_prefix, alignment, large, requested_address,
coleenp@694 68 noaccess_prefix, false);
coleenp@694 69 }
coleenp@694 70
coleenp@694 71 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
coleenp@694 72 bool large,
coleenp@694 73 bool executable) {
coleenp@694 74 initialize(size, alignment, large, NULL, 0, executable);
duke@0 75 }
duke@0 76
duke@0 77 char *
duke@0 78 ReservedSpace::align_reserved_region(char* addr, const size_t len,
duke@0 79 const size_t prefix_size,
duke@0 80 const size_t prefix_align,
duke@0 81 const size_t suffix_size,
duke@0 82 const size_t suffix_align)
duke@0 83 {
duke@0 84 assert(addr != NULL, "sanity");
duke@0 85 const size_t required_size = prefix_size + suffix_size;
duke@0 86 assert(len >= required_size, "len too small");
duke@0 87
duke@0 88 const size_t s = size_t(addr);
johnc@2819 89 const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
duke@0 90 const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;
duke@0 91
duke@0 92 if (len < beg_delta + required_size) {
duke@0 93 return NULL; // Cannot do proper alignment.
duke@0 94 }
duke@0 95 const size_t end_delta = len - (beg_delta + required_size);
duke@0 96
duke@0 97 if (beg_delta != 0) {
jcoomes@4895 98 os::release_or_uncommit_partial_region(addr, beg_delta);
duke@0 99 }
duke@0 100
duke@0 101 if (end_delta != 0) {
duke@0 102 char* release_addr = (char*) (s + beg_delta + required_size);
jcoomes@4895 103 os::release_or_uncommit_partial_region(release_addr, end_delta);
duke@0 104 }
duke@0 105
duke@0 106 return (char*) (s + beg_delta);
duke@0 107 }
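// Illustrative walk-through of the arithmetic above, with hypothetical
// numbers (not taken from the source): addr = 0x20400, len = 0x15000,
// prefix_size = 0x1000, suffix_size = 0x10000, suffix_align = 0x4000.
//
//   beg_ofs   = (0x20400 + 0x1000) & 0x3fff  = 0x1400
//   beg_delta = 0x4000 - 0x1400              = 0x2c00
//   end_delta = 0x15000 - (0x2c00 + 0x11000) = 0x1400
//
// The leading 0x2c00 and trailing 0x1400 bytes are released (or uncommitted,
// where partial release is unsupported), and 0x23000 is returned, so that
// result + prefix_size == 0x24000 is suffix_align-aligned.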
duke@0 108
jcoomes@4895 109 void ReservedSpace::set_raw_base_and_size(char * const raw_base,
jcoomes@4895 110 size_t raw_size) {
jcoomes@4895 111 assert(raw_base == NULL || !os::can_release_partial_region(), "sanity");
jcoomes@4895 112 _raw_base = raw_base;
jcoomes@4895 113 _raw_size = raw_size;
jcoomes@4895 114 }
jcoomes@4895 115
jcoomes@4895 116 // On some systems (e.g., Windows), the address returned by os::reserve_memory()
jcoomes@4895 117 // is the only addr that can be passed to os::release_memory(). If alignment
jcoomes@4895 118 // was done by this class, that original address is _raw_base.
jcoomes@4895 119 void ReservedSpace::release_memory(char* default_addr, size_t default_size) {
jcoomes@4895 120 bool ok;
jcoomes@4895 121 if (_raw_base == NULL) {
jcoomes@4895 122 ok = os::release_memory(default_addr, default_size);
jcoomes@4895 123 } else {
jcoomes@4895 124 assert(!os::can_release_partial_region(), "sanity");
jcoomes@4895 125 ok = os::release_memory(_raw_base, _raw_size);
jcoomes@4895 126 }
jcoomes@4895 127 if (!ok) {
jcoomes@4895 128 fatal("os::release_memory failed");
jcoomes@4895 129 }
jcoomes@4895 130 set_raw_base_and_size(NULL, 0);
jcoomes@4895 131 }
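// For example (hypothetical values): on a system without partial release,
// reserve_and_align() may record _raw_base = 0x20000000 and
// _raw_size = 0x15000 while handing out an aligned _base = 0x20002c00; the
// eventual release must then go through the recorded raw values, which is
// what the branch above does.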
jcoomes@4895 132
duke@0 133 char* ReservedSpace::reserve_and_align(const size_t reserve_size,
duke@0 134 const size_t prefix_size,
duke@0 135 const size_t prefix_align,
duke@0 136 const size_t suffix_size,
duke@0 137 const size_t suffix_align)
duke@0 138 {
duke@0 139 assert(reserve_size > prefix_size + suffix_size, "should not be here");
duke@0 140
duke@0 141 char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
duke@0 142 if (raw_addr == NULL) return NULL;
duke@0 143
duke@0 144 char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
duke@0 145 prefix_align, suffix_size,
duke@0 146 suffix_align);
duke@0 147 if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
duke@0 148 fatal("os::release_memory failed");
duke@0 149 }
duke@0 150
jcoomes@4895 151 if (!os::can_release_partial_region()) {
jcoomes@4895 152 set_raw_base_and_size(raw_addr, reserve_size);
jcoomes@4895 153 }
jcoomes@4895 154
duke@0 155 #ifdef ASSERT
duke@0 156 if (result != NULL) {
duke@0 157 const size_t raw = size_t(raw_addr);
duke@0 158 const size_t res = size_t(result);
duke@0 159 assert(res >= raw, "alignment decreased start addr");
duke@0 160 assert(res + prefix_size + suffix_size <= raw + reserve_size,
duke@0 161 "alignment increased end addr");
johnc@2819 162 assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
johnc@2819 163 assert(((res + prefix_size) & (suffix_align - 1)) == 0,
duke@0 164 "bad alignment of suffix");
duke@0 165 }
duke@0 166 #endif
duke@0 167
duke@0 168 return result;
duke@0 169 }
duke@0 170
kvn@1633 171 // Helper method.
jcoomes@4895 172 bool ReservedSpace::failed_to_reserve_as_requested(char* base,
jcoomes@4895 173 char* requested_address,
jcoomes@4895 174 const size_t size,
jcoomes@4895 175 bool special)
kvn@1633 176 {
kvn@1633 177 if (base == requested_address || requested_address == NULL)
kvn@1633 178 return false; // did not fail
kvn@1633 179
kvn@1633 180 if (base != NULL) {
kvn@1633 181 // A different reserve address may be acceptable in other cases,
kvn@1633 182 // but for compressed oops the heap should be at the requested address.
kvn@1633 183 assert(UseCompressedOops, "currently requested address used only for compressed oops");
kvn@1633 184 if (PrintCompressedOopsMode) {
kvn@1633 185 tty->cr();
johnc@2819 186 tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
kvn@1633 187 }
kvn@1633 188 // OS ignored requested address. Try different address.
kvn@1633 189 if (special) {
kvn@1633 190 if (!os::release_memory_special(base, size)) {
kvn@1633 191 fatal("os::release_memory_special failed");
kvn@1633 192 }
kvn@1633 193 } else {
jcoomes@4895 194 release_memory(base, size);
kvn@1633 195 }
kvn@1633 196 }
kvn@1633 197 return true;
kvn@1633 198 }
kvn@1633 199
duke@0 200 ReservedSpace::ReservedSpace(const size_t prefix_size,
duke@0 201 const size_t prefix_align,
duke@0 202 const size_t suffix_size,
coleenp@237 203 const size_t suffix_align,
kvn@680 204 char* requested_address,
coleenp@237 205 const size_t noaccess_prefix)
duke@0 206 {
duke@0 207 assert(prefix_size != 0, "sanity");
duke@0 208 assert(prefix_align != 0, "sanity");
duke@0 209 assert(suffix_size != 0, "sanity");
duke@0 210 assert(suffix_align != 0, "sanity");
johnc@2819 211 assert((prefix_size & (prefix_align - 1)) == 0,
duke@0 212 "prefix_size not divisible by prefix_align");
johnc@2819 213 assert((suffix_size & (suffix_align - 1)) == 0,
duke@0 214 "suffix_size not divisible by suffix_align");
johnc@2819 215 assert((suffix_align & (prefix_align - 1)) == 0,
duke@0 216 "suffix_align not divisible by prefix_align");
duke@0 217
kvn@1633 218 // Assert that if noaccess_prefix is used, it is the same as prefix_align.
kvn@1633 219 assert(noaccess_prefix == 0 ||
kvn@1633 220 noaccess_prefix == prefix_align, "noaccess prefix wrong");
kvn@1633 221
jcoomes@4895 222 set_raw_base_and_size(NULL, 0);
jcoomes@4895 223
coleenp@237 224 // Add in noaccess_prefix to prefix_size.
coleenp@237 225 const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
coleenp@237 226 const size_t size = adjusted_prefix_size + suffix_size;
coleenp@237 227
duke@0 228 // On systems where the entire region has to be reserved and committed up
duke@0 229 // front, the compound alignment normally done by this method is unnecessary.
duke@0 230 const bool try_reserve_special = UseLargePages &&
duke@0 231 prefix_align == os::large_page_size();
duke@0 232 if (!os::can_commit_large_page_memory() && try_reserve_special) {
coleenp@694 233 initialize(size, prefix_align, true, requested_address, noaccess_prefix,
coleenp@694 234 false);
duke@0 235 return;
duke@0 236 }
duke@0 237
duke@0 238 _base = NULL;
duke@0 239 _size = 0;
duke@0 240 _alignment = 0;
duke@0 241 _special = false;
coleenp@237 242 _noaccess_prefix = 0;
coleenp@694 243 _executable = false;
coleenp@237 244
duke@0 245 // Optimistically try to reserve the exact size needed.
kvn@680 246 char* addr;
kvn@680 247 if (requested_address != 0) {
kvn@1633 248 requested_address -= noaccess_prefix; // adjust address
kvn@1633 249 assert(requested_address != NULL, "huge noaccess prefix?");
kvn@1633 250 addr = os::attempt_reserve_memory_at(size, requested_address);
kvn@1633 251 if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
kvn@1633 252 // OS ignored requested address. Try different address.
kvn@1633 253 addr = NULL;
kvn@1633 254 }
kvn@680 255 } else {
kvn@680 256 addr = os::reserve_memory(size, NULL, prefix_align);
kvn@680 257 }
duke@0 258 if (addr == NULL) return;
duke@0 259
duke@0 260 // Check whether the result has the needed alignment (unlikely unless
johnc@2819 261 // prefix_align < suffix_align).
johnc@2819 262 const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
duke@0 263 if (ofs != 0) {
duke@0 264 // Wrong alignment. Release, allocate more space and do manual alignment.
duke@0 265 //
duke@0 266 // On most operating systems, another allocation with a somewhat larger size
duke@0 267 // will return an address "close to" that of the previous allocation. The
duke@0 268 // result is often the same address (if the kernel hands out virtual
duke@0 269 // addresses from low to high), or an address that is offset by the increase
duke@0 270 // in size. Exploit that to minimize the amount of extra space requested.
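// Worked example (hypothetical numbers): with suffix_align = 0x1000 and
// ofs = 0x400, extra = MAX2(0x400, 0xc00) = 0xc00 bytes are added. Whether
// the kernel returns the same base (the start must slide up by 0xc00) or a
// base shifted down by the size increase (the start must slide up by at most
// 0x800), the slack is sufficient, so the over-reservation stays below one
// suffix_align.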
jcoomes@4895 271 release_memory(addr, size);
duke@0 272
duke@0 273 const size_t extra = MAX2(ofs, suffix_align - ofs);
coleenp@237 274 addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align,
duke@0 275 suffix_size, suffix_align);
duke@0 276 if (addr == NULL) {
duke@0 277 // Try an even larger region. If this fails, address space is exhausted.
coleenp@237 278 addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
duke@0 279 prefix_align, suffix_size, suffix_align);
duke@0 280 }
johnc@2819 281
johnc@2819 282 if (requested_address != 0 &&
johnc@2819 283 failed_to_reserve_as_requested(addr, requested_address, size, false)) {
johnc@2819 284 // As a result of the alignment constraints, the allocated addr differs
johnc@2819 285 // from the requested address. Return back to the caller who can
johnc@2819 286 // take remedial action (like try again without a requested address).
johnc@2819 287 assert(_base == NULL, "should be");
johnc@2819 288 return;
johnc@2819 289 }
duke@0 290 }
duke@0 291
duke@0 292 _base = addr;
duke@0 293 _size = size;
duke@0 294 _alignment = prefix_align;
coleenp@237 295 _noaccess_prefix = noaccess_prefix;
duke@0 296 }
duke@0 297
duke@0 298 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
coleenp@237 299 char* requested_address,
coleenp@694 300 const size_t noaccess_prefix,
coleenp@694 301 bool executable) {
duke@0 302 const size_t granularity = os::vm_allocation_granularity();
johnc@2819 303 assert((size & (granularity - 1)) == 0,
duke@0 304 "size not aligned to os::vm_allocation_granularity()");
johnc@2819 305 assert((alignment & (granularity - 1)) == 0,
duke@0 306 "alignment not aligned to os::vm_allocation_granularity()");
duke@0 307 assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
duke@0 308 "not a power of 2");
duke@0 309
jcoomes@4895 310 set_raw_base_and_size(NULL, 0);
jcoomes@4895 311
johnc@2819 312 alignment = MAX2(alignment, (size_t)os::vm_page_size());
johnc@2819 313
johnc@2819 314 // Assert that if noaccess_prefix is used, it is the same as alignment.
johnc@2819 315 assert(noaccess_prefix == 0 ||
johnc@2819 316 noaccess_prefix == alignment, "noaccess prefix wrong");
johnc@2819 317
duke@0 318 _base = NULL;
duke@0 319 _size = 0;
duke@0 320 _special = false;
coleenp@694 321 _executable = executable;
duke@0 322 _alignment = 0;
coleenp@237 323 _noaccess_prefix = 0;
duke@0 324 if (size == 0) {
duke@0 325 return;
duke@0 326 }
duke@0 327
duke@0 328 // If OS doesn't support demand paging for large page memory, we need
duke@0 329 // to use reserve_memory_special() to reserve and pin the entire region.
duke@0 330 bool special = large && !os::can_commit_large_page_memory();
duke@0 331 char* base = NULL;
duke@0 332
kvn@1633 333 if (requested_address != 0) {
kvn@1633 334 requested_address -= noaccess_prefix; // adjust requested address
kvn@1633 335 assert(requested_address != NULL, "huge noaccess prefix?");
kvn@1633 336 }
kvn@1633 337
duke@0 338 if (special) {
duke@0 339
stefank@5112 340 base = os::reserve_memory_special(size, alignment, requested_address, executable);
duke@0 341
duke@0 342 if (base != NULL) {
kvn@1633 343 if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
kvn@1633 344 // OS ignored requested address. Try different address.
kvn@1633 345 return;
kvn@1633 346 }
stefank@5112 347 // Check alignment constraints.
johnc@2819 348 assert((uintptr_t) base % alignment == 0,
stefank@5112 349 err_msg("Large pages returned a non-aligned address, base: "
stefank@5112 350 PTR_FORMAT " alignment: " PTR_FORMAT,
stefank@5112 351 base, (void*)(uintptr_t)alignment));
duke@0 352 _special = true;
duke@0 353 } else {
duke@0 354 // failed; try to reserve regular memory below
kvn@1633 355 if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
kvn@1633 356 !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
kvn@1633 357 if (PrintCompressedOopsMode) {
kvn@1633 358 tty->cr();
kvn@1633 359 tty->print_cr("Reserve regular memory without large pages.");
kvn@1633 360 }
kvn@1633 361 }
duke@0 362 }
duke@0 363 }
duke@0 364
duke@0 365 if (base == NULL) {
duke@0 366 // Optimistically assume that the OS returns an aligned base pointer.
duke@0 367 // When reserving a large address range, most OSes seem to align to at
duke@0 368 // least 64K.
duke@0 369
duke@0 370 // If the memory was requested at a particular address, use
duke@0 371 // os::attempt_reserve_memory_at() to avoid overmapping something
duke@0 372 // important. If available space is not detected, return NULL.
duke@0 373
duke@0 374 if (requested_address != 0) {
kvn@1633 375 base = os::attempt_reserve_memory_at(size, requested_address);
kvn@1633 376 if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
kvn@1633 377 // OS ignored requested address. Try different address.
kvn@1633 378 base = NULL;
kvn@1633 379 }
duke@0 380 } else {
duke@0 381 base = os::reserve_memory(size, NULL, alignment);
duke@0 382 }
duke@0 383
duke@0 384 if (base == NULL) return;
duke@0 385
duke@0 386 // Check alignment constraints
johnc@2819 387 if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
duke@0 388 // Base not aligned, retry
jcoomes@4895 389 release_memory(base, size);
jcoomes@4895 390
brutisso@3760 391 // Make sure that size is aligned
duke@0 392 size = align_size_up(size, alignment);
brutisso@3760 393 base = os::reserve_memory_aligned(size, alignment);
johnc@2819 394
johnc@2819 395 if (requested_address != 0 &&
johnc@2819 396 failed_to_reserve_as_requested(base, requested_address, size, false)) {
johnc@2819 397 // As a result of the alignment constraints, the allocated base differs
johnc@2819 398 // from the requested address. Return back to the caller who can
johnc@2819 399 // take remedial action (like try again without a requested address).
johnc@2819 400 assert(_base == NULL, "should be");
johnc@2819 401 return;
johnc@2819 402 }
duke@0 403 }
duke@0 404 }
duke@0 405 // Done
duke@0 406 _base = base;
duke@0 407 _size = size;
johnc@2819 408 _alignment = alignment;
coleenp@237 409 _noaccess_prefix = noaccess_prefix;
coleenp@237 410
coleenp@237 411 // Assert that if noaccess_prefix is used, it is the same as alignment.
coleenp@237 412 assert(noaccess_prefix == 0 ||
coleenp@237 413 noaccess_prefix == _alignment, "noaccess prefix wrong");
duke@0 414
duke@0 415 assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
duke@0 416 "area must be distinguisable from marks for mark-sweep");
duke@0 417 assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
duke@0 418 "area must be distinguisable from marks for mark-sweep");
duke@0 419 }
duke@0 420
duke@0 421
duke@0 422 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
coleenp@694 423 bool special, bool executable) {
duke@0 424 assert((size % os::vm_allocation_granularity()) == 0,
duke@0 425 "size not allocation aligned");
duke@0 426 _base = base;
duke@0 427 _size = size;
jcoomes@4895 428 set_raw_base_and_size(NULL, 0);
duke@0 429 _alignment = alignment;
coleenp@237 430 _noaccess_prefix = 0;
duke@0 431 _special = special;
coleenp@694 432 _executable = executable;
duke@0 433 }
duke@0 434
duke@0 435
duke@0 436 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
duke@0 437 bool split, bool realloc) {
duke@0 438 assert(partition_size <= size(), "partition failed");
duke@0 439 if (split) {
coleenp@694 440 os::split_reserved_memory(base(), size(), partition_size, realloc);
duke@0 441 }
coleenp@694 442 ReservedSpace result(base(), partition_size, alignment, special(),
coleenp@694 443 executable());
duke@0 444 return result;
duke@0 445 }
duke@0 446
duke@0 447
duke@0 448 ReservedSpace
duke@0 449 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
duke@0 450 assert(partition_size <= size(), "partition failed");
duke@0 451 ReservedSpace result(base() + partition_size, size() - partition_size,
coleenp@694 452 alignment, special(), executable());
duke@0 453 return result;
duke@0 454 }
duke@0 455
duke@0 456
duke@0 457 size_t ReservedSpace::page_align_size_up(size_t size) {
duke@0 458 return align_size_up(size, os::vm_page_size());
duke@0 459 }
duke@0 460
duke@0 461
duke@0 462 size_t ReservedSpace::page_align_size_down(size_t size) {
duke@0 463 return align_size_down(size, os::vm_page_size());
duke@0 464 }
duke@0 465
duke@0 466
duke@0 467 size_t ReservedSpace::allocation_align_size_up(size_t size) {
duke@0 468 return align_size_up(size, os::vm_allocation_granularity());
duke@0 469 }
duke@0 470
duke@0 471
duke@0 472 size_t ReservedSpace::allocation_align_size_down(size_t size) {
duke@0 473 return align_size_down(size, os::vm_allocation_granularity());
duke@0 474 }
duke@0 475
duke@0 476
duke@0 477 void ReservedSpace::release() {
duke@0 478 if (is_reserved()) {
coleenp@237 479 char *real_base = _base - _noaccess_prefix;
coleenp@237 480 const size_t real_size = _size + _noaccess_prefix;
duke@0 481 if (special()) {
coleenp@237 482 os::release_memory_special(real_base, real_size);
duke@0 483 } else {
jcoomes@4895 484 release_memory(real_base, real_size);
duke@0 485 }
duke@0 486 _base = NULL;
duke@0 487 _size = 0;
coleenp@237 488 _noaccess_prefix = 0;
duke@0 489 _special = false;
coleenp@694 490 _executable = false;
duke@0 491 }
duke@0 492 }
duke@0 493
coleenp@237 494 void ReservedSpace::protect_noaccess_prefix(const size_t size) {
kvn@1633 495 assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
coleenp@3436 496 (Universe::narrow_oop_base() != NULL) &&
kvn@1633 497 Universe::narrow_oop_use_implicit_null_checks()),
kvn@1633 498 "noaccess_prefix should be used only with non zero based compressed oops");
kvn@1633 499
kvn@1633 500 // If there is no noaccess prefix, return.
coleenp@237 501 if (_noaccess_prefix == 0) return;
coleenp@237 502
coleenp@237 503 assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
coleenp@237 504 "must be at least page size big");
coleenp@237 505
coleenp@237 506 // Protect memory at the base of the allocated region.
coleenp@237 507 // If special, the page was committed (only matters on Windows)
coleenp@237 508 if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
coleenp@237 509 _special)) {
coleenp@237 510 fatal("cannot protect protection page");
coleenp@237 511 }
kvn@1633 512 if (PrintCompressedOopsMode) {
kvn@1633 513 tty->cr();
kvn@1633 514 tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
kvn@1633 515 }
coleenp@237 516
coleenp@237 517 _base += _noaccess_prefix;
coleenp@237 518 _size -= _noaccess_prefix;
coleenp@237 519 assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
coleenp@237 520 "must be exactly of required size and alignment");
coleenp@237 521 }
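// A sketch of why the protected prefix enables implicit null checks (this is
// the mechanism the asserts above refer to, not new behavior): with a
// non-zero narrow oop base, decoding a null narrow oop yields the heap base
// itself, which now points into the protected prefix. A load through it
// faults, and the signal handler turns the fault into the null-pointer
// exception path, so compiled code needs no explicit null test.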
coleenp@237 522
coleenp@237 523 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
coleenp@237 524 bool large, char* requested_address) :
coleenp@237 525 ReservedSpace(size, alignment, large,
coleenp@237 526 requested_address,
kvn@680 527 (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
kvn@680 528 Universe::narrow_oop_use_implicit_null_checks()) ?
coleenp@328 529 lcm(os::vm_page_size(), alignment) : 0) {
zgu@4135 530 if (base() > 0) {
zgu@4135 531 MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
zgu@4135 532 }
zgu@4135 533
coleenp@237 534 // Only reserved space for the java heap should have a noaccess_prefix
coleenp@237 535 // if using compressed oops.
coleenp@237 536 protect_noaccess_prefix(size);
coleenp@237 537 }
coleenp@237 538
coleenp@237 539 ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size,
coleenp@237 540 const size_t prefix_align,
coleenp@237 541 const size_t suffix_size,
kvn@680 542 const size_t suffix_align,
kvn@680 543 char* requested_address) :
coleenp@237 544 ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align,
kvn@680 545 requested_address,
kvn@680 546 (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
kvn@680 547 Universe::narrow_oop_use_implicit_null_checks()) ?
coleenp@328 548 lcm(os::vm_page_size(), prefix_align) : 0) {
zgu@4135 549 if (base() > 0) {
zgu@4135 550 MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
zgu@4135 551 }
zgu@4135 552
coleenp@237 553 protect_noaccess_prefix(prefix_size+suffix_size);
coleenp@237 554 }
duke@0 555
coleenp@694 556 // Reserve space for the code segment. Same as the Java heap, only we mark this as
coleenp@694 557 // executable.
coleenp@694 558 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
coleenp@694 559 size_t rs_align,
coleenp@694 560 bool large) :
coleenp@694 561 ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
zgu@4135 562 MemTracker::record_virtual_memory_type((address)base(), mtCode);
coleenp@694 563 }
coleenp@694 564
duke@0 565 // VirtualSpace
duke@0 566
duke@0 567 VirtualSpace::VirtualSpace() {
duke@0 568 _low_boundary = NULL;
duke@0 569 _high_boundary = NULL;
duke@0 570 _low = NULL;
duke@0 571 _high = NULL;
duke@0 572 _lower_high = NULL;
duke@0 573 _middle_high = NULL;
duke@0 574 _upper_high = NULL;
duke@0 575 _lower_high_boundary = NULL;
duke@0 576 _middle_high_boundary = NULL;
duke@0 577 _upper_high_boundary = NULL;
duke@0 578 _lower_alignment = 0;
duke@0 579 _middle_alignment = 0;
duke@0 580 _upper_alignment = 0;
coleenp@237 581 _special = false;
coleenp@694 582 _executable = false;
duke@0 583 }
duke@0 584
duke@0 585
duke@0 586 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
duke@0 587 if (!rs.is_reserved()) return false; // allocation failed.
duke@0 588 assert(_low_boundary == NULL, "VirtualSpace already initialized");
duke@0 589 _low_boundary = rs.base();
duke@0 590 _high_boundary = low_boundary() + rs.size();
duke@0 591
duke@0 592 _low = low_boundary();
duke@0 593 _high = low();
duke@0 594
duke@0 595 _special = rs.special();
coleenp@694 596 _executable = rs.executable();
duke@0 597
duke@0 598 // When a VirtualSpace begins life at a large size, make all future expansion
duke@0 599 // and shrinking occur aligned to a granularity of large pages. This avoids
duke@0 600 // fragmentation of physical addresses that inhibits the use of large pages
duke@0 601 // by the OS virtual memory system. Empirically, we see that with a 4MB
duke@0 602 // page size, the only spaces that get handled this way are codecache and
duke@0 603 // the heap itself, both of which provide a substantial performance
duke@0 604 // boost in many benchmarks when covered by large pages.
duke@0 605 //
duke@0 606 // No attempt is made to force large page alignment at the very top and
duke@0 607 // bottom of the space if they are not aligned so already.
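// Illustrative layout (assuming the boundaries are not already large-page
// aligned):
//
//   low_boundary . . . . . . . . . . . . . . . . . . . . . . high_boundary
//   [  lower region  |          middle region          |  upper region  ]
//    small pages up to  large-page-sized chunks between   small pages from
//    lower_high_boundary  lower_ and middle_high_boundary  middle_high_boundary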
duke@0 608 _lower_alignment = os::vm_page_size();
duke@0 609 _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
duke@0 610 _upper_alignment = os::vm_page_size();
duke@0 611
duke@0 612 // End of each region
duke@0 613 _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
duke@0 614 _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
duke@0 615 _upper_high_boundary = high_boundary();
duke@0 616
duke@0 617 // High address of each region
duke@0 618 _lower_high = low_boundary();
duke@0 619 _middle_high = lower_high_boundary();
duke@0 620 _upper_high = middle_high_boundary();
duke@0 621
duke@0 622 // commit to initial size
duke@0 623 if (committed_size > 0) {
duke@0 624 if (!expand_by(committed_size)) {
duke@0 625 return false;
duke@0 626 }
duke@0 627 }
duke@0 628 return true;
duke@0 629 }
duke@0 630
duke@0 631
duke@0 632 VirtualSpace::~VirtualSpace() {
duke@0 633 release();
duke@0 634 }
duke@0 635
duke@0 636
duke@0 637 void VirtualSpace::release() {
coleenp@237 638 // This does not release memory it never reserved.
coleenp@237 639 // Caller must release via rs.release();
duke@0 640 _low_boundary = NULL;
duke@0 641 _high_boundary = NULL;
duke@0 642 _low = NULL;
duke@0 643 _high = NULL;
duke@0 644 _lower_high = NULL;
duke@0 645 _middle_high = NULL;
duke@0 646 _upper_high = NULL;
duke@0 647 _lower_high_boundary = NULL;
duke@0 648 _middle_high_boundary = NULL;
duke@0 649 _upper_high_boundary = NULL;
duke@0 650 _lower_alignment = 0;
duke@0 651 _middle_alignment = 0;
duke@0 652 _upper_alignment = 0;
duke@0 653 _special = false;
coleenp@694 654 _executable = false;
duke@0 655 }
duke@0 656
duke@0 657
duke@0 658 size_t VirtualSpace::committed_size() const {
duke@0 659 return pointer_delta(high(), low(), sizeof(char));
duke@0 660 }
duke@0 661
duke@0 662
duke@0 663 size_t VirtualSpace::reserved_size() const {
duke@0 664 return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
duke@0 665 }
duke@0 666
duke@0 667
duke@0 668 size_t VirtualSpace::uncommitted_size() const {
duke@0 669 return reserved_size() - committed_size();
duke@0 670 }
duke@0 671
duke@0 672
duke@0 673 bool VirtualSpace::contains(const void* p) const {
duke@0 674 return low() <= (const char*) p && (const char*) p < high();
duke@0 675 }
duke@0 676
duke@0 677 /*
duke@0 678 First we need to determine if a particular virtual space is using large
duke@0 679 pages. This is done in the initialize function, and only virtual spaces
duke@0 680 that are larger than LargePageSizeInBytes use large pages. Once we
duke@0 681 have determined this, all expand_by and shrink_by calls must grow and
duke@0 682 shrink by large page size chunks. If a particular request
duke@0 683 is within the current large page, the call to commit and uncommit memory
duke@0 684 can be ignored. In the case that the low and high boundaries of this
duke@0 685 space are not large page aligned, the pages leading to the first large
duke@0 686 page address and the pages after the last large page address must be
duke@0 687 allocated with default pages.
duke@0 688 */
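// Illustrative example (hypothetical sizes): with a 4K small page and a 4M
// middle alignment, an expand_by() that stays inside an already-committed 4M
// middle chunk computes middle_needs == 0 below and merely advances _high;
// crossing into the next chunk commits a full, large-page-aligned 4M.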
duke@0 689 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
duke@0 690 if (uncommitted_size() < bytes) return false;
duke@0 691
duke@0 692 if (special()) {
duke@0 693 // don't commit memory if the entire space is pinned in memory
duke@0 694 _high += bytes;
duke@0 695 return true;
duke@0 696 }
duke@0 697
duke@0 698 char* previous_high = high();
duke@0 699 char* unaligned_new_high = high() + bytes;
duke@0 700 assert(unaligned_new_high <= high_boundary(),
duke@0 701 "cannot expand by more than upper boundary");
duke@0 702
duke@0 703 // Calculate where the new high for each of the regions should be. If
duke@0 704 // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
duke@0 705 // then the unaligned lower and upper new highs would be the
duke@0 706 // lower_high() and upper_high() respectively.
duke@0 707 char* unaligned_lower_new_high =
duke@0 708 MIN2(unaligned_new_high, lower_high_boundary());
duke@0 709 char* unaligned_middle_new_high =
duke@0 710 MIN2(unaligned_new_high, middle_high_boundary());
duke@0 711 char* unaligned_upper_new_high =
duke@0 712 MIN2(unaligned_new_high, upper_high_boundary());
duke@0 713
duke@0 714 // Align the new highs based on each region's alignment. Lower and upper
duke@0 715 // alignment will always be default page size. middle alignment will be
duke@0 716 // LargePageSizeInBytes if the actual size of the virtual space is in
duke@0 717 // fact larger than LargePageSizeInBytes.
duke@0 718 char* aligned_lower_new_high =
duke@0 719 (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
duke@0 720 char* aligned_middle_new_high =
duke@0 721 (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
duke@0 722 char* aligned_upper_new_high =
duke@0 723 (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
duke@0 724
duke@0 725 // Determine which regions need to grow in this expand_by call.
duke@0 726 // If you are growing in the lower region, high() must be in that
duke@0 727 // region, so calculate the size based on high(). For the middle and
duke@0 728 // upper regions, determine the starting point of growth based on the
duke@0 729 // location of high(). By getting the MAX of the region's low address
duke@0 730 // (or the previous region's high address) and high(), we can tell if it
duke@0 731 // is an intra or inter region growth.
duke@0 732 size_t lower_needs = 0;
duke@0 733 if (aligned_lower_new_high > lower_high()) {
duke@0 734 lower_needs =
duke@0 735 pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
duke@0 736 }
duke@0 737 size_t middle_needs = 0;
duke@0 738 if (aligned_middle_new_high > middle_high()) {
duke@0 739 middle_needs =
duke@0 740 pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
duke@0 741 }
duke@0 742 size_t upper_needs = 0;
duke@0 743 if (aligned_upper_new_high > upper_high()) {
duke@0 744 upper_needs =
duke@0 745 pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
duke@0 746 }
duke@0 747
duke@0 748 // Check contiguity.
duke@0 749 assert(low_boundary() <= lower_high() &&
duke@0 750 lower_high() <= lower_high_boundary(),
duke@0 751 "high address must be contained within the region");
duke@0 752 assert(lower_high_boundary() <= middle_high() &&
duke@0 753 middle_high() <= middle_high_boundary(),
duke@0 754 "high address must be contained within the region");
duke@0 755 assert(middle_high_boundary() <= upper_high() &&
duke@0 756 upper_high() <= upper_high_boundary(),
duke@0 757 "high address must be contained within the region");
duke@0 758
duke@0 759 // Commit regions
duke@0 760 if (lower_needs > 0) {
duke@0 761 assert(low_boundary() <= lower_high() &&
duke@0 762 lower_high() + lower_needs <= lower_high_boundary(),
duke@0 763 "must not expand beyond region");
coleenp@694 764 if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
dcubed@4744 765 debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
dcubed@4744 766 ", lower_needs=" SIZE_FORMAT ", %d) failed",
dcubed@4744 767 lower_high(), lower_needs, _executable);)
duke@0 768 return false;
duke@0 769 } else {
duke@0 770 _lower_high += lower_needs;
dcubed@4744 771 }
duke@0 772 }
duke@0 773 if (middle_needs > 0) {
duke@0 774 assert(lower_high_boundary() <= middle_high() &&
duke@0 775 middle_high() + middle_needs <= middle_high_boundary(),
duke@0 776 "must not expand beyond region");
coleenp@694 777 if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
coleenp@694 778 _executable)) {
dcubed@4744 779 debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
dcubed@4744 780 ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
dcubed@4744 781 ", %d) failed", middle_high(), middle_needs,
dcubed@4744 782 middle_alignment(), _executable);)
duke@0 783 return false;
duke@0 784 }
duke@0 785 _middle_high += middle_needs;
duke@0 786 }
duke@0 787 if (upper_needs > 0) {
duke@0 788 assert(middle_high_boundary() <= upper_high() &&
duke@0 789 upper_high() + upper_needs <= upper_high_boundary(),
duke@0 790 "must not expand beyond region");
coleenp@694 791 if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
dcubed@4744 792 debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
dcubed@4744 793 ", upper_needs=" SIZE_FORMAT ", %d) failed",
dcubed@4744 794 upper_high(), upper_needs, _executable);)
duke@0 795 return false;
duke@0 796 } else {
duke@0 797 _upper_high += upper_needs;
duke@0 798 }
duke@0 799 }
duke@0 800
duke@0 801 if (pre_touch || AlwaysPreTouch) {
duke@0 802 int vm_ps = os::vm_page_size();
duke@0 803 for (char* curr = previous_high;
duke@0 804 curr < unaligned_new_high;
duke@0 805 curr += vm_ps) {
duke@0 806 // Note the use of a write here; originally we tried just a read, but
duke@0 807 // since the value read was unused, the optimizer removed the read.
duke@0 808 // If we ever have a concurrent touchahead thread, we'll want to use
duke@0 809 // a read, to avoid the potential of overwriting data (if a mutator
duke@0 810 // thread beats the touchahead thread to a page). There are various
duke@0 811 // ways of making sure this read is not optimized away: for example,
duke@0 812 // generating the code for a read procedure at runtime.
duke@0 813 *curr = 0;
duke@0 814 }
duke@0 815 }
duke@0 816
duke@0 817 _high += bytes;
duke@0 818 return true;
duke@0 819 }
duke@0 820
duke@0 821 // A page is uncommitted if the contents of the entire page are deemed unusable.
duke@0 822 // Continue to decrement the high() pointer until it reaches a page boundary,
duke@0 823 // at which point that particular page can be uncommitted.
duke@0 824 void VirtualSpace::shrink_by(size_t size) {
duke@0 825 if (committed_size() < size)
duke@0 826 fatal("Cannot shrink virtual space to negative size");
duke@0 827
duke@0 828 if (special()) {
duke@0 829 // don't uncommit if the entire space is pinned in memory
duke@0 830 _high -= size;
duke@0 831 return;
duke@0 832 }
duke@0 833
duke@0 834 char* unaligned_new_high = high() - size;
duke@0 835 assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
duke@0 836
duke@0 837 // Calculate new unaligned address
duke@0 838 char* unaligned_upper_new_high =
duke@0 839 MAX2(unaligned_new_high, middle_high_boundary());
duke@0 840 char* unaligned_middle_new_high =
duke@0 841 MAX2(unaligned_new_high, lower_high_boundary());
duke@0 842 char* unaligned_lower_new_high =
duke@0 843 MAX2(unaligned_new_high, low_boundary());
duke@0 844
duke@0 845 // Align address to region's alignment
duke@0 846 char* aligned_upper_new_high =
duke@0 847 (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
duke@0 848 char* aligned_middle_new_high =
duke@0 849 (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
duke@0 850 char* aligned_lower_new_high =
duke@0 851 (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
duke@0 852
duke@0 853 // Determine which regions need to shrink
duke@0 854 size_t upper_needs = 0;
duke@0 855 if (aligned_upper_new_high < upper_high()) {
duke@0 856 upper_needs =
duke@0 857 pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
duke@0 858 }
duke@0 859 size_t middle_needs = 0;
duke@0 860 if (aligned_middle_new_high < middle_high()) {
duke@0 861 middle_needs =
duke@0 862 pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
duke@0 863 }
duke@0 864 size_t lower_needs = 0;
duke@0 865 if (aligned_lower_new_high < lower_high()) {
duke@0 866 lower_needs =
duke@0 867 pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
duke@0 868 }
duke@0 869
duke@0 870 // Check contiguity.
duke@0 871 assert(middle_high_boundary() <= upper_high() &&
duke@0 872 upper_high() <= upper_high_boundary(),
duke@0 873 "high address must be contained within the region");
duke@0 874 assert(lower_high_boundary() <= middle_high() &&
duke@0 875 middle_high() <= middle_high_boundary(),
duke@0 876 "high address must be contained within the region");
duke@0 877 assert(low_boundary() <= lower_high() &&
duke@0 878 lower_high() <= lower_high_boundary(),
duke@0 879 "high address must be contained within the region");
duke@0 880
duke@0 881 // Uncommit
duke@0 882 if (upper_needs > 0) {
duke@0 883 assert(middle_high_boundary() <= aligned_upper_new_high &&
duke@0 884 aligned_upper_new_high + upper_needs <= upper_high_boundary(),
duke@0 885 "must not shrink beyond region");
duke@0 886 if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
duke@0 887 debug_only(warning("os::uncommit_memory failed"));
duke@0 888 return;
duke@0 889 } else {
duke@0 890 _upper_high -= upper_needs;
duke@0 891 }
duke@0 892 }
duke@0 893 if (middle_needs > 0) {
duke@0 894 assert(lower_high_boundary() <= aligned_middle_new_high &&
duke@0 895 aligned_middle_new_high + middle_needs <= middle_high_boundary(),
duke@0 896 "must not shrink beyond region");
duke@0 897 if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
duke@0 898 debug_only(warning("os::uncommit_memory failed"));
duke@0 899 return;
duke@0 900 } else {
duke@0 901 _middle_high -= middle_needs;
duke@0 902 }
duke@0 903 }
duke@0 904 if (lower_needs > 0) {
duke@0 905 assert(low_boundary() <= aligned_lower_new_high &&
duke@0 906 aligned_lower_new_high + lower_needs <= lower_high_boundary(),
duke@0 907 "must not shrink beyond region");
duke@0 908 if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
duke@0 909 debug_only(warning("os::uncommit_memory failed"));
duke@0 910 return;
duke@0 911 } else {
duke@0 912 _lower_high -= lower_needs;
duke@0 913 }
duke@0 914 }
duke@0 915
duke@0 916 _high -= size;
duke@0 917 }
duke@0 918
duke@0 919 #ifndef PRODUCT
duke@0 920 void VirtualSpace::check_for_contiguity() {
duke@0 921 // Check contiguity.
duke@0 922 assert(low_boundary() <= lower_high() &&
duke@0 923 lower_high() <= lower_high_boundary(),
duke@0 924 "high address must be contained within the region");
duke@0 925 assert(lower_high_boundary() <= middle_high() &&
duke@0 926 middle_high() <= middle_high_boundary(),
duke@0 927 "high address must be contained within the region");
duke@0 928 assert(middle_high_boundary() <= upper_high() &&
duke@0 929 upper_high() <= upper_high_boundary(),
duke@0 930 "high address must be contained within the region");
duke@0 931 assert(low() >= low_boundary(), "low");
duke@0 932 assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
duke@0 933 assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
duke@0 934 assert(high() <= upper_high(), "upper high");
duke@0 935 }
duke@0 936
duke@0 937 void VirtualSpace::print() {
duke@0 938 tty->print ("Virtual space:");
duke@0 939 if (special()) tty->print(" (pinned in memory)");
duke@0 940 tty->cr();
hseigel@6000 941 tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
hseigel@6000 942 tty->print_cr(" - reserved: " SIZE_FORMAT, reserved_size());
duke@0 943 tty->print_cr(" - [low, high]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high());
duke@0 944 tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
duke@0 945 }
duke@0 946
stefank@5112 947
stefank@5112 948 /////////////// Unit tests ///////////////
stefank@5112 949
stefank@5112 950 #ifndef PRODUCT
stefank@5112 951
stefank@5112 952 #define test_log(...) \
stefank@5112 953 do {\
stefank@5112 954 if (VerboseInternalVMTests) { \
stefank@5112 955 tty->print_cr(__VA_ARGS__); \
stefank@5112 956 tty->flush(); \
stefank@5112 957 }\
stefank@5112 958 } while (false)
stefank@5112 959
stefank@5112 960 class TestReservedSpace : AllStatic {
stefank@5112 961 public:
stefank@5112 962 static void small_page_write(void* addr, size_t size) {
stefank@5112 963 size_t page_size = os::vm_page_size();
stefank@5112 964
stefank@5112 965 char* end = (char*)addr + size;
stefank@5112 966 for (char* p = (char*)addr; p < end; p += page_size) {
stefank@5112 967 *p = 1;
stefank@5112 968 }
stefank@5112 969 }
stefank@5112 970
stefank@5112 971 static void release_memory_for_test(ReservedSpace rs) {
stefank@5112 972 if (rs.special()) {
stefank@5112 973 guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
stefank@5112 974 } else {
stefank@5112 975 guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
stefank@5112 976 }
stefank@5112 977 }
stefank@5112 978
stefank@5112 979 static void test_reserved_space1(size_t size, size_t alignment) {
stefank@5112 980 test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
stefank@5112 981
stefank@5112 982 assert(is_size_aligned(size, alignment), "Incorrect input parameters");
stefank@5112 983
stefank@5112 984 ReservedSpace rs(size, // size
stefank@5112 985 alignment, // alignment
stefank@5112 986 UseLargePages, // large
stefank@5112 987 NULL, // requested_address
stefank@5112 988 0); // noaccess_prefix
stefank@5112 989
stefank@5112 990 test_log(" rs.special() == %d", rs.special());
stefank@5112 991
stefank@5112 992 assert(rs.base() != NULL, "Must be");
stefank@5112 993 assert(rs.size() == size, "Must be");
stefank@5112 994
stefank@5112 995 assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
stefank@5112 996 assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
stefank@5112 997
stefank@5112 998 if (rs.special()) {
stefank@5112 999 small_page_write(rs.base(), size);
stefank@5112 1000 }
stefank@5112 1001
stefank@5112 1002 release_memory_for_test(rs);
stefank@5112 1003 }
stefank@5112 1004
stefank@5112 1005 static void test_reserved_space2(size_t size) {
stefank@5112 1006 test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
stefank@5112 1007
stefank@5112 1008 assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
stefank@5112 1009
stefank@5112 1010 ReservedSpace rs(size);
stefank@5112 1011
stefank@5112 1012 test_log(" rs.special() == %d", rs.special());
stefank@5112 1013
stefank@5112 1014 assert(rs.base() != NULL, "Must be");
stefank@5112 1015 assert(rs.size() == size, "Must be");
stefank@5112 1016
stefank@5112 1017 if (rs.special()) {
stefank@5112 1018 small_page_write(rs.base(), size);
stefank@5112 1019 }
stefank@5112 1020
stefank@5112 1021 release_memory_for_test(rs);
stefank@5112 1022 }
stefank@5112 1023
stefank@5112 1024 static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
stefank@5112 1025 test_log("test_reserved_space3(%p, %p, %d)",
stefank@5112 1026 (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
stefank@5112 1027
stefank@5112 1028 assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
stefank@5112 1029 assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
stefank@5112 1030
stefank@5112 1031 bool large = maybe_large && UseLargePages && size >= os::large_page_size();
stefank@5112 1032
stefank@5112 1033 ReservedSpace rs(size, alignment, large, false);
stefank@5112 1034
stefank@5112 1035 test_log(" rs.special() == %d", rs.special());
stefank@5112 1036
stefank@5112 1037 assert(rs.base() != NULL, "Must be");
stefank@5112 1038 assert(rs.size() == size, "Must be");
stefank@5112 1039
stefank@5112 1040 if (rs.special()) {
stefank@5112 1041 small_page_write(rs.base(), size);
stefank@5112 1042 }
stefank@5112 1043
stefank@5112 1044 release_memory_for_test(rs);
stefank@5112 1045 }
stefank@5112 1046
stefank@5112 1047
stefank@5112 1048 static void test_reserved_space1() {
stefank@5112 1049 size_t size = 2 * 1024 * 1024;
stefank@5112 1050 size_t ag = os::vm_allocation_granularity();
stefank@5112 1051
stefank@5112 1052 test_reserved_space1(size, ag);
stefank@5112 1053 test_reserved_space1(size * 2, ag);
stefank@5112 1054 test_reserved_space1(size * 10, ag);
stefank@5112 1055 }
stefank@5112 1056
stefank@5112 1057 static void test_reserved_space2() {
stefank@5112 1058 size_t size = 2 * 1024 * 1024;
stefank@5112 1059 size_t ag = os::vm_allocation_granularity();
stefank@5112 1060
stefank@5112 1061 test_reserved_space2(size * 1);
stefank@5112 1062 test_reserved_space2(size * 2);
stefank@5112 1063 test_reserved_space2(size * 10);
stefank@5112 1064 test_reserved_space2(ag);
stefank@5112 1065 test_reserved_space2(size - ag);
stefank@5112 1066 test_reserved_space2(size);
stefank@5112 1067 test_reserved_space2(size + ag);
stefank@5112 1068 test_reserved_space2(size * 2);
stefank@5112 1069 test_reserved_space2(size * 2 - ag);
stefank@5112 1070 test_reserved_space2(size * 2 + ag);
stefank@5112 1071 test_reserved_space2(size * 3);
stefank@5112 1072 test_reserved_space2(size * 3 - ag);
stefank@5112 1073 test_reserved_space2(size * 3 + ag);
stefank@5112 1074 test_reserved_space2(size * 10);
stefank@5112 1075 test_reserved_space2(size * 10 + size / 2);
stefank@5112 1076 }
stefank@5112 1077
stefank@5112 1078 static void test_reserved_space3() {
stefank@5112 1079 size_t ag = os::vm_allocation_granularity();
stefank@5112 1080
stefank@5112 1081 test_reserved_space3(ag, ag , false);
stefank@5112 1082 test_reserved_space3(ag * 2, ag , false);
stefank@5112 1083 test_reserved_space3(ag * 3, ag , false);
stefank@5112 1084 test_reserved_space3(ag * 2, ag * 2, false);
stefank@5112 1085 test_reserved_space3(ag * 4, ag * 2, false);
stefank@5112 1086 test_reserved_space3(ag * 8, ag * 2, false);
stefank@5112 1087 test_reserved_space3(ag * 4, ag * 4, false);
stefank@5112 1088 test_reserved_space3(ag * 8, ag * 4, false);
stefank@5112 1089 test_reserved_space3(ag * 16, ag * 4, false);
stefank@5112 1090
stefank@5112 1091 if (UseLargePages) {
stefank@5112 1092 size_t lp = os::large_page_size();
stefank@5112 1093
stefank@5112 1094 // Without large pages
stefank@5112 1095 test_reserved_space3(lp, ag * 4, false);
stefank@5112 1096 test_reserved_space3(lp * 2, ag * 4, false);
stefank@5112 1097 test_reserved_space3(lp * 4, ag * 4, false);
stefank@5112 1098 test_reserved_space3(lp, lp , false);
stefank@5112 1099 test_reserved_space3(lp * 2, lp , false);
stefank@5112 1100 test_reserved_space3(lp * 3, lp , false);
stefank@5112 1101 test_reserved_space3(lp * 2, lp * 2, false);
stefank@5112 1102 test_reserved_space3(lp * 4, lp * 2, false);
stefank@5112 1103 test_reserved_space3(lp * 8, lp * 2, false);
stefank@5112 1104
stefank@5112 1105 // With large pages
stefank@5112 1106 test_reserved_space3(lp, ag * 4 , true);
stefank@5112 1107 test_reserved_space3(lp * 2, ag * 4, true);
stefank@5112 1108 test_reserved_space3(lp * 4, ag * 4, true);
stefank@5112 1109 test_reserved_space3(lp, lp , true);
stefank@5112 1110 test_reserved_space3(lp * 2, lp , true);
stefank@5112 1111 test_reserved_space3(lp * 3, lp , true);
stefank@5112 1112 test_reserved_space3(lp * 2, lp * 2, true);
stefank@5112 1113 test_reserved_space3(lp * 4, lp * 2, true);
stefank@5112 1114 test_reserved_space3(lp * 8, lp * 2, true);
stefank@5112 1115 }
stefank@5112 1116 }
stefank@5112 1117
stefank@5112 1118 static void test_reserved_space() {
stefank@5112 1119 test_reserved_space1();
stefank@5112 1120 test_reserved_space2();
stefank@5112 1121 test_reserved_space3();
stefank@5112 1122 }
stefank@5112 1123 };
stefank@5112 1124
stefank@5112 1125 void TestReservedSpace_test() {
stefank@5112 1126 TestReservedSpace::test_reserved_space();
stefank@5112 1127 }
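// These tests are expected to be driven by the internal VM test runner: in
// debug builds of this era, -XX:+ExecuteInternalVMTests invokes
// TestReservedSpace_test(), and -XX:+VerboseInternalVMTests enables the
// test_log output above.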
stefank@5112 1128
stefank@5112 1129 #endif // PRODUCT
stefank@5112 1130
duke@0 1131 #endif