annotate src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp @ 141:fcbfc50865ab

6684395: Port NUMA-aware allocator to linux
Summary: NUMA-aware allocator port to Linux
Reviewed-by: jmasa, apetrusenko
author iveresov
date Tue, 29 Apr 2008 13:51:26 +0400
parents a61af66fc99e
children e3729351c946
rev   line source
duke@0 1
duke@0 2 /*
duke@0 3 * Copyright 2006-2007 Sun Microsystems, Inc. All Rights Reserved.
duke@0 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 5 *
duke@0 6 * This code is free software; you can redistribute it and/or modify it
duke@0 7 * under the terms of the GNU General Public License version 2 only, as
duke@0 8 * published by the Free Software Foundation.
duke@0 9 *
duke@0 10 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 13 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 14 * accompanied this code).
duke@0 15 *
duke@0 16 * You should have received a copy of the GNU General Public License version
duke@0 17 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 19 *
duke@0 20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@0 21 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@0 22 * have any questions.
duke@0 23 *
duke@0 24 */
duke@0 25
duke@0 26 # include "incls/_precompiled.incl"
duke@0 27 # include "incls/_mutableNUMASpace.cpp.incl"
duke@0 28
duke@0 29
duke@0 30 MutableNUMASpace::MutableNUMASpace() {
duke@0 31 _lgrp_spaces = new (ResourceObj::C_HEAP) GrowableArray<LGRPSpace*>(0, true);
duke@0 32 _page_size = os::vm_page_size();
duke@0 33 _adaptation_cycles = 0;
duke@0 34 _samples_count = 0;
duke@0 35 update_layout(true);
duke@0 36 }
duke@0 37
duke@0 38 MutableNUMASpace::~MutableNUMASpace() {
duke@0 39 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@0 40 delete lgrp_spaces()->at(i);
duke@0 41 }
duke@0 42 delete lgrp_spaces();
duke@0 43 }
duke@0 44
duke@0 45 void MutableNUMASpace::mangle_unused_area() {
duke@0 46 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@0 47 LGRPSpace *ls = lgrp_spaces()->at(i);
duke@0 48 MutableSpace *s = ls->space();
iveresov@141 49 if (!os::numa_has_static_binding()) {
iveresov@141 50 HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
iveresov@141 51 if (top < s->end()) {
iveresov@141 52 ls->add_invalid_region(MemRegion(top, s->end()));
iveresov@141 53 }
duke@0 54 }
duke@0 55 s->mangle_unused_area();
duke@0 56 }
duke@0 57 }
duke@0 58
duke@0 59 // There may be unallocated holes in the middle chunks
duke@0 60 // that should be filled with dead objects to ensure parsability.
duke@0 61 void MutableNUMASpace::ensure_parsability() {
duke@0 62 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@0 63 LGRPSpace *ls = lgrp_spaces()->at(i);
duke@0 64 MutableSpace *s = ls->space();
duke@0 65 if (!s->contains(top())) {
duke@0 66 if (s->free_in_words() > 0) {
duke@0 67 SharedHeap::fill_region_with_object(MemRegion(s->top(), s->end()));
duke@0 68 size_t area_touched_words = pointer_delta(s->end(), s->top(), sizeof(HeapWordSize));
duke@0 69 #ifndef ASSERT
duke@0 70 if (!ZapUnusedHeapArea) {
duke@0 71 area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
duke@0 72 area_touched_words);
duke@0 73 }
duke@0 74 #endif
iveresov@141 75 if (!os::numa_has_static_binding()) {
iveresov@141 76 MemRegion invalid;
iveresov@141 77 HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size());
iveresov@141 78 HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words),
iveresov@141 79 os::vm_page_size());
iveresov@141 80 if (crossing_start != crossing_end) {
iveresov@141 81 // If the object header crossed a small page boundary we mark the area
iveresov@141 82 // as invalid, rounding it to page_size().
iveresov@141 83 HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
iveresov@141 84 HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()),
iveresov@141 85 s->end());
iveresov@141 86 invalid = MemRegion(start, end);
iveresov@141 87 }
iveresov@141 88
iveresov@141 89 ls->add_invalid_region(invalid);
duke@0 90 }
duke@0 91 s->set_top(s->end());
duke@0 92 }
duke@0 93 } else {
iveresov@141 94 if (!os::numa_has_static_binding()) {
duke@0 95 #ifdef ASSERT
duke@0 96 MemRegion invalid(s->top(), s->end());
duke@0 97 ls->add_invalid_region(invalid);
iveresov@141 98 #else
iveresov@141 99 if (ZapUnusedHeapArea) {
iveresov@141 100 MemRegion invalid(s->top(), s->end());
iveresov@141 101 ls->add_invalid_region(invalid);
iveresov@141 102 } else break;
duke@0 103 #endif
iveresov@141 104 }
duke@0 105 }
duke@0 106 }
duke@0 107 }
duke@0 108
duke@0 109 size_t MutableNUMASpace::used_in_words() const {
duke@0 110 size_t s = 0;
duke@0 111 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@0 112 s += lgrp_spaces()->at(i)->space()->used_in_words();
duke@0 113 }
duke@0 114 return s;
duke@0 115 }
duke@0 116
duke@0 117 size_t MutableNUMASpace::free_in_words() const {
duke@0 118 size_t s = 0;
duke@0 119 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@0 120 s += lgrp_spaces()->at(i)->space()->free_in_words();
duke@0 121 }
duke@0 122 return s;
duke@0 123 }
duke@0 124
duke@0 125
duke@0 126 size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
duke@0 127 guarantee(thr != NULL, "No thread");
duke@0 128 int lgrp_id = thr->lgrp_id();
duke@0 129 assert(lgrp_id != -1, "No lgrp_id set");
duke@0 130 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
duke@0 131 if (i == -1) {
duke@0 132 return 0;
duke@0 133 }
duke@0 134 return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
duke@0 135 }
duke@0 136
duke@0 137 size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
duke@0 138 guarantee(thr != NULL, "No thread");
duke@0 139 int lgrp_id = thr->lgrp_id();
duke@0 140 assert(lgrp_id != -1, "No lgrp_id set");
duke@0 141 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
duke@0 142 if (i == -1) {
duke@0 143 return 0;
duke@0 144 }
duke@0 145 return lgrp_spaces()->at(i)->space()->free_in_bytes();
duke@0 146 }
duke@0 147
duke@0 148 // Check if the NUMA topology has changed. Add and remove spaces if needed.
duke@0 149 // The update can be forced by setting the force parameter equal to true.
duke@0 150 bool MutableNUMASpace::update_layout(bool force) {
duke@0 151 // Check if the topology had changed.
duke@0 152 bool changed = os::numa_topology_changed();
duke@0 153 if (force || changed) {
duke@0 154 // Compute lgrp intersection. Add/remove spaces.
duke@0 155 int lgrp_limit = (int)os::numa_get_groups_num();
duke@0 156 int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
duke@0 157 int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
duke@0 158 assert(lgrp_num > 0, "There should be at least one locality group");
duke@0 159 // Add new spaces for the new nodes
duke@0 160 for (int i = 0; i < lgrp_num; i++) {
duke@0 161 bool found = false;
duke@0 162 for (int j = 0; j < lgrp_spaces()->length(); j++) {
duke@0 163 if (lgrp_spaces()->at(j)->lgrp_id() == lgrp_ids[i]) {
duke@0 164 found = true;
duke@0 165 break;
duke@0 166 }
duke@0 167 }
duke@0 168 if (!found) {
duke@0 169 lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i]));
duke@0 170 }
duke@0 171 }
duke@0 172
duke@0 173 // Remove spaces for the removed nodes.
duke@0 174 for (int i = 0; i < lgrp_spaces()->length();) {
duke@0 175 bool found = false;
duke@0 176 for (int j = 0; j < lgrp_num; j++) {
duke@0 177 if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) {
duke@0 178 found = true;
duke@0 179 break;
duke@0 180 }
duke@0 181 }
duke@0 182 if (!found) {
duke@0 183 delete lgrp_spaces()->at(i);
duke@0 184 lgrp_spaces()->remove_at(i);
duke@0 185 } else {
duke@0 186 i++;
duke@0 187 }
duke@0 188 }
duke@0 189
duke@0 190 FREE_C_HEAP_ARRAY(int, lgrp_ids);
duke@0 191
duke@0 192 if (changed) {
duke@0 193 for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
duke@0 194 thread->set_lgrp_id(-1);
duke@0 195 }
duke@0 196 }
duke@0 197 return true;
duke@0 198 }
duke@0 199 return false;
duke@0 200 }
duke@0 201
duke@0 202 // Bias region towards the first-touching lgrp. Set the right page sizes.
iveresov@141 203 void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
duke@0 204 HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
duke@0 205 HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
duke@0 206 if (end > start) {
duke@0 207 MemRegion aligned_region(start, end);
duke@0 208 assert((intptr_t)aligned_region.start() % page_size() == 0 &&
duke@0 209 (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
duke@0 210 assert(region().contains(aligned_region), "Sanity");
iveresov@141 211 // First we tell the OS which page size we want in the given range. The underlying
iveresov@141 212 // large page can be broken down if we require small pages.
iveresov@141 213 os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
iveresov@141 214 // Then we uncommit the pages in the range.
duke@0 215 os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
iveresov@141 216 // And make them local/first-touch biased.
iveresov@141 217 os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id);
duke@0 218 }
duke@0 219 }
duke@0 220
duke@0 221 // Free all pages in the region.
duke@0 222 void MutableNUMASpace::free_region(MemRegion mr) {
duke@0 223 HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
duke@0 224 HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
duke@0 225 if (end > start) {
duke@0 226 MemRegion aligned_region(start, end);
duke@0 227 assert((intptr_t)aligned_region.start() % page_size() == 0 &&
duke@0 228 (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
duke@0 229 assert(region().contains(aligned_region), "Sanity");
duke@0 230 os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
duke@0 231 }
duke@0 232 }
duke@0 233
duke@0 234 // Update space layout. Perform adaptation.
duke@0 235 void MutableNUMASpace::update() {
duke@0 236 if (update_layout(false)) {
duke@0 237 // If the topology has changed, make all chunks zero-sized.
duke@0 238 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@0 239 MutableSpace *s = lgrp_spaces()->at(i)->space();
duke@0 240 s->set_end(s->bottom());
duke@0 241 s->set_top(s->bottom());
duke@0 242 }
duke@0 243 initialize(region(), true);
duke@0 244 } else {
duke@0 245 bool should_initialize = false;
iveresov@141 246 if (!os::numa_has_static_binding()) {
iveresov@141 247 for (int i = 0; i < lgrp_spaces()->length(); i++) {
iveresov@141 248 if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
iveresov@141 249 should_initialize = true;
iveresov@141 250 break;
iveresov@141 251 }
duke@0 252 }
duke@0 253 }
duke@0 254
duke@0 255 if (should_initialize ||
duke@0 256 (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
duke@0 257 initialize(region(), true);
duke@0 258 }
duke@0 259 }
duke@0 260
duke@0 261 if (NUMAStats) {
duke@0 262 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@0 263 lgrp_spaces()->at(i)->accumulate_statistics(page_size());
duke@0 264 }
duke@0 265 }
duke@0 266
duke@0 267 scan_pages(NUMAPageScanRate);
duke@0 268 }
duke@0 269
duke@0 270 // Scan pages. Free pages that have a smaller size than expected or the wrong placement.
duke@0 271 void MutableNUMASpace::scan_pages(size_t page_count)
duke@0 272 {
duke@0 273 size_t pages_per_chunk = page_count / lgrp_spaces()->length();
duke@0 274 if (pages_per_chunk > 0) {
duke@0 275 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@0 276 LGRPSpace *ls = lgrp_spaces()->at(i);
duke@0 277 ls->scan_pages(page_size(), pages_per_chunk);
duke@0 278 }
duke@0 279 }
duke@0 280 }
duke@0 281
duke@0 282 // Accumulate statistics about the allocation rate of each lgrp.
duke@0 283 void MutableNUMASpace::accumulate_statistics() {
duke@0 284 if (UseAdaptiveNUMAChunkSizing) {
duke@0 285 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@0 286 lgrp_spaces()->at(i)->sample();
duke@0 287 }
duke@0 288 increment_samples_count();
duke@0 289 }
duke@0 290
duke@0 291 if (NUMAStats) {
duke@0 292 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@0 293 lgrp_spaces()->at(i)->accumulate_statistics(page_size());
duke@0 294 }
duke@0 295 }
duke@0 296 }
duke@0 297
duke@0 298 // Get the current size of a chunk.
duke@0 299 // This function computes the size of the chunk based on the
duke@0 300 // difference between chunk ends. This allows it to work correctly in
duke@0 301 // case the whole space is resized and during the process of adaptive
duke@0 302 // chunk resizing.
duke@0 303 size_t MutableNUMASpace::current_chunk_size(int i) {
duke@0 304 HeapWord *cur_end, *prev_end;
duke@0 305 if (i == 0) {
duke@0 306 prev_end = bottom();
duke@0 307 } else {
duke@0 308 prev_end = lgrp_spaces()->at(i - 1)->space()->end();
duke@0 309 }
duke@0 310 if (i == lgrp_spaces()->length() - 1) {
duke@0 311 cur_end = end();
duke@0 312 } else {
duke@0 313 cur_end = lgrp_spaces()->at(i)->space()->end();
duke@0 314 }
duke@0 315 if (cur_end > prev_end) {
duke@0 316 return pointer_delta(cur_end, prev_end, sizeof(char));
duke@0 317 }
duke@0 318 return 0;
duke@0 319 }
duke@0 320
duke@0 321 // Return the default chunk size by equally dividing the space.
duke@0 322 // page_size() aligned.
duke@0 323 size_t MutableNUMASpace::default_chunk_size() {
duke@0 324 return base_space_size() / lgrp_spaces()->length() * page_size();
duke@0 325 }
duke@0 326
duke@0 327 // Produce a new chunk size. page_size() aligned.
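// A rough sketch of the proportional sizing (illustrative numbers only): the
// chunk for lgrp i receives a share of the still-unassigned pages proportional
// to its share of the combined allocation rate of lgrp i and all later lgrps.
// For example, with two lgrps left, average rates of 3 and 1, and roughly 100
// pages still available, the first of them is sized to about 75 pages before
// the limit below is applied.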
duke@0 328 size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
duke@0 329 size_t pages_available = base_space_size();
duke@0 330 for (int j = 0; j < i; j++) {
duke@0 331 pages_available -= round_down(current_chunk_size(j), page_size()) / page_size();
duke@0 332 }
duke@0 333 pages_available -= lgrp_spaces()->length() - i - 1;
duke@0 334 assert(pages_available > 0, "No pages left");
duke@0 335 float alloc_rate = 0;
duke@0 336 for (int j = i; j < lgrp_spaces()->length(); j++) {
duke@0 337 alloc_rate += lgrp_spaces()->at(j)->alloc_rate()->average();
duke@0 338 }
duke@0 339 size_t chunk_size = 0;
duke@0 340 if (alloc_rate > 0) {
duke@0 341 LGRPSpace *ls = lgrp_spaces()->at(i);
duke@0 342 chunk_size = (size_t)(ls->alloc_rate()->average() * pages_available / alloc_rate) * page_size();
duke@0 343 }
duke@0 344 chunk_size = MAX2(chunk_size, page_size());
duke@0 345
duke@0 346 if (limit > 0) {
duke@0 347 limit = round_down(limit, page_size());
duke@0 348 if (chunk_size > current_chunk_size(i)) {
duke@0 349 chunk_size = MIN2((off_t)chunk_size, (off_t)current_chunk_size(i) + (off_t)limit);
duke@0 350 } else {
duke@0 351 chunk_size = MAX2((off_t)chunk_size, (off_t)current_chunk_size(i) - (off_t)limit);
duke@0 352 }
duke@0 353 }
duke@0 354 assert(chunk_size <= pages_available * page_size(), "Chunk size out of range");
duke@0 355 return chunk_size;
duke@0 356 }
duke@0 357
duke@0 358
duke@0 359 // Return the bottom_region and the top_region. Align them to page_size() boundary.
duke@0 360 // |------------------new_region---------------------------------|
duke@0 361 // |----bottom_region--|---intersection---|------top_region------|
duke@0 362 void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection,
duke@0 363 MemRegion* bottom_region, MemRegion *top_region) {
duke@0 364 // Is there bottom?
duke@0 365 if (new_region.start() < intersection.start()) { // Yes
duke@0 366 // Try to coalesce small pages into a large one.
duke@0 367 if (UseLargePages && page_size() >= os::large_page_size()) {
duke@0 368 HeapWord* p = (HeapWord*)round_to((intptr_t) intersection.start(), os::large_page_size());
duke@0 369 if (new_region.contains(p)
duke@0 370 && pointer_delta(p, new_region.start(), sizeof(char)) >= os::large_page_size()) {
duke@0 371 if (intersection.contains(p)) {
duke@0 372 intersection = MemRegion(p, intersection.end());
duke@0 373 } else {
duke@0 374 intersection = MemRegion(p, p);
duke@0 375 }
duke@0 376 }
duke@0 377 }
duke@0 378 *bottom_region = MemRegion(new_region.start(), intersection.start());
duke@0 379 } else {
duke@0 380 *bottom_region = MemRegion();
duke@0 381 }
duke@0 382
duke@0 383 // Is there top?
duke@0 384 if (intersection.end() < new_region.end()) { // Yes
duke@0 385 // Try to coalesce small pages into a large one.
duke@0 386 if (UseLargePages && page_size() >= os::large_page_size()) {
duke@0 387 HeapWord* p = (HeapWord*)round_down((intptr_t) intersection.end(), os::large_page_size());
duke@0 388 if (new_region.contains(p)
duke@0 389 && pointer_delta(new_region.end(), p, sizeof(char)) >= os::large_page_size()) {
duke@0 390 if (intersection.contains(p)) {
duke@0 391 intersection = MemRegion(intersection.start(), p);
duke@0 392 } else {
duke@0 393 intersection = MemRegion(p, p);
duke@0 394 }
duke@0 395 }
duke@0 396 }
duke@0 397 *top_region = MemRegion(intersection.end(), new_region.end());
duke@0 398 } else {
duke@0 399 *top_region = MemRegion();
duke@0 400 }
duke@0 401 }
duke@0 402
duke@0 403 // Try to merge the invalid region with the bottom or top region by decreasing
duke@0 404 // the intersection area. The invalid_region comes back empty if it could be
duke@0 405 // merged into one of the tails; it remains non-empty only if it lies entirely
duke@0 406 // inside the intersection (in which case it may be expanded to large-page boundaries).
duke@0 407 // |------------------new_region---------------------------------|
duke@0 408 // |----------------|-------invalid---|--------------------------|
duke@0 409 // |----bottom_region--|---intersection---|------top_region------|
duke@0 410 void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersection,
duke@0 411 MemRegion *invalid_region) {
duke@0 412 if (intersection->start() >= invalid_region->start() && intersection->contains(invalid_region->end())) {
duke@0 413 *intersection = MemRegion(invalid_region->end(), intersection->end());
duke@0 414 *invalid_region = MemRegion();
duke@0 415 } else
duke@0 416 if (intersection->end() <= invalid_region->end() && intersection->contains(invalid_region->start())) {
duke@0 417 *intersection = MemRegion(intersection->start(), invalid_region->start());
duke@0 418 *invalid_region = MemRegion();
duke@0 419 } else
duke@0 420 if (intersection->equals(*invalid_region) || invalid_region->contains(*intersection)) {
duke@0 421 *intersection = MemRegion(new_region.start(), new_region.start());
duke@0 422 *invalid_region = MemRegion();
duke@0 423 } else
duke@0 424 if (intersection->contains(invalid_region)) {
duke@0 425 // That's the only case in which we have to make an additional bias_region() call.
duke@0 426 HeapWord* start = invalid_region->start();
duke@0 427 HeapWord* end = invalid_region->end();
duke@0 428 if (UseLargePages && page_size() >= os::large_page_size()) {
duke@0 429 HeapWord *p = (HeapWord*)round_down((intptr_t) start, os::large_page_size());
duke@0 430 if (new_region.contains(p)) {
duke@0 431 start = p;
duke@0 432 }
duke@0 433 p = (HeapWord*)round_to((intptr_t) end, os::large_page_size());
duke@0 434 if (new_region.contains(end)) {
duke@0 435 end = p;
duke@0 436 }
duke@0 437 }
duke@0 438 if (intersection->start() > start) {
duke@0 439 *intersection = MemRegion(start, intersection->end());
duke@0 440 }
duke@0 441 if (intersection->end() < end) {
duke@0 442 *intersection = MemRegion(intersection->start(), end);
duke@0 443 }
duke@0 444 *invalid_region = MemRegion(start, end);
duke@0 445 }
duke@0 446 }
duke@0 447
duke@0 448 void MutableNUMASpace::initialize(MemRegion mr, bool clear_space) {
duke@0 449 assert(clear_space, "Reallocation will destroy data!");
duke@0 450 assert(lgrp_spaces()->length() > 0, "There should be at least one space");
duke@0 451
duke@0 452 MemRegion old_region = region(), new_region;
duke@0 453 set_bottom(mr.start());
duke@0 454 set_end(mr.end());
duke@0 455 MutableSpace::set_top(bottom());
duke@0 456
duke@0 457 // Compute chunk sizes
duke@0 458 size_t prev_page_size = page_size();
duke@0 459 set_page_size(UseLargePages ? os::large_page_size() : os::vm_page_size());
duke@0 460 HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
duke@0 461 HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
duke@0 462 size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
duke@0 463
duke@0 464 // Try small pages if the chunk size is too small
duke@0 465 if (base_space_size_pages / lgrp_spaces()->length() == 0
duke@0 466 && page_size() > (size_t)os::vm_page_size()) {
duke@0 467 set_page_size(os::vm_page_size());
duke@0 468 rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
duke@0 469 rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
duke@0 470 base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
duke@0 471 }
duke@0 472 guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
duke@0 473 set_base_space_size(base_space_size_pages);
duke@0 474
duke@0 475 // Handle space resize
duke@0 476 MemRegion top_region, bottom_region;
duke@0 477 if (!old_region.equals(region())) {
duke@0 478 new_region = MemRegion(rounded_bottom, rounded_end);
duke@0 479 MemRegion intersection = new_region.intersection(old_region);
duke@0 480 if (intersection.start() == NULL ||
duke@0 481 intersection.end() == NULL ||
duke@0 482 prev_page_size > page_size()) { // If the page size got smaller we have to change
duke@0 483 // the page size preference for the whole space.
duke@0 484 intersection = MemRegion(new_region.start(), new_region.start());
duke@0 485 }
duke@0 486 select_tails(new_region, intersection, &bottom_region, &top_region);
iveresov@141 487 bias_region(bottom_region, lgrp_spaces()->at(0)->lgrp_id());
iveresov@141 488 bias_region(top_region, lgrp_spaces()->at(lgrp_spaces()->length() - 1)->lgrp_id());
duke@0 489 }
duke@0 490
duke@0 491 // Check if the space layout has changed significantly.
duke@0 492 // This happens when the space has been resized so that either head or tail
duke@0 493 // chunk became less than a page.
duke@0 494 bool layout_valid = UseAdaptiveNUMAChunkSizing &&
duke@0 495 current_chunk_size(0) > page_size() &&
duke@0 496 current_chunk_size(lgrp_spaces()->length() - 1) > page_size();
duke@0 497
duke@0 498
duke@0 499 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@0 500 LGRPSpace *ls = lgrp_spaces()->at(i);
duke@0 501 MutableSpace *s = ls->space();
duke@0 502 old_region = s->region();
duke@0 503
duke@0 504 size_t chunk_byte_size = 0, old_chunk_byte_size = 0;
duke@0 505 if (i < lgrp_spaces()->length() - 1) {
duke@0 506 if (!UseAdaptiveNUMAChunkSizing ||
duke@0 507 (UseAdaptiveNUMAChunkSizing && NUMAChunkResizeWeight == 0) ||
duke@0 508 samples_count() < AdaptiveSizePolicyReadyThreshold) {
duke@0 509 // No adaptation. Divide the space equally.
duke@0 510 chunk_byte_size = default_chunk_size();
duke@0 511 } else
duke@0 512 if (!layout_valid || NUMASpaceResizeRate == 0) {
duke@0 513 // Fast adaptation. If no space resize rate is set, resize
duke@0 514 // the chunks instantly.
duke@0 515 chunk_byte_size = adaptive_chunk_size(i, 0);
duke@0 516 } else {
duke@0 517 // Slow adaptation. Resize the chunks moving no more than
duke@0 518 // NUMASpaceResizeRate bytes per collection.
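// A worked example of the limit arithmetic below (illustrative numbers only):
// with 4 lgrps, limit = NUMASpaceResizeRate / (4 * 5 / 2), i.e. one tenth of
// NUMASpaceResizeRate, and chunk i may grow or shrink by at most
// (i + 1) * limit (but never less than a page) per collection, which keeps
// the combined boundary movement within NUMASpaceResizeRate.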
duke@0 519 size_t limit = NUMASpaceResizeRate /
duke@0 520 (lgrp_spaces()->length() * (lgrp_spaces()->length() + 1) / 2);
duke@0 521 chunk_byte_size = adaptive_chunk_size(i, MAX2(limit * (i + 1), page_size()));
duke@0 522 }
duke@0 523
duke@0 524 assert(chunk_byte_size >= page_size(), "Chunk size too small");
duke@0 525 assert(chunk_byte_size <= capacity_in_bytes(), "Sanity check");
duke@0 526 }
duke@0 527
duke@0 528 if (i == 0) { // Bottom chunk
duke@0 529 if (i != lgrp_spaces()->length() - 1) {
duke@0 530 new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize));
duke@0 531 } else {
duke@0 532 new_region = MemRegion(bottom(), end());
duke@0 533 }
duke@0 534 } else
duke@0 535 if (i < lgrp_spaces()->length() - 1) { // Middle chunks
duke@0 536 MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
duke@0 537 new_region = MemRegion(ps->end(),
duke@0 538 ps->end() + (chunk_byte_size >> LogHeapWordSize));
duke@0 539 } else { // Top chunk
duke@0 540 MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
duke@0 541 new_region = MemRegion(ps->end(), end());
duke@0 542 }
duke@0 543 guarantee(region().contains(new_region), "Region invariant");
duke@0 544
duke@0 545
duke@0 546 // The general case:
duke@0 547 // |---------------------|--invalid---|--------------------------|
duke@0 548 // |------------------new_region---------------------------------|
duke@0 549 // |----bottom_region--|---intersection---|------top_region------|
duke@0 550 // |----old_region----|
duke@0 551 // The intersection part has all pages in place; we don't need to migrate them.
duke@0 552 // Pages for the top and bottom part should be freed and then reallocated.
duke@0 553
duke@0 554 MemRegion intersection = old_region.intersection(new_region);
duke@0 555
duke@0 556 if (intersection.start() == NULL || intersection.end() == NULL) {
duke@0 557 intersection = MemRegion(new_region.start(), new_region.start());
duke@0 558 }
duke@0 559
iveresov@141 560 if (!os::numa_has_static_binding()) {
iveresov@141 561 MemRegion invalid_region = ls->invalid_region().intersection(new_region);
iveresov@141 562 // The invalid region is a range of memory that could possibly have
iveresov@141 563 // been allocated on another node. That's relevant only on Solaris, where
iveresov@141 564 // there is no static memory binding.
iveresov@141 565 if (!invalid_region.is_empty()) {
iveresov@141 566 merge_regions(new_region, &intersection, &invalid_region);
iveresov@141 567 free_region(invalid_region);
iveresov@141 568 ls->set_invalid_region(MemRegion());
iveresov@141 569 }
duke@0 570 }
iveresov@141 571
duke@0 572 select_tails(new_region, intersection, &bottom_region, &top_region);
iveresov@141 573
iveresov@141 574 if (!os::numa_has_static_binding()) {
iveresov@141 575 // If this is a system with a first-touch policy then it's enough
iveresov@141 576 // to free the pages.
iveresov@141 577 free_region(bottom_region);
iveresov@141 578 free_region(top_region);
iveresov@141 579 } else {
iveresov@141 580 // In a system with static binding we have to change the bias whenever
iveresov@141 581 // we reshape the heap.
iveresov@141 582 bias_region(bottom_region, ls->lgrp_id());
iveresov@141 583 bias_region(top_region, ls->lgrp_id());
iveresov@141 584 }
duke@0 585
duke@0 586 // If we cleared the region, we would mangle it in debug builds. That would cause page
duke@0 587 // allocation in a different place. Hence we set the top directly.
duke@0 588 s->initialize(new_region, false);
duke@0 589 s->set_top(s->bottom());
duke@0 590
duke@0 591 set_adaptation_cycles(samples_count());
duke@0 592 }
duke@0 593 }
duke@0 594
duke@0 595 // Set the top of the whole space.
duke@0 596 // Mark the holes in the chunks below top() as invalid.
duke@0 597 void MutableNUMASpace::set_top(HeapWord* value) {
duke@0 598 bool found_top = false;
duke@0 599 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@0 600 LGRPSpace *ls = lgrp_spaces()->at(i);
duke@0 601 MutableSpace *s = ls->space();
duke@0 602 HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
duke@0 603
duke@0 604 if (s->contains(value)) {
iveresov@141 605 if (!os::numa_has_static_binding() && top < value && top < s->end()) {
duke@0 606 ls->add_invalid_region(MemRegion(top, value));
duke@0 607 }
duke@0 608 s->set_top(value);
duke@0 609 found_top = true;
duke@0 610 } else {
duke@0 611 if (found_top) {
duke@0 612 s->set_top(s->bottom());
duke@0 613 } else {
iveresov@141 614 if (!os::numa_has_static_binding() && top < s->end()) {
iveresov@141 615 ls->add_invalid_region(MemRegion(top, s->end()));
iveresov@141 616 }
iveresov@141 617 s->set_top(s->end());
duke@0 618 }
duke@0 619 }
duke@0 620 }
duke@0 621 MutableSpace::set_top(value);
duke@0 622 }
duke@0 623
duke@0 624 void MutableNUMASpace::clear() {
duke@0 625 MutableSpace::set_top(bottom());
duke@0 626 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@0 627 lgrp_spaces()->at(i)->space()->clear();
duke@0 628 }
duke@0 629 }
duke@0 630
iveresov@141 631 /*
iveresov@141 632 Linux supports static memory binding, so most of the logic dealing
iveresov@141 633 with possibly invalid page allocation is effectively disabled. Besides,
iveresov@141 634 there is no notion of a home node in Linux: a thread is allowed to
iveresov@141 635 migrate freely, although the scheduler is rather reluctant to move
iveresov@141 636 threads between nodes. We check the current node on every allocation,
iveresov@141 637 and with high probability a thread stays on the same node for some
iveresov@141 638 time, allowing local access to recently allocated
iveresov@141 639 objects.
iveresov@141 640 */
iveresov@141 641
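// A brief note on the allocation paths below: on systems without static
// binding the pages of a new block are not bound in advance, so allocate()
// and cas_allocate() touch one word on every small page of the block to make
// the physical page allocation happen in the context of the allocating
// thread; with static binding the pages have already been bound by
// bias_region() and no touching is needed.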
duke@0 642 HeapWord* MutableNUMASpace::allocate(size_t size) {
iveresov@141 643 Thread* thr = Thread::current();
iveresov@141 644 int lgrp_id = thr->lgrp_id();
iveresov@141 645 if (lgrp_id == -1 || !os::numa_has_group_homing()) {
duke@0 646 lgrp_id = os::numa_get_group_id();
iveresov@141 647 thr->set_lgrp_id(lgrp_id);
duke@0 648 }
duke@0 649
duke@0 650 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
duke@0 651
duke@0 652 // It is possible that a new CPU has been hotplugged and
duke@0 653 // we haven't reshaped the space accordingly.
duke@0 654 if (i == -1) {
duke@0 655 i = os::random() % lgrp_spaces()->length();
duke@0 656 }
duke@0 657
duke@0 658 MutableSpace *s = lgrp_spaces()->at(i)->space();
duke@0 659 HeapWord *p = s->allocate(size);
duke@0 660
duke@0 661 if (p != NULL && s->free_in_words() < (size_t)oopDesc::header_size()) {
duke@0 662 s->set_top(s->top() - size);
duke@0 663 p = NULL;
duke@0 664 }
duke@0 665 if (p != NULL) {
duke@0 666 if (top() < s->top()) { // Keep _top updated.
duke@0 667 MutableSpace::set_top(s->top());
duke@0 668 }
duke@0 669 }
iveresov@141 670 // Make the page allocation happen here if there is no static binding.
iveresov@141 671 if (p != NULL && !os::numa_has_static_binding()) {
duke@0 672 for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
duke@0 673 *(int*)i = 0;
duke@0 674 }
duke@0 675 }
duke@0 676 return p;
duke@0 677 }
duke@0 678
duke@0 679 // This version is lock-free.
duke@0 680 HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
iveresov@141 681 Thread* thr = Thread::current();
iveresov@141 682 int lgrp_id = thr->lgrp_id();
iveresov@141 683 if (lgrp_id == -1 || !os::numa_has_group_homing()) {
duke@0 684 lgrp_id = os::numa_get_group_id();
iveresov@141 685 thr->set_lgrp_id(lgrp_id);
duke@0 686 }
duke@0 687
duke@0 688 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
duke@0 689 // It is possible that a new CPU has been hotplugged and
duke@0 690 // we haven't reshaped the space accordingly.
duke@0 691 if (i == -1) {
duke@0 692 i = os::random() % lgrp_spaces()->length();
duke@0 693 }
duke@0 694 MutableSpace *s = lgrp_spaces()->at(i)->space();
duke@0 695 HeapWord *p = s->cas_allocate(size);
duke@0 696 if (p != NULL && s->free_in_words() < (size_t)oopDesc::header_size()) {
duke@0 697 if (s->cas_deallocate(p, size)) {
duke@0 698 // We were the last to allocate and created a fragment less than
duke@0 699 // a minimal object.
duke@0 700 p = NULL;
duke@0 701 }
duke@0 702 }
duke@0 703 if (p != NULL) {
duke@0 704 HeapWord* cur_top, *cur_chunk_top = p + size;
duke@0 705 while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
duke@0 706 if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) {
duke@0 707 break;
duke@0 708 }
duke@0 709 }
duke@0 710 }
duke@0 711
iveresov@141 712 // Make the page allocation happen here if there is no static binding.
iveresov@141 713 if (p != NULL && !os::numa_has_static_binding()) {
duke@0 714 for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
duke@0 715 *(int*)i = 0;
duke@0 716 }
duke@0 717 }
duke@0 718 return p;
duke@0 719 }
duke@0 720
duke@0 721 void MutableNUMASpace::print_short_on(outputStream* st) const {
duke@0 722 MutableSpace::print_short_on(st);
duke@0 723 st->print(" (");
duke@0 724 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@0 725 st->print("lgrp %d: ", lgrp_spaces()->at(i)->lgrp_id());
duke@0 726 lgrp_spaces()->at(i)->space()->print_short_on(st);
duke@0 727 if (i < lgrp_spaces()->length() - 1) {
duke@0 728 st->print(", ");
duke@0 729 }
duke@0 730 }
duke@0 731 st->print(")");
duke@0 732 }
duke@0 733
duke@0 734 void MutableNUMASpace::print_on(outputStream* st) const {
duke@0 735 MutableSpace::print_on(st);
duke@0 736 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@0 737 LGRPSpace *ls = lgrp_spaces()->at(i);
duke@0 738 st->print(" lgrp %d", ls->lgrp_id());
duke@0 739 ls->space()->print_on(st);
duke@0 740 if (NUMAStats) {
duke@0 741 st->print(" local/remote/unbiased/uncommitted: %dK/%dK/%dK/%dK, large/small pages: %d/%d\n",
duke@0 742 ls->space_stats()->_local_space / K,
duke@0 743 ls->space_stats()->_remote_space / K,
duke@0 744 ls->space_stats()->_unbiased_space / K,
duke@0 745 ls->space_stats()->_uncommited_space / K,
duke@0 746 ls->space_stats()->_large_pages,
duke@0 747 ls->space_stats()->_small_pages);
duke@0 748 }
duke@0 749 }
duke@0 750 }
duke@0 751
duke@0 752 void MutableNUMASpace::verify(bool allow_dirty) const {
duke@0 753 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@0 754 lgrp_spaces()->at(i)->space()->verify(allow_dirty);
duke@0 755 }
duke@0 756 }
duke@0 757
duke@0 758 // Scan pages and gather stats about page placement and size.
duke@0 759 void MutableNUMASpace::LGRPSpace::accumulate_statistics(size_t page_size) {
duke@0 760 clear_space_stats();
duke@0 761 char *start = (char*)round_to((intptr_t) space()->bottom(), page_size);
duke@0 762 char* end = (char*)round_down((intptr_t) space()->end(), page_size);
duke@0 763 if (start < end) {
duke@0 764 for (char *p = start; p < end;) {
duke@0 765 os::page_info info;
duke@0 766 if (os::get_page_info(p, &info)) {
duke@0 767 if (info.size > 0) {
duke@0 768 if (info.size > (size_t)os::vm_page_size()) {
duke@0 769 space_stats()->_large_pages++;
duke@0 770 } else {
duke@0 771 space_stats()->_small_pages++;
duke@0 772 }
duke@0 773 if (info.lgrp_id == lgrp_id()) {
duke@0 774 space_stats()->_local_space += info.size;
duke@0 775 } else {
duke@0 776 space_stats()->_remote_space += info.size;
duke@0 777 }
duke@0 778 p += info.size;
duke@0 779 } else {
duke@0 780 p += os::vm_page_size();
duke@0 781 space_stats()->_uncommited_space += os::vm_page_size();
duke@0 782 }
duke@0 783 } else {
duke@0 784 return;
duke@0 785 }
duke@0 786 }
duke@0 787 }
duke@0 788 space_stats()->_unbiased_space = pointer_delta(start, space()->bottom(), sizeof(char)) +
duke@0 789 pointer_delta(space()->end(), end, sizeof(char));
duke@0 790
duke@0 791 }
duke@0 792
duke@0 793 // Scan page_count pages and verify that they have the right size and placement.
duke@0 794 // If invalid pages are found they are freed in the hope that a subsequent
duke@0 795 // reallocation will be more successful.
duke@0 796 void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count)
duke@0 797 {
duke@0 798 char* range_start = (char*)round_to((intptr_t) space()->bottom(), page_size);
duke@0 799 char* range_end = (char*)round_down((intptr_t) space()->end(), page_size);
duke@0 800
duke@0 801 if (range_start > last_page_scanned() || last_page_scanned() >= range_end) {
duke@0 802 set_last_page_scanned(range_start);
duke@0 803 }
duke@0 804
duke@0 805 char *scan_start = last_page_scanned();
duke@0 806 char* scan_end = MIN2(scan_start + page_size * page_count, range_end);
duke@0 807
duke@0 808 os::page_info page_expected, page_found;
duke@0 809 page_expected.size = page_size;
duke@0 810 page_expected.lgrp_id = lgrp_id();
duke@0 811
duke@0 812 char *s = scan_start;
duke@0 813 while (s < scan_end) {
duke@0 814 char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found);
duke@0 815 if (e == NULL) {
duke@0 816 break;
duke@0 817 }
duke@0 818 if (e != scan_end) {
duke@0 819 if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
duke@0 820 && page_expected.size != 0) {
duke@0 821 os::free_memory(s, pointer_delta(e, s, sizeof(char)));
duke@0 822 }
duke@0 823 page_expected = page_found;
duke@0 824 }
duke@0 825 s = e;
duke@0 826 }
duke@0 827
duke@0 828 set_last_page_scanned(scan_end);
duke@0 829 }