changeset 4532:e9cab25ca15e

Merge
author vladidan
date Wed, 29 May 2013 16:12:56 -0400
parents 851f1e0cef3c f1faa475b37d
children 1a7a04b4e45f
diffstat 16 files changed, 237 insertions(+), 121 deletions(-)
--- a/.hgtags	Tue Apr 30 17:36:01 2013 -0400
+++ b/.hgtags	Wed May 29 16:12:56 2013 -0400
@@ -499,3 +499,5 @@
 7eabf05bddea524aa4a00c1fc6f2eba21c06e275 hs24-b44
 a8a071629df4856a44660143c6dd8e7843cdcca2 jdk7u40-b25
 69fecd3e06892e95a32ce4c27f85b1d61e946fc8 hs24-b45
+43fd44b89792fcc931569218dce51df4c2856a17 jdk7u40-b26
+e50c5a1869b1f629508780eda1592674177a9f91 hs24-b46
--- a/make/hotspot_version	Tue Apr 30 17:36:01 2013 -0400
+++ b/make/hotspot_version	Wed May 29 16:12:56 2013 -0400
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=24
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=46
+HS_BUILD_NUMBER=47
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/src/os/posix/vm/os_posix.cpp	Tue Apr 30 17:36:01 2013 -0400
+++ b/src/os/posix/vm/os_posix.cpp	Wed May 29 16:12:56 2013 -0400
@@ -134,6 +134,10 @@
   return aligned_base;
 }
 
+bool os::can_release_partial_region() {
+  return true;
+}
+
 void os::Posix::print_load_average(outputStream* st) {
   st->print("load average:");
   double loadavg[3];
--- a/src/os/windows/vm/os_windows.cpp	Tue Apr 30 17:36:01 2013 -0400
+++ b/src/os/windows/vm/os_windows.cpp	Wed May 29 16:12:56 2013 -0400
@@ -2971,6 +2971,10 @@
   }
 }
 
+bool os::can_release_partial_region() {
+  return false;
+}
+
 // Multiple threads can race in this code, but it's not possible to unmap small sections of
 // virtual space to get the requested alignment, as POSIX-like OSes can.
 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
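
Note on the platform split above: the Windows "false" follows from a VirtualFree() constraint, namely that MEM_RELEASE accepts only the exact base address returned by VirtualAlloc(), with a size of 0, so an interior sub-range can at best be decommitted. A standalone sketch of that constraint (illustrative only, not part of this changeset):

  #include <windows.h>
  #include <cstdio>

  int main() {
    // Reserve 1 MiB of address space; 'base' is the only address that
    // MEM_RELEASE will later accept.
    char* base = (char*) VirtualAlloc(NULL, 1 << 20, MEM_RESERVE, PAGE_NOACCESS);
    if (base == NULL) return 1;

    // Releasing an interior sub-range fails: MEM_RELEASE requires (base, 0).
    BOOL released = VirtualFree(base + 4096, 4096, MEM_RELEASE);
    printf("partial MEM_RELEASE:  %d\n", released);    // 0 (fails)

    // Decommitting a sub-range is allowed, which is why the new
    // os::release_or_uncommit_partial_region() (os.cpp below) falls back
    // to uncommit_memory() on Windows.
    BOOL decommitted = VirtualFree(base + 4096, 4096, MEM_DECOMMIT);
    printf("partial MEM_DECOMMIT: %d\n", decommitted); // nonzero (succeeds)

    VirtualFree(base, 0, MEM_RELEASE);  // full release at the original base
    return 0;
  }
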
--- a/src/share/vm/gc_implementation/shared/gcTrace.cpp	Tue Apr 30 17:36:01 2013 -0400
+++ b/src/share/vm/gc_implementation/shared/gcTrace.cpp	Wed May 29 16:12:56 2013 -0400
@@ -91,24 +91,32 @@
   send_reference_stats_event(REF_PHANTOM, rps.phantom_count());
 }
 
-class ObjectCountEventSenderClosure : public KlassInfoClosure {
-  GCTracer* _gc_tracer;
- public:
-  ObjectCountEventSenderClosure(GCTracer* gc_tracer) : _gc_tracer(gc_tracer) {}
- private:
-  void do_cinfo(KlassInfoEntry* entry) {
-    if (is_visible_klass(entry->klass())) {
-      _gc_tracer->send_object_count_after_gc_event(entry->klass(), entry->count(),
-                                                   entry->words() * BytesPerWord);
-      }
+void ObjectCountEventSenderClosure::do_cinfo(KlassInfoEntry* entry) {
+  if (should_send_event(entry)) {
+    send_event(entry);
   }
+}
 
+void ObjectCountEventSenderClosure::send_event(KlassInfoEntry* entry) {
+  _gc_tracer->send_object_count_after_gc_event(entry->klass(), entry->count(),
+                                               entry->words() * BytesPerWord);
+}
+
+bool ObjectCountEventSenderClosure::should_send_event(KlassInfoEntry* entry) const {
+  double percentage_of_heap = ((double) entry->words()) / _total_size_in_words;
+  return percentage_of_heap > _size_threshold_percentage;
+}
+
+bool ObjectCountFilter::do_object_b(oop obj) {
+  bool is_alive = _is_alive == NULL ? true : _is_alive->do_object_b(obj);
+  return is_alive && is_externally_visible_klass(obj->klass());
+}
+
+bool ObjectCountFilter::is_externally_visible_klass(klassOop k) const {
   // Do not expose internal implementation-specific classes
-  bool is_visible_klass(klassOop k) {
-    return k->klass_part()->oop_is_instance() ||
-           (k->klass_part()->oop_is_array() && k != Universe::systemObjArrayKlassObj());
-  }
-};
+  return (k->klass_part()->oop_is_instance() || k->klass_part()->oop_is_array()) &&
+         k != Universe::systemObjArrayKlassObj();
+}
 
 void GCTracer::report_object_count_after_gc(BoolObjectClosure *is_alive_cl) {
   if (should_send_object_count_after_gc_event()) {
@@ -116,8 +124,11 @@
 
     KlassInfoTable cit(HeapInspection::start_of_perm_gen());
     if (!cit.allocation_failed()) {
-      ObjectCountEventSenderClosure event_sender(this);
-      HeapInspection::instance_inspection(&cit, &event_sender, false, is_alive_cl);
+      ObjectCountFilter object_filter(is_alive_cl);
+      HeapInspection::populate_table(&cit, false, &object_filter);
+
+      ObjectCountEventSenderClosure event_sender(this, cit.size_of_instances_in_words());
+      cit.iterate(&event_sender);
     }
   }
 }
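
The net effect of the refactored closure: a class is reported only when its instances occupy more than ObjectCountCutOffPercent (in percent) of the live heap; the default of 0.5 is added in globals.hpp below. A standalone sketch of the cutoff arithmetic, with made-up sizes:

  #include <cstdio>
  #include <cstddef>

  int main() {
    const double ObjectCountCutOffPercent = 0.5;              // default (globals.hpp)
    const double threshold = ObjectCountCutOffPercent / 100;  // 0.005, as in the ctor

    size_t total_words = 1000000;  // cit.size_of_instances_in_words()
    size_t entry_words = 6000;     // one class's entry->words()

    // should_send_event(): this class's share of the live heap vs. the cutoff.
    double share = (double) entry_words / total_words;             // 0.006
    printf("send event? %s\n", share > threshold ? "yes" : "no");  // yes
    return 0;
  }
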
--- a/src/share/vm/gc_implementation/shared/gcTrace.hpp	Tue Apr 30 17:36:01 2013 -0400
+++ b/src/share/vm/gc_implementation/shared/gcTrace.hpp	Wed May 29 16:12:56 2013 -0400
@@ -30,6 +30,7 @@
 #include "gc_implementation/shared/gcWhen.hpp"
 #include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "memory/allocation.hpp"
+#include "memory/klassInfoClosure.hpp"
 #include "memory/referenceType.hpp"
 #ifndef SERIALGC
 #include "gc_implementation/g1/g1YCTypes.hpp"
@@ -139,6 +140,33 @@
   bool should_send_object_count_after_gc_event() const;
 };
 
+class ObjectCountEventSenderClosure : public KlassInfoClosure {
+  GCTracer* _gc_tracer;
+  const double _size_threshold_percentage;
+  const size_t _total_size_in_words;
+ public:
+  ObjectCountEventSenderClosure(GCTracer* gc_tracer, size_t total_size_in_words) :
+    _gc_tracer(gc_tracer),
+    _size_threshold_percentage(ObjectCountCutOffPercent / 100),
+    _total_size_in_words(total_size_in_words)
+  {}
+  virtual void do_cinfo(KlassInfoEntry* entry);
+ protected:
+  virtual void send_event(KlassInfoEntry* entry);
+ private:
+  bool should_send_event(KlassInfoEntry* entry) const;
+};
+
+class ObjectCountFilter : public BoolObjectClosure {
+  BoolObjectClosure* _is_alive;
+ public:
+  ObjectCountFilter(BoolObjectClosure* is_alive = NULL) : _is_alive(is_alive) {}
+  bool do_object_b(oop obj);
+  void do_object(oop obj) { ShouldNotReachHere(); }
+ private:
+  bool is_externally_visible_klass(klassOop k) const;
+};
+
 class YoungGCTracer : public GCTracer {
   static const uint UNSET_TENURING_THRESHOLD = (uint) -1;
 
--- a/src/share/vm/memory/heapInspection.cpp	Tue Apr 30 17:36:01 2013 -0400
+++ b/src/share/vm/memory/heapInspection.cpp	Wed May 29 16:12:56 2013 -0400
@@ -113,9 +113,8 @@
   }
 }
 
-KlassInfoTable::KlassInfoTable(HeapWord* ref) {
-  _size = 0;
-  _ref = ref;
+KlassInfoTable::KlassInfoTable(HeapWord* ref) :
+  _size(0), _ref(ref), _size_of_instances_in_words(0) {
   _buckets = (KlassInfoBucket *) os::malloc(sizeof(KlassInfoBucket) * _num_buckets, mtInternal);
   if (_buckets != NULL) {
     _size = _num_buckets;
@@ -160,6 +159,7 @@
   if (elt != NULL) {
     elt->set_count(elt->count() + 1);
     elt->set_words(elt->words() + obj->size());
+    _size_of_instances_in_words += obj->size();
     return true;
   } else {
     return false;
@@ -173,6 +173,10 @@
   }
 }
 
+size_t KlassInfoTable::size_of_instances_in_words() const {
+  return _size_of_instances_in_words;
+}
+
 int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
   return (*e1)->compare(*e1,*e2);
 }
@@ -282,10 +286,9 @@
   }
 }
 
-size_t HeapInspection::instance_inspection(KlassInfoTable* cit,
-                                           KlassInfoClosure* cl,
-                                           bool need_prologue,
-                                           BoolObjectClosure* filter) {
+size_t HeapInspection::populate_table(KlassInfoTable* cit,
+                                      bool need_prologue,
+                                      BoolObjectClosure *filter) {
   ResourceMark rm;
 
   if (need_prologue) {
@@ -294,7 +297,6 @@
 
   RecordInstanceClosure ric(cit, filter);
   Universe::heap()->object_iterate(&ric);
-  cit->iterate(cl);
 
   // need to run epilogue if we run prologue
   if (need_prologue) {
@@ -309,17 +311,20 @@
 
   KlassInfoTable cit(start_of_perm_gen());
   if (!cit.allocation_failed()) {
+    size_t missed_count = populate_table(&cit, need_prologue);
+    if (missed_count != 0) {
+      st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
+                   " total instances in data below",
+                   missed_count);
+    }
+
     KlassInfoHisto histo("\n"
                      " num     #instances         #bytes  class name\n"
                      "----------------------------------------------");
     HistoClosure hc(&histo);
 
-    size_t missed_count = instance_inspection(&cit, &hc, need_prologue);
-    if (missed_count != 0) {
-      st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
-                   " total instances in data below",
-                   missed_count);
-    }
+    cit.iterate(&hc);
+
     histo.sort();
     histo.print_on(st);
   } else {
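
The populate/iterate split above lets one heap walk feed multiple consumers: heap_inspection() prints its histogram while GCTracer sends events, both from the same table. A self-contained sketch of that pattern with stand-in types (the real KlassInfoTable hashes klassOops into C-heap buckets, not a std::map):

  #include <cstdio>
  #include <functional>
  #include <map>
  #include <string>

  struct Entry { size_t count = 0; size_t words = 0; };

  struct InfoTable {                          // stand-in for KlassInfoTable
    std::map<std::string, Entry> by_class;
    size_t size_of_instances_in_words = 0;

    void record(const std::string& klass, size_t words) {  // ~record_instance()
      by_class[klass].count += 1;
      by_class[klass].words += words;
      size_of_instances_in_words += words;                 // the new running total
    }
    void iterate(const std::function<void(const std::string&, const Entry&)>& cl) const {
      for (const auto& kv : by_class) cl(kv.first, kv.second);
    }
  };

  int main() {
    InfoTable cit;                         // populate phase: one walk over the heap
    cit.record("java.lang.String", 8);
    cit.record("java.lang.String", 8);
    cit.record("int[]", 984);

    // Iterate phase: independent closures share the populated table.
    cit.iterate([](const std::string& k, const Entry& e) {     // ~HistoClosure
      printf("%-18s count=%zu words=%zu\n", k.c_str(), e.count, e.words);
    });
    const double threshold = 0.5 / 100;    // ~ObjectCountEventSenderClosure
    cit.iterate([&](const std::string& k, const Entry& e) {
      if ((double) e.words / cit.size_of_instances_in_words > threshold)
        printf("would send event for %s\n", k.c_str());
    });
    return 0;
  }
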
--- a/src/share/vm/memory/heapInspection.hpp	Tue Apr 30 17:36:01 2013 -0400
+++ b/src/share/vm/memory/heapInspection.hpp	Wed May 29 16:12:56 2013 -0400
@@ -26,6 +26,7 @@
 #define SHARE_VM_MEMORY_HEAPINSPECTION_HPP
 
 #include "memory/allocation.inline.hpp"
+#include "memory/klassInfoClosure.hpp"
 #include "oops/oop.inline.hpp"
 
 
@@ -64,12 +65,6 @@
   void print_on(outputStream* st) const;
 };
 
-class KlassInfoClosure: public StackObj {
- public:
-  // Called for each KlassInfoEntry.
-  virtual void do_cinfo(KlassInfoEntry* cie) = 0;
-};
-
 class KlassInfoBucket: public CHeapObj<mtInternal> {
  private:
   KlassInfoEntry* _list;
@@ -86,6 +81,7 @@
  private:
   int _size;
   static const int _num_buckets = 20011;
+  size_t _size_of_instances_in_words;
 
   // An aligned reference address (typically the least
   // address in the perm gen) used for hashing klass
@@ -102,6 +98,7 @@
   bool record_instance(const oop obj);
   void iterate(KlassInfoClosure* cic);
   bool allocation_failed() { return _buckets == NULL; }
+  size_t size_of_instances_in_words() const;
 };
 
 class KlassInfoHisto : public StackObj {
@@ -125,10 +122,9 @@
 class HeapInspection : public AllStatic {
  public:
   static void heap_inspection(outputStream* st, bool need_prologue);
-  static size_t instance_inspection(KlassInfoTable* cit,
-                                    KlassInfoClosure* cl,
-                                    bool need_prologue,
-                                    BoolObjectClosure* filter = NULL);
+  static size_t populate_table(KlassInfoTable* cit,
+                               bool need_prologue,
+                               BoolObjectClosure* filter = NULL);
   static HeapWord* start_of_perm_gen();
   static void find_instances_at_safepoint(klassOop k, GrowableArray<oop>* result);
  private:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/memory/klassInfoClosure.hpp	Wed May 29 16:12:56 2013 -0400
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_KLASSINFOCLOSURE_HPP
+#define SHARE_VM_MEMORY_KLASSINFOCLOSURE_HPP
+
+class KlassInfoEntry;
+
+class KlassInfoClosure : public StackObj {
+ public:
+  // Called for each KlassInfoEntry.
+  virtual void do_cinfo(KlassInfoEntry* cie) = 0;
+};
+
+#endif // SHARE_VM_MEMORY_KLASSINFOCLOSURE_HPP
--- a/src/share/vm/prims/jvmtiImpl.cpp	Tue Apr 30 17:36:01 2013 -0400
+++ b/src/share/vm/prims/jvmtiImpl.cpp	Wed May 29 16:12:56 2013 -0400
@@ -372,19 +372,14 @@
   case CLEAR_BREAKPOINT:
     _breakpoints->clear_at_safepoint(*_bp);
     break;
-  case CLEAR_ALL_BREAKPOINT:
-    _breakpoints->clearall_at_safepoint();
-    break;
   default:
     assert(false, "Unknown operation");
   }
 }
 
 void VM_ChangeBreakpoints::oops_do(OopClosure* f) {
-  // This operation keeps breakpoints alive
-  if (_breakpoints != NULL) {
-    _breakpoints->oops_do(f);
-  }
+  // The JvmtiBreakpoints in _breakpoints will be visited via
+  // JvmtiExport::oops_do.
   if (_bp != NULL) {
     _bp->oops_do(f);
   }
@@ -445,23 +440,13 @@
   }
 }
 
-void JvmtiBreakpoints::clearall_at_safepoint() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-
-  int len = _bps.length();
-  for (int i=0; i<len; i++) {
-    _bps.at(i).clear();
-  }
-  _bps.clear();
-}
-
 int JvmtiBreakpoints::length() { return _bps.length(); }
 
 int JvmtiBreakpoints::set(JvmtiBreakpoint& bp) {
   if ( _bps.find(bp) != -1) {
      return JVMTI_ERROR_DUPLICATE;
   }
-  VM_ChangeBreakpoints set_breakpoint(this,VM_ChangeBreakpoints::SET_BREAKPOINT, &bp);
+  VM_ChangeBreakpoints set_breakpoint(VM_ChangeBreakpoints::SET_BREAKPOINT, &bp);
   VMThread::execute(&set_breakpoint);
   return JVMTI_ERROR_NONE;
 }
@@ -471,7 +456,7 @@
      return JVMTI_ERROR_NOT_FOUND;
   }
 
-  VM_ChangeBreakpoints clear_breakpoint(this,VM_ChangeBreakpoints::CLEAR_BREAKPOINT, &bp);
+  VM_ChangeBreakpoints clear_breakpoint(VM_ChangeBreakpoints::CLEAR_BREAKPOINT, &bp);
   VMThread::execute(&clear_breakpoint);
   return JVMTI_ERROR_NONE;
 }
@@ -502,11 +487,6 @@
   }
 }
 
-void JvmtiBreakpoints::clearall() {
-  VM_ChangeBreakpoints clearall_breakpoint(this,VM_ChangeBreakpoints::CLEAR_ALL_BREAKPOINT);
-  VMThread::execute(&clearall_breakpoint);
-}
-
 //
 // class JvmtiCurrentBreakpoints
 //
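
With CLEAR_ALL_BREAKPOINT gone, VM_ChangeBreakpoints no longer needs callers to hand in the breakpoint list: there is one canonical JvmtiBreakpoints, so the constructor (moved in jvmtiImpl.hpp below) fetches it via JvmtiCurrentBreakpoints::get_jvmti_breakpoints(). A standalone stand-in showing the simplified call-site shape (illustrative types, not VM code):

  #include <cassert>
  #include <cstdio>
  #include <vector>

  struct Breakpoint { int id; };

  struct Breakpoints {                  // stand-in for JvmtiBreakpoints
    std::vector<int> ids;
    static Breakpoints& current() {     // ~JvmtiCurrentBreakpoints::get_jvmti_breakpoints()
      static Breakpoints instance;
      return instance;
    }
  };

  struct ChangeBreakpointsOp {          // stand-in for VM_ChangeBreakpoints
    enum Op { SET_BREAKPOINT, CLEAR_BREAKPOINT };
    Breakpoints* _breakpoints;
    Op           _operation;
    Breakpoint*  _bp;

    // The list is looked up here rather than supplied by every caller.
    ChangeBreakpointsOp(Op operation, Breakpoint* bp)
      : _breakpoints(&Breakpoints::current()), _operation(operation), _bp(bp) {
      assert(bp != NULL);
    }
    void doit() {                       // the VM thread runs this at a safepoint
      if (_operation == SET_BREAKPOINT) _breakpoints->ids.push_back(_bp->id);
      // CLEAR_BREAKPOINT elided; CLEAR_ALL_BREAKPOINT no longer exists.
    }
  };

  int main() {
    Breakpoint bp = { 42 };
    ChangeBreakpointsOp op(ChangeBreakpointsOp::SET_BREAKPOINT, &bp);  // no list arg
    op.doit();
    printf("breakpoints: %zu\n", Breakpoints::current().ids.size());   // 1
    return 0;
  }
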
--- a/src/share/vm/prims/jvmtiImpl.hpp	Tue Apr 30 17:36:01 2013 -0400
+++ b/src/share/vm/prims/jvmtiImpl.hpp	Wed May 29 16:12:56 2013 -0400
@@ -200,47 +200,6 @@
 
 ///////////////////////////////////////////////////////////////
 //
-// class VM_ChangeBreakpoints
-// Used by              : JvmtiBreakpoints
-// Used by JVMTI methods: none directly.
-// Note: A Helper class.
-//
-// VM_ChangeBreakpoints implements a VM_Operation for ALL modifications to the JvmtiBreakpoints class.
-//
-
-class VM_ChangeBreakpoints : public VM_Operation {
-private:
-  JvmtiBreakpoints* _breakpoints;
-  int               _operation;
-  JvmtiBreakpoint*  _bp;
-
-public:
-  enum { SET_BREAKPOINT=0, CLEAR_BREAKPOINT=1, CLEAR_ALL_BREAKPOINT=2 };
-
-  VM_ChangeBreakpoints(JvmtiBreakpoints* breakpoints, int operation) {
-    _breakpoints = breakpoints;
-    _bp = NULL;
-    _operation = operation;
-    assert(breakpoints != NULL, "breakpoints != NULL");
-    assert(operation == CLEAR_ALL_BREAKPOINT, "unknown breakpoint operation");
-  }
-  VM_ChangeBreakpoints(JvmtiBreakpoints* breakpoints, int operation, JvmtiBreakpoint *bp) {
-    _breakpoints = breakpoints;
-    _bp = bp;
-    _operation = operation;
-    assert(breakpoints != NULL, "breakpoints != NULL");
-    assert(bp != NULL, "bp != NULL");
-    assert(operation == SET_BREAKPOINT || operation == CLEAR_BREAKPOINT , "unknown breakpoint operation");
-  }
-
-  VMOp_Type type() const { return VMOp_ChangeBreakpoints; }
-  void doit();
-  void oops_do(OopClosure* f);
-};
-
-
-///////////////////////////////////////////////////////////////
-//
 // class JvmtiBreakpoints
 // Used by              : JvmtiCurrentBreakpoints
 // Used by JVMTI methods: none directly
@@ -267,7 +226,6 @@
   friend class VM_ChangeBreakpoints;
   void set_at_safepoint(JvmtiBreakpoint& bp);
   void clear_at_safepoint(JvmtiBreakpoint& bp);
-  void clearall_at_safepoint();
 
   static void do_element(GrowableElement *e);
 
@@ -282,7 +240,6 @@
   int  set(JvmtiBreakpoint& bp);
   int  clear(JvmtiBreakpoint& bp);
   void clearall_in_class_at_safepoint(klassOop klass);
-  void clearall();
   void gc_epilogue();
 };
 
@@ -340,6 +297,40 @@
     return false;
 }
 
+
+///////////////////////////////////////////////////////////////
+//
+// class VM_ChangeBreakpoints
+// Used by              : JvmtiBreakpoints
+// Used by JVMTI methods: none directly.
+// Note: A Helper class.
+//
+// VM_ChangeBreakpoints implements a VM_Operation for ALL modifications to the JvmtiBreakpoints class.
+//
+
+class VM_ChangeBreakpoints : public VM_Operation {
+private:
+  JvmtiBreakpoints* _breakpoints;
+  int               _operation;
+  JvmtiBreakpoint*  _bp;
+
+public:
+  enum { SET_BREAKPOINT=0, CLEAR_BREAKPOINT=1 };
+
+  VM_ChangeBreakpoints(int operation, JvmtiBreakpoint *bp) {
+    JvmtiBreakpoints& current_bps = JvmtiCurrentBreakpoints::get_jvmti_breakpoints();
+    _breakpoints = &current_bps;
+    _bp = bp;
+    _operation = operation;
+    assert(bp != NULL, "bp != NULL");
+  }
+
+  VMOp_Type type() const { return VMOp_ChangeBreakpoints; }
+  void doit();
+  void oops_do(OopClosure* f);
+};
+
+
 ///////////////////////////////////////////////////////////////
 // The get/set local operations must only be done by the VM thread
 // because the interpreter version needs to access oop maps, which can
--- a/src/share/vm/runtime/globals.hpp	Tue Apr 30 17:36:01 2013 -0400
+++ b/src/share/vm/runtime/globals.hpp	Wed May 29 16:12:56 2013 -0400
@@ -2302,6 +2302,10 @@
           "Print diagnostic message when GC is stalled"                     \
           "by JNI critical section")                                        \
                                                                             \
+  experimental(double, ObjectCountCutOffPercent, 0.5,                       \
+          "The percentage of the used heap that the instances of a class "  \
+          "must occupy for the class to generate a trace event.")           \
+                                                                            \
   /* GC log rotation setting */                                             \
                                                                             \
   product(bool, UseGCLogFileRotation, false,                                \
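
Since the new flag is declared experimental(), it can only be set after experimental options are unlocked on the command line; a usage example (flag name from this changeset, the unlock switch is HotSpot's standard mechanism for experimental flags):

  java -XX:+UnlockExperimentalVMOptions -XX:ObjectCountCutOffPercent=1.0 ...

Raising the cutoff reduces how many per-class object-count events the tracer sends after a GC; 0.5 (i.e., 0.5% of the used heap) is the default.
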
--- a/src/share/vm/runtime/os.cpp	Tue Apr 30 17:36:01 2013 -0400
+++ b/src/share/vm/runtime/os.cpp	Wed May 29 16:12:56 2013 -0400
@@ -1461,6 +1461,12 @@
   return res;
 }
 
+bool os::release_or_uncommit_partial_region(char * addr, size_t bytes) {
+  if (can_release_partial_region()) {
+    return release_memory(addr, bytes);
+  }
+  return uncommit_memory(addr, bytes);
+}
 
 char* os::map_memory(int fd, const char* file_name, size_t file_offset,
                            char *addr, size_t bytes, bool read_only,
--- a/src/share/vm/runtime/os.hpp	Tue Apr 30 17:36:01 2013 -0400
+++ b/src/share/vm/runtime/os.hpp	Wed May 29 16:12:56 2013 -0400
@@ -266,6 +266,8 @@
                               bool executable = false);
   static bool   uncommit_memory(char* addr, size_t bytes);
   static bool   release_memory(char* addr, size_t bytes);
+  static bool   can_release_partial_region();
+  static bool   release_or_uncommit_partial_region(char* addr, size_t bytes);
 
   enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
   static bool   protect_memory(char* addr, size_t bytes, ProtType prot,
--- a/src/share/vm/runtime/virtualspace.cpp	Tue Apr 30 17:36:01 2013 -0400
+++ b/src/share/vm/runtime/virtualspace.cpp	Wed May 29 16:12:56 2013 -0400
@@ -81,17 +81,41 @@
   const size_t end_delta = len - (beg_delta + required_size);
 
   if (beg_delta != 0) {
-    os::release_memory(addr, beg_delta);
+    os::release_or_uncommit_partial_region(addr, beg_delta);
   }
 
   if (end_delta != 0) {
     char* release_addr = (char*) (s + beg_delta + required_size);
-    os::release_memory(release_addr, end_delta);
+    os::release_or_uncommit_partial_region(release_addr, end_delta);
   }
 
   return (char*) (s + beg_delta);
 }
 
+void ReservedSpace::set_raw_base_and_size(char * const raw_base,
+                                          size_t raw_size) {
+  assert(raw_base == NULL || !os::can_release_partial_region(), "sanity");
+  _raw_base = raw_base;
+  _raw_size = raw_size;
+}
+
+// On some systems (e.g., Windows), the address returned by os::reserve_memory()
+// is the only addr that can be passed to os::release_memory().  If alignment
+// was done by this class, that original address is _raw_base.
+void ReservedSpace::release_memory(char* default_addr, size_t default_size) {
+  bool ok;
+  if (_raw_base == NULL) {
+    ok = os::release_memory(default_addr, default_size);
+  } else {
+    assert(!os::can_release_partial_region(), "sanity");
+    ok = os::release_memory(_raw_base, _raw_size);
+  }
+  if (!ok) {
+    fatal("os::release_memory failed");
+  }
+  set_raw_base_and_size(NULL, 0);
+}
+
 char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                        const size_t prefix_size,
                                        const size_t prefix_align,
@@ -110,6 +134,10 @@
     fatal("os::release_memory failed");
   }
 
+  if (!os::can_release_partial_region()) {
+    set_raw_base_and_size(raw_addr, reserve_size);
+  }
+
 #ifdef ASSERT
   if (result != NULL) {
     const size_t raw = size_t(raw_addr);
@@ -127,8 +155,10 @@
 }
 
 // Helper method.
-static bool failed_to_reserve_as_requested(char* base, char* requested_address,
-                                           const size_t size, bool special)
+bool ReservedSpace::failed_to_reserve_as_requested(char* base,
+                                                   char* requested_address,
+                                                   const size_t size,
+                                                   bool special)
 {
   if (base == requested_address || requested_address == NULL)
     return false; // did not fail
@@ -147,9 +177,7 @@
         fatal("os::release_memory_special failed");
       }
     } else {
-      if (!os::release_memory(base, size)) {
-        fatal("os::release_memory failed");
-      }
+      release_memory(base, size);
     }
   }
   return true;
@@ -177,6 +205,8 @@
   assert(noaccess_prefix == 0 ||
          noaccess_prefix == prefix_align, "noaccess prefix wrong");
 
+  set_raw_base_and_size(NULL, 0);
+
   // Add in noaccess_prefix to prefix_size;
   const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
   const size_t size = adjusted_prefix_size + suffix_size;
@@ -224,9 +254,7 @@
     // result is often the same address (if the kernel hands out virtual
     // addresses from low to high), or an address that is offset by the increase
     // in size.  Exploit that to minimize the amount of extra space requested.
-    if (!os::release_memory(addr, size)) {
-      fatal("os::release_memory failed");
-    }
+    release_memory(addr, size);
 
     const size_t extra = MAX2(ofs, suffix_align - ofs);
     addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align,
@@ -265,6 +293,8 @@
   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
          "not a power of 2");
 
+  set_raw_base_and_size(NULL, 0);
+
   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 
   // Assert that if noaccess_prefix is used, it is the same as alignment.
@@ -340,7 +370,8 @@
     // Check alignment constraints
     if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
       // Base not aligned, retry
-      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
+      release_memory(base, size);
+
       // Make sure that size is aligned
       size = align_size_up(size, alignment);
       base = os::reserve_memory_aligned(size, alignment);
@@ -378,6 +409,7 @@
          "size not allocation aligned");
   _base = base;
   _size = size;
+  set_raw_base_and_size(NULL, 0);
   _alignment = alignment;
   _noaccess_prefix = 0;
   _special = special;
@@ -433,7 +465,7 @@
     if (special()) {
       os::release_memory_special(real_base, real_size);
     } else {
-      os::release_memory(real_base, real_size);
+      release_memory(real_base, real_size);
     }
     _base = NULL;
     _size = 0;
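
Taken together with the os.cpp change, the bookkeeping above exists because uncommitted slack on Windows stays reserved: the final release must hand the original (raw) base and size back to os::release_memory(). A minimal standalone sketch of that bookkeeping (stand-in type, printf in place of the real OS calls):

  #include <cstdio>
  #include <cstddef>

  struct Space {                                      // stand-in for ReservedSpace
    char*  _base = nullptr;     size_t _size = 0;     // aligned region handed out
    char*  _raw_base = nullptr; size_t _raw_size = 0; // original reservation, if kept

    // Only set when the platform cannot release a partial region.
    void set_raw_base_and_size(char* raw_base, size_t raw_size) {
      _raw_base = raw_base;
      _raw_size = raw_size;
    }

    void release_memory(char* default_addr, size_t default_size) {
      if (_raw_base == nullptr) {
        // POSIX path: alignment slack was already released, so the aligned
        // address and size cover the whole remaining region.
        printf("os::release_memory(%p, %zu)\n", (void*) default_addr, default_size);
      } else {
        // Windows path: free exactly what os::reserve_memory() returned.
        printf("os::release_memory(%p, %zu)\n", (void*) _raw_base, _raw_size);
      }
      set_raw_base_and_size(nullptr, 0);
    }
  };

  int main() {
    static char backing[1 << 16];                        // pretend reservation
    Space rs;
    rs._base = backing + 4096;                           // aligned view
    rs._size = 32768;
    rs.set_raw_base_and_size(backing, sizeof backing);   // as on Windows
    rs.release_memory(rs._base, rs._size);               // frees the raw region
    return 0;
  }
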
--- a/src/share/vm/runtime/virtualspace.hpp	Tue Apr 30 17:36:01 2013 -0400
+++ b/src/share/vm/runtime/virtualspace.hpp	Wed May 29 16:12:56 2013 -0400
@@ -35,6 +35,12 @@
   char*  _base;
   size_t _size;
   size_t _noaccess_prefix;
+
+  // The base and size prior to any alignment done by this class; used only on
+  // systems that cannot release part of a region.
+  char*  _raw_base;
+  size_t _raw_size;
+
   size_t _alignment;
   bool   _special;
   bool   _executable;
@@ -42,11 +48,20 @@
   // ReservedSpace
   ReservedSpace(char* base, size_t size, size_t alignment, bool special,
                 bool executable);
+
+  bool failed_to_reserve_as_requested(char* base, char* requested_address,
+                                      const size_t size, bool special);
   void initialize(size_t size, size_t alignment, bool large,
                   char* requested_address,
                   const size_t noaccess_prefix,
                   bool executable);
 
+  inline void set_raw_base_and_size(char * const raw_base, size_t raw_size);
+
+  // Release virtual address space.  If alignment was done, use the saved
+  // address and size when releasing.
+  void release_memory(char * default_addr, size_t default_size);
+
   // Release parts of an already-reserved memory region [addr, addr + len) to
   // get a new region that has "compound alignment."  Return the start of the
   // resulting region, or NULL on failure.