changeset:   59668:6e6611dd3331
description:
8245208: ZGC: Don't hold the ZPageAllocator lock while committing/uncommitting memory
Reviewed-by: eosterlund, stefank
author:      pliden
date:        Tue, 09 Jun 2020 11:01:09 +0200
parents:     c38008cb9c7d
children:    b4f6be3e3707
files:
  src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp
  src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp
  src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
  src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp
  src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp
  src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp
  src/hotspot/share/gc/z/vmStructs_z.hpp
  src/hotspot/share/gc/z/zAllocationFlags.hpp
  src/hotspot/share/gc/z/zArguments.cpp
  src/hotspot/share/gc/z/zCollectedHeap.cpp
  src/hotspot/share/gc/z/zCollectedHeap.hpp
  src/hotspot/share/gc/z/zFuture.hpp
  src/hotspot/share/gc/z/zFuture.inline.hpp
  src/hotspot/share/gc/z/zHeap.cpp
  src/hotspot/share/gc/z/zHeap.hpp
  src/hotspot/share/gc/z/zHeuristics.cpp
  src/hotspot/share/gc/z/zHeuristics.hpp
  src/hotspot/share/gc/z/zInitialize.cpp
  src/hotspot/share/gc/z/zMemory.cpp
  src/hotspot/share/gc/z/zMemory.hpp
  src/hotspot/share/gc/z/zPage.cpp
  src/hotspot/share/gc/z/zPage.hpp
  src/hotspot/share/gc/z/zPage.inline.hpp
  src/hotspot/share/gc/z/zPageAllocator.cpp
  src/hotspot/share/gc/z/zPageAllocator.hpp
  src/hotspot/share/gc/z/zPageCache.cpp
  src/hotspot/share/gc/z/zPageCache.hpp
  src/hotspot/share/gc/z/zPageCache.inline.hpp
  src/hotspot/share/gc/z/zPhysicalMemory.cpp
  src/hotspot/share/gc/z/zPhysicalMemory.hpp
  src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp
  src/hotspot/share/gc/z/zUncommitter.cpp
  src/hotspot/share/gc/z/zUncommitter.hpp
  src/hotspot/share/gc/z/zVirtualMemory.cpp
  src/hotspot/share/gc/z/zVirtualMemory.hpp
  src/hotspot/share/jfr/metadata/metadata.xml
  src/jdk.jfr/share/conf/jfr/default.jfc
  src/jdk.jfr/share/conf/jfr/profile.jfc
  test/hotspot/gtest/gc/z/test_zForwarding.cpp
  test/hotspot/gtest/gc/z/test_zPhysicalMemory.cpp
  test/hotspot/jtreg/gc/z/TestUncommit.java
  test/jdk/jdk/jfr/event/gc/detailed/TestZPageCacheFlushEvent.java
  test/jdk/jdk/jfr/event/gc/detailed/TestZUncommitEvent.java
  test/lib/jdk/test/lib/jfr/EventNames.java
diffstat:    44 files changed, 1229 insertions(+), 958 deletions(-)
--- a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp	Tue Jun 09 11:01:09 2020 +0200
@@ -93,11 +93,11 @@
   return _initialized;
 }
 
-void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
+void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
   // Does nothing
 }
 
-bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) {
+bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const {
   assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
   assert(is_aligned(length, os::vm_page_size()), "Invalid length");
 
@@ -116,7 +116,7 @@
   return true;
 }
 
-size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
+size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
   // Try to commit the whole region
   if (commit_inner(offset, length)) {
     // Success
@@ -144,7 +144,7 @@
   }
 }
 
-size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) {
+size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
   assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
   assert(is_aligned(length, os::vm_page_size()), "Invalid length");
 
--- a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp	Tue Jun 09 11:01:09 2020 +0200
@@ -29,17 +29,17 @@
   uintptr_t _base;
   bool      _initialized;
 
-  bool commit_inner(size_t offset, size_t length);
+  bool commit_inner(size_t offset, size_t length) const;
 
 public:
   ZPhysicalMemoryBacking(size_t max_capacity);
 
   bool is_initialized() const;
 
-  void warn_commit_limits(size_t max) const;
+  void warn_commit_limits(size_t max_capacity) const;
 
-  size_t commit(size_t offset, size_t length);
-  size_t uncommit(size_t offset, size_t length);
+  size_t commit(size_t offset, size_t length) const;
+  size_t uncommit(size_t offset, size_t length) const;
 
   void map(uintptr_t addr, size_t size, uintptr_t offset) const;
   void unmap(uintptr_t addr, size_t size) const;
--- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp	Tue Jun 09 11:01:09 2020 +0200
@@ -302,7 +302,7 @@
   return _initialized;
 }
 
-void ZPhysicalMemoryBacking::warn_available_space(size_t max) const {
+void ZPhysicalMemoryBacking::warn_available_space(size_t max_capacity) const {
   // Note that the available space on a tmpfs or a hugetlbfs filesystem
   // will be zero if no size limit was specified when it was mounted.
   if (_available == 0) {
@@ -316,18 +316,18 @@
   // Warn if the filesystem doesn't currently have enough space available to hold
   // the max heap size. The max heap size will be capped if we later hit this limit
   // when trying to expand the heap.
-  if (_available < max) {
+  if (_available < max_capacity) {
     log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
     log_warning_p(gc)("Not enough space available on the backing filesystem to hold the current max Java heap");
     log_warning_p(gc)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem accordingly "
-                    "(available", max / M);
+                      "(available", max_capacity / M);
     log_warning_p(gc)("space is currently " SIZE_FORMAT "M). Continuing execution with the current filesystem "
-                    "size could", _available / M);
-    log_warning_p(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to map memory.");
+                      "size could", _available / M);
+    log_warning_p(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to commit memory.");
   }
 }
 
-void ZPhysicalMemoryBacking::warn_max_map_count(size_t max) const {
+void ZPhysicalMemoryBacking::warn_max_map_count(size_t max_capacity) const {
   const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT;
   FILE* const file = fopen(filename, "r");
   if (file == NULL) {
@@ -350,24 +350,24 @@
   // However, ZGC tends to create the most mappings and dominate the total count.
   // In the worst cases, ZGC will map each granule three times, i.e. once per heap view.
   // We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
-  const size_t required_max_map_count = (max / ZGranuleSize) * 3 * 1.2;
+  const size_t required_max_map_count = (max_capacity / ZGranuleSize) * 3 * 1.2;
   if (actual_max_map_count < required_max_map_count) {
     log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
     log_warning_p(gc)("The system limit on number of memory mappings per process might be too low for the given");
     log_warning_p(gc)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at",
-                    max / M, filename);
+                      max_capacity / M, filename);
     log_warning_p(gc)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing execution "
-                    "with the current", required_max_map_count, actual_max_map_count);
-    log_warning_p(gc)("limit could lead to a fatal error, due to failure to map memory.");
+                      "with the current", required_max_map_count, actual_max_map_count);
+    log_warning_p(gc)("limit could lead to a premature OutOfMemoryError being thrown, due to failure to map memory.");
   }
 }
 
-void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
+void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
   // Warn if available space is too low
-  warn_available_space(max);
+  warn_available_space(max_capacity);
 
   // Warn if max map count is too low
-  warn_max_map_count(max);
+  warn_max_map_count(max_capacity);
 }
 
 bool ZPhysicalMemoryBacking::is_tmpfs() const {
@@ -477,7 +477,7 @@
   return 0;
 }
 
-ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) {
+ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) const {
   // fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs
   // since Linux 4.3. When fallocate(2) is not supported we emulate it using
   // mmap/munmap (for hugetlbfs and tmpfs with transparent huge pages) or pwrite
@@ -491,7 +491,7 @@
   }
 }
 
-ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t length) {
+ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t length) const {
   const int mode = 0; // Allocate
   const int res = ZSyscall::fallocate(_fd, mode, offset, length);
   if (res == -1) {
@@ -503,7 +503,7 @@
   return 0;
 }
 
-ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) {
+ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) const {
   // Using compat mode is more efficient when allocating space on hugetlbfs.
   // Note that allocating huge pages this way will only reserve them, and not
   // associate them with segments of the file. We must guarantee that we at
@@ -530,7 +530,7 @@
   return fallocate_fill_hole_compat(offset, length);
 }
 
-ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) {
+ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) const {
   if (ZLargePages::is_explicit()) {
     // We can only punch hole in pages that have been touched. Non-touched
     // pages are only reserved, and not associated with any specific file
@@ -553,7 +553,7 @@
   return 0;
 }
 
-ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offset, size_t length) {
+ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offset, size_t length) const {
   // Try first half
   const size_t offset0 = offset;
   const size_t length0 = align_up(length / 2, _block_size);
@@ -574,7 +574,7 @@
   return 0;
 }
 
-ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t length) {
+ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t length) const {
   assert(is_aligned(offset, _block_size), "Invalid offset");
   assert(is_aligned(length, _block_size), "Invalid length");
 
@@ -590,7 +590,7 @@
   return err;
 }
 
-bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) {
+bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const {
   log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
                       offset / M, (offset + length) / M, length / M);
 
@@ -627,7 +627,7 @@
   return mapping->at((int)nindex);
 }
 
-size_t ZPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t length) {
+size_t ZPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t length) const {
   size_t committed = 0;
 
   // Commit one granule at a time, so that each granule
@@ -652,7 +652,7 @@
   return committed;
 }
 
-size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) {
+size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) const {
   // Try to commit the whole region
   if (commit_inner(offset, length)) {
     // Success
@@ -680,7 +680,7 @@
   }
 }
 
-size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
+size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
   if (ZNUMA::is_enabled() && !ZLargePages::is_explicit()) {
     // To get granule-level NUMA interleaving when using non-large pages,
     // we must explicitly interleave the memory at commit/fallocate time.
@@ -690,7 +690,7 @@
   return commit_default(offset, length);
 }
 
-size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) {
+size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
   log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
                       offset / M, (offset + length) / M, length / M);
 
--- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp	Tue Jun 09 11:01:09 2020 +0200
@@ -35,8 +35,8 @@
   size_t   _available;
   bool     _initialized;
 
-  void warn_available_space(size_t max) const;
-  void warn_max_map_count(size_t max) const;
+  void warn_available_space(size_t max_capacity) const;
+  void warn_max_map_count(size_t max_capacity) const;
 
   int create_mem_fd(const char* name) const;
   int create_file_fd(const char* name) const;
@@ -49,26 +49,26 @@
   ZErrno fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const;
   ZErrno fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const;
   ZErrno fallocate_compat_pwrite(size_t offset, size_t length) const;
-  ZErrno fallocate_fill_hole_compat(size_t offset, size_t length);
-  ZErrno fallocate_fill_hole_syscall(size_t offset, size_t length);
-  ZErrno fallocate_fill_hole(size_t offset, size_t length);
-  ZErrno fallocate_punch_hole(size_t offset, size_t length);
-  ZErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length);
-  ZErrno fallocate(bool punch_hole, size_t offset, size_t length);
+  ZErrno fallocate_fill_hole_compat(size_t offset, size_t length) const;
+  ZErrno fallocate_fill_hole_syscall(size_t offset, size_t length) const;
+  ZErrno fallocate_fill_hole(size_t offset, size_t length) const;
+  ZErrno fallocate_punch_hole(size_t offset, size_t length) const;
+  ZErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length) const;
+  ZErrno fallocate(bool punch_hole, size_t offset, size_t length) const;
 
-  bool commit_inner(size_t offset, size_t length);
-  size_t commit_numa_interleaved(size_t offset, size_t length);
-  size_t commit_default(size_t offset, size_t length);
+  bool commit_inner(size_t offset, size_t length) const;
+  size_t commit_numa_interleaved(size_t offset, size_t length) const;
+  size_t commit_default(size_t offset, size_t length) const;
 
 public:
   ZPhysicalMemoryBacking(size_t max_capacity);
 
   bool is_initialized() const;
 
-  void warn_commit_limits(size_t max) const;
+  void warn_commit_limits(size_t max_capacity) const;
 
-  size_t commit(size_t offset, size_t length);
-  size_t uncommit(size_t offset, size_t length);
+  size_t commit(size_t offset, size_t length) const;
+  size_t uncommit(size_t offset, size_t length) const;
 
   void map(uintptr_t addr, size_t size, uintptr_t offset) const;
   void unmap(uintptr_t addr, size_t size) const;
--- a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp	Tue Jun 09 11:01:09 2020 +0200
@@ -42,7 +42,7 @@
   return true;
 }
 
-void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
+void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
   // Does nothing
 }
 
--- a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp	Tue Jun 09 11:01:09 2020 +0200
@@ -44,7 +44,7 @@
 
   bool is_initialized() const;
 
-  void warn_commit_limits(size_t max) const;
+  void warn_commit_limits(size_t max_capacity) const;
 
   size_t commit(size_t offset, size_t length);
   size_t uncommit(size_t offset, size_t length);
--- a/src/hotspot/share/gc/z/vmStructs_z.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/vmStructs_z.hpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -77,8 +77,8 @@
   volatile_nonstatic_field(ZPage,               _top,                 uintptr_t)                     \
                                                                                                      \
   nonstatic_field(ZPageAllocator,               _max_capacity,        const size_t)                  \
-  nonstatic_field(ZPageAllocator,               _capacity,            size_t)                        \
-  nonstatic_field(ZPageAllocator,               _used,                size_t)                        \
+  volatile_nonstatic_field(ZPageAllocator,      _capacity,            size_t)                        \
+  volatile_nonstatic_field(ZPageAllocator,      _used,                size_t)                        \
                                                                                                      \
   nonstatic_field(ZPageTable,                   _map,                 ZGranuleMapForPageTable)       \
                                                                                                      \
--- a/src/hotspot/share/gc/z/zAllocationFlags.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zAllocationFlags.hpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,20 +31,22 @@
 // Allocation flags layout
 // -----------------------
 //
-//   7    3 2 1 0
-//  +----+-+-+-+-+
-//  |0000|1|1|1|1|
-//  +----+-+-+-+-+
-//  |    | | | |
-//  |    | | | * 0-0 Worker Thread Flag (1-bit)
-//  |    | | |
-//  |    | | * 1-1 Non-Blocking Flag (1-bit)
-//  |    | |
-//  |    | * 2-2 Relocation Flag (1-bit)
-//  |    |
-//  |    * 3-3 No Reserve Flag (1-bit)
+//   7   4 3 2 1 0
+//  +---+-+-+-+-+-+
+//  |000|1|1|1|1|1|
+//  +---+-+-+-+-+-+
+//  |   | | | | |
+//  |   | | | | * 0-0 Worker Thread Flag (1-bit)
+//  |   | | | |
+//  |   | | | * 1-1 Non-Blocking Flag (1-bit)
+//  |   | | |
+//  |   | | * 2-2 Relocation Flag (1-bit)
+//  |   | |
+//  |   | * 3-3 No Reserve Flag (1-bit)
+//  |   |
+//  |   * 4-4 Low Address Flag (1-bit)
 //  |
-//  * 7-4 Unused (4-bits)
+//  * 7-5 Unused (3-bits)
 //
 
 class ZAllocationFlags {
@@ -53,6 +55,7 @@
   typedef ZBitField<uint8_t, bool, 1, 1> field_non_blocking;
   typedef ZBitField<uint8_t, bool, 2, 1> field_relocation;
   typedef ZBitField<uint8_t, bool, 3, 1> field_no_reserve;
+  typedef ZBitField<uint8_t, bool, 4, 1> field_low_address;
 
   uint8_t _flags;
 
@@ -76,6 +79,10 @@
     _flags |= field_no_reserve::encode(true);
   }
 
+  void set_low_address() {
+    _flags |= field_low_address::encode(true);
+  }
+
   bool worker_thread() const {
     return field_worker_thread::decode(_flags);
   }
@@ -91,6 +98,10 @@
   bool no_reserve() const {
     return field_no_reserve::decode(_flags);
   }
+
+  bool low_address() const {
+    return field_low_address::decode(_flags);
+  }
 };
 
 #endif // SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
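The hunk above extends the allocation flags byte with a fifth flag: bit 4, the Low Address flag, which the page allocator sets when a page should be allocated from the front of the address space (for example when priming the page cache later in this changeset). As a minimal sketch of the single-bit encode/decode pattern this relies on, using plain masks instead of HotSpot's ZBitField template and illustrative names only:

    #include <cassert>
    #include <cstdint>

    // Minimal, self-contained illustration of the single-bit flag pattern
    // (plain masks rather than HotSpot's ZBitField template). The names and
    // the extra Low Address bit mirror the layout comment above, but this is
    // a sketch, not the actual ZAllocationFlags implementation.
    class AllocFlagsSketch {
    private:
      static const uint8_t WorkerThreadBit = 1u << 0;
      static const uint8_t NonBlockingBit  = 1u << 1;
      static const uint8_t RelocationBit   = 1u << 2;
      static const uint8_t NoReserveBit    = 1u << 3;
      static const uint8_t LowAddressBit   = 1u << 4; // new in this change

      uint8_t _flags;

    public:
      AllocFlagsSketch() : _flags(0) {}

      void set_non_blocking() { _flags |= NonBlockingBit; }
      void set_low_address()  { _flags |= LowAddressBit; }

      bool non_blocking() const { return (_flags & NonBlockingBit) != 0; }
      bool low_address() const  { return (_flags & LowAddressBit) != 0; }
    };

    int main() {
      // Same flag combination prime_cache() uses later in this changeset.
      AllocFlagsSketch flags;
      flags.set_non_blocking();
      flags.set_low_address();
      assert(flags.non_blocking() && flags.low_address());
      return 0;
    }
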
--- a/src/hotspot/share/gc/z/zArguments.cpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zArguments.cpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,6 +70,18 @@
     vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ConcGCThreads=0");
   }
 
+  // Select medium page size so that we can calculate the max reserve
+  ZHeuristics::set_medium_page_size();
+
+  // MinHeapSize/InitialHeapSize must be at least as large as the max reserve
+  const size_t max_reserve = ZHeuristics::max_reserve();
+  if (MinHeapSize < max_reserve) {
+    FLAG_SET_ERGO(MinHeapSize, max_reserve);
+  }
+  if (InitialHeapSize < max_reserve) {
+    FLAG_SET_ERGO(InitialHeapSize, max_reserve);
+  }
+
 #ifdef COMPILER2
   // Enable loop strip mining by default
   if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
--- a/src/hotspot/share/gc/z/zCollectedHeap.cpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zCollectedHeap.cpp	Tue Jun 09 11:01:09 2020 +0200
@@ -25,6 +25,8 @@
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zDirector.hpp"
+#include "gc/z/zDriver.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zHeap.inline.hpp"
 #include "gc/z/zNMethod.hpp"
@@ -52,7 +54,6 @@
     _heap(),
     _director(new ZDirector()),
     _driver(new ZDriver()),
-    _uncommitter(new ZUncommitter()),
     _stat(new ZStat()),
     _runtime_workers() {}
 
@@ -78,11 +79,19 @@
   _heap.serviceability_initialize();
 }
 
+class ZStopConcurrentGCThreadClosure : public ThreadClosure {
+public:
+  virtual void do_thread(Thread* thread) {
+    if (thread->is_ConcurrentGC_thread() &&
+        !thread->is_GC_task_thread()) {
+      static_cast<ConcurrentGCThread*>(thread)->stop();
+    }
+  }
+};
+
 void ZCollectedHeap::stop() {
-  _director->stop();
-  _driver->stop();
-  _uncommitter->stop();
-  _stat->stop();
+  ZStopConcurrentGCThreadClosure cl;
+  gc_threads_do(&cl);
 }
 
 SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
@@ -278,9 +287,8 @@
 void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
   tc->do_thread(_director);
   tc->do_thread(_driver);
-  tc->do_thread(_uncommitter);
   tc->do_thread(_stat);
-  _heap.worker_threads_do(tc);
+  _heap.threads_do(tc);
   _runtime_workers.threads_do(tc);
 }
 
--- a/src/hotspot/share/gc/z/zCollectedHeap.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zCollectedHeap.hpp	Tue Jun 09 11:01:09 2020 +0200
@@ -27,13 +27,13 @@
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/softRefPolicy.hpp"
 #include "gc/z/zBarrierSet.hpp"
-#include "gc/z/zDirector.hpp"
-#include "gc/z/zDriver.hpp"
 #include "gc/z/zHeap.hpp"
 #include "gc/z/zInitialize.hpp"
 #include "gc/z/zRuntimeWorkers.hpp"
-#include "gc/z/zStat.hpp"
-#include "gc/z/zUncommitter.hpp"
+
+class ZDirector;
+class ZDriver;
+class ZStat;
 
 class ZCollectedHeap : public CollectedHeap {
   friend class VMStructs;
@@ -45,7 +45,6 @@
   ZHeap             _heap;
   ZDirector*        _director;
   ZDriver*          _driver;
-  ZUncommitter*     _uncommitter;
   ZStat*            _stat;
   ZRuntimeWorkers   _runtime_workers;
 
--- a/src/hotspot/share/gc/z/zFuture.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zFuture.hpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,6 @@
   ZFuture();
 
   void set(T value);
-  T peek();
   T get();
 };
 
--- a/src/hotspot/share/gc/z/zFuture.inline.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zFuture.inline.hpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,11 +42,6 @@
 }
 
 template <typename T>
-inline T ZFuture<T>::peek() {
-  return _value;
-}
-
-template <typename T>
 inline T ZFuture<T>::get() {
   // Wait for notification
   Thread* const thread = Thread::current();
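The peek() operation is removed because, with the reworked stall protocol later in this changeset, a stalled allocation always blocks in ZFuture::get() until it is handed a ZPageAllocationStall result; nothing polls the future anymore. A minimal sketch of such a single-use future, built on standard C++ primitives rather than the Semaphore-based HotSpot implementation:

    #include <condition_variable>
    #include <mutex>

    // Single-use future sketch: one thread blocks in get() until another
    // thread publishes a value with set(). Illustrative only; ZFuture<T> in
    // HotSpot is built on a Semaphore and VM-aware blocking instead.
    template <typename T>
    class FutureSketch {
    private:
      std::mutex              _lock;
      std::condition_variable _cv;
      bool                    _has_value;
      T                       _value;

    public:
      FutureSketch() : _has_value(false), _value() {}

      void set(T value) {
        {
          std::lock_guard<std::mutex> locker(_lock);
          _value = value;
          _has_value = true;
        }
        _cv.notify_one();
      }

      T get() {
        std::unique_lock<std::mutex> locker(_lock);
        _cv.wait(locker, [this]() { return _has_value; });
        return _value;
      }
    };
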
--- a/src/hotspot/share/gc/z/zHeap.cpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zHeap.cpp	Tue Jun 09 11:01:09 2020 +0200
@@ -27,6 +27,7 @@
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zHeap.inline.hpp"
 #include "gc/z/zHeapIterator.hpp"
+#include "gc/z/zHeuristics.hpp"
 #include "gc/z/zMark.inline.hpp"
 #include "gc/z/zPage.inline.hpp"
 #include "gc/z/zPageTable.inline.hpp"
@@ -57,7 +58,7 @@
 ZHeap::ZHeap() :
     _workers(),
     _object_allocator(),
-    _page_allocator(&_workers, heap_min_size(), heap_initial_size(), heap_max_size(), heap_max_reserve_size()),
+    _page_allocator(&_workers, MinHeapSize, InitialHeapSize, MaxHeapSize, ZHeuristics::max_reserve()),
     _page_table(),
     _forwarding_table(),
     _mark(&_workers, &_page_table),
@@ -66,32 +67,13 @@
     _relocate(&_workers),
     _relocation_set(),
     _unload(&_workers),
-    _serviceability(heap_min_size(), heap_max_size()) {
+    _serviceability(min_capacity(), max_capacity()) {
   // Install global heap instance
   assert(_heap == NULL, "Already initialized");
   _heap = this;
 
   // Update statistics
-  ZStatHeap::set_at_initialize(heap_min_size(), heap_max_size(), heap_max_reserve_size());
-}
-
-size_t ZHeap::heap_min_size() const {
-  return MinHeapSize;
-}
-
-size_t ZHeap::heap_initial_size() const {
-  return InitialHeapSize;
-}
-
-size_t ZHeap::heap_max_size() const {
-  return MaxHeapSize;
-}
-
-size_t ZHeap::heap_max_reserve_size() const {
-  // Reserve one small page per worker plus one shared medium page. This is still just
-  // an estimate and doesn't guarantee that we can't run out of memory during relocation.
-  const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
-  return MIN2(max_reserve_size, heap_max_size());
+  ZStatHeap::set_at_initialize(min_capacity(), max_capacity(), max_reserve());
 }
 
 bool ZHeap::is_initialized() const {
@@ -198,7 +180,8 @@
   _workers.set_boost(boost);
 }
 
-void ZHeap::worker_threads_do(ThreadClosure* tc) const {
+void ZHeap::threads_do(ThreadClosure* tc) const {
+  _page_allocator.threads_do(tc);
   _workers.threads_do(tc);
 }
 
@@ -237,10 +220,6 @@
   _page_allocator.free_page(page, reclaimed);
 }
 
-uint64_t ZHeap::uncommit(uint64_t delay) {
-  return _page_allocator.uncommit(delay);
-}
-
 void ZHeap::flip_to_marked() {
   ZVerifyViewsFlip flip(&_page_allocator);
   ZAddress::flip_to_marked();
--- a/src/hotspot/share/gc/z/zHeap.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zHeap.hpp	Tue Jun 09 11:01:09 2020 +0200
@@ -60,11 +60,6 @@
   ZUnload             _unload;
   ZServiceability     _serviceability;
 
-  size_t heap_min_size() const;
-  size_t heap_initial_size() const;
-  size_t heap_max_size() const;
-  size_t heap_max_reserve_size() const;
-
   void flip_to_marked();
   void flip_to_remapped();
 
@@ -99,11 +94,11 @@
   bool is_in(uintptr_t addr) const;
   uint32_t hash_oop(uintptr_t addr) const;
 
-  // Workers
+  // Threads
   uint nconcurrent_worker_threads() const;
   uint nconcurrent_no_boost_worker_threads() const;
   void set_boost_worker_threads(bool boost);
-  void worker_threads_do(ThreadClosure* tc) const;
+  void threads_do(ThreadClosure* tc) const;
 
   // Reference processing
   ReferenceDiscoverer* reference_discoverer();
@@ -117,9 +112,6 @@
   void undo_alloc_page(ZPage* page);
   void free_page(ZPage* page, bool reclaimed);
 
-  // Uncommit memory
-  uint64_t uncommit(uint64_t delay);
-
   // Object allocation
   uintptr_t alloc_tlab(size_t size);
   uintptr_t alloc_object(size_t size);
--- a/src/hotspot/share/gc/z/zHeuristics.cpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zHeuristics.cpp	Tue Jun 09 11:01:09 2020 +0200
@@ -49,11 +49,16 @@
     ZObjectSizeLimitMedium      = ZPageSizeMedium / 8;
     ZObjectAlignmentMediumShift = (int)ZPageSizeMediumShift - 13;
     ZObjectAlignmentMedium      = 1 << ZObjectAlignmentMediumShift;
+  }
+}
 
-    log_info_p(gc, init)("Medium Page Size: " SIZE_FORMAT "M", ZPageSizeMedium / M);
-  } else {
-    log_info_p(gc, init)("Medium Page Size: N/A");
-  }
+size_t ZHeuristics::max_reserve() {
+  // Reserve one small page per worker plus one shared medium page. This is
+  // still just an estimate and doesn't guarantee that we can't run out of
+  // memory during relocation.
+  const uint nworkers = MAX2(ParallelGCThreads, ConcGCThreads);
+  const size_t reserve = (nworkers * ZPageSizeSmall) + ZPageSizeMedium;
+  return MIN2(MaxHeapSize, reserve);
 }
 
 bool ZHeuristics::use_per_cpu_shared_small_pages() {
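The new ZHeuristics::max_reserve() sizes the reserve as one small page per worker plus one shared medium page, capped at the max heap size, and computes it from the GC thread flags rather than from a live ZWorkers instance. A quick worked example with hypothetical values (2M small pages, a 32M medium page as is typical for larger heaps, and 10 parallel workers), just to make the arithmetic concrete:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Worked example of the reserve formula above. All numbers are
    // hypothetical defaults chosen for illustration; the real values come
    // from ZPageSizeSmall, ZPageSizeMedium, ParallelGCThreads, ConcGCThreads
    // and MaxHeapSize.
    int main() {
      const size_t M = 1024 * 1024;
      const size_t small_page  = 2 * M;             // ZPageSizeSmall
      const size_t medium_page = 32 * M;            // ZPageSizeMedium
      const size_t nworkers    = std::max(10, 2);   // max(ParallelGCThreads, ConcGCThreads)
      const size_t max_heap    = 2048 * M;          // hypothetical -Xmx2g

      const size_t reserve = nworkers * small_page + medium_page;  // 10 * 2M + 32M = 52M
      std::printf("max_reserve = %zuM\n", std::min(max_heap, reserve) / M);
      return 0;
    }
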
--- a/src/hotspot/share/gc/z/zHeuristics.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zHeuristics.hpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,8 @@
 public:
   static void set_medium_page_size();
 
+  static size_t max_reserve();
+
   static bool use_per_cpu_shared_small_pages();
 
   static uint nparallel_workers();
--- a/src/hotspot/share/gc/z/zInitialize.cpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zInitialize.cpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,6 @@
   ZThreadLocalAllocBuffer::initialize();
   ZTracer::initialize();
   ZLargePages::initialize();
-  ZHeuristics::set_medium_page_size();
   ZBarrierSet::set_barrier_set(barrier_set);
 
   initialize_os();
--- a/src/hotspot/share/gc/z/zMemory.cpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zMemory.cpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
 
 #include "precompiled.hpp"
 #include "gc/z/zList.inline.hpp"
+#include "gc/z/zLock.inline.hpp"
 #include "gc/z/zMemory.inline.hpp"
 #include "memory/allocation.inline.hpp"
 
@@ -86,6 +87,8 @@
 }
 
 uintptr_t ZMemoryManager::alloc_from_front(size_t size) {
+  ZLocker<ZLock> locker(&_lock);
+
   ZListIterator<ZMemory> iter(&_freelist);
   for (ZMemory* area; iter.next(&area);) {
     if (area->size() >= size) {
@@ -109,6 +112,8 @@
 }
 
 uintptr_t ZMemoryManager::alloc_from_front_at_most(size_t size, size_t* allocated) {
+  ZLocker<ZLock> locker(&_lock);
+
   ZMemory* area = _freelist.first();
   if (area != NULL) {
     if (area->size() <= size) {
@@ -133,6 +138,8 @@
 }
 
 uintptr_t ZMemoryManager::alloc_from_back(size_t size) {
+  ZLocker<ZLock> locker(&_lock);
+
   ZListReverseIterator<ZMemory> iter(&_freelist);
   for (ZMemory* area; iter.next(&area);) {
     if (area->size() >= size) {
@@ -155,6 +162,8 @@
 }
 
 uintptr_t ZMemoryManager::alloc_from_back_at_most(size_t size, size_t* allocated) {
+  ZLocker<ZLock> locker(&_lock);
+
   ZMemory* area = _freelist.last();
   if (area != NULL) {
     if (area->size() <= size) {
@@ -181,6 +190,8 @@
   assert(start != UINTPTR_MAX, "Invalid address");
   const uintptr_t end = start + size;
 
+  ZLocker<ZLock> locker(&_lock);
+
   ZListIterator<ZMemory> iter(&_freelist);
   for (ZMemory* area; iter.next(&area);) {
     if (start < area->start()) {
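With the ZLocker lines added here (and the _lock member added to ZMemoryManager in zMemory.hpp below), the memory manager serializes its own free-list operations, so it no longer depends on the caller holding the ZPageAllocator lock. The same pattern in a minimal, standard-C++ sketch of a self-locking free-list allocator, where std::mutex/std::lock_guard stand in for ZLock/ZLocker:

    #include <cstddef>
    #include <cstdint>
    #include <list>
    #include <mutex>

    // Sketch of a free-list manager that guards itself, in the spirit of the
    // ZLocker<ZLock> lines added above. Illustrative only, not the HotSpot code.
    class MemoryManagerSketch {
    private:
      struct Area {
        uintptr_t start;
        size_t    size;
      };

      std::mutex      _lock;
      std::list<Area> _freelist;

    public:
      // Returns UINTPTR_MAX if no area is large enough (ZGC uses the same sentinel).
      uintptr_t alloc_from_front(size_t size) {
        std::lock_guard<std::mutex> locker(_lock);  // plays the role of ZLocker<ZLock>
        for (auto it = _freelist.begin(); it != _freelist.end(); ++it) {
          if (it->size >= size) {
            const uintptr_t start = it->start;
            it->start += size;
            it->size  -= size;
            if (it->size == 0) {
              _freelist.erase(it);
            }
            return start;
          }
        }
        return UINTPTR_MAX;
      }

      void free(uintptr_t start, size_t size) {
        std::lock_guard<std::mutex> locker(_lock);
        _freelist.push_back({start, size});  // the real code inserts in address order and coalesces
      }
    };
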
--- a/src/hotspot/share/gc/z/zMemory.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zMemory.hpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #define SHARE_GC_Z_ZMEMORY_HPP
 
 #include "gc/z/zList.hpp"
+#include "gc/z/zLock.hpp"
 #include "memory/allocation.hpp"
 
 class ZMemory : public CHeapObj<mtGC> {
@@ -65,6 +66,7 @@
   };
 
 private:
+  ZLock          _lock;
   ZList<ZMemory> _freelist;
   Callbacks      _callbacks;
 
--- a/src/hotspot/share/gc/z/zPage.cpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zPage.cpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,6 +58,7 @@
 void ZPage::assert_initialized() const {
   assert(!_virtual.is_null(), "Should not be null");
   assert(!_physical.is_null(), "Should not be null");
+  assert(_virtual.size() == _physical.size(), "Virtual/Physical size mismatch");
   assert((_type == ZPageTypeSmall && size() == ZPageSizeSmall) ||
          (_type == ZPageTypeMedium && size() == ZPageSizeMedium) ||
          (_type == ZPageTypeLarge && is_aligned(size(), ZGranuleSize)),
@@ -99,6 +100,27 @@
   return page;
 }
 
+ZPage* ZPage::split_committed() {
+  // Split any committed part of this page into a separate page,
+  // leaving this page with only uncommitted physical memory.
+  const ZPhysicalMemory pmem = _physical.split_committed();
+  if (pmem.is_null()) {
+    // Nothing committed
+    return NULL;
+  }
+
+  assert(!_physical.is_null(), "Should not be null");
+
+  // Resize this page
+  const ZVirtualMemory vmem = _virtual.split(pmem.size());
+  _type = type_from_size(_virtual.size());
+  _top = start();
+  _livemap.resize(object_max_count());
+
+  // Create new page
+  return new ZPage(vmem, pmem);
+}
+
 void ZPage::print_on(outputStream* out) const {
   out->print_cr(" %-6s  " PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT " %s%s",
                 type_to_string(), start(), top(), end(),
--- a/src/hotspot/share/gc/z/zPage.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zPage.hpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,17 +69,15 @@
   uintptr_t top() const;
   size_t remaining() const;
 
+  const ZVirtualMemory& virtual_memory() const;
   const ZPhysicalMemory& physical_memory() const;
-  const ZVirtualMemory& virtual_memory() const;
+  ZPhysicalMemory& physical_memory();
 
   uint8_t numa_id();
 
   bool is_allocating() const;
   bool is_relocatable() const;
 
-  bool is_mapped() const;
-  void set_pre_mapped();
-
   uint64_t last_used() const;
   void set_last_used();
 
@@ -88,6 +86,7 @@
   ZPage* retype(uint8_t type);
   ZPage* split(size_t size);
   ZPage* split(uint8_t type, size_t size);
+  ZPage* split_committed();
 
   bool is_in(uintptr_t addr) const;
 
--- a/src/hotspot/share/gc/z/zPage.inline.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zPage.inline.hpp	Tue Jun 09 11:01:09 2020 +0200
@@ -126,12 +126,16 @@
   return end() - top();
 }
 
+inline const ZVirtualMemory& ZPage::virtual_memory() const {
+  return _virtual;
+}
+
 inline const ZPhysicalMemory& ZPage::physical_memory() const {
   return _physical;
 }
 
-inline const ZVirtualMemory& ZPage::virtual_memory() const {
-  return _virtual;
+inline ZPhysicalMemory& ZPage::physical_memory() {
+  return _physical;
 }
 
 inline uint8_t ZPage::numa_id() {
@@ -150,17 +154,6 @@
   return _seqnum < ZGlobalSeqNum;
 }
 
-inline bool ZPage::is_mapped() const {
-  return _seqnum > 0;
-}
-
-inline void ZPage::set_pre_mapped() {
-  // The _seqnum variable is also used to signal that the virtual and physical
-  // memory has been mapped. So, we need to set it to non-zero when the memory
-  // has been pre-mapped.
-  _seqnum = 1;
-}
-
 inline uint64_t ZPage::last_used() const {
   return _last_used;
 }
--- a/src/hotspot/share/gc/z/zPageAllocator.cpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zPageAllocator.cpp	Tue Jun 09 11:01:09 2020 +0200
@@ -31,42 +31,56 @@
 #include "gc/z/zLock.inline.hpp"
 #include "gc/z/zPage.inline.hpp"
 #include "gc/z/zPageAllocator.hpp"
-#include "gc/z/zPageCache.inline.hpp"
+#include "gc/z/zPageCache.hpp"
 #include "gc/z/zSafeDelete.inline.hpp"
 #include "gc/z/zStat.hpp"
 #include "gc/z/zTask.hpp"
 #include "gc/z/zTracer.inline.hpp"
+#include "gc/z/zUncommitter.hpp"
 #include "gc/z/zWorkers.hpp"
 #include "jfr/jfrEvents.hpp"
+#include "logging/log.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/init.hpp"
 #include "runtime/java.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
 
 static const ZStatCounter       ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
 static const ZStatCounter       ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond);
-static const ZStatCounter       ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond);
 static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");
 
-class ZPageAllocRequest : public StackObj {
-  friend class ZList<ZPageAllocRequest>;
+enum ZPageAllocationStall {
+  ZPageAllocationStallSuccess,
+  ZPageAllocationStallFailed,
+  ZPageAllocationStallStartGC
+};
+
+class ZPageAllocation : public StackObj {
+  friend class ZList<ZPageAllocation>;
 
 private:
-  const uint8_t                _type;
-  const size_t                 _size;
-  const ZAllocationFlags       _flags;
-  const unsigned int           _total_collections;
-  ZListNode<ZPageAllocRequest> _node;
-  ZFuture<ZPage*>              _result;
+  const uint8_t                 _type;
+  const size_t                  _size;
+  const ZAllocationFlags        _flags;
+  const uint32_t                _seqnum;
+  size_t                        _flushed;
+  size_t                        _committed;
+  ZList<ZPage>                  _pages;
+  ZListNode<ZPageAllocation>    _node;
+  ZFuture<ZPageAllocationStall> _stall_result;
 
 public:
-  ZPageAllocRequest(uint8_t type, size_t size, ZAllocationFlags flags, unsigned int total_collections) :
+  ZPageAllocation(uint8_t type, size_t size, ZAllocationFlags flags) :
       _type(type),
       _size(size),
       _flags(flags),
-      _total_collections(total_collections),
+      _seqnum(ZGlobalSeqNum),
+      _flushed(0),
+      _committed(0),
+      _pages(),
       _node(),
-      _result() {}
+      _stall_result() {}
 
   uint8_t type() const {
     return _type;
@@ -80,48 +94,63 @@
     return _flags;
   }
 
-  unsigned int total_collections() const {
-    return _total_collections;
+  uint32_t seqnum() const {
+    return _seqnum;
   }
 
-  ZPage* peek() {
-    return _result.peek();
+  size_t flushed() const {
+    return _flushed;
   }
 
-  ZPage* wait() {
-    return _result.get();
+  void set_flushed(size_t flushed) {
+    _flushed = flushed;
   }
 
-  void satisfy(ZPage* page) {
-    _result.set(page);
+  size_t committed() const {
+    return _committed;
+  }
+
+  void set_committed(size_t committed) {
+    _committed = committed;
+  }
+
+  ZPageAllocationStall wait() {
+    return _stall_result.get();
+  }
+
+  ZList<ZPage>* pages() {
+    return &_pages;
+  }
+
+  void satisfy(ZPageAllocationStall result) {
+    _stall_result.set(result);
   }
 };
 
-ZPage* const ZPageAllocator::gc_marker = (ZPage*)-1;
-
 ZPageAllocator::ZPageAllocator(ZWorkers* workers,
                                size_t min_capacity,
                                size_t initial_capacity,
                                size_t max_capacity,
                                size_t max_reserve) :
     _lock(),
+    _cache(),
     _virtual(max_capacity),
     _physical(max_capacity),
-    _cache(),
     _min_capacity(min_capacity),
     _max_capacity(max_capacity),
     _max_reserve(max_reserve),
     _current_max_capacity(max_capacity),
     _capacity(0),
+    _claimed(0),
+    _used(0),
     _used_high(0),
     _used_low(0),
-    _used(0),
     _allocated(0),
     _reclaimed(0),
-    _queue(),
+    _stalled(),
     _satisfied(),
+    _uncommitter(new ZUncommitter(this)),
     _safe_delete(),
-    _uncommit(false),
     _initialized(false) {
 
   if (!_virtual.is_initialized() || !_physical.is_initialized()) {
@@ -132,31 +161,25 @@
   log_info_p(gc, init)("Initial Capacity: " SIZE_FORMAT "M", initial_capacity / M);
   log_info_p(gc, init)("Max Capacity: " SIZE_FORMAT "M", max_capacity / M);
   log_info_p(gc, init)("Max Reserve: " SIZE_FORMAT "M", max_reserve / M);
+  if (ZPageSizeMedium > 0) {
+    log_info_p(gc, init)("Medium Page Size: " SIZE_FORMAT "M", ZPageSizeMedium / M);
+  } else {
+    log_info_p(gc, init)("Medium Page Size: N/A");
+  }
   log_info_p(gc, init)("Pre-touch: %s", AlwaysPreTouch ? "Enabled" : "Disabled");
 
   // Warn if system limits could stop us from reaching max capacity
   _physical.warn_commit_limits(max_capacity);
 
-  // Commit initial capacity
-  _capacity = _physical.commit(initial_capacity);
-  if (_capacity != initial_capacity) {
+  // Check if uncommit should and can be enabled
+  _physical.try_enable_uncommit(min_capacity, max_capacity);
+
+  // Pre-map initial capacity
+  if (!prime_cache(workers, initial_capacity)) {
     log_error_p(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", initial_capacity / M);
     return;
   }
 
-  // If uncommit is not explicitly disabled, max capacity is greater than
-  // min capacity, and uncommit is supported by the platform, then we will
-  // try to uncommit unused memory.
-  _uncommit = ZUncommit && (max_capacity > min_capacity) && _physical.supports_uncommit();
-  if (_uncommit) {
-    log_info(gc, init)("Uncommit: Enabled, Delay: " UINTX_FORMAT "s", ZUncommitDelay);
-  } else {
-    log_info(gc, init)("Uncommit: Disabled");
-  }
-
-  // Pre-map initial capacity
-  prime_cache(workers, initial_capacity);
-
   // Successfully initialized
   _initialized = true;
 }
@@ -190,21 +213,16 @@
   }
 };
 
-void ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) {
-  // Allocate physical memory
-  const ZPhysicalMemory pmem = _physical.alloc(size);
-  guarantee(!pmem.is_null(), "Invalid size");
+bool ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) {
+  ZAllocationFlags flags;
 
-  // Allocate virtual memory
-  const ZVirtualMemory vmem = _virtual.alloc(size, true /* alloc_from_front */);
-  guarantee(!vmem.is_null(), "Invalid size");
+  flags.set_non_blocking();
+  flags.set_low_address();
 
-  // Allocate page
-  ZPage* const page = new ZPage(vmem, pmem);
-
-  // Map page
-  map_page(page);
-  page->set_pre_mapped();
+  ZPage* const page = alloc_page(ZPageTypeLarge, size, flags);
+  if (page == NULL) {
+    return false;
+  }
 
   if (AlwaysPreTouch) {
     // Pre-touch page
@@ -212,9 +230,9 @@
     workers->run_parallel(&task);
   }
 
-  // Add page to cache
-  page->set_last_used();
-  _cache.free_page(page);
+  free_page(page, false /* reclaimed */);
+
+  return true;
 }
 
 bool ZPageAllocator::is_initialized() const {
@@ -231,11 +249,13 @@
 
 size_t ZPageAllocator::soft_max_capacity() const {
   // Note that SoftMaxHeapSize is a manageable flag
-  return MIN2(SoftMaxHeapSize, _current_max_capacity);
+  const size_t soft_max_capacity = Atomic::load(&SoftMaxHeapSize);
+  const size_t current_max_capacity = Atomic::load(&_current_max_capacity);
+  return MIN2(soft_max_capacity, current_max_capacity);
 }
 
 size_t ZPageAllocator::capacity() const {
-  return _capacity;
+  return Atomic::load(&_capacity);
 }
 
 size_t ZPageAllocator::max_reserve() const {
@@ -251,11 +271,15 @@
 }
 
 size_t ZPageAllocator::used() const {
-  return _used;
+  return Atomic::load(&_used);
 }
 
 size_t ZPageAllocator::unused() const {
-  const ssize_t unused = (ssize_t)_capacity - (ssize_t)_used - (ssize_t)_max_reserve;
+  const ssize_t capacity = (ssize_t)Atomic::load(&_capacity);
+  const ssize_t used = (ssize_t)Atomic::load(&_used);
+  const ssize_t claimed = (ssize_t)Atomic::load(&_claimed);
+  const ssize_t max_reserve = (ssize_t)_max_reserve;
+  const ssize_t unused = capacity - used - claimed - max_reserve;
   return unused > 0 ? (size_t)unused : 0;
 }
 
@@ -274,6 +298,40 @@
   _used_high = _used_low = _used;
 }
 
+size_t ZPageAllocator::increase_capacity(size_t size) {
+  const size_t increased = MIN2(size, _current_max_capacity - _capacity);
+
+  if (increased > 0) {
+    // Update atomically since we have concurrent readers
+    Atomic::add(&_capacity, increased);
+
+    // Record time of last commit. When allocating, we prefer increasing
+    // the capacity over flushing the cache. That means there could be
+    // expired pages in the cache at this time. However, since we are
+    // increasing the capacity we are obviously in need of committed
+    // memory and should therefore not be uncommitting memory.
+    _cache.set_last_commit();
+  }
+
+  return increased;
+}
+
+void ZPageAllocator::decrease_capacity(size_t size, bool set_max_capacity) {
+  // Update atomically since we have concurrent readers
+  Atomic::sub(&_capacity, size);
+
+  if (set_max_capacity) {
+    // Adjust current max capacity to avoid further attempts to increase capacity
+    log_error_p(gc)("Forced to lower max Java heap size from "
+                    SIZE_FORMAT "M(%.0f%%) to " SIZE_FORMAT "M(%.0f%%)",
+                    _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity),
+                    _capacity / M, percent_of(_capacity, _max_capacity));
+
+    // Update atomically since we have concurrent readers
+    Atomic::store(&_current_max_capacity, _capacity);
+  }
+}
+
 void ZPageAllocator::increase_used(size_t size, bool relocation) {
   if (relocation) {
     // Allocating a page for the purpose of relocation has a
@@ -281,9 +339,11 @@
     _reclaimed -= size;
   }
   _allocated += size;
-  _used += size;
-  if (_used > _used_high) {
-    _used_high = _used;
+
+  // Update atomically since we have concurrent readers
+  const size_t used = Atomic::add(&_used, size);
+  if (used > _used_high) {
+    _used_high = used;
   }
 }
 
@@ -297,43 +357,26 @@
   } else {
     _allocated -= size;
   }
-  _used -= size;
-  if (_used < _used_low) {
-    _used_low = _used;
+
+  // Update atomically since we have concurrent readers
+  const size_t used = Atomic::sub(&_used, size);
+  if (used < _used_low) {
+    _used_low = used;
   }
 }
 
-ZPage* ZPageAllocator::create_page(uint8_t type, size_t size) {
-  // Allocate virtual memory
-  const ZVirtualMemory vmem = _virtual.alloc(size);
-  if (vmem.is_null()) {
-    // Out of address space
-    return NULL;
+bool ZPageAllocator::commit_page(ZPage* page) {
+  // Commit physical memory
+  return _physical.commit(page->physical_memory());
+}
+
+void ZPageAllocator::uncommit_page(ZPage* page) {
+  if (!ZUncommit) {
+    return;
   }
 
-  // Allocate physical memory
-  const ZPhysicalMemory pmem = _physical.alloc(size);
-  assert(!pmem.is_null(), "Invalid size");
-
-  // Allocate page
-  return new ZPage(type, vmem, pmem);
-}
-
-void ZPageAllocator::destroy_page(ZPage* page) {
-  const ZVirtualMemory& vmem = page->virtual_memory();
-  const ZPhysicalMemory& pmem = page->physical_memory();
-
-  // Unmap memory
-  _physical.unmap(pmem, vmem.start());
-
-  // Free physical memory
-  _physical.free(pmem);
-
-  // Free virtual memory
-  _virtual.free(vmem);
-
-  // Delete page safely
-  _safe_delete(page);
+  // Uncommit physical memory
+  _physical.uncommit(page->physical_memory());
 }
 
 void ZPageAllocator::map_page(const ZPage* page) const {
@@ -341,192 +384,307 @@
   _physical.map(page->physical_memory(), page->start());
 }
 
-size_t ZPageAllocator::max_available(bool no_reserve) const {
-  size_t available = _current_max_capacity - _used;
+void ZPageAllocator::unmap_page(const ZPage* page) const {
+  // Unmap physical memory
+  _physical.unmap(page->physical_memory(), page->start());
+}
+
+void ZPageAllocator::destroy_page(ZPage* page) {
+  // Free virtual memory
+  _virtual.free(page->virtual_memory());
+
+  // Free physical memory
+  _physical.free(page->physical_memory());
+
+  // Delete page safely
+  _safe_delete(page);
+}
+
+bool ZPageAllocator::is_alloc_allowed(size_t size, bool no_reserve) const {
+  size_t available = _current_max_capacity - _used - _claimed;
 
   if (no_reserve) {
     // The reserve should not be considered available
     available -= MIN2(available, _max_reserve);
   }
 
-  return available;
+  return available >= size;
 }
 
-bool ZPageAllocator::ensure_available(size_t size, bool no_reserve) {
-  if (max_available(no_reserve) < size) {
-    // Not enough free memory
+bool ZPageAllocator::is_alloc_allowed_from_cache(size_t size, bool no_reserve) const {
+  size_t available = _capacity - _used - _claimed;
+
+  if (no_reserve) {
+    // The reserve should not be considered available
+    available -= MIN2(available, _max_reserve);
+  } else if (_capacity != _current_max_capacity) {
+    // Always increase capacity before using the reserve
     return false;
   }
 
-  // We add the max_reserve to the requested size to avoid losing
-  // the reserve because of failure to increase capacity before
-  // reaching max capacity.
-  size += _max_reserve;
+  return available >= size;
+}
 
-  // Don't try to increase capacity if enough unused capacity
-  // is available or if current max capacity has been reached.
-  const size_t available = _capacity - _used;
-  if (available < size && _capacity < _current_max_capacity) {
-    // Try to increase capacity
-    const size_t commit = MIN2(size - available, _current_max_capacity - _capacity);
-    const size_t committed = _physical.commit(commit);
-    _capacity += committed;
+bool ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve, ZList<ZPage>* pages) {
+  if (!is_alloc_allowed(size, no_reserve)) {
+    // Out of memory
+    return false;
+  }
 
-    log_trace(gc, heap)("Make Available: Size: " SIZE_FORMAT "M, NoReserve: %s, "
-                        "Available: " SIZE_FORMAT "M, Commit: " SIZE_FORMAT "M, "
-                        "Committed: " SIZE_FORMAT "M, Capacity: " SIZE_FORMAT "M",
-                        size / M, no_reserve ? "True" : "False", available / M,
-                        commit / M, committed / M, _capacity / M);
-
-    if (committed != commit) {
-      // Failed, or partly failed, to increase capacity. Adjust current
-      // max capacity to avoid further attempts to increase capacity.
-      log_error_p(gc)("Forced to lower max Java heap size from "
-                      SIZE_FORMAT "M(%.0f%%) to " SIZE_FORMAT "M(%.0f%%)",
-                      _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity),
-                      _capacity / M, percent_of(_capacity, _max_capacity));
-
-      _current_max_capacity = _capacity;
+  // Try allocate from the page cache
+  if (is_alloc_allowed_from_cache(size, no_reserve)) {
+    ZPage* const page = _cache.alloc_page(type, size);
+    if (page != NULL) {
+      // Success
+      pages->insert_last(page);
+      return true;
     }
   }
 
-  if (!no_reserve) {
-    size -= _max_reserve;
+  // Try increase capacity
+  const size_t increased = increase_capacity(size);
+  if (increased < size) {
+    // Could not increase capacity enough to satisfy the allocation
+    // completely. Flush the page cache to satisfy the remainder.
+    const size_t remaining = size - increased;
+    _cache.flush_for_allocation(remaining, pages);
   }
 
-  const size_t new_available = _capacity - _used;
-  return new_available >= size;
+  // Success
+  return true;
 }
 
-void ZPageAllocator::ensure_uncached_available(size_t size) {
-  assert(_capacity - _used >= size, "Invalid size");
-  const size_t uncached_available = _capacity - _used - _cache.available();
-  if (size > uncached_available) {
-    flush_cache_for_allocation(size - uncached_available);
+bool ZPageAllocator::alloc_page_common(ZPageAllocation* allocation) {
+  const uint8_t type = allocation->type();
+  const size_t size = allocation->size();
+  const ZAllocationFlags flags = allocation->flags();
+  ZList<ZPage>* const pages = allocation->pages();
+
+  // Try allocate without using the reserve
+  if (!alloc_page_common_inner(type, size, true /* no_reserve */, pages)) {
+    // If allowed to, try allocate using the reserve
+    if (flags.no_reserve() || !alloc_page_common_inner(type, size, false /* no_reserve */, pages)) {
+      // Out of memory
+      return false;
+    }
   }
+
+  // Update used statistics
+  increase_used(size, flags.relocation());
+
+  // Success
+  return true;
 }
 
-ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve) {
-  if (!ensure_available(size, no_reserve)) {
-    // Not enough free memory
-    return NULL;
-  }
-
-  // Try allocate page from the cache
-  ZPage* const page = _cache.alloc_page(type, size);
-  if (page != NULL) {
-    return page;
-  }
-
-  // Try flush pages from the cache
-  ensure_uncached_available(size);
-
-  // Create new page
-  return create_page(type, size);
-}
-
-ZPage* ZPageAllocator::alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags) {
-  EventZPageAllocation event;
-
-  ZPage* const page = alloc_page_common_inner(type, size, flags.no_reserve());
-  if (page == NULL) {
-    // Out of memory
-    return NULL;
-  }
-
-  // Update used statistics
-  increase_used(size, flags.relocation());
-
-  // Send trace event
-  event.commit(type, size, _used, max_available(flags.no_reserve()),
-               _cache.available(), flags.non_blocking(), flags.no_reserve());
-
-  return page;
-}
-
-void ZPageAllocator::check_out_of_memory_during_initialization() {
+static void check_out_of_memory_during_initialization() {
   if (!is_init_completed()) {
     vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
   }
 }
 
-ZPage* ZPageAllocator::alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags) {
-  // Prepare to block
-  ZPageAllocRequest request(type, size, flags, ZCollectedHeap::heap()->total_collections());
+bool ZPageAllocator::alloc_page_stall(ZPageAllocation* allocation) {
+  ZStatTimer timer(ZCriticalPhaseAllocationStall);
+  EventZAllocationStall event;
+  ZPageAllocationStall result;
 
-  _lock.lock();
+  // We can only block if the VM is fully initialized
+  check_out_of_memory_during_initialization();
 
-  // Try non-blocking allocation
-  ZPage* page = alloc_page_common(type, size, flags);
-  if (page == NULL) {
-    // Allocation failed, enqueue request
-    _queue.insert_last(&request);
+  do {
+    // Start asynchronous GC
+    ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);
+
+    // Wait for allocation to complete, fail or request a GC
+    result = allocation->wait();
+  } while (result == ZPageAllocationStallStartGC);
+
+  {
+    //
+    // We grab the lock here for two different reasons:
+    //
+    // 1) Guard deletion of underlying semaphore. This is a workaround for
+    // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy
+    // the semaphore immediately after returning from sem_wait(). The
+    // reason is that sem_post() can touch the semaphore after a waiting
+    // thread have returned from sem_wait(). To avoid this race we are
+    // forcing the waiting thread to acquire/release the lock held by the
+    // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
+    //
+    // 2) Guard the list of satisfied pages.
+    //
+    ZLocker<ZLock> locker(&_lock);
+    _satisfied.remove(allocation);
   }
 
-  _lock.unlock();
+  // Send event
+  event.commit(allocation->type(), allocation->size());
 
-  if (page == NULL) {
-    // Allocation failed
-    ZStatTimer timer(ZCriticalPhaseAllocationStall);
-    EventZAllocationStall event;
+  return (result == ZPageAllocationStallSuccess);
+}
 
-    // We can only block if VM is fully initialized
-    check_out_of_memory_during_initialization();
+bool ZPageAllocator::alloc_page_or_stall(ZPageAllocation* allocation) {
+  {
+    ZLocker<ZLock> locker(&_lock);
 
-    do {
-      // Start asynchronous GC
-      ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);
-
-      // Wait for allocation to complete or fail
-      page = request.wait();
-    } while (page == gc_marker);
-
-    {
-      //
-      // We grab the lock here for two different reasons:
-      //
-      // 1) Guard deletion of underlying semaphore. This is a workaround for
-      // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy
-      // the semaphore immediately after returning from sem_wait(). The
-      // reason is that sem_post() can touch the semaphore after a waiting
-      // thread have returned from sem_wait(). To avoid this race we are
-      // forcing the waiting thread to acquire/release the lock held by the
-      // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
-      //
-      // 2) Guard the list of satisfied pages.
-      //
-      ZLocker<ZLock> locker(&_lock);
-      _satisfied.remove(&request);
+    if (alloc_page_common(allocation)) {
+      // Success
+      return true;
     }
 
-    event.commit(type, size);
+    // Failed
+    if (allocation->flags().non_blocking()) {
+      // Don't stall
+      return false;
+    }
+
+    // Enqueue allocation request
+    _stalled.insert_last(allocation);
   }
 
-  return page;
+  // Stall
+  return alloc_page_stall(allocation);
 }
 
-ZPage* ZPageAllocator::alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags) {
+ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) {
+  const size_t size = allocation->size();
+
+  // Allocate virtual memory. To make error handling a lot more straightforward,
+  // we allocate virtual memory before destroying flushed pages.
+  const ZVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address());
+  if (vmem.is_null()) {
+    log_error(gc)("Out of address space");
+    return NULL;
+  }
+
+  ZPhysicalMemory pmem;
+  size_t flushed = 0;
+
+  // Harvest physical memory from flushed pages
+  ZListRemoveIterator<ZPage> iter(allocation->pages());
+  for (ZPage* page; iter.next(&page);) {
+    flushed += page->size();
+
+    unmap_page(page);
+
+    // Harvest flushed physical memory
+    ZPhysicalMemory& fmem = page->physical_memory();
+    pmem.add_segments(fmem);
+    fmem.remove_segments();
+
+    destroy_page(page);
+  }
+
+  if (flushed > 0) {
+    allocation->set_flushed(flushed);
+
+    // Update statistics
+    ZStatInc(ZCounterPageCacheFlush, flushed);
+    log_debug(gc, heap)("Page Cache Flushed: " SIZE_FORMAT "M", flushed / M);
+  }
+
+  // Allocate any remaining physical memory. Capacity and used have
+  // already been adjusted, we just need to fetch the memory, which
+  // is guaranteed to succeed.
+  if (flushed < size) {
+    const size_t remaining = size - flushed;
+    allocation->set_committed(remaining);
+    _physical.alloc(pmem, remaining);
+  }
+
+  // Create new page
+  return new ZPage(allocation->type(), vmem, pmem);
+}
+
+static bool is_alloc_satisfied(ZPageAllocation* allocation) {
+  // The allocation is immediately satisfied if the list of pages contains
+  // exactly one page, with the type and size that was requested.
+  return allocation->pages()->size() == 1 &&
+         allocation->pages()->first()->type() == allocation->type() &&
+         allocation->pages()->first()->size() == allocation->size();
+}
+
+ZPage* ZPageAllocator::alloc_page_finalize(ZPageAllocation* allocation) {
+  // Fast path
+  if (is_alloc_satisfied(allocation)) {
+    return allocation->pages()->remove_first();
+  }
+
+  // Slow path
+  ZPage* const page = alloc_page_create(allocation);
+  if (page == NULL) {
+    // Out of address space
+    return NULL;
+  }
+
+  // Commit page
+  if (commit_page(page)) {
+    // Success
+    map_page(page);
+    return page;
+  }
+
+  // Failed or partially failed. Split off any successfully committed
+  // part of the page into a new page and insert it into the list of pages,
+  // so that it will be re-inserted into the page cache.
+  ZPage* const committed_page = page->split_committed();
+  destroy_page(page);
+
+  if (committed_page != NULL) {
+    map_page(committed_page);
+    allocation->pages()->insert_last(committed_page);
+  }
+
+  return NULL;
+}
+
+void ZPageAllocator::alloc_page_failed(ZPageAllocation* allocation) {
   ZLocker<ZLock> locker(&_lock);
-  return alloc_page_common(type, size, flags);
+
+  size_t freed = 0;
+
+  // Free any allocated/flushed pages
+  ZListRemoveIterator<ZPage> iter(allocation->pages());
+  for (ZPage* page; iter.next(&page);) {
+    freed += page->size();
+    free_page_inner(page, false /* reclaimed */);
+  }
+
+  // Adjust capacity and used to reflect the failed capacity increase
+  const size_t remaining = allocation->size() - freed;
+  decrease_used(remaining, false /* reclaimed */);
+  decrease_capacity(remaining, true /* set_max_capacity */);
+
+  // Try satisfy stalled allocations
+  satisfy_stalled();
 }
 
 ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
-  ZPage* const page = flags.non_blocking()
-                      ? alloc_page_nonblocking(type, size, flags)
-                      : alloc_page_blocking(type, size, flags);
-  if (page == NULL) {
+  EventZPageAllocation event;
+
+retry:
+  ZPageAllocation allocation(type, size, flags);
+
+  // Allocate one or more pages from the page cache. If the allocation
+  // succeeds but the returned pages don't cover the complete allocation,
+  // then the finalize phase is allowed to allocate the remaining memory
+  // directly from the physical memory manager. Note that this call might
+  // block in a safepoint if the non-blocking flag is not set.
+  if (!alloc_page_or_stall(&allocation)) {
     // Out of memory
     return NULL;
   }
 
-  // Map page if needed
-  if (!page->is_mapped()) {
-    map_page(page);
+  ZPage* const page = alloc_page_finalize(&allocation);
+  if (page == NULL) {
+    // Failed to commit or map. Clean up and retry, in the hope that
+    // we can still allocate by flushing the page cache (more aggressively).
+    alloc_page_failed(&allocation);
+    goto retry;
   }
 
   // Reset page. This updates the page's sequence number and must
-  // be done after page allocation, which potentially blocked in
-  // a safepoint where the global sequence number was updated.
+  // be done after we potentially blocked in a safepoint (stalled)
+  // where the global sequence number was updated.
   page->reset();
 
   // Update allocation statistics. Exclude worker threads to avoid
@@ -539,35 +697,36 @@
     ZStatInc(ZStatAllocRate::counter(), bytes);
   }
 
+  // Send event
+  event.commit(type, size, allocation.flushed(), allocation.committed(),
+               page->physical_memory().nsegments(), flags.non_blocking(), flags.no_reserve());
+
   return page;
 }
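
The retry label above gives alloc_page() its overall shape: claim capacity and harvest cached pages under the lock, then commit and map outside the lock, and if that fails hand everything back and start over. A compact sketch of just that control flow, with invented placeholder types standing in for the real allocation phases:

#include <cstdio>

struct Allocation { int attempt; };
struct Page       { int attempt; };

// Placeholders for the lock-protected and lock-free phases of the real allocator.
static bool reserve_and_harvest(const Allocation&) { return true; }
static Page* commit_and_map(const Allocation& a) {
  return a.attempt < 2 ? nullptr : new Page{a.attempt};   // fail once, then succeed
}
static void undo(const Allocation& a) { std::printf("undo attempt %d\n", a.attempt); }

static Page* alloc_page_sketch() {
  for (int attempt = 1; ; attempt++) {   // plays the role of the retry: label
    Allocation allocation{attempt};
    if (!reserve_and_harvest(allocation)) {
      return nullptr;                    // out of memory
    }
    Page* const page = commit_and_map(allocation);
    if (page != nullptr) {
      return page;                       // success
    }
    undo(allocation);                    // return capacity/pages, then retry
  }
}

int main() {
  Page* const page = alloc_page_sketch();
  std::printf("succeeded on attempt %d\n", page->attempt);
  delete page;
  return 0;
}
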
 
-void ZPageAllocator::satisfy_alloc_queue() {
+void ZPageAllocator::satisfy_stalled() {
   for (;;) {
-    ZPageAllocRequest* const request = _queue.first();
-    if (request == NULL) {
+    ZPageAllocation* const allocation = _stalled.first();
+    if (allocation == NULL) {
       // Allocation queue is empty
       return;
     }
 
-    ZPage* const page = alloc_page_common(request->type(), request->size(), request->flags());
-    if (page == NULL) {
+    if (!alloc_page_common(allocation)) {
       // Allocation could not be satisfied, give up
       return;
     }
 
-    // Allocation succeeded, dequeue and satisfy request. Note that
-    // the dequeue operation must happen first, since the request
-    // will immediately be deallocated once it has been satisfied.
-    _queue.remove(request);
-    _satisfied.insert_first(request);
-    request->satisfy(page);
+    // Allocation succeeded, dequeue and satisfy allocation request.
+    // Note that we must dequeue the allocation request first, since
+    // it will immediately be deallocated once it has been satisfied.
+    _stalled.remove(allocation);
+    _satisfied.insert_last(allocation);
+    allocation->satisfy(ZPageAllocationStallSuccess);
   }
 }
 
-void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
-  ZLocker<ZLock> locker(&_lock);
-
+void ZPageAllocator::free_page_inner(ZPage* page, bool reclaimed) {
   // Update used statistics
   decrease_used(page->size(), reclaimed);
 
@@ -576,173 +735,72 @@
 
   // Cache page
   _cache.free_page(page);
-
-  // Try satisfy blocked allocations
-  satisfy_alloc_queue();
 }
 
-size_t ZPageAllocator::flush_cache(ZPageCacheFlushClosure* cl, bool for_allocation) {
-  EventZPageCacheFlush event;
+void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
+  ZLocker<ZLock> locker(&_lock);
 
-  ZList<ZPage> list;
+  // Free page
+  free_page_inner(page, reclaimed);
 
-  // Flush pages
-  _cache.flush(cl, &list);
+  // Try satisfy stalled allocations
+  satisfy_stalled();
+}
 
-  const size_t overflushed = cl->overflushed();
-  if (overflushed > 0) {
-    // Overflushed, keep part of last page
-    ZPage* const page = list.last()->split(overflushed);
-    _cache.free_page(page);
+size_t ZPageAllocator::uncommit(uint64_t* timeout) {
+  // We need to join the suspendible thread set while manipulating capacity and
+  // used, to make sure GC safepoints will have a consistent view. However, when
+  // ZVerifyViews is enabled we need to join at a broader scope to also make sure
+  // we don't change the address good mask after pages have been flushed, and
+  // thereby made invisible to pages_do(), but before they have been unmapped.
+  SuspendibleThreadSetJoiner joiner(ZVerifyViews);
+  ZList<ZPage> pages;
+  size_t flushed;
+
+  {
+    SuspendibleThreadSetJoiner joiner(!ZVerifyViews);
+    ZLocker<ZLock> locker(&_lock);
+
+    // Never uncommit the reserve, and never uncommit below min capacity. We flush
+    // out and uncommit one chunk at a time (~0.8% of the max capacity, but at least
+    // one granule and at most 256M), in case demand for memory increases while we
+    // are uncommitting.
+    const size_t retain = clamp(_used + _max_reserve, _min_capacity, _capacity);
+    const size_t release = _capacity - retain;
+    const size_t limit = MIN2(align_up(_current_max_capacity >> 7, ZGranuleSize), 256 * M);
+    const size_t flush = MIN2(release, limit);
+
+    // Flush pages to uncommit
+    flushed = _cache.flush_for_uncommit(flush, &pages, timeout);
+    if (flushed == 0) {
+      // Nothing flushed
+      return 0;
+    }
+
+    // Record flushed pages as claimed
+    Atomic::add(&_claimed, flushed);
   }
 
-  // Destroy pages
-  size_t flushed = 0;
-  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
-    flushed += page->size();
+  // Unmap, uncommit, and destroy flushed pages
+  ZListRemoveIterator<ZPage> iter(&pages);
+  for (ZPage* page; iter.next(&page);) {
+    unmap_page(page);
+    uncommit_page(page);
     destroy_page(page);
   }
 
-  // Send event
-  event.commit(flushed, for_allocation);
+  {
+    SuspendibleThreadSetJoiner joiner(!ZVerifyViews);
+    ZLocker<ZLock> locker(&_lock);
+
+    // Adjust claimed and capacity to reflect the uncommit
+    Atomic::sub(&_claimed, flushed);
+    decrease_capacity(flushed, false /* set_max_capacity */);
+  }
 
   return flushed;
 }
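
The retain/release/limit arithmetic above decides how much to uncommit per pass: keep at least used plus the max reserve (clamped to [min capacity, capacity]) and release the rest in chunks of about max capacity / 128 (~0.8%), rounded up to a granule and capped at 256M. A small worked sketch of that sizing with illustrative capacity numbers; clamp and align_up are re-implemented here rather than taken from HotSpot:

#include <algorithm>
#include <cstdint>
#include <cstdio>

static const uint64_t M           = 1024 * 1024;
static const uint64_t GranuleSize = 2 * M;       // ZGranuleSize is 2M

static uint64_t align_up(uint64_t value, uint64_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

static uint64_t chunk_to_flush(uint64_t used, uint64_t max_reserve, uint64_t min_capacity,
                               uint64_t capacity, uint64_t current_max_capacity) {
  // Never uncommit the reserve, and never go below min capacity
  const uint64_t retain  = std::min(std::max(used + max_reserve, min_capacity), capacity);
  const uint64_t release = capacity - retain;
  // ~0.8% of max capacity per pass, at least one granule, at most 256M
  const uint64_t limit   = std::min(align_up(current_max_capacity >> 7, GranuleSize), 256 * M);
  return std::min(release, limit);
}

int main() {
  // Example: 16G max capacity, 10G committed, 2G used, 32M reserve, 2G min capacity
  const uint64_t flush = chunk_to_flush(2048 * M, 32 * M, 2048 * M, 10240 * M, 16384 * M);
  std::printf("flush %.0fM per pass\n", (double)(flush / M));   // 128M (16G >> 7)
  return 0;
}
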
 
-class ZPageCacheFlushForAllocationClosure : public ZPageCacheFlushClosure {
-public:
-  ZPageCacheFlushForAllocationClosure(size_t requested) :
-      ZPageCacheFlushClosure(requested) {}
-
-  virtual bool do_page(const ZPage* page) {
-    if (_flushed < _requested) {
-      // Flush page
-      _flushed += page->size();
-      return true;
-    }
-
-    // Don't flush page
-    return false;
-  }
-};
-
-void ZPageAllocator::flush_cache_for_allocation(size_t requested) {
-  assert(requested <= _cache.available(), "Invalid request");
-
-  // Flush pages
-  ZPageCacheFlushForAllocationClosure cl(requested);
-  const size_t flushed = flush_cache(&cl, true /* for_allocation */);
-
-  assert(requested == flushed, "Failed to flush");
-
-  const size_t cached_after = _cache.available();
-  const size_t cached_before = cached_after + flushed;
-
-  log_info(gc, heap)("Page Cache: " SIZE_FORMAT "M(%.0f%%)->" SIZE_FORMAT "M(%.0f%%), "
-                     "Flushed: " SIZE_FORMAT "M",
-                     cached_before / M, percent_of(cached_before, max_capacity()),
-                     cached_after / M, percent_of(cached_after, max_capacity()),
-                     flushed / M);
-
-  // Update statistics
-  ZStatInc(ZCounterPageCacheFlush, flushed);
-}
-
-class ZPageCacheFlushForUncommitClosure : public ZPageCacheFlushClosure {
-private:
-  const uint64_t _now;
-  const uint64_t _delay;
-  uint64_t       _timeout;
-
-public:
-  ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t delay) :
-      ZPageCacheFlushClosure(requested),
-      _now(os::elapsedTime()),
-      _delay(delay),
-      _timeout(_delay) {}
-
-  virtual bool do_page(const ZPage* page) {
-    const uint64_t expires = page->last_used() + _delay;
-    const uint64_t timeout = expires - MIN2(expires, _now);
-
-    if (_flushed < _requested && timeout == 0) {
-      // Flush page
-      _flushed += page->size();
-      return true;
-    }
-
-    // Record shortest non-expired timeout
-    _timeout = MIN2(_timeout, timeout);
-
-    // Don't flush page
-    return false;
-  }
-
-  uint64_t timeout() const {
-    return _timeout;
-  }
-};
-
-uint64_t ZPageAllocator::uncommit(uint64_t delay) {
-  // Set the default timeout, when no pages are found in the
-  // cache or when uncommit is disabled, equal to the delay.
-  uint64_t timeout = delay;
-
-  if (!_uncommit) {
-    // Disabled
-    return timeout;
-  }
-
-  EventZUncommit event;
-  size_t capacity_before;
-  size_t capacity_after;
-  size_t uncommitted;
-
-  {
-    SuspendibleThreadSetJoiner joiner;
-    ZLocker<ZLock> locker(&_lock);
-
-    // Don't flush more than we will uncommit. Never uncommit
-    // the reserve, and never uncommit below min capacity.
-    const size_t needed = MIN2(_used + _max_reserve, _current_max_capacity);
-    const size_t guarded = MAX2(needed, _min_capacity);
-    const size_t uncommittable = _capacity - guarded;
-    const size_t uncached_available = _capacity - _used - _cache.available();
-    size_t uncommit = MIN2(uncommittable, uncached_available);
-    const size_t flush = uncommittable - uncommit;
-
-    if (flush > 0) {
-      // Flush pages to uncommit
-      ZPageCacheFlushForUncommitClosure cl(flush, delay);
-      uncommit += flush_cache(&cl, false /* for_allocation */);
-      timeout = cl.timeout();
-    }
-
-    // Uncommit
-    uncommitted = _physical.uncommit(uncommit);
-    _capacity -= uncommitted;
-
-    capacity_after = _capacity;
-    capacity_before = capacity_after + uncommitted;
-  }
-
-  if (uncommitted > 0) {
-    log_info(gc, heap)("Capacity: " SIZE_FORMAT "M(%.0f%%)->" SIZE_FORMAT "M(%.0f%%), "
-                       "Uncommitted: " SIZE_FORMAT "M",
-                       capacity_before / M, percent_of(capacity_before, max_capacity()),
-                       capacity_after / M, percent_of(capacity_after, max_capacity()),
-                       uncommitted / M);
-
-    // Send event
-    event.commit(capacity_before, capacity_after, uncommitted);
-
-    // Update statistics
-    ZStatInc(ZCounterUncommit, uncommitted);
-  }
-
-  return timeout;
-}
-
 void ZPageAllocator::enable_deferred_delete() const {
   _safe_delete.enable_deferred_delete();
 }
@@ -762,10 +820,12 @@
 }
 
 void ZPageAllocator::pages_do(ZPageClosure* cl) const {
-  ZListIterator<ZPageAllocRequest> iter(&_satisfied);
-  for (ZPageAllocRequest* request; iter.next(&request);) {
-    const ZPage* const page = request->peek();
-    if (page != NULL) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
+
+  ZListIterator<ZPageAllocation> iter_satisfied(&_satisfied);
+  for (ZPageAllocation* allocation; iter_satisfied.next(&allocation);) {
+    ZListIterator<ZPage> iter_pages(allocation->pages());
+    for (ZPage* page; iter_pages.next(&page);) {
       cl->do_page(page);
     }
   }
@@ -775,7 +835,7 @@
 
 bool ZPageAllocator::is_alloc_stalled() const {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
-  return !_queue.is_empty();
+  return !_stalled.is_empty();
 }
 
 void ZPageAllocator::check_out_of_memory() {
@@ -783,16 +843,20 @@
 
   // Fail allocation requests that were enqueued before the
   // last GC cycle started, otherwise start a new GC cycle.
-  for (ZPageAllocRequest* request = _queue.first(); request != NULL; request = _queue.first()) {
-    if (request->total_collections() == ZCollectedHeap::heap()->total_collections()) {
+  for (ZPageAllocation* allocation = _stalled.first(); allocation != NULL; allocation = _stalled.first()) {
+    if (allocation->seqnum() == ZGlobalSeqNum) {
       // Start a new GC cycle, keep allocation requests enqueued
-      request->satisfy(gc_marker);
+      allocation->satisfy(ZPageAllocationStallStartGC);
       return;
     }
 
     // Out of memory, fail allocation request
-    _queue.remove(request);
-    _satisfied.insert_first(request);
-    request->satisfy(NULL);
+    _stalled.remove(allocation);
+    _satisfied.insert_last(allocation);
+    allocation->satisfy(ZPageAllocationStallFailed);
   }
 }
+
+void ZPageAllocator::threads_do(ThreadClosure* tc) const {
+  tc->do_thread(_uncommitter);
+}
--- a/src/hotspot/share/gc/z/zPageAllocator.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zPageAllocator.hpp	Tue Jun 09 11:01:09 2020 +0200
@@ -31,60 +31,70 @@
 #include "gc/z/zPhysicalMemory.hpp"
 #include "gc/z/zSafeDelete.hpp"
 #include "gc/z/zVirtualMemory.hpp"
-#include "memory/allocation.hpp"
 
-class ZPageAllocRequest;
+class ThreadClosure;
+class ZPageAllocation;
 class ZWorkers;
+class ZUncommitter;
 
 class ZPageAllocator {
   friend class VMStructs;
+  friend class ZUncommitter;
 
 private:
   ZLock                      _lock;
+  ZPageCache                 _cache;
   ZVirtualMemoryManager      _virtual;
   ZPhysicalMemoryManager     _physical;
-  ZPageCache                 _cache;
   const size_t               _min_capacity;
   const size_t               _max_capacity;
   const size_t               _max_reserve;
-  size_t                     _current_max_capacity;
-  size_t                     _capacity;
+  volatile size_t            _current_max_capacity;
+  volatile size_t            _capacity;
+  volatile size_t            _claimed;
+  volatile size_t            _used;
   size_t                     _used_high;
   size_t                     _used_low;
-  size_t                     _used;
   size_t                     _allocated;
   ssize_t                    _reclaimed;
-  ZList<ZPageAllocRequest>   _queue;
-  ZList<ZPageAllocRequest>   _satisfied;
+  ZList<ZPageAllocation>     _stalled;
+  ZList<ZPageAllocation>     _satisfied;
+  ZUncommitter*              _uncommitter;
   mutable ZSafeDelete<ZPage> _safe_delete;
-  bool                       _uncommit;
   bool                       _initialized;
 
-  static ZPage* const gc_marker;
+  bool prime_cache(ZWorkers* workers, size_t size);
 
-  void prime_cache(ZWorkers* workers, size_t size);
+  size_t increase_capacity(size_t size);
+  void decrease_capacity(size_t size, bool set_max_capacity);
 
   void increase_used(size_t size, bool relocation);
   void decrease_used(size_t size, bool reclaimed);
 
-  ZPage* create_page(uint8_t type, size_t size);
+  bool commit_page(ZPage* page);
+  void uncommit_page(ZPage* page);
+
+  void map_page(const ZPage* page) const;
+  void unmap_page(const ZPage* page) const;
+
   void destroy_page(ZPage* page);
 
-  size_t max_available(bool no_reserve) const;
-  bool ensure_available(size_t size, bool no_reserve);
-  void ensure_uncached_available(size_t size);
+  bool is_alloc_allowed(size_t size, bool no_reserve) const;
+  bool is_alloc_allowed_from_cache(size_t size, bool no_reserve) const;
 
-  void check_out_of_memory_during_initialization();
+  bool alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve, ZList<ZPage>* pages);
+  bool alloc_page_common(ZPageAllocation* allocation);
+  bool alloc_page_stall(ZPageAllocation* allocation);
+  bool alloc_page_or_stall(ZPageAllocation* allocation);
+  ZPage* alloc_page_create(ZPageAllocation* allocation);
+  ZPage* alloc_page_finalize(ZPageAllocation* allocation);
+  void alloc_page_failed(ZPageAllocation* allocation);
 
-  ZPage* alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve);
-  ZPage* alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags);
-  ZPage* alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags);
-  ZPage* alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags);
+  void satisfy_stalled();
 
-  size_t flush_cache(ZPageCacheFlushClosure* cl, bool for_allocation);
-  void flush_cache_for_allocation(size_t requested);
+  void free_page_inner(ZPage* page, bool reclaimed);
 
-  void satisfy_alloc_queue();
+  size_t uncommit(uint64_t* timeout);
 
 public:
   ZPageAllocator(ZWorkers* workers,
@@ -112,13 +122,9 @@
   ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags);
   void free_page(ZPage* page, bool reclaimed);
 
-  uint64_t uncommit(uint64_t delay);
-
   void enable_deferred_delete() const;
   void disable_deferred_delete() const;
 
-  void map_page(const ZPage* page) const;
-
   void debug_map_page(const ZPage* page) const;
   void debug_unmap_page(const ZPage* page) const;
 
@@ -126,6 +132,8 @@
   void check_out_of_memory();
 
   void pages_do(ZPageClosure* cl) const;
+
+  void threads_do(ThreadClosure* tc) const;
 };
 
 #endif // SHARE_GC_Z_ZPAGEALLOCATOR_HPP
--- a/src/hotspot/share/gc/z/zPageCache.cpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zPageCache.cpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/z/zGlobals.hpp"
 #include "gc/z/zList.inline.hpp"
 #include "gc/z/zNUMA.hpp"
 #include "gc/z/zPage.inline.hpp"
@@ -29,25 +30,36 @@
 #include "gc/z/zStat.hpp"
 #include "gc/z/zValue.inline.hpp"
 #include "logging/log.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/os.hpp"
 
 static const ZStatCounter ZCounterPageCacheHitL1("Memory", "Page Cache Hit L1", ZStatUnitOpsPerSecond);
 static const ZStatCounter ZCounterPageCacheHitL2("Memory", "Page Cache Hit L2", ZStatUnitOpsPerSecond);
 static const ZStatCounter ZCounterPageCacheHitL3("Memory", "Page Cache Hit L3", ZStatUnitOpsPerSecond);
 static const ZStatCounter ZCounterPageCacheMiss("Memory", "Page Cache Miss", ZStatUnitOpsPerSecond);
 
+class ZPageCacheFlushClosure : public StackObj {
+  friend class ZPageCache;
+
+protected:
+  const size_t _requested;
+  size_t       _flushed;
+
+public:
+  ZPageCacheFlushClosure(size_t requested);
+  virtual bool do_page(const ZPage* page) = 0;
+};
+
 ZPageCacheFlushClosure::ZPageCacheFlushClosure(size_t requested) :
     _requested(requested),
     _flushed(0) {}
 
-size_t ZPageCacheFlushClosure::overflushed() const {
-  return _flushed > _requested ? _flushed - _requested : 0;
-}
-
 ZPageCache::ZPageCache() :
-    _available(0),
     _small(),
     _medium(),
-    _large() {}
+    _large(),
+    _last_commit(0) {}
 
 ZPage* ZPageCache::alloc_small_page() {
   const uint32_t numa_id = ZNUMA::id();
@@ -161,7 +173,7 @@
         page = oversized->split(type, size);
 
         // Cache remainder
-        free_page_inner(oversized);
+        free_page(oversized);
       } else {
         // Re-type correctly sized page
         page = oversized->retype(type);
@@ -169,16 +181,14 @@
     }
   }
 
-  if (page != NULL) {
-    _available -= page->size();
-  } else {
+  if (page == NULL) {
     ZStatInc(ZCounterPageCacheMiss);
   }
 
   return page;
 }
 
-void ZPageCache::free_page_inner(ZPage* page) {
+void ZPageCache::free_page(ZPage* page) {
   const uint8_t type = page->type();
   if (type == ZPageTypeSmall) {
     _small.get(page->numa_id()).insert_first(page);
@@ -189,11 +199,6 @@
   }
 }
 
-void ZPageCache::free_page(ZPage* page) {
-  free_page_inner(page);
-  _available += page->size();
-}
-
 bool ZPageCache::flush_list_inner(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to) {
   ZPage* const page = from->last();
   if (page == NULL || !cl->do_page(page)) {
@@ -202,7 +207,6 @@
   }
 
   // Flush page
-  _available -= page->size();
   from->remove(page);
   to->insert_last(page);
   return true;
@@ -239,6 +243,94 @@
   flush_list(cl, &_large, to);
   flush_list(cl, &_medium, to);
   flush_per_numa_lists(cl, &_small, to);
+
+  if (cl->_flushed > cl->_requested) {
+    // Overflushed, re-insert part of last page into the cache
+    const size_t overflushed = cl->_flushed - cl->_requested;
+    ZPage* const reinsert = to->last()->split(overflushed);
+    free_page(reinsert);
+    cl->_flushed -= overflushed;
+  }
+}
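
Because pages are flushed whole, the closure can overshoot the requested amount; the code above then splits the excess off the last flushed page and returns it to the cache. A small numeric sketch of that bookkeeping over plain sizes instead of real ZPage objects:

#include <cstdint>
#include <cstdio>
#include <vector>

static const uint64_t M = 1024 * 1024;

int main() {
  // Cached page sizes, flushed whole until the request is covered
  const std::vector<uint64_t> cache = {4 * M, 8 * M};
  const uint64_t requested = 10 * M;

  uint64_t flushed = 0;
  std::vector<uint64_t> flushed_pages;
  for (uint64_t size : cache) {
    if (flushed >= requested) {
      break;                         // requested amount already flushed
    }
    flushed += size;
    flushed_pages.push_back(size);   // pages are flushed whole
  }

  if (flushed > requested) {
    // Overflushed: split the excess off the last page and re-insert it
    const uint64_t overflushed = flushed - requested;   // 2M in this example
    flushed_pages.back() -= overflushed;
    flushed -= overflushed;
    std::printf("re-inserted %luM into the cache\n", (unsigned long)(overflushed / M));
  }

  std::printf("flushed %luM of %luM requested\n",
              (unsigned long)(flushed / M), (unsigned long)(requested / M));
  return 0;
}
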
+
+class ZPageCacheFlushForAllocationClosure : public ZPageCacheFlushClosure {
+public:
+  ZPageCacheFlushForAllocationClosure(size_t requested) :
+      ZPageCacheFlushClosure(requested) {}
+
+  virtual bool do_page(const ZPage* page) {
+    if (_flushed < _requested) {
+      // Flush page
+      _flushed += page->size();
+      return true;
+    }
+
+    // Don't flush page
+    return false;
+  }
+};
+
+void ZPageCache::flush_for_allocation(size_t requested, ZList<ZPage>* to) {
+  ZPageCacheFlushForAllocationClosure cl(requested);
+  flush(&cl, to);
+}
+
+class ZPageCacheFlushForUncommitClosure : public ZPageCacheFlushClosure {
+private:
+  const uint64_t _now;
+  uint64_t*      _timeout;
+
+public:
+  ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t now, uint64_t* timeout) :
+      ZPageCacheFlushClosure(requested),
+      _now(now),
+      _timeout(timeout) {
+    // Set initial timeout
+    *_timeout = ZUncommitDelay;
+  }
+
+  virtual bool do_page(const ZPage* page) {
+    const uint64_t expires = page->last_used() + ZUncommitDelay;
+    if (expires > _now) {
+      // Don't flush page, record shortest non-expired timeout
+      *_timeout = MIN2(*_timeout, expires - _now);
+      return false;
+    }
+
+    if (_flushed >= _requested) {
+      // Don't flush page, requested amount flushed
+      return false;
+    }
+
+    // Flush page
+    _flushed += page->size();
+    return true;
+  }
+};
+
+size_t ZPageCache::flush_for_uncommit(size_t requested, ZList<ZPage>* to, uint64_t* timeout) {
+  const uint64_t now = os::elapsedTime();
+  const uint64_t expires = _last_commit + ZUncommitDelay;
+  if (expires > now) {
+    // Delay uncommit, set next timeout
+    *timeout = expires - now;
+    return 0;
+  }
+
+  if (requested == 0) {
+    // Nothing to flush, set next timeout
+    *timeout = ZUncommitDelay;
+    return 0;
+  }
+
+  ZPageCacheFlushForUncommitClosure cl(requested, now, timeout);
+  flush(&cl, to);
+
+  return cl._flushed;
+}
+
+void ZPageCache::set_last_commit() {
+  _last_commit = os::elapsedTime();
 }
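
flush_for_uncommit() only takes pages whose last_used timestamp is at least ZUncommitDelay old, and otherwise reports the shortest remaining time, which becomes the uncommitter's next sleep. A minimal sketch of that timeout calculation, treating elapsed time as plain integer seconds and using the default 300s delay:

#include <algorithm>
#include <cstdint>
#include <cstdio>

static const uint64_t UncommitDelay = 300;   // the ZUncommitDelay default, in seconds

// Returns true if the page may be flushed now; otherwise lowers *timeout to
// the time remaining until this page expires.
static bool may_flush(uint64_t last_used, uint64_t now, uint64_t* timeout) {
  const uint64_t expires = last_used + UncommitDelay;
  if (expires > now) {
    *timeout = std::min(*timeout, expires - now);
    return false;
  }
  return true;
}

int main() {
  const uint64_t now = 1000;
  uint64_t timeout = UncommitDelay;          // initial timeout

  // Page A last used at t=600: expired (600 + 300 <= 1000), flushable
  // Page B last used at t=900: expires at t=1200, 200s from now
  std::printf("page A flushable: %d\n", may_flush(600, now, &timeout));
  std::printf("page B flushable: %d\n", may_flush(900, now, &timeout));
  std::printf("next uncommit attempt in %lus\n", (unsigned long)timeout);   // 200
  return 0;
}
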
 
 void ZPageCache::pages_do(ZPageClosure* cl) const {
--- a/src/hotspot/share/gc/z/zPageCache.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zPageCache.hpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,25 +27,15 @@
 #include "gc/z/zList.hpp"
 #include "gc/z/zPage.hpp"
 #include "gc/z/zValue.hpp"
-#include "memory/allocation.hpp"
 
-class ZPageCacheFlushClosure : public StackObj {
-protected:
-  const size_t _requested;
-  size_t       _flushed;
-
-public:
-  ZPageCacheFlushClosure(size_t requested);
-  size_t overflushed() const;
-  virtual bool do_page(const ZPage* page) = 0;
-};
+class ZPageCacheFlushClosure;
 
 class ZPageCache {
 private:
-  size_t                  _available;
   ZPerNUMA<ZList<ZPage> > _small;
   ZList<ZPage>            _medium;
   ZList<ZPage>            _large;
+  uint64_t                _last_commit;
 
   ZPage* alloc_small_page();
   ZPage* alloc_medium_page();
@@ -55,21 +45,21 @@
   ZPage* alloc_oversized_large_page(size_t size);
   ZPage* alloc_oversized_page(size_t size);
 
-  void free_page_inner(ZPage* page);
-
   bool flush_list_inner(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to);
   void flush_list(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to);
   void flush_per_numa_lists(ZPageCacheFlushClosure* cl, ZPerNUMA<ZList<ZPage> >* from, ZList<ZPage>* to);
+  void flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to);
 
 public:
   ZPageCache();
 
-  size_t available() const;
-
   ZPage* alloc_page(uint8_t type, size_t size);
   void free_page(ZPage* page);
 
-  void flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to);
+  void flush_for_allocation(size_t requested, ZList<ZPage>* to);
+  size_t flush_for_uncommit(size_t requested, ZList<ZPage>* to, uint64_t* timeout);
+
+  void set_last_commit();
 
   void pages_do(ZPageClosure* cl) const;
 };
--- a/src/hotspot/share/gc/z/zPageCache.inline.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_Z_ZPAGECACHE_INLINE_HPP
-#define SHARE_GC_Z_ZPAGECACHE_INLINE_HPP
-
-#include "gc/z/zList.inline.hpp"
-#include "gc/z/zPageCache.hpp"
-#include "gc/z/zValue.inline.hpp"
-
-inline size_t ZPageCache::available() const {
-  return _available;
-}
-
-#endif // SHARE_GC_Z_ZPAGECACHE_INLINE_HPP
--- a/src/hotspot/share/gc/z/zPhysicalMemory.cpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zPhysicalMemory.cpp	Tue Jun 09 11:01:09 2020 +0200
@@ -27,92 +27,212 @@
 #include "gc/z/zLargePages.inline.hpp"
 #include "gc/z/zNUMA.inline.hpp"
 #include "gc/z/zPhysicalMemory.inline.hpp"
+#include "logging/log.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/globals_extension.hpp"
 #include "runtime/init.hpp"
 #include "runtime/os.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/powerOfTwo.hpp"
 
 ZPhysicalMemory::ZPhysicalMemory() :
+    _nsegments_max(0),
     _nsegments(0),
     _segments(NULL) {}
 
 ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemorySegment& segment) :
+    _nsegments_max(0),
     _nsegments(0),
     _segments(NULL) {
   add_segment(segment);
 }
 
 ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemory& pmem) :
+    _nsegments_max(0),
     _nsegments(0),
     _segments(NULL) {
-
-  // Copy segments
-  for (size_t i = 0; i < pmem.nsegments(); i++) {
-    add_segment(pmem.segment(i));
-  }
+  add_segments(pmem);
 }
 
 const ZPhysicalMemory& ZPhysicalMemory::operator=(const ZPhysicalMemory& pmem) {
-  // Free segments
-  delete [] _segments;
-  _segments = NULL;
-  _nsegments = 0;
-
-  // Copy segments
-  for (size_t i = 0; i < pmem.nsegments(); i++) {
-    add_segment(pmem.segment(i));
-  }
-
+  remove_segments();
+  add_segments(pmem);
   return *this;
 }
 
 ZPhysicalMemory::~ZPhysicalMemory() {
-  delete [] _segments;
-  _segments = NULL;
-  _nsegments = 0;
+  remove_segments();
 }
 
 size_t ZPhysicalMemory::size() const {
   size_t size = 0;
 
-  for (size_t i = 0; i < _nsegments; i++) {
+  for (uint32_t i = 0; i < _nsegments; i++) {
     size += _segments[i].size();
   }
 
   return size;
 }
 
+void ZPhysicalMemory::insert_segment(uint32_t index, uintptr_t start, size_t size, bool committed) {
+  assert(index <= _nsegments, "Invalid index");
+
+  ZPhysicalMemorySegment* const from_segments = _segments;
+
+  if (_nsegments + 1 > _nsegments_max) {
+    // Resize array
+    _nsegments_max = round_up_power_of_2(_nsegments_max + 1);
+    _segments = new ZPhysicalMemorySegment[_nsegments_max];
+
+    // Copy segments before index
+    for (uint32_t i = 0; i < index; i++) {
+      _segments[i] = from_segments[i];
+    }
+  }
+
+  // Copy/Move segments after index
+  for (uint32_t i = _nsegments; i > index; i--) {
+    _segments[i] = from_segments[i - 1];
+  }
+
+  // Insert new segment
+  _segments[index] = ZPhysicalMemorySegment(start, size, committed);
+  _nsegments++;
+
+  // Delete old array
+  if (from_segments != _segments) {
+    delete [] from_segments;
+  }
+}
+
+void ZPhysicalMemory::replace_segment(uint32_t index, uintptr_t start, size_t size, bool committed) {
+  assert(index < _nsegments, "Invalid index");
+  _segments[index] = ZPhysicalMemorySegment(start, size, committed);
+}
+
+void ZPhysicalMemory::remove_segment(uint32_t index) {
+  assert(index < _nsegments, "Invalid index");
+
+  // Move segments after index
+  for (uint32_t i = index + 1; i < _nsegments; i++) {
+    _segments[i - 1] = _segments[i];
+  }
+
+  _nsegments--;
+}
+
+void ZPhysicalMemory::add_segments(const ZPhysicalMemory& pmem) {
+  for (uint32_t i = 0; i < pmem.nsegments(); i++) {
+    add_segment(pmem.segment(i));
+  }
+}
+
+void ZPhysicalMemory::remove_segments() {
+  delete [] _segments;
+  _segments = NULL;
+  _nsegments_max = 0;
+  _nsegments = 0;
+}
+
+static bool is_mergable(const ZPhysicalMemorySegment& before, const ZPhysicalMemorySegment& after) {
+  return before.end() == after.start() && before.is_committed() == after.is_committed();
+}
+
 void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) {
-  // Try merge with last segment
-  if (_nsegments > 0) {
-    ZPhysicalMemorySegment& last = _segments[_nsegments - 1];
-    assert(last.end() <= segment.start(), "Segments added out of order");
-    if (last.end() == segment.start()) {
-      last = ZPhysicalMemorySegment(last.start(), last.size() + segment.size());
+  // Insert segments in address order, merge segments when possible
+  for (uint32_t i = _nsegments; i > 0; i--) {
+    const uint32_t current = i - 1;
+
+    if (_segments[current].end() <= segment.start()) {
+      if (is_mergable(_segments[current], segment)) {
+        if (current + 1 < _nsegments && is_mergable(segment, _segments[current + 1])) {
+          // Merge with end of current segment and start of next segment
+          const size_t start = _segments[current].start();
+          const size_t size = _segments[current].size() + segment.size() + _segments[current + 1].size();
+          replace_segment(current, start, size, segment.is_committed());
+          remove_segment(current + 1);
+          return;
+        }
+
+        // Merge with end of current segment
+        const size_t start = _segments[current].start();
+        const size_t size = _segments[current].size() + segment.size();
+        replace_segment(current, start, size, segment.is_committed());
+        return;
+      } else if (current + 1 < _nsegments && is_mergable(segment, _segments[current + 1])) {
+        // Merge with start of next segment
+        const size_t start = segment.start();
+        const size_t size = segment.size() + _segments[current + 1].size();
+        replace_segment(current + 1, start, size, segment.is_committed());
+        return;
+      }
+
+      // Insert after current segment
+      insert_segment(current + 1, segment.start(), segment.size(), segment.is_committed());
       return;
     }
   }
 
-  // Resize array
-  ZPhysicalMemorySegment* const old_segments = _segments;
-  _segments = new ZPhysicalMemorySegment[_nsegments + 1];
-  for (size_t i = 0; i < _nsegments; i++) {
-    _segments[i] = old_segments[i];
+  if (_nsegments > 0 && is_mergable(segment, _segments[0])) {
+    // Merge with start of first segment
+    const size_t start = segment.start();
+    const size_t size = segment.size() + _segments[0].size();
+    replace_segment(0, start, size, segment.is_committed());
+    return;
   }
-  delete [] old_segments;
 
-  // Add new segment
-  _segments[_nsegments] = segment;
-  _nsegments++;
+  // Insert before first segment
+  insert_segment(0, segment.start(), segment.size(), segment.is_committed());
+}
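
add_segment() keeps the segment array sorted by address and merges a new segment with its neighbors when they are contiguous and have the same committed state. A self-contained sketch of the same invariant over a std::vector, with a simplified Segment type in place of ZPhysicalMemorySegment:

#include <cstdint>
#include <cstdio>
#include <vector>

struct Segment {
  uint64_t start;
  uint64_t size;
  bool     committed;
  uint64_t end() const { return start + size; }
};

static bool is_mergable(const Segment& before, const Segment& after) {
  return before.end() == after.start && before.committed == after.committed;
}

// Insert in address order, merging with the previous and/or next segment
static void add_segment(std::vector<Segment>& segments, Segment s) {
  size_t i = 0;
  while (i < segments.size() && segments[i].end() <= s.start) {
    i++;                                           // find insertion point
  }
  if (i > 0 && is_mergable(segments[i - 1], s)) {
    segments[i - 1].size += s.size;                // merge with predecessor
    if (i < segments.size() && is_mergable(segments[i - 1], segments[i])) {
      segments[i - 1].size += segments[i].size;    // and with successor
      segments.erase(segments.begin() + i);
    }
    return;
  }
  if (i < segments.size() && is_mergable(s, segments[i])) {
    segments[i].start = s.start;                   // merge with successor
    segments[i].size += s.size;
    return;
  }
  segments.insert(segments.begin() + i, s);
}

int main() {
  const uint64_t M = 1024 * 1024;
  std::vector<Segment> segments;
  add_segment(segments, {0,     2 * M, true});
  add_segment(segments, {4 * M, 2 * M, true});
  add_segment(segments, {2 * M, 2 * M, true});     // bridges the two above
  add_segment(segments, {8 * M, 2 * M, false});    // different committed state, kept apart
  for (const Segment& s : segments) {
    std::printf("[%luM, %luM) %s\n", (unsigned long)(s.start / M),
                (unsigned long)(s.end() / M), s.committed ? "committed" : "uncommitted");
  }
  return 0;
}
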
+
+bool ZPhysicalMemory::commit_segment(uint32_t index, size_t size) {
+  assert(index < _nsegments, "Invalid index");
+  assert(size <= _segments[index].size(), "Invalid size");
+  assert(!_segments[index].is_committed(), "Invalid state");
+
+  if (size == _segments[index].size()) {
+    // Completely committed
+    _segments[index].set_committed(true);
+    return true;
+  }
+
+  if (size > 0) {
+    // Partially committed, split segment
+    insert_segment(index + 1, _segments[index].start() + size, _segments[index].size() - size, false /* committed */);
+    replace_segment(index, _segments[index].start(), size, true /* committed */);
+  }
+
+  return false;
+}
+
+bool ZPhysicalMemory::uncommit_segment(uint32_t index, size_t size) {
+  assert(index < _nsegments, "Invalid index");
+  assert(size <= _segments[index].size(), "Invalid size");
+  assert(_segments[index].is_committed(), "Invalid state");
+
+  if (size == _segments[index].size()) {
+    // Completely uncommitted
+    _segments[index].set_committed(false);
+    return true;
+  }
+
+  if (size > 0) {
+    // Partially uncommitted, split segment
+    insert_segment(index + 1, _segments[index].start() + size, _segments[index].size() - size, true /* committed */);
+    replace_segment(index, _segments[index].start(), size, false /* committed */);
+  }
+
+  return false;
 }
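
commit_segment() and uncommit_segment() handle partial success by splitting at the boundary: the prefix whose backing operation succeeded gets the new committed state, and the remainder becomes a separate segment right after it. A short self-contained sketch of that split for a partial commit, again with a simplified Segment type rather than the real ZPhysicalMemorySegment:

#include <cstdint>
#include <cstdio>
#include <vector>

struct Segment { uint64_t start; uint64_t size; bool committed; };

// Commit 'committed_size' bytes of segments[index]; returns true if the whole
// segment is now committed, otherwise splits it and returns false.
static bool commit_segment(std::vector<Segment>& segments, size_t index, uint64_t committed_size) {
  const Segment s = segments[index];              // copy, vector may reallocate below
  if (committed_size == s.size) {
    segments[index].committed = true;             // completely committed
    return true;
  }
  if (committed_size > 0) {
    // Partially committed: keep the committed prefix, insert the rest after it
    segments[index] = Segment{s.start, committed_size, true};
    segments.insert(segments.begin() + index + 1,
                    Segment{s.start + committed_size, s.size - committed_size, false});
  }
  return false;
}

int main() {
  const uint64_t M = 1024 * 1024;
  std::vector<Segment> segments = {{0, 8 * M, false}};
  const bool fully = commit_segment(segments, 0, 5 * M);   // backing commit stopped at 5M
  std::printf("fully committed: %d, segments: %zu\n", fully, segments.size());
  for (const Segment& s : segments) {
    std::printf("[%luM, +%luM) %s\n", (unsigned long)(s.start / M),
                (unsigned long)(s.size / M), s.committed ? "committed" : "uncommitted");
  }
  return 0;
}
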
 
 ZPhysicalMemory ZPhysicalMemory::split(size_t size) {
   ZPhysicalMemory pmem;
-  size_t nsegments = 0;
+  uint32_t nsegments = 0;
 
-  for (size_t i = 0; i < _nsegments; i++) {
+  for (uint32_t i = 0; i < _nsegments; i++) {
     const ZPhysicalMemorySegment& segment = _segments[i];
     if (pmem.size() < size) {
       if (pmem.size() + segment.size() <= size) {
@@ -121,8 +241,8 @@
       } else {
         // Split segment
         const size_t split_size = size - pmem.size();
-        pmem.add_segment(ZPhysicalMemorySegment(segment.start(), split_size));
-        _segments[nsegments++] = ZPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size);
+        pmem.add_segment(ZPhysicalMemorySegment(segment.start(), split_size, segment.is_committed()));
+        _segments[nsegments++] = ZPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size, segment.is_committed());
       }
     } else {
       // Keep segment
@@ -135,25 +255,68 @@
   return pmem;
 }
 
+ZPhysicalMemory ZPhysicalMemory::split_committed() {
+  ZPhysicalMemory pmem;
+  uint32_t nsegments = 0;
+
+  for (uint32_t i = 0; i < _nsegments; i++) {
+    const ZPhysicalMemorySegment& segment = _segments[i];
+    if (segment.is_committed()) {
+      // Transfer segment
+      pmem.add_segment(segment);
+    } else {
+      // Keep segment
+      _segments[nsegments++] = segment;
+    }
+  }
+
+  _nsegments = nsegments;
+
+  return pmem;
+}
+
 ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity) :
     _backing(max_capacity) {
-  // Register everything as uncommitted
-  _uncommitted.free(0, max_capacity);
+  // Make the whole range free
+  _manager.free(0, max_capacity);
 }
 
 bool ZPhysicalMemoryManager::is_initialized() const {
   return _backing.is_initialized();
 }
 
-void ZPhysicalMemoryManager::warn_commit_limits(size_t max) const {
-  _backing.warn_commit_limits(max);
+void ZPhysicalMemoryManager::warn_commit_limits(size_t max_capacity) const {
+  _backing.warn_commit_limits(max_capacity);
 }
 
-bool ZPhysicalMemoryManager::supports_uncommit() {
+void ZPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max_capacity) {
   assert(!is_init_completed(), "Invalid state");
 
-  // Test if uncommit is supported by uncommitting and then re-committing a granule
-  return commit(uncommit(ZGranuleSize)) == ZGranuleSize;
+  // If uncommit is not explicitly disabled, max capacity is greater than
+  // min capacity, and uncommit is supported by the platform, then uncommit
+  // will be enabled.
+  if (!ZUncommit) {
+    log_info(gc, init)("Uncommit: Disabled");
+    return;
+  }
+
+  if (max_capacity == min_capacity) {
+    log_info(gc, init)("Uncommit: Implicitly Disabled (-Xms equals -Xmx)");
+    FLAG_SET_ERGO(ZUncommit, false);
+    return;
+  }
+
+  // Test if uncommit is supported by the operating system by committing
+  // and then uncommitting a granule.
+  ZPhysicalMemory pmem(ZPhysicalMemorySegment(0, ZGranuleSize, false /* committed */));
+  if (!commit(pmem) || !uncommit(pmem)) {
+    log_info(gc, init)("Uncommit: Implicitly Disabled (Not supported by operating system)");
+    FLAG_SET_ERGO(ZUncommit, false);
+    return;
+  }
+
+  log_info(gc, init)("Uncommit: Enabled");
+  log_info(gc, init)("Uncommit Delay: " UINTX_FORMAT "s", ZUncommitDelay);
 }
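
try_enable_uncommit() settles the ZUncommit flag once at startup: an explicit -XX:-ZUncommit wins, -Xms equal to -Xmx implicitly disables it, and otherwise committing and uncommitting a single granule probes whether the platform supports it. A sketch of just that decision ladder, with the flag and the probe reduced to plain parameters; the enum and function names here are illustrative, not HotSpot code:

#include <cstddef>
#include <cstdio>

enum class UncommitState {
  Enabled,
  DisabledExplicitly,          // -XX:-ZUncommit
  DisabledMinEqualsMax,        // -Xms equals -Xmx
  DisabledNotSupported         // commit/uncommit probe failed
};

static UncommitState try_enable_uncommit(bool flag_enabled, size_t min_capacity,
                                         size_t max_capacity, bool probe_ok) {
  if (!flag_enabled) {
    return UncommitState::DisabledExplicitly;
  }
  if (max_capacity == min_capacity) {
    return UncommitState::DisabledMinEqualsMax;
  }
  if (!probe_ok) {
    return UncommitState::DisabledNotSupported;
  }
  return UncommitState::Enabled;
}

int main() {
  const size_t M = 1024 * 1024;
  const UncommitState state = try_enable_uncommit(true, 128 * M, 512 * M, true);
  std::printf("uncommit %s\n", state == UncommitState::Enabled ? "enabled" : "disabled");
  return 0;
}
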
 
 void ZPhysicalMemoryManager::nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
@@ -172,86 +335,67 @@
   }
 }
 
-size_t ZPhysicalMemoryManager::commit(size_t size) {
-  size_t committed = 0;
+void ZPhysicalMemoryManager::alloc(ZPhysicalMemory& pmem, size_t size) {
+  assert(is_aligned(size, ZGranuleSize), "Invalid size");
 
-  // Fill holes in the backing memory
-  while (committed < size) {
+  // Allocate segments
+  while (size > 0) {
     size_t allocated = 0;
-    const size_t remaining = size - committed;
-    const uintptr_t start = _uncommitted.alloc_from_front_at_most(remaining, &allocated);
-    if (start == UINTPTR_MAX) {
-      // No holes to commit
-      break;
+    const uintptr_t start = _manager.alloc_from_front_at_most(size, &allocated);
+    assert(start != UINTPTR_MAX, "Allocation should never fail");
+    pmem.add_segment(ZPhysicalMemorySegment(start, allocated, false /* committed */));
+    size -= allocated;
+  }
+}
+
+void ZPhysicalMemoryManager::free(const ZPhysicalMemory& pmem) {
+  // Free segments
+  for (uint32_t i = 0; i < pmem.nsegments(); i++) {
+    const ZPhysicalMemorySegment& segment = pmem.segment(i);
+    _manager.free(segment.start(), segment.size());
+  }
+}
+
+bool ZPhysicalMemoryManager::commit(ZPhysicalMemory& pmem) {
+  // Commit segments
+  for (uint32_t i = 0; i < pmem.nsegments(); i++) {
+    const ZPhysicalMemorySegment& segment = pmem.segment(i);
+    if (segment.is_committed()) {
+      // Segment already committed
+      continue;
     }
 
-    // Try commit hole
-    const size_t filled = _backing.commit(start, allocated);
-    if (filled > 0) {
-      // Successful or partialy successful
-      _committed.free(start, filled);
-      committed += filled;
-    }
-    if (filled < allocated) {
-      // Failed or partialy failed
-      _uncommitted.free(start + filled, allocated - filled);
-      return committed;
+    // Commit segment
+    const size_t committed = _backing.commit(segment.start(), segment.size());
+    if (!pmem.commit_segment(i, committed)) {
+      // Failed or partially failed
+      return false;
     }
   }
 
-  return committed;
+  // Success
+  return true;
 }
 
-size_t ZPhysicalMemoryManager::uncommit(size_t size) {
-  size_t uncommitted = 0;
+bool ZPhysicalMemoryManager::uncommit(ZPhysicalMemory& pmem) {
+  // Uncommit segments
+  for (uint32_t i = 0; i < pmem.nsegments(); i++) {
+    const ZPhysicalMemorySegment& segment = pmem.segment(i);
+    if (!segment.is_committed()) {
+      // Segment already uncommitted
+      continue;
+    }
 
-  // Punch holes in backing memory
-  while (uncommitted < size) {
-    size_t allocated = 0;
-    const size_t remaining = size - uncommitted;
-    const uintptr_t start = _committed.alloc_from_back_at_most(remaining, &allocated);
-    assert(start != UINTPTR_MAX, "Allocation should never fail");
-
-    // Try punch hole
-    const size_t punched = _backing.uncommit(start, allocated);
-    if (punched > 0) {
-      // Successful or partialy successful
-      _uncommitted.free(start, punched);
-      uncommitted += punched;
-    }
-    if (punched < allocated) {
-      // Failed or partialy failed
-      _committed.free(start + punched, allocated - punched);
-      return uncommitted;
+    // Uncommit segment
+    const size_t uncommitted = _backing.uncommit(segment.start(), segment.size());
+    if (!pmem.uncommit_segment(i, uncommitted)) {
+      // Failed or partially failed
+      return false;
     }
   }
 
-  return uncommitted;
-}
-
-ZPhysicalMemory ZPhysicalMemoryManager::alloc(size_t size) {
-  assert(is_aligned(size, ZGranuleSize), "Invalid size");
-
-  ZPhysicalMemory pmem;
-
-  // Allocate segments
-  for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) {
-    const uintptr_t start = _committed.alloc_from_front(ZGranuleSize);
-    assert(start != UINTPTR_MAX, "Allocation should never fail");
-    pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize));
-  }
-
-  return pmem;
-}
-
-void ZPhysicalMemoryManager::free(const ZPhysicalMemory& pmem) {
-  const size_t nsegments = pmem.nsegments();
-
-  // Free segments
-  for (size_t i = 0; i < nsegments; i++) {
-    const ZPhysicalMemorySegment& segment = pmem.segment(i);
-    _committed.free(segment.start(), segment.size());
-  }
+  // Success
+  return true;
 }
 
 void ZPhysicalMemoryManager::pretouch_view(uintptr_t addr, size_t size) const {
@@ -260,11 +404,10 @@
 }
 
 void ZPhysicalMemoryManager::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
-  const size_t nsegments = pmem.nsegments();
   size_t size = 0;
 
   // Map segments
-  for (size_t i = 0; i < nsegments; i++) {
+  for (uint32_t i = 0; i < pmem.nsegments(); i++) {
     const ZPhysicalMemorySegment& segment = pmem.segment(i);
     _backing.map(addr + size, segment.size(), segment.start());
     size += segment.size();
--- a/src/hotspot/share/gc/z/zPhysicalMemory.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zPhysicalMemory.hpp	Tue Jun 09 11:01:09 2020 +0200
@@ -32,21 +32,30 @@
 private:
   uintptr_t _start;
   uintptr_t _end;
+  bool      _committed;
 
 public:
   ZPhysicalMemorySegment();
-  ZPhysicalMemorySegment(uintptr_t start, size_t size);
+  ZPhysicalMemorySegment(uintptr_t start, size_t size, bool committed);
 
   uintptr_t start() const;
   uintptr_t end() const;
   size_t size() const;
+
+  bool is_committed() const;
+  void set_committed(bool committed);
 };
 
 class ZPhysicalMemory {
 private:
-  size_t                  _nsegments;
+  uint32_t                _nsegments_max;
+  uint32_t                _nsegments;
   ZPhysicalMemorySegment* _segments;
 
+  void insert_segment(uint32_t index, uintptr_t start, size_t size, bool committed);
+  void replace_segment(uint32_t index, uintptr_t start, size_t size, bool committed);
+  void remove_segment(uint32_t index);
+
 public:
   ZPhysicalMemory();
   ZPhysicalMemory(const ZPhysicalMemorySegment& segment);
@@ -57,18 +66,24 @@
   bool is_null() const;
   size_t size() const;
 
-  size_t nsegments() const;
-  const ZPhysicalMemorySegment& segment(size_t index) const;
+  uint32_t nsegments() const;
+  const ZPhysicalMemorySegment& segment(uint32_t index) const;
+
+  void add_segments(const ZPhysicalMemory& pmem);
+  void remove_segments();
+
   void add_segment(const ZPhysicalMemorySegment& segment);
+  bool commit_segment(uint32_t index, size_t size);
+  bool uncommit_segment(uint32_t index, size_t size);
 
   ZPhysicalMemory split(size_t size);
+  ZPhysicalMemory split_committed();
 };
 
 class ZPhysicalMemoryManager {
 private:
   ZPhysicalMemoryBacking _backing;
-  ZMemoryManager         _committed;
-  ZMemoryManager         _uncommitted;
+  ZMemoryManager         _manager;
 
   void nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const;
   void nmt_uncommit(const ZPhysicalMemory& pmem, uintptr_t offset) const;
@@ -82,14 +97,14 @@
 
   bool is_initialized() const;
 
-  void warn_commit_limits(size_t max) const;
-  bool supports_uncommit();
+  void warn_commit_limits(size_t max_capacity) const;
+  void try_enable_uncommit(size_t min_capacity, size_t max_capacity);
 
-  size_t commit(size_t size);
-  size_t uncommit(size_t size);
+  void alloc(ZPhysicalMemory& pmem, size_t size);
+  void free(const ZPhysicalMemory& pmem);
 
-  ZPhysicalMemory alloc(size_t size);
-  void free(const ZPhysicalMemory& pmem);
+  bool commit(ZPhysicalMemory& pmem);
+  bool uncommit(ZPhysicalMemory& pmem);
 
   void pretouch(uintptr_t offset, size_t size) const;
 
--- a/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,11 +29,13 @@
 
 inline ZPhysicalMemorySegment::ZPhysicalMemorySegment() :
     _start(UINTPTR_MAX),
-    _end(UINTPTR_MAX) {}
+    _end(UINTPTR_MAX),
+    _committed(false) {}
 
-inline ZPhysicalMemorySegment::ZPhysicalMemorySegment(uintptr_t start, size_t size) :
+inline ZPhysicalMemorySegment::ZPhysicalMemorySegment(uintptr_t start, size_t size, bool committed) :
     _start(start),
-    _end(start + size) {}
+    _end(start + size),
+    _committed(committed) {}
 
 inline uintptr_t ZPhysicalMemorySegment::start() const {
   return _start;
@@ -47,15 +49,23 @@
   return _end - _start;
 }
 
+inline bool ZPhysicalMemorySegment::is_committed() const {
+  return _committed;
+}
+
+inline void ZPhysicalMemorySegment::set_committed(bool committed) {
+  _committed = committed;
+}
+
 inline bool ZPhysicalMemory::is_null() const {
   return _nsegments == 0;
 }
 
-inline size_t ZPhysicalMemory::nsegments() const {
+inline uint32_t ZPhysicalMemory::nsegments() const {
   return _nsegments;
 }
 
-inline const ZPhysicalMemorySegment& ZPhysicalMemory::segment(size_t index) const {
+inline const ZPhysicalMemorySegment& ZPhysicalMemory::segment(uint32_t index) const {
   assert(index < _nsegments, "Invalid segment index");
   return _segments[index];
 }
--- a/src/hotspot/share/gc/z/zUncommitter.cpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zUncommitter.cpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,52 +23,73 @@
 
 #include "precompiled.hpp"
 #include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zLock.inline.hpp"
+#include "gc/z/zStat.hpp"
 #include "gc/z/zUncommitter.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/os.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "logging/log.hpp"
 
-ZUncommitter::ZUncommitter() :
-    _monitor(Monitor::leaf, "ZUncommitter", false, Monitor::_safepoint_check_never),
+static const ZStatCounter ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond);
+
+ZUncommitter::ZUncommitter(ZPageAllocator* page_allocator) :
+    _page_allocator(page_allocator),
+    _lock(),
     _stop(false) {
   set_name("ZUncommitter");
   create_and_start();
 }
 
-bool ZUncommitter::idle(uint64_t timeout) {
-  // Idle for at least one second
-  const uint64_t expires = os::elapsedTime() + MAX2<uint64_t>(timeout, 1);
+bool ZUncommitter::wait(uint64_t timeout) const {
+  ZLocker<ZConditionLock> locker(&_lock);
+  while (!ZUncommit && !_stop) {
+    _lock.wait();
+  }
 
-  for (;;) {
-    // We might wake up spuriously from wait, so always recalculate
-    // the timeout after a wakeup to see if we need to wait again.
-    const uint64_t now = os::elapsedTime();
-    const uint64_t remaining = expires - MIN2(expires, now);
+  if (!_stop && timeout > 0) {
+    log_debug(gc, heap)("Uncommit Timeout: " UINT64_FORMAT "s", timeout);
+    _lock.wait(timeout * MILLIUNITS);
+  }
 
-    MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag);
-    if (remaining > 0 && !_stop) {
-      ml.wait(remaining * MILLIUNITS);
-    } else {
-      return !_stop;
-    }
-  }
+  return !_stop;
+}
+
+bool ZUncommitter::should_continue() const {
+  ZLocker<ZConditionLock> locker(&_lock);
+  return !_stop;
 }
 
 void ZUncommitter::run_service() {
-  for (;;) {
-    // Try uncommit unused memory
-    const uint64_t timeout = ZHeap::heap()->uncommit(ZUncommitDelay);
+  uint64_t timeout = 0;
 
-    log_trace(gc, heap)("Uncommit Timeout: " UINT64_FORMAT "s", timeout);
+  while (wait(timeout)) {
+    EventZUncommit event;
+    size_t uncommitted = 0;
 
-    // Idle until next attempt
-    if (!idle(timeout)) {
-      return;
+    while (should_continue()) {
+      // Uncommit chunk
+      const size_t flushed = _page_allocator->uncommit(&timeout);
+      if (flushed == 0) {
+        // Done
+        break;
+      }
+
+      uncommitted += flushed;
+    }
+
+    if (uncommitted > 0) {
+      // Update statistics
+      ZStatInc(ZCounterUncommit, uncommitted);
+      log_info(gc, heap)("Uncommitted: " SIZE_FORMAT "M(%.0f%%)",
+                         uncommitted / M, percent_of(uncommitted, ZHeap::heap()->max_capacity()));
+
+      // Send event
+      event.commit(uncommitted);
     }
   }
 }
 
 void ZUncommitter::stop_service() {
-  MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag);
+  ZLocker<ZConditionLock> locker(&_lock);
   _stop = true;
-  ml.notify();
+  _lock.notify_all();
 }
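
The reworked ZUncommitter is a small condition-variable worker: it sleeps until woken or a timeout expires, repeatedly calls back into the page allocator until nothing more can be flushed, and exits promptly once stop_service() sets the stop flag and notifies. A standalone sketch of that wait/work/stop shape using the C++ standard library instead of ZConditionLock and ConcurrentGCThread; UncommitterSketch and its members are invented for illustration:

#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

class UncommitterSketch {
private:
  std::mutex              _mutex;
  std::condition_variable _cv;
  bool                    _stop = false;
  int                     _work_left = 3;   // pretend there are 3 chunks to uncommit

  // Wait until stopped or the timeout elapses; returns true if we should keep running
  bool wait(std::chrono::seconds timeout) {
    std::unique_lock<std::mutex> lock(_mutex);
    _cv.wait_for(lock, timeout, [this]() { return _stop; });
    return !_stop;
  }

  // Stand-in for ZPageAllocator::uncommit(); returns 0 when there is nothing to flush
  int uncommit_chunk() {
    std::lock_guard<std::mutex> lock(_mutex);
    if (_work_left == 0) return 0;
    _work_left--;
    return 1;
  }

public:
  void run_service() {
    while (wait(std::chrono::seconds(1))) {
      int uncommitted = 0;
      while (int chunk = uncommit_chunk()) {
        uncommitted += chunk;                // uncommit one chunk at a time
      }
      if (uncommitted > 0) {
        std::printf("uncommitted %d chunks\n", uncommitted);
      }
    }
  }

  void stop_service() {
    std::lock_guard<std::mutex> lock(_mutex);
    _stop = true;
    _cv.notify_all();
  }
};

int main() {
  UncommitterSketch uncommitter;
  std::thread service([&]() { uncommitter.run_service(); });
  std::this_thread::sleep_for(std::chrono::seconds(2));
  uncommitter.stop_service();
  service.join();
  return 0;
}
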
--- a/src/hotspot/share/gc/z/zUncommitter.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zUncommitter.hpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,22 +24,26 @@
 #ifndef SHARE_GC_Z_ZUNCOMMITTER_HPP
 #define SHARE_GC_Z_ZUNCOMMITTER_HPP
 
+#include "gc/z/zLock.hpp"
 #include "gc/shared/concurrentGCThread.hpp"
-#include "runtime/mutex.hpp"
+
+class ZPageAllocator;
 
 class ZUncommitter : public ConcurrentGCThread {
 private:
-  Monitor _monitor;
-  bool    _stop;
+  ZPageAllocator* const  _page_allocator;
+  mutable ZConditionLock _lock;
+  bool                   _stop;
 
-  bool idle(uint64_t timeout);
+  bool wait(uint64_t timeout) const;
+  bool should_continue() const;
 
 protected:
   virtual void run_service();
   virtual void stop_service();
 
 public:
-  ZUncommitter();
+  ZUncommitter(ZPageAllocator* page_allocator);
 };
 
 #endif // SHARE_GC_Z_ZUNCOMMITTER_HPP
--- a/src/hotspot/share/gc/z/zVirtualMemory.cpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zVirtualMemory.cpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -151,14 +151,14 @@
   return _initialized;
 }
 
-ZVirtualMemory ZVirtualMemoryManager::alloc(size_t size, bool alloc_from_front) {
+ZVirtualMemory ZVirtualMemoryManager::alloc(size_t size, bool force_low_address) {
   uintptr_t start;
 
-  if (alloc_from_front || size <= ZPageSizeSmall) {
-    // Small page
+  // Small pages are allocated at low addresses, while medium/large pages
+  // are allocated at high addresses (unless forced to be at a low address).
+  if (force_low_address || size <= ZPageSizeSmall) {
     start = _manager.alloc_from_front(size);
   } else {
-    // Medium/Large page
     start = _manager.alloc_from_back(size);
   }
 
--- a/src/hotspot/share/gc/z/zVirtualMemory.hpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/gc/z/zVirtualMemory.hpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,7 +65,7 @@
 
   bool is_initialized() const;
 
-  ZVirtualMemory alloc(size_t size, bool alloc_from_front = false);
+  ZVirtualMemory alloc(size_t size, bool force_low_address);
   void free(const ZVirtualMemory& vmem);
 };
 
--- a/src/hotspot/share/jfr/metadata/metadata.xml	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/hotspot/share/jfr/metadata/metadata.xml	Tue Jun 09 11:01:09 2020 +0200
@@ -1002,18 +1002,13 @@
   <Event name="ZPageAllocation" category="Java Virtual Machine, GC, Detailed" label="ZGC Page Allocation" description="Allocation of a ZPage" thread="true" stackTrace="true">
      <Field type="ZPageTypeType" name="type" label="Type" />
      <Field type="ulong" contentType="bytes" name="size" label="Size" />
-     <Field type="ulong" contentType="bytes" name="usedAfter" label="Used After" />
-     <Field type="ulong" contentType="bytes" name="freeAfter" label="Free After" />
-     <Field type="ulong" contentType="bytes" name="inCacheAfter" label="In Cache After" />
+     <Field type="ulong" contentType="bytes" name="flushed" label="Flushed" />
+     <Field type="ulong" contentType="bytes" name="committed" label="Committed" />
+     <Field type="uint" name="segments" label="Segments" />
      <Field type="boolean" name="nonBlocking" label="Non-blocking" />
      <Field type="boolean" name="noReserve" label="No Reserve" />
   </Event>
 
-  <Event name="ZPageCacheFlush" category="Java Virtual Machine, GC, Detailed" label="ZGC Page Cache Flush" description="Flushing of ZPages" thread="true" stackTrace="true">
-     <Field type="ulong" contentType="bytes" name="flushed" label="Flushed Size" />
-     <Field type="boolean" name="forAllocation" label="For Allocation" />
-  </Event>
-
   <Event name="ZRelocationSet" category="Java Virtual Machine, GC, Detailed" label="ZGC Relocation Set" thread="true">
     <Field type="ulong" contentType="bytes" name="total" label="Total" />
     <Field type="ulong" contentType="bytes" name="empty" label="Empty" />
@@ -1047,8 +1042,6 @@
   </Event>
 
   <Event name="ZUncommit" category="Java Virtual Machine, GC, Detailed" label="ZGC Uncommit" description="Uncommitting of memory" thread="true">
-    <Field type="ulong" contentType="bytes" name="capacityBefore" label="Capacity Before" />
-    <Field type="ulong" contentType="bytes" name="capacityAfter" label="Capacity After" />
     <Field type="ulong" contentType="bytes" name="uncommitted" label="Uncommitted" />
   </Event>
 
--- a/src/jdk.jfr/share/conf/jfr/default.jfc	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/jdk.jfr/share/conf/jfr/default.jfc	Tue Jun 09 11:01:09 2020 +0200
@@ -721,12 +721,6 @@
       <setting name="threshold">1 ms</setting>
     </event>
 
-    <event name="jdk.ZPageCacheFlush">
-      <setting name="enabled">true</setting>
-      <setting name="stackTrace">true</setting>
-      <setting name="threshold">0 ms</setting>
-    </event>
-
     <event name="jdk.ZRelocationSet">
       <setting name="enabled">true</setting>
       <setting name="threshold">0 ms</setting>
--- a/src/jdk.jfr/share/conf/jfr/profile.jfc	Tue Jun 09 11:01:09 2020 +0200
+++ b/src/jdk.jfr/share/conf/jfr/profile.jfc	Tue Jun 09 11:01:09 2020 +0200
@@ -721,12 +721,6 @@
       <setting name="threshold">1 ms</setting>
     </event>
 
-    <event name="jdk.ZPageCacheFlush">
-      <setting name="enabled">true</setting>
-      <setting name="stackTrace">true</setting>
-      <setting name="threshold">0 ms</setting>
-    </event>
-
     <event name="jdk.ZRelocationSet">
       <setting name="enabled">true</setting>
       <setting name="threshold">0 ms</setting>
--- a/test/hotspot/gtest/gc/z/test_zForwarding.cpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/test/hotspot/gtest/gc/z/test_zForwarding.cpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -140,7 +140,7 @@
   static void test(void (*function)(ZForwarding*), uint32_t size) {
     // Create page
     const ZVirtualMemory vmem(0, ZPageSizeSmall);
-    const ZPhysicalMemory pmem(ZPhysicalMemorySegment(0, ZPageSizeSmall));
+    const ZPhysicalMemory pmem(ZPhysicalMemorySegment(0, ZPageSizeSmall, true));
     ZPage page(ZPageTypeSmall, vmem, pmem);
 
     page.reset();
--- a/test/hotspot/gtest/gc/z/test_zPhysicalMemory.cpp	Tue Jun 09 11:01:09 2020 +0200
+++ b/test/hotspot/gtest/gc/z/test_zPhysicalMemory.cpp	Tue Jun 09 11:01:09 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,8 +26,8 @@
 #include "unittest.hpp"
 
 TEST(ZPhysicalMemoryTest, copy) {
-  const ZPhysicalMemorySegment seg0(0, 100);
-  const ZPhysicalMemorySegment seg1(200, 100);
+  const ZPhysicalMemorySegment seg0(0, 100, true);
+  const ZPhysicalMemorySegment seg1(200, 100, true);
 
   ZPhysicalMemory pmem0;
   pmem0.add_segment(seg0);
@@ -51,14 +51,14 @@
   EXPECT_EQ(pmem2.segment(1).size(), 100u);
 }
 
-TEST(ZPhysicalMemoryTest, segments) {
-  const ZPhysicalMemorySegment seg0(0, 1);
-  const ZPhysicalMemorySegment seg1(1, 1);
-  const ZPhysicalMemorySegment seg2(2, 1);
-  const ZPhysicalMemorySegment seg3(3, 1);
-  const ZPhysicalMemorySegment seg4(4, 1);
-  const ZPhysicalMemorySegment seg5(5, 1);
-  const ZPhysicalMemorySegment seg6(6, 1);
+TEST(ZPhysicalMemoryTest, add) {
+  const ZPhysicalMemorySegment seg0(0, 1, true);
+  const ZPhysicalMemorySegment seg1(1, 1, true);
+  const ZPhysicalMemorySegment seg2(2, 1, true);
+  const ZPhysicalMemorySegment seg3(3, 1, true);
+  const ZPhysicalMemorySegment seg4(4, 1, true);
+  const ZPhysicalMemorySegment seg5(5, 1, true);
+  const ZPhysicalMemorySegment seg6(6, 1, true);
 
   ZPhysicalMemory pmem0;
   EXPECT_EQ(pmem0.nsegments(), 0u);
@@ -113,12 +113,28 @@
   EXPECT_EQ(pmem4.is_null(), false);
 }
 
+TEST(ZPhysicalMemoryTest, remove) {
+  ZPhysicalMemory pmem;
+
+  pmem.add_segment(ZPhysicalMemorySegment(10, 10, true));
+  pmem.add_segment(ZPhysicalMemorySegment(30, 10, true));
+  pmem.add_segment(ZPhysicalMemorySegment(50, 10, true));
+  EXPECT_EQ(pmem.nsegments(), 3u);
+  EXPECT_EQ(pmem.size(), 30u);
+  EXPECT_FALSE(pmem.is_null());
+
+  pmem.remove_segments();
+  EXPECT_EQ(pmem.nsegments(), 0u);
+  EXPECT_EQ(pmem.size(), 0u);
+  EXPECT_TRUE(pmem.is_null());
+}
+
 TEST(ZPhysicalMemoryTest, split) {
   ZPhysicalMemory pmem;
 
-  pmem.add_segment(ZPhysicalMemorySegment(0, 10));
-  pmem.add_segment(ZPhysicalMemorySegment(10, 10));
-  pmem.add_segment(ZPhysicalMemorySegment(30, 10));
+  pmem.add_segment(ZPhysicalMemorySegment(0, 10, true));
+  pmem.add_segment(ZPhysicalMemorySegment(10, 10, true));
+  pmem.add_segment(ZPhysicalMemorySegment(30, 10, true));
   EXPECT_EQ(pmem.nsegments(), 2u);
   EXPECT_EQ(pmem.size(), 30u);
 
@@ -140,3 +156,19 @@
   EXPECT_EQ(pmem.nsegments(), 0u);
   EXPECT_EQ(pmem.size(), 0u);
 }
+
+TEST(ZPhysicalMemoryTest, split_committed) {
+  ZPhysicalMemory pmem0;
+  pmem0.add_segment(ZPhysicalMemorySegment(0, 10, true));
+  pmem0.add_segment(ZPhysicalMemorySegment(10, 10, false));
+  pmem0.add_segment(ZPhysicalMemorySegment(20, 10, true));
+  pmem0.add_segment(ZPhysicalMemorySegment(30, 10, false));
+  EXPECT_EQ(pmem0.nsegments(), 4u);
+  EXPECT_EQ(pmem0.size(), 40u);
+
+  ZPhysicalMemory pmem1 = pmem0.split_committed();
+  EXPECT_EQ(pmem0.nsegments(), 2u);
+  EXPECT_EQ(pmem0.size(), 20u);
+  EXPECT_EQ(pmem1.nsegments(), 2u);
+  EXPECT_EQ(pmem1.size(), 20u);
+}
--- a/test/hotspot/jtreg/gc/z/TestUncommit.java	Tue Jun 09 11:01:09 2020 +0200
+++ b/test/hotspot/jtreg/gc/z/TestUncommit.java	Tue Jun 09 11:01:09 2020 +0200
@@ -25,16 +25,11 @@
 
 /*
  * @test TestUncommit
- * @requires vm.gc.Z & !vm.graal.enabled & vm.compMode != "Xcomp"
+ * @requires vm.gc.Z & !vm.graal.enabled
  * @summary Test ZGC uncommit unused memory
- * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+stats=off -Xms128M -Xmx512M -XX:ZUncommitDelay=10 gc.z.TestUncommit true 2
- * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+stats=off -Xms512M -Xmx512M -XX:ZUncommitDelay=10 gc.z.TestUncommit false 1
- * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+stats=off -Xms128M -Xmx512M -XX:ZUncommitDelay=10 -XX:-ZUncommit gc.z.TestUncommit false 1
- */
-
-/*
- * This test is disabled when running with -Xcomp, since it seems to affect
- * the timing of the test, causing memory to appear to be uncommitted too fast.
+ * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+heap=debug,gc+stats=off -Xms128M -Xmx512M -XX:ZUncommitDelay=10 gc.z.TestUncommit true 2
+ * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+heap=debug,gc+stats=off -Xms512M -Xmx512M -XX:ZUncommitDelay=10 gc.z.TestUncommit false 1
+ * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+heap=debug,gc+stats=off -Xms128M -Xmx512M -XX:ZUncommitDelay=10 -XX:-ZUncommit gc.z.TestUncommit false 1
  */
 
 import java.util.ArrayList;
@@ -98,8 +93,7 @@
         // Verify
         if (enabled) {
             if (beforeUncommit == beforeAlloc) {
-                // Temporarily disabled pending JDK-8245208
-                // throw new Exception("Uncommitted too fast");
+                throw new Exception("Uncommitted too fast");
             }
 
             if (afterUncommit >= afterAlloc) {
--- a/test/jdk/jdk/jfr/event/gc/detailed/TestZPageCacheFlushEvent.java	Tue Jun 09 11:01:09 2020 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.detailed;
-
-import java.util.List;
-
-import static gc.testlibrary.Allocation.blackHole;
-import jdk.jfr.Recording;
-import jdk.jfr.consumer.RecordedEvent;
-import jdk.test.lib.jfr.EventNames;
-import jdk.test.lib.jfr.Events;
-
-/**
- * @test TestZPageCacheFlushEvent
- * @requires vm.hasJFR & vm.gc.Z
- * @key jfr
- * @library /test/lib /test/jdk /test/hotspot/jtreg
- * @run main/othervm -XX:+UseZGC -Xmx32M jdk.jfr.event.gc.detailed.TestZPageCacheFlushEvent
- */
-
-public class TestZPageCacheFlushEvent {
-    public static void main(String[] args) throws Exception {
-        try (Recording recording = new Recording()) {
-            // Activate the event we are interested in and start recording
-            recording.enable(EventNames.ZPageCacheFlush);
-            recording.start();
-
-            // Allocate non-large objects, to fill page cache with non-large pages
-            for (int i = 0; i < 128; i++) {
-                blackHole(new byte[256 * 1024]);
-            }
-
-            // Allocate large objects, to provoke page cache flushing
-            for (int i = 0; i < 10; i++) {
-                blackHole(new byte[7 * 1024 * 1024]);
-            }
-
-            recording.stop();
-
-            // Verify recording
-            List<RecordedEvent> events = Events.fromRecording(recording);
-            System.out.println("Events: " + events.size());
-            Events.hasEvents(events);
-        }
-    }
-}
--- a/test/jdk/jdk/jfr/event/gc/detailed/TestZUncommitEvent.java	Tue Jun 09 11:01:09 2020 +0200
+++ b/test/jdk/jdk/jfr/event/gc/detailed/TestZUncommitEvent.java	Tue Jun 09 11:01:09 2020 +0200
@@ -63,12 +63,6 @@
             List<RecordedEvent> events = Events.fromRecording(recording);
             System.out.println("Events: " + events.size());
             Events.hasEvents(events);
-            for (RecordedEvent event : Events.fromRecording(recording)) {
-                System.out.println("Event:" + event);
-                final long capacityBefore = Events.assertField(event, "capacityBefore").getValue();
-                final long capacityAfter = Events.assertField(event, "capacityAfter").below(capacityBefore).getValue();
-                Events.assertField(event, "uncommitted").equal(capacityBefore - capacityAfter);
-            }
         }
     }
 }
--- a/test/lib/jdk/test/lib/jfr/EventNames.java	Tue Jun 09 11:01:09 2020 +0200
+++ b/test/lib/jdk/test/lib/jfr/EventNames.java	Tue Jun 09 11:01:09 2020 +0200
@@ -144,7 +144,6 @@
     public final static String GCPhaseConcurrentLevel1 = PREFIX + "GCPhaseConcurrentLevel1";
     public final static String ZAllocationStall = PREFIX + "ZAllocationStall";
     public final static String ZPageAllocation = PREFIX + "ZPageAllocation";
-    public final static String ZPageCacheFlush = PREFIX + "ZPageCacheFlush";
     public final static String ZRelocationSet = PREFIX + "ZRelocationSet";
     public final static String ZRelocationSetGroup = PREFIX + "ZRelocationSetGroup";
     public final static String ZUncommit = PREFIX + "ZUncommit";