changeset 59707:649ca60ef516

8237354: Add option to jcmd to write a gzipped heap dump
Reviewed-by: rrich, clanger, goetz
author rschmelter
date Wed, 10 Jun 2020 12:29:01 +0200
parents fca27f266161
children 0b59aac6c848
files src/hotspot/share/gc/shared/workgroup.cpp src/hotspot/share/gc/shared/workgroup.hpp src/hotspot/share/services/diagnosticCommand.cpp src/hotspot/share/services/diagnosticCommand.hpp src/hotspot/share/services/heapDumper.cpp src/hotspot/share/services/heapDumper.hpp src/hotspot/share/services/heapDumperCompression.cpp src/hotspot/share/services/heapDumperCompression.hpp src/java.base/share/native/libzip/zip_util.c test/hotspot/jtreg/serviceability/dcmd/gc/HeapDumpCompressedTest.java test/lib/jdk/test/lib/hprof/parser/GzipRandomAccess.java test/lib/jdk/test/lib/hprof/parser/HprofReader.java test/lib/jdk/test/lib/hprof/parser/Reader.java
diffstat 13 files changed, 1709 insertions(+), 122 deletions(-)
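In outline: VM_HeapDumper still produces the dump at a safepoint, but the DumpWriter no longer writes to the file itself. It hands filled buffers to a new CompressionBackend, which queues them, lets the safepoint worker gang gzip them through a new ZIP_GZip_Fully entry point in libzip, and writes the compressed chunks out in their original order. Without -gz the backend degenerates to plain buffered file writing.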
--- a/src/hotspot/share/gc/shared/workgroup.cpp	Wed Jun 10 11:08:19 2020 +0100
+++ b/src/hotspot/share/gc/shared/workgroup.cpp	Wed Jun 10 12:29:01 2020 +0200
@@ -94,6 +94,17 @@
   }
 }
 
+static void run_foreground_task_if_needed(AbstractGangTask* task, uint num_workers,
+                                          bool add_foreground_work) {
+  if (add_foreground_work) {
+    log_develop_trace(gc, workgang)("Running work gang: %s task: %s worker: foreground",
+      Thread::current()->name(), task->name());
+    task->work(num_workers);
+    log_develop_trace(gc, workgang)("Finished work gang: %s task: %s worker: foreground "
+      "thread: " PTR_FORMAT, Thread::current()->name(), task->name(), p2i(Thread::current()));
+  }
+}
+
 // WorkGang dispatcher implemented with semaphores.
 //
 // Semaphores don't require the worker threads to re-claim the lock when they wake up.
@@ -124,7 +135,7 @@
     delete _end_semaphore;
   }
 
-  void coordinator_execute_on_workers(AbstractGangTask* task, uint num_workers) {
+  void coordinator_execute_on_workers(AbstractGangTask* task, uint num_workers, bool add_foreground_work) {
     // No workers are allowed to read the state variables until they have been signaled.
     _task         = task;
     _not_finished = num_workers;
@@ -132,6 +143,8 @@
     // Dispatch 'num_workers' number of tasks.
     _start_semaphore->signal(num_workers);
 
+    run_foreground_task_if_needed(task, num_workers, add_foreground_work);
+
     // Wait for the last worker to signal the coordinator.
     _end_semaphore->wait();
 
@@ -188,7 +201,7 @@
     delete _monitor;
   }
 
-  void coordinator_execute_on_workers(AbstractGangTask* task, uint num_workers) {
+  void coordinator_execute_on_workers(AbstractGangTask* task, uint num_workers, bool add_foreground_work) {
     MonitorLocker ml(_monitor, Mutex::_no_safepoint_check_flag);
 
     _task        = task;
@@ -197,6 +210,8 @@
     // Tell the workers to get to work.
     _monitor->notify_all();
 
+    run_foreground_task_if_needed(task, num_workers, add_foreground_work);
+
     // Wait for them to finish.
     while (_finished < _num_workers) {
       ml.wait();
@@ -263,14 +278,14 @@
   run_task(task, active_workers());
 }
 
-void WorkGang::run_task(AbstractGangTask* task, uint num_workers) {
+void WorkGang::run_task(AbstractGangTask* task, uint num_workers, bool add_foreground_work) {
   guarantee(num_workers <= total_workers(),
             "Trying to execute task %s with %u workers which is more than the amount of total workers %u.",
             task->name(), num_workers, total_workers());
   guarantee(num_workers > 0, "Trying to execute task %s with zero workers", task->name());
   uint old_num_workers = _active_workers;
   update_active_workers(num_workers);
-  _dispatcher->coordinator_execute_on_workers(task, num_workers);
+  _dispatcher->coordinator_execute_on_workers(task, num_workers, add_foreground_work);
   update_active_workers(old_num_workers);
 }
 
--- a/src/hotspot/share/gc/shared/workgroup.hpp	Wed Jun 10 11:08:19 2020 +0100
+++ b/src/hotspot/share/gc/shared/workgroup.hpp	Wed Jun 10 12:29:01 2020 +0200
@@ -90,7 +90,8 @@
 
   // Distributes the task out to num_workers workers.
   // Returns when the task has been completed by all workers.
-  virtual void coordinator_execute_on_workers(AbstractGangTask* task, uint num_workers) = 0;
+  virtual void coordinator_execute_on_workers(AbstractGangTask* task, uint num_workers,
+                                              bool add_foreground_work) = 0;
 
   // Worker API.
 
@@ -215,8 +216,9 @@
   // Run a task with the given number of workers, returns
   // when the task is done. The number of workers must be at most the number of
   // active workers.  Additional workers may be created if an insufficient
-  // number currently exists.
-  void run_task(AbstractGangTask* task, uint num_workers);
+  // number currently exists. If the add_foreground_work flag is true, the current thread
+  // is used to run the task too.
+  void run_task(AbstractGangTask* task, uint num_workers, bool add_foreground_work = false);
 
 protected:
   virtual AbstractGangWorker* allocate_worker(uint which);
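The add_foreground_work flag exists because, at a safepoint, the coordinating VM thread would otherwise sit idle in coordinator_execute_on_workers() while the gang runs. A minimal sketch of the intended call pattern, mirroring VM_HeapDumper::doit() further down in this change ('heap' and 'task' are illustrative stand-ins; error handling elided):

    WorkGang* gang = heap->get_safepoint_workers();
    if (gang == NULL) {
      task.work(0);   // no gang configured: do everything in the current thread
    } else {
      // The gang threads run task.work(), and so does the calling thread.
      gang->run_task(&task, gang->active_workers(), true /* add_foreground_work */);
    }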
--- a/src/hotspot/share/services/diagnosticCommand.cpp	Wed Jun 10 11:08:19 2020 +0100
+++ b/src/hotspot/share/services/diagnosticCommand.cpp	Wed Jun 10 12:29:01 2020 +0200
@@ -504,17 +504,32 @@
                            DCmdWithParser(output, heap),
   _filename("filename","Name of the dump file", "STRING",true),
   _all("-all", "Dump all objects, including unreachable objects",
-       "BOOLEAN", false, "false") {
+       "BOOLEAN", false, "false"),
+  _gzip("-gz", "If specified, the heap dump is written in gzipped format "
+               "using the given compression level. 1 (recommended) is the fastest, "
+               "9 the strongest compression.", "INT", false, "1") {
   _dcmdparser.add_dcmd_option(&_all);
   _dcmdparser.add_dcmd_argument(&_filename);
+  _dcmdparser.add_dcmd_option(&_gzip);
 }
 
 void HeapDumpDCmd::execute(DCmdSource source, TRAPS) {
+  jlong level = -1; // -1 means no compression.
+
+  if (_gzip.is_set()) {
+    level = _gzip.value();
+
+    if (level < 1 || level > 9) {
+      output()->print_cr("Compression level out of range (1-9): " JLONG_FORMAT, level);
+      return;
+    }
+  }
+
   // Request a full GC before heap dump if _all is false
   // This helps reduce the number of unreachable objects in the dump
   // and makes it easier to browse.
   HeapDumper dumper(!_all.value() /* request GC if _all is false*/);
-  dumper.dump(_filename.value(), output());
+  dumper.dump(_filename.value(), output(), (int) level);
 }
 
 int HeapDumpDCmd::num_arguments() {
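With the option wired into GC.heap_dump, a gzipped dump can be requested from the command line, e.g. (pid and path illustrative):

    jcmd <pid> GC.heap_dump -gz=1 /tmp/heap.hprof.gz

A level outside 1-9 is rejected up front, and omitting -gz leaves the level at -1, i.e. the old uncompressed behavior.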
--- a/src/hotspot/share/services/diagnosticCommand.hpp	Wed Jun 10 11:08:19 2020 +0100
+++ b/src/hotspot/share/services/diagnosticCommand.hpp	Wed Jun 10 12:29:01 2020 +0200
@@ -330,6 +330,7 @@
 protected:
   DCmdArgument<char*> _filename;
   DCmdArgument<bool>  _all;
+  DCmdArgument<jlong> _gzip;
 public:
   HeapDumpDCmd(outputStream* output, bool heap);
   static const char* name() {
--- a/src/hotspot/share/services/heapDumper.cpp	Wed Jun 10 11:08:19 2020 +0100
+++ b/src/hotspot/share/services/heapDumper.cpp	Wed Jun 10 12:29:01 2020 +0200
@@ -32,6 +32,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcVMOperations.hpp"
+#include "gc/shared/workgroup.hpp"
 #include "jfr/jfrEvents.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
@@ -52,6 +53,7 @@
 #include "runtime/vmThread.hpp"
 #include "runtime/vmOperations.hpp"
 #include "services/heapDumper.hpp"
+#include "services/heapDumperCompression.hpp"
 #include "services/threadService.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
@@ -377,19 +379,16 @@
   INITIAL_CLASS_COUNT = 200
 };
 
-// Supports I/O operations on a dump file
+// Supports I/O operations for a dump
 
 class DumpWriter : public StackObj {
  private:
   enum {
-    io_buffer_max_size = 8*M,
-    io_buffer_min_size = 64*K,
+    io_buffer_max_size = 1*M,
+    io_buffer_max_waste = 10*K,
     dump_segment_header_size = 9
   };
 
-  int _fd;              // file descriptor (-1 if dump file not open)
-  julong _bytes_written; // number of byte written to dump file
-
   char* _buffer;    // internal buffer
   size_t _size;
   size_t _pos;
@@ -399,12 +398,8 @@
   DEBUG_ONLY(size_t _sub_record_left;) // The bytes not written for the current sub-record.
   DEBUG_ONLY(bool _sub_record_ended;) // True if we have called the end_sub_record().
 
-  char* _error;   // error message when I/O fails
+  CompressionBackend _backend; // Does the actual writing.
 
-  void set_file_descriptor(int fd)              { _fd = fd; }
-  int file_descriptor() const                   { return _fd; }
-
-  bool is_open() const                          { return file_descriptor() >= 0; }
   void flush();
 
   char* buffer() const                          { return _buffer; }
@@ -412,25 +407,26 @@
   size_t position() const                       { return _pos; }
   void set_position(size_t pos)                 { _pos = pos; }
 
-  void set_error(const char* error)             { _error = (char*)os::strdup(error); }
+  // Can be called if we have enough room in the buffer.
+  void write_fast(void* s, size_t len);
 
-  // all I/O go through this function
-  void write_internal(void* s, size_t len);
+  // Returns true if we have enough room in the buffer for 'len' bytes.
+  bool can_write_fast(size_t len);
 
  public:
-  DumpWriter(const char* path);
+  // Takes ownership of the writer and compressor.
+  DumpWriter(AbstractWriter* writer, AbstractCompressor* compressor);
+
   ~DumpWriter();
 
-  void close();
+  // total number of bytes written to the disk
+  julong bytes_written() const          { return (julong) _backend.get_written(); }
 
-  // total number of bytes written to the disk
-  julong bytes_written() const          { return _bytes_written; }
-
-  char* error() const                   { return _error; }
+  char const* error() const             { return _backend.error(); }
 
   // writer functions
   void write_raw(void* s, size_t len);
-  void write_u1(u1 x)                   { write_raw((void*)&x, 1); }
+  void write_u1(u1 x);
   void write_u2(u2 x);
   void write_u4(u4 x);
   void write_u8(u8 x);
@@ -445,70 +441,38 @@
   void end_sub_record();
   // Finishes the current dump segment if not already finished.
   void finish_dump_segment();
+
+  // Called by threads used for parallel writing.
+  void writer_loop()                    { _backend.thread_loop(false); }
+  // Called when finished to release the threads.
+  void deactivate()                     { flush(); _backend.deactivate(); }
 };
 
-DumpWriter::DumpWriter(const char* path) : _fd(-1), _bytes_written(0), _pos(0),
-                                           _in_dump_segment(false), _error(NULL) {
-  // try to allocate an I/O buffer of io_buffer_size. If there isn't
-  // sufficient memory then reduce size until we can allocate something.
-  _size = io_buffer_max_size;
-  do {
-    _buffer = (char*)os::malloc(_size, mtInternal);
-    if (_buffer == NULL) {
-      _size = _size >> 1;
-    }
-  } while (_buffer == NULL && _size >= io_buffer_min_size);
-
-  if (_buffer == NULL) {
-    set_error("Could not allocate buffer memory for heap dump");
-  } else {
-    _fd = os::create_binary_file(path, false);    // don't replace existing file
-
-    // if the open failed we record the error
-    if (_fd < 0) {
-      set_error(os::strerror(errno));
-    }
-  }
+// Check for error after constructing the object and destroy it in case of an error.
+DumpWriter::DumpWriter(AbstractWriter* writer, AbstractCompressor* compressor) :
+  _buffer(NULL),
+  _size(0),
+  _pos(0),
+  _in_dump_segment(false),
+  _backend(writer, compressor, io_buffer_max_size, io_buffer_max_waste) {
+  flush();
 }
 
 DumpWriter::~DumpWriter() {
-  close();
-  os::free(_buffer);
-  os::free(_error);
+  flush();
 }
 
-// closes dump file (if open)
-void DumpWriter::close() {
-  // flush and close dump file
-  if (is_open()) {
-    flush();
-    os::close(file_descriptor());
-    set_file_descriptor(-1);
-  }
+void DumpWriter::write_fast(void* s, size_t len) {
+  assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
+  assert(buffer_size() - position() >= len, "Must fit");
+  debug_only(_sub_record_left -= len);
+
+  memcpy(buffer() + position(), s, len);
+  set_position(position() + len);
 }
 
-// write directly to the file
-void DumpWriter::write_internal(void* s, size_t len) {
-  if (is_open()) {
-    const char* pos = (char*)s;
-    ssize_t n = 0;
-    while (len > 0) {
-      uint tmp = (uint)MIN2(len, (size_t)INT_MAX);
-      n = os::write(file_descriptor(), pos, tmp);
-
-      if (n < 0) {
-        // EINTR cannot happen here, os::write will take care of that
-        set_error(os::strerror(errno));
-        os::close(file_descriptor());
-        set_file_descriptor(-1);
-        return;
-      }
-
-      _bytes_written += n;
-      pos += n;
-      len -= n;
-    }
-  }
+bool DumpWriter::can_write_fast(size_t len) {
+  return buffer_size() - position() >= len;
 }
 
 // write raw bytes
@@ -516,17 +480,17 @@
   assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
   debug_only(_sub_record_left -= len);
 
-  // flush buffer to make room
-  if (len > buffer_size() - position()) {
-    assert(!_in_dump_segment || _is_huge_sub_record, "Cannot overflow in non-huge sub-record.");
+  // flush buffer to make room.
+  while (len > buffer_size() - position()) {
+    assert(!_in_dump_segment || _is_huge_sub_record,
+           "Cannot overflow in non-huge sub-record.");
+
+    size_t to_write = buffer_size() - position();
+    memcpy(buffer() + position(), s, to_write);
+    s = (void*) ((char*) s + to_write);
+    len -= to_write;
+    set_position(position() + to_write);
     flush();
-
-    // If larger than the buffer, just write it directly.
-    if (len > buffer_size()) {
-      write_internal(s, len);
-
-      return;
-    }
   }
 
   memcpy(buffer() + position(), s, len);
@@ -535,26 +499,33 @@
 
 // flush any buffered bytes to the file
 void DumpWriter::flush() {
-  write_internal(buffer(), position());
-  set_position(0);
+  _backend.get_new_buffer(&_buffer, &_pos, &_size);
+}
+
+// Makes sure we inline the fast write into the write_u* functions. This is a big speedup.
+#define WRITE_KNOWN_TYPE(p, len) do { if (can_write_fast((len))) write_fast((p), (len)); \
+                                      else write_raw((p), (len)); } while (0)
+
+void DumpWriter::write_u1(u1 x) {
+  WRITE_KNOWN_TYPE((void*) &x, 1);
 }
 
 void DumpWriter::write_u2(u2 x) {
   u2 v;
   Bytes::put_Java_u2((address)&v, x);
-  write_raw((void*)&v, 2);
+  WRITE_KNOWN_TYPE((void*)&v, 2);
 }
 
 void DumpWriter::write_u4(u4 x) {
   u4 v;
   Bytes::put_Java_u4((address)&v, x);
-  write_raw((void*)&v, 4);
+  WRITE_KNOWN_TYPE((void*)&v, 4);
 }
 
 void DumpWriter::write_u8(u8 x) {
   u8 v;
   Bytes::put_Java_u8((address)&v, x);
-  write_raw((void*)&v, 8);
+  WRITE_KNOWN_TYPE((void*)&v, 8);
 }
 
 void DumpWriter::write_objectID(oop o) {
@@ -597,7 +568,8 @@
     // (in which case the segment length was already set to the correct value initially).
     if (!_is_huge_sub_record) {
       assert(position() > dump_segment_header_size, "Dump segment should have some content");
-      Bytes::put_Java_u4((address) (buffer() + 5), (u4) (position() - dump_segment_header_size));
+      Bytes::put_Java_u4((address) (buffer() + 5),
+                         (u4) (position() - dump_segment_header_size));
     }
 
     flush();
@@ -609,9 +581,10 @@
   if (!_in_dump_segment) {
     if (position() > 0) {
       flush();
-      assert(position() == 0, "Must be at the start");
     }
 
+    assert(position() == 0, "Must be at the start");
+
     write_u1(HPROF_HEAP_DUMP_SEGMENT);
     write_u4(0); // timestamp
     // Will be fixed up later if we add more sub-records.  If this is a huge sub-record,
@@ -1503,7 +1476,7 @@
 }
 
 // The VM operation that performs the heap dump
-class VM_HeapDumper : public VM_GC_Operation {
+class VM_HeapDumper : public VM_GC_Operation, public AbstractGangTask {
  private:
   static VM_HeapDumper* _global_dumper;
   static DumpWriter*    _global_writer;
@@ -1559,7 +1532,8 @@
     VM_GC_Operation(0 /* total collections,      dummy, ignored */,
                     GCCause::_heap_dump /* GC Cause */,
                     0 /* total full collections, dummy, ignored */,
-                    gc_before_heap_dump) {
+                    gc_before_heap_dump),
+    AbstractGangTask("dump heap") {
     _local_writer = writer;
     _gc_before_heap_dump = gc_before_heap_dump;
     _klass_map = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Klass*>(INITIAL_CLASS_COUNT, true);
@@ -1590,8 +1564,10 @@
 
   VMOp_Type type() const { return VMOp_HeapDumper; }
   void doit();
+  void work(uint worker_id);
 };
 
+
 VM_HeapDumper* VM_HeapDumper::_global_dumper = NULL;
 DumpWriter*    VM_HeapDumper::_global_writer = NULL;
 
@@ -1820,8 +1796,26 @@
   set_global_dumper();
   set_global_writer();
 
+  WorkGang* gang = ch->get_safepoint_workers();
+
+  if (gang == NULL) {
+    work(0);
+  } else {
+    gang->run_task(this, gang->active_workers(), true);
+  }
+
+  // Now we clear the global variables, so that a future dumper can run.
+  clear_global_dumper();
+  clear_global_writer();
+}
+
+void VM_HeapDumper::work(uint worker_id) {
+  if (!Thread::current()->is_VM_thread()) {
+    writer()->writer_loop();
+    return;
+  }
+
   // Write the file header - we always use 1.0.2
-  size_t used = ch->used();
   const char* header = "JAVA PROFILE 1.0.2";
 
   // header is few bytes long - no chance to overflow int
@@ -1884,9 +1878,8 @@
   // Writes the HPROF_HEAP_DUMP_END record.
   DumperSupport::end_of_dump(writer());
 
-  // Now we clear the global variables, so that a future dumper might run.
-  clear_global_dumper();
-  clear_global_writer();
+  // We are done with writing. Release the worker threads.
+  writer()->deactivate();
 }
 
 void VM_HeapDumper::dump_stack_traces() {
@@ -1902,6 +1895,7 @@
     oop threadObj = thread->threadObj();
     if (threadObj != NULL && !thread->is_exiting() && !thread->is_hidden_from_external_view()) {
       // dump thread stack trace
+      ResourceMark rm;
       ThreadStackTrace* stack_trace = new ThreadStackTrace(thread, false);
       stack_trace->dump_stack_at_safepoint(-1);
       _stack_traces[_num_threads++] = stack_trace;
@@ -1944,7 +1938,7 @@
 }
 
 // dump the heap to given path.
-int HeapDumper::dump(const char* path, outputStream* out) {
+int HeapDumper::dump(const char* path, outputStream* out, int compression) {
   assert(path != NULL && strlen(path) > 0, "path missing");
 
   // print message in interactive case
@@ -1956,8 +1950,19 @@
   // create JFR event
   EventHeapDump event;
 
-  // create the dump writer. If the file can be opened then bail
-  DumpWriter writer(path);
+  AbstractCompressor* compressor = NULL;
+
+  if (compression > 0) {
+    compressor = new (std::nothrow) GZipCompressor(compression);
+
+    if (compressor == NULL) {
+      set_error("Could not allocate gzip compressor");
+      return -1;
+    }
+  }
+
+  DumpWriter writer(new (std::nothrow) FileWriter(path), compressor);
+
   if (writer.error() != NULL) {
     set_error(writer.error());
     if (out != NULL) {
@@ -1976,8 +1981,7 @@
     VMThread::execute(&dumper);
   }
 
-  // close dump file and record any error that the writer may have encountered
-  writer.close();
+  // record any error that the writer may have encountered
   set_error(writer.error());
 
   // emit JFR event
@@ -2024,7 +2028,7 @@
 }
 
 // set the error string
-void HeapDumper::set_error(char* error) {
+void HeapDumper::set_error(char const* error) {
   if (_error != NULL) {
     os::free(_error);
   }
--- a/src/hotspot/share/services/heapDumper.hpp	Wed Jun 10 11:08:19 2020 +0100
+++ b/src/hotspot/share/services/heapDumper.hpp	Wed Jun 10 12:29:01 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,7 +55,7 @@
 
   // string representation of error
   char* error() const                   { return _error; }
-  void set_error(char* error);
+  void set_error(char const* error);
 
   // internal timer.
   elapsedTimer* timer()                 { return &_t; }
@@ -70,7 +70,8 @@
 
   // dumps the heap to the specified file, returns 0 if success.
   // additional info is written to out if not NULL.
-  int dump(const char* path, outputStream* out = NULL);
+  // compression >= 0 creates a gzipped file with the given compression level.
+  int dump(const char* path, outputStream* out = NULL, int compression = -1);
 
   // returns error message (resource allocated), or NULL if no error
   char* error_as_C_string() const;
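In-VM callers use the extended entry point like this (a sketch against the signature above; path, output stream, and level are illustrative):

    HeapDumper dumper(true /* request a GC before the dump */);
    // compression = 1 selects the fastest gzip level; -1 (the default) writes plain hprof.
    int res = dumper.dump("/tmp/heap.hprof.gz", tty, 1);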
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/services/heapDumperCompression.cpp	Wed Jun 10 12:29:01 2020 +0200
@@ -0,0 +1,474 @@
+/*
+ * Copyright (c) 2020 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jvm.h"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/os.inline.hpp"
+#include "runtime/thread.inline.hpp"
+#include "services/heapDumperCompression.hpp"
+
+
+char const* FileWriter::open_writer() {
+  assert(_fd < 0, "Must not already be open");
+
+  _fd = os::create_binary_file(_path, false);    // don't replace existing file
+
+  if (_fd < 0) {
+    return os::strerror(errno);
+  }
+
+  return NULL;
+}
+
+FileWriter::~FileWriter() {
+  if (_fd >= 0) {
+    os::close(_fd);
+    _fd = -1;
+  }
+}
+
+char const* FileWriter::write_buf(char* buf, ssize_t size) {
+  assert(_fd >= 0, "Must be open");
+  assert(size > 0, "Must write at least one byte");
+
+  ssize_t n = (ssize_t) os::write(_fd, buf, (uint) size);
+
+  if (n <= 0) {
+    return os::strerror(errno);
+  }
+
+  return NULL;
+}
+
+
+typedef char const* (*GzipInitFunc)(size_t, size_t*, size_t*, int);
+typedef size_t(*GzipCompressFunc)(char*, size_t, char*, size_t, char*, size_t,
+                                  int, char*, char const**);
+
+static GzipInitFunc gzip_init_func;
+static GzipCompressFunc gzip_compress_func;
+
+void* GZipCompressor::load_gzip_func(char const* name) {
+  char path[JVM_MAXPATHLEN];
+  char ebuf[1024];
+  void* handle;
+  MutexLocker locker(Zip_lock, Monitor::_no_safepoint_check_flag);
+
+  if (os::dll_locate_lib(path, sizeof(path), Arguments::get_dll_dir(), "zip")) {
+    handle = os::dll_load(path, ebuf, sizeof ebuf);
+
+    if (handle != NULL) {
+      return os::dll_lookup(handle, name);
+    }
+  }
+
+  return NULL;
+}
+
+char const* GZipCompressor::init(size_t block_size, size_t* needed_out_size,
+                                 size_t* needed_tmp_size) {
+  _block_size = block_size;
+  _is_first = true;
+
+  if (gzip_compress_func == NULL) {
+    gzip_compress_func = (GzipCompressFunc) load_gzip_func("ZIP_GZip_Fully");
+
+    if (gzip_compress_func == NULL) {
+      return "Cannot get ZIP_GZip_Fully function";
+    }
+  }
+
+  if (gzip_init_func == NULL) {
+    gzip_init_func = (GzipInitFunc) load_gzip_func("ZIP_GZip_InitParams");
+
+    if (gzip_init_func == NULL) {
+      return "Cannot get ZIP_GZip_InitParams function";
+    }
+  }
+
+  char const* result = gzip_init_func(block_size, needed_out_size,
+                                      needed_tmp_size, _level);
+  *needed_out_size += 1024; // Add extra space for the comment in the first chunk.
+
+  return result;
+}
+
+char const* GZipCompressor::compress(char* in, size_t in_size, char* out, size_t out_size,
+                                     char* tmp, size_t tmp_size, size_t* compressed_size) {
+  char const* msg = NULL;
+
+  if (_is_first) {
+    char buf[128];
+    // Write the block size used as a comment in the first gzip chunk, so the
+    // code used to read it later can make a good choice of the buffer sizes it uses.
+    jio_snprintf(buf, sizeof(buf), "HPROF BLOCKSIZE=" SIZE_FORMAT, _block_size);
+    *compressed_size = gzip_compress_func(in, in_size, out, out_size, tmp, tmp_size, _level,
+                                          buf, &msg);
+    _is_first = false;
+  } else {
+    *compressed_size = gzip_compress_func(in, in_size, out, out_size, tmp, tmp_size, _level,
+                                          NULL, &msg);
+  }
+
+  return msg;
+}
+
+WorkList::WorkList() {
+  _head._next = &_head;
+  _head._prev = &_head;
+}
+
+void WorkList::insert(WriteWork* before, WriteWork* work) {
+  work->_prev = before;
+  work->_next = before->_next;
+  before->_next = work;
+  work->_next->_prev = work;
+}
+
+WriteWork* WorkList::remove(WriteWork* work) {
+  if (work != NULL) {
+    assert(work->_next != work, "Invalid next");
+    assert(work->_prev != work, "Invalid prev");
+    work->_prev->_next = work->_next;
+    work->_next->_prev = work->_prev;
+    work->_next = NULL;
+    work->_prev = NULL;
+  }
+
+  return work;
+}
+
+void WorkList::add_by_id(WriteWork* work) {
+  if (is_empty()) {
+    add_first(work);
+  } else {
+    WriteWork* last_curr = &_head;
+    WriteWork* curr = _head._next;
+
+    while (curr->_id < work->_id) {
+      last_curr = curr;
+      curr = curr->_next;
+
+      if (curr == &_head) {
+        add_last(work);
+        return;
+      }
+    }
+
+    insert(last_curr, work);
+  }
+}
+
+
+
+CompressionBackend::CompressionBackend(AbstractWriter* writer,
+     AbstractCompressor* compressor, size_t block_size, size_t max_waste) :
+  _active(false),
+  _err(NULL),
+  _nr_of_threads(0),
+  _works_created(0),
+  _work_creation_failed(false),
+  _id_to_write(0),
+  _next_id(0),
+  _in_size(block_size),
+  _max_waste(max_waste),
+  _out_size(0),
+  _tmp_size(0),
+  _written(0),
+  _writer(writer),
+  _compressor(compressor),
+  _lock(new (std::nothrow) PaddedMonitor(Mutex::leaf, "HProf Compression Backend",
+    true, Mutex::_safepoint_check_never)) {
+  if (_writer == NULL) {
+    set_error("Could not allocate writer");
+  } else if (_lock == NULL) {
+    set_error("Could not allocate lock");
+  } else {
+    set_error(_writer->open_writer());
+  }
+
+  if (_compressor != NULL) {
+    set_error(_compressor->init(_in_size, &_out_size, &_tmp_size));
+  }
+
+  _current = allocate_work(_in_size, _out_size, _tmp_size);
+
+  if (_current == NULL) {
+    set_error("Could not allocate memory for buffer");
+  }
+
+  _active = (_err == NULL);
+}
+
+CompressionBackend::~CompressionBackend() {
+  assert(!_active, "Must not be active by now");
+  assert(_nr_of_threads == 0, "Must have no active threads");
+  assert(_to_compress.is_empty() && _finished.is_empty(), "Still work to do");
+
+  free_work_list(&_unused);
+  free_work(_current);
+  assert(_works_created == 0, "All work must have been freed");
+
+  delete _compressor;
+  delete _writer;
+  delete _lock;
+}
+
+void CompressionBackend::deactivate() {
+  assert(_active, "Must be active");
+
+  MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
+
+  // Make sure we write the last partially filled buffer.
+  if ((_current != NULL) && (_current->_in_used > 0)) {
+    _current->_id = _next_id++;
+    _to_compress.add_last(_current);
+    _current = NULL;
+    ml.notify_all();
+  }
+
+  // Wait for the threads to drain the compression work list.
+  while (!_to_compress.is_empty()) {
+    // If we have no threads, compress the current one itself.
+    if (_nr_of_threads == 0) {
+      MutexUnlocker mu(_lock, Mutex::_no_safepoint_check_flag);
+      thread_loop(true);
+    } else {
+      ml.wait();
+    }
+  }
+
+  _active = false;
+  ml.notify_all();
+}
+
+void CompressionBackend::thread_loop(bool single_run) {
+  // Register if this is a worker thread.
+  if (!single_run) {
+    MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
+    _nr_of_threads++;
+  }
+
+  while (true) {
+    WriteWork* work = get_work();
+
+    if (work == NULL) {
+      assert(!single_run, "Should never happen for single thread");
+      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
+      _nr_of_threads--;
+      assert(_nr_of_threads >= 0, "Too many threads finished");
+      ml.notify_all();
+
+      return;
+    } else {
+      do_compress(work);
+      finish_work(work);
+    }
+
+    if (single_run) {
+      return;
+    }
+  }
+}
+
+void CompressionBackend::set_error(char const* new_error) {
+  if ((new_error != NULL) && (_err == NULL)) {
+    _err = new_error;
+  }
+}
+
+WriteWork* CompressionBackend::allocate_work(size_t in_size, size_t out_size,
+                                             size_t tmp_size) {
+  WriteWork* result = (WriteWork*) os::malloc(sizeof(WriteWork), mtInternal);
+
+  if (result == NULL) {
+    _work_creation_failed = true;
+    return NULL;
+  }
+
+  _works_created++;
+  result->_in = (char*) os::malloc(in_size, mtInternal);
+  result->_in_max = in_size;
+  result->_in_used = 0;
+  result->_out = NULL;
+  result->_tmp = NULL;
+
+  if (result->_in == NULL) {
+    goto fail;
+  }
+
+  if (out_size > 0) {
+    result->_out = (char*) os::malloc(out_size, mtInternal);
+    result->_out_used = 0;
+    result->_out_max = out_size;
+
+    if (result->_out == NULL) {
+      goto fail;
+    }
+  }
+
+  if (tmp_size > 0) {
+    result->_tmp = (char*) os::malloc(tmp_size, mtInternal);
+    result->_tmp_max = tmp_size;
+
+    if (result->_tmp == NULL) {
+      goto fail;
+    }
+  }
+
+  return result;
+
+fail:
+  free_work(result);
+  _work_creation_failed = true;
+  return NULL;
+}
+
+void CompressionBackend::free_work(WriteWork* work) {
+  if (work != NULL) {
+    os::free(work->_in);
+    os::free(work->_out);
+    os::free(work->_tmp);
+    os::free(work);
+    --_works_created;
+  }
+}
+
+void CompressionBackend::free_work_list(WorkList* list) {
+  while (!list->is_empty()) {
+    free_work(list->remove_first());
+  }
+}
+
+WriteWork* CompressionBackend::get_work() {
+  MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
+
+  while (_active && _to_compress.is_empty()) {
+    ml.wait();
+  }
+
+  return _to_compress.remove_first();
+}
+
+void CompressionBackend::get_new_buffer(char** buffer, size_t* used, size_t* max) {
+  if (_active) {
+    MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
+
+    if (*used > 0) {
+      _current->_in_used += *used;
+
+      // Check if we do not waste more than _max_waste. If yes, write the buffer.
+      // Otherwise return the rest of the buffer as the new buffer.
+      if (_current->_in_max - _current->_in_used <= _max_waste) {
+        _current->_id = _next_id++;
+        _to_compress.add_last(_current);
+        _current = NULL;
+        ml.notify_all();
+      } else {
+        *buffer = _current->_in + _current->_in_used;
+        *used = 0;
+        *max = _current->_in_max - _current->_in_used;
+
+        return;
+      }
+    }
+
+    while ((_current == NULL) && _unused.is_empty() && _active) {
+      // Add more work objects if needed.
+      if (!_work_creation_failed && (_works_created <= _nr_of_threads)) {
+        WriteWork* work = allocate_work(_in_size, _out_size, _tmp_size);
+
+        if (work != NULL) {
+          _unused.add_first(work);
+        }
+      } else if (!_to_compress.is_empty() && (_nr_of_threads == 0)) {
+        // If we have no threads, compress the current one itself.
+        MutexUnlocker mu(_lock, Mutex::_no_safepoint_check_flag);
+        thread_loop(true);
+      } else {
+        ml.wait();
+      }
+    }
+
+    if (_current == NULL) {
+      _current = _unused.remove_first();
+    }
+
+    if (_current != NULL) {
+      _current->_in_used = 0;
+      _current->_out_used = 0;
+      *buffer = _current->_in;
+      *used = 0;
+      *max = _current->_in_max;
+
+      return;
+    }
+  }
+
+  *buffer = NULL;
+  *used = 0;
+  *max = 0;
+
+  return;
+}
+
+void CompressionBackend::do_compress(WriteWork* work) {
+  if (_compressor != NULL) {
+    char const* msg = _compressor->compress(work->_in, work->_in_used, work->_out,
+                                            work->_out_max, work->_tmp, _tmp_size,
+                                            &work->_out_used);
+
+    if (msg != NULL) {
+      MutexLocker ml(_lock, Mutex::_no_safepoint_check_flag);
+      set_error(msg);
+    }
+  }
+}
+
+void CompressionBackend::finish_work(WriteWork* work) {
+  MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
+
+  _finished.add_by_id(work);
+
+  // Write all finished works as far as we can.
+  while (!_finished.is_empty() && (_finished.first()->_id == _id_to_write)) {
+    WriteWork* to_write = _finished.remove_first();
+    size_t size = _compressor == NULL ? to_write->_in_used : to_write->_out_used;
+    char* p = _compressor == NULL ? to_write->_in : to_write->_out;
+    char const* msg = NULL;
+
+    if (_err == NULL) {
+      _written += size;
+      MutexUnlocker mu(_lock, Mutex::_no_safepoint_check_flag);
+      msg = _writer->write_buf(p, (ssize_t) size);
+    }
+
+    set_error(msg);
+    _unused.add_first(to_write);
+    _id_to_write++;
+  }
+
+  ml.notify_all();
+}
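Note the ordering discipline in finish_work(): works can finish compression out of order on different threads, but _finished keeps them sorted by id and only the work whose id equals _id_to_write is written, with the lock dropped around the actual write_buf() call. The output file therefore sees the chunks in exactly the order they were filled, which is what makes the parallel compression transparent to hprof readers.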
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/services/heapDumperCompression.hpp	Wed Jun 10 12:29:01 2020 +0200
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2020 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_SERVICES_HEAPDUMPERCOMPRESSION_HPP
+#define SHARE_SERVICES_HEAPDUMPERCOMPRESSION_HPP
+
+#include "memory/allocation.hpp"
+
+
+// Interface for a compression implementation.
+class AbstractCompressor : public CHeapObj<mtInternal> {
+public:
+  virtual ~AbstractCompressor() { }
+
+  // Initializes the compressor. Returns a static error message in case of an error.
+  // Otherwise initializes the needed out and tmp size for the given block size.
+  virtual char const* init(size_t block_size, size_t* needed_out_size,
+                           size_t* needed_tmp_size) = 0;
+
+  // Does the actual compression. Returns NULL on success and a static error
+  // message otherwise. Sets the 'compressed_size'.
+  virtual char const* compress(char* in, size_t in_size, char* out, size_t out_size,
+                               char* tmp, size_t tmp_size, size_t* compressed_size) = 0;
+};
+
+// Interface for a writer implementation.
+class AbstractWriter : public CHeapObj<mtInternal> {
+public:
+  virtual ~AbstractWriter() { }
+
+  // Opens the writer. Returns NULL on success and a static error message otherwise.
+  virtual char const* open_writer() = 0;
+
+  // Does the write. Returns NULL on success and a static error message otherwise.
+  virtual char const* write_buf(char* buf, ssize_t size) = 0;
+};
+
+
+// A writer for a file.
+class FileWriter : public AbstractWriter {
+private:
+  char const* _path;
+  int _fd;
+
+public:
+  FileWriter(char const* path) : _path(path), _fd(-1) { }
+
+  ~FileWriter();
+
+  // Opens the writer. Returns NULL on success and a static error message otherwise.
+  virtual char const* open_writer();
+
+  // Does the write. Returns NULL on success and a static error message otherwise.
+  virtual char const* write_buf(char* buf, ssize_t size);
+};
+
+
+// A compressor using the gzip format.
+class GZipCompressor : public AbstractCompressor {
+private:
+  int _level;
+  size_t _block_size;
+  bool _is_first;
+
+  void* load_gzip_func(char const* name);
+
+public:
+  GZipCompressor(int level) : _level(level), _block_size(0), _is_first(false) {
+  }
+
+  virtual char const* init(size_t block_size, size_t* needed_out_size,
+                           size_t* needed_tmp_size);
+
+  virtual char const* compress(char* in, size_t in_size, char* out, size_t out_size,
+                               char* tmp, size_t tmp_size, size_t* compressed_size);
+};
+
+
+// The data needed to write a single buffer (and compress it optionally).
+struct WriteWork {
+  // The id of the work.
+  int64_t _id;
+
+  // The input buffer where the raw data is
+  char* _in;
+  size_t _in_used;
+  size_t _in_max;
+
+  // The output buffer where the compressed data is. Is NULL when compression is disabled.
+  char* _out;
+  size_t _out_used;
+  size_t _out_max;
+
+  // The temporary space needed for compression. Is NULL when compression is disabled.
+  char* _tmp;
+  size_t _tmp_max;
+
+  // Used to link WriteWorks into lists.
+  WriteWork* _next;
+  WriteWork* _prev;
+};
+
+// A list for works.
+class WorkList {
+private:
+  WriteWork _head;
+
+  void insert(WriteWork* before, WriteWork* work);
+  WriteWork* remove(WriteWork* work);
+
+public:
+  WorkList();
+
+  // Return true if the list is empty.
+  bool is_empty() { return _head._next == &_head; }
+
+  // Adds to the beginning of the list.
+  void add_first(WriteWork* work) { insert(&_head, work); }
+
+  // Adds to the end of the list.
+  void add_last(WriteWork* work) { insert(_head._prev, work); }
+
+  // Adds so the ids are ordered.
+  void add_by_id(WriteWork* work);
+
+  // Returns the first element.
+  WriteWork* first() { return is_empty() ? NULL : _head._next; }
+
+  // Returns the last element.
+  WriteWork* last() { return is_empty() ? NULL : _head._prev; }
+
+  // Removes the first element. Returns NULL if empty.
+  WriteWork* remove_first() { return remove(first()); }
+
+  // Removes the last element. Returns NULL if empty.
+  WriteWork* remove_last() { return remove(last()); }
+};
+
+
+class Monitor;
+
+// This class is used by the DumpWriter class. It supplies the DumpWriter with
+// chunks of memory to write the heap dump data into. When the DumpWriter needs a
+// new memory chunk, it calls get_new_buffer(), which commits the old chunk used
+// and returns a new chunk. The old chunk is then added to a queue to be compressed
+// and then written in the background.
+class CompressionBackend : StackObj {
+  bool _active;
+  char const * _err;
+
+  int _nr_of_threads;
+  int _works_created;
+  bool _work_creation_failed;
+
+  int64_t _id_to_write;
+  int64_t _next_id;
+
+  size_t _in_size;
+  size_t _max_waste;
+  size_t _out_size;
+  size_t _tmp_size;
+
+  size_t _written;
+
+  AbstractWriter* const _writer;
+  AbstractCompressor* const _compressor;
+
+  Monitor* const _lock;
+
+  WriteWork* _current;
+  WorkList _to_compress;
+  WorkList _unused;
+  WorkList _finished;
+
+  void set_error(char const* new_error);
+
+  WriteWork* allocate_work(size_t in_size, size_t out_size, size_t tmp_size);
+  void free_work(WriteWork* work);
+  void free_work_list(WorkList* list);
+
+  WriteWork* get_work();
+  void do_compress(WriteWork* work);
+  void finish_work(WriteWork* work);
+
+public:
+  // compressor can be NULL if no compression is used.
+  // Takes ownership of the writer and compressor.
+  // block_size is the buffer size of a WriteWork.
+  // max_waste is the maximum number of bytes to leave
+  // empty in the buffer when it is written.
+  CompressionBackend(AbstractWriter* writer, AbstractCompressor* compressor,
+    size_t block_size, size_t max_waste);
+
+  ~CompressionBackend();
+
+  size_t get_written() const { return _written; }
+
+  char const* error() const { return _err; }
+
+  // Commits the old buffer (using the value in *used) and sets up a new one.
+  void get_new_buffer(char** buffer, size_t* used, size_t* max);
+
+  // The entry point for a worker thread. If single_run is true, we only handle one entry.
+  void thread_loop(bool single_run);
+
+  // Shuts down the backend, releasing all threads.
+  void deactivate();
+};
+
+
+#endif // SHARE_SERVICES_HEAPDUMPERCOMPRESSION_HPP
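get_new_buffer() is both commit and allocate: the caller reports in *used how much of the previous chunk it filled and receives a fresh (or recycled) chunk back. A sketch of a client under that contract (hypothetical client; the real one is DumpWriter::flush() in heapDumper.cpp, and have_more_data()/fill() are placeholders):

    char* buf = NULL;
    size_t used = 0;
    size_t max = 0;
    backend.get_new_buffer(&buf, &used, &max);   // used == 0: just fetches the first chunk
    while (have_more_data()) {
      used = fill(buf, max);                     // write up to 'max' bytes into the chunk
      backend.get_new_buffer(&buf, &used, &max); // commits 'used' bytes, returns a new chunk
    }
    backend.deactivate();                        // queue the last partial chunk, stop workers

A committed chunk is only queued for compression once its free tail is at most max_waste bytes; otherwise the same chunk comes straight back with the tail as the new buffer.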
--- a/src/java.base/share/native/libzip/zip_util.c	Wed Jun 10 11:08:19 2020 +0100
+++ b/src/java.base/share/native/libzip/zip_util.c	Wed Jun 10 12:29:01 2020 +0200
@@ -1596,3 +1596,109 @@
     inflateEnd(&strm);
     return JNI_TRUE;
 }
+
+static voidpf tracking_zlib_alloc(voidpf opaque, uInt items, uInt size) {
+  size_t* needed = (size_t*) opaque;
+  *needed += (size_t) items * (size_t) size;
+  return (voidpf) calloc((size_t) items, (size_t) size);
+}
+
+static void tracking_zlib_free(voidpf opaque, voidpf address) {
+  free((void*) address);
+}
+
+static voidpf zlib_block_alloc(voidpf opaque, uInt items, uInt size) {
+  char** range = (char**) opaque;
+  voidpf result = NULL;
+  size_t needed = (size_t) items * (size_t) size;
+
+  if (range[1] - range[0] >= (ptrdiff_t) needed) {
+    result = (voidpf) range[0];
+    range[0] += needed;
+  }
+
+  return result;
+}
+
+static void zlib_block_free(voidpf opaque, voidpf address) {
+  /* Nothing to do. */
+}
+
+static char const* deflateInit2Wrapper(z_stream* strm, int level) {
+  int err = deflateInit2(strm, level >= 0 && level <= 9 ? level : Z_DEFAULT_COMPRESSION,
+                         Z_DEFLATED, 31, 8, Z_DEFAULT_STRATEGY);
+  if (err == Z_MEM_ERROR) {
+    return "Out of memory in deflateInit2";
+  }
+
+  if (err != Z_OK) {
+    return "Internal error in deflateInit2";
+  }
+
+  return NULL;
+}
+
+JNIEXPORT char const*
+ZIP_GZip_InitParams(size_t inLen, size_t* outLen, size_t* tmpLen, int level) {
+  z_stream strm;
+  char const* errorMsg;
+  *tmpLen = 0;
+
+  memset(&strm, 0, sizeof(z_stream));
+  strm.zalloc = tracking_zlib_alloc;
+  strm.zfree = tracking_zlib_free;
+  strm.opaque = (voidpf) tmpLen;
+
+  errorMsg = deflateInit2Wrapper(&strm, level);
+
+  if (errorMsg == NULL) {
+    *outLen = (size_t) deflateBound(&strm, (uLong) inLen);
+    deflateEnd(&strm);
+  }
+
+  return errorMsg;
+}
+
+JNIEXPORT size_t
+ZIP_GZip_Fully(char* inBuf, size_t inLen, char* outBuf, size_t outLen, char* tmp, size_t tmpLen,
+               int level, char* comment, char const** pmsg) {
+  z_stream strm;
+  gz_header hdr;
+  int err;
+  char* block[] = {tmp, tmpLen + tmp};
+  size_t result = 0;
+
+  memset(&strm, 0, sizeof(z_stream));
+  strm.zalloc = zlib_block_alloc;
+  strm.zfree = zlib_block_free;
+  strm.opaque = (voidpf) block;
+
+  *pmsg = deflateInit2Wrapper(&strm, level);
+
+  if (*pmsg == NULL) {
+    strm.next_out = (Bytef *) outBuf;
+    strm.avail_out = (uInt) outLen;
+    strm.next_in = (Bytef *) inBuf;
+    strm.avail_in = (uInt) inLen;
+
+    if (comment != NULL) {
+      memset(&hdr, 0, sizeof(hdr));
+      hdr.comment = (Bytef*) comment;
+      deflateSetHeader(&strm, &hdr);
+    }
+
+    err = deflate(&strm, Z_FINISH);
+
+    if (err == Z_OK || err == Z_BUF_ERROR) {
+      *pmsg = "Buffer too small";
+    } else if (err != Z_STREAM_END) {
+      *pmsg = "Intern deflate error";
+    } else {
+      result = (size_t) strm.total_out;
+    }
+
+    deflateEnd(&strm);
+  }
+
+  return result;
+}
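The contract between the two exports, as GZipCompressor uses them: ZIP_GZip_InitParams is called once per block size to learn how big the output and scratch buffers must be, and ZIP_GZip_Fully then produces one self-contained gzip stream per block, satisfying zlib's allocations from the supplied scratch buffer (zlib_block_alloc), so nothing is malloc'ed on the compression path. A hedged sketch of a direct caller (buffer allocation elided, sizes illustrative):

    size_t out_len = 0, tmp_len = 0;
    char const* msg = ZIP_GZip_InitParams(block_size, &out_len, &tmp_len, 1);
    // ... allocate out[out_len] and tmp[tmp_len] once, then per block:
    size_t n = ZIP_GZip_Fully(in, in_used, out, out_len, tmp, tmp_len,
                              1 /* level */, NULL /* no comment */, &msg);
    // n == 0 with msg set signals an error; otherwise out[0..n) is one gzip member.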
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/dcmd/gc/HeapDumpCompressedTest.java	Wed Jun 10 12:29:01 2020 +0200
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2020 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.io.File;
+import java.nio.file.Files;
+import java.io.IOException;
+import java.util.List;
+
+import jdk.test.lib.hprof.HprofParser;
+import jdk.test.lib.hprof.parser.Reader;
+import jdk.test.lib.hprof.model.Snapshot;
+
+import jdk.test.lib.Asserts;
+import jdk.test.lib.dcmd.PidJcmdExecutor;
+import jdk.test.lib.process.OutputAnalyzer;
+
+/*
+ * @test
+ * @summary Test of diagnostic command GC.heap_dump with gzipped output (Serial, Parallel and G1)
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.compiler
+ *          java.management
+ *          jdk.internal.jvmstat/sun.jvmstat.monitor
+ * @run main/othervm -XX:+UseSerialGC HeapDumpCompressedTest
+ * @run main/othervm -XX:+UseParallelGC HeapDumpCompressedTest
+ * @run main/othervm -XX:+UseG1GC HeapDumpCompressedTest
+ */
+
+/*
+ * @test
+ * @requires vm.gc.Z
+ * @summary Test of diagnostic command GC.heap_dump with gzipped output (Z GC)
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.compiler
+ *          java.management
+ *          jdk.internal.jvmstat/sun.jvmstat.monitor
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseZGC HeapDumpCompressedTest
+ */
+
+/*
+ * @test
+ * @requires vm.gc.Shenandoah
+ * @summary Test of diagnostic command GC.heap_dump with gzipped output (Shenandoah GC)
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.compiler
+ *          java.management
+ *          jdk.internal.jvmstat/sun.jvmstat.monitor
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC HeapDumpCompressedTest
+ */
+
+/*
+ * @test
+ * @requires vm.gc.Epsilon
+ * @summary Test of diagnostic command GC.heap_dump with gzipped output (Epsilon GC)
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.compiler
+ *          java.management
+ *          jdk.internal.jvmstat/sun.jvmstat.monitor
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC HeapDumpCompressedTest
+ */
+
+public class HeapDumpCompressedTest {
+    public static HeapDumpCompressedTest ref;
+
+    public static void main(String[] args) throws Exception {
+        PidJcmdExecutor executor = new PidJcmdExecutor();
+        ref = new HeapDumpCompressedTest();
+        File dump = new File("jcmd.gc.heap_dump." + System.currentTimeMillis() + ".hprof.gz");
+
+        if (dump.exists()) {
+            dump.delete();
+        }
+
+        // Check we detect an invalid compression level.
+        OutputAnalyzer output = executor.execute("GC.heap_dump -gz=0 " +
+                                                  dump.getAbsolutePath());
+        output.shouldContain("Compression level out of range");
+
+        // Check we can create a gzipped dump.
+        output = executor.execute("GC.heap_dump -gz=1 " + dump.getAbsolutePath());
+        output.shouldContain("Heap dump file created");
+
+        // Check we detect an already present heap dump.
+        output = executor.execute("GC.heap_dump -gz=1 " + dump.getAbsolutePath());
+        output.shouldContain("Unable to create ");
+
+        verifyHeapDump(dump);
+        dump.delete();
+    }
+
+    private static void verifyHeapDump(File dump) throws Exception {
+
+        Asserts.assertTrue(dump.exists() && dump.isFile(),
+                           "Could not create dump file " + dump.getAbsolutePath());
+
+        try {
+            File out = HprofParser.parse(dump);
+
+            Asserts.assertTrue(out != null && out.exists() && out.isFile(),
+                               "Could not find hprof parser output file");
+            List<String> lines = Files.readAllLines(out.toPath());
+            Asserts.assertTrue(lines.size() > 0, "hprof parser output file is empty");
+            for (String line : lines) {
+                Asserts.assertFalse(line.matches(".*WARNING(?!.*Failed to resolve " +
+                                                 "object.*constantPoolOop.*).*"));
+            }
+
+            out.delete();
+        } catch (Exception e) {
+            e.printStackTrace();
+            Asserts.fail("Could not parse dump file " + dump.getAbsolutePath());
+        }
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/lib/jdk/test/lib/hprof/parser/GzipRandomAccess.java	Wed Jun 10 12:29:01 2020 +0200
@@ -0,0 +1,577 @@
+/*
+ * Copyright (c) 2020 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package jdk.test.lib.hprof.parser;
+
+import java.io.ByteArrayOutputStream;
+import java.io.Closeable;
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.RandomAccessFile;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.zip.DataFormatException;
+import java.util.zip.Inflater;
+
+public class GzipRandomAccess implements AutoCloseable, Closeable {
+    // A comparator which compares chunks by their file offset.
+    private static FileOffsetComparator fileOffsetComp = new FileOffsetComparator();
+
+    // A comparator which compares chunks by their offset.
+    private static OffsetComparator offsetComp = new OffsetComparator();
+
+    // The size to use when reading from the random access file.
+    private static final int READ_SIZE = 65536;
+
+    // The last used buffer.
+    private Buffer last;
+
+    // The underlying random access file to use.
+    private final RandomAccessFile raf;
+
+    // The length of the random access file.
+    private final long fileSize;
+
+    // The maximum size of a buffer cache.
+    private final int cacheSize;
+
+    // The maximum numbers of buffers to cache.
+    private final int maxCachedBuffers;
+
+    // A buffer used to read from the file.
+    private final byte[] in;
+
+    // A sorted list of the buffers we know so far.
+    private final ArrayList<Buffer> buffers;
+
+    // The inflater to use.
+    private final Inflater inf;
+
+    // The head of the list which contains the buffers with cached data.
+    private final Buffer cacheHead;
+
+    // The number of cached buffers in the list.
+    private int cachedBuffers;
+
+    // This is private to ensure we only handle the specific hprof gzip files
+    // written by the VM.
+    private GzipRandomAccess(String file, int bufferSize, int maxCachedBuffers)
+            throws IOException {
+        last = null;
+        raf = new RandomAccessFile(file, "r");
+        fileSize = raf.length();
+        this.cacheSize = bufferSize;
+        this.maxCachedBuffers = maxCachedBuffers;
+        cachedBuffers = 0;
+        in = new byte[READ_SIZE];
+        buffers = new ArrayList<>();
+        inf = new Inflater(true);
+        cacheHead = new Buffer(-1, -1);
+        cacheHead.next = cacheHead;
+        cacheHead.prev = cacheHead;
+        buffers.add(new Buffer(0, 0));
+    }
+
+    /**
+     * Clears the cache.
+     */
+    public synchronized void clearCache() {
+        while (cacheHead.next != cacheHead) {
+            assert cacheHead.next.cache != null;
+            Buffer buf = cacheHead.next;
+            remove(buf);
+            buf.cache = null;
+        }
+
+        last = null;
+        cachedBuffers = 0;
+    }
+
+    /**
+     * Returns an approximate file offset for the given offset. The return value should
+     * only be used for progress indication and the like. Note that you should only query
+     * offsets you've already read.
+     *
+     * @param offset The offset.
+     * @return The approximate file offset.
+     */
+    public synchronized long getFileOffset(long offset) {
+        int pos = Collections.binarySearch(buffers, new Buffer(0, offset), offsetComp);
+        int realPos = pos >= 0 ? pos : -pos - 2;
+
+        if (realPos >= buffers.size() - 1) {
+            return buffers.get(buffers.size() - 1).fileOffset;
+        }
+
+        // Assume uniform compression.
+        Buffer buf = buffers.get(realPos);
+        long diff = offset - buf.offset;
+        long bufFileEnd = buffers.get(realPos + 1).fileOffset;
+        long fileDiff = bufFileEnd - buf.fileOffset;
+        double filePerDiff = (double) Math.max(1, fileDiff) / Math.max(1, buf.cacheLen);
+
+        return buf.fileOffset + (long) (filePerDiff * diff);
+    }
+
+    /**
+     * @return Returns the size of the underlying file.
+     */
+    public long getFileSize() {
+        return fileSize;
+    }
+
+    /**
+     * Returns an {@link InputStream} to read from the given offset. Note
+     * that closing the input stream does not close the underlying
+     * {@link GzipRandomAccess} object.
+     *
+     * @param offset The offset.
+     * @return The input stream.
+     */
+    public InputStream asStream(long offset) {
+        return new InputStreamImpl(offset, this);
+    }
+
+    /**
+     * Returns a {@link ReadBuffer} which uses this object to do the actual
+     * operation. Note that closing the returned object closes the
+     * underlying {@link GzipRandomAccess} object.
+     *
+     * @return The {@link ReadBuffer}.
+     */
+    public ReadBuffer asFileBuffer() {
+        return new ReadBufferImpl(this);
+    }
+
+    /**
+     * Closes the object and clears the cache. The object is unusable after this
+     * call.
+     */
+    @Override
+    public synchronized void close() throws IOException {
+        clearCache();
+        buffers.clear();
+        raf.close();
+        inf.end();
+    }
+
+    /**
+     * Reads bytes from the gzip file.
+     *
+     * @param offset The offset from which to start the read.
+     * @param b The buffer to read into.
+     * @param off The offset in the buffer to use.
+     * @param len The number of bytes to read at most.
+     * @return The number of bytes read or -1 if we are at the end of the file.
+     * @throws IOException On error.
+     */
+    public synchronized int read(long offset, byte[] b, int off, int len) throws IOException {
+        Buffer buf = last;
+
+        while (buf == null || (buf.offset > offset) || (buf.offset + buf.cacheLen <= offset)) {
+            int pos = Collections.binarySearch(buffers, new Buffer(0, offset), offsetComp);
+            buf = buffers.get(pos >= 0 ? pos : -pos - 2);
+
+            if (buf.fileOffset >= fileSize) {
+                return -1;
+            }
+
+            if (buf.cache != null) {
+                // If already loaded, move to front of the cache list.
+                last = buf;
+
+                if (cacheHead.next != buf) {
+                    remove(buf);
+                    addFirst(buf);
+                }
+            } else {
+                try {
+                    // Note that the load will also add the following buffer to the list,
+                    // so the while loop will eventually terminate.
+                    loadBuffer(buf);
+                } catch (DataFormatException e) {
+                    throw new IOException(e);
+                }
+            }
+        }
+
+        int copyOffset = (int) (offset - buf.offset);
+        int toCopyMax = buf.cacheLen - copyOffset;
+        int toCopy = Math.min(toCopyMax, len);
+
+        if (toCopy <= 0) {
+            return -1;
+        }
+
+        System.arraycopy(buf.cache, copyOffset, b, off, toCopy);
+
+        return toCopy;
+    }
+
+    /**
+     * Returns the access object for the given file, or <code>null</code> if the
+     * file is not a gzipped hprof file with the "HPROF BLOCKSIZE" comment needed
+     * for random access.
+     *
+     * @param file The file name.
+     * @param cacheSizeInMB The size of the cache to use in megabytes.
+     * @return The access object or <code>null</code>.
+     */
+    public static GzipRandomAccess getAccess(String file, int cacheSizeInMB)
+            throws IOException {
+        try (RandomAccessFile raf = new RandomAccessFile(file, "r")) {
+            int header = raf.readInt();
+
+            if ((header >>> 8) != 0x1f8b08) {
+                // Not a gzip file using deflate.
+                return null;
+            }
+
+            if ((header & 16) == 0) {
+                // No comment field, so no block size to read.
+                return null;
+            }
+
+            raf.readInt(); // Timestamp.
+            raf.readChar(); // Extra flags and OS.
+
+            if ((header & 4) != 0) {
+                // Skip the extra field (2 byte little-endian length, then data).
+                raf.skipBytes(raf.read() + (raf.read() << 8));
+            }
+
+            // Skip the zero-terminated name.
+            if ((header & 8) != 0) {
+                while (raf.read() != 0) {
+                    // Read until the terminating 0.
+                }
+            }
+
+            // Read the comment.
+            ByteArrayOutputStream bos = new ByteArrayOutputStream();
+            int b;
+
+            while ((b = raf.read()) > 0) {
+                bos.write(b);
+            }
+
+            // Check if the block size is included in the comment.
+            String comment = bos.toString("UTF-8");
+            String expectedPrefix = "HPROF BLOCKSIZE=";
+
+            if (comment.startsWith(expectedPrefix)) {
+                String chunkSizeStr = comment.substring(expectedPrefix.length()).split(" ")[0];
+
+                try {
+                    int chunkSize = Integer.parseInt(chunkSizeStr);
+
+                    if (chunkSize > 0) {
+                        long nrOfChunks = Math.max(1000, cacheSizeInMB * 1024L * 1024L /
+                                                         chunkSize);
+
+                        return new GzipRandomAccess(file, chunkSize, (int) nrOfChunks);
+                    }
+                } catch (NumberFormatException e) {
+                    // Could not parse.
+                }
+            }
+        }
+
+        return null;
+    }
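+
+    // For example (illustrative): a dump whose first gzip member carries the
+    // comment "HPROF BLOCKSIZE=1048576" is opened with 1 MB chunks; with
+    // cacheSizeInMB == 16 this yields Math.max(1000, 16) == 1000 cached buffers.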
+
+    // Loads the content of a buffer. If this is the first time the buffer is
+    // loaded, the next buffer is added too (but not loaded).
+    private void loadBuffer(Buffer buf) throws IOException, DataFormatException {
+        // If we have used all caches, take a cache from the least recently used cached buffer.
+        if (cachedBuffers >= maxCachedBuffers) {
+            Buffer toRemove = cacheHead.prev;
+            remove(toRemove);
+            buf.cache = toRemove.cache;
+            toRemove.cache = null;
+        } else {
+            // Otherwise allocate a new cache.
+            buf.cache = new byte[cacheSize];
+            cachedBuffers += 1;
+        }
+
+        // Move to front of LRU list.
+        last = buf;
+        addFirst(buf);
+
+        // Fill the cache from the compressed file.
+        inf.reset();
+        raf.seek(buf.fileOffset);
+
+        int read = raf.read(in, 0, READ_SIZE);
+        int inCount = read;
+        int outCount = 0;
+
+        // Parse and skip the gzip header, doing at least some basic validation.
+        if (read < 4) {
+            throw new IOException("Missing data");
+        }
+
+        if ((in[0] != 0x1f) || ((in[1] & 0xff) != 0x8b)) {
+            throw new IOException("Missing gzip id");
+        }
+
+        if (in[2] != 8) {
+            throw new IOException("Only supports deflate");
+        }
+
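+        // Fixed gzip header (RFC 1952): 2 magic bytes, compression method, flags,
+        // 4 byte modification time, extra flags and OS byte = 10 bytes, followed
+        // by the optional fields handled below.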
+        int off = 10;
+
+        // Extra field (FEXTRA).
+        if ((in[3] & 4) != 0) {
+            int len = (in[off + 1] & 0xff) * 256 + (in[off] & 0xff);
+            off += 2 + len;
+        }
+
+        // Zero-terminated name (FNAME).
+        if ((in[3] & 8) != 0) {
+            int len = 0;
+
+            while (in[off + len] != 0) {
+                ++len;
+            }
+
+            off += len + 1;
+        }
+
+        // Zero-terminated comment (FCOMMENT).
+        if ((in[3] & 16) != 0) {
+            int len = 0;
+
+            while (in[off + len] != 0) {
+                ++len;
+            }
+
+            off += len + 1;
+        }
+
+        // Header CRC (FHCRC).
+        if ((in[3] & 2) != 0) {
+            off += 2;
+        }
+
+        inf.setInput(in, off, read - off);
+        outCount = inf.inflate(buf.cache, 0, buf.cache.length);
+
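+        // Inflater protocol: inflate() returns 0 and needsInput() turns true when
+        // the input buffer is exhausted; finished() turns true at the end of the
+        // deflate stream, with getRemaining() counting any unconsumed input bytes.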
+        while (!inf.finished()) {
+            if (inf.needsInput()) {
+                read = raf.read(in, 0, READ_SIZE);
+                inf.setInput(in, 0, read);
+                inCount += read;
+            }
+
+            outCount += inf.inflate(buf.cache, outCount, buf.cache.length - outCount);
+        }
+
+        // Add the following buffer too if this was not the last gzip member.
+        if ((inf.getRemaining() != 0) || (inCount + buf.fileOffset + 8 != fileSize)) {
+            long nextFileOffset = inCount - inf.getRemaining() + buf.fileOffset + 8 /* CRC32 + ISIZE trailer */;
+            long nextOffset = outCount + buf.offset;
+
+            Buffer nextChunk = new Buffer(nextFileOffset, nextOffset);
+            int pos = Collections.binarySearch(buffers, nextChunk, fileOffsetComp);
+
+            if (pos < 0) {
+                buffers.add(-pos - 1, nextChunk);
+            }
+        }
+
+        buf.cacheLen = outCount;
+    }
+
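+    // The cache list is circular and doubly linked, with 'cacheHead' serving as
+    // sentinel: cacheHead <-> most recent <-> ... <-> least recent <-> cacheHead.
+    // An empty list is represented by cacheHead.next == cacheHead.
+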
+    // Adds the buffer to the front of the LRU list.
+    private void addFirst(Buffer buf) {
+        assert buf.next == null;
+        assert buf.prev == null;
+        assert buf.cache != null;
+
+        if (cacheHead.prev == cacheHead) {
+            cacheHead.prev = buf;
+        }
+
+        cacheHead.next.prev = buf;
+        buf.next = cacheHead.next;
+        buf.prev = cacheHead;
+        cacheHead.next = buf;
+    }
+
+    // Removes the buffer from the LRU list.
+    private void remove(Buffer buf) {
+        assert buf.prev != null;
+        assert buf.next != null;
+        assert buf.cache != null;
+        assert cacheHead.prev != cacheHead;
+
+        buf.prev.next = buf.next;
+        buf.next.prev = buf.prev;
+        buf.next = null;
+        buf.prev = null;
+    }
+
+    // Represents a gzipped buffer. The gzipped hprof file consists of a list of these buffers.
+    private static class Buffer {
+        public byte[] cache;
+        public int cacheLen;
+        public final long fileOffset;
+        public final long offset;
+        public Buffer next;
+        public Buffer prev;
+
+        public Buffer(long fileOffset, long offset) {
+            this.cache = null;
+            this.cacheLen = 0;
+            this.fileOffset = fileOffset;
+            this.offset = offset;
+            this.next = null;
+            this.prev = null;
+        }
+    }
+
+    // Compares buffers by file offset.
+    private static class FileOffsetComparator implements Comparator<Buffer> {
+
+        @Override
+        public int compare(Buffer x, Buffer y) {
+            return Long.compare(x.fileOffset, y.fileOffset);
+        }
+    }
+
+    // Compares buffers by uncompressed offset.
+    private static class OffsetComparator implements Comparator<Buffer> {
+
+        @Override
+        public int compare(Buffer x, Buffer y) {
+            return Long.compare(x.offset, y.offset);
+        }
+    }
+
+    // Implements an InputStream for this object.
+    private static class InputStreamImpl extends InputStream {
+
+        private long offset;
+        private final GzipRandomAccess access;
+
+        public InputStreamImpl(long offset, GzipRandomAccess access) {
+            this.offset = offset;
+            this.access = access;
+        }
+
+        @Override
+        public synchronized int read(byte[] b, int off, int len) throws IOException {
+            int read = access.read(offset, b, off, len);
+
+            if (read > 0) {
+                this.offset += read;
+            }
+
+            return read;
+        }
+
+        @Override
+        public int read() throws IOException {
+            byte[] b = new byte[1];
+            int read = read(b, 0, 1);
+
+            if (read != 1) {
+                return -1;
+            }
+
+            return b[0] & 0xff;
+        }
+    }
+
+    // Implements a ReadBuffer for this object.
+    public static class ReadBufferImpl implements ReadBuffer {
+
+        private final GzipRandomAccess access;
+        private final byte[] tmp = new byte[8];
+
+        public ReadBufferImpl(GzipRandomAccess access) {
+            this.access = access;
+        }
+
+        private void readFully(long pos, int len) throws IOException {
+            int left = len;
+            int off = 0;
+
+            while (left > 0) {
+                int read = access.read(pos, tmp, off, left);
+
+                if (read <= 0) {
+                    throw new EOFException("Could not read at " + pos);
+                }
+
+                left -= read;
+                off += read;
+                pos += read;
+            }
+        }
+
+        private int readInt(int offset) {
+            return (((tmp[offset + 0] & 0xff) << 24) | ((tmp[offset + 1] & 0xff) << 16) |
+                     ((tmp[offset + 2] & 0xff) << 8) | (tmp[offset + 3] & 0xff));
+        }
+
+        @Override
+        public void close() throws Exception {
+            access.close();
+        }
+
+        @Override
+        public char getChar(long pos) throws IOException {
+            readFully(pos, 2);
+            return (char) (((tmp[0] & 0xff) << 8) | (tmp[1] & 0xff));
+        }
+
+        @Override
+        public byte getByte(long pos) throws IOException {
+            readFully(pos, 1);
+            return tmp[0];
+        }
+
+        @Override
+        public short getShort(long pos) throws IOException {
+            return (short) getChar(pos);
+        }
+
+        @Override
+        public int getInt(long pos) throws IOException {
+            readFully(pos, 4);
+            return readInt(0);
+        }
+
+        @Override
+        public long getLong(long pos) throws IOException {
+            readFully(pos, 8);
+            long i1 = readInt(0);
+            int i2 = readInt(4);
+
+            return (i1 << 32) | (i2 & 0xffffffffL);
+        }
+    }
+}
--- a/test/lib/jdk/test/lib/hprof/parser/HprofReader.java	Wed Jun 10 11:08:19 2020 +0100
+++ b/test/lib/jdk/test/lib/hprof/parser/HprofReader.java	Wed Jun 10 12:29:01 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -156,12 +156,11 @@
         return (numberRead == MAGIC_NUMBER);
     }
 
-    public HprofReader(String fileName, PositionDataInputStream in,
+    public HprofReader(ReadBuffer readBuffer, PositionDataInputStream in,
                        int dumpNumber, boolean callStack, int debugLevel)
                        throws IOException {
         super(in);
-        RandomAccessFile file = new RandomAccessFile(fileName, "r");
-        this.snapshot = new Snapshot(MappedReadBuffer.create(file));
+        this.snapshot = new Snapshot(readBuffer);
         this.dumpsToSkip = dumpNumber - 1;
         this.callStack = callStack;
         this.debugLevel = debugLevel;
@@ -175,6 +174,13 @@
         }
     }
 
+    public HprofReader(String fileName, PositionDataInputStream in,
+                       int dumpNumber, boolean callStack, int debugLevel)
+                       throws IOException {
+        this(MappedReadBuffer.create(new RandomAccessFile(fileName, "r")),
+            in, dumpNumber, callStack, debugLevel);
+    }
+
     public Snapshot read() throws IOException {
         currPos = 4;    // 4 because of the magic number
         version = readVersionHeader();
--- a/test/lib/jdk/test/lib/hprof/parser/Reader.java	Wed Jun 10 11:08:19 2020 +0100
+++ b/test/lib/jdk/test/lib/hprof/parser/Reader.java	Wed Jun 10 12:29:01 2020 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -81,6 +81,7 @@
             }
             heapFile = heapFile.substring(0, pos);
         }
+        GzipRandomAccess access = null;
         try (PositionDataInputStream in = new PositionDataInputStream(
                 new BufferedInputStream(new FileInputStream(heapFile)))) {
             int i = in.readInt();
@@ -89,6 +90,22 @@
                     = new HprofReader(heapFile, in, dumpNumber,
                                       callStack, debugLevel);
                 return r.read();
+            } else if ((access = GzipRandomAccess.getAccess(heapFile, 16)) != null) {
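+                // Not a plain hprof file: retry through the gzip wrapper with a
+                // 16 MB cache and re-check the magic number on the inflated data.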
+                in.close();
+                try (PositionDataInputStream in2 = new PositionDataInputStream(
+                        new BufferedInputStream(access.asStream(0)))) {
+                    i = in2.readInt();
+                    if (i == HprofReader.MAGIC_NUMBER) {
+                        Reader r
+                            = new HprofReader(access.asFileBuffer(), in2, dumpNumber,
+                                              callStack, debugLevel);
+                        return r.read();
+                    } else {
+                        throw new IOException("Wrong magic number in gzipped file: " + i);
+                    }
+                }
             } else {
                 throw new IOException("Unrecognized magic number: " + i);
             }